2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth HCI core. */
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <linux/crypto.h>
33 #include <asm/unaligned.h>
35 #include <net/bluetooth/bluetooth.h>
36 #include <net/bluetooth/hci_core.h>
37 #include <net/bluetooth/l2cap.h>
/* Work handlers, defined later in this file */
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);
56 /* ---- HCI notifications ---- */
/* Forward a device event to the HCI socket layer. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
63 /* ---- HCI debugfs entries ---- */
65 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
66 size_t count, loff_t *ppos)
68 struct hci_dev *hdev = file->private_data;
71 buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
74 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
77 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
78 size_t count, loff_t *ppos)
80 struct hci_dev *hdev = file->private_data;
83 size_t buf_size = min(count, (sizeof(buf)-1));
87 if (!test_bit(HCI_UP, &hdev->flags))
90 if (copy_from_user(buf, user_buf, buf_size))
94 if (strtobool(buf, &enable))
97 if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
102 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
105 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
107 hci_req_unlock(hdev);
112 err = -bt_to_errno(skb->data[0]);
118 change_bit(HCI_DUT_MODE, &hdev->dbg_flags);
123 static const struct file_operations dut_mode_fops = {
125 .read = dut_mode_read,
126 .write = dut_mode_write,
127 .llseek = default_llseek,
130 static int features_show(struct seq_file *f, void *ptr)
132 struct hci_dev *hdev = f->private;
136 for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
137 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
138 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
139 hdev->features[p][0], hdev->features[p][1],
140 hdev->features[p][2], hdev->features[p][3],
141 hdev->features[p][4], hdev->features[p][5],
142 hdev->features[p][6], hdev->features[p][7]);
144 if (lmp_le_capable(hdev))
145 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
146 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
147 hdev->le_features[0], hdev->le_features[1],
148 hdev->le_features[2], hdev->le_features[3],
149 hdev->le_features[4], hdev->le_features[5],
150 hdev->le_features[6], hdev->le_features[7]);
151 hci_dev_unlock(hdev);
156 static int features_open(struct inode *inode, struct file *file)
158 return single_open(file, features_show, inode->i_private);
161 static const struct file_operations features_fops = {
162 .open = features_open,
165 .release = single_release,
168 static int blacklist_show(struct seq_file *f, void *p)
170 struct hci_dev *hdev = f->private;
171 struct bdaddr_list *b;
174 list_for_each_entry(b, &hdev->blacklist, list)
175 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
176 hci_dev_unlock(hdev);
181 static int blacklist_open(struct inode *inode, struct file *file)
183 return single_open(file, blacklist_show, inode->i_private);
186 static const struct file_operations blacklist_fops = {
187 .open = blacklist_open,
190 .release = single_release,
193 static int uuids_show(struct seq_file *f, void *p)
195 struct hci_dev *hdev = f->private;
196 struct bt_uuid *uuid;
199 list_for_each_entry(uuid, &hdev->uuids, list) {
202 /* The Bluetooth UUID values are stored in big endian,
203 * but with reversed byte order. So convert them into
204 * the right order for the %pUb modifier.
206 for (i = 0; i < 16; i++)
207 val[i] = uuid->uuid[15 - i];
209 seq_printf(f, "%pUb\n", val);
211 hci_dev_unlock(hdev);
216 static int uuids_open(struct inode *inode, struct file *file)
218 return single_open(file, uuids_show, inode->i_private);
221 static const struct file_operations uuids_fops = {
225 .release = single_release,
228 static int inquiry_cache_show(struct seq_file *f, void *p)
230 struct hci_dev *hdev = f->private;
231 struct discovery_state *cache = &hdev->discovery;
232 struct inquiry_entry *e;
236 list_for_each_entry(e, &cache->all, all) {
237 struct inquiry_data *data = &e->data;
238 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
240 data->pscan_rep_mode, data->pscan_period_mode,
241 data->pscan_mode, data->dev_class[2],
242 data->dev_class[1], data->dev_class[0],
243 __le16_to_cpu(data->clock_offset),
244 data->rssi, data->ssp_mode, e->timestamp);
247 hci_dev_unlock(hdev);
252 static int inquiry_cache_open(struct inode *inode, struct file *file)
254 return single_open(file, inquiry_cache_show, inode->i_private);
257 static const struct file_operations inquiry_cache_fops = {
258 .open = inquiry_cache_open,
261 .release = single_release,
264 static int link_keys_show(struct seq_file *f, void *ptr)
266 struct hci_dev *hdev = f->private;
267 struct list_head *p, *n;
270 list_for_each_safe(p, n, &hdev->link_keys) {
271 struct link_key *key = list_entry(p, struct link_key, list);
272 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
273 HCI_LINK_KEY_SIZE, key->val, key->pin_len);
275 hci_dev_unlock(hdev);
280 static int link_keys_open(struct inode *inode, struct file *file)
282 return single_open(file, link_keys_show, inode->i_private);
285 static const struct file_operations link_keys_fops = {
286 .open = link_keys_open,
289 .release = single_release,
292 static int dev_class_show(struct seq_file *f, void *ptr)
294 struct hci_dev *hdev = f->private;
297 seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
298 hdev->dev_class[1], hdev->dev_class[0]);
299 hci_dev_unlock(hdev);
304 static int dev_class_open(struct inode *inode, struct file *file)
306 return single_open(file, dev_class_show, inode->i_private);
309 static const struct file_operations dev_class_fops = {
310 .open = dev_class_open,
313 .release = single_release,
316 static int voice_setting_get(void *data, u64 *val)
318 struct hci_dev *hdev = data;
321 *val = hdev->voice_setting;
322 hci_dev_unlock(hdev);
327 DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
328 NULL, "0x%4.4llx\n");
330 static int auto_accept_delay_set(void *data, u64 val)
332 struct hci_dev *hdev = data;
335 hdev->auto_accept_delay = val;
336 hci_dev_unlock(hdev);
341 static int auto_accept_delay_get(void *data, u64 *val)
343 struct hci_dev *hdev = data;
346 *val = hdev->auto_accept_delay;
347 hci_dev_unlock(hdev);
352 DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
353 auto_accept_delay_set, "%llu\n");
355 static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
356 size_t count, loff_t *ppos)
358 struct hci_dev *hdev = file->private_data;
361 buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
364 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
367 static ssize_t force_sc_support_write(struct file *file,
368 const char __user *user_buf,
369 size_t count, loff_t *ppos)
371 struct hci_dev *hdev = file->private_data;
373 size_t buf_size = min(count, (sizeof(buf)-1));
376 if (test_bit(HCI_UP, &hdev->flags))
379 if (copy_from_user(buf, user_buf, buf_size))
382 buf[buf_size] = '\0';
383 if (strtobool(buf, &enable))
386 if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
389 change_bit(HCI_FORCE_SC, &hdev->dbg_flags);
394 static const struct file_operations force_sc_support_fops = {
396 .read = force_sc_support_read,
397 .write = force_sc_support_write,
398 .llseek = default_llseek,
401 static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
402 size_t count, loff_t *ppos)
404 struct hci_dev *hdev = file->private_data;
407 buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
410 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
413 static const struct file_operations sc_only_mode_fops = {
415 .read = sc_only_mode_read,
416 .llseek = default_llseek,
419 static int idle_timeout_set(void *data, u64 val)
421 struct hci_dev *hdev = data;
423 if (val != 0 && (val < 500 || val > 3600000))
427 hdev->idle_timeout = val;
428 hci_dev_unlock(hdev);
433 static int idle_timeout_get(void *data, u64 *val)
435 struct hci_dev *hdev = data;
438 *val = hdev->idle_timeout;
439 hci_dev_unlock(hdev);
444 DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
445 idle_timeout_set, "%llu\n");
447 static int rpa_timeout_set(void *data, u64 val)
449 struct hci_dev *hdev = data;
451 /* Require the RPA timeout to be at least 30 seconds and at most
454 if (val < 30 || val > (60 * 60 * 24))
458 hdev->rpa_timeout = val;
459 hci_dev_unlock(hdev);
464 static int rpa_timeout_get(void *data, u64 *val)
466 struct hci_dev *hdev = data;
469 *val = hdev->rpa_timeout;
470 hci_dev_unlock(hdev);
475 DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
476 rpa_timeout_set, "%llu\n");
478 static int sniff_min_interval_set(void *data, u64 val)
480 struct hci_dev *hdev = data;
482 if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
486 hdev->sniff_min_interval = val;
487 hci_dev_unlock(hdev);
492 static int sniff_min_interval_get(void *data, u64 *val)
494 struct hci_dev *hdev = data;
497 *val = hdev->sniff_min_interval;
498 hci_dev_unlock(hdev);
503 DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
504 sniff_min_interval_set, "%llu\n");
506 static int sniff_max_interval_set(void *data, u64 val)
508 struct hci_dev *hdev = data;
510 if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
514 hdev->sniff_max_interval = val;
515 hci_dev_unlock(hdev);
520 static int sniff_max_interval_get(void *data, u64 *val)
522 struct hci_dev *hdev = data;
525 *val = hdev->sniff_max_interval;
526 hci_dev_unlock(hdev);
531 DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
532 sniff_max_interval_set, "%llu\n");
534 static int conn_info_min_age_set(void *data, u64 val)
536 struct hci_dev *hdev = data;
538 if (val == 0 || val > hdev->conn_info_max_age)
542 hdev->conn_info_min_age = val;
543 hci_dev_unlock(hdev);
548 static int conn_info_min_age_get(void *data, u64 *val)
550 struct hci_dev *hdev = data;
553 *val = hdev->conn_info_min_age;
554 hci_dev_unlock(hdev);
559 DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
560 conn_info_min_age_set, "%llu\n");
562 static int conn_info_max_age_set(void *data, u64 val)
564 struct hci_dev *hdev = data;
566 if (val == 0 || val < hdev->conn_info_min_age)
570 hdev->conn_info_max_age = val;
571 hci_dev_unlock(hdev);
576 static int conn_info_max_age_get(void *data, u64 *val)
578 struct hci_dev *hdev = data;
581 *val = hdev->conn_info_max_age;
582 hci_dev_unlock(hdev);
587 DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
588 conn_info_max_age_set, "%llu\n");
590 static int identity_show(struct seq_file *f, void *p)
592 struct hci_dev *hdev = f->private;
598 hci_copy_identity_address(hdev, &addr, &addr_type);
600 seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
601 16, hdev->irk, &hdev->rpa);
603 hci_dev_unlock(hdev);
608 static int identity_open(struct inode *inode, struct file *file)
610 return single_open(file, identity_show, inode->i_private);
613 static const struct file_operations identity_fops = {
614 .open = identity_open,
617 .release = single_release,
620 static int random_address_show(struct seq_file *f, void *p)
622 struct hci_dev *hdev = f->private;
625 seq_printf(f, "%pMR\n", &hdev->random_addr);
626 hci_dev_unlock(hdev);
631 static int random_address_open(struct inode *inode, struct file *file)
633 return single_open(file, random_address_show, inode->i_private);
636 static const struct file_operations random_address_fops = {
637 .open = random_address_open,
640 .release = single_release,
643 static int static_address_show(struct seq_file *f, void *p)
645 struct hci_dev *hdev = f->private;
648 seq_printf(f, "%pMR\n", &hdev->static_addr);
649 hci_dev_unlock(hdev);
654 static int static_address_open(struct inode *inode, struct file *file)
656 return single_open(file, static_address_show, inode->i_private);
659 static const struct file_operations static_address_fops = {
660 .open = static_address_open,
663 .release = single_release,
666 static ssize_t force_static_address_read(struct file *file,
667 char __user *user_buf,
668 size_t count, loff_t *ppos)
670 struct hci_dev *hdev = file->private_data;
673 buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
676 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
679 static ssize_t force_static_address_write(struct file *file,
680 const char __user *user_buf,
681 size_t count, loff_t *ppos)
683 struct hci_dev *hdev = file->private_data;
685 size_t buf_size = min(count, (sizeof(buf)-1));
688 if (test_bit(HCI_UP, &hdev->flags))
691 if (copy_from_user(buf, user_buf, buf_size))
694 buf[buf_size] = '\0';
695 if (strtobool(buf, &enable))
698 if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
701 change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);
706 static const struct file_operations force_static_address_fops = {
708 .read = force_static_address_read,
709 .write = force_static_address_write,
710 .llseek = default_llseek,
713 static int white_list_show(struct seq_file *f, void *ptr)
715 struct hci_dev *hdev = f->private;
716 struct bdaddr_list *b;
719 list_for_each_entry(b, &hdev->le_white_list, list)
720 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
721 hci_dev_unlock(hdev);
726 static int white_list_open(struct inode *inode, struct file *file)
728 return single_open(file, white_list_show, inode->i_private);
731 static const struct file_operations white_list_fops = {
732 .open = white_list_open,
735 .release = single_release,
738 static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
740 struct hci_dev *hdev = f->private;
741 struct list_head *p, *n;
744 list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
745 struct smp_irk *irk = list_entry(p, struct smp_irk, list);
746 seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
747 &irk->bdaddr, irk->addr_type,
748 16, irk->val, &irk->rpa);
750 hci_dev_unlock(hdev);
755 static int identity_resolving_keys_open(struct inode *inode, struct file *file)
757 return single_open(file, identity_resolving_keys_show,
761 static const struct file_operations identity_resolving_keys_fops = {
762 .open = identity_resolving_keys_open,
765 .release = single_release,
768 static int long_term_keys_show(struct seq_file *f, void *ptr)
770 struct hci_dev *hdev = f->private;
771 struct list_head *p, *n;
774 list_for_each_safe(p, n, &hdev->long_term_keys) {
775 struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
776 seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
777 <k->bdaddr, ltk->bdaddr_type, ltk->authenticated,
778 ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
779 __le64_to_cpu(ltk->rand), 16, ltk->val);
781 hci_dev_unlock(hdev);
786 static int long_term_keys_open(struct inode *inode, struct file *file)
788 return single_open(file, long_term_keys_show, inode->i_private);
791 static const struct file_operations long_term_keys_fops = {
792 .open = long_term_keys_open,
795 .release = single_release,
798 static int conn_min_interval_set(void *data, u64 val)
800 struct hci_dev *hdev = data;
802 if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
806 hdev->le_conn_min_interval = val;
807 hci_dev_unlock(hdev);
812 static int conn_min_interval_get(void *data, u64 *val)
814 struct hci_dev *hdev = data;
817 *val = hdev->le_conn_min_interval;
818 hci_dev_unlock(hdev);
823 DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
824 conn_min_interval_set, "%llu\n");
826 static int conn_max_interval_set(void *data, u64 val)
828 struct hci_dev *hdev = data;
830 if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
834 hdev->le_conn_max_interval = val;
835 hci_dev_unlock(hdev);
840 static int conn_max_interval_get(void *data, u64 *val)
842 struct hci_dev *hdev = data;
845 *val = hdev->le_conn_max_interval;
846 hci_dev_unlock(hdev);
851 DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
852 conn_max_interval_set, "%llu\n");
854 static int adv_channel_map_set(void *data, u64 val)
856 struct hci_dev *hdev = data;
858 if (val < 0x01 || val > 0x07)
862 hdev->le_adv_channel_map = val;
863 hci_dev_unlock(hdev);
868 static int adv_channel_map_get(void *data, u64 *val)
870 struct hci_dev *hdev = data;
873 *val = hdev->le_adv_channel_map;
874 hci_dev_unlock(hdev);
879 DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
880 adv_channel_map_set, "%llu\n");
882 static int le_auto_conn_show(struct seq_file *sf, void *ptr)
884 struct hci_dev *hdev = sf->private;
885 struct hci_conn_params *p;
889 list_for_each_entry(p, &hdev->le_conn_params, list) {
890 seq_printf(sf, "%pMR %u %u\n", &p->addr, p->addr_type,
894 hci_dev_unlock(hdev);
899 static int le_auto_conn_open(struct inode *inode, struct file *file)
901 return single_open(file, le_auto_conn_show, inode->i_private);
904 static ssize_t le_auto_conn_write(struct file *file, const char __user *data,
905 size_t count, loff_t *offset)
907 struct seq_file *sf = file->private_data;
908 struct hci_dev *hdev = sf->private;
916 /* Don't allow partial write */
923 buf = memdup_user(data, count);
927 if (memcmp(buf, "add", 3) == 0) {
928 n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu %hhu",
929 &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2],
930 &addr.b[1], &addr.b[0], &addr_type,
939 err = hci_conn_params_add(hdev, &addr, addr_type, auto_connect,
940 hdev->le_conn_min_interval,
941 hdev->le_conn_max_interval);
942 hci_dev_unlock(hdev);
946 } else if (memcmp(buf, "del", 3) == 0) {
947 n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu",
948 &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2],
949 &addr.b[1], &addr.b[0], &addr_type);
957 hci_conn_params_del(hdev, &addr, addr_type);
958 hci_dev_unlock(hdev);
959 } else if (memcmp(buf, "clr", 3) == 0) {
961 hci_conn_params_clear(hdev);
962 hci_dev_unlock(hdev);
976 static const struct file_operations le_auto_conn_fops = {
977 .open = le_auto_conn_open,
979 .write = le_auto_conn_write,
981 .release = single_release,
984 /* ---- HCI requests ---- */
986 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
988 BT_DBG("%s result 0x%2.2x", hdev->name, result);
990 if (hdev->req_status == HCI_REQ_PEND) {
991 hdev->req_result = result;
992 hdev->req_status = HCI_REQ_DONE;
993 wake_up_interruptible(&hdev->req_wait_q);
997 static void hci_req_cancel(struct hci_dev *hdev, int err)
999 BT_DBG("%s err 0x%2.2x", hdev->name, err);
1001 if (hdev->req_status == HCI_REQ_PEND) {
1002 hdev->req_result = err;
1003 hdev->req_status = HCI_REQ_CANCELED;
1004 wake_up_interruptible(&hdev->req_wait_q);
1008 static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
1011 struct hci_ev_cmd_complete *ev;
1012 struct hci_event_hdr *hdr;
1013 struct sk_buff *skb;
1017 skb = hdev->recv_evt;
1018 hdev->recv_evt = NULL;
1020 hci_dev_unlock(hdev);
1023 return ERR_PTR(-ENODATA);
1025 if (skb->len < sizeof(*hdr)) {
1026 BT_ERR("Too short HCI event");
1030 hdr = (void *) skb->data;
1031 skb_pull(skb, HCI_EVENT_HDR_SIZE);
1034 if (hdr->evt != event)
1039 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
1040 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
1044 if (skb->len < sizeof(*ev)) {
1045 BT_ERR("Too short cmd_complete event");
1049 ev = (void *) skb->data;
1050 skb_pull(skb, sizeof(*ev));
1052 if (opcode == __le16_to_cpu(ev->opcode))
1055 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
1056 __le16_to_cpu(ev->opcode));
1060 return ERR_PTR(-ENODATA);
1063 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
1064 const void *param, u8 event, u32 timeout)
1066 DECLARE_WAITQUEUE(wait, current);
1067 struct hci_request req;
1070 BT_DBG("%s", hdev->name);
1072 hci_req_init(&req, hdev);
1074 hci_req_add_ev(&req, opcode, plen, param, event);
1076 hdev->req_status = HCI_REQ_PEND;
1078 err = hci_req_run(&req, hci_req_sync_complete);
1080 return ERR_PTR(err);
1082 add_wait_queue(&hdev->req_wait_q, &wait);
1083 set_current_state(TASK_INTERRUPTIBLE);
1085 schedule_timeout(timeout);
1087 remove_wait_queue(&hdev->req_wait_q, &wait);
1089 if (signal_pending(current))
1090 return ERR_PTR(-EINTR);
1092 switch (hdev->req_status) {
1094 err = -bt_to_errno(hdev->req_result);
1097 case HCI_REQ_CANCELED:
1098 err = -hdev->req_result;
1106 hdev->req_status = hdev->req_result = 0;
1108 BT_DBG("%s end: err %d", hdev->name, err);
1111 return ERR_PTR(err);
1113 return hci_get_cmd_complete(hdev, opcode, event);
1115 EXPORT_SYMBOL(__hci_cmd_sync_ev);
1117 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
1118 const void *param, u32 timeout)
1120 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
1122 EXPORT_SYMBOL(__hci_cmd_sync);
1124 /* Execute request and wait for completion. */
1125 static int __hci_req_sync(struct hci_dev *hdev,
1126 void (*func)(struct hci_request *req,
1128 unsigned long opt, __u32 timeout)
1130 struct hci_request req;
1131 DECLARE_WAITQUEUE(wait, current);
1134 BT_DBG("%s start", hdev->name);
1136 hci_req_init(&req, hdev);
1138 hdev->req_status = HCI_REQ_PEND;
1142 err = hci_req_run(&req, hci_req_sync_complete);
1144 hdev->req_status = 0;
1146 /* ENODATA means the HCI request command queue is empty.
1147 * This can happen when a request with conditionals doesn't
1148 * trigger any commands to be sent. This is normal behavior
1149 * and should not trigger an error return.
1151 if (err == -ENODATA)
1157 add_wait_queue(&hdev->req_wait_q, &wait);
1158 set_current_state(TASK_INTERRUPTIBLE);
1160 schedule_timeout(timeout);
1162 remove_wait_queue(&hdev->req_wait_q, &wait);
1164 if (signal_pending(current))
1167 switch (hdev->req_status) {
1169 err = -bt_to_errno(hdev->req_result);
1172 case HCI_REQ_CANCELED:
1173 err = -hdev->req_result;
1181 hdev->req_status = hdev->req_result = 0;
1183 BT_DBG("%s end: err %d", hdev->name, err);
1188 static int hci_req_sync(struct hci_dev *hdev,
1189 void (*req)(struct hci_request *req,
1191 unsigned long opt, __u32 timeout)
1195 if (!test_bit(HCI_UP, &hdev->flags))
1198 /* Serialize all requests */
1200 ret = __hci_req_sync(hdev, req, opt, timeout);
1201 hci_req_unlock(hdev);
1206 static void hci_reset_req(struct hci_request *req, unsigned long opt)
1208 BT_DBG("%s %ld", req->hdev->name, opt);
1211 set_bit(HCI_RESET, &req->hdev->flags);
1212 hci_req_add(req, HCI_OP_RESET, 0, NULL);
1215 static void bredr_init(struct hci_request *req)
1217 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
1219 /* Read Local Supported Features */
1220 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1222 /* Read Local Version */
1223 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1225 /* Read BD Address */
1226 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1229 static void amp_init(struct hci_request *req)
1231 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
1233 /* Read Local Version */
1234 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1236 /* Read Local Supported Commands */
1237 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1239 /* Read Local Supported Features */
1240 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1242 /* Read Local AMP Info */
1243 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
1245 /* Read Data Blk size */
1246 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
1248 /* Read Flow Control Mode */
1249 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
1251 /* Read Location Data */
1252 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
1255 static void hci_init1_req(struct hci_request *req, unsigned long opt)
1257 struct hci_dev *hdev = req->hdev;
1259 BT_DBG("%s %ld", hdev->name, opt);
1262 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1263 hci_reset_req(req, 0);
1265 switch (hdev->dev_type) {
1275 BT_ERR("Unknown device type %d", hdev->dev_type);
1280 static void bredr_setup(struct hci_request *req)
1282 struct hci_dev *hdev = req->hdev;
1287 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
1288 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
1290 /* Read Class of Device */
1291 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
1293 /* Read Local Name */
1294 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
1296 /* Read Voice Setting */
1297 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
1299 /* Read Number of Supported IAC */
1300 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
1302 /* Read Current IAC LAP */
1303 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
1305 /* Clear Event Filters */
1306 flt_type = HCI_FLT_CLEAR_ALL;
1307 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
1309 /* Connection accept timeout ~20 secs */
1310 param = cpu_to_le16(0x7d00);
1311 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, ¶m);
1313 /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
1314 * but it does not support page scan related HCI commands.
1316 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
1317 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
1318 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
1322 static void le_setup(struct hci_request *req)
1324 struct hci_dev *hdev = req->hdev;
1326 /* Read LE Buffer Size */
1327 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
1329 /* Read LE Local Supported Features */
1330 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
1332 /* Read LE Supported States */
1333 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
1335 /* Read LE Advertising Channel TX Power */
1336 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
1338 /* Read LE White List Size */
1339 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
1341 /* Clear LE White List */
1342 hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
1344 /* LE-only controllers have LE implicitly enabled */
1345 if (!lmp_bredr_capable(hdev))
1346 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1349 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1351 if (lmp_ext_inq_capable(hdev))
1354 if (lmp_inq_rssi_capable(hdev))
1357 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1358 hdev->lmp_subver == 0x0757)
1361 if (hdev->manufacturer == 15) {
1362 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1364 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1366 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1370 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1371 hdev->lmp_subver == 0x1805)
1377 static void hci_setup_inquiry_mode(struct hci_request *req)
1381 mode = hci_get_inquiry_mode(req->hdev);
1383 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
1386 static void hci_setup_event_mask(struct hci_request *req)
1388 struct hci_dev *hdev = req->hdev;
1390 /* The second byte is 0xff instead of 0x9f (two reserved bits
1391 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
1392 * command otherwise.
1394 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
1396 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
1397 * any event mask for pre 1.2 devices.
1399 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1402 if (lmp_bredr_capable(hdev)) {
1403 events[4] |= 0x01; /* Flow Specification Complete */
1404 events[4] |= 0x02; /* Inquiry Result with RSSI */
1405 events[4] |= 0x04; /* Read Remote Extended Features Complete */
1406 events[5] |= 0x08; /* Synchronous Connection Complete */
1407 events[5] |= 0x10; /* Synchronous Connection Changed */
1409 /* Use a different default for LE-only devices */
1410 memset(events, 0, sizeof(events));
1411 events[0] |= 0x10; /* Disconnection Complete */
1412 events[0] |= 0x80; /* Encryption Change */
1413 events[1] |= 0x08; /* Read Remote Version Information Complete */
1414 events[1] |= 0x20; /* Command Complete */
1415 events[1] |= 0x40; /* Command Status */
1416 events[1] |= 0x80; /* Hardware Error */
1417 events[2] |= 0x04; /* Number of Completed Packets */
1418 events[3] |= 0x02; /* Data Buffer Overflow */
1419 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1422 if (lmp_inq_rssi_capable(hdev))
1423 events[4] |= 0x02; /* Inquiry Result with RSSI */
1425 if (lmp_sniffsubr_capable(hdev))
1426 events[5] |= 0x20; /* Sniff Subrating */
1428 if (lmp_pause_enc_capable(hdev))
1429 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1431 if (lmp_ext_inq_capable(hdev))
1432 events[5] |= 0x40; /* Extended Inquiry Result */
1434 if (lmp_no_flush_capable(hdev))
1435 events[7] |= 0x01; /* Enhanced Flush Complete */
1437 if (lmp_lsto_capable(hdev))
1438 events[6] |= 0x80; /* Link Supervision Timeout Changed */
1440 if (lmp_ssp_capable(hdev)) {
1441 events[6] |= 0x01; /* IO Capability Request */
1442 events[6] |= 0x02; /* IO Capability Response */
1443 events[6] |= 0x04; /* User Confirmation Request */
1444 events[6] |= 0x08; /* User Passkey Request */
1445 events[6] |= 0x10; /* Remote OOB Data Request */
1446 events[6] |= 0x20; /* Simple Pairing Complete */
1447 events[7] |= 0x04; /* User Passkey Notification */
1448 events[7] |= 0x08; /* Keypress Notification */
1449 events[7] |= 0x10; /* Remote Host Supported
1450 * Features Notification
1454 if (lmp_le_capable(hdev))
1455 events[7] |= 0x20; /* LE Meta-Event */
1457 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
1459 if (lmp_le_capable(hdev)) {
1460 memset(events, 0, sizeof(events));
1462 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
1463 sizeof(events), events);
/* Stage-2 controller init: queue the event mask setup, Read Local
 * Supported Commands, SSP/EIR configuration, inquiry mode, inquiry TX
 * power, extended feature reads and (if link security is on)
 * authentication enable.  Runs as an hci_request builder; @opt is unused.
 * NOTE(review): several physical lines (braces and local declarations
 * such as "mode"/"enable") appear elided in this copy — verify against
 * upstream before relying on exact control flow.
 */
1467 static void hci_init2_req(struct hci_request *req, unsigned long opt)
1469 struct hci_dev *hdev = req->hdev;
1471 if (lmp_bredr_capable(hdev))
1474 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
1476 if (lmp_le_capable(hdev))
1479 hci_setup_event_mask(req);
1481 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1482 * local supported commands HCI command.
1484 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
1485 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1487 if (lmp_ssp_capable(hdev)) {
1488 /* When SSP is available, then the host features page
1489 * should also be available as well. However some
1490 * controllers list the max_page as 0 as long as SSP
1491 * has not been enabled. To achieve proper debugging
1492 * output, force the minimum max_page to 1 at least.
1494 hdev->max_page = 0x01;
1496 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1498 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1499 sizeof(mode), &mode);
/* SSP not enabled by the host: clear any stale EIR data instead */
1501 struct hci_cp_write_eir cp;
1503 memset(hdev->eir, 0, sizeof(hdev->eir));
1504 memset(&cp, 0, sizeof(cp));
1506 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
1510 if (lmp_inq_rssi_capable(hdev))
1511 hci_setup_inquiry_mode(req);
1513 if (lmp_inq_tx_pwr_capable(hdev))
1514 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
1516 if (lmp_ext_feat_capable(hdev)) {
1517 struct hci_cp_read_local_ext_features cp;
1520 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1524 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1526 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
/* Build the default link policy bitmask from the controller's LMP
 * features (role switch, hold, sniff, park) and queue a
 * Write Default Link Policy Settings command.
 */
1531 static void hci_setup_link_policy(struct hci_request *req)
1533 struct hci_dev *hdev = req->hdev;
1534 struct hci_cp_write_def_link_policy cp;
1535 u16 link_policy = 0;
1537 if (lmp_rswitch_capable(hdev))
1538 link_policy |= HCI_LP_RSWITCH;
1539 if (lmp_hold_capable(hdev))
1540 link_policy |= HCI_LP_HOLD;
1541 if (lmp_sniff_capable(hdev))
1542 link_policy |= HCI_LP_SNIFF;
1543 if (lmp_park_capable(hdev))
1544 link_policy |= HCI_LP_PARK;
/* HCI parameters are little-endian on the wire */
1546 cp.policy = cpu_to_le16(link_policy);
1547 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
/* Queue Write LE Host Supported when the host's desired LE state
 * differs from what the controller currently reports.  Skipped for
 * LE-only controllers, which have no explicit enablement.
 */
1550 static void hci_set_le_support(struct hci_request *req)
1552 struct hci_dev *hdev = req->hdev;
1553 struct hci_cp_write_le_host_supported cp;
1555 /* LE-only devices do not support explicit enablement */
1556 if (!lmp_bredr_capable(hdev))
1559 memset(&cp, 0, sizeof(cp));
1561 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1563 cp.simul = lmp_le_br_capable(hdev);
/* Only send the command when the host LE setting actually changes */
1566 if (cp.le != lmp_host_le_capable(hdev))
1567 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
/* Build and queue the second page of the HCI event mask: CSB
 * master/slave events and Authenticated Payload Timeout Expired,
 * each enabled only when the matching LMP feature is present.
 */
1571 static void hci_set_event_mask_page_2(struct hci_request *req)
1573 struct hci_dev *hdev = req->hdev;
1574 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1576 /* If Connectionless Slave Broadcast master role is supported
1577 * enable all necessary events for it.
1579 if (lmp_csb_master_capable(hdev)) {
1580 events[1] |= 0x40; /* Triggered Clock Capture */
1581 events[1] |= 0x80; /* Synchronization Train Complete */
1582 events[2] |= 0x10; /* Slave Page Response Timeout */
1583 events[2] |= 0x20; /* CSB Channel Map Change */
1586 /* If Connectionless Slave Broadcast slave role is supported
1587 * enable all necessary events for it.
1589 if (lmp_csb_slave_capable(hdev)) {
1590 events[2] |= 0x01; /* Synchronization Train Received */
1591 events[2] |= 0x02; /* CSB Receive */
1592 events[2] |= 0x04; /* CSB Timeout */
1593 events[2] |= 0x08; /* Truncated Page Complete */
1596 /* Enable Authenticated Payload Timeout Expired event if supported */
1597 if (lmp_ping_capable(hdev))
1600 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
/* Stage-3 controller init: delete stored link keys (when supported and
 * not quirked off), set the default link policy, configure host LE
 * support and read local extended feature pages 2..max_page.
 */
1603 static void hci_init3_req(struct hci_request *req, unsigned long opt)
1605 struct hci_dev *hdev = req->hdev;
1608 /* Some Broadcom based Bluetooth controllers do not support the
1609 * Delete Stored Link Key command. They are clearly indicating its
1610 * absence in the bit mask of supported commands.
1612 * Check the supported commands and only if the command is marked
1613 * as supported send it. If not supported assume that the controller
1614 * does not have actual support for stored link keys which makes this
1615 * command redundant anyway.
1617 * Some controllers indicate that they support handling deleting
1618 * stored link keys, but they don't. The quirk lets a driver
1619 * just disable this command.
1621 if (hdev->commands[6] & 0x80 &&
1622 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
1623 struct hci_cp_delete_stored_link_key cp;
1625 bacpy(&cp.bdaddr, BDADDR_ANY);
1626 cp.delete_all = 0x01;
1627 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
/* Octet 5 bit 4 of the supported-commands mask: Write Default Link Policy */
1631 if (hdev->commands[5] & 0x10)
1632 hci_setup_link_policy(req);
1634 if (lmp_le_capable(hdev))
1635 hci_set_le_support(req);
1637 /* Read features beyond page 1 if available */
1638 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1639 struct hci_cp_read_local_ext_features cp;
1642 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
/* Stage-4 controller init: event mask page 2, Synchronization Train
 * parameters, and Secure Connections support when the controller is
 * capable (or SC is force-enabled for debugging) and SC is configured.
 */
1647 static void hci_init4_req(struct hci_request *req, unsigned long opt)
1649 struct hci_dev *hdev = req->hdev;
1651 /* Set event mask page 2 if the HCI command for it is supported */
1652 if (hdev->commands[22] & 0x04)
1653 hci_set_event_mask_page_2(req);
1655 /* Check for Synchronization Train support */
1656 if (lmp_sync_train_capable(hdev))
1657 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
1659 /* Enable Secure Connections if supported and configured */
1660 if ((lmp_sc_capable(hdev) ||
1661 test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
1662 test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1664 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1665 sizeof(support), &support);
/* Run the four synchronous init request stages against the controller
 * and, during the initial HCI_SETUP phase only, create the per-device
 * debugfs entries (common, BR/EDR, SSP, sniff and LE groups).
 * AMP controllers only run stage 1; stages 2-4 are BR/EDR/LE only.
 * Returns 0 on success or a negative errno from __hci_req_sync().
 */
1669 static int __hci_init(struct hci_dev *hdev)
1673 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1677 /* The Device Under Test (DUT) mode is special and available for
1678 * all controller types. So just create it early on.
1680 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1681 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1685 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
1686 * BR/EDR/LE type controllers. AMP controllers only need the
1689 if (hdev->dev_type != HCI_BREDR)
1692 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1696 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1700 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1704 /* Only create debugfs entries during the initial setup
1705 * phase and not every time the controller gets powered on.
1707 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
/* Entries common to all controller types */
1710 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1712 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1713 &hdev->manufacturer);
1714 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1715 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1716 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1718 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1720 debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1721 &conn_info_min_age_fops);
1722 debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1723 &conn_info_max_age_fops);
/* BR/EDR-only entries */
1725 if (lmp_bredr_capable(hdev)) {
1726 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1727 hdev, &inquiry_cache_fops);
1728 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1729 hdev, &link_keys_fops);
1730 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1731 hdev, &dev_class_fops);
1732 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1733 hdev, &voice_setting_fops);
/* Simple Pairing entries */
1736 if (lmp_ssp_capable(hdev)) {
1737 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1738 hdev, &auto_accept_delay_fops);
1739 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1740 hdev, &force_sc_support_fops);
1741 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1742 hdev, &sc_only_mode_fops);
/* Sniff mode tuning entries */
1745 if (lmp_sniff_capable(hdev)) {
1746 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1747 hdev, &idle_timeout_fops);
1748 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1749 hdev, &sniff_min_interval_fops);
1750 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1751 hdev, &sniff_max_interval_fops);
/* LE-only entries */
1754 if (lmp_le_capable(hdev)) {
1755 debugfs_create_file("identity", 0400, hdev->debugfs,
1756 hdev, &identity_fops);
1757 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1758 hdev, &rpa_timeout_fops);
1759 debugfs_create_file("random_address", 0444, hdev->debugfs,
1760 hdev, &random_address_fops);
1761 debugfs_create_file("static_address", 0444, hdev->debugfs,
1762 hdev, &static_address_fops);
1764 /* For controllers with a public address, provide a debug
1765 * option to force the usage of the configured static
1766 * address. By default the public address is used.
1768 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1769 debugfs_create_file("force_static_address", 0644,
1770 hdev->debugfs, hdev,
1771 &force_static_address_fops);
1773 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1774 &hdev->le_white_list_size);
1775 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1777 debugfs_create_file("identity_resolving_keys", 0400,
1778 hdev->debugfs, hdev,
1779 &identity_resolving_keys_fops);
1780 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1781 hdev, &long_term_keys_fops);
1782 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1783 hdev, &conn_min_interval_fops);
1784 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1785 hdev, &conn_max_interval_fops);
1786 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1787 hdev, &adv_channel_map_fops);
1788 debugfs_create_file("le_auto_conn", 0644, hdev->debugfs, hdev,
1789 &le_auto_conn_fops);
1790 debugfs_create_u16("discov_interleaved_timeout", 0644,
1792 &hdev->discov_interleaved_timeout);
/* Request builder: write the scan enable setting (inquiry/page scan)
 * passed in via @opt.  NOTE(review): the local "scan" declaration is
 * elided in this copy.
 */
1798 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1802 BT_DBG("%s %x", req->hdev->name, scan);
1804 /* Inquiry and Page scans */
1805 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* Request builder: write the authentication enable setting from @opt. */
1808 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1812 BT_DBG("%s %x", req->hdev->name, auth);
1814 /* Authentication */
1815 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
/* Request builder: write the encryption mode setting from @opt. */
1818 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1822 BT_DBG("%s %x", req->hdev->name, encrypt);
1825 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
/* Request builder: write the default link policy from @opt
 * (converted to little-endian for the wire).
 */
1828 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1830 __le16 policy = cpu_to_le16(opt);
1832 BT_DBG("%s %x", req->hdev->name, policy);
1834 /* Default link policy */
1835 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1838 /* Get HCI device by index.
1839 * Device is held on return. */
/* Look up a registered hci_dev by numeric index under the device-list
 * read lock.  On a match the device's refcount is taken via
 * hci_dev_hold(); the caller must drop it with hci_dev_put().
 */
1840 struct hci_dev *hci_dev_get(int index)
1842 struct hci_dev *hdev = NULL, *d;
1844 BT_DBG("%d", index);
1849 read_lock(&hci_dev_list_lock);
1850 list_for_each_entry(d, &hci_dev_list, list) {
1851 if (d->id == index) {
1852 hdev = hci_dev_hold(d);
1856 read_unlock(&hci_dev_list_lock);
1860 /* ---- Inquiry support ---- */
/* True while a discovery procedure is in a live phase
 * (FINDING or RESOLVING); all other states count as inactive.
 */
1862 bool hci_discovery_active(struct hci_dev *hdev)
1864 struct discovery_state *discov = &hdev->discovery;
1866 switch (discov->state) {
1867 case DISCOVERY_FINDING:
1868 case DISCOVERY_RESOLVING:
/* Transition the discovery state machine and emit the matching mgmt
 * "discovering" events: 0 when stopping (unless we never left
 * STARTING), 1 when FINDING begins.  STOPPED also re-evaluates the
 * background scan.  No-op when the state is unchanged.
 */
1876 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1878 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1880 if (hdev->discovery.state == state)
1884 case DISCOVERY_STOPPED:
1885 hci_update_background_scan(hdev);
1887 if (hdev->discovery.state != DISCOVERY_STARTING)
1888 mgmt_discovering(hdev, 0);
1890 case DISCOVERY_STARTING:
1892 case DISCOVERY_FINDING:
1893 mgmt_discovering(hdev, 1);
1895 case DISCOVERY_RESOLVING:
1897 case DISCOVERY_STOPPING:
1901 hdev->discovery.state = state;
/* Drop every entry from the inquiry cache and reset the unknown/
 * resolve sub-lists.  Caller must hold the hdev lock.
 */
1904 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1906 struct discovery_state *cache = &hdev->discovery;
1907 struct inquiry_entry *p, *n;
/* _safe variant: entries are freed while walking the list */
1909 list_for_each_entry_safe(p, n, &cache->all, all) {
1914 INIT_LIST_HEAD(&cache->unknown);
1915 INIT_LIST_HEAD(&cache->resolve);
/* Find an inquiry cache entry by Bluetooth address on the "all" list. */
1918 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1921 struct discovery_state *cache = &hdev->discovery;
1922 struct inquiry_entry *e;
1924 BT_DBG("cache %p, %pMR", cache, bdaddr);
1926 list_for_each_entry(e, &cache->all, all) {
1927 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Find an entry on the "unknown name" sub-list by Bluetooth address. */
1934 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1937 struct discovery_state *cache = &hdev->discovery;
1938 struct inquiry_entry *e;
1940 BT_DBG("cache %p, %pMR", cache, bdaddr);
1942 list_for_each_entry(e, &cache->unknown, list) {
1943 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Find an entry on the name-resolve sub-list.  BDADDR_ANY matches the
 * first entry whose name_state equals @state; otherwise match by
 * exact Bluetooth address.
 */
1950 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1954 struct discovery_state *cache = &hdev->discovery;
1955 struct inquiry_entry *e;
1957 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1959 list_for_each_entry(e, &cache->resolve, list) {
1960 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1962 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Re-insert @ie on the resolve list keeping it sorted: entries with
 * pending name lookups stay ahead, then descending |RSSI| so the
 * strongest signals are resolved first.
 */
1969 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
1970 struct inquiry_entry *ie)
1972 struct discovery_state *cache = &hdev->discovery;
1973 struct list_head *pos = &cache->resolve;
1974 struct inquiry_entry *p;
1976 list_del(&ie->list);
1978 list_for_each_entry(p, &cache->resolve, list) {
1979 if (p->name_state != NAME_PENDING &&
1980 abs(p->data.rssi) >= abs(ie->data.rssi))
1985 list_add(&ie->list, pos);
/* Insert or refresh an inquiry result in the cache.  Updates the
 * resolve-list ordering when an existing entry's RSSI changes while a
 * name lookup is still needed, allocates a new entry otherwise, and
 * maintains the unknown-name sub-list.  *ssp is set from the result's
 * SSP mode.  Return value reflects whether a name request is (still)
 * needed for this device.  Caller must hold the hdev lock.
 */
1988 bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1989 bool name_known, bool *ssp)
1991 struct discovery_state *cache = &hdev->discovery;
1992 struct inquiry_entry *ie;
1994 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
/* A fresh inquiry result invalidates any stored OOB data */
1996 hci_remove_remote_oob_data(hdev, &data->bdaddr);
1998 *ssp = data->ssp_mode;
2000 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
2002 if (ie->data.ssp_mode)
2005 if (ie->name_state == NAME_NEEDED &&
2006 data->rssi != ie->data.rssi) {
2007 ie->data.rssi = data->rssi;
2008 hci_inquiry_cache_update_resolve(hdev, ie);
2014 /* Entry not in the cache. Add new one. */
2015 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
2019 list_add(&ie->all, &cache->all);
2022 ie->name_state = NAME_KNOWN;
2024 ie->name_state = NAME_NOT_KNOWN;
2025 list_add(&ie->list, &cache->unknown);
/* Name became known for an existing entry: drop it from "unknown" */
2029 if (name_known && ie->name_state != NAME_KNOWN &&
2030 ie->name_state != NAME_PENDING) {
2031 ie->name_state = NAME_KNOWN;
2032 list_del(&ie->list);
2035 memcpy(&ie->data, data, sizeof(*data));
2036 ie->timestamp = jiffies;
2037 cache->timestamp = jiffies;
2039 if (ie->name_state == NAME_NOT_KNOWN)
/* Copy up to @num cached inquiry results into @buf as an array of
 * struct inquiry_info, returning the number copied.  Runs without
 * sleeping; callers copy the buffer to user space afterwards.
 */
2045 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2047 struct discovery_state *cache = &hdev->discovery;
2048 struct inquiry_info *info = (struct inquiry_info *) buf;
2049 struct inquiry_entry *e;
2052 list_for_each_entry(e, &cache->all, all) {
2053 struct inquiry_data *data = &e->data;
2058 bacpy(&info->bdaddr, &data->bdaddr);
2059 info->pscan_rep_mode = data->pscan_rep_mode;
2060 info->pscan_period_mode = data->pscan_period_mode;
2061 info->pscan_mode = data->pscan_mode;
2062 memcpy(info->dev_class, data->dev_class, 3);
2063 info->clock_offset = data->clock_offset;
2069 BT_DBG("cache %p, copied %d", cache, copied);
/* Request builder for the HCI Inquiry command.  @opt carries a
 * struct hci_inquiry_req pointer (LAP, length, max responses).
 * Skipped if an inquiry is already in progress.
 */
2073 static void hci_inq_req(struct hci_request *req, unsigned long opt)
2075 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
2076 struct hci_dev *hdev = req->hdev;
2077 struct hci_cp_inquiry cp;
2079 BT_DBG("%s", hdev->name);
2081 if (test_bit(HCI_INQUIRY, &hdev->flags))
2085 memcpy(&cp.lap, &ir->lap, 3);
2086 cp.length = ir->length;
2087 cp.num_rsp = ir->num_rsp;
2088 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* wait_on_bit() action for HCI_INQUIRY: report whether a signal is
 * pending so the sleeping waiter can bail out with -EINTR.
 */
2091 static int wait_inquiry(void *word)
2094 return signal_pending(current);
/* HCIINQUIRY ioctl handler: validate the device (BR/EDR, enabled, not
 * raw/user-channel), flush a stale inquiry cache, run the inquiry
 * synchronously, wait for completion (interruptible), then copy the
 * cached results back to user space.  Returns 0 or a negative errno.
 */
2097 int hci_inquiry(void __user *arg)
2099 __u8 __user *ptr = arg;
2100 struct hci_inquiry_req ir;
2101 struct hci_dev *hdev;
2102 int err = 0, do_inquiry = 0, max_rsp;
2106 if (copy_from_user(&ir, ptr, sizeof(ir)))
2109 hdev = hci_dev_get(ir.dev_id);
2113 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2118 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
2123 if (hdev->dev_type != HCI_BREDR) {
2128 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
/* Re-run the inquiry when the cache is stale, empty, or a flush
 * was explicitly requested via IREQ_CACHE_FLUSH.
 */
2134 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
2135 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
2136 hci_inquiry_cache_flush(hdev);
2139 hci_dev_unlock(hdev);
/* ir.length is in 1.28s units per the HCI spec, hence ~2000ms here */
2141 timeo = ir.length * msecs_to_jiffies(2000);
2144 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2149 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2150 * cleared). If it is interrupted by a signal, return -EINTR.
2152 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2153 TASK_INTERRUPTIBLE))
2157 /* for unlimited number of responses we will use buffer with
2160 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2162 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
2163 * copy it to the user space.
2165 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
2172 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
2173 hci_dev_unlock(hdev);
2175 BT_DBG("num_rsp %d", ir.num_rsp);
2177 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2179 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
/* Core power-on path: check preconditions (not unregistering, not
 * rfkilled outside setup, valid public/static address for BR/EDR),
 * call the driver open(), run the optional driver setup() and
 * __hci_init(), then mark the device HCI_UP and notify listeners.
 * On init failure all work is flushed, queues purged, the pending
 * command dropped and the driver closed again.  Returns 0 or a
 * negative errno.
 */
2192 static int hci_dev_do_open(struct hci_dev *hdev)
2196 BT_DBG("%s %p", hdev->name, hdev);
2200 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2205 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
2206 /* Check for rfkill but allow the HCI setup stage to
2207 * proceed (which in itself doesn't cause any RF activity).
2209 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2214 /* Check for valid public address or a configured static
2215 * random adddress, but let the HCI setup proceed to
2216 * be able to determine if there is a public address
2219 * In case of user channel usage, it is not important
2220 * if a public address or static random address is
2223 * This check is only valid for BR/EDR controllers
2224 * since AMP controllers do not have an address.
2226 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2227 hdev->dev_type == HCI_BREDR &&
2228 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2229 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2230 ret = -EADDRNOTAVAIL;
2235 if (test_bit(HCI_UP, &hdev->flags)) {
2240 if (hdev->open(hdev)) {
2245 atomic_set(&hdev->cmd_cnt, 1);
2246 set_bit(HCI_INIT, &hdev->flags);
/* Driver-specific setup runs only once, during the SETUP phase */
2248 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
2249 ret = hdev->setup(hdev);
2252 if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks) &&
2253 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2254 ret = __hci_init(hdev);
2257 clear_bit(HCI_INIT, &hdev->flags);
/* Success: advertise the device as up */
2261 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
2262 set_bit(HCI_UP, &hdev->flags);
2263 hci_notify(hdev, HCI_DEV_UP);
2264 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2265 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2266 hdev->dev_type == HCI_BREDR) {
2268 mgmt_powered(hdev, 1);
2269 hci_dev_unlock(hdev);
2272 /* Init failed, cleanup */
2273 flush_work(&hdev->tx_work);
2274 flush_work(&hdev->cmd_work);
2275 flush_work(&hdev->rx_work);
2277 skb_queue_purge(&hdev->cmd_q);
2278 skb_queue_purge(&hdev->rx_q);
2283 if (hdev->sent_cmd) {
2284 kfree_skb(hdev->sent_cmd);
2285 hdev->sent_cmd = NULL;
/* Keep only the raw flag across the failed open */
2289 hdev->flags &= BIT(HCI_RAW);
2293 hci_req_unlock(hdev);
2297 /* ---- HCI ioctl helpers ---- */
/* HCIDEVUP ioctl handler: refuse raw-only devices outside the user
 * channel, cancel a pending auto power-off, wait for any in-flight
 * setup work to finish, then call hci_dev_do_open().
 */
2299 int hci_dev_open(__u16 dev)
2301 struct hci_dev *hdev;
2304 hdev = hci_dev_get(dev);
2308 /* Devices that are marked for raw-only usage can only be powered
2309 * up as user channel. Trying to bring them up as normal devices
2310 * will result into a failure. Only user channel operation is
2313 * When this function is called for a user channel, the flag
2314 * HCI_USER_CHANNEL will be set first before attempting to
2317 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks) &&
2318 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2323 /* We need to ensure that no other power on/off work is pending
2324 * before proceeding to call hci_dev_do_open. This is
2325 * particularly important if the setup procedure has not yet
2328 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2329 cancel_delayed_work(&hdev->power_off);
2331 /* After this call it is guaranteed that the setup procedure
2332 * has finished. This means that error conditions like RFKILL
2333 * or no valid public or static random address apply.
2335 flush_workqueue(hdev->req_workqueue);
2337 err = hci_dev_do_open(hdev);
/* Core power-off path: cancel timers and pending requests, flush TX/RX
 * work, clear discoverability, flush the inquiry cache and connection
 * hash, optionally send an HCI Reset (quirk-controlled), purge all
 * queues, drop the last sent command, notify mgmt of the power-down,
 * and clear all non-persistent flags.  Returns 0 (early-out path when
 * the device was not up).
 */
2344 static int hci_dev_do_close(struct hci_dev *hdev)
2346 BT_DBG("%s %p", hdev->name, hdev);
2348 cancel_delayed_work(&hdev->power_off);
2350 hci_req_cancel(hdev, ENODEV);
2353 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
2354 cancel_delayed_work_sync(&hdev->cmd_timer);
2355 hci_req_unlock(hdev);
2359 /* Flush RX and TX works */
2360 flush_work(&hdev->tx_work);
2361 flush_work(&hdev->rx_work);
2363 if (hdev->discov_timeout > 0) {
2364 cancel_delayed_work(&hdev->discov_off);
2365 hdev->discov_timeout = 0;
2366 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
2367 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2370 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
2371 cancel_delayed_work(&hdev->service_cache);
2373 cancel_delayed_work_sync(&hdev->le_scan_disable);
2375 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2376 cancel_delayed_work_sync(&hdev->rpa_expired);
2379 hci_inquiry_cache_flush(hdev);
2380 hci_conn_hash_flush(hdev);
2381 hci_pend_le_conns_clear(hdev);
2382 hci_dev_unlock(hdev);
2384 hci_notify(hdev, HCI_DEV_DOWN);
/* Reset the controller on close unless raw/auto-off, when quirked */
2390 skb_queue_purge(&hdev->cmd_q);
2391 atomic_set(&hdev->cmd_cnt, 1);
2392 if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks) &&
2393 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2394 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
2395 set_bit(HCI_INIT, &hdev->flags);
2396 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
2397 clear_bit(HCI_INIT, &hdev->flags);
2400 /* flush cmd work */
2401 flush_work(&hdev->cmd_work);
2404 skb_queue_purge(&hdev->rx_q);
2405 skb_queue_purge(&hdev->cmd_q);
2406 skb_queue_purge(&hdev->raw_q);
2408 /* Drop last sent command */
2409 if (hdev->sent_cmd) {
2410 cancel_delayed_work_sync(&hdev->cmd_timer);
2411 kfree_skb(hdev->sent_cmd);
2412 hdev->sent_cmd = NULL;
2415 kfree_skb(hdev->recv_evt);
2416 hdev->recv_evt = NULL;
2418 /* After this point our queues are empty
2419 * and no tasks are scheduled. */
/* Keep only HCI_RAW; wipe every non-persistent dev_flag */
2423 hdev->flags &= BIT(HCI_RAW);
2424 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2426 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2427 if (hdev->dev_type == HCI_BREDR) {
2429 mgmt_powered(hdev, 0);
2430 hci_dev_unlock(hdev);
2434 /* Controller radio is available but is currently powered down */
2435 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
2437 memset(hdev->eir, 0, sizeof(hdev->eir));
2438 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
2439 bacpy(&hdev->random_addr, BDADDR_ANY);
2441 hci_req_unlock(hdev);
/* HCIDEVDOWN ioctl handler: reject user-channel devices, cancel a
 * pending auto power-off, then call hci_dev_do_close().
 */
2447 int hci_dev_close(__u16 dev)
2449 struct hci_dev *hdev;
2452 hdev = hci_dev_get(dev);
2456 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2461 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2462 cancel_delayed_work(&hdev->power_off);
2464 err = hci_dev_do_close(hdev);
/* HCIDEVRESET ioctl handler: for an up, non-raw, non-user-channel
 * device, purge RX/command queues, flush the inquiry cache and
 * connection hash, reset the data counters and issue a synchronous
 * HCI Reset.
 */
2471 int hci_dev_reset(__u16 dev)
2473 struct hci_dev *hdev;
2476 hdev = hci_dev_get(dev);
2482 if (!test_bit(HCI_UP, &hdev->flags)) {
2487 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2492 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
2498 skb_queue_purge(&hdev->rx_q);
2499 skb_queue_purge(&hdev->cmd_q);
2502 hci_inquiry_cache_flush(hdev);
2503 hci_conn_hash_flush(hdev);
2504 hci_dev_unlock(hdev);
2509 atomic_set(&hdev->cmd_cnt, 1);
2510 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
2512 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2515 hci_req_unlock(hdev);
/* HCIDEVRESTAT ioctl handler: zero the device's byte/packet
 * statistics.  Rejected for user-channel and raw-only devices.
 */
2520 int hci_dev_reset_stat(__u16 dev)
2522 struct hci_dev *hdev;
2525 hdev = hci_dev_get(dev);
2529 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2534 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
2539 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
/* Dispatcher for the simple HCISET* ioctls (auth, encrypt, scan, link
 * policy, link mode, packet type, ACL/SCO MTU).  Validates the device
 * (BR/EDR, enabled, not raw/user-channel) and runs the matching
 * synchronous request or updates the hdev field directly.
 */
2546 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2548 struct hci_dev *hdev;
2549 struct hci_dev_req dr;
2552 if (copy_from_user(&dr, arg, sizeof(dr)))
2555 hdev = hci_dev_get(dr.dev_id);
2559 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2564 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
2569 if (hdev->dev_type != HCI_BREDR) {
2574 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2581 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2586 if (!lmp_encrypt_capable(hdev)) {
2591 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2592 /* Auth must be enabled first */
2593 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2599 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2604 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2609 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2613 case HCISETLINKMODE:
2614 hdev->link_mode = ((__u16) dr.dev_opt) &
2615 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2619 hdev->pkt_type = (__u16) dr.dev_opt;
/* dev_opt packs MTU in the high half and packet count in the low */
2623 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2624 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2628 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2629 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
/* HCIGETDEVLIST ioctl handler: copy the caller's requested device
 * count, walk the registered device list under the read lock filling
 * (id, flags) pairs, then copy the shrunk list back to user space.
 * Also cancels pending auto power-off and marks non-mgmt devices
 * pairable while walking.  Returns 0 or -EFAULT.
 */
2642 int hci_get_dev_list(void __user *arg)
2644 struct hci_dev *hdev;
2645 struct hci_dev_list_req *dl;
2646 struct hci_dev_req *dr;
2647 int n = 0, size, err;
2650 if (get_user(dev_num, (__u16 __user *) arg))
/* Cap the request so the allocation stays within two pages */
2653 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2656 size = sizeof(*dl) + dev_num * sizeof(*dr);
2658 dl = kzalloc(size, GFP_KERNEL);
2664 read_lock(&hci_dev_list_lock);
2665 list_for_each_entry(hdev, &hci_dev_list, list) {
2666 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2667 cancel_delayed_work(&hdev->power_off);
2669 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2670 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2672 (dr + n)->dev_id = hdev->id;
2673 (dr + n)->dev_opt = hdev->flags;
2678 read_unlock(&hci_dev_list_lock);
/* Only copy as many entries as were actually filled in */
2681 size = sizeof(*dl) + n * sizeof(*dr);
2683 err = copy_to_user(arg, dl, size);
2686 return err ? -EFAULT : 0;
/* HCIGETDEVINFO ioctl handler: fill a struct hci_dev_info snapshot
 * (name, address, type, flags, MTUs, link policy/mode, stats,
 * features) for the requested device and copy it to user space.
 * For LE-only controllers the LE MTU/packet counts are reported in
 * the ACL fields.
 */
2689 int hci_get_dev_info(void __user *arg)
2691 struct hci_dev *hdev;
2692 struct hci_dev_info di;
2695 if (copy_from_user(&di, arg, sizeof(di)))
2698 hdev = hci_dev_get(di.dev_id);
2702 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2703 cancel_delayed_work_sync(&hdev->power_off);
2705 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2706 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2708 strcpy(di.name, hdev->name);
2709 di.bdaddr = hdev->bdaddr;
/* type byte: low nibble = bus, bits 4-5 = device type */
2710 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2711 di.flags = hdev->flags;
2712 di.pkt_type = hdev->pkt_type;
2713 if (lmp_bredr_capable(hdev)) {
2714 di.acl_mtu = hdev->acl_mtu;
2715 di.acl_pkts = hdev->acl_pkts;
2716 di.sco_mtu = hdev->sco_mtu;
2717 di.sco_pkts = hdev->sco_pkts;
2719 di.acl_mtu = hdev->le_mtu;
2720 di.acl_pkts = hdev->le_pkts;
2724 di.link_policy = hdev->link_policy;
2725 di.link_mode = hdev->link_mode;
2727 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2728 memcpy(&di.features, &hdev->features, sizeof(di.features));
2730 if (copy_to_user(arg, &di, sizeof(di)))
2738 /* ---- Interface to HCI drivers ---- */
/* rfkill set_block callback: mirror the rfkill state into
 * HCI_RFKILLED and power the device down when blocked (unless it is
 * still in setup).  User-channel devices are left alone.
 */
2740 static int hci_rfkill_set_block(void *data, bool blocked)
2742 struct hci_dev *hdev = data;
2744 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2746 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2750 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2751 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2752 hci_dev_do_close(hdev);
2754 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
/* rfkill operations table: only the block/unblock hook is needed. */
2760 static const struct rfkill_ops hci_rfkill_ops = {
2761 .set_block = hci_rfkill_set_block,
/* Deferred power-on work item: open the device, re-check the error
 * conditions that were deliberately ignored during setup (rfkill,
 * missing address) and power back off if they still hold, arm the
 * auto power-off timer when requested, and announce the new index to
 * mgmt once setup completes.
 */
2764 static void hci_power_on(struct work_struct *work)
2766 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2769 BT_DBG("%s", hdev->name);
2771 err = hci_dev_do_open(hdev);
2773 mgmt_set_powered_failed(hdev, err);
2777 /* During the HCI setup phase, a few error conditions are
2778 * ignored and they need to be checked now. If they are still
2779 * valid, it is important to turn the device back off.
2781 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2782 (hdev->dev_type == HCI_BREDR &&
2783 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2784 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2785 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2786 hci_dev_do_close(hdev);
2787 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2788 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2789 HCI_AUTO_OFF_TIMEOUT);
2792 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
2793 if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2794 mgmt_index_added(hdev);
/* Deferred power-off work item: simply close the device. */
2798 static void hci_power_off(struct work_struct *work)
2800 struct hci_dev *hdev = container_of(work, struct hci_dev,
2803 BT_DBG("%s", hdev->name);
2805 hci_dev_do_close(hdev);
/* Delayed work: the discoverable timeout expired — let mgmt turn
 * discoverability off.
 */
2808 static void hci_discov_off(struct work_struct *work)
2810 struct hci_dev *hdev;
2812 hdev = container_of(work, struct hci_dev, discov_off.work);
2814 BT_DBG("%s", hdev->name);
2816 mgmt_discoverable_timeout(hdev);
/* Free every registered service UUID on the device. */
2819 void hci_uuids_clear(struct hci_dev *hdev)
2821 struct bt_uuid *uuid, *tmp;
2823 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2824 list_del(&uuid->list);
/* Free every stored BR/EDR link key on the device. */
2829 void hci_link_keys_clear(struct hci_dev *hdev)
2831 struct list_head *p, *n;
2833 list_for_each_safe(p, n, &hdev->link_keys) {
2834 struct link_key *key;
2836 key = list_entry(p, struct link_key, list);
/* Free every stored SMP long term key on the device. */
2843 void hci_smp_ltks_clear(struct hci_dev *hdev)
2845 struct smp_ltk *k, *tmp;
2847 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
/* Free every stored identity resolving key on the device. */
2853 void hci_smp_irks_clear(struct hci_dev *hdev)
2855 struct smp_irk *k, *tmp;
2857 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
/* Find a stored BR/EDR link key by Bluetooth address. */
2863 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2867 list_for_each_entry(k, &hdev->link_keys, list)
2868 if (bacmp(bdaddr, &k->bdaddr) == 0)
/* Decide whether a newly created link key should be stored
 * persistently, based on the key type and both sides' authentication
 * requirements (see Bluetooth Core Spec security considerations):
 * legacy and debug keys are not persistent; dedicated bonding on
 * either side makes the key persistent.
 */
2874 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2875 u8 key_type, u8 old_key_type)
/* Legacy key types (< 0x03) are never stored */
2878 if (key_type < 0x03)
2881 /* Debug keys are insecure so don't store them persistently */
2882 if (key_type == HCI_LK_DEBUG_COMBINATION)
2885 /* Changed combination key and there's no previous one */
2886 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2889 /* Security mode 3 case */
2893 /* Neither local nor remote side had no-bonding as requirement */
2894 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2897 /* Local side had dedicated bonding as requirement */
2898 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2901 /* Remote side had dedicated bonding as requirement */
2902 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2905 /* If none of the above criteria match, then don't store the key
/* True when the LTK type denotes the master-role key (SMP_LTK). */
2910 static bool ltk_type_master(u8 type)
2912 return (type == SMP_LTK);
/* Find a stored long term key by EDIV/Rand pair and role
 * (master vs. slave key).
 */
2915 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
2920 list_for_each_entry(k, &hdev->long_term_keys, list) {
2921 if (k->ediv != ediv || k->rand != rand)
2924 if (ltk_type_master(k->type) != master)
/* Find a stored long term key by Bluetooth address, address type and
 * role (master vs. slave key).
 */
2933 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2934 u8 addr_type, bool master)
2938 list_for_each_entry(k, &hdev->long_term_keys, list)
2939 if (addr_type == k->bdaddr_type &&
2940 bacmp(bdaddr, &k->bdaddr) == 0 &&
2941 ltk_type_master(k->type) == master)
/* Resolve a Resolvable Private Address to a stored IRK.  First try a
 * cheap comparison against each IRK's cached RPA; if that fails, run
 * the AES-based resolution and cache the RPA on a match.
 */
2947 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2949 struct smp_irk *irk;
2951 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2952 if (!bacmp(&irk->rpa, rpa))
2956 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2957 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
2958 bacpy(&irk->rpa, rpa);
/* Find a stored IRK by identity address and address type.  Random
 * addresses must be static random (top two bits 0b11) to qualify as
 * an identity address.
 */
2966 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2969 struct smp_irk *irk;
2971 /* Identity Address must be public or static random */
2972 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2975 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2976 if (addr_type == irk->addr_type &&
2977 bacmp(bdaddr, &irk->bdaddr) == 0)
/* Store (or update) a BR/EDR link key.  Reuses an existing entry for
 * the same address when present, otherwise allocates a new one.
 * Works around controllers that report a changed-combination key when
 * no previous key existed.  When @persistent is non-NULL it is set
 * from hci_persistent_key().  Returns the stored key.
 */
2984 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2985 bdaddr_t *bdaddr, u8 *val, u8 type,
2986 u8 pin_len, bool *persistent)
2988 struct link_key *key, *old_key;
2991 old_key = hci_find_link_key(hdev, bdaddr);
2993 old_key_type = old_key->type;
/* No previous key: 0xff marks "no old key" for the checks below */
2996 old_key_type = conn ? conn->key_type : 0xff;
2997 key = kzalloc(sizeof(*key), GFP_KERNEL);
3000 list_add(&key->list, &hdev->link_keys);
3003 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
3005 /* Some buggy controller combinations generate a changed
3006 * combination key for legacy pairing even when there's no
3008 if (type == HCI_LK_CHANGED_COMBINATION &&
3009 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
3010 type = HCI_LK_COMBINATION;
3012 conn->key_type = type;
3015 bacpy(&key->bdaddr, bdaddr);
3016 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
3017 key->pin_len = pin_len;
/* A changed-combination event keeps the original key's type */
3019 if (type == HCI_LK_CHANGED_COMBINATION)
3020 key->type = old_key_type;
3025 *persistent = hci_persistent_key(hdev, conn, type,
/* Add or update an LE Long Term Key for the given address/type.
 * The master-role flag is derived from the key type so lookup matches
 * the correct role.
 */
3031 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3032 u8 addr_type, u8 type, u8 authenticated,
3033 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
3035 struct smp_ltk *key, *old_key;
3036 bool master = ltk_type_master(type);
3038 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
3042 key = kzalloc(sizeof(*key), GFP_KERNEL);
3045 list_add(&key->list, &hdev->long_term_keys);
3048 bacpy(&key->bdaddr, bdaddr);
3049 key->bdaddr_type = addr_type;
3050 memcpy(key->val, tk, sizeof(key->val));
3051 key->authenticated = authenticated;
3054 key->enc_size = enc_size;
/* Add or update an Identity Resolving Key for bdaddr/addr_type,
 * recording the most recently seen RPA alongside the key value.
 */
3060 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3061 u8 addr_type, u8 val[16], bdaddr_t *rpa)
3063 struct smp_irk *irk;
3065 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3067 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3071 bacpy(&irk->bdaddr, bdaddr);
3072 irk->addr_type = addr_type;
3074 list_add(&irk->list, &hdev->identity_resolving_keys);
3077 memcpy(irk->val, val, 16);
3078 bacpy(&irk->rpa, rpa);
/* Remove the stored BR/EDR link key for bdaddr, if present. */
3083 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3085 struct link_key *key;
3087 key = hci_find_link_key(hdev, bdaddr);
3091 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3093 list_del(&key->list);
/* Remove all LTKs matching bdaddr/bdaddr_type. Returns 0 when at least
 * one entry was removed, -ENOENT otherwise.
 */
3099 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
3101 struct smp_ltk *k, *tmp;
3104 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3105 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
3108 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3115 return removed ? 0 : -ENOENT;
/* Remove all IRKs matching bdaddr/addr_type. */
3118 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3120 struct smp_irk *k, *tmp;
3122 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3123 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3126 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3133 /* HCI command timer function */
/* Fires when the controller fails to acknowledge a command in time.
 * Logs the stalled opcode (if a command was in flight), then resets the
 * command credit to 1 and re-kicks the command work so the queue does
 * not stay stuck forever.
 */
3134 static void hci_cmd_timeout(struct work_struct *work)
3136 struct hci_dev *hdev = container_of(work, struct hci_dev,
3139 if (hdev->sent_cmd) {
3140 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3141 u16 opcode = __le16_to_cpu(sent->opcode);
3143 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3145 BT_ERR("%s command tx timeout", hdev->name);
3148 atomic_set(&hdev->cmd_cnt, 1);
3149 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Look up stored remote Out-Of-Band pairing data by address. */
3152 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
3155 struct oob_data *data;
3157 list_for_each_entry(data, &hdev->remote_oob_data, list)
3158 if (bacmp(bdaddr, &data->bdaddr) == 0)
/* Remove stored remote OOB data for bdaddr, if present. */
3164 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3166 struct oob_data *data;
3168 data = hci_find_remote_oob_data(hdev, bdaddr);
3172 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3174 list_del(&data->list);
/* Drop every stored remote OOB data entry. */
3180 void hci_remote_oob_data_clear(struct hci_dev *hdev)
3182 struct oob_data *data, *n;
3184 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3185 list_del(&data->list);
/* Store P-192 remote OOB data (hash + randomizer) for bdaddr,
 * allocating a new entry when none exists. The P-256 fields are
 * explicitly zeroed since only the 192-bit variant is provided here.
 */
3190 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3191 u8 *hash, u8 *randomizer)
3193 struct oob_data *data;
3195 data = hci_find_remote_oob_data(hdev, bdaddr);
3197 data = kmalloc(sizeof(*data), GFP_KERNEL);
3201 bacpy(&data->bdaddr, bdaddr);
3202 list_add(&data->list, &hdev->remote_oob_data);
3205 memcpy(data->hash192, hash, sizeof(data->hash192));
3206 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
3208 memset(data->hash256, 0, sizeof(data->hash256));
3209 memset(data->randomizer256, 0, sizeof(data->randomizer256));
3211 BT_DBG("%s for %pMR", hdev->name, bdaddr);
/* Store extended remote OOB data containing both the P-192 and P-256
 * hash/randomizer pairs for bdaddr.
 */
3216 int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3217 u8 *hash192, u8 *randomizer192,
3218 u8 *hash256, u8 *randomizer256)
3220 struct oob_data *data;
3222 data = hci_find_remote_oob_data(hdev, bdaddr);
3224 data = kmalloc(sizeof(*data), GFP_KERNEL);
3228 bacpy(&data->bdaddr, bdaddr);
3229 list_add(&data->list, &hdev->remote_oob_data);
3232 memcpy(data->hash192, hash192, sizeof(data->hash192));
3233 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3235 memcpy(data->hash256, hash256, sizeof(data->hash256));
3236 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3238 BT_DBG("%s for %pMR", hdev->name, bdaddr);
/* Find a blacklist entry matching both address and address type. */
3243 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
3244 bdaddr_t *bdaddr, u8 type)
3246 struct bdaddr_list *b;
3248 list_for_each_entry(b, &hdev->blacklist, list) {
3249 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
/* Free every entry on the blacklist. */
3256 static void hci_blacklist_clear(struct hci_dev *hdev)
3258 struct list_head *p, *n;
3260 list_for_each_safe(p, n, &hdev->blacklist) {
3261 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
/* Add bdaddr/type to the blacklist. BDADDR_ANY is rejected and
 * duplicates are not added. On success the mgmt layer is notified
 * via mgmt_device_blocked().
 */
3268 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3270 struct bdaddr_list *entry;
3272 if (!bacmp(bdaddr, BDADDR_ANY))
3275 if (hci_blacklist_lookup(hdev, bdaddr, type))
3278 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3282 bacpy(&entry->bdaddr, bdaddr);
3283 entry->bdaddr_type = type;
3285 list_add(&entry->list, &hdev->blacklist);
3287 return mgmt_device_blocked(hdev, bdaddr, type);
/* Remove bdaddr/type from the blacklist; BDADDR_ANY clears the whole
 * list. On removal the mgmt layer is notified.
 */
3290 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3292 struct bdaddr_list *entry;
3294 if (!bacmp(bdaddr, BDADDR_ANY)) {
3295 hci_blacklist_clear(hdev);
3299 entry = hci_blacklist_lookup(hdev, bdaddr, type);
3303 list_del(&entry->list);
3306 return mgmt_device_unblocked(hdev, bdaddr, type);
/* Find an LE white list entry matching address and address type. */
3309 struct bdaddr_list *hci_white_list_lookup(struct hci_dev *hdev,
3310 bdaddr_t *bdaddr, u8 type)
3312 struct bdaddr_list *b;
3314 list_for_each_entry(b, &hdev->le_white_list, list) {
3315 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
/* Free every entry on the LE white list. */
3322 void hci_white_list_clear(struct hci_dev *hdev)
3324 struct list_head *p, *n;
3326 list_for_each_safe(p, n, &hdev->le_white_list) {
3327 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
/* Add bdaddr/type to the LE white list. BDADDR_ANY is rejected.
 * NOTE(review): unlike hci_blacklist_add() there is no visible
 * duplicate check here — confirm against upstream.
 */
3334 int hci_white_list_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3336 struct bdaddr_list *entry;
3338 if (!bacmp(bdaddr, BDADDR_ANY))
3341 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3345 bacpy(&entry->bdaddr, bdaddr);
3346 entry->bdaddr_type = type;
3348 list_add(&entry->list, &hdev->le_white_list);
/* Remove bdaddr/type from the LE white list. */
3353 int hci_white_list_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3355 struct bdaddr_list *entry;
3357 if (!bacmp(bdaddr, BDADDR_ANY))
3360 entry = hci_white_list_lookup(hdev, bdaddr, type);
3364 list_del(&entry->list);
3370 /* This function requires the caller holds hdev->lock */
/* Find stored LE connection parameters for addr/addr_type. */
3371 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3372 bdaddr_t *addr, u8 addr_type)
3374 struct hci_conn_params *params;
3376 list_for_each_entry(params, &hdev->le_conn_params, list) {
/* NOTE(review): "¶ms" below looks like mojibake for "&params" —
 * confirm against upstream source. */
3377 if (bacmp(¶ms->addr, addr) == 0 &&
3378 params->addr_type == addr_type) {
/* Check whether an LE connection to addr with the given address type is
 * currently established (state BT_CONNECTED).
 */
3386 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3388 struct hci_conn *conn;
3390 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3394 if (conn->dst_type != type)
3397 if (conn->state != BT_CONNECTED)
3403 static bool is_identity_address(bdaddr_t *addr, u8 addr_type)
3405 if (addr_type == ADDR_LE_DEV_PUBLIC)
3408 /* Check for Random Static address type */
3409 if ((addr->b[5] & 0xc0) == 0xc0)
3415 /* This function requires the caller holds hdev->lock */
/* Find a pending LE connection entry for addr/addr_type. */
3416 struct bdaddr_list *hci_pend_le_conn_lookup(struct hci_dev *hdev,
3417 bdaddr_t *addr, u8 addr_type)
3419 struct bdaddr_list *entry;
3421 list_for_each_entry(entry, &hdev->pend_le_conns, list) {
3422 if (bacmp(&entry->bdaddr, addr) == 0 &&
3423 entry->bdaddr_type == addr_type)
3430 /* This function requires the caller holds hdev->lock */
/* Add addr/addr_type to the pending LE connections list (no-op when
 * already present) and refresh the background scan so the controller
 * starts looking for the device.
 */
3431 void hci_pend_le_conn_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3433 struct bdaddr_list *entry;
3435 entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3439 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3441 BT_ERR("Out of memory");
3445 bacpy(&entry->bdaddr, addr);
3446 entry->bdaddr_type = addr_type;
3448 list_add(&entry->list, &hdev->pend_le_conns);
3450 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3453 hci_update_background_scan(hdev);
3456 /* This function requires the caller holds hdev->lock */
/* Remove addr/addr_type from the pending LE connections list and
 * refresh the background scan.
 */
3457 void hci_pend_le_conn_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3459 struct bdaddr_list *entry;
3461 entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3465 list_del(&entry->list);
3468 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3471 hci_update_background_scan(hdev);
3474 /* This function requires the caller holds hdev->lock */
/* Drop every pending LE connection entry, then refresh the background
 * scan to reflect the now-empty list.
 */
3475 void hci_pend_le_conns_clear(struct hci_dev *hdev)
3477 struct bdaddr_list *entry, *tmp;
3479 list_for_each_entry_safe(entry, tmp, &hdev->pend_le_conns, list) {
3480 list_del(&entry->list);
3484 BT_DBG("All LE pending connections cleared");
3486 hci_update_background_scan(hdev);
3489 /* This function requires the caller holds hdev->lock */
/* Add or update LE connection parameters for addr. Only identity
 * addresses (public or static random) are accepted. Depending on the
 * auto_connect policy the address is added to or removed from the
 * pending LE connections list.
 */
3490 int hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3491 u8 auto_connect, u16 conn_min_interval,
3492 u16 conn_max_interval)
3494 struct hci_conn_params *params;
3496 if (!is_identity_address(addr, addr_type))
3499 params = hci_conn_params_lookup(hdev, addr, addr_type);
3503 params = kzalloc(sizeof(*params), GFP_KERNEL);
3505 BT_ERR("Out of memory");
/* NOTE(review): "¶ms" below looks like mojibake for "&params" —
 * confirm against upstream source. */
3509 bacpy(¶ms->addr, addr);
3510 params->addr_type = addr_type;
3512 list_add(¶ms->list, &hdev->le_conn_params);
3515 params->conn_min_interval = conn_min_interval;
3516 params->conn_max_interval = conn_max_interval;
3517 params->conn_latency = 0x0000;
3518 params->supervision_timeout = 0x002a;
3519 params->auto_connect = auto_connect;
3521 switch (auto_connect) {
3522 case HCI_AUTO_CONN_DISABLED:
3523 case HCI_AUTO_CONN_LINK_LOSS:
3524 hci_pend_le_conn_del(hdev, addr, addr_type);
3526 case HCI_AUTO_CONN_ALWAYS:
/* Only queue a pending connection when not already connected. */
3527 if (!is_connected(hdev, addr, addr_type))
3528 hci_pend_le_conn_add(hdev, addr, addr_type);
3532 BT_DBG("addr %pMR (type %u) auto_connect %u conn_min_interval 0x%.4x "
3533 "conn_max_interval 0x%.4x", addr, addr_type, auto_connect,
3534 conn_min_interval, conn_max_interval);
3539 /* This function requires the caller holds hdev->lock */
/* Delete the stored LE connection parameters for addr and drop any
 * pending LE connection for it.
 */
3540 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3542 struct hci_conn_params *params;
3544 params = hci_conn_params_lookup(hdev, addr, addr_type);
3548 hci_pend_le_conn_del(hdev, addr, addr_type);
/* NOTE(review): "¶ms" below looks like mojibake for "&params" —
 * confirm against upstream source. */
3550 list_del(¶ms->list);
3553 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3556 /* This function requires the caller holds hdev->lock */
/* Remove every stored LE connection parameter entry and clear all
 * pending LE connections.
 */
3557 void hci_conn_params_clear(struct hci_dev *hdev)
3559 struct hci_conn_params *params, *tmp;
3561 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
/* NOTE(review): "¶ms" below looks like mojibake for "&params" —
 * confirm against upstream source. */
3562 list_del(¶ms->list);
3566 hci_pend_le_conns_clear(hdev);
3568 BT_DBG("All LE connection parameters were removed");
/* Completion callback for the inquiry request: on failure, log it and
 * reset discovery to DISCOVERY_STOPPED under hdev->lock.
 */
3571 static void inquiry_complete(struct hci_dev *hdev, u8 status)
3574 BT_ERR("Failed to start inquiry: status %d", status);
3577 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3578 hci_dev_unlock(hdev);
/* Completion callback after LE scanning was disabled. For LE-only
 * discovery the discovery state machine is stopped; for interleaved
 * discovery a BR/EDR inquiry (GIAC) is started next.
 */
3583 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
3585 /* General inquiry access code (GIAC) */
3586 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3587 struct hci_request req;
3588 struct hci_cp_inquiry cp;
3592 BT_ERR("Failed to disable LE scanning: status %d", status);
3596 switch (hdev->discovery.type) {
3597 case DISCOV_TYPE_LE:
3599 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3600 hci_dev_unlock(hdev);
3603 case DISCOV_TYPE_INTERLEAVED:
3604 hci_req_init(&req, hdev);
3606 memset(&cp, 0, sizeof(cp));
3607 memcpy(&cp.lap, lap, sizeof(cp.lap));
3608 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3609 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* Flush stale inquiry cache entries before the new inquiry runs. */
3613 hci_inquiry_cache_flush(hdev);
3615 err = hci_req_run(&req, inquiry_complete);
3617 BT_ERR("Inquiry request failed: err %d", err);
3618 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3621 hci_dev_unlock(hdev);
/* Delayed work handler: build and submit an HCI request that disables
 * LE scanning; le_scan_disable_work_complete() handles the follow-up.
 */
3626 static void le_scan_disable_work(struct work_struct *work)
3628 struct hci_dev *hdev = container_of(work, struct hci_dev,
3629 le_scan_disable.work);
3630 struct hci_request req;
3633 BT_DBG("%s", hdev->name);
3635 hci_req_init(&req, hdev);
3637 hci_req_add_le_scan_disable(&req);
3639 err = hci_req_run(&req, le_scan_disable_work_complete);
3641 BT_ERR("Disable LE scanning request failed: err %d", err);
/* Queue an LE Set Random Address command for @rpa, unless the device is
 * advertising or initiating an LE connection — in which case the update
 * is deferred (see comment below).
 */
3644 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3646 struct hci_dev *hdev = req->hdev;
3648 /* If we're advertising or initiating an LE connection we can't
3649 * go ahead and change the random address at this time. This is
3650 * because the eventual initiator address used for the
3651 * subsequently created connection will be undefined (some
3652 * controllers use the new address and others the one we had
3653 * when the operation started).
3655 * In this kind of scenario skip the update and let the random
3656 * address be updated at the next cycle.
3658 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
3659 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3660 BT_DBG("Deferring random address update");
3664 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
/* Select the own-address type for an LE request and queue any random
 * address update needed to realize it:
 *  - privacy enabled: use an RPA, regenerating it when expired;
 *  - privacy required but unavailable: use an unresolvable private
 *    address (top two bits cleared);
 *  - forced static address or no public address: use the static
 *    random address;
 *  - otherwise: use the public address.
 */
3667 int hci_update_random_address(struct hci_request *req, bool require_privacy,
3670 struct hci_dev *hdev = req->hdev;
3673 /* If privacy is enabled use a resolvable private address. If
3674 * current RPA has expired or there is something else than
3675 * the current RPA in use, then generate a new one.
3677 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
3680 *own_addr_type = ADDR_LE_DEV_RANDOM;
/* Current RPA still valid and in use — nothing to do. */
3682 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
3683 !bacmp(&hdev->random_addr, &hdev->rpa))
3686 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
3688 BT_ERR("%s failed to generate new RPA", hdev->name);
3692 set_random_addr(req, &hdev->rpa);
/* Schedule expiry of the freshly generated RPA. */
3694 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3695 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3700 /* In case of required privacy without resolvable private address,
3701 * use an unresolvable private address. This is useful for active
3702 * scanning and non-connectable advertising.
3704 if (require_privacy) {
3707 get_random_bytes(&urpa, 6);
3708 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3710 *own_addr_type = ADDR_LE_DEV_RANDOM;
3711 set_random_addr(req, &urpa);
3715 /* If forcing static address is in use or there is no public
3716 * address use the static address as random address (but skip
3717 * the HCI command if the current random address is already the
3720 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3721 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3722 *own_addr_type = ADDR_LE_DEV_RANDOM;
3723 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3724 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3725 &hdev->static_addr);
3729 /* Neither privacy nor static address is being used so use a
3732 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3737 /* Copy the Identity Address of the controller.
3739 * If the controller has a public BD_ADDR, then by default use that one.
3740 * If this is a LE only controller without a public address, default to
3741 * the static random address.
3743 * For debugging purposes it is possible to force controllers with a
3744 * public address to use the static random address instead.
3746 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3749 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3750 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3751 bacpy(bdaddr, &hdev->static_addr);
3752 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3754 bacpy(bdaddr, &hdev->bdaddr);
3755 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3759 /* Alloc HCI device */
/* Allocate and zero-initialize an hci_dev, setting protocol defaults,
 * initializing all state lists, work items, skb queues and the command
 * timeout timer. Returns NULL on allocation failure (the early-return
 * path is not visible in this extract).
 */
3760 struct hci_dev *hci_alloc_dev(void)
3762 struct hci_dev *hdev;
3764 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3768 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3769 hdev->esco_type = (ESCO_HV1);
3770 hdev->link_mode = (HCI_LM_ACCEPT);
3771 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3772 hdev->io_capability = 0x03; /* No Input No Output */
3773 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3774 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
/* Sniff interval defaults — presumably in baseband slots (0.625 ms
 * units); TODO confirm. */
3776 hdev->sniff_max_interval = 800;
3777 hdev->sniff_min_interval = 80;
/* LE defaults: all three advertising channels, plus scan/connection
 * interval values in controller units. */
3779 hdev->le_adv_channel_map = 0x07;
3780 hdev->le_scan_interval = 0x0060;
3781 hdev->le_scan_window = 0x0030;
3782 hdev->le_conn_min_interval = 0x0028;
3783 hdev->le_conn_max_interval = 0x0038;
3785 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3786 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3787 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3788 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3790 mutex_init(&hdev->lock);
3791 mutex_init(&hdev->req_lock);
3793 INIT_LIST_HEAD(&hdev->mgmt_pending);
3794 INIT_LIST_HEAD(&hdev->blacklist);
3795 INIT_LIST_HEAD(&hdev->uuids);
3796 INIT_LIST_HEAD(&hdev->link_keys);
3797 INIT_LIST_HEAD(&hdev->long_term_keys);
3798 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3799 INIT_LIST_HEAD(&hdev->remote_oob_data);
3800 INIT_LIST_HEAD(&hdev->le_white_list);
3801 INIT_LIST_HEAD(&hdev->le_conn_params);
3802 INIT_LIST_HEAD(&hdev->pend_le_conns);
3803 INIT_LIST_HEAD(&hdev->conn_hash.list);
3805 INIT_WORK(&hdev->rx_work, hci_rx_work);
3806 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3807 INIT_WORK(&hdev->tx_work, hci_tx_work);
3808 INIT_WORK(&hdev->power_on, hci_power_on);
3810 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3811 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3812 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3814 skb_queue_head_init(&hdev->rx_q);
3815 skb_queue_head_init(&hdev->cmd_q);
3816 skb_queue_head_init(&hdev->raw_q);
3818 init_waitqueue_head(&hdev->req_wait_q);
3820 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3822 hci_init_sysfs(hdev);
3823 discovery_init(hdev);
3827 EXPORT_SYMBOL(hci_alloc_dev);
3829 /* Free HCI device */
/* Drop the device reference; actual freeing happens through the struct
 * device release callback.
 */
3830 void hci_free_dev(struct hci_dev *hdev)
3832 /* will free via device release */
3833 put_device(&hdev->dev);
3835 EXPORT_SYMBOL(hci_free_dev);
3837 /* Register HCI device */
/* Register an hci_dev with the core: allocate an index via the IDA,
 * create the two workqueues, debugfs directory and AES crypto context,
 * add the device to sysfs, hook up rfkill, add it to hci_dev_list and
 * finally schedule power-on. Error paths unwind in reverse order via
 * the labels at the bottom (intermediate lines not visible in this
 * extract).
 */
3838 int hci_register_dev(struct hci_dev *hdev)
3842 if (!hdev->open || !hdev->close)
3845 /* Do not allow HCI_AMP devices to register at index 0,
3846 * so the index can be used as the AMP controller ID.
3848 switch (hdev->dev_type) {
3850 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3853 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3862 sprintf(hdev->name, "hci%d", id);
3865 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3867 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3868 WQ_MEM_RECLAIM, 1, hdev->name);
3869 if (!hdev->workqueue) {
3874 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3875 WQ_MEM_RECLAIM, 1, hdev->name);
3876 if (!hdev->req_workqueue) {
3877 destroy_workqueue(hdev->workqueue);
3882 if (!IS_ERR_OR_NULL(bt_debugfs))
3883 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3885 dev_set_name(&hdev->dev, "%s", hdev->name);
3887 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3889 if (IS_ERR(hdev->tfm_aes)) {
3890 BT_ERR("Unable to create crypto context");
3891 error = PTR_ERR(hdev->tfm_aes);
3892 hdev->tfm_aes = NULL;
3896 error = device_add(&hdev->dev);
3900 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3901 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
/* rfkill registration failure is non-fatal: drop the handle and run
 * without rfkill support. */
3904 if (rfkill_register(hdev->rfkill) < 0) {
3905 rfkill_destroy(hdev->rfkill);
3906 hdev->rfkill = NULL;
3910 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3911 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3913 set_bit(HCI_SETUP, &hdev->dev_flags);
3914 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3916 if (hdev->dev_type == HCI_BREDR) {
3917 /* Assume BR/EDR support until proven otherwise (such as
3918 * through reading supported features during init.
3920 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3923 write_lock(&hci_dev_list_lock);
3924 list_add(&hdev->list, &hci_dev_list);
3925 write_unlock(&hci_dev_list_lock);
3927 /* Devices that are marked for raw-only usage need to set
3928 * the HCI_RAW flag to indicate that only user channel is
3931 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3932 set_bit(HCI_RAW, &hdev->flags);
3934 hci_notify(hdev, HCI_DEV_REG);
3937 queue_work(hdev->req_workqueue, &hdev->power_on);
3942 crypto_free_blkcipher(hdev->tfm_aes);
3944 destroy_workqueue(hdev->workqueue);
3945 destroy_workqueue(hdev->req_workqueue);
3947 ida_simple_remove(&hci_index_ida, hdev->id);
3951 EXPORT_SYMBOL(hci_register_dev);
3953 /* Unregister HCI device */
/* Tear down a registered hci_dev in reverse order of registration:
 * remove from the device list, close the device, free reassembly
 * buffers, notify mgmt, unhook rfkill, free the crypto context,
 * remove sysfs/debugfs entries, destroy workqueues and finally flush
 * all stored state lists under hdev->lock before releasing the index.
 */
3954 void hci_unregister_dev(struct hci_dev *hdev)
3958 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3960 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3964 write_lock(&hci_dev_list_lock);
3965 list_del(&hdev->list);
3966 write_unlock(&hci_dev_list_lock);
3968 hci_dev_do_close(hdev);
3970 for (i = 0; i < NUM_REASSEMBLY; i++)
3971 kfree_skb(hdev->reassembly[i]);
3973 cancel_work_sync(&hdev->power_on);
3975 if (!test_bit(HCI_INIT, &hdev->flags) &&
3976 !test_bit(HCI_SETUP, &hdev->dev_flags) &&
3977 !test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
3979 mgmt_index_removed(hdev);
3980 hci_dev_unlock(hdev);
3983 /* mgmt_index_removed should take care of emptying the
3985 BUG_ON(!list_empty(&hdev->mgmt_pending));
3987 hci_notify(hdev, HCI_DEV_UNREG);
3990 rfkill_unregister(hdev->rfkill);
3991 rfkill_destroy(hdev->rfkill);
3995 crypto_free_blkcipher(hdev->tfm_aes);
3997 device_del(&hdev->dev);
3999 debugfs_remove_recursive(hdev->debugfs);
4001 destroy_workqueue(hdev->workqueue);
4002 destroy_workqueue(hdev->req_workqueue);
4005 hci_blacklist_clear(hdev);
4006 hci_uuids_clear(hdev);
4007 hci_link_keys_clear(hdev);
4008 hci_smp_ltks_clear(hdev);
4009 hci_smp_irks_clear(hdev);
4010 hci_remote_oob_data_clear(hdev);
4011 hci_white_list_clear(hdev);
4012 hci_conn_params_clear(hdev);
4013 hci_dev_unlock(hdev);
4017 ida_simple_remove(&hci_index_ida, id);
4019 EXPORT_SYMBOL(hci_unregister_dev);
4021 /* Suspend HCI device */
/* Notify listeners that the device is being suspended. */
4022 int hci_suspend_dev(struct hci_dev *hdev)
4024 hci_notify(hdev, HCI_DEV_SUSPEND);
4027 EXPORT_SYMBOL(hci_suspend_dev);
4029 /* Resume HCI device */
/* Notify listeners that the device has resumed. */
4030 int hci_resume_dev(struct hci_dev *hdev)
4032 hci_notify(hdev, HCI_DEV_RESUME);
4035 EXPORT_SYMBOL(hci_resume_dev);
4037 /* Receive frame from HCI drivers */
/* Entry point for drivers handing an incoming frame to the core.
 * Frames are rejected unless the device is up or initializing; accepted
 * frames are marked incoming, timestamped and queued for the RX work.
 */
4038 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4040 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
4041 && !test_bit(HCI_INIT, &hdev->flags))) {
4047 bt_cb(skb)->incoming = 1;
4050 __net_timestamp(skb);
4052 skb_queue_tail(&hdev->rx_q, skb);
4053 queue_work(hdev->workqueue, &hdev->rx_work);
4057 EXPORT_SYMBOL(hci_recv_frame);
/* Reassemble an HCI packet of the given type from a fragmented byte
 * stream. A per-index skb accumulates data; once the header is
 * complete the expected payload length is read from it, and when the
 * expected count reaches zero the complete frame is handed to
 * hci_recv_frame(). Oversized frames drop the partial skb.
 */
4059 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
4060 int count, __u8 index)
4065 struct sk_buff *skb;
4066 struct bt_skb_cb *scb;
4068 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
4069 index >= NUM_REASSEMBLY)
4072 skb = hdev->reassembly[index];
/* Select max frame size and header length per packet type. */
4076 case HCI_ACLDATA_PKT:
4077 len = HCI_MAX_FRAME_SIZE;
4078 hlen = HCI_ACL_HDR_SIZE;
4081 len = HCI_MAX_EVENT_SIZE;
4082 hlen = HCI_EVENT_HDR_SIZE;
4084 case HCI_SCODATA_PKT:
4085 len = HCI_MAX_SCO_SIZE;
4086 hlen = HCI_SCO_HDR_SIZE;
4090 skb = bt_skb_alloc(len, GFP_ATOMIC);
4094 scb = (void *) skb->cb;
4096 scb->pkt_type = type;
4098 hdev->reassembly[index] = skb;
4102 scb = (void *) skb->cb;
4103 len = min_t(uint, scb->expect, count);
4105 memcpy(skb_put(skb, len), data, len);
/* Once the full header has arrived, read the payload length from it
 * and drop the partial frame if it would not fit in the skb. */
4114 if (skb->len == HCI_EVENT_HDR_SIZE) {
4115 struct hci_event_hdr *h = hci_event_hdr(skb);
4116 scb->expect = h->plen;
4118 if (skb_tailroom(skb) < scb->expect) {
4120 hdev->reassembly[index] = NULL;
4126 case HCI_ACLDATA_PKT:
4127 if (skb->len == HCI_ACL_HDR_SIZE) {
4128 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4129 scb->expect = __le16_to_cpu(h->dlen);
4131 if (skb_tailroom(skb) < scb->expect) {
4133 hdev->reassembly[index] = NULL;
4139 case HCI_SCODATA_PKT:
4140 if (skb->len == HCI_SCO_HDR_SIZE) {
4141 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4142 scb->expect = h->dlen;
4144 if (skb_tailroom(skb) < scb->expect) {
4146 hdev->reassembly[index] = NULL;
4153 if (scb->expect == 0) {
4154 /* Complete frame */
4156 bt_cb(skb)->pkt_type = type;
4157 hci_recv_frame(hdev, skb);
4159 hdev->reassembly[index] = NULL;
/* Feed fragment data of a known packet type into the reassembler,
 * looping until all bytes are consumed (index is derived from type).
 */
4167 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4171 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4175 rem = hci_reassembly(hdev, type, data, count, type - 1);
4179 data += (count - rem);
4185 EXPORT_SYMBOL(hci_recv_fragment);
4187 #define STREAM_REASSEMBLY 0
/* Reassemble from a raw byte stream where the first byte of each frame
 * carries the packet type; uses the dedicated STREAM_REASSEMBLY slot.
 */
4189 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4195 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4198 struct { char type; } *pkt;
4200 /* Start of the frame */
/* Continuation: reuse the type recorded on the in-progress skb. */
4207 type = bt_cb(skb)->pkt_type;
4209 rem = hci_reassembly(hdev, type, data, count,
4214 data += (count - rem);
4220 EXPORT_SYMBOL(hci_recv_stream_fragment);
4222 /* ---- Interface to upper protocols ---- */
/* Register an upper-protocol callback structure with the core. */
4224 int hci_register_cb(struct hci_cb *cb)
4226 BT_DBG("%p name %s", cb, cb->name);
4228 write_lock(&hci_cb_list_lock);
4229 list_add(&cb->list, &hci_cb_list);
4230 write_unlock(&hci_cb_list_lock);
4234 EXPORT_SYMBOL(hci_register_cb);
/* Unregister an upper-protocol callback structure. */
4236 int hci_unregister_cb(struct hci_cb *cb)
4238 BT_DBG("%p name %s", cb, cb->name);
4240 write_lock(&hci_cb_list_lock);
4241 list_del(&cb->list);
4242 write_unlock(&hci_cb_list_lock);
4246 EXPORT_SYMBOL(hci_unregister_cb);
/* Hand an outgoing frame to the driver, first timestamping it and
 * copying it to the monitor socket (and to all sockets when promisc).
 */
4248 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4250 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
4253 __net_timestamp(skb);
4255 /* Send copy to monitor */
4256 hci_send_to_monitor(hdev, skb);
4258 if (atomic_read(&hdev->promisc)) {
4259 /* Send copy to the sockets */
4260 hci_send_to_sock(hdev, skb);
4263 /* Get rid of skb owner, prior to sending to the driver. */
4266 if (hdev->send(hdev, skb) < 0)
4267 BT_ERR("%s sending frame failed", hdev->name);
/* Initialize an HCI request: empty command queue bound to hdev. */
4270 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4272 skb_queue_head_init(&req->cmd_q);
/* Submit a built request: attach the completion callback to the last
 * command, splice the request's command queue onto hdev->cmd_q under
 * its lock and kick the command work. A build error purges the queue;
 * an empty request is rejected.
 */
4277 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4279 struct hci_dev *hdev = req->hdev;
4280 struct sk_buff *skb;
4281 unsigned long flags;
4283 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4285 /* If an error occured during request building, remove all HCI
4286 * commands queued on the HCI request queue.
4289 skb_queue_purge(&req->cmd_q);
4293 /* Do not allow empty requests */
4294 if (skb_queue_empty(&req->cmd_q))
/* Completion is recorded on the final command of the request. */
4297 skb = skb_peek_tail(&req->cmd_q);
4298 bt_cb(skb)->req.complete = complete;
4300 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4301 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4302 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4304 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Allocate an skb carrying an HCI command: header (opcode in LE byte
 * order) followed by plen bytes of parameters. Returns NULL on
 * allocation failure (line not visible in this extract).
 */
4309 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
4310 u32 plen, const void *param)
4312 int len = HCI_COMMAND_HDR_SIZE + plen;
4313 struct hci_command_hdr *hdr;
4314 struct sk_buff *skb;
4316 skb = bt_skb_alloc(len, GFP_ATOMIC);
4320 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
4321 hdr->opcode = cpu_to_le16(opcode);
4325 memcpy(skb_put(skb, plen), param, plen);
4327 BT_DBG("skb len %d", skb->len);
4329 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
4334 /* Send HCI command */
/* Build and queue a single stand-alone HCI command, marking it as the
 * start of a single-command request, then kick the command work.
 */
4335 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4338 struct sk_buff *skb;
4340 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4342 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4344 BT_ERR("%s no memory for command", hdev->name);
4348 /* Stand-alone HCI commands must be flaged as
4349 * single-command requests.
4351 bt_cb(skb)->req.start = true;
4353 skb_queue_tail(&hdev->cmd_q, skb);
4354 queue_work(hdev->workqueue, &hdev->cmd_work);
4359 /* Queue a command to an asynchronous HCI request */
/* Append a command to a request under construction. The first command
 * queued is marked as the request start, and an optional expected
 * completion event can be recorded for the command.
 */
4360 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4361 const void *param, u8 event)
4363 struct hci_dev *hdev = req->hdev;
4364 struct sk_buff *skb;
4366 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4368 /* If an error occured during request building, there is no point in
4369 * queueing the HCI command. We can simply return.
4374 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4376 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4377 hdev->name, opcode);
4382 if (skb_queue_empty(&req->cmd_q))
4383 bt_cb(skb)->req.start = true;
4385 bt_cb(skb)->req.event = event;
4387 skb_queue_tail(&req->cmd_q, skb);
/* Convenience wrapper: queue a command with no expected event. */
4390 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4393 hci_req_add_ev(req, opcode, plen, param, 0);
4396 /* Get data from the previously sent command */
/* Return a pointer to the parameter bytes of the last sent command,
 * or NULL when no command was sent or its opcode does not match.
 */
4397 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4399 struct hci_command_hdr *hdr;
4401 if (!hdev->sent_cmd)
4404 hdr = (void *) hdev->sent_cmd->data;
4406 if (hdr->opcode != cpu_to_le16(opcode))
4409 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4411 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
/* Prepend an ACL header (packed handle/flags and data length) to skb. */
4415 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4417 struct hci_acl_hdr *hdr;
4420 skb_push(skb, HCI_ACL_HDR_SIZE);
4421 skb_reset_transport_header(skb);
4422 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4423 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4424 hdr->dlen = cpu_to_le16(len);
/* Add ACL headers to skb (and to each fragment on its frag_list) and
 * queue everything on @queue. The header's handle comes from the
 * connection or the channel depending on hdev->dev_type; fragments
 * after the first carry flags with ACL_START cleared, and the whole
 * fragment chain is queued atomically under the queue lock.
 */
4427 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4428 struct sk_buff *skb, __u16 flags)
4430 struct hci_conn *conn = chan->conn;
4431 struct hci_dev *hdev = conn->hdev;
4432 struct sk_buff *list;
4434 skb->len = skb_headlen(skb);
4437 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4439 switch (hdev->dev_type) {
4441 hci_add_acl_hdr(skb, conn->handle, flags);
4444 hci_add_acl_hdr(skb, chan->handle, flags);
4447 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4451 list = skb_shinfo(skb)->frag_list;
4453 /* Non fragmented */
4454 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4456 skb_queue_tail(queue, skb);
4459 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4461 skb_shinfo(skb)->frag_list = NULL;
4463 /* Queue all fragments atomically */
4464 spin_lock(&queue->lock);
4466 __skb_queue_tail(queue, skb);
/* Continuation fragments must not carry the ACL_START flag. */
4468 flags &= ~ACL_START;
4471 skb = list; list = list->next;
4473 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4474 hci_add_acl_hdr(skb, conn->handle, flags);
4476 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4478 __skb_queue_tail(queue, skb);
4481 spin_unlock(&queue->lock);
/* Queue ACL data on the channel's data queue and kick the TX work. */
4485 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4487 struct hci_dev *hdev = chan->conn->hdev;
4489 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4491 hci_queue_acl(chan, &chan->data_q, skb, flags);
4493 queue_work(hdev->workqueue, &hdev->tx_work);
/* Prepend a SCO header (handle + length) to skb, queue it on the
 * connection's data queue and kick the TX work.
 */
4497 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4499 struct hci_dev *hdev = conn->hdev;
4500 struct hci_sco_hdr hdr;
4502 BT_DBG("%s len %d", hdev->name, skb->len);
4504 hdr.handle = cpu_to_le16(conn->handle);
4505 hdr.dlen = skb->len;
4507 skb_push(skb, HCI_SCO_HDR_SIZE);
4508 skb_reset_transport_header(skb);
4509 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4511 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
4513 skb_queue_tail(&conn->data_q, skb);
4514 queue_work(hdev->workqueue, &hdev->tx_work);
4517 /* ---- HCI TX task (outgoing data) ---- */
4519 /* HCI Connection scheduler */
/* Pick the connection of the given link type with pending data and the
 * fewest in-flight packets, and compute its transmit quote from the
 * controller's available buffer credits for that link type.
 */
4520 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4523 struct hci_conn_hash *h = &hdev->conn_hash;
4524 struct hci_conn *conn = NULL, *c;
4525 unsigned int num = 0, min = ~0;
4527 /* We don't have to lock device here. Connections are always
4528 * added and removed with TX task disabled. */
4532 list_for_each_entry_rcu(c, &h->list, list) {
4533 if (c->type != type || skb_queue_empty(&c->data_q))
4536 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
/* Prefer the connection with the fewest unacked packets. */
4541 if (c->sent < min) {
4546 if (hci_conn_num(hdev, type) == num)
4555 switch (conn->type) {
4557 cnt = hdev->acl_cnt;
4561 cnt = hdev->sco_cnt;
/* LE shares ACL credits when the controller has no LE buffers. */
4564 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4568 BT_ERR("Unknown link type");
4576 BT_DBG("conn %p quote %d", conn, *quote);
4580 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4582 struct hci_conn_hash *h = &hdev->conn_hash;
4585 BT_ERR("%s link tx timeout", hdev->name);
4589 /* Kill stalled connections */
4590 list_for_each_entry_rcu(c, &h->list, list) {
4591 if (c->type == type && c->sent) {
4592 BT_ERR("%s killing stalled connection %pMR",
4593 hdev->name, &c->dst);
4594 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4601 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4604 struct hci_conn_hash *h = &hdev->conn_hash;
4605 struct hci_chan *chan = NULL;
4606 unsigned int num = 0, min = ~0, cur_prio = 0;
4607 struct hci_conn *conn;
4608 int cnt, q, conn_num = 0;
4610 BT_DBG("%s", hdev->name);
4614 list_for_each_entry_rcu(conn, &h->list, list) {
4615 struct hci_chan *tmp;
4617 if (conn->type != type)
4620 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4625 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4626 struct sk_buff *skb;
4628 if (skb_queue_empty(&tmp->data_q))
4631 skb = skb_peek(&tmp->data_q);
4632 if (skb->priority < cur_prio)
4635 if (skb->priority > cur_prio) {
4638 cur_prio = skb->priority;
4643 if (conn->sent < min) {
4649 if (hci_conn_num(hdev, type) == conn_num)
4658 switch (chan->conn->type) {
4660 cnt = hdev->acl_cnt;
4663 cnt = hdev->block_cnt;
4667 cnt = hdev->sco_cnt;
4670 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4674 BT_ERR("Unknown link type");
4679 BT_DBG("chan %p quote %d", chan, *quote);
4683 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4685 struct hci_conn_hash *h = &hdev->conn_hash;
4686 struct hci_conn *conn;
4689 BT_DBG("%s", hdev->name);
4693 list_for_each_entry_rcu(conn, &h->list, list) {
4694 struct hci_chan *chan;
4696 if (conn->type != type)
4699 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4704 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4705 struct sk_buff *skb;
4712 if (skb_queue_empty(&chan->data_q))
4715 skb = skb_peek(&chan->data_q);
4716 if (skb->priority >= HCI_PRIO_MAX - 1)
4719 skb->priority = HCI_PRIO_MAX - 1;
4721 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4725 if (hci_conn_num(hdev, type) == num)
4733 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4735 /* Calculate count of blocks used by this packet */
4736 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4739 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4741 if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
4742 /* ACL tx timeout must be longer than maximum
4743 * link supervision timeout (40.9 seconds) */
4744 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4745 HCI_ACL_TX_TIMEOUT))
4746 hci_link_tx_to(hdev, ACL_LINK);
4750 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4752 unsigned int cnt = hdev->acl_cnt;
4753 struct hci_chan *chan;
4754 struct sk_buff *skb;
4757 __check_timeout(hdev, cnt);
4759 while (hdev->acl_cnt &&
4760 (chan = hci_chan_sent(hdev, ACL_LINK, "e))) {
4761 u32 priority = (skb_peek(&chan->data_q))->priority;
4762 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4763 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4764 skb->len, skb->priority);
4766 /* Stop if priority has changed */
4767 if (skb->priority < priority)
4770 skb = skb_dequeue(&chan->data_q);
4772 hci_conn_enter_active_mode(chan->conn,
4773 bt_cb(skb)->force_active);
4775 hci_send_frame(hdev, skb);
4776 hdev->acl_last_tx = jiffies;
4784 if (cnt != hdev->acl_cnt)
4785 hci_prio_recalculate(hdev, ACL_LINK);
4788 static void hci_sched_acl_blk(struct hci_dev *hdev)
4790 unsigned int cnt = hdev->block_cnt;
4791 struct hci_chan *chan;
4792 struct sk_buff *skb;
4796 __check_timeout(hdev, cnt);
4798 BT_DBG("%s", hdev->name);
4800 if (hdev->dev_type == HCI_AMP)
4805 while (hdev->block_cnt > 0 &&
4806 (chan = hci_chan_sent(hdev, type, "e))) {
4807 u32 priority = (skb_peek(&chan->data_q))->priority;
4808 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4811 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4812 skb->len, skb->priority);
4814 /* Stop if priority has changed */
4815 if (skb->priority < priority)
4818 skb = skb_dequeue(&chan->data_q);
4820 blocks = __get_blocks(hdev, skb);
4821 if (blocks > hdev->block_cnt)
4824 hci_conn_enter_active_mode(chan->conn,
4825 bt_cb(skb)->force_active);
4827 hci_send_frame(hdev, skb);
4828 hdev->acl_last_tx = jiffies;
4830 hdev->block_cnt -= blocks;
4833 chan->sent += blocks;
4834 chan->conn->sent += blocks;
4838 if (cnt != hdev->block_cnt)
4839 hci_prio_recalculate(hdev, type);
4842 static void hci_sched_acl(struct hci_dev *hdev)
4844 BT_DBG("%s", hdev->name);
4846 /* No ACL link over BR/EDR controller */
4847 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4850 /* No AMP link over AMP controller */
4851 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4854 switch (hdev->flow_ctl_mode) {
4855 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4856 hci_sched_acl_pkt(hdev);
4859 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4860 hci_sched_acl_blk(hdev);
4866 static void hci_sched_sco(struct hci_dev *hdev)
4868 struct hci_conn *conn;
4869 struct sk_buff *skb;
4872 BT_DBG("%s", hdev->name);
4874 if (!hci_conn_num(hdev, SCO_LINK))
4877 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, "e))) {
4878 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4879 BT_DBG("skb %p len %d", skb, skb->len);
4880 hci_send_frame(hdev, skb);
4883 if (conn->sent == ~0)
4889 static void hci_sched_esco(struct hci_dev *hdev)
4891 struct hci_conn *conn;
4892 struct sk_buff *skb;
4895 BT_DBG("%s", hdev->name);
4897 if (!hci_conn_num(hdev, ESCO_LINK))
4900 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4902 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4903 BT_DBG("skb %p len %d", skb, skb->len);
4904 hci_send_frame(hdev, skb);
4907 if (conn->sent == ~0)
4913 static void hci_sched_le(struct hci_dev *hdev)
4915 struct hci_chan *chan;
4916 struct sk_buff *skb;
4917 int quote, cnt, tmp;
4919 BT_DBG("%s", hdev->name);
4921 if (!hci_conn_num(hdev, LE_LINK))
4924 if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
4925 /* LE tx timeout must be longer than maximum
4926 * link supervision timeout (40.9 seconds) */
4927 if (!hdev->le_cnt && hdev->le_pkts &&
4928 time_after(jiffies, hdev->le_last_tx + HZ * 45))
4929 hci_link_tx_to(hdev, LE_LINK);
4932 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4934 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, "e))) {
4935 u32 priority = (skb_peek(&chan->data_q))->priority;
4936 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4937 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4938 skb->len, skb->priority);
4940 /* Stop if priority has changed */
4941 if (skb->priority < priority)
4944 skb = skb_dequeue(&chan->data_q);
4946 hci_send_frame(hdev, skb);
4947 hdev->le_last_tx = jiffies;
4958 hdev->acl_cnt = cnt;
4961 hci_prio_recalculate(hdev, LE_LINK);
4964 static void hci_tx_work(struct work_struct *work)
4966 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4967 struct sk_buff *skb;
4969 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4970 hdev->sco_cnt, hdev->le_cnt);
4972 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4973 /* Schedule queues and send stuff to HCI driver */
4974 hci_sched_acl(hdev);
4975 hci_sched_sco(hdev);
4976 hci_sched_esco(hdev);
4980 /* Send next queued raw (unknown type) packet */
4981 while ((skb = skb_dequeue(&hdev->raw_q)))
4982 hci_send_frame(hdev, skb);
4985 /* ----- HCI RX task (incoming data processing) ----- */
4987 /* ACL data packet */
4988 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4990 struct hci_acl_hdr *hdr = (void *) skb->data;
4991 struct hci_conn *conn;
4992 __u16 handle, flags;
4994 skb_pull(skb, HCI_ACL_HDR_SIZE);
4996 handle = __le16_to_cpu(hdr->handle);
4997 flags = hci_flags(handle);
4998 handle = hci_handle(handle);
5000 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
5003 hdev->stat.acl_rx++;
5006 conn = hci_conn_hash_lookup_handle(hdev, handle);
5007 hci_dev_unlock(hdev);
5010 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
5012 /* Send to upper protocol */
5013 l2cap_recv_acldata(conn, skb, flags);
5016 BT_ERR("%s ACL packet for unknown connection handle %d",
5017 hdev->name, handle);
5023 /* SCO data packet */
5024 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5026 struct hci_sco_hdr *hdr = (void *) skb->data;
5027 struct hci_conn *conn;
5030 skb_pull(skb, HCI_SCO_HDR_SIZE);
5032 handle = __le16_to_cpu(hdr->handle);
5034 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
5036 hdev->stat.sco_rx++;
5039 conn = hci_conn_hash_lookup_handle(hdev, handle);
5040 hci_dev_unlock(hdev);
5043 /* Send to upper protocol */
5044 sco_recv_scodata(conn, skb);
5047 BT_ERR("%s SCO packet for unknown connection handle %d",
5048 hdev->name, handle);
5054 static bool hci_req_is_complete(struct hci_dev *hdev)
5056 struct sk_buff *skb;
5058 skb = skb_peek(&hdev->cmd_q);
5062 return bt_cb(skb)->req.start;
5065 static void hci_resend_last(struct hci_dev *hdev)
5067 struct hci_command_hdr *sent;
5068 struct sk_buff *skb;
5071 if (!hdev->sent_cmd)
5074 sent = (void *) hdev->sent_cmd->data;
5075 opcode = __le16_to_cpu(sent->opcode);
5076 if (opcode == HCI_OP_RESET)
5079 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5083 skb_queue_head(&hdev->cmd_q, skb);
5084 queue_work(hdev->workqueue, &hdev->cmd_work);
5087 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5089 hci_req_complete_t req_complete = NULL;
5090 struct sk_buff *skb;
5091 unsigned long flags;
5093 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5095 /* If the completed command doesn't match the last one that was
5096 * sent we need to do special handling of it.
5098 if (!hci_sent_cmd_data(hdev, opcode)) {
5099 /* Some CSR based controllers generate a spontaneous
5100 * reset complete event during init and any pending
5101 * command will never be completed. In such a case we
5102 * need to resend whatever was the last sent
5105 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5106 hci_resend_last(hdev);
5111 /* If the command succeeded and there's still more commands in
5112 * this request the request is not yet complete.
5114 if (!status && !hci_req_is_complete(hdev))
5117 /* If this was the last command in a request the complete
5118 * callback would be found in hdev->sent_cmd instead of the
5119 * command queue (hdev->cmd_q).
5121 if (hdev->sent_cmd) {
5122 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
5125 /* We must set the complete callback to NULL to
5126 * avoid calling the callback more than once if
5127 * this function gets called again.
5129 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5135 /* Remove all pending commands belonging to this request */
5136 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5137 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5138 if (bt_cb(skb)->req.start) {
5139 __skb_queue_head(&hdev->cmd_q, skb);
5143 req_complete = bt_cb(skb)->req.complete;
5146 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5150 req_complete(hdev, status);
5153 static void hci_rx_work(struct work_struct *work)
5155 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
5156 struct sk_buff *skb;
5158 BT_DBG("%s", hdev->name);
5160 while ((skb = skb_dequeue(&hdev->rx_q))) {
5161 /* Send copy to monitor */
5162 hci_send_to_monitor(hdev, skb);
5164 if (atomic_read(&hdev->promisc)) {
5165 /* Send copy to the sockets */
5166 hci_send_to_sock(hdev, skb);
5169 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5174 if (test_bit(HCI_INIT, &hdev->flags)) {
5175 /* Don't process data packets in this states. */
5176 switch (bt_cb(skb)->pkt_type) {
5177 case HCI_ACLDATA_PKT:
5178 case HCI_SCODATA_PKT:
5185 switch (bt_cb(skb)->pkt_type) {
5187 BT_DBG("%s Event packet", hdev->name);
5188 hci_event_packet(hdev, skb);
5191 case HCI_ACLDATA_PKT:
5192 BT_DBG("%s ACL data packet", hdev->name);
5193 hci_acldata_packet(hdev, skb);
5196 case HCI_SCODATA_PKT:
5197 BT_DBG("%s SCO data packet", hdev->name);
5198 hci_scodata_packet(hdev, skb);
5208 static void hci_cmd_work(struct work_struct *work)
5210 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
5211 struct sk_buff *skb;
5213 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5214 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
5216 /* Send queued commands */
5217 if (atomic_read(&hdev->cmd_cnt)) {
5218 skb = skb_dequeue(&hdev->cmd_q);
5222 kfree_skb(hdev->sent_cmd);
5224 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
5225 if (hdev->sent_cmd) {
5226 atomic_dec(&hdev->cmd_cnt);
5227 hci_send_frame(hdev, skb);
5228 if (test_bit(HCI_RESET, &hdev->flags))
5229 cancel_delayed_work(&hdev->cmd_timer);
5231 schedule_delayed_work(&hdev->cmd_timer,
5234 skb_queue_head(&hdev->cmd_q, skb);
5235 queue_work(hdev->workqueue, &hdev->cmd_work);
5240 void hci_req_add_le_scan_disable(struct hci_request *req)
5242 struct hci_cp_le_set_scan_enable cp;
5244 memset(&cp, 0, sizeof(cp));
5245 cp.enable = LE_SCAN_DISABLE;
5246 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5249 void hci_req_add_le_passive_scan(struct hci_request *req)
5251 struct hci_cp_le_set_scan_param param_cp;
5252 struct hci_cp_le_set_scan_enable enable_cp;
5253 struct hci_dev *hdev = req->hdev;
5256 /* Set require_privacy to false since no SCAN_REQ are send
5257 * during passive scanning. Not using an unresolvable address
5258 * here is important so that peer devices using direct
5259 * advertising with our address will be correctly reported
5260 * by the controller.
5262 if (hci_update_random_address(req, false, &own_addr_type))
5265 memset(¶m_cp, 0, sizeof(param_cp));
5266 param_cp.type = LE_SCAN_PASSIVE;
5267 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5268 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5269 param_cp.own_address_type = own_addr_type;
5270 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5273 memset(&enable_cp, 0, sizeof(enable_cp));
5274 enable_cp.enable = LE_SCAN_ENABLE;
5275 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
5276 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5280 static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5283 BT_DBG("HCI request failed to update background scanning: "
5284 "status 0x%2.2x", status);
5287 /* This function controls the background scanning based on hdev->pend_le_conns
5288 * list. If there are pending LE connection we start the background scanning,
5289 * otherwise we stop it.
5291 * This function requires the caller holds hdev->lock.
5293 void hci_update_background_scan(struct hci_dev *hdev)
5295 struct hci_request req;
5296 struct hci_conn *conn;
5299 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags))
5302 hci_req_init(&req, hdev);
5304 if (list_empty(&hdev->pend_le_conns)) {
5305 /* If there is no pending LE connections, we should stop
5306 * the background scanning.
5309 /* If controller is not scanning we are done. */
5310 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5313 hci_req_add_le_scan_disable(&req);
5315 BT_DBG("%s stopping background scanning", hdev->name);
5317 /* If there is at least one pending LE connection, we should
5318 * keep the background scan running.
5321 /* If controller is connecting, we should not start scanning
5322 * since some controllers are not able to scan and connect at
5325 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5329 /* If controller is currently scanning, we stop it to ensure we
5330 * don't miss any advertising (due to duplicates filter).
5332 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5333 hci_req_add_le_scan_disable(&req);
5335 hci_req_add_le_passive_scan(&req);
5337 BT_DBG("%s starting background scanning", hdev->name);
5340 err = hci_req_run(&req, update_background_scan_complete);
5342 BT_ERR("Failed to run HCI request: err %d", err);