2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth HCI core. */
28 #include <linux/export.h>
29 #include <linux/idr.h>
31 #include <linux/rfkill.h>
33 #include <net/bluetooth/bluetooth.h>
34 #include <net/bluetooth/hci_core.h>
36 static void hci_rx_work(struct work_struct *work);
37 static void hci_cmd_work(struct work_struct *work);
38 static void hci_tx_work(struct work_struct *work);
41 LIST_HEAD(hci_dev_list);
42 DEFINE_RWLOCK(hci_dev_list_lock);
44 /* HCI callback list */
45 LIST_HEAD(hci_cb_list);
46 DEFINE_RWLOCK(hci_cb_list_lock);
48 /* HCI ID Numbering */
49 static DEFINE_IDA(hci_index_ida);
51 /* ---- HCI notifications ---- */
/* Forward a device event to the HCI socket layer so user space is
 * notified (e.g. device up/down).
 * NOTE(review): braces and blank lines appear to have been lost in this
 * extracted chunk; only comments were added below.
 */
53 static void hci_notify(struct hci_dev *hdev, int event)
55 hci_sock_dev_event(hdev, event);
58 /* ---- HCI requests ---- */
/* Completion callback for synchronous requests: store the HCI status in
 * req_result, mark the request done and wake the sleeping waiter — but
 * only if a request is actually pending. */
60 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
62 BT_DBG("%s result 0x%2.2x", hdev->name, result);
64 if (hdev->req_status == HCI_REQ_PEND) {
65 hdev->req_result = result;
66 hdev->req_status = HCI_REQ_DONE;
67 wake_up_interruptible(&hdev->req_wait_q);
/* Abort a pending synchronous request: record the errno-style reason in
 * req_result, flip the state to CANCELED and wake the waiter. */
71 static void hci_req_cancel(struct hci_dev *hdev, int err)
73 BT_DBG("%s err 0x%2.2x", hdev->name, err);
75 if (hdev->req_status == HCI_REQ_PEND) {
76 hdev->req_result = err;
77 hdev->req_status = HCI_REQ_CANCELED;
78 wake_up_interruptible(&hdev->req_wait_q);
/* Retrieve the last received event (hdev->recv_evt) and validate that it
 * matches the expected event code / Command Complete for @opcode.
 * Returns the remaining skb payload on success, ERR_PTR otherwise
 * (ownership of recv_evt is taken here — it is cleared below).
 * NOTE(review): several lines (skb assignment, some returns, braces) are
 * missing from this extract; comments only were added.
 */
82 struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 event)
84 struct hci_ev_cmd_complete *ev;
85 struct hci_event_hdr *hdr;
/* Detach the stored event from hdev; caller now owns it. */
91 hdev->recv_evt = NULL;
96 return ERR_PTR(-ENODATA);
/* Must at least contain a full HCI event header. */
98 if (skb->len < sizeof(*hdr)) {
99 BT_ERR("Too short HCI event");
103 hdr = (void *) skb->data;
104 skb_pull(skb, HCI_EVENT_HDR_SIZE);
/* If the caller asked for a specific event, it must match exactly. */
107 if (hdr->evt != event)
/* Otherwise only a Command Complete event is acceptable. */
112 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
113 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
117 if (skb->len < sizeof(*ev)) {
118 BT_ERR("Too short cmd_complete event");
122 ev = (void *) skb->data;
123 skb_pull(skb, sizeof(*ev));
/* The completed opcode must be the one we issued. */
125 if (opcode == __le16_to_cpu(ev->opcode))
128 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
129 __le16_to_cpu(ev->opcode))
133 return ERR_PTR(-ENODATA);
/* Send a single HCI command and sleep (interruptibly, up to @timeout
 * jiffies) until the matching @event arrives; returns the event payload
 * skb via hci_get_cmd_complete() or an ERR_PTR.
 * NOTE(review): braces, some declarations and case labels are missing in
 * this extract; comments only were added.
 */
136 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
137 void *param, u8 event, u32 timeout)
139 DECLARE_WAITQUEUE(wait, current);
140 struct hci_request req;
143 BT_DBG("%s", hdev->name);
145 hci_req_init(&req, hdev);
147 hci_req_add_ev(&req, opcode, plen, param, event);
/* Mark pending before running so the completion callback can see it. */
149 hdev->req_status = HCI_REQ_PEND;
151 err = hci_req_run(&req, hci_req_sync_complete);
/* Sleep until hci_req_sync_complete() wakes us or the timeout fires. */
155 add_wait_queue(&hdev->req_wait_q, &wait);
156 set_current_state(TASK_INTERRUPTIBLE);
158 schedule_timeout(timeout);
160 remove_wait_queue(&hdev->req_wait_q, &wait);
/* Interrupted by a signal: abandon the wait. */
162 if (signal_pending(current))
163 return ERR_PTR(-EINTR);
165 switch (hdev->req_status) {
/* HCI status byte is translated to a negative errno. */
167 err = -bt_to_errno(hdev->req_result);
170 case HCI_REQ_CANCELED:
171 err = -hdev->req_result;
/* Reset the request bookkeeping for the next caller. */
179 hdev->req_status = hdev->req_result = 0;
181 BT_DBG("%s end: err %d", hdev->name, err);
186 return hci_get_cmd_complete(hdev, opcode, event);
188 EXPORT_SYMBOL(__hci_cmd_sync_ev);
/* Convenience wrapper: synchronous command that waits for the default
 * completion event (event == 0). */
190 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
191 void *param, u32 timeout)
193 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
195 EXPORT_SYMBOL(__hci_cmd_sync);
197 /* Execute request and wait for completion. */
198 static int __hci_req_sync(struct hci_dev *hdev,
199 void (*func)(struct hci_request *req,
201 unsigned long opt, __u32 timeout)
203 struct hci_request req;
204 DECLARE_WAITQUEUE(wait, current);
207 BT_DBG("%s start", hdev->name);
209 hci_req_init(&req, hdev);
211 hdev->req_status = HCI_REQ_PEND;
215 err = hci_req_run(&req, hci_req_sync_complete);
217 hdev->req_status = 0;
219 /* ENODATA means the HCI request command queue is empty.
220 * This can happen when a request with conditionals doesn't
221 * trigger any commands to be sent. This is normal behavior
222 * and should not trigger an error return.
230 add_wait_queue(&hdev->req_wait_q, &wait);
231 set_current_state(TASK_INTERRUPTIBLE);
233 schedule_timeout(timeout);
235 remove_wait_queue(&hdev->req_wait_q, &wait);
237 if (signal_pending(current))
240 switch (hdev->req_status) {
242 err = -bt_to_errno(hdev->req_result);
245 case HCI_REQ_CANCELED:
246 err = -hdev->req_result;
254 hdev->req_status = hdev->req_result = 0;
256 BT_DBG("%s end: err %d", hdev->name, err);
261 static int hci_req_sync(struct hci_dev *hdev,
262 void (*req)(struct hci_request *req,
264 unsigned long opt, __u32 timeout)
268 if (!test_bit(HCI_UP, &hdev->flags))
271 /* Serialize all requests */
273 ret = __hci_req_sync(hdev, req, opt, timeout);
274 hci_req_unlock(hdev);
/* Queue an HCI Reset command and flag the device as resetting. */
279 static void hci_reset_req(struct hci_request *req, unsigned long opt)
281 BT_DBG("%s %ld", req->hdev->name, opt);
284 set_bit(HCI_RESET, &req->hdev->flags);
285 hci_req_add(req, HCI_OP_RESET, 0, NULL);
/* Stage-1 init for BR/EDR controllers: packet-based flow control plus
 * the basic identity reads. */
288 static void bredr_init(struct hci_request *req)
290 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
292 /* Read Local Supported Features */
293 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
295 /* Read Local Version */
296 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
298 /* Read BD Address */
299 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
/* Stage-1 init for AMP controllers: block-based flow control and
 * AMP-specific reads. */
302 static void amp_init(struct hci_request *req)
304 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
306 /* Read Local Version */
307 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
309 /* Read Local AMP Info */
310 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
312 /* Read Data Blk size */
313 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
/* Stage-1 init dispatcher: optional reset (honoring the RESET_ON_CLOSE
 * quirk) then per-device-type setup.
 * NOTE(review): the switch cases calling bredr_init()/amp_init() are
 * missing from this extract. */
316 static void hci_init1_req(struct hci_request *req, unsigned long opt)
318 struct hci_dev *hdev = req->hdev;
320 BT_DBG("%s %ld", hdev->name, opt);
/* If the quirk is set the reset already happened on close. */
323 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
324 hci_reset_req(req, 0);
326 switch (hdev->dev_type) {
336 BT_ERR("Unknown device type %d", hdev->dev_type);
341 static void bredr_setup(struct hci_request *req)
343 struct hci_cp_delete_stored_link_key cp;
347 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
348 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
350 /* Read Class of Device */
351 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
353 /* Read Local Name */
354 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
356 /* Read Voice Setting */
357 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
359 /* Clear Event Filters */
360 flt_type = HCI_FLT_CLEAR_ALL;
361 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
363 /* Connection accept timeout ~20 secs */
364 param = __constant_cpu_to_le16(0x7d00);
365 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, ¶m);
367 bacpy(&cp.bdaddr, BDADDR_ANY);
368 cp.delete_all = 0x01;
369 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
371 /* Read page scan parameters */
372 if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
373 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
374 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
/* Stage-2 setup for LE capable controllers: queue the LE capability
 * reads (buffer size, features, TX power, white list size, states). */
378 static void le_setup(struct hci_request *req)
380 /* Read LE Buffer Size */
381 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
383 /* Read LE Local Supported Features */
384 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
386 /* Read LE Advertising Channel TX Power */
387 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
389 /* Read LE White List Size */
390 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
392 /* Read LE Supported States */
393 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
/* Choose the inquiry result mode supported by the controller, working
 * around known-buggy manufacturer/revision combinations.
 * NOTE(review): the return statements are missing from this extract;
 * presumably extended > RSSI > standard mode — confirm upstream. */
396 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
398 if (lmp_ext_inq_capable(hdev))
401 if (lmp_inq_rssi_capable(hdev))
/* Known-broken controller revisions follow (quirk blacklist). */
404 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
405 hdev->lmp_subver == 0x0757)
408 if (hdev->manufacturer == 15) {
409 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
411 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
413 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
417 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
418 hdev->lmp_subver == 0x1805)
/* Queue a Write Inquiry Mode command with the chosen mode. */
424 static void hci_setup_inquiry_mode(struct hci_request *req)
428 mode = hci_get_inquiry_mode(req->hdev);
430 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
/* Build and queue the HCI event mask (and, for LE controllers, the LE
 * event mask) according to the controller's LMP feature bits. */
433 static void hci_setup_event_mask(struct hci_request *req)
435 struct hci_dev *hdev = req->hdev;
437 /* The second byte is 0xff instead of 0x9f (two reserved bits
438 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
441 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
443 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
444 * any event mask for pre 1.2 devices.
446 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
449 if (lmp_bredr_capable(hdev)) {
450 events[4] |= 0x01; /* Flow Specification Complete */
451 events[4] |= 0x02; /* Inquiry Result with RSSI */
452 events[4] |= 0x04; /* Read Remote Extended Features Complete */
453 events[5] |= 0x08; /* Synchronous Connection Complete */
454 events[5] |= 0x10; /* Synchronous Connection Changed */
/* Non-BR/EDR path: enable individual events only when the matching
 * LMP feature is present. */
457 if (lmp_inq_rssi_capable(hdev))
458 events[4] |= 0x02; /* Inquiry Result with RSSI */
460 if (lmp_sniffsubr_capable(hdev))
461 events[5] |= 0x20; /* Sniff Subrating */
463 if (lmp_pause_enc_capable(hdev))
464 events[5] |= 0x80; /* Encryption Key Refresh Complete */
466 if (lmp_ext_inq_capable(hdev))
467 events[5] |= 0x40; /* Extended Inquiry Result */
469 if (lmp_no_flush_capable(hdev))
470 events[7] |= 0x01; /* Enhanced Flush Complete */
472 if (lmp_lsto_capable(hdev))
473 events[6] |= 0x80; /* Link Supervision Timeout Changed */
475 if (lmp_ssp_capable(hdev)) {
476 events[6] |= 0x01; /* IO Capability Request */
477 events[6] |= 0x02; /* IO Capability Response */
478 events[6] |= 0x04; /* User Confirmation Request */
479 events[6] |= 0x08; /* User Passkey Request */
480 events[6] |= 0x10; /* Remote OOB Data Request */
481 events[6] |= 0x20; /* Simple Pairing Complete */
482 events[7] |= 0x04; /* User Passkey Notification */
483 events[7] |= 0x08; /* Keypress Notification */
484 events[7] |= 0x10; /* Remote Host Supported
485 * Features Notification
489 if (lmp_le_capable(hdev))
490 events[7] |= 0x20; /* LE Meta-Event */
492 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
/* LE controllers additionally get an LE event mask; the events array
 * is reused after being zeroed (specific LE bits set in lines missing
 * from this extract). */
494 if (lmp_le_capable(hdev)) {
495 memset(events, 0, sizeof(events));
497 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
498 sizeof(events), events);
/* Stage-2 init: per-transport setup (BR/EDR, LE), event masks, SSP/EIR
 * configuration, inquiry mode and extended features / auth enable.
 * NOTE(review): the bredr_setup()/le_setup() calls and several
 * declarations are missing from this extract; comments only added. */
502 static void hci_init2_req(struct hci_request *req, unsigned long opt)
504 struct hci_dev *hdev = req->hdev;
506 if (lmp_bredr_capable(hdev))
509 if (lmp_le_capable(hdev))
512 hci_setup_event_mask(req);
/* Read Local Supported Commands is only defined for 1.2+. */
514 if (hdev->hci_ver > BLUETOOTH_VER_1_1)
515 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
517 if (lmp_ssp_capable(hdev)) {
/* SSP enabled by management: write SSP mode to the controller. */
518 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
520 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
521 sizeof(mode), &mode);
/* SSP disabled: clear any stale EIR data on the controller. */
523 struct hci_cp_write_eir cp;
525 memset(hdev->eir, 0, sizeof(hdev->eir));
526 memset(&cp, 0, sizeof(cp));
528 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
532 if (lmp_inq_rssi_capable(hdev))
533 hci_setup_inquiry_mode(req);
534 
535 if (lmp_inq_tx_pwr_capable(hdev))
536 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
538 if (lmp_ext_feat_capable(hdev)) {
539 struct hci_cp_read_local_ext_features cp;
542 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
/* If link-level security is required, enable authentication. */
546 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
548 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
/* Compose the default link policy from the controller's LMP features
 * (role switch / hold / sniff / park) and queue the write command. */
553 static void hci_setup_link_policy(struct hci_request *req)
555 struct hci_dev *hdev = req->hdev;
556 struct hci_cp_write_def_link_policy cp;
559 if (lmp_rswitch_capable(hdev))
560 link_policy |= HCI_LP_RSWITCH;
561 if (lmp_hold_capable(hdev))
562 link_policy |= HCI_LP_HOLD;
563 if (lmp_sniff_capable(hdev))
564 link_policy |= HCI_LP_SNIFF;
565 if (lmp_park_capable(hdev))
566 link_policy |= HCI_LP_PARK;
568 cp.policy = cpu_to_le16(link_policy);
569 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
/* Write LE Host Supported, but only when the desired value differs
 * from what the controller already reports. */
572 static void hci_set_le_support(struct hci_request *req)
574 struct hci_dev *hdev = req->hdev;
575 struct hci_cp_write_le_host_supported cp;
577 memset(&cp, 0, sizeof(cp));
579 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
581 cp.simul = lmp_le_br_capable(hdev);
/* Skip the command if the host-LE setting is already correct. */
584 if (cp.le != lmp_host_le_capable(hdev))
585 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
/* Stage-3 init: link policy (when the controller supports the command,
 * bit 4 of command byte 5) and LE host support. */
589 static void hci_init3_req(struct hci_request *req, unsigned long opt)
591 struct hci_dev *hdev = req->hdev;
593 if (hdev->commands[5] & 0x10)
594 hci_setup_link_policy(req);
596 if (lmp_le_capable(hdev)) {
597 hci_set_le_support(req);
/* Run the three init stages synchronously; stages 2 and 3 only apply
 * to BR/EDR-type (incl. LE/dual-mode) controllers, not AMP. */
602 static int __hci_init(struct hci_dev *hdev)
606 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
610 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
611 * BR/EDR/LE type controllers. AMP controllers only need the
614 if (hdev->dev_type != HCI_BREDR)
617 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
621 return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
/* ioctl request builders: each extracts its option from @opt and queues
 * the corresponding single HCI command.
 * NOTE(review): local variable declarations (scan/auth/encrypt) are
 * missing from this extract; comments only were added. */
624 static void hci_scan_req(struct hci_request *req, unsigned long opt)
628 BT_DBG("%s %x", req->hdev->name, scan);
630 /* Inquiry and Page scans */
631 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* Enable/disable authentication (HCISETAUTH). */
634 static void hci_auth_req(struct hci_request *req, unsigned long opt)
638 BT_DBG("%s %x", req->hdev->name, auth);
641 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
/* Enable/disable encryption mode (HCISETENCRYPT). */
644 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
648 BT_DBG("%s %x", req->hdev->name, encrypt);
651 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
/* Set the default link policy (HCISETLINKPOL). */
654 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
656 __le16 policy = cpu_to_le16(opt);
658 BT_DBG("%s %x", req->hdev->name, policy);
660 /* Default link policy */
661 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
664 /* Get HCI device by index.
665 * Device is held on return. */
/* Walks hci_dev_list under the read lock; takes a reference via
 * hci_dev_hold() on a match. Returns NULL when no device has @index
 * (return statement missing from this extract). */
666 struct hci_dev *hci_dev_get(int index)
668 struct hci_dev *hdev = NULL, *d;
675 read_lock(&hci_dev_list_lock);
676 list_for_each_entry(d, &hci_dev_list, list) {
677 if (d->id == index) {
678 hdev = hci_dev_hold(d);
682 read_unlock(&hci_dev_list_lock);
686 /* ---- Inquiry support ---- */
/* True while a discovery procedure is running or names are still being
 * resolved (return values stripped from this extract). */
688 bool hci_discovery_active(struct hci_dev *hdev)
690 struct discovery_state *discov = &hdev->discovery;
692 switch (discov->state) {
693 case DISCOVERY_FINDING:
694 case DISCOVERY_RESOLVING:
/* Transition the discovery state machine and emit mgmt "discovering"
 * events on the STOPPED and FINDING edges. */
702 void hci_discovery_set_state(struct hci_dev *hdev, int state)
704 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
/* No-op if the state is unchanged. */
706 if (hdev->discovery.state == state)
710 case DISCOVERY_STOPPED:
/* Only report "stopped" if discovery had actually started. */
711 if (hdev->discovery.state != DISCOVERY_STARTING)
712 mgmt_discovering(hdev, 0);
714 case DISCOVERY_STARTING:
716 case DISCOVERY_FINDING:
717 mgmt_discovering(hdev, 1);
719 case DISCOVERY_RESOLVING:
721 case DISCOVERY_STOPPING:
725 hdev->discovery.state = state;
/* Drop every entry from the inquiry cache and reinitialize its lists
 * (the list_del/kfree of each entry is missing from this extract). */
728 static void inquiry_cache_flush(struct hci_dev *hdev)
730 struct discovery_state *cache = &hdev->discovery;
731 struct inquiry_entry *p, *n;
733 list_for_each_entry_safe(p, n, &cache->all, all) {
738 INIT_LIST_HEAD(&cache->unknown);
739 INIT_LIST_HEAD(&cache->resolve);
/* Find a cache entry by bdaddr in the "all" list; NULL if absent. */
742 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
745 struct discovery_state *cache = &hdev->discovery;
746 struct inquiry_entry *e;
748 BT_DBG("cache %p, %pMR", cache, bdaddr);
750 list_for_each_entry(e, &cache->all, all) {
751 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Same lookup, restricted to entries whose remote name is unknown. */
758 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
761 struct discovery_state *cache = &hdev->discovery;
762 struct inquiry_entry *e;
764 BT_DBG("cache %p, %pMR", cache, bdaddr);
766 list_for_each_entry(e, &cache->unknown, list) {
767 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Lookup in the resolve list: BDADDR_ANY matches the first entry in
 * @state, otherwise match on the exact address. */
774 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
778 struct discovery_state *cache = &hdev->discovery;
779 struct inquiry_entry *e;
781 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
783 list_for_each_entry(e, &cache->resolve, list) {
784 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
786 if (!bacmp(&e->data.bdaddr, bdaddr))
/* (Re)insert @ie into the resolve list ordered by descending |RSSI|,
 * keeping NAME_PENDING entries ahead of it. */
793 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
794 struct inquiry_entry *ie)
796 struct discovery_state *cache = &hdev->discovery;
797 struct list_head *pos = &cache->resolve;
798 struct inquiry_entry *p;
802 list_for_each_entry(p, &cache->resolve, list) {
803 if (p->name_state != NAME_PENDING &&
804 abs(p->data.rssi) >= abs(ie->data.rssi))
809 list_add(&ie->list, pos);
/* Insert or refresh an inquiry-cache entry for @data. Reports SSP mode
 * via *ssp and (per the visible NAME_NOT_KNOWN check) appears to return
 * whether a name request is still needed — confirm against caller.
 * NOTE(review): several returns/branches are missing in this extract. */
812 bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
813 bool name_known, bool *ssp)
815 struct discovery_state *cache = &hdev->discovery;
816 struct inquiry_entry *ie;
818 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
/* Fresh inquiry data invalidates any stored OOB data for this peer. */
820 hci_remove_remote_oob_data(hdev, &data->bdaddr);
823 *ssp = data->ssp_mode;
825 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
827 if (ie->data.ssp_mode && ssp)
/* RSSI changed while a name request is still needed: update and
 * re-sort the resolve list. */
830 if (ie->name_state == NAME_NEEDED &&
831 data->rssi != ie->data.rssi) {
832 ie->data.rssi = data->rssi;
833 hci_inquiry_cache_update_resolve(hdev, ie);
839 /* Entry not in the cache. Add new one. */
840 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
844 list_add(&ie->all, &cache->all);
847 ie->name_state = NAME_KNOWN;
849 ie->name_state = NAME_NOT_KNOWN;
850 list_add(&ie->list, &cache->unknown);
/* Existing entry whose name just became known: promote it. */
854 if (name_known && ie->name_state != NAME_KNOWN &&
855 ie->name_state != NAME_PENDING) {
856 ie->name_state = NAME_KNOWN;
860 memcpy(&ie->data, data, sizeof(*data));
861 ie->timestamp = jiffies;
862 cache->timestamp = jiffies;
864 if (ie->name_state == NAME_NOT_KNOWN)
/* Copy up to @num cached inquiry results into @buf as an array of
 * struct inquiry_info; returns the number copied (the copied counter
 * increment and limit check are missing from this extract). */
870 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
872 struct discovery_state *cache = &hdev->discovery;
873 struct inquiry_info *info = (struct inquiry_info *) buf;
874 struct inquiry_entry *e;
877 list_for_each_entry(e, &cache->all, all) {
878 struct inquiry_data *data = &e->data;
883 bacpy(&info->bdaddr, &data->bdaddr);
884 info->pscan_rep_mode = data->pscan_rep_mode;
885 info->pscan_period_mode = data->pscan_period_mode;
886 info->pscan_mode = data->pscan_mode;
887 memcpy(info->dev_class, data->dev_class, 3);
888 info->clock_offset = data->clock_offset;
894 BT_DBG("cache %p, copied %d", cache, copied);
/* Request builder for HCISETINQ-style inquiry: skip if an inquiry is
 * already active, otherwise queue HCI_OP_INQUIRY with the user's LAP,
 * length and response count. */
898 static void hci_inq_req(struct hci_request *req, unsigned long opt)
900 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
901 struct hci_dev *hdev = req->hdev;
902 struct hci_cp_inquiry cp;
904 BT_DBG("%s", hdev->name);
906 if (test_bit(HCI_INQUIRY, &hdev->flags))
910 memcpy(&cp.lap, &ir->lap, 3);
911 cp.length = ir->length;
912 cp.num_rsp = ir->num_rsp;
913 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* wait_on_bit action: sleep (schedule call stripped from extract) and
 * report whether a signal interrupted the wait. */
916 static int wait_inquiry(void *word)
919 return signal_pending(current);
/* HCIINQUIRY ioctl: optionally flush a stale cache, run the inquiry
 * synchronously, wait for HCI_INQUIRY to clear, then copy the cached
 * results back to user space.
 * NOTE(review): error paths, labels and some declarations are missing
 * from this extract; comments only were added. */
922 int hci_inquiry(void __user *arg)
924 __u8 __user *ptr = arg;
925 struct hci_inquiry_req ir;
926 struct hci_dev *hdev;
927 int err = 0, do_inquiry = 0, max_rsp;
931 if (copy_from_user(&ir, ptr, sizeof(ir)))
934 hdev = hci_dev_get(ir.dev_id);
/* Start a fresh inquiry when the cache is old/empty or the caller
 * explicitly asked for a flush. */
939 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
940 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
941 inquiry_cache_flush(hdev);
944 hci_dev_unlock(hdev);
/* ir.length is in 1.28s units upstream; here scaled via 2000ms. */
946 timeo = ir.length * msecs_to_jiffies(2000);
949 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
954 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
955 * cleared). If it is interrupted by a signal, return -EINTR.
957 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
962 /* for unlimited number of responses we will use buffer with
965 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
967 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
968 * copy it to the user space.
970 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
977 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
978 hci_dev_unlock(hdev);
980 BT_DBG("num_rsp %d", ir.num_rsp);
/* Two-stage copy-out: the request header first, then the results. */
982 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
984 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
/* Assemble the LE advertising data (flags, TX power, local name) into
 * @ptr; returns the total AD length written.
 * NOTE(review): the flags-field emission and some length bookkeeping
 * lines are missing from this extract. */
997 static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
999 u8 ad_len = 0, flags = 0;
1002 if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
1003 flags |= LE_AD_GENERAL;
1005 if (!lmp_bredr_capable(hdev))
1006 flags |= LE_AD_NO_BREDR;
1008 if (lmp_le_br_capable(hdev))
1009 flags |= LE_AD_SIM_LE_BREDR_CTRL;
1011 if (lmp_host_le_br_capable(hdev))
1012 flags |= LE_AD_SIM_LE_BREDR_HOST;
1015 BT_DBG("adv flags 0x%02x", flags);
/* TX power element, only when the controller reported a valid value. */
1025 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
1027 ptr[1] = EIR_TX_POWER;
1028 ptr[2] = (u8) hdev->adv_tx_power;
/* Local name element: shortened if it does not fit in the remaining
 * AD space, complete otherwise. */
1034 name_len = strlen(hdev->dev_name);
1036 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
1038 if (name_len > max_len) {
1040 ptr[1] = EIR_NAME_SHORT;
1042 ptr[1] = EIR_NAME_COMPLETE;
1044 ptr[0] = name_len + 1;
1046 memcpy(ptr + 2, hdev->dev_name, name_len);
1048 ad_len += (name_len + 2);
1049 ptr += (name_len + 2);
/* Regenerate the advertising data and queue LE Set Advertising Data —
 * but only if the data actually changed since last time. */
1055 void hci_update_ad(struct hci_request *req)
1057 struct hci_dev *hdev = req->hdev;
1058 struct hci_cp_le_set_adv_data cp;
1061 if (!lmp_le_capable(hdev))
1064 memset(&cp, 0, sizeof(cp));
1066 len = create_ad(hdev, cp.data);
/* Unchanged data: skip the command entirely. */
1068 if (hdev->adv_data_len == len &&
1069 memcmp(cp.data, hdev->adv_data, len) == 0)
1072 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1073 hdev->adv_data_len = len;
1077 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1080 /* ---- HCI ioctl helpers ---- */
/* Power on device @dev: driver open, optional vendor setup, full HCI
 * init (unless the device is raw), then HCI_UP + notifications. On
 * init failure the work queues and pending commands are cleaned up and
 * the driver is closed again.
 * NOTE(review): goto labels, error assignments and the hdev->close()
 * call are missing from this extract; comments only were added. */
1082 int hci_dev_open(__u16 dev)
1084 struct hci_dev *hdev;
1087 hdev = hci_dev_get(dev);
1091 BT_DBG("%s %p", hdev->name, hdev);
/* Refuse devices that are mid-unregister or rfkill-blocked. */
1095 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
1100 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
/* Already up: nothing to do. */
1105 if (test_bit(HCI_UP, &hdev->flags)) {
1110 if (hdev->open(hdev)) {
/* Allow exactly one outstanding command during init. */
1115 atomic_set(&hdev->cmd_cnt, 1);
1116 set_bit(HCI_INIT, &hdev->flags);
/* Vendor-specific setup hook, first open only (HCI_SETUP). */
1118 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
1119 ret = hdev->setup(hdev);
1122 /* Treat all non BR/EDR controllers as raw devices if
1123 * enable_hs is not set.
1125 if (hdev->dev_type != HCI_BREDR && !enable_hs)
1126 set_bit(HCI_RAW, &hdev->flags);
1128 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1129 set_bit(HCI_RAW, &hdev->flags);
/* Raw devices skip the three-stage HCI init sequence. */
1131 if (!test_bit(HCI_RAW, &hdev->flags))
1132 ret = __hci_init(hdev);
1135 clear_bit(HCI_INIT, &hdev->flags);
/* Success path: mark up, notify sockets and (if managed) mgmt. */
1139 set_bit(HCI_UP, &hdev->flags);
1140 hci_notify(hdev, HCI_DEV_UP);
1141 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
1142 mgmt_valid_hdev(hdev)) {
1144 mgmt_powered(hdev, 1);
1145 hci_dev_unlock(hdev);
1148 /* Init failed, cleanup */
1149 flush_work(&hdev->tx_work);
1150 flush_work(&hdev->cmd_work);
1151 flush_work(&hdev->rx_work);
1153 skb_queue_purge(&hdev->cmd_q);
1154 skb_queue_purge(&hdev->rx_q);
/* Drop any command that was in flight when init failed. */
1159 if (hdev->sent_cmd) {
1160 kfree_skb(hdev->sent_cmd);
1161 hdev->sent_cmd = NULL;
1169 hci_req_unlock(hdev);
/* Power down @hdev: cancel outstanding work/requests, flush caches and
 * connections, optionally reset the controller (RESET_ON_CLOSE quirk),
 * purge all queues and notify mgmt/sockets.
 * NOTE(review): the hdev->flush()/hdev->close() calls, some locks and
 * the return are missing from this extract; comments only were added. */
1174 static int hci_dev_do_close(struct hci_dev *hdev)
1176 BT_DBG("%s %p", hdev->name, hdev);
1178 cancel_work_sync(&hdev->le_scan);
1180 cancel_delayed_work(&hdev->power_off);
/* Abort any synchronous request still sleeping in __hci_req_sync(). */
1182 hci_req_cancel(hdev, ENODEV);
/* Already down: just stop the command timer and release the lock. */
1185 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1186 del_timer_sync(&hdev->cmd_timer);
1187 hci_req_unlock(hdev);
1191 /* Flush RX and TX works */
1192 flush_work(&hdev->tx_work);
1193 flush_work(&hdev->rx_work);
/* A running discoverable timeout does not survive power-off. */
1195 if (hdev->discov_timeout > 0) {
1196 cancel_delayed_work(&hdev->discov_off);
1197 hdev->discov_timeout = 0;
1198 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1201 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1202 cancel_delayed_work(&hdev->service_cache);
1204 cancel_delayed_work_sync(&hdev->le_scan_disable);
1207 inquiry_cache_flush(hdev);
1208 hci_conn_hash_flush(hdev);
1209 hci_dev_unlock(hdev);
1211 hci_notify(hdev, HCI_DEV_DOWN);
/* Issue a controller reset on close when the quirk requires it and
 * the device is not raw. */
1217 skb_queue_purge(&hdev->cmd_q);
1218 atomic_set(&hdev->cmd_cnt, 1);
1219 if (!test_bit(HCI_RAW, &hdev->flags) &&
1220 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1221 set_bit(HCI_INIT, &hdev->flags);
1222 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1223 clear_bit(HCI_INIT, &hdev->flags);
1226 /* flush cmd work */
1227 flush_work(&hdev->cmd_work);
1230 skb_queue_purge(&hdev->rx_q);
1231 skb_queue_purge(&hdev->cmd_q);
1232 skb_queue_purge(&hdev->raw_q);
1234 /* Drop last sent command */
1235 if (hdev->sent_cmd) {
1236 del_timer_sync(&hdev->cmd_timer);
1237 kfree_skb(hdev->sent_cmd);
1238 hdev->sent_cmd = NULL;
/* kfree_skb(NULL) is a safe no-op, so no guard is needed here. */
1241 kfree_skb(hdev->recv_evt);
1242 hdev->recv_evt = NULL;
1244 /* After this point our queues are empty
1245 * and no tasks are scheduled. */
/* Clear all non-persistent device flags. */
1250 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
1252 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
1253 mgmt_valid_hdev(hdev)) {
1255 mgmt_powered(hdev, 0);
1256 hci_dev_unlock(hdev);
1259 /* Controller radio is available but is currently powered down */
1260 hdev->amp_status = 0;
1262 memset(hdev->eir, 0, sizeof(hdev->eir));
1263 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1265 hci_req_unlock(hdev);
/* HCIDEVDOWN ioctl: cancel a pending auto power-off, then run the full
 * close sequence for device @dev. */
1271 int hci_dev_close(__u16 dev)
1273 struct hci_dev *hdev;
1276 hdev = hci_dev_get(dev);
1280 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1281 cancel_delayed_work(&hdev->power_off);
1283 err = hci_dev_do_close(hdev);
/* HCIDEVRESET ioctl: drop queued traffic and caches, reset the packet
 * counters and (for non-raw devices) issue an HCI Reset.
 * NOTE(review): the hdev->flush() call and ret declaration are missing
 * from this extract. */
1289 int hci_dev_reset(__u16 dev)
1291 struct hci_dev *hdev;
1294 hdev = hci_dev_get(dev);
1300 if (!test_bit(HCI_UP, &hdev->flags))
1304 skb_queue_purge(&hdev->rx_q);
1305 skb_queue_purge(&hdev->cmd_q);
1308 inquiry_cache_flush(hdev);
1309 hci_conn_hash_flush(hdev);
1310 hci_dev_unlock(hdev);
1315 atomic_set(&hdev->cmd_cnt, 1);
1316 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1318 if (!test_bit(HCI_RAW, &hdev->flags))
1319 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1322 hci_req_unlock(hdev);
/* HCIDEVRESTAT ioctl: zero the device statistics counters. */
1327 int hci_dev_reset_stat(__u16 dev)
1329 struct hci_dev *hdev;
1332 hdev = hci_dev_get(dev);
1336 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
/* Dispatcher for the HCISET* device ioctls: copies the request from
 * user space and either runs a synchronous HCI request or updates the
 * corresponding hdev field directly.
 * NOTE(review): the switch statement, case labels and error paths are
 * partially missing from this extract; comments only were added. */
1343 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1345 struct hci_dev *hdev;
1346 struct hci_dev_req dr;
1349 if (copy_from_user(&dr, arg, sizeof(dr)))
1352 hdev = hci_dev_get(dr.dev_id);
/* HCISETAUTH */
1358 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
/* HCISETENCRYPT: requires an encryption-capable controller. */
1363 if (!lmp_encrypt_capable(hdev)) {
1368 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1369 /* Auth must be enabled first */
1370 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1376 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
/* HCISETSCAN */
1381 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
/* HCISETLINKPOL */
1386 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1390 case HCISETLINKMODE:
1391 hdev->link_mode = ((__u16) dr.dev_opt) &
1392 (HCI_LM_MASTER | HCI_LM_ACCEPT);
/* HCISETPTYPE */
1396 hdev->pkt_type = (__u16) dr.dev_opt;
/* HCISETACLMTU: dev_opt packs MTU (high half) and packet count. */
1400 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1401 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
/* HCISETSCOMTU: same packing for SCO. */
1405 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1406 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
/* HCIGETDEVLIST ioctl: enumerate registered devices into a temporary
 * buffer (bounded by the user-supplied dev_num, capped at two pages)
 * and copy the shrunk list back to user space.
 * NOTE(review): dev_num declaration, dr initialization, n increment and
 * kfree are missing from this extract; comments only were added. */
1418 int hci_get_dev_list(void __user *arg)
1420 struct hci_dev *hdev;
1421 struct hci_dev_list_req *dl;
1422 struct hci_dev_req *dr;
1423 int n = 0, size, err;
1426 if (get_user(dev_num, (__u16 __user *) arg))
/* Sanity-bound the requested count to two pages worth of entries. */
1429 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1432 size = sizeof(*dl) + dev_num * sizeof(*dr);
1434 dl = kzalloc(size, GFP_KERNEL);
1440 read_lock(&hci_dev_list_lock);
1441 list_for_each_entry(hdev, &hci_dev_list, list) {
/* Listing a device counts as user activity: abort auto power-off. */
1442 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1443 cancel_delayed_work(&hdev->power_off);
/* Legacy (non-mgmt) users get the pairable flag set implicitly. */
1445 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1446 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1448 (dr + n)->dev_id = hdev->id;
1449 (dr + n)->dev_opt = hdev->flags;
1454 read_unlock(&hci_dev_list_lock);
/* Only copy out as many entries as were actually filled. */
1457 size = sizeof(*dl) + n * sizeof(*dr);
1459 err = copy_to_user(arg, dl, size);
1462 return err ? -EFAULT : 0;
/* HCIGETDEVINFO ioctl: fill struct hci_dev_info for the requested
 * device id and copy it back to user space. */
1465 int hci_get_dev_info(void __user *arg)
1467 struct hci_dev *hdev;
1468 struct hci_dev_info di;
1471 if (copy_from_user(&di, arg, sizeof(di)))
1474 hdev = hci_dev_get(di.dev_id);
/* Querying the device counts as user activity: abort auto power-off. */
1478 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1479 cancel_delayed_work_sync(&hdev->power_off);
1481 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1482 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1484 strcpy(di.name, hdev->name);
1485 di.bdaddr = hdev->bdaddr;
/* Bus type in the low nibble, device type in the high nibble. */
1486 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1487 di.flags = hdev->flags;
1488 di.pkt_type = hdev->pkt_type;
/* LE-only controllers report their LE MTU in the ACL fields. */
1489 if (lmp_bredr_capable(hdev)) {
1490 di.acl_mtu = hdev->acl_mtu;
1491 di.acl_pkts = hdev->acl_pkts;
1492 di.sco_mtu = hdev->sco_mtu;
1493 di.sco_pkts = hdev->sco_pkts;
1495 di.acl_mtu = hdev->le_mtu;
1496 di.acl_pkts = hdev->le_pkts;
1500 di.link_policy = hdev->link_policy;
1501 di.link_mode = hdev->link_mode;
1503 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1504 memcpy(&di.features, &hdev->features, sizeof(di.features));
1506 if (copy_to_user(arg, &di, sizeof(di)))
1514 /* ---- Interface to HCI drivers ---- */
/* rfkill callback: closing the device when the switch blocks the radio
 * (the blocked==false branch is missing from this extract). */
1516 static int hci_rfkill_set_block(void *data, bool blocked)
1518 struct hci_dev *hdev = data;
1520 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1525 hci_dev_do_close(hdev);
1530 static const struct rfkill_ops hci_rfkill_ops = {
1531 .set_block = hci_rfkill_set_block,
/* Deferred power-on worker: open the device and, if it was powered on
 * automatically, schedule an auto power-off; first-time setup ends by
 * announcing the index to mgmt. */
1534 static void hci_power_on(struct work_struct *work)
1536 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1538 BT_DBG("%s", hdev->name);
1540 if (hci_dev_open(hdev->id) < 0)
1543 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1544 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1545 HCI_AUTO_OFF_TIMEOUT);
1547 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
1548 mgmt_index_added(hdev);
/* Deferred power-off worker: simply run the full close sequence. */
1551 static void hci_power_off(struct work_struct *work)
1553 struct hci_dev *hdev = container_of(work, struct hci_dev,
1556 BT_DBG("%s", hdev->name);
1558 hci_dev_do_close(hdev);
/* Discoverable-timeout worker: drop back to page-scan only and clear
 * the stored timeout. */
1561 static void hci_discov_off(struct work_struct *work)
1563 struct hci_dev *hdev;
1564 u8 scan = SCAN_PAGE;
1566 hdev = container_of(work, struct hci_dev, discov_off.work);
1568 BT_DBG("%s", hdev->name);
1572 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1574 hdev->discov_timeout = 0;
1576 hci_dev_unlock(hdev);
/* Free every registered UUID (the kfree of each entry is missing from
 * this extract). */
1579 int hci_uuids_clear(struct hci_dev *hdev)
1581 struct bt_uuid *uuid, *tmp;
1583 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1584 list_del(&uuid->list);
/* Free every stored BR/EDR link key. */
1591 int hci_link_keys_clear(struct hci_dev *hdev)
1593 struct list_head *p, *n;
1595 list_for_each_safe(p, n, &hdev->link_keys) {
1596 struct link_key *key;
1598 key = list_entry(p, struct link_key, list);
/* Free every stored SMP long term key. */
1607 int hci_smp_ltks_clear(struct hci_dev *hdev)
1609 struct smp_ltk *k, *tmp;
1611 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
/* Look up a stored link key by peer address; NULL if not found. */
1619 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1623 list_for_each_entry(k, &hdev->link_keys, list)
1624 if (bacmp(bdaddr, &k->bdaddr) == 0)
/* Decide whether a link key should be stored persistently, based on
 * key type and the local/remote bonding requirements (the actual
 * true/false returns are missing from this extract). */
1630 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1631 u8 key_type, u8 old_key_type)
/* Legacy key types (< 0x03) are handled first. */
1634 if (key_type < 0x03)
1637 /* Debug keys are insecure so don't store them persistently */
1638 if (key_type == HCI_LK_DEBUG_COMBINATION)
1641 /* Changed combination key and there's no previous one */
1642 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1645 /* Security mode 3 case */
1649 /* Neither local nor remote side had no-bonding as requirement */
1650 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1653 /* Local side had dedicated bonding as requirement */
1654 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1657 /* Remote side had dedicated bonding as requirement */
1658 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1661 /* If none of the above criteria match, then don't store the key
/* Look up an LTK by EDIV and Rand (the encrypted-key filter lines are
 * missing from this extract). */
1666 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1670 list_for_each_entry(k, &hdev->long_term_keys, list) {
1671 if (k->ediv != ediv ||
1672 memcmp(rand, k->rand, sizeof(k->rand)))
/* Look up an LTK by peer address and address type. */
1681 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1686 list_for_each_entry(k, &hdev->long_term_keys, list)
1687 if (addr_type == k->bdaddr_type &&
1688 bacmp(bdaddr, &k->bdaddr) == 0)
1694 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1695 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1697 struct link_key *key, *old_key;
1701 old_key = hci_find_link_key(hdev, bdaddr);
1703 old_key_type = old_key->type;
1706 old_key_type = conn ? conn->key_type : 0xff;
1707 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1710 list_add(&key->list, &hdev->link_keys);
1713 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
1715 /* Some buggy controller combinations generate a changed
1716 * combination key for legacy pairing even when there's no
1718 if (type == HCI_LK_CHANGED_COMBINATION &&
1719 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1720 type = HCI_LK_COMBINATION;
1722 conn->key_type = type;
1725 bacpy(&key->bdaddr, bdaddr);
1726 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1727 key->pin_len = pin_len;
1729 if (type == HCI_LK_CHANGED_COMBINATION)
1730 key->type = old_key_type;
1737 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1739 mgmt_new_link_key(hdev, key, persistent);
1742 conn->flush_key = !persistent;
1747 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
1748 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
1751 struct smp_ltk *key, *old_key;
1753 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1756 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1760 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1763 list_add(&key->list, &hdev->long_term_keys);
1766 bacpy(&key->bdaddr, bdaddr);
1767 key->bdaddr_type = addr_type;
1768 memcpy(key->val, tk, sizeof(key->val));
1769 key->authenticated = authenticated;
1771 key->enc_size = enc_size;
1773 memcpy(key->rand, rand, sizeof(key->rand));
1778 if (type & HCI_SMP_LTK)
1779 mgmt_new_ltk(hdev, key, 1);
1784 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1786 struct link_key *key;
1788 key = hci_find_link_key(hdev, bdaddr);
1792 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1794 list_del(&key->list);
1800 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1802 struct smp_ltk *k, *tmp;
1804 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1805 if (bacmp(bdaddr, &k->bdaddr))
1808 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1817 /* HCI command timer function */
1818 static void hci_cmd_timeout(unsigned long arg)
1820 struct hci_dev *hdev = (void *) arg;
1822 if (hdev->sent_cmd) {
1823 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1824 u16 opcode = __le16_to_cpu(sent->opcode);
1826 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1828 BT_ERR("%s command tx timeout", hdev->name);
1831 atomic_set(&hdev->cmd_cnt, 1);
1832 queue_work(hdev->workqueue, &hdev->cmd_work);
1835 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1838 struct oob_data *data;
1840 list_for_each_entry(data, &hdev->remote_oob_data, list)
1841 if (bacmp(bdaddr, &data->bdaddr) == 0)
1847 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1849 struct oob_data *data;
1851 data = hci_find_remote_oob_data(hdev, bdaddr);
1855 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1857 list_del(&data->list);
1863 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1865 struct oob_data *data, *n;
1867 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1868 list_del(&data->list);
1875 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1878 struct oob_data *data;
1880 data = hci_find_remote_oob_data(hdev, bdaddr);
1883 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1887 bacpy(&data->bdaddr, bdaddr);
1888 list_add(&data->list, &hdev->remote_oob_data);
1891 memcpy(data->hash, hash, sizeof(data->hash));
1892 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1894 BT_DBG("%s for %pMR", hdev->name, bdaddr);
1899 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
1901 struct bdaddr_list *b;
1903 list_for_each_entry(b, &hdev->blacklist, list)
1904 if (bacmp(bdaddr, &b->bdaddr) == 0)
1910 int hci_blacklist_clear(struct hci_dev *hdev)
1912 struct list_head *p, *n;
1914 list_for_each_safe(p, n, &hdev->blacklist) {
1915 struct bdaddr_list *b;
1917 b = list_entry(p, struct bdaddr_list, list);
1926 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1928 struct bdaddr_list *entry;
1930 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1933 if (hci_blacklist_lookup(hdev, bdaddr))
1936 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1940 bacpy(&entry->bdaddr, bdaddr);
1942 list_add(&entry->list, &hdev->blacklist);
1944 return mgmt_device_blocked(hdev, bdaddr, type);
1947 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1949 struct bdaddr_list *entry;
1951 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1952 return hci_blacklist_clear(hdev);
1954 entry = hci_blacklist_lookup(hdev, bdaddr);
1958 list_del(&entry->list);
1961 return mgmt_device_unblocked(hdev, bdaddr, type);
1964 static void le_scan_param_req(struct hci_request *req, unsigned long opt)
1966 struct le_scan_params *param = (struct le_scan_params *) opt;
1967 struct hci_cp_le_set_scan_param cp;
1969 memset(&cp, 0, sizeof(cp));
1970 cp.type = param->type;
1971 cp.interval = cpu_to_le16(param->interval);
1972 cp.window = cpu_to_le16(param->window);
1974 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1977 static void le_scan_enable_req(struct hci_request *req, unsigned long opt)
1979 struct hci_cp_le_set_scan_enable cp;
1981 memset(&cp, 0, sizeof(cp));
1985 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1988 static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
1989 u16 window, int timeout)
1991 long timeo = msecs_to_jiffies(3000);
1992 struct le_scan_params param;
1995 BT_DBG("%s", hdev->name);
1997 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1998 return -EINPROGRESS;
2001 param.interval = interval;
2002 param.window = window;
2006 err = __hci_req_sync(hdev, le_scan_param_req, (unsigned long) ¶m,
2009 err = __hci_req_sync(hdev, le_scan_enable_req, 0, timeo);
2011 hci_req_unlock(hdev);
2016 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
2017 msecs_to_jiffies(timeout));
2022 int hci_cancel_le_scan(struct hci_dev *hdev)
2024 BT_DBG("%s", hdev->name);
2026 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
2029 if (cancel_delayed_work(&hdev->le_scan_disable)) {
2030 struct hci_cp_le_set_scan_enable cp;
2032 /* Send HCI command to disable LE Scan */
2033 memset(&cp, 0, sizeof(cp));
2034 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2040 static void le_scan_disable_work(struct work_struct *work)
2042 struct hci_dev *hdev = container_of(work, struct hci_dev,
2043 le_scan_disable.work);
2044 struct hci_cp_le_set_scan_enable cp;
2046 BT_DBG("%s", hdev->name);
2048 memset(&cp, 0, sizeof(cp));
2050 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2053 static void le_scan_work(struct work_struct *work)
2055 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
2056 struct le_scan_params *param = &hdev->le_scan_params;
2058 BT_DBG("%s", hdev->name);
2060 hci_do_le_scan(hdev, param->type, param->interval, param->window,
2064 int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
2067 struct le_scan_params *param = &hdev->le_scan_params;
2069 BT_DBG("%s", hdev->name);
2071 if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
2074 if (work_busy(&hdev->le_scan))
2075 return -EINPROGRESS;
2078 param->interval = interval;
2079 param->window = window;
2080 param->timeout = timeout;
2082 queue_work(system_long_wq, &hdev->le_scan);
2087 /* Alloc HCI device */
2088 struct hci_dev *hci_alloc_dev(void)
2090 struct hci_dev *hdev;
2092 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
2096 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2097 hdev->esco_type = (ESCO_HV1);
2098 hdev->link_mode = (HCI_LM_ACCEPT);
2099 hdev->io_capability = 0x03; /* No Input No Output */
2100 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2101 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2103 hdev->sniff_max_interval = 800;
2104 hdev->sniff_min_interval = 80;
2106 mutex_init(&hdev->lock);
2107 mutex_init(&hdev->req_lock);
2109 INIT_LIST_HEAD(&hdev->mgmt_pending);
2110 INIT_LIST_HEAD(&hdev->blacklist);
2111 INIT_LIST_HEAD(&hdev->uuids);
2112 INIT_LIST_HEAD(&hdev->link_keys);
2113 INIT_LIST_HEAD(&hdev->long_term_keys);
2114 INIT_LIST_HEAD(&hdev->remote_oob_data);
2115 INIT_LIST_HEAD(&hdev->conn_hash.list);
2117 INIT_WORK(&hdev->rx_work, hci_rx_work);
2118 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2119 INIT_WORK(&hdev->tx_work, hci_tx_work);
2120 INIT_WORK(&hdev->power_on, hci_power_on);
2121 INIT_WORK(&hdev->le_scan, le_scan_work);
2123 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2124 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2125 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2127 skb_queue_head_init(&hdev->rx_q);
2128 skb_queue_head_init(&hdev->cmd_q);
2129 skb_queue_head_init(&hdev->raw_q);
2131 init_waitqueue_head(&hdev->req_wait_q);
2133 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
2135 hci_init_sysfs(hdev);
2136 discovery_init(hdev);
2140 EXPORT_SYMBOL(hci_alloc_dev);
2142 /* Free HCI device */
2143 void hci_free_dev(struct hci_dev *hdev)
2145 /* will free via device release */
2146 put_device(&hdev->dev);
2148 EXPORT_SYMBOL(hci_free_dev);
2150 /* Register HCI device */
2151 int hci_register_dev(struct hci_dev *hdev)
2155 if (!hdev->open || !hdev->close)
2158 /* Do not allow HCI_AMP devices to register at index 0,
2159 * so the index can be used as the AMP controller ID.
2161 switch (hdev->dev_type) {
2163 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2166 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2175 sprintf(hdev->name, "hci%d", id);
2178 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2180 write_lock(&hci_dev_list_lock);
2181 list_add(&hdev->list, &hci_dev_list);
2182 write_unlock(&hci_dev_list_lock);
2184 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
2186 if (!hdev->workqueue) {
2191 hdev->req_workqueue = alloc_workqueue(hdev->name,
2192 WQ_HIGHPRI | WQ_UNBOUND |
2194 if (!hdev->req_workqueue) {
2195 destroy_workqueue(hdev->workqueue);
2200 error = hci_add_sysfs(hdev);
2204 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2205 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2208 if (rfkill_register(hdev->rfkill) < 0) {
2209 rfkill_destroy(hdev->rfkill);
2210 hdev->rfkill = NULL;
2214 set_bit(HCI_SETUP, &hdev->dev_flags);
2216 if (hdev->dev_type != HCI_AMP)
2217 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2219 hci_notify(hdev, HCI_DEV_REG);
2222 queue_work(hdev->req_workqueue, &hdev->power_on);
2227 destroy_workqueue(hdev->workqueue);
2228 destroy_workqueue(hdev->req_workqueue);
2230 ida_simple_remove(&hci_index_ida, hdev->id);
2231 write_lock(&hci_dev_list_lock);
2232 list_del(&hdev->list);
2233 write_unlock(&hci_dev_list_lock);
2237 EXPORT_SYMBOL(hci_register_dev);
2239 /* Unregister HCI device */
2240 void hci_unregister_dev(struct hci_dev *hdev)
2244 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2246 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2250 write_lock(&hci_dev_list_lock);
2251 list_del(&hdev->list);
2252 write_unlock(&hci_dev_list_lock);
2254 hci_dev_do_close(hdev);
2256 for (i = 0; i < NUM_REASSEMBLY; i++)
2257 kfree_skb(hdev->reassembly[i]);
2259 cancel_work_sync(&hdev->power_on);
2261 if (!test_bit(HCI_INIT, &hdev->flags) &&
2262 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
2264 mgmt_index_removed(hdev);
2265 hci_dev_unlock(hdev);
2268 /* mgmt_index_removed should take care of emptying the
2270 BUG_ON(!list_empty(&hdev->mgmt_pending));
2272 hci_notify(hdev, HCI_DEV_UNREG);
2275 rfkill_unregister(hdev->rfkill);
2276 rfkill_destroy(hdev->rfkill);
2279 hci_del_sysfs(hdev);
2281 destroy_workqueue(hdev->workqueue);
2282 destroy_workqueue(hdev->req_workqueue);
2285 hci_blacklist_clear(hdev);
2286 hci_uuids_clear(hdev);
2287 hci_link_keys_clear(hdev);
2288 hci_smp_ltks_clear(hdev);
2289 hci_remote_oob_data_clear(hdev);
2290 hci_dev_unlock(hdev);
2294 ida_simple_remove(&hci_index_ida, id);
2296 EXPORT_SYMBOL(hci_unregister_dev);
2298 /* Suspend HCI device */
2299 int hci_suspend_dev(struct hci_dev *hdev)
2301 hci_notify(hdev, HCI_DEV_SUSPEND);
2304 EXPORT_SYMBOL(hci_suspend_dev);
2306 /* Resume HCI device */
2307 int hci_resume_dev(struct hci_dev *hdev)
2309 hci_notify(hdev, HCI_DEV_RESUME);
2312 EXPORT_SYMBOL(hci_resume_dev);
2314 /* Receive frame from HCI drivers */
2315 int hci_recv_frame(struct sk_buff *skb)
2317 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2318 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
2319 && !test_bit(HCI_INIT, &hdev->flags))) {
2325 bt_cb(skb)->incoming = 1;
2328 __net_timestamp(skb);
2330 skb_queue_tail(&hdev->rx_q, skb);
2331 queue_work(hdev->workqueue, &hdev->rx_work);
2335 EXPORT_SYMBOL(hci_recv_frame);
2337 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
2338 int count, __u8 index)
2343 struct sk_buff *skb;
2344 struct bt_skb_cb *scb;
2346 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
2347 index >= NUM_REASSEMBLY)
2350 skb = hdev->reassembly[index];
2354 case HCI_ACLDATA_PKT:
2355 len = HCI_MAX_FRAME_SIZE;
2356 hlen = HCI_ACL_HDR_SIZE;
2359 len = HCI_MAX_EVENT_SIZE;
2360 hlen = HCI_EVENT_HDR_SIZE;
2362 case HCI_SCODATA_PKT:
2363 len = HCI_MAX_SCO_SIZE;
2364 hlen = HCI_SCO_HDR_SIZE;
2368 skb = bt_skb_alloc(len, GFP_ATOMIC);
2372 scb = (void *) skb->cb;
2374 scb->pkt_type = type;
2376 skb->dev = (void *) hdev;
2377 hdev->reassembly[index] = skb;
2381 scb = (void *) skb->cb;
2382 len = min_t(uint, scb->expect, count);
2384 memcpy(skb_put(skb, len), data, len);
2393 if (skb->len == HCI_EVENT_HDR_SIZE) {
2394 struct hci_event_hdr *h = hci_event_hdr(skb);
2395 scb->expect = h->plen;
2397 if (skb_tailroom(skb) < scb->expect) {
2399 hdev->reassembly[index] = NULL;
2405 case HCI_ACLDATA_PKT:
2406 if (skb->len == HCI_ACL_HDR_SIZE) {
2407 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2408 scb->expect = __le16_to_cpu(h->dlen);
2410 if (skb_tailroom(skb) < scb->expect) {
2412 hdev->reassembly[index] = NULL;
2418 case HCI_SCODATA_PKT:
2419 if (skb->len == HCI_SCO_HDR_SIZE) {
2420 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2421 scb->expect = h->dlen;
2423 if (skb_tailroom(skb) < scb->expect) {
2425 hdev->reassembly[index] = NULL;
2432 if (scb->expect == 0) {
2433 /* Complete frame */
2435 bt_cb(skb)->pkt_type = type;
2436 hci_recv_frame(skb);
2438 hdev->reassembly[index] = NULL;
2446 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2450 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2454 rem = hci_reassembly(hdev, type, data, count, type - 1);
2458 data += (count - rem);
2464 EXPORT_SYMBOL(hci_recv_fragment);
2466 #define STREAM_REASSEMBLY 0
2468 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2474 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2477 struct { char type; } *pkt;
2479 /* Start of the frame */
2486 type = bt_cb(skb)->pkt_type;
2488 rem = hci_reassembly(hdev, type, data, count,
2493 data += (count - rem);
2499 EXPORT_SYMBOL(hci_recv_stream_fragment);
2501 /* ---- Interface to upper protocols ---- */
2503 int hci_register_cb(struct hci_cb *cb)
2505 BT_DBG("%p name %s", cb, cb->name);
2507 write_lock(&hci_cb_list_lock);
2508 list_add(&cb->list, &hci_cb_list);
2509 write_unlock(&hci_cb_list_lock);
2513 EXPORT_SYMBOL(hci_register_cb);
2515 int hci_unregister_cb(struct hci_cb *cb)
2517 BT_DBG("%p name %s", cb, cb->name);
2519 write_lock(&hci_cb_list_lock);
2520 list_del(&cb->list);
2521 write_unlock(&hci_cb_list_lock);
2525 EXPORT_SYMBOL(hci_unregister_cb);
2527 static int hci_send_frame(struct sk_buff *skb)
2529 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2536 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2539 __net_timestamp(skb);
2541 /* Send copy to monitor */
2542 hci_send_to_monitor(hdev, skb);
2544 if (atomic_read(&hdev->promisc)) {
2545 /* Send copy to the sockets */
2546 hci_send_to_sock(hdev, skb);
2549 /* Get rid of skb owner, prior to sending to the driver. */
2552 return hdev->send(skb);
2555 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2557 skb_queue_head_init(&req->cmd_q);
2562 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2564 struct hci_dev *hdev = req->hdev;
2565 struct sk_buff *skb;
2566 unsigned long flags;
2568 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2570 /* If an error occured during request building, remove all HCI
2571 * commands queued on the HCI request queue.
2574 skb_queue_purge(&req->cmd_q);
2578 /* Do not allow empty requests */
2579 if (skb_queue_empty(&req->cmd_q))
2582 skb = skb_peek_tail(&req->cmd_q);
2583 bt_cb(skb)->req.complete = complete;
2585 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2586 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2587 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2589 queue_work(hdev->workqueue, &hdev->cmd_work);
2594 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
2595 u32 plen, void *param)
2597 int len = HCI_COMMAND_HDR_SIZE + plen;
2598 struct hci_command_hdr *hdr;
2599 struct sk_buff *skb;
2601 skb = bt_skb_alloc(len, GFP_ATOMIC);
2605 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2606 hdr->opcode = cpu_to_le16(opcode);
2610 memcpy(skb_put(skb, plen), param, plen);
2612 BT_DBG("skb len %d", skb->len);
2614 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2615 skb->dev = (void *) hdev;
2620 /* Send HCI command */
2621 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
2623 struct sk_buff *skb;
2625 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2627 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2629 BT_ERR("%s no memory for command", hdev->name);
2633 /* Stand-alone HCI commands must be flaged as
2634 * single-command requests.
2636 bt_cb(skb)->req.start = true;
2638 skb_queue_tail(&hdev->cmd_q, skb);
2639 queue_work(hdev->workqueue, &hdev->cmd_work);
2644 /* Queue a command to an asynchronous HCI request */
2645 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen, void *param,
2648 struct hci_dev *hdev = req->hdev;
2649 struct sk_buff *skb;
2651 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2653 /* If an error occured during request building, there is no point in
2654 * queueing the HCI command. We can simply return.
2659 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2661 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2662 hdev->name, opcode);
2667 if (skb_queue_empty(&req->cmd_q))
2668 bt_cb(skb)->req.start = true;
2670 bt_cb(skb)->req.event = event;
2672 skb_queue_tail(&req->cmd_q, skb);
2675 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen, void *param)
2677 hci_req_add_ev(req, opcode, plen, param, 0);
2680 /* Get data from the previously sent command */
2681 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2683 struct hci_command_hdr *hdr;
2685 if (!hdev->sent_cmd)
2688 hdr = (void *) hdev->sent_cmd->data;
2690 if (hdr->opcode != cpu_to_le16(opcode))
2693 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2695 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2699 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2701 struct hci_acl_hdr *hdr;
2704 skb_push(skb, HCI_ACL_HDR_SIZE);
2705 skb_reset_transport_header(skb);
2706 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2707 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2708 hdr->dlen = cpu_to_le16(len);
2711 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
2712 struct sk_buff *skb, __u16 flags)
2714 struct hci_conn *conn = chan->conn;
2715 struct hci_dev *hdev = conn->hdev;
2716 struct sk_buff *list;
2718 skb->len = skb_headlen(skb);
2721 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2723 switch (hdev->dev_type) {
2725 hci_add_acl_hdr(skb, conn->handle, flags);
2728 hci_add_acl_hdr(skb, chan->handle, flags);
2731 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2735 list = skb_shinfo(skb)->frag_list;
2737 /* Non fragmented */
2738 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2740 skb_queue_tail(queue, skb);
2743 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2745 skb_shinfo(skb)->frag_list = NULL;
2747 /* Queue all fragments atomically */
2748 spin_lock(&queue->lock);
2750 __skb_queue_tail(queue, skb);
2752 flags &= ~ACL_START;
2755 skb = list; list = list->next;
2757 skb->dev = (void *) hdev;
2758 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2759 hci_add_acl_hdr(skb, conn->handle, flags);
2761 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2763 __skb_queue_tail(queue, skb);
2766 spin_unlock(&queue->lock);
2770 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2772 struct hci_dev *hdev = chan->conn->hdev;
2774 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
2776 skb->dev = (void *) hdev;
2778 hci_queue_acl(chan, &chan->data_q, skb, flags);
2780 queue_work(hdev->workqueue, &hdev->tx_work);
2784 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2786 struct hci_dev *hdev = conn->hdev;
2787 struct hci_sco_hdr hdr;
2789 BT_DBG("%s len %d", hdev->name, skb->len);
2791 hdr.handle = cpu_to_le16(conn->handle);
2792 hdr.dlen = skb->len;
2794 skb_push(skb, HCI_SCO_HDR_SIZE);
2795 skb_reset_transport_header(skb);
2796 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2798 skb->dev = (void *) hdev;
2799 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2801 skb_queue_tail(&conn->data_q, skb);
2802 queue_work(hdev->workqueue, &hdev->tx_work);
2805 /* ---- HCI TX task (outgoing data) ---- */
2807 /* HCI Connection scheduler */
2808 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2811 struct hci_conn_hash *h = &hdev->conn_hash;
2812 struct hci_conn *conn = NULL, *c;
2813 unsigned int num = 0, min = ~0;
2815 /* We don't have to lock device here. Connections are always
2816 * added and removed with TX task disabled. */
2820 list_for_each_entry_rcu(c, &h->list, list) {
2821 if (c->type != type || skb_queue_empty(&c->data_q))
2824 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2829 if (c->sent < min) {
2834 if (hci_conn_num(hdev, type) == num)
2843 switch (conn->type) {
2845 cnt = hdev->acl_cnt;
2849 cnt = hdev->sco_cnt;
2852 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2856 BT_ERR("Unknown link type");
2864 BT_DBG("conn %p quote %d", conn, *quote);
2868 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2870 struct hci_conn_hash *h = &hdev->conn_hash;
2873 BT_ERR("%s link tx timeout", hdev->name);
2877 /* Kill stalled connections */
2878 list_for_each_entry_rcu(c, &h->list, list) {
2879 if (c->type == type && c->sent) {
2880 BT_ERR("%s killing stalled connection %pMR",
2881 hdev->name, &c->dst);
2882 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
2889 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2892 struct hci_conn_hash *h = &hdev->conn_hash;
2893 struct hci_chan *chan = NULL;
2894 unsigned int num = 0, min = ~0, cur_prio = 0;
2895 struct hci_conn *conn;
2896 int cnt, q, conn_num = 0;
2898 BT_DBG("%s", hdev->name);
2902 list_for_each_entry_rcu(conn, &h->list, list) {
2903 struct hci_chan *tmp;
2905 if (conn->type != type)
2908 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2913 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2914 struct sk_buff *skb;
2916 if (skb_queue_empty(&tmp->data_q))
2919 skb = skb_peek(&tmp->data_q);
2920 if (skb->priority < cur_prio)
2923 if (skb->priority > cur_prio) {
2926 cur_prio = skb->priority;
2931 if (conn->sent < min) {
2937 if (hci_conn_num(hdev, type) == conn_num)
2946 switch (chan->conn->type) {
2948 cnt = hdev->acl_cnt;
2951 cnt = hdev->block_cnt;
2955 cnt = hdev->sco_cnt;
2958 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2962 BT_ERR("Unknown link type");
2967 BT_DBG("chan %p quote %d", chan, *quote);
2971 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2973 struct hci_conn_hash *h = &hdev->conn_hash;
2974 struct hci_conn *conn;
2977 BT_DBG("%s", hdev->name);
2981 list_for_each_entry_rcu(conn, &h->list, list) {
2982 struct hci_chan *chan;
2984 if (conn->type != type)
2987 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2992 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
2993 struct sk_buff *skb;
3000 if (skb_queue_empty(&chan->data_q))
3003 skb = skb_peek(&chan->data_q);
3004 if (skb->priority >= HCI_PRIO_MAX - 1)
3007 skb->priority = HCI_PRIO_MAX - 1;
3009 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3013 if (hci_conn_num(hdev, type) == num)
3021 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3023 /* Calculate count of blocks used by this packet */
3024 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3027 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3029 if (!test_bit(HCI_RAW, &hdev->flags)) {
3030 /* ACL tx timeout must be longer than maximum
3031 * link supervision timeout (40.9 seconds) */
3032 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3033 HCI_ACL_TX_TIMEOUT))
3034 hci_link_tx_to(hdev, ACL_LINK);
3038 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3040 unsigned int cnt = hdev->acl_cnt;
3041 struct hci_chan *chan;
3042 struct sk_buff *skb;
3045 __check_timeout(hdev, cnt);
3047 while (hdev->acl_cnt &&
3048 (chan = hci_chan_sent(hdev, ACL_LINK, "e))) {
3049 u32 priority = (skb_peek(&chan->data_q))->priority;
3050 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3051 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3052 skb->len, skb->priority);
3054 /* Stop if priority has changed */
3055 if (skb->priority < priority)
3058 skb = skb_dequeue(&chan->data_q);
3060 hci_conn_enter_active_mode(chan->conn,
3061 bt_cb(skb)->force_active);
3063 hci_send_frame(skb);
3064 hdev->acl_last_tx = jiffies;
3072 if (cnt != hdev->acl_cnt)
3073 hci_prio_recalculate(hdev, ACL_LINK);
3076 static void hci_sched_acl_blk(struct hci_dev *hdev)
3078 unsigned int cnt = hdev->block_cnt;
3079 struct hci_chan *chan;
3080 struct sk_buff *skb;
3084 __check_timeout(hdev, cnt);
3086 BT_DBG("%s", hdev->name);
3088 if (hdev->dev_type == HCI_AMP)
3093 while (hdev->block_cnt > 0 &&
3094 (chan = hci_chan_sent(hdev, type, "e))) {
3095 u32 priority = (skb_peek(&chan->data_q))->priority;
3096 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3099 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3100 skb->len, skb->priority);
3102 /* Stop if priority has changed */
3103 if (skb->priority < priority)
3106 skb = skb_dequeue(&chan->data_q);
3108 blocks = __get_blocks(hdev, skb);
3109 if (blocks > hdev->block_cnt)
3112 hci_conn_enter_active_mode(chan->conn,
3113 bt_cb(skb)->force_active);
3115 hci_send_frame(skb);
3116 hdev->acl_last_tx = jiffies;
3118 hdev->block_cnt -= blocks;
3121 chan->sent += blocks;
3122 chan->conn->sent += blocks;
3126 if (cnt != hdev->block_cnt)
3127 hci_prio_recalculate(hdev, type);
3130 static void hci_sched_acl(struct hci_dev *hdev)
3132 BT_DBG("%s", hdev->name);
3134 /* No ACL link over BR/EDR controller */
3135 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3138 /* No AMP link over AMP controller */
3139 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3142 switch (hdev->flow_ctl_mode) {
3143 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3144 hci_sched_acl_pkt(hdev);
3147 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3148 hci_sched_acl_blk(hdev);
3154 static void hci_sched_sco(struct hci_dev *hdev)
3156 struct hci_conn *conn;
3157 struct sk_buff *skb;
3160 BT_DBG("%s", hdev->name);
3162 if (!hci_conn_num(hdev, SCO_LINK))
3165 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, "e))) {
3166 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3167 BT_DBG("skb %p len %d", skb, skb->len);
3168 hci_send_frame(skb);
3171 if (conn->sent == ~0)
3177 static void hci_sched_esco(struct hci_dev *hdev)
3179 struct hci_conn *conn;
3180 struct sk_buff *skb;
3183 BT_DBG("%s", hdev->name);
3185 if (!hci_conn_num(hdev, ESCO_LINK))
3188 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3190 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3191 BT_DBG("skb %p len %d", skb, skb->len);
3192 hci_send_frame(skb);
3195 if (conn->sent == ~0)
3201 static void hci_sched_le(struct hci_dev *hdev)
3203 struct hci_chan *chan;
3204 struct sk_buff *skb;
3205 int quote, cnt, tmp;
3207 BT_DBG("%s", hdev->name);
3209 if (!hci_conn_num(hdev, LE_LINK))
3212 if (!test_bit(HCI_RAW, &hdev->flags)) {
3213 /* LE tx timeout must be longer than maximum
3214 * link supervision timeout (40.9 seconds) */
3215 if (!hdev->le_cnt && hdev->le_pkts &&
3216 time_after(jiffies, hdev->le_last_tx + HZ * 45))
3217 hci_link_tx_to(hdev, LE_LINK);
3220 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3222 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, "e))) {
3223 u32 priority = (skb_peek(&chan->data_q))->priority;
3224 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3225 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3226 skb->len, skb->priority);
3228 /* Stop if priority has changed */
3229 if (skb->priority < priority)
3232 skb = skb_dequeue(&chan->data_q);
3234 hci_send_frame(skb);
3235 hdev->le_last_tx = jiffies;
3246 hdev->acl_cnt = cnt;
3249 hci_prio_recalculate(hdev, LE_LINK);
3252 static void hci_tx_work(struct work_struct *work)
3254 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3255 struct sk_buff *skb;
3257 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
3258 hdev->sco_cnt, hdev->le_cnt);
3260 /* Schedule queues and send stuff to HCI driver */
3262 hci_sched_acl(hdev);
3264 hci_sched_sco(hdev);
3266 hci_sched_esco(hdev);
3270 /* Send next queued raw (unknown type) packet */
3271 while ((skb = skb_dequeue(&hdev->raw_q)))
3272 hci_send_frame(skb);
3275 /* ----- HCI RX task (incoming data processing) ----- */
3277 /* ACL data packet */
3278 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3280 struct hci_acl_hdr *hdr = (void *) skb->data;
3281 struct hci_conn *conn;
3282 __u16 handle, flags;
3284 skb_pull(skb, HCI_ACL_HDR_SIZE);
3286 handle = __le16_to_cpu(hdr->handle);
3287 flags = hci_flags(handle);
3288 handle = hci_handle(handle);
3290 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3293 hdev->stat.acl_rx++;
3296 conn = hci_conn_hash_lookup_handle(hdev, handle);
3297 hci_dev_unlock(hdev);
3300 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3302 /* Send to upper protocol */
3303 l2cap_recv_acldata(conn, skb, flags);
3306 BT_ERR("%s ACL packet for unknown connection handle %d",
3307 hdev->name, handle);
3313 /* SCO data packet */
3314 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3316 struct hci_sco_hdr *hdr = (void *) skb->data;
3317 struct hci_conn *conn;
3320 skb_pull(skb, HCI_SCO_HDR_SIZE);
3322 handle = __le16_to_cpu(hdr->handle);
3324 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
3326 hdev->stat.sco_rx++;
3329 conn = hci_conn_hash_lookup_handle(hdev, handle);
3330 hci_dev_unlock(hdev);
3333 /* Send to upper protocol */
3334 sco_recv_scodata(conn, skb);
3337 BT_ERR("%s SCO packet for unknown connection handle %d",
3338 hdev->name, handle);
3344 static bool hci_req_is_complete(struct hci_dev *hdev)
3346 struct sk_buff *skb;
3348 skb = skb_peek(&hdev->cmd_q);
3352 return bt_cb(skb)->req.start;
3355 static void hci_resend_last(struct hci_dev *hdev)
3357 struct hci_command_hdr *sent;
3358 struct sk_buff *skb;
3361 if (!hdev->sent_cmd)
3364 sent = (void *) hdev->sent_cmd->data;
3365 opcode = __le16_to_cpu(sent->opcode);
3366 if (opcode == HCI_OP_RESET)
3369 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3373 skb_queue_head(&hdev->cmd_q, skb);
3374 queue_work(hdev->workqueue, &hdev->cmd_work);
3377 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
3379 hci_req_complete_t req_complete = NULL;
3380 struct sk_buff *skb;
3381 unsigned long flags;
3383 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3385 /* If the completed command doesn't match the last one that was
3386 * sent we need to do special handling of it.
3388 if (!hci_sent_cmd_data(hdev, opcode)) {
3389 /* Some CSR based controllers generate a spontaneous
3390 * reset complete event during init and any pending
3391 * command will never be completed. In such a case we
3392 * need to resend whatever was the last sent
3395 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3396 hci_resend_last(hdev);
3401 /* If the command succeeded and there's still more commands in
3402 * this request the request is not yet complete.
3404 if (!status && !hci_req_is_complete(hdev))
3407 /* If this was the last command in a request the complete
3408 * callback would be found in hdev->sent_cmd instead of the
3409 * command queue (hdev->cmd_q).
3411 if (hdev->sent_cmd) {
3412 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
3417 /* Remove all pending commands belonging to this request */
3418 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3419 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3420 if (bt_cb(skb)->req.start) {
3421 __skb_queue_head(&hdev->cmd_q, skb);
3425 req_complete = bt_cb(skb)->req.complete;
3428 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3432 req_complete(hdev, status);
3435 static void hci_rx_work(struct work_struct *work)
3437 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
3438 struct sk_buff *skb;
3440 BT_DBG("%s", hdev->name);
3442 while ((skb = skb_dequeue(&hdev->rx_q))) {
3443 /* Send copy to monitor */
3444 hci_send_to_monitor(hdev, skb);
3446 if (atomic_read(&hdev->promisc)) {
3447 /* Send copy to the sockets */
3448 hci_send_to_sock(hdev, skb);
3451 if (test_bit(HCI_RAW, &hdev->flags)) {
3456 if (test_bit(HCI_INIT, &hdev->flags)) {
3457 /* Don't process data packets in this states. */
3458 switch (bt_cb(skb)->pkt_type) {
3459 case HCI_ACLDATA_PKT:
3460 case HCI_SCODATA_PKT:
3467 switch (bt_cb(skb)->pkt_type) {
3469 BT_DBG("%s Event packet", hdev->name);
3470 hci_event_packet(hdev, skb);
3473 case HCI_ACLDATA_PKT:
3474 BT_DBG("%s ACL data packet", hdev->name);
3475 hci_acldata_packet(hdev, skb);
3478 case HCI_SCODATA_PKT:
3479 BT_DBG("%s SCO data packet", hdev->name);
3480 hci_scodata_packet(hdev, skb);
3490 static void hci_cmd_work(struct work_struct *work)
3492 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
3493 struct sk_buff *skb;
3495 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3496 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
3498 /* Send queued commands */
3499 if (atomic_read(&hdev->cmd_cnt)) {
3500 skb = skb_dequeue(&hdev->cmd_q);
3504 kfree_skb(hdev->sent_cmd);
3506 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
3507 if (hdev->sent_cmd) {
3508 atomic_dec(&hdev->cmd_cnt);
3509 hci_send_frame(skb);
3510 if (test_bit(HCI_RESET, &hdev->flags))
3511 del_timer(&hdev->cmd_timer);
3513 mod_timer(&hdev->cmd_timer,
3514 jiffies + HCI_CMD_TIMEOUT);
3516 skb_queue_head(&hdev->cmd_q, skb);
3517 queue_work(hdev->workqueue, &hdev->cmd_work);
3522 int hci_do_inquiry(struct hci_dev *hdev, u8 length)
3524 /* General inquiry access code (GIAC) */
3525 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3526 struct hci_cp_inquiry cp;
3528 BT_DBG("%s", hdev->name);
3530 if (test_bit(HCI_INQUIRY, &hdev->flags))
3531 return -EINPROGRESS;
3533 inquiry_cache_flush(hdev);
3535 memset(&cp, 0, sizeof(cp));
3536 memcpy(&cp.lap, lap, sizeof(cp.lap));
3539 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
3542 int hci_cancel_inquiry(struct hci_dev *hdev)
3544 BT_DBG("%s", hdev->name);
3546 if (!test_bit(HCI_INQUIRY, &hdev->flags))
3549 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3552 u8 bdaddr_to_le(u8 bdaddr_type)
3554 switch (bdaddr_type) {
3555 case BDADDR_LE_PUBLIC:
3556 return ADDR_LE_DEV_PUBLIC;
3559 /* Fallback to LE Random address type */
3560 return ADDR_LE_DEV_RANDOM;