2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth HCI core. */
28 #include <linux/export.h>
29 #include <linux/idr.h>
31 #include <linux/rfkill.h>
33 #include <net/bluetooth/bluetooth.h>
34 #include <net/bluetooth/hci_core.h>
36 static void hci_rx_work(struct work_struct *work);
37 static void hci_cmd_work(struct work_struct *work);
38 static void hci_tx_work(struct work_struct *work);
41 LIST_HEAD(hci_dev_list);
42 DEFINE_RWLOCK(hci_dev_list_lock);
44 /* HCI callback list */
45 LIST_HEAD(hci_cb_list);
46 DEFINE_RWLOCK(hci_cb_list_lock);
48 /* HCI ID Numbering */
49 static DEFINE_IDA(hci_index_ida);
51 /* ---- HCI notifications ---- */
/* Forward a device state event to the HCI socket layer so that
 * listening sockets are informed (register/unregister, up/down, ...).
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
58 /* ---- HCI requests ---- */
60 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
62 BT_DBG("%s result 0x%2.2x", hdev->name, result);
64 if (hdev->req_status == HCI_REQ_PEND) {
65 hdev->req_result = result;
66 hdev->req_status = HCI_REQ_DONE;
67 wake_up_interruptible(&hdev->req_wait_q);
71 static void hci_req_cancel(struct hci_dev *hdev, int err)
73 BT_DBG("%s err 0x%2.2x", hdev->name, err);
75 if (hdev->req_status == HCI_REQ_PEND) {
76 hdev->req_result = err;
77 hdev->req_status = HCI_REQ_CANCELED;
78 wake_up_interruptible(&hdev->req_wait_q);
82 struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode)
84 struct hci_ev_cmd_complete *ev;
85 struct hci_event_hdr *hdr;
91 hdev->recv_evt = NULL;
96 return ERR_PTR(-ENODATA);
98 if (skb->len < sizeof(*hdr)) {
99 BT_ERR("Too short HCI event");
103 hdr = (void *) skb->data;
104 skb_pull(skb, HCI_EVENT_HDR_SIZE);
106 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
107 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
111 if (skb->len < sizeof(*ev)) {
112 BT_ERR("Too short cmd_complete event");
116 ev = (void *) skb->data;
117 skb_pull(skb, sizeof(*ev));
119 if (opcode == __le16_to_cpu(ev->opcode))
122 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
123 __le16_to_cpu(ev->opcode));
127 return ERR_PTR(-ENODATA);
130 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
131 void *param, u32 timeout)
133 DECLARE_WAITQUEUE(wait, current);
134 struct hci_request req;
137 BT_DBG("%s", hdev->name);
139 hci_req_init(&req, hdev);
141 hci_req_add(&req, opcode, plen, param);
143 hdev->req_status = HCI_REQ_PEND;
145 err = hci_req_run(&req, hci_req_sync_complete);
149 add_wait_queue(&hdev->req_wait_q, &wait);
150 set_current_state(TASK_INTERRUPTIBLE);
152 schedule_timeout(timeout);
154 remove_wait_queue(&hdev->req_wait_q, &wait);
156 if (signal_pending(current))
157 return ERR_PTR(-EINTR);
159 switch (hdev->req_status) {
161 err = -bt_to_errno(hdev->req_result);
164 case HCI_REQ_CANCELED:
165 err = -hdev->req_result;
173 hdev->req_status = hdev->req_result = 0;
175 BT_DBG("%s end: err %d", hdev->name, err);
180 return hci_get_cmd_complete(hdev, opcode);
182 EXPORT_SYMBOL(__hci_cmd_sync);
184 /* Execute request and wait for completion. */
185 static int __hci_req_sync(struct hci_dev *hdev,
186 void (*func)(struct hci_request *req,
188 unsigned long opt, __u32 timeout)
190 struct hci_request req;
191 DECLARE_WAITQUEUE(wait, current);
194 BT_DBG("%s start", hdev->name);
196 hci_req_init(&req, hdev);
198 hdev->req_status = HCI_REQ_PEND;
202 err = hci_req_run(&req, hci_req_sync_complete);
204 hdev->req_status = 0;
206 /* ENODATA means the HCI request command queue is empty.
207 * This can happen when a request with conditionals doesn't
208 * trigger any commands to be sent. This is normal behavior
209 * and should not trigger an error return.
217 add_wait_queue(&hdev->req_wait_q, &wait);
218 set_current_state(TASK_INTERRUPTIBLE);
220 schedule_timeout(timeout);
222 remove_wait_queue(&hdev->req_wait_q, &wait);
224 if (signal_pending(current))
227 switch (hdev->req_status) {
229 err = -bt_to_errno(hdev->req_result);
232 case HCI_REQ_CANCELED:
233 err = -hdev->req_result;
241 hdev->req_status = hdev->req_result = 0;
243 BT_DBG("%s end: err %d", hdev->name, err);
248 static int hci_req_sync(struct hci_dev *hdev,
249 void (*req)(struct hci_request *req,
251 unsigned long opt, __u32 timeout)
255 if (!test_bit(HCI_UP, &hdev->flags))
258 /* Serialize all requests */
260 ret = __hci_req_sync(hdev, req, opt, timeout);
261 hci_req_unlock(hdev);
266 static void hci_reset_req(struct hci_request *req, unsigned long opt)
268 BT_DBG("%s %ld", req->hdev->name, opt);
271 set_bit(HCI_RESET, &req->hdev->flags);
272 hci_req_add(req, HCI_OP_RESET, 0, NULL);
275 static void bredr_init(struct hci_request *req)
277 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
279 /* Read Local Supported Features */
280 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
282 /* Read Local Version */
283 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
285 /* Read BD Address */
286 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
289 static void amp_init(struct hci_request *req)
291 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
293 /* Read Local Version */
294 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
296 /* Read Local AMP Info */
297 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
299 /* Read Data Blk size */
300 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
303 static void hci_init1_req(struct hci_request *req, unsigned long opt)
305 struct hci_dev *hdev = req->hdev;
306 struct hci_request init_req;
309 BT_DBG("%s %ld", hdev->name, opt);
311 /* Driver initialization */
313 hci_req_init(&init_req, hdev);
315 /* Special commands */
316 while ((skb = skb_dequeue(&hdev->driver_init))) {
317 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
318 skb->dev = (void *) hdev;
320 if (skb_queue_empty(&init_req.cmd_q))
321 bt_cb(skb)->req.start = true;
323 skb_queue_tail(&init_req.cmd_q, skb);
325 skb_queue_purge(&hdev->driver_init);
327 hci_req_run(&init_req, NULL);
330 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
331 hci_reset_req(req, 0);
333 switch (hdev->dev_type) {
343 BT_ERR("Unknown device type %d", hdev->dev_type);
348 static void bredr_setup(struct hci_request *req)
350 struct hci_cp_delete_stored_link_key cp;
354 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
355 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
357 /* Read Class of Device */
358 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
360 /* Read Local Name */
361 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
363 /* Read Voice Setting */
364 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
366 /* Clear Event Filters */
367 flt_type = HCI_FLT_CLEAR_ALL;
368 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
370 /* Connection accept timeout ~20 secs */
371 param = __constant_cpu_to_le16(0x7d00);
372 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, ¶m);
374 bacpy(&cp.bdaddr, BDADDR_ANY);
375 cp.delete_all = 0x01;
376 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
378 /* Read page scan parameters */
379 if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
380 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
381 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
385 static void le_setup(struct hci_request *req)
387 /* Read LE Buffer Size */
388 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
390 /* Read LE Local Supported Features */
391 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
393 /* Read LE Advertising Channel TX Power */
394 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
396 /* Read LE White List Size */
397 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
399 /* Read LE Supported States */
400 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
403 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
405 if (lmp_ext_inq_capable(hdev))
408 if (lmp_inq_rssi_capable(hdev))
411 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
412 hdev->lmp_subver == 0x0757)
415 if (hdev->manufacturer == 15) {
416 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
418 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
420 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
424 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
425 hdev->lmp_subver == 0x1805)
431 static void hci_setup_inquiry_mode(struct hci_request *req)
435 mode = hci_get_inquiry_mode(req->hdev);
437 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
440 static void hci_setup_event_mask(struct hci_request *req)
442 struct hci_dev *hdev = req->hdev;
444 /* The second byte is 0xff instead of 0x9f (two reserved bits
445 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
448 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
450 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
451 * any event mask for pre 1.2 devices.
453 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
456 if (lmp_bredr_capable(hdev)) {
457 events[4] |= 0x01; /* Flow Specification Complete */
458 events[4] |= 0x02; /* Inquiry Result with RSSI */
459 events[4] |= 0x04; /* Read Remote Extended Features Complete */
460 events[5] |= 0x08; /* Synchronous Connection Complete */
461 events[5] |= 0x10; /* Synchronous Connection Changed */
464 if (lmp_inq_rssi_capable(hdev))
465 events[4] |= 0x02; /* Inquiry Result with RSSI */
467 if (lmp_sniffsubr_capable(hdev))
468 events[5] |= 0x20; /* Sniff Subrating */
470 if (lmp_pause_enc_capable(hdev))
471 events[5] |= 0x80; /* Encryption Key Refresh Complete */
473 if (lmp_ext_inq_capable(hdev))
474 events[5] |= 0x40; /* Extended Inquiry Result */
476 if (lmp_no_flush_capable(hdev))
477 events[7] |= 0x01; /* Enhanced Flush Complete */
479 if (lmp_lsto_capable(hdev))
480 events[6] |= 0x80; /* Link Supervision Timeout Changed */
482 if (lmp_ssp_capable(hdev)) {
483 events[6] |= 0x01; /* IO Capability Request */
484 events[6] |= 0x02; /* IO Capability Response */
485 events[6] |= 0x04; /* User Confirmation Request */
486 events[6] |= 0x08; /* User Passkey Request */
487 events[6] |= 0x10; /* Remote OOB Data Request */
488 events[6] |= 0x20; /* Simple Pairing Complete */
489 events[7] |= 0x04; /* User Passkey Notification */
490 events[7] |= 0x08; /* Keypress Notification */
491 events[7] |= 0x10; /* Remote Host Supported
492 * Features Notification
496 if (lmp_le_capable(hdev))
497 events[7] |= 0x20; /* LE Meta-Event */
499 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
501 if (lmp_le_capable(hdev)) {
502 memset(events, 0, sizeof(events));
504 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
505 sizeof(events), events);
509 static void hci_init2_req(struct hci_request *req, unsigned long opt)
511 struct hci_dev *hdev = req->hdev;
513 if (lmp_bredr_capable(hdev))
516 if (lmp_le_capable(hdev))
519 hci_setup_event_mask(req);
521 if (hdev->hci_ver > BLUETOOTH_VER_1_1)
522 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
524 if (lmp_ssp_capable(hdev)) {
525 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
527 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
528 sizeof(mode), &mode);
530 struct hci_cp_write_eir cp;
532 memset(hdev->eir, 0, sizeof(hdev->eir));
533 memset(&cp, 0, sizeof(cp));
535 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
539 if (lmp_inq_rssi_capable(hdev))
540 hci_setup_inquiry_mode(req);
542 if (lmp_inq_tx_pwr_capable(hdev))
543 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
545 if (lmp_ext_feat_capable(hdev)) {
546 struct hci_cp_read_local_ext_features cp;
549 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
553 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
555 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
560 static void hci_setup_link_policy(struct hci_request *req)
562 struct hci_dev *hdev = req->hdev;
563 struct hci_cp_write_def_link_policy cp;
566 if (lmp_rswitch_capable(hdev))
567 link_policy |= HCI_LP_RSWITCH;
568 if (lmp_hold_capable(hdev))
569 link_policy |= HCI_LP_HOLD;
570 if (lmp_sniff_capable(hdev))
571 link_policy |= HCI_LP_SNIFF;
572 if (lmp_park_capable(hdev))
573 link_policy |= HCI_LP_PARK;
575 cp.policy = cpu_to_le16(link_policy);
576 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
579 static void hci_set_le_support(struct hci_request *req)
581 struct hci_dev *hdev = req->hdev;
582 struct hci_cp_write_le_host_supported cp;
584 memset(&cp, 0, sizeof(cp));
586 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
588 cp.simul = lmp_le_br_capable(hdev);
591 if (cp.le != lmp_host_le_capable(hdev))
592 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
596 static void hci_init3_req(struct hci_request *req, unsigned long opt)
598 struct hci_dev *hdev = req->hdev;
600 if (hdev->commands[5] & 0x10)
601 hci_setup_link_policy(req);
603 if (lmp_le_capable(hdev)) {
604 hci_set_le_support(req);
609 static int __hci_init(struct hci_dev *hdev)
613 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
617 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
618 * BR/EDR/LE type controllers. AMP controllers only need the
621 if (hdev->dev_type != HCI_BREDR)
624 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
628 return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
631 static void hci_scan_req(struct hci_request *req, unsigned long opt)
635 BT_DBG("%s %x", req->hdev->name, scan);
637 /* Inquiry and Page scans */
638 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
641 static void hci_auth_req(struct hci_request *req, unsigned long opt)
645 BT_DBG("%s %x", req->hdev->name, auth);
648 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
651 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
655 BT_DBG("%s %x", req->hdev->name, encrypt);
658 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
661 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
663 __le16 policy = cpu_to_le16(opt);
665 BT_DBG("%s %x", req->hdev->name, policy);
667 /* Default link policy */
668 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
671 /* Get HCI device by index.
672 * Device is held on return. */
673 struct hci_dev *hci_dev_get(int index)
675 struct hci_dev *hdev = NULL, *d;
682 read_lock(&hci_dev_list_lock);
683 list_for_each_entry(d, &hci_dev_list, list) {
684 if (d->id == index) {
685 hdev = hci_dev_hold(d);
689 read_unlock(&hci_dev_list_lock);
693 /* ---- Inquiry support ---- */
695 bool hci_discovery_active(struct hci_dev *hdev)
697 struct discovery_state *discov = &hdev->discovery;
699 switch (discov->state) {
700 case DISCOVERY_FINDING:
701 case DISCOVERY_RESOLVING:
709 void hci_discovery_set_state(struct hci_dev *hdev, int state)
711 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
713 if (hdev->discovery.state == state)
717 case DISCOVERY_STOPPED:
718 if (hdev->discovery.state != DISCOVERY_STARTING)
719 mgmt_discovering(hdev, 0);
721 case DISCOVERY_STARTING:
723 case DISCOVERY_FINDING:
724 mgmt_discovering(hdev, 1);
726 case DISCOVERY_RESOLVING:
728 case DISCOVERY_STOPPING:
732 hdev->discovery.state = state;
735 static void inquiry_cache_flush(struct hci_dev *hdev)
737 struct discovery_state *cache = &hdev->discovery;
738 struct inquiry_entry *p, *n;
740 list_for_each_entry_safe(p, n, &cache->all, all) {
745 INIT_LIST_HEAD(&cache->unknown);
746 INIT_LIST_HEAD(&cache->resolve);
749 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
752 struct discovery_state *cache = &hdev->discovery;
753 struct inquiry_entry *e;
755 BT_DBG("cache %p, %pMR", cache, bdaddr);
757 list_for_each_entry(e, &cache->all, all) {
758 if (!bacmp(&e->data.bdaddr, bdaddr))
765 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
768 struct discovery_state *cache = &hdev->discovery;
769 struct inquiry_entry *e;
771 BT_DBG("cache %p, %pMR", cache, bdaddr);
773 list_for_each_entry(e, &cache->unknown, list) {
774 if (!bacmp(&e->data.bdaddr, bdaddr))
781 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
785 struct discovery_state *cache = &hdev->discovery;
786 struct inquiry_entry *e;
788 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
790 list_for_each_entry(e, &cache->resolve, list) {
791 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
793 if (!bacmp(&e->data.bdaddr, bdaddr))
800 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
801 struct inquiry_entry *ie)
803 struct discovery_state *cache = &hdev->discovery;
804 struct list_head *pos = &cache->resolve;
805 struct inquiry_entry *p;
809 list_for_each_entry(p, &cache->resolve, list) {
810 if (p->name_state != NAME_PENDING &&
811 abs(p->data.rssi) >= abs(ie->data.rssi))
816 list_add(&ie->list, pos);
819 bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
820 bool name_known, bool *ssp)
822 struct discovery_state *cache = &hdev->discovery;
823 struct inquiry_entry *ie;
825 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
827 hci_remove_remote_oob_data(hdev, &data->bdaddr);
830 *ssp = data->ssp_mode;
832 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
834 if (ie->data.ssp_mode && ssp)
837 if (ie->name_state == NAME_NEEDED &&
838 data->rssi != ie->data.rssi) {
839 ie->data.rssi = data->rssi;
840 hci_inquiry_cache_update_resolve(hdev, ie);
846 /* Entry not in the cache. Add new one. */
847 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
851 list_add(&ie->all, &cache->all);
854 ie->name_state = NAME_KNOWN;
856 ie->name_state = NAME_NOT_KNOWN;
857 list_add(&ie->list, &cache->unknown);
861 if (name_known && ie->name_state != NAME_KNOWN &&
862 ie->name_state != NAME_PENDING) {
863 ie->name_state = NAME_KNOWN;
867 memcpy(&ie->data, data, sizeof(*data));
868 ie->timestamp = jiffies;
869 cache->timestamp = jiffies;
871 if (ie->name_state == NAME_NOT_KNOWN)
877 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
879 struct discovery_state *cache = &hdev->discovery;
880 struct inquiry_info *info = (struct inquiry_info *) buf;
881 struct inquiry_entry *e;
884 list_for_each_entry(e, &cache->all, all) {
885 struct inquiry_data *data = &e->data;
890 bacpy(&info->bdaddr, &data->bdaddr);
891 info->pscan_rep_mode = data->pscan_rep_mode;
892 info->pscan_period_mode = data->pscan_period_mode;
893 info->pscan_mode = data->pscan_mode;
894 memcpy(info->dev_class, data->dev_class, 3);
895 info->clock_offset = data->clock_offset;
901 BT_DBG("cache %p, copied %d", cache, copied);
905 static void hci_inq_req(struct hci_request *req, unsigned long opt)
907 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
908 struct hci_dev *hdev = req->hdev;
909 struct hci_cp_inquiry cp;
911 BT_DBG("%s", hdev->name);
913 if (test_bit(HCI_INQUIRY, &hdev->flags))
917 memcpy(&cp.lap, &ir->lap, 3);
918 cp.length = ir->length;
919 cp.num_rsp = ir->num_rsp;
920 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
923 static int wait_inquiry(void *word)
926 return signal_pending(current);
929 int hci_inquiry(void __user *arg)
931 __u8 __user *ptr = arg;
932 struct hci_inquiry_req ir;
933 struct hci_dev *hdev;
934 int err = 0, do_inquiry = 0, max_rsp;
938 if (copy_from_user(&ir, ptr, sizeof(ir)))
941 hdev = hci_dev_get(ir.dev_id);
946 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
947 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
948 inquiry_cache_flush(hdev);
951 hci_dev_unlock(hdev);
953 timeo = ir.length * msecs_to_jiffies(2000);
956 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
961 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
962 * cleared). If it is interrupted by a signal, return -EINTR.
964 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
969 /* for unlimited number of responses we will use buffer with
972 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
974 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
975 * copy it to the user space.
977 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
984 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
985 hci_dev_unlock(hdev);
987 BT_DBG("num_rsp %d", ir.num_rsp);
989 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
991 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
1004 static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
1006 u8 ad_len = 0, flags = 0;
1009 if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
1010 flags |= LE_AD_GENERAL;
1012 if (!lmp_bredr_capable(hdev))
1013 flags |= LE_AD_NO_BREDR;
1015 if (lmp_le_br_capable(hdev))
1016 flags |= LE_AD_SIM_LE_BREDR_CTRL;
1018 if (lmp_host_le_br_capable(hdev))
1019 flags |= LE_AD_SIM_LE_BREDR_HOST;
1022 BT_DBG("adv flags 0x%02x", flags);
1032 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
1034 ptr[1] = EIR_TX_POWER;
1035 ptr[2] = (u8) hdev->adv_tx_power;
1041 name_len = strlen(hdev->dev_name);
1043 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
1045 if (name_len > max_len) {
1047 ptr[1] = EIR_NAME_SHORT;
1049 ptr[1] = EIR_NAME_COMPLETE;
1051 ptr[0] = name_len + 1;
1053 memcpy(ptr + 2, hdev->dev_name, name_len);
1055 ad_len += (name_len + 2);
1056 ptr += (name_len + 2);
1062 void hci_update_ad(struct hci_request *req)
1064 struct hci_dev *hdev = req->hdev;
1065 struct hci_cp_le_set_adv_data cp;
1068 if (!lmp_le_capable(hdev))
1071 memset(&cp, 0, sizeof(cp));
1073 len = create_ad(hdev, cp.data);
1075 if (hdev->adv_data_len == len &&
1076 memcmp(cp.data, hdev->adv_data, len) == 0)
1079 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1080 hdev->adv_data_len = len;
1084 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1087 /* ---- HCI ioctl helpers ---- */
1089 int hci_dev_open(__u16 dev)
1091 struct hci_dev *hdev;
1094 hdev = hci_dev_get(dev);
1098 BT_DBG("%s %p", hdev->name, hdev);
1102 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
1107 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
1112 if (test_bit(HCI_UP, &hdev->flags)) {
1117 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1118 set_bit(HCI_RAW, &hdev->flags);
1120 /* Treat all non BR/EDR controllers as raw devices if
1121 enable_hs is not set */
1122 if (hdev->dev_type != HCI_BREDR && !enable_hs)
1123 set_bit(HCI_RAW, &hdev->flags);
1125 if (hdev->open(hdev)) {
1130 if (!test_bit(HCI_RAW, &hdev->flags)) {
1131 atomic_set(&hdev->cmd_cnt, 1);
1132 set_bit(HCI_INIT, &hdev->flags);
1133 ret = __hci_init(hdev);
1134 clear_bit(HCI_INIT, &hdev->flags);
1139 set_bit(HCI_UP, &hdev->flags);
1140 hci_notify(hdev, HCI_DEV_UP);
1141 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
1142 mgmt_valid_hdev(hdev)) {
1144 mgmt_powered(hdev, 1);
1145 hci_dev_unlock(hdev);
1148 /* Init failed, cleanup */
1149 flush_work(&hdev->tx_work);
1150 flush_work(&hdev->cmd_work);
1151 flush_work(&hdev->rx_work);
1153 skb_queue_purge(&hdev->cmd_q);
1154 skb_queue_purge(&hdev->rx_q);
1159 if (hdev->sent_cmd) {
1160 kfree_skb(hdev->sent_cmd);
1161 hdev->sent_cmd = NULL;
1169 hci_req_unlock(hdev);
1174 static int hci_dev_do_close(struct hci_dev *hdev)
1176 BT_DBG("%s %p", hdev->name, hdev);
1178 cancel_work_sync(&hdev->le_scan);
1180 cancel_delayed_work(&hdev->power_off);
1182 hci_req_cancel(hdev, ENODEV);
1185 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1186 del_timer_sync(&hdev->cmd_timer);
1187 hci_req_unlock(hdev);
1191 /* Flush RX and TX works */
1192 flush_work(&hdev->tx_work);
1193 flush_work(&hdev->rx_work);
1195 if (hdev->discov_timeout > 0) {
1196 cancel_delayed_work(&hdev->discov_off);
1197 hdev->discov_timeout = 0;
1198 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1201 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1202 cancel_delayed_work(&hdev->service_cache);
1204 cancel_delayed_work_sync(&hdev->le_scan_disable);
1207 inquiry_cache_flush(hdev);
1208 hci_conn_hash_flush(hdev);
1209 hci_dev_unlock(hdev);
1211 hci_notify(hdev, HCI_DEV_DOWN);
1217 skb_queue_purge(&hdev->cmd_q);
1218 atomic_set(&hdev->cmd_cnt, 1);
1219 if (!test_bit(HCI_RAW, &hdev->flags) &&
1220 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1221 set_bit(HCI_INIT, &hdev->flags);
1222 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1223 clear_bit(HCI_INIT, &hdev->flags);
1226 /* flush cmd work */
1227 flush_work(&hdev->cmd_work);
1230 skb_queue_purge(&hdev->rx_q);
1231 skb_queue_purge(&hdev->cmd_q);
1232 skb_queue_purge(&hdev->raw_q);
1234 /* Drop last sent command */
1235 if (hdev->sent_cmd) {
1236 del_timer_sync(&hdev->cmd_timer);
1237 kfree_skb(hdev->sent_cmd);
1238 hdev->sent_cmd = NULL;
1241 kfree_skb(hdev->recv_evt);
1242 hdev->recv_evt = NULL;
1244 /* After this point our queues are empty
1245 * and no tasks are scheduled. */
1250 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
1252 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
1253 mgmt_valid_hdev(hdev)) {
1255 mgmt_powered(hdev, 0);
1256 hci_dev_unlock(hdev);
1259 /* Controller radio is available but is currently powered down */
1260 hdev->amp_status = 0;
1262 memset(hdev->eir, 0, sizeof(hdev->eir));
1263 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1265 hci_req_unlock(hdev);
1271 int hci_dev_close(__u16 dev)
1273 struct hci_dev *hdev;
1276 hdev = hci_dev_get(dev);
1280 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1281 cancel_delayed_work(&hdev->power_off);
1283 err = hci_dev_do_close(hdev);
1289 int hci_dev_reset(__u16 dev)
1291 struct hci_dev *hdev;
1294 hdev = hci_dev_get(dev);
1300 if (!test_bit(HCI_UP, &hdev->flags))
1304 skb_queue_purge(&hdev->rx_q);
1305 skb_queue_purge(&hdev->cmd_q);
1308 inquiry_cache_flush(hdev);
1309 hci_conn_hash_flush(hdev);
1310 hci_dev_unlock(hdev);
1315 atomic_set(&hdev->cmd_cnt, 1);
1316 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1318 if (!test_bit(HCI_RAW, &hdev->flags))
1319 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1322 hci_req_unlock(hdev);
1327 int hci_dev_reset_stat(__u16 dev)
1329 struct hci_dev *hdev;
1332 hdev = hci_dev_get(dev);
1336 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1343 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1345 struct hci_dev *hdev;
1346 struct hci_dev_req dr;
1349 if (copy_from_user(&dr, arg, sizeof(dr)))
1352 hdev = hci_dev_get(dr.dev_id);
1358 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1363 if (!lmp_encrypt_capable(hdev)) {
1368 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1369 /* Auth must be enabled first */
1370 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1376 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1381 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1386 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1390 case HCISETLINKMODE:
1391 hdev->link_mode = ((__u16) dr.dev_opt) &
1392 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1396 hdev->pkt_type = (__u16) dr.dev_opt;
1400 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1401 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1405 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1406 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1418 int hci_get_dev_list(void __user *arg)
1420 struct hci_dev *hdev;
1421 struct hci_dev_list_req *dl;
1422 struct hci_dev_req *dr;
1423 int n = 0, size, err;
1426 if (get_user(dev_num, (__u16 __user *) arg))
1429 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1432 size = sizeof(*dl) + dev_num * sizeof(*dr);
1434 dl = kzalloc(size, GFP_KERNEL);
1440 read_lock(&hci_dev_list_lock);
1441 list_for_each_entry(hdev, &hci_dev_list, list) {
1442 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1443 cancel_delayed_work(&hdev->power_off);
1445 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1446 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1448 (dr + n)->dev_id = hdev->id;
1449 (dr + n)->dev_opt = hdev->flags;
1454 read_unlock(&hci_dev_list_lock);
1457 size = sizeof(*dl) + n * sizeof(*dr);
1459 err = copy_to_user(arg, dl, size);
1462 return err ? -EFAULT : 0;
1465 int hci_get_dev_info(void __user *arg)
1467 struct hci_dev *hdev;
1468 struct hci_dev_info di;
1471 if (copy_from_user(&di, arg, sizeof(di)))
1474 hdev = hci_dev_get(di.dev_id);
1478 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1479 cancel_delayed_work_sync(&hdev->power_off);
1481 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1482 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1484 strcpy(di.name, hdev->name);
1485 di.bdaddr = hdev->bdaddr;
1486 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1487 di.flags = hdev->flags;
1488 di.pkt_type = hdev->pkt_type;
1489 if (lmp_bredr_capable(hdev)) {
1490 di.acl_mtu = hdev->acl_mtu;
1491 di.acl_pkts = hdev->acl_pkts;
1492 di.sco_mtu = hdev->sco_mtu;
1493 di.sco_pkts = hdev->sco_pkts;
1495 di.acl_mtu = hdev->le_mtu;
1496 di.acl_pkts = hdev->le_pkts;
1500 di.link_policy = hdev->link_policy;
1501 di.link_mode = hdev->link_mode;
1503 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1504 memcpy(&di.features, &hdev->features, sizeof(di.features));
1506 if (copy_to_user(arg, &di, sizeof(di)))
1514 /* ---- Interface to HCI drivers ---- */
1516 static int hci_rfkill_set_block(void *data, bool blocked)
1518 struct hci_dev *hdev = data;
1520 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1525 hci_dev_do_close(hdev);
1530 static const struct rfkill_ops hci_rfkill_ops = {
1531 .set_block = hci_rfkill_set_block,
1534 static void hci_power_on(struct work_struct *work)
1536 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1538 BT_DBG("%s", hdev->name);
1540 if (hci_dev_open(hdev->id) < 0)
1543 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1544 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1545 HCI_AUTO_OFF_TIMEOUT);
1547 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
1548 mgmt_index_added(hdev);
1551 static void hci_power_off(struct work_struct *work)
1553 struct hci_dev *hdev = container_of(work, struct hci_dev,
1556 BT_DBG("%s", hdev->name);
1558 hci_dev_do_close(hdev);
1561 static void hci_discov_off(struct work_struct *work)
1563 struct hci_dev *hdev;
1564 u8 scan = SCAN_PAGE;
1566 hdev = container_of(work, struct hci_dev, discov_off.work);
1568 BT_DBG("%s", hdev->name);
1572 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1574 hdev->discov_timeout = 0;
1576 hci_dev_unlock(hdev);
1579 int hci_uuids_clear(struct hci_dev *hdev)
1581 struct bt_uuid *uuid, *tmp;
1583 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1584 list_del(&uuid->list);
1591 int hci_link_keys_clear(struct hci_dev *hdev)
1593 struct list_head *p, *n;
1595 list_for_each_safe(p, n, &hdev->link_keys) {
1596 struct link_key *key;
1598 key = list_entry(p, struct link_key, list);
1607 int hci_smp_ltks_clear(struct hci_dev *hdev)
1609 struct smp_ltk *k, *tmp;
1611 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1619 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1623 list_for_each_entry(k, &hdev->link_keys, list)
1624 if (bacmp(bdaddr, &k->bdaddr) == 0)
/* Decide whether a newly created link key should be stored persistently,
 * based on the key type and both sides' authentication requirements. */
1630 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1631 u8 key_type, u8 old_key_type)
/* Legacy (pre-SSP) key types 0x00-0x02 */
1634 if (key_type < 0x03)
1637 /* Debug keys are insecure so don't store them persistently */
1638 if (key_type == HCI_LK_DEBUG_COMBINATION)
1641 /* Changed combination key and there's no previous one */
1642 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1645 /* Security mode 3 case */
1649 /* Neither local nor remote side had no-bonding as requirement */
1650 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1653 /* Local side had dedicated bonding as requirement */
1654 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1657 /* Remote side had dedicated bonding as requirement */
1658 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1661 /* If none of the above criteria match, then don't store the key
/* Find a stored LTK matching the given EDIV and 8-byte Rand value. */
1666 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1670 list_for_each_entry(k, &hdev->long_term_keys, list) {
1671 if (k->ediv != ediv ||
1672 memcmp(rand, k->rand, sizeof(k->rand)))
/* Find a stored LTK by remote address and address type. */
1681 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1686 list_for_each_entry(k, &hdev->long_term_keys, list)
1687 if (addr_type == k->bdaddr_type &&
1688 bacmp(bdaddr, &k->bdaddr) == 0)
/* Store (or update) a BR/EDR link key for bdaddr. Reuses an existing entry
 * when one exists, otherwise allocates a new one; notifies mgmt for new
 * keys and records whether the key should survive disconnection. */
1694 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1695 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1697 struct link_key *key, *old_key;
1701 old_key = hci_find_link_key(hdev, bdaddr);
1703 old_key_type = old_key->type;
/* 0xff == "no previous key type known" sentinel */
1706 old_key_type = conn ? conn->key_type : 0xff;
1707 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1710 list_add(&key->list, &hdev->link_keys);
1713 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
1715 /* Some buggy controller combinations generate a changed
1716 * combination key for legacy pairing even when there's no
1718 if (type == HCI_LK_CHANGED_COMBINATION &&
1719 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1720 type = HCI_LK_COMBINATION;
1722 conn->key_type = type;
1725 bacpy(&key->bdaddr, bdaddr);
1726 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1727 key->pin_len = pin_len;
/* Keep the pre-change type so persistence rules judge the real key */
1729 if (type == HCI_LK_CHANGED_COMBINATION)
1730 key->type = old_key_type;
1737 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1739 mgmt_new_link_key(hdev, key, persistent);
1742 conn->flush_key = !persistent;
/* Store (or update) an SMP key (STK or LTK) for bdaddr/addr_type and
 * notify mgmt when a new LTK is added. */
1747 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
1748 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
1751 struct smp_ltk *key, *old_key;
/* Only STK and LTK types are accepted */
1753 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1756 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1760 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1763 list_add(&key->list, &hdev->long_term_keys);
1766 bacpy(&key->bdaddr, bdaddr);
1767 key->bdaddr_type = addr_type;
1768 memcpy(key->val, tk, sizeof(key->val));
1769 key->authenticated = authenticated;
1771 key->enc_size = enc_size;
1773 memcpy(key->rand, rand, sizeof(key->rand));
1778 if (type & HCI_SMP_LTK)
1779 mgmt_new_ltk(hdev, key, 1);
/* Delete the stored link key for bdaddr, if any. */
1784 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1786 struct link_key *key;
1788 key = hci_find_link_key(hdev, bdaddr);
1792 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1794 list_del(&key->list);
/* Delete all stored LTKs matching bdaddr. */
1800 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1802 struct smp_ltk *k, *tmp;
1804 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1805 if (bacmp(bdaddr, &k->bdaddr))
1808 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1817 /* HCI command timer function */
/* Fired when the controller did not answer the last command in time: log
 * the stuck opcode, then force the command credit back to 1 and kick the
 * command work so the queue can make progress again. */
1818 static void hci_cmd_timeout(unsigned long arg)
1820 struct hci_dev *hdev = (void *) arg;
1822 if (hdev->sent_cmd) {
1823 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1824 u16 opcode = __le16_to_cpu(sent->opcode);
1826 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1828 BT_ERR("%s command tx timeout", hdev->name);
1831 atomic_set(&hdev->cmd_cnt, 1);
1832 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Look up stored remote OOB pairing data by bdaddr. */
1835 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1838 struct oob_data *data;
1840 list_for_each_entry(data, &hdev->remote_oob_data, list)
1841 if (bacmp(bdaddr, &data->bdaddr) == 0)
/* Delete stored remote OOB data for bdaddr, if any. */
1847 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1849 struct oob_data *data;
1851 data = hci_find_remote_oob_data(hdev, bdaddr);
1855 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1857 list_del(&data->list);
/* Drop all stored remote OOB data entries. */
1863 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1865 struct oob_data *data, *n;
1867 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1868 list_del(&data->list);
/* Store (or overwrite) remote OOB hash/randomizer for bdaddr; allocates a
 * new entry only when none exists yet. */
1875 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1878 struct oob_data *data;
1880 data = hci_find_remote_oob_data(hdev, bdaddr);
1883 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1887 bacpy(&data->bdaddr, bdaddr);
1888 list_add(&data->list, &hdev->remote_oob_data);
1891 memcpy(data->hash, hash, sizeof(data->hash));
1892 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1894 BT_DBG("%s for %pMR", hdev->name, bdaddr);
/* Find a blacklist entry for bdaddr, if present. */
1899 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
1901 struct bdaddr_list *b;
1903 list_for_each_entry(b, &hdev->blacklist, list)
1904 if (bacmp(bdaddr, &b->bdaddr) == 0)
/* Remove and free every blacklist entry. */
1910 int hci_blacklist_clear(struct hci_dev *hdev)
1912 struct list_head *p, *n;
1914 list_for_each_safe(p, n, &hdev->blacklist) {
1915 struct bdaddr_list *b;
1917 b = list_entry(p, struct bdaddr_list, list);
/* Add bdaddr to the blacklist. Rejects BDADDR_ANY and (in elided code)
 * duplicates; on success informs mgmt that the device is blocked. */
1926 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1928 struct bdaddr_list *entry;
1930 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1933 if (hci_blacklist_lookup(hdev, bdaddr))
1936 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL)
1940 bacpy(&entry->bdaddr, bdaddr);
1942 list_add(&entry->list, &hdev->blacklist);
1944 return mgmt_device_blocked(hdev, bdaddr, type);
/* Remove bdaddr from the blacklist; BDADDR_ANY clears the whole list.
 * On success informs mgmt that the device is unblocked. */
1947 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1949 struct bdaddr_list *entry;
1951 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1952 return hci_blacklist_clear(hdev);
1954 entry = hci_blacklist_lookup(hdev, bdaddr);
1958 list_del(&entry->list);
1961 return mgmt_device_unblocked(hdev, bdaddr, type);
/* Request builder: queue an LE Set Scan Parameters command with the
 * type/interval/window passed via the opt pointer. */
1964 static void le_scan_param_req(struct hci_request *req, unsigned long opt)
1966 struct le_scan_params *param = (struct le_scan_params *) opt;
1967 struct hci_cp_le_set_scan_param cp;
1969 memset(&cp, 0, sizeof(cp));
1970 cp.type = param->type;
1971 cp.interval = cpu_to_le16(param->interval);
1972 cp.window = cpu_to_le16(param->window);
1974 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
/* Request builder: queue an LE Set Scan Enable command (enable field set
 * in a line elided from this excerpt). */
1977 static void le_scan_enable_req(struct hci_request *req, unsigned long opt)
1979 struct hci_cp_le_set_scan_enable cp;
1981 memset(&cp, 0, sizeof(cp));
1985 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
/* Synchronously configure and enable an LE scan, then arm the delayed
 * le_scan_disable work to stop it after @timeout ms. Returns -EINPROGRESS
 * when a scan is already running. */
1988 static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
1989 u16 window, int timeout)
1991 long timeo = msecs_to_jiffies(3000);
1992 struct le_scan_params param;
1995 BT_DBG("%s", hdev->name);
1997 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1998 return -EINPROGRESS;
2001 param.interval = interval;
2002 param.window = window;
/* Fixed mojibake: "¶m" was a mis-encoded "&param". */
2006 err = __hci_req_sync(hdev, le_scan_param_req, (unsigned long) &param,
2009 err = __hci_req_sync(hdev, le_scan_enable_req, 0, timeo);
2011 hci_req_unlock(hdev);
2016 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
2017 msecs_to_jiffies(timeout));
/* Cancel a running LE scan: if the disable timer was still pending, send
 * the disable command immediately ourselves. */
2022 int hci_cancel_le_scan(struct hci_dev *hdev)
2024 BT_DBG("%s", hdev->name);
2026 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
2029 if (cancel_delayed_work(&hdev->le_scan_disable)) {
2030 struct hci_cp_le_set_scan_enable cp;
2032 /* Send HCI command to disable LE Scan */
2033 memset(&cp, 0, sizeof(cp));
2034 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
/* Delayed worker that stops an LE scan by sending Set Scan Enable with a
 * zeroed (disabled) parameter block. */
2040 static void le_scan_disable_work(struct work_struct *work)
2042 struct hci_dev *hdev = container_of(work, struct hci_dev,
2043 le_scan_disable.work);
2044 struct hci_cp_le_set_scan_enable cp;
2046 BT_DBG("%s", hdev->name);
2048 memset(&cp, 0, sizeof(cp));
2050 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
/* Worker that performs the LE scan using the parameters stashed on hdev. */
2053 static void le_scan_work(struct work_struct *work)
2055 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
2056 struct le_scan_params *param = &hdev->le_scan_params;
2058 BT_DBG("%s", hdev->name);
2060 hci_do_le_scan(hdev, param->type, param->interval, param->window,
/* Kick off an asynchronous LE scan: store the parameters on hdev and queue
 * le_scan_work on the system long-running workqueue. Refused while in LE
 * peripheral mode or while a scan work item is already busy. */
2064 int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
2067 struct le_scan_params *param = &hdev->le_scan_params;
2069 BT_DBG("%s", hdev->name);
2071 if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
2074 if (work_busy(&hdev->le_scan))
2075 return -EINPROGRESS;
2078 param->interval = interval;
2079 param->window = window;
2080 param->timeout = timeout;
2082 queue_work(system_long_wq, &hdev->le_scan);
2087 /* Alloc HCI device */
/* Allocate and initialize a struct hci_dev: defaults, locks, lists, work
 * items, queues, command timer and sysfs/discovery state. The caller later
 * registers it with hci_register_dev(). */
2088 struct hci_dev *hci_alloc_dev(void)
2090 struct hci_dev *hdev;
2092 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
/* Conservative defaults until the controller reports its capabilities */
2096 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2097 hdev->esco_type = (ESCO_HV1);
2098 hdev->link_mode = (HCI_LM_ACCEPT);
2099 hdev->io_capability = 0x03; /* No Input No Output */
2100 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2101 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2103 hdev->sniff_max_interval = 800;
2104 hdev->sniff_min_interval = 80;
2106 mutex_init(&hdev->lock);
2107 mutex_init(&hdev->req_lock);
2109 INIT_LIST_HEAD(&hdev->mgmt_pending);
2110 INIT_LIST_HEAD(&hdev->blacklist);
2111 INIT_LIST_HEAD(&hdev->uuids);
2112 INIT_LIST_HEAD(&hdev->link_keys);
2113 INIT_LIST_HEAD(&hdev->long_term_keys);
2114 INIT_LIST_HEAD(&hdev->remote_oob_data);
2115 INIT_LIST_HEAD(&hdev->conn_hash.list);
2117 INIT_WORK(&hdev->rx_work, hci_rx_work);
2118 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2119 INIT_WORK(&hdev->tx_work, hci_tx_work);
2120 INIT_WORK(&hdev->power_on, hci_power_on);
2121 INIT_WORK(&hdev->le_scan, le_scan_work);
2123 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2124 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2125 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2127 skb_queue_head_init(&hdev->driver_init);
2128 skb_queue_head_init(&hdev->rx_q);
2129 skb_queue_head_init(&hdev->cmd_q);
2130 skb_queue_head_init(&hdev->raw_q);
2132 init_waitqueue_head(&hdev->req_wait_q);
2134 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
2136 hci_init_sysfs(hdev);
2137 discovery_init(hdev);
2141 EXPORT_SYMBOL(hci_alloc_dev);
2143 /* Free HCI device */
/* Release a device allocated by hci_alloc_dev(); the actual kfree happens
 * in the device core's release callback via put_device(). */
2144 void hci_free_dev(struct hci_dev *hdev)
2146 skb_queue_purge(&hdev->driver_init);
2148 /* will free via device release */
2149 put_device(&hdev->dev);
2151 EXPORT_SYMBOL(hci_free_dev);
2153 /* Register HCI device */
/* Register a controller: allocate an index (AMP devices never get index 0),
 * link it into the global device list, create its workqueues, sysfs entry
 * and rfkill switch, then schedule the initial power-on. On failure the
 * error path tears down the workqueues, the ID and the list entry. */
2154 int hci_register_dev(struct hci_dev *hdev)
2158 if (!hdev->open || !hdev->close)
2161 /* Do not allow HCI_AMP devices to register at index 0,
2162 * so the index can be used as the AMP controller ID.
2164 switch (hdev->dev_type) {
2166 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2169 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2178 sprintf(hdev->name, "hci%d", id);
2181 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2183 write_lock(&hci_dev_list_lock);
2184 list_add(&hdev->list, &hci_dev_list);
2185 write_unlock(&hci_dev_list_lock);
2187 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
2189 if (!hdev->workqueue) {
2194 hdev->req_workqueue = alloc_workqueue(hdev->name,
2195 WQ_HIGHPRI | WQ_UNBOUND |
2197 if (!hdev->req_workqueue) {
2198 destroy_workqueue(hdev->workqueue);
2203 error = hci_add_sysfs(hdev);
2207 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2208 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
/* rfkill is optional: on registration failure we just run without it */
2211 if (rfkill_register(hdev->rfkill) < 0) {
2212 rfkill_destroy(hdev->rfkill);
2213 hdev->rfkill = NULL;
2217 set_bit(HCI_SETUP, &hdev->dev_flags);
2219 if (hdev->dev_type != HCI_AMP)
2220 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2222 hci_notify(hdev, HCI_DEV_REG);
2225 queue_work(hdev->req_workqueue, &hdev->power_on);
2230 destroy_workqueue(hdev->workqueue);
2231 destroy_workqueue(hdev->req_workqueue);
2233 ida_simple_remove(&hci_index_ida, hdev->id);
2234 write_lock(&hci_dev_list_lock);
2235 list_del(&hdev->list);
2236 write_unlock(&hci_dev_list_lock);
2240 EXPORT_SYMBOL(hci_register_dev);
2242 /* Unregister HCI device */
/* Tear down a registered controller: unlink it, close it, flush pending
 * work, notify mgmt and userspace, release rfkill/sysfs/workqueues, purge
 * all stored security material and finally return the index to the IDA. */
2243 void hci_unregister_dev(struct hci_dev *hdev)
2247 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2249 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2253 write_lock(&hci_dev_list_lock);
2254 list_del(&hdev->list);
2255 write_unlock(&hci_dev_list_lock);
2257 hci_dev_do_close(hdev);
2259 for (i = 0; i < NUM_REASSEMBLY; i++)
2260 kfree_skb(hdev->reassembly[i]);
2262 cancel_work_sync(&hdev->power_on);
/* Only tell mgmt about removal if the index was ever announced */
2264 if (!test_bit(HCI_INIT, &hdev->flags) &&
2265 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
2267 mgmt_index_removed(hdev);
2268 hci_dev_unlock(hdev);
2271 /* mgmt_index_removed should take care of emptying the
2273 BUG_ON(!list_empty(&hdev->mgmt_pending));
2275 hci_notify(hdev, HCI_DEV_UNREG);
2278 rfkill_unregister(hdev->rfkill);
2279 rfkill_destroy(hdev->rfkill);
2282 hci_del_sysfs(hdev);
2284 destroy_workqueue(hdev->workqueue);
2285 destroy_workqueue(hdev->req_workqueue);
2288 hci_blacklist_clear(hdev);
2289 hci_uuids_clear(hdev);
2290 hci_link_keys_clear(hdev);
2291 hci_smp_ltks_clear(hdev);
2292 hci_remote_oob_data_clear(hdev);
2293 hci_dev_unlock(hdev);
2297 ida_simple_remove(&hci_index_ida, id);
2299 EXPORT_SYMBOL(hci_unregister_dev);
2301 /* Suspend HCI device */
/* Notify listeners (HCI sockets) that the device is suspending. */
2302 int hci_suspend_dev(struct hci_dev *hdev)
2304 hci_notify(hdev, HCI_DEV_SUSPEND);
2307 EXPORT_SYMBOL(hci_suspend_dev);
2309 /* Resume HCI device */
/* Notify listeners (HCI sockets) that the device has resumed. */
2310 int hci_resume_dev(struct hci_dev *hdev)
2312 hci_notify(hdev, HCI_DEV_RESUME);
2315 EXPORT_SYMBOL(hci_resume_dev);
2317 /* Receive frame from HCI drivers */
/* Entry point for drivers delivering a complete packet: reject frames for
 * devices that are neither up nor initializing, timestamp the skb, mark it
 * as incoming and hand it to the RX worker. */
2318 int hci_recv_frame(struct sk_buff *skb)
2320 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2321 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
2322 && !test_bit(HCI_INIT, &hdev->flags))) {
2328 bt_cb(skb)->incoming = 1;
2331 __net_timestamp(skb);
2333 skb_queue_tail(&hdev->rx_q, skb);
2334 queue_work(hdev->workqueue, &hdev->rx_work);
2338 EXPORT_SYMBOL(hci_recv_frame);
/* Accumulate fragmented driver data into a complete HCI packet in
 * hdev->reassembly[index]. Allocates the skb on the first fragment, grows
 * the expected length once the per-type header is complete, and passes the
 * finished packet to hci_recv_frame(). Returns (per convention visible in
 * callers) the number of bytes not yet consumed. */
2340 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
2341 int count, __u8 index)
2346 struct sk_buff *skb;
2347 struct bt_skb_cb *scb;
2349 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
2350 index >= NUM_REASSEMBLY)
2353 skb = hdev->reassembly[index];
/* First fragment: pick max size and header length for this packet type */
2357 case HCI_ACLDATA_PKT:
2358 len = HCI_MAX_FRAME_SIZE;
2359 hlen = HCI_ACL_HDR_SIZE;
2362 len = HCI_MAX_EVENT_SIZE;
2363 hlen = HCI_EVENT_HDR_SIZE;
2365 case HCI_SCODATA_PKT:
2366 len = HCI_MAX_SCO_SIZE;
2367 hlen = HCI_SCO_HDR_SIZE;
2371 skb = bt_skb_alloc(len, GFP_ATOMIC);
2375 scb = (void *) skb->cb;
2377 scb->pkt_type = type;
2379 skb->dev = (void *) hdev;
2380 hdev->reassembly[index] = skb;
2384 scb = (void *) skb->cb;
2385 len = min_t(uint, scb->expect, count);
2387 memcpy(skb_put(skb, len), data, len);
/* Header complete: learn the payload length from the packet header */
2396 if (skb->len == HCI_EVENT_HDR_SIZE) {
2397 struct hci_event_hdr *h = hci_event_hdr(skb);
2398 scb->expect = h->plen;
/* Oversized payload: drop the partial packet */
2400 if (skb_tailroom(skb) < scb->expect) {
2402 hdev->reassembly[index] = NULL;
2408 case HCI_ACLDATA_PKT:
2409 if (skb->len == HCI_ACL_HDR_SIZE) {
2410 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2411 scb->expect = __le16_to_cpu(h->dlen);
2413 if (skb_tailroom(skb) < scb->expect) {
2415 hdev->reassembly[index] = NULL;
2421 case HCI_SCODATA_PKT:
2422 if (skb->len == HCI_SCO_HDR_SIZE) {
2423 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2424 scb->expect = h->dlen;
2426 if (skb_tailroom(skb) < scb->expect) {
2428 hdev->reassembly[index] = NULL;
2435 if (scb->expect == 0) {
2436 /* Complete frame */
2438 bt_cb(skb)->pkt_type = type;
2439 hci_recv_frame(skb);
2441 hdev->reassembly[index] = NULL;
/* Feed typed fragments into the reassembler, one reassembly slot per
 * packet type (index = type - 1), looping until all input is consumed. */
2449 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2453 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2457 rem = hci_reassembly(hdev, type, data, count, type - 1);
2461 data += (count - rem);
2467 EXPORT_SYMBOL(hci_recv_fragment);
2469 #define STREAM_REASSEMBLY 0
/* Feed an untyped byte stream into the reassembler: the first byte of each
 * frame carries the packet type, and a single STREAM_REASSEMBLY slot is
 * shared for all frames. */
2471 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2477 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2480 struct { char type; } *pkt;
2482 /* Start of the frame */
2489 type = bt_cb(skb)->pkt_type;
2491 rem = hci_reassembly(hdev, type, data, count,
2496 data += (count - rem);
2502 EXPORT_SYMBOL(hci_recv_stream_fragment);
2504 /* ---- Interface to upper protocols ---- */
/* Register an upper-protocol callback structure on the global list. */
2506 int hci_register_cb(struct hci_cb *cb)
2508 BT_DBG("%p name %s", cb, cb->name);
2510 write_lock(&hci_cb_list_lock);
2511 list_add(&cb->list, &hci_cb_list);
2512 write_unlock(&hci_cb_list_lock);
2516 EXPORT_SYMBOL(hci_register_cb);
/* Remove an upper-protocol callback structure from the global list. */
2518 int hci_unregister_cb(struct hci_cb *cb)
2520 BT_DBG("%p name %s", cb, cb->name);
2522 write_lock(&hci_cb_list_lock);
2523 list_del(&cb->list);
2524 write_unlock(&hci_cb_list_lock);
2528 EXPORT_SYMBOL(hci_unregister_cb);
/* Hand one outgoing packet to the driver: timestamp it, mirror it to the
 * monitor channel (and to raw sockets when in promiscuous mode), then call
 * the driver's send() hook. */
2530 static int hci_send_frame(struct sk_buff *skb)
2532 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2539 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2542 __net_timestamp(skb);
2544 /* Send copy to monitor */
2545 hci_send_to_monitor(hdev, skb);
2547 if (atomic_read(&hdev->promisc)) {
2548 /* Send copy to the sockets */
2549 hci_send_to_sock(hdev, skb);
2552 /* Get rid of skb owner, prior to sending to the driver. */
2555 return hdev->send(skb);
/* Initialize an asynchronous HCI request: empty command queue (hdev/err
 * field initialization is among the lines elided from this excerpt). */
2558 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2560 skb_queue_head_init(&req->cmd_q);
/* Submit a built request: attach the completion callback to the last
 * command, splice the request's commands onto the device command queue
 * under its lock and kick the command worker. Empty or errored requests
 * are rejected. */
2565 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2567 struct hci_dev *hdev = req->hdev;
2568 struct sk_buff *skb;
2569 unsigned long flags;
2571 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2573 /* If an error occured during request building, remove all HCI
2574 * commands queued on the HCI request queue.
2577 skb_queue_purge(&req->cmd_q);
2581 /* Do not allow empty requests */
2582 if (skb_queue_empty(&req->cmd_q))
2585 skb = skb_peek_tail(&req->cmd_q);
2586 bt_cb(skb)->req.complete = complete;
2588 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2589 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2590 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2592 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Build a command skb: HCI command header (opcode little-endian, plen)
 * followed by the parameter block, tagged as HCI_COMMAND_PKT. */
2597 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
2598 u32 plen, void *param)
2600 int len = HCI_COMMAND_HDR_SIZE + plen;
2601 struct hci_command_hdr *hdr;
2602 struct sk_buff *skb;
2604 skb = bt_skb_alloc(len, GFP_ATOMIC);
2608 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2609 hdr->opcode = cpu_to_le16(opcode);
2613 memcpy(skb_put(skb, plen), param, plen);
2615 BT_DBG("skb len %d", skb->len);
2617 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2618 skb->dev = (void *) hdev;
2623 /* Send HCI command */
/* Queue a stand-alone HCI command (marked as a single-command request) on
 * the device command queue and kick the command worker. */
2624 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
2626 struct sk_buff *skb;
2628 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2630 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2632 BT_ERR("%s no memory for command", hdev->name);
2636 /* Stand-alone HCI commands must be flaged as
2637 * single-command requests.
2639 bt_cb(skb)->req.start = true;
2641 skb_queue_tail(&hdev->cmd_q, skb);
2642 queue_work(hdev->workqueue, &hdev->cmd_work);
2647 /* Queue a command to an asynchronous HCI request */
2648 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen, void *param,
2651 struct hci_dev *hdev = req->hdev;
2652 struct sk_buff *skb;
2654 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2656 /* If an error occured during request building, there is no point in
2657 * queueing the HCI command. We can simply return.
2662 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2664 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2665 hdev->name, opcode);
2670 if (skb_queue_empty(&req->cmd_q))
2671 bt_cb(skb)->req.start = true;
2673 bt_cb(skb)->req.event = event;
2675 skb_queue_tail(&req->cmd_q, skb);
/* Convenience wrapper: queue a command with no expected-event override. */
2678 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen, void *param)
2680 hci_req_add_ev(req, opcode, plen, param, 0);
2683 /* Get data from the previously sent command */
/* Return a pointer to the parameter block of the last sent command, or
 * (in the elided branches) NULL when none was sent or the opcode differs. */
2684 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2686 struct hci_command_hdr *hdr;
2688 if (!hdev->sent_cmd)
2691 hdr = (void *) hdev->sent_cmd->data;
2693 if (hdr->opcode != cpu_to_le16(opcode))
2696 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2698 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
/* Prepend an ACL data header (packed handle+flags, payload length) to an
 * outgoing skb. */
2702 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2704 struct hci_acl_hdr *hdr;
2707 skb_push(skb, HCI_ACL_HDR_SIZE);
2708 skb_reset_transport_header(skb);
2709 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2710 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2711 hdr->dlen = cpu_to_le16(len);
/* Queue an (possibly fragmented) ACL packet on a channel queue: the head
 * skb gets the full header; each frag_list continuation gets its own
 * header with ACL_START cleared, and all fragments are queued atomically
 * under the queue lock. */
2714 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
2715 struct sk_buff *skb, __u16 flags)
2717 struct hci_conn *conn = chan->conn;
2718 struct hci_dev *hdev = conn->hdev;
2719 struct sk_buff *list;
2721 skb->len = skb_headlen(skb);
2724 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
/* BR/EDR uses the connection handle; AMP uses the channel handle */
2726 switch (hdev->dev_type) {
2728 hci_add_acl_hdr(skb, conn->handle, flags);
2731 hci_add_acl_hdr(skb, chan->handle, flags);
2734 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2738 list = skb_shinfo(skb)->frag_list;
2740 /* Non fragmented */
2741 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2743 skb_queue_tail(queue, skb);
2746 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2748 skb_shinfo(skb)->frag_list = NULL;
2750 /* Queue all fragments atomically */
2751 spin_lock(&queue->lock);
2753 __skb_queue_tail(queue, skb);
2755 flags &= ~ACL_START;
2758 skb = list; list = list->next;
2760 skb->dev = (void *) hdev;
2761 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2762 hci_add_acl_hdr(skb, conn->handle, flags);
2764 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2766 __skb_queue_tail(queue, skb);
2769 spin_unlock(&queue->lock);
/* Public ACL send: queue the packet on the channel's data queue and wake
 * the TX worker. */
2773 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2775 struct hci_dev *hdev = chan->conn->hdev;
2777 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
2779 skb->dev = (void *) hdev;
2781 hci_queue_acl(chan, &chan->data_q, skb, flags);
2783 queue_work(hdev->workqueue, &hdev->tx_work);
/* Public SCO send: prepend the SCO header, queue on the connection's data
 * queue and wake the TX worker. */
2787 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2789 struct hci_dev *hdev = conn->hdev;
2790 struct hci_sco_hdr hdr;
2792 BT_DBG("%s len %d", hdev->name, skb->len);
2794 hdr.handle = cpu_to_le16(conn->handle);
2795 hdr.dlen = skb->len;
2797 skb_push(skb, HCI_SCO_HDR_SIZE);
2798 skb_reset_transport_header(skb);
2799 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2801 skb->dev = (void *) hdev;
2802 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2804 skb_queue_tail(&conn->data_q, skb);
2805 queue_work(hdev->workqueue, &hdev->tx_work);
2808 /* ---- HCI TX task (outgoing data) ---- */
2810 /* HCI Connection scheduler */
2811 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2814 struct hci_conn_hash *h = &hdev->conn_hash;
2815 struct hci_conn *conn = NULL, *c;
2816 unsigned int num = 0, min = ~0;
2818 /* We don't have to lock device here. Connections are always
2819 * added and removed with TX task disabled. */
2823 list_for_each_entry_rcu(c, &h->list, list) {
2824 if (c->type != type || skb_queue_empty(&c->data_q))
2827 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2832 if (c->sent < min) {
2837 if (hci_conn_num(hdev, type) == num)
2846 switch (conn->type) {
2848 cnt = hdev->acl_cnt;
2852 cnt = hdev->sco_cnt;
2855 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2859 BT_ERR("Unknown link type");
2867 BT_DBG("conn %p quote %d", conn, *quote);
/* Link TX timeout: disconnect every connection of this type that still has
 * unacknowledged packets outstanding. */
2871 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2873 struct hci_conn_hash *h = &hdev->conn_hash;
2876 BT_ERR("%s link tx timeout", hdev->name);
2880 /* Kill stalled connections */
2881 list_for_each_entry_rcu(c, &h->list, list) {
2882 if (c->type == type && c->sent) {
2883 BT_ERR("%s killing stalled connection %pMR",
2884 hdev->name, &c->dst);
2885 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
/* Channel-level scheduler: among all channels of connections of the given
 * type, choose the one whose head packet has the highest priority (ties
 * broken toward the connection with fewest in-flight packets), and compute
 * its quote from the matching credit pool. */
2892 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2895 struct hci_conn_hash *h = &hdev->conn_hash;
2896 struct hci_chan *chan = NULL;
2897 unsigned int num = 0, min = ~0, cur_prio = 0;
2898 struct hci_conn *conn;
2899 int cnt, q, conn_num = 0;
2901 BT_DBG("%s", hdev->name);
2905 list_for_each_entry_rcu(conn, &h->list, list) {
2906 struct hci_chan *tmp;
2908 if (conn->type != type)
2911 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2916 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2917 struct sk_buff *skb;
2919 if (skb_queue_empty(&tmp->data_q))
2922 skb = skb_peek(&tmp->data_q);
/* Lower-priority head packets cannot displace the current pick */
2923 if (skb->priority < cur_prio)
2926 if (skb->priority > cur_prio) {
2929 cur_prio = skb->priority;
2934 if (conn->sent < min) {
2940 if (hci_conn_num(hdev, type) == conn_num)
2949 switch (chan->conn->type) {
2951 cnt = hdev->acl_cnt;
2954 cnt = hdev->block_cnt;
2958 cnt = hdev->sco_cnt;
2961 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2965 BT_ERR("Unknown link type");
2970 BT_DBG("chan %p quote %d", chan, *quote);
/* Anti-starvation pass: after some traffic was sent, bump the priority of
 * waiting head packets (capped at HCI_PRIO_MAX - 1) so lower-priority
 * channels eventually get scheduled. */
2974 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2976 struct hci_conn_hash *h = &hdev->conn_hash;
2977 struct hci_conn *conn;
2980 BT_DBG("%s", hdev->name);
2984 list_for_each_entry_rcu(conn, &h->list, list) {
2985 struct hci_chan *chan;
2987 if (conn->type != type)
2990 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2995 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
2996 struct sk_buff *skb;
3003 if (skb_queue_empty(&chan->data_q))
3006 skb = skb_peek(&chan->data_q);
3007 if (skb->priority >= HCI_PRIO_MAX - 1)
3010 skb->priority = HCI_PRIO_MAX - 1;
3012 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3016 if (hci_conn_num(hdev, type) == num)
/* Number of controller data blocks consumed by one ACL packet's payload. */
3024 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3026 /* Calculate count of blocks used by this packet */
3027 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
/* Detect ACL TX stalls: if credits are exhausted and nothing was sent for
 * longer than HCI_ACL_TX_TIMEOUT, kill stalled ACL links (skipped in raw
 * mode where the stack does not manage flow control). */
3030 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3032 if (!test_bit(HCI_RAW, &hdev->flags)) {
3033 /* ACL tx timeout must be longer than maximum
3034 * link supervision timeout (40.9 seconds) */
3035 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3036 HCI_ACL_TX_TIMEOUT))
3037 hci_link_tx_to(hdev, ACL_LINK);
/* Packet-based ACL scheduler: while credits remain, drain the selected
 * channel up to its quote, stopping early if a lower-priority packet
 * reaches the queue head; recalculate priorities if anything was sent.
 * (Fixed mojibake: "e was a mis-encoded &quote.) */
3042 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3043 unsigned int cnt = hdev->acl_cnt;
3044 struct hci_chan *chan;
3045 struct sk_buff *skb;
3048 __check_timeout(hdev, cnt);
3050 while (hdev->acl_cnt &&
3051 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3052 u32 priority = (skb_peek(&chan->data_q))->priority;
3053 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3054 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3055 skb->len, skb->priority);
3057 /* Stop if priority has changed */
3058 if (skb->priority < priority)
3061 skb = skb_dequeue(&chan->data_q);
3063 hci_conn_enter_active_mode(chan->conn,
3064 bt_cb(skb)->force_active);
3066 hci_send_frame(skb);
3067 hdev->acl_last_tx = jiffies;
3075 if (cnt != hdev->acl_cnt)
3076 hci_prio_recalculate(hdev, ACL_LINK);
/* Block-based ACL scheduler (AMP flow control): like the packet scheduler
 * but accounting in controller data blocks via __get_blocks().
 * (Fixed mojibake: "e was a mis-encoded &quote.) */
3079 static void hci_sched_acl_blk(struct hci_dev *hdev)
3081 unsigned int cnt = hdev->block_cnt;
3082 struct hci_chan *chan;
3083 struct sk_buff *skb;
3087 __check_timeout(hdev, cnt);
3089 BT_DBG("%s", hdev->name);
3091 if (hdev->dev_type == HCI_AMP)
3096 while (hdev->block_cnt > 0 &&
3097 (chan = hci_chan_sent(hdev, type, &quote))) {
3098 u32 priority = (skb_peek(&chan->data_q))->priority;
3099 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3102 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3103 skb->len, skb->priority);
3105 /* Stop if priority has changed */
3106 if (skb->priority < priority)
3109 skb = skb_dequeue(&chan->data_q);
/* A packet needing more blocks than remain must wait */
3111 blocks = __get_blocks(hdev, skb);
3112 if (blocks > hdev->block_cnt)
3115 hci_conn_enter_active_mode(chan->conn,
3116 bt_cb(skb)->force_active);
3118 hci_send_frame(skb);
3119 hdev->acl_last_tx = jiffies;
3121 hdev->block_cnt -= blocks;
3124 chan->sent += blocks;
3125 chan->conn->sent += blocks;
3129 if (cnt != hdev->block_cnt)
3130 hci_prio_recalculate(hdev, type);
/* Dispatch ACL scheduling to the packet- or block-based variant according
 * to the controller's flow control mode. */
3133 static void hci_sched_acl(struct hci_dev *hdev)
3135 BT_DBG("%s", hdev->name);
3137 /* No ACL link over BR/EDR controller */
3138 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3141 /* No AMP link over AMP controller */
3142 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3145 switch (hdev->flow_ctl_mode) {
3146 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3147 hci_sched_acl_pkt(hdev);
3150 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3151 hci_sched_acl_blk(hdev);
/* SCO scheduler: drain each eligible SCO connection up to its quote while
 * SCO credits remain. (Fixed mojibake: "e was a mis-encoded &quote.) */
3157 static void hci_sched_sco(struct hci_dev *hdev)
3159 struct hci_conn *conn;
3160 struct sk_buff *skb;
3163 BT_DBG("%s", hdev->name);
3165 if (!hci_conn_num(hdev, SCO_LINK))
3168 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3169 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3170 BT_DBG("skb %p len %d", skb, skb->len);
3171 hci_send_frame(skb);
3174 if (conn->sent == ~0)
/* eSCO scheduler: identical to the SCO scheduler but for ESCO_LINK
 * connections. (Fixed mojibake: "e was a mis-encoded &quote.) */
3180 static void hci_sched_esco(struct hci_dev *hdev)
3182 struct hci_conn *conn;
3183 struct sk_buff *skb;
3186 BT_DBG("%s", hdev->name);
3188 if (!hci_conn_num(hdev, ESCO_LINK))
3191 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3192 &quote))) {
3193 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3194 BT_DBG("skb %p len %d", skb, skb->len);
3195 hci_send_frame(skb);
3198 if (conn->sent == ~0)
/* LE scheduler: like the ACL packet scheduler but drawing on LE credits
 * (falling back to the ACL pool when the controller has no dedicated LE
 * buffers) and with its own stall detection.
 * (Fixed mojibake: "e was a mis-encoded &quote.) */
3204 static void hci_sched_le(struct hci_dev *hdev)
3206 struct hci_chan *chan;
3207 struct sk_buff *skb;
3208 int quote, cnt, tmp;
3210 BT_DBG("%s", hdev->name);
3212 if (!hci_conn_num(hdev, LE_LINK))
3215 if (!test_bit(HCI_RAW, &hdev->flags)) {
3216 /* LE tx timeout must be longer than maximum
3217 * link supervision timeout (40.9 seconds) */
3218 if (!hdev->le_cnt && hdev->le_pkts &&
3219 time_after(jiffies, hdev->le_last_tx + HZ * 45))
3220 hci_link_tx_to(hdev, LE_LINK);
3223 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3225 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3226 u32 priority = (skb_peek(&chan->data_q))->priority;
3227 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3228 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3229 skb->len, skb->priority);
3231 /* Stop if priority has changed */
3232 if (skb->priority < priority)
3235 skb = skb_dequeue(&chan->data_q);
3237 hci_send_frame(skb);
3238 hdev->le_last_tx = jiffies;
/* When sharing the ACL pool, charge the ACL credit counter */
3249 hdev->acl_cnt = cnt;
3252 hci_prio_recalculate(hdev, LE_LINK);
/* TX worker: run every per-type scheduler, then flush any raw (unknown
 * type) packets straight to the driver. */
3255 static void hci_tx_work(struct work_struct *work)
3257 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3258 struct sk_buff *skb;
3260 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
3261 hdev->sco_cnt, hdev->le_cnt);
3263 /* Schedule queues and send stuff to HCI driver */
3265 hci_sched_acl(hdev);
3267 hci_sched_sco(hdev);
3269 hci_sched_esco(hdev);
3273 /* Send next queued raw (unknown type) packet */
3274 while ((skb = skb_dequeue(&hdev->raw_q)))
3275 hci_send_frame(skb);
3278 /* ----- HCI RX task (incoming data processing) ----- */
3280 /* ACL data packet */
3280 /* ACL data packet */
/* RX path for ACL data: unpack handle/flags from the header, look up the
 * connection and pass the payload to L2CAP; unknown handles are logged. */
3281 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3283 struct hci_acl_hdr *hdr = (void *) skb->data;
3284 struct hci_conn *conn;
3285 __u16 handle, flags;
3287 skb_pull(skb, HCI_ACL_HDR_SIZE);
3289 handle = __le16_to_cpu(hdr->handle);
3290 flags = hci_flags(handle);
3291 handle = hci_handle(handle);
3293 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3296 hdev->stat.acl_rx++;
3299 conn = hci_conn_hash_lookup_handle(hdev, handle);
3300 hci_dev_unlock(hdev);
3303 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3305 /* Send to upper protocol */
3306 l2cap_recv_acldata(conn, skb, flags);
3309 BT_ERR("%s ACL packet for unknown connection handle %d",
3310 hdev->name, handle);
3316 /* SCO data packet */
/* Handle one incoming SCO data packet from the controller.
 * Strips the HCI SCO header, looks the connection up by handle and hands
 * the payload to the SCO layer. An unknown handle is reported with BT_ERR.
 * NOTE(review): elided listing — the `handle` local declaration, the
 * matching hci_dev_lock() call, the if (conn)/else structure and the
 * kfree_skb() on the unknown-handle path are not visible here.
 */
3317 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3319 	struct hci_sco_hdr *hdr = (void *) skb->data;
3320 	struct hci_conn *conn;
3323 	skb_pull(skb, HCI_SCO_HDR_SIZE);
3325 	handle = __le16_to_cpu(hdr->handle);
3327 	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
3329 	hdev->stat.sco_rx++;
3332 	conn = hci_conn_hash_lookup_handle(hdev, handle);
3333 	hci_dev_unlock(hdev);
3336 	/* Send to upper protocol */
3337 	sco_recv_scodata(conn, skb);
3340 	BT_ERR("%s SCO packet for unknown connection handle %d",
3341 	hdev->name, handle);
/* Check whether the currently executing HCI request is complete.
 * Peeks at the head of hdev->cmd_q: if the next queued command carries the
 * req.start marker it begins a *new* request, meaning the current request
 * has no further commands pending.
 * NOTE(review): the branch taken when the queue is empty (skb == NULL) is
 * elided from this listing — presumably it returns true; confirm.
 */
3347 static bool hci_req_is_complete(struct hci_dev *hdev)
3349 	struct sk_buff *skb;
3351 	skb = skb_peek(&hdev->cmd_q);
3355 	return bt_cb(skb)->req.start;
/* Re-send the last transmitted HCI command.
 * Clones hdev->sent_cmd, puts the clone back at the head of the command
 * queue and kicks cmd_work. Used when a controller generates a spontaneous
 * reset-complete during init (see hci_req_cmd_complete). Nothing happens
 * when no command was ever sent, and an HCI_Reset itself is not resent.
 * NOTE(review): elided listing — the early-return bodies, the `opcode`
 * local declaration and the !skb check after skb_clone() are not visible.
 */
3358 static void hci_resend_last(struct hci_dev *hdev)
3360 	struct hci_command_hdr *sent;
3361 	struct sk_buff *skb;
3364 	if (!hdev->sent_cmd)
3367 	sent = (void *) hdev->sent_cmd->data;
3368 	opcode = __le16_to_cpu(sent->opcode);
3369 	if (opcode == HCI_OP_RESET)
3372 	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3376 	skb_queue_head(&hdev->cmd_q, skb);
3377 	queue_work(hdev->workqueue, &hdev->cmd_work);
/* Handle completion of the HCI command @opcode with @status.
 * Decides whether the request the command belonged to is now finished
 * and, if so, finds the request's completion callback — either on
 * hdev->sent_cmd (last command of the request) or on one of the queued
 * skbs — removes the request's remaining commands from hdev->cmd_q, and
 * invokes the callback.
 * NOTE(review): several closing braces and early returns are elided from
 * this listing; control flow below is reconstructed from what is visible.
 */
3380 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
3382 	hci_req_complete_t req_complete = NULL;
3383 	struct sk_buff *skb;
3384 	unsigned long flags;
3386 	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3388 	/* If the completed command doesn't match the last one that was
3389 	* sent we need to do special handling of it.
3391 	if (!hci_sent_cmd_data(hdev, opcode)) {
3392 	/* Some CSR based controllers generate a spontaneous
3393 	* reset complete event during init and any pending
3394 	* command will never be completed. In such a case we
3395 	* need to resend whatever was the last sent
3398 	if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3399 	hci_resend_last(hdev);
3404 	/* If the command succeeded and there's still more commands in
3405 	* this request the request is not yet complete.
3407 	if (!status && !hci_req_is_complete(hdev))
3410 	/* If this was the last command in a request the complete
3411 	* callback would be found in hdev->sent_cmd instead of the
3412 	* command queue (hdev->cmd_q).
3414 	if (hdev->sent_cmd) {
3415 	req_complete = bt_cb(hdev->sent_cmd)->req.complete;
3420 	/* Remove all pending commands belonging to this request */
/* irqsave variant: cmd_q can presumably be touched from other contexts —
 * TODO confirm which contexts enqueue to cmd_q. */
3421 	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3422 	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
/* Hit the start of the *next* request: put it back and stop draining. */
3423 	if (bt_cb(skb)->req.start) {
3424 	__skb_queue_head(&hdev->cmd_q, skb);
/* Last dequeued skb of the current request wins; its req.complete (if any)
 * becomes the callback to invoke. */
3428 	req_complete = bt_cb(skb)->req.complete;
3431 	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
/* The guard (if req_complete) is elided in this listing. */
3435 	req_complete(hdev, status);
/* Work handler for hdev->rx_work: drains hdev->rx_q and dispatches each
 * packet by type. Every packet is copied to the monitor socket; in
 * promiscuous mode a copy also goes to raw HCI sockets. While HCI_RAW or
 * HCI_INIT is set, packets are filtered (the drop/continue statements are
 * elided below); otherwise events go to hci_event_packet() and ACL/SCO
 * data to their respective handlers.
 * NOTE(review): elided listing — several case labels (incl. the event
 * packet case), break/continue statements, kfree_skb() calls and the
 * default branch are not visible here.
 */
3438 static void hci_rx_work(struct work_struct *work)
3440 	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
3441 	struct sk_buff *skb;
3443 	BT_DBG("%s", hdev->name);
3445 	while ((skb = skb_dequeue(&hdev->rx_q))) {
3446 	/* Send copy to monitor */
3447 	hci_send_to_monitor(hdev, skb);
3449 	if (atomic_read(&hdev->promisc)) {
3450 	/* Send copy to the sockets */
3451 	hci_send_to_sock(hdev, skb);
/* Userspace has raw control of the device: the stack does not process
 * packets itself (body elided). */
3454 	if (test_bit(HCI_RAW, &hdev->flags)) {
3459 	if (test_bit(HCI_INIT, &hdev->flags)) {
3460 	/* Don't process data packets in this states. */
3461 	switch (bt_cb(skb)->pkt_type) {
3462 	case HCI_ACLDATA_PKT:
3463 	case HCI_SCODATA_PKT:
/* Normal-path dispatch by packet type. */
3470 	switch (bt_cb(skb)->pkt_type) {
3472 	BT_DBG("%s Event packet", hdev->name);
3473 	hci_event_packet(hdev, skb);
3476 	case HCI_ACLDATA_PKT:
3477 	BT_DBG("%s ACL data packet", hdev->name);
3478 	hci_acldata_packet(hdev, skb);
3481 	case HCI_SCODATA_PKT:
3482 	BT_DBG("%s SCO data packet", hdev->name);
3483 	hci_scodata_packet(hdev, skb);
/* Work handler for hdev->cmd_work: sends the next queued HCI command when
 * the controller has command credit (cmd_cnt > 0). A clone of the command
 * is kept in hdev->sent_cmd so the eventual completion event can be
 * matched; the command timeout timer is (re)armed, except while an
 * HCI_Reset is in flight, where it is stopped instead. If cloning fails
 * the command is put back at the head of the queue and the work re-queued.
 * NOTE(review): elided listing — the !skb early-out after skb_dequeue()
 * and the else branch pairing are not fully visible here.
 */
3493 static void hci_cmd_work(struct work_struct *work)
3495 	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
3496 	struct sk_buff *skb;
3498 	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3499 	atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
3501 	/* Send queued commands */
3502 	if (atomic_read(&hdev->cmd_cnt)) {
3503 	skb = skb_dequeue(&hdev->cmd_q);
/* Drop the previously saved command before saving the new one. */
3507 	kfree_skb(hdev->sent_cmd);
3509 	hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
3510 	if (hdev->sent_cmd) {
/* Consume one command credit and hand the frame to the driver. */
3511 	atomic_dec(&hdev->cmd_cnt);
3512 	hci_send_frame(skb);
/* No timeout while resetting; otherwise arm the command watchdog. */
3513 	if (test_bit(HCI_RESET, &hdev->flags))
3514 	del_timer(&hdev->cmd_timer);
3516 	mod_timer(&hdev->cmd_timer,
3517 	jiffies + HCI_CMD_TIMEOUT);
/* Clone failed: retry later with the command back at the queue head. */
3519 	skb_queue_head(&hdev->cmd_q, skb);
3520 	queue_work(hdev->workqueue, &hdev->cmd_work);
/* Start an HCI Inquiry using the General Inquiry Access Code (GIAC).
 * Returns -EINPROGRESS if an inquiry is already running, otherwise the
 * result of hci_send_cmd(). The inquiry cache is flushed before starting.
 * @length: inquiry duration — presumably in 1.28 s units per the HCI
 * Inquiry command definition; TODO confirm against the core spec.
 * NOTE(review): the line assigning @length into cp (cp.length) is elided
 * from this listing.
 */
3525 int hci_do_inquiry(struct hci_dev *hdev, u8 length)
3527 	/* General inquiry access code (GIAC) */
3528 	u8 lap[3] = { 0x33, 0x8b, 0x9e };
3529 	struct hci_cp_inquiry cp;
3531 	BT_DBG("%s", hdev->name);
3533 	if (test_bit(HCI_INQUIRY, &hdev->flags))
3534 	return -EINPROGRESS;
3536 	inquiry_cache_flush(hdev);
3538 	memset(&cp, 0, sizeof(cp));
3539 	memcpy(&cp.lap, lap, sizeof(cp.lap));
3542 	return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* Cancel a running inquiry by sending HCI_OP_INQUIRY_CANCEL.
 * When HCI_INQUIRY is not set there is nothing to cancel — the early-
 * return body is elided from this listing (presumably returns an error
 * such as -EALREADY; confirm against the full source).
 */
3545 int hci_cancel_inquiry(struct hci_dev *hdev)
3547 	BT_DBG("%s", hdev->name);
3549 	if (!test_bit(HCI_INQUIRY, &hdev->flags))
3552 	return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3555 u8 bdaddr_to_le(u8 bdaddr_type)
3557 switch (bdaddr_type) {
3558 case BDADDR_LE_PUBLIC:
3559 return ADDR_LE_DEV_PUBLIC;
3562 /* Fallback to LE Random address type */
3563 return ADDR_LE_DEV_RANDOM;