/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010  Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI Management interface */
#include <linux/module.h>

#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>
/* Version of the mgmt interface implemented by this file; reported to
 * user space via the Read Management Version Information command.
 */
#define MGMT_VERSION	1
#define MGMT_REVISION	6
40 static const u16 mgmt_commands[] = {
41 MGMT_OP_READ_INDEX_LIST,
44 MGMT_OP_SET_DISCOVERABLE,
45 MGMT_OP_SET_CONNECTABLE,
46 MGMT_OP_SET_FAST_CONNECTABLE,
48 MGMT_OP_SET_LINK_SECURITY,
52 MGMT_OP_SET_DEV_CLASS,
53 MGMT_OP_SET_LOCAL_NAME,
56 MGMT_OP_LOAD_LINK_KEYS,
57 MGMT_OP_LOAD_LONG_TERM_KEYS,
59 MGMT_OP_GET_CONNECTIONS,
60 MGMT_OP_PIN_CODE_REPLY,
61 MGMT_OP_PIN_CODE_NEG_REPLY,
62 MGMT_OP_SET_IO_CAPABILITY,
64 MGMT_OP_CANCEL_PAIR_DEVICE,
65 MGMT_OP_UNPAIR_DEVICE,
66 MGMT_OP_USER_CONFIRM_REPLY,
67 MGMT_OP_USER_CONFIRM_NEG_REPLY,
68 MGMT_OP_USER_PASSKEY_REPLY,
69 MGMT_OP_USER_PASSKEY_NEG_REPLY,
70 MGMT_OP_READ_LOCAL_OOB_DATA,
71 MGMT_OP_ADD_REMOTE_OOB_DATA,
72 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
73 MGMT_OP_START_DISCOVERY,
74 MGMT_OP_STOP_DISCOVERY,
77 MGMT_OP_UNBLOCK_DEVICE,
78 MGMT_OP_SET_DEVICE_ID,
79 MGMT_OP_SET_ADVERTISING,
81 MGMT_OP_SET_STATIC_ADDRESS,
82 MGMT_OP_SET_SCAN_PARAMS,
83 MGMT_OP_SET_SECURE_CONN,
84 MGMT_OP_SET_DEBUG_KEYS,
87 MGMT_OP_GET_CONN_INFO,
90 static const u16 mgmt_events[] = {
91 MGMT_EV_CONTROLLER_ERROR,
93 MGMT_EV_INDEX_REMOVED,
95 MGMT_EV_CLASS_OF_DEV_CHANGED,
96 MGMT_EV_LOCAL_NAME_CHANGED,
98 MGMT_EV_NEW_LONG_TERM_KEY,
99 MGMT_EV_DEVICE_CONNECTED,
100 MGMT_EV_DEVICE_DISCONNECTED,
101 MGMT_EV_CONNECT_FAILED,
102 MGMT_EV_PIN_CODE_REQUEST,
103 MGMT_EV_USER_CONFIRM_REQUEST,
104 MGMT_EV_USER_PASSKEY_REQUEST,
106 MGMT_EV_DEVICE_FOUND,
108 MGMT_EV_DEVICE_BLOCKED,
109 MGMT_EV_DEVICE_UNBLOCKED,
110 MGMT_EV_DEVICE_UNPAIRED,
111 MGMT_EV_PASSKEY_NOTIFY,
/* How long the UUID/EIR service cache stays valid after a change. */
#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

/* A device counts as powered for mgmt purposes only when HCI is up AND
 * the auto-power-off flag is not set (i.e. user space really powered it).
 */
#define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
			       !test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
122 struct list_head list;
130 /* HCI to MGMT error code conversion table */
131 static u8 mgmt_status_table[] = {
133 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
134 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
135 MGMT_STATUS_FAILED, /* Hardware Failure */
136 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
137 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
138 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
139 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
140 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
141 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
142 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
143 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
144 MGMT_STATUS_BUSY, /* Command Disallowed */
145 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
146 MGMT_STATUS_REJECTED, /* Rejected Security */
147 MGMT_STATUS_REJECTED, /* Rejected Personal */
148 MGMT_STATUS_TIMEOUT, /* Host Timeout */
149 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
150 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
151 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
152 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
153 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
154 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
155 MGMT_STATUS_BUSY, /* Repeated Attempts */
156 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
157 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
158 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
159 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
160 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
161 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
162 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
163 MGMT_STATUS_FAILED, /* Unspecified Error */
164 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
165 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
166 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
167 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
168 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
169 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
170 MGMT_STATUS_FAILED, /* Unit Link Key Used */
171 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
172 MGMT_STATUS_TIMEOUT, /* Instant Passed */
173 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
174 MGMT_STATUS_FAILED, /* Transaction Collision */
175 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
176 MGMT_STATUS_REJECTED, /* QoS Rejected */
177 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
178 MGMT_STATUS_REJECTED, /* Insufficient Security */
179 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
180 MGMT_STATUS_BUSY, /* Role Switch Pending */
181 MGMT_STATUS_FAILED, /* Slot Violation */
182 MGMT_STATUS_FAILED, /* Role Switch Failed */
183 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
184 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
185 MGMT_STATUS_BUSY, /* Host Busy Pairing */
186 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
187 MGMT_STATUS_BUSY, /* Controller Busy */
188 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
189 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
190 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
191 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
192 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
195 static u8 mgmt_status(u8 hci_status)
197 if (hci_status < ARRAY_SIZE(mgmt_status_table))
198 return mgmt_status_table[hci_status];
200 return MGMT_STATUS_FAILED;
203 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
206 struct mgmt_hdr *hdr;
207 struct mgmt_ev_cmd_status *ev;
210 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
212 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
216 hdr = (void *) skb_put(skb, sizeof(*hdr));
218 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
219 hdr->index = cpu_to_le16(index);
220 hdr->len = cpu_to_le16(sizeof(*ev));
222 ev = (void *) skb_put(skb, sizeof(*ev));
224 ev->opcode = cpu_to_le16(cmd);
226 err = sock_queue_rcv_skb(sk, skb);
233 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
234 void *rp, size_t rp_len)
237 struct mgmt_hdr *hdr;
238 struct mgmt_ev_cmd_complete *ev;
241 BT_DBG("sock %p", sk);
243 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
247 hdr = (void *) skb_put(skb, sizeof(*hdr));
249 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
250 hdr->index = cpu_to_le16(index);
251 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
253 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
254 ev->opcode = cpu_to_le16(cmd);
258 memcpy(ev->data, rp, rp_len);
260 err = sock_queue_rcv_skb(sk, skb);
267 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
270 struct mgmt_rp_read_version rp;
272 BT_DBG("sock %p", sk);
274 rp.version = MGMT_VERSION;
275 rp.revision = cpu_to_le16(MGMT_REVISION);
277 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
281 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
284 struct mgmt_rp_read_commands *rp;
285 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
286 const u16 num_events = ARRAY_SIZE(mgmt_events);
291 BT_DBG("sock %p", sk);
293 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
295 rp = kmalloc(rp_size, GFP_KERNEL);
299 rp->num_commands = cpu_to_le16(num_commands);
300 rp->num_events = cpu_to_le16(num_events);
302 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
303 put_unaligned_le16(mgmt_commands[i], opcode);
305 for (i = 0; i < num_events; i++, opcode++)
306 put_unaligned_le16(mgmt_events[i], opcode);
308 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
315 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
318 struct mgmt_rp_read_index_list *rp;
324 BT_DBG("sock %p", sk);
326 read_lock(&hci_dev_list_lock);
329 list_for_each_entry(d, &hci_dev_list, list) {
330 if (d->dev_type == HCI_BREDR)
334 rp_len = sizeof(*rp) + (2 * count);
335 rp = kmalloc(rp_len, GFP_ATOMIC);
337 read_unlock(&hci_dev_list_lock);
342 list_for_each_entry(d, &hci_dev_list, list) {
343 if (test_bit(HCI_SETUP, &d->dev_flags))
346 if (test_bit(HCI_USER_CHANNEL, &d->dev_flags))
349 if (d->dev_type == HCI_BREDR) {
350 rp->index[count++] = cpu_to_le16(d->id);
351 BT_DBG("Added hci%u", d->id);
355 rp->num_controllers = cpu_to_le16(count);
356 rp_len = sizeof(*rp) + (2 * count);
358 read_unlock(&hci_dev_list_lock);
360 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
368 static u32 get_supported_settings(struct hci_dev *hdev)
372 settings |= MGMT_SETTING_POWERED;
373 settings |= MGMT_SETTING_PAIRABLE;
374 settings |= MGMT_SETTING_DEBUG_KEYS;
376 if (lmp_bredr_capable(hdev)) {
377 settings |= MGMT_SETTING_CONNECTABLE;
378 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
379 settings |= MGMT_SETTING_FAST_CONNECTABLE;
380 settings |= MGMT_SETTING_DISCOVERABLE;
381 settings |= MGMT_SETTING_BREDR;
382 settings |= MGMT_SETTING_LINK_SECURITY;
384 if (lmp_ssp_capable(hdev)) {
385 settings |= MGMT_SETTING_SSP;
386 settings |= MGMT_SETTING_HS;
389 if (lmp_sc_capable(hdev) ||
390 test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
391 settings |= MGMT_SETTING_SECURE_CONN;
394 if (lmp_le_capable(hdev)) {
395 settings |= MGMT_SETTING_LE;
396 settings |= MGMT_SETTING_ADVERTISING;
397 settings |= MGMT_SETTING_PRIVACY;
403 static u32 get_current_settings(struct hci_dev *hdev)
407 if (hdev_is_powered(hdev))
408 settings |= MGMT_SETTING_POWERED;
410 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
411 settings |= MGMT_SETTING_CONNECTABLE;
413 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
414 settings |= MGMT_SETTING_FAST_CONNECTABLE;
416 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
417 settings |= MGMT_SETTING_DISCOVERABLE;
419 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
420 settings |= MGMT_SETTING_PAIRABLE;
422 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
423 settings |= MGMT_SETTING_BREDR;
425 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
426 settings |= MGMT_SETTING_LE;
428 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
429 settings |= MGMT_SETTING_LINK_SECURITY;
431 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
432 settings |= MGMT_SETTING_SSP;
434 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
435 settings |= MGMT_SETTING_HS;
437 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
438 settings |= MGMT_SETTING_ADVERTISING;
440 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
441 settings |= MGMT_SETTING_SECURE_CONN;
443 if (test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags))
444 settings |= MGMT_SETTING_DEBUG_KEYS;
446 if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
447 settings |= MGMT_SETTING_PRIVACY;
452 #define PNP_INFO_SVCLASS_ID 0x1200
454 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
456 u8 *ptr = data, *uuids_start = NULL;
457 struct bt_uuid *uuid;
462 list_for_each_entry(uuid, &hdev->uuids, list) {
465 if (uuid->size != 16)
468 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
472 if (uuid16 == PNP_INFO_SVCLASS_ID)
478 uuids_start[1] = EIR_UUID16_ALL;
482 /* Stop if not enough space to put next UUID */
483 if ((ptr - data) + sizeof(u16) > len) {
484 uuids_start[1] = EIR_UUID16_SOME;
488 *ptr++ = (uuid16 & 0x00ff);
489 *ptr++ = (uuid16 & 0xff00) >> 8;
490 uuids_start[0] += sizeof(uuid16);
496 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
498 u8 *ptr = data, *uuids_start = NULL;
499 struct bt_uuid *uuid;
504 list_for_each_entry(uuid, &hdev->uuids, list) {
505 if (uuid->size != 32)
511 uuids_start[1] = EIR_UUID32_ALL;
515 /* Stop if not enough space to put next UUID */
516 if ((ptr - data) + sizeof(u32) > len) {
517 uuids_start[1] = EIR_UUID32_SOME;
521 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
523 uuids_start[0] += sizeof(u32);
529 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
531 u8 *ptr = data, *uuids_start = NULL;
532 struct bt_uuid *uuid;
537 list_for_each_entry(uuid, &hdev->uuids, list) {
538 if (uuid->size != 128)
544 uuids_start[1] = EIR_UUID128_ALL;
548 /* Stop if not enough space to put next UUID */
549 if ((ptr - data) + 16 > len) {
550 uuids_start[1] = EIR_UUID128_SOME;
554 memcpy(ptr, uuid->uuid, 16);
556 uuids_start[0] += 16;
562 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
564 struct pending_cmd *cmd;
566 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
567 if (cmd->opcode == opcode)
574 static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
579 name_len = strlen(hdev->dev_name);
581 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
583 if (name_len > max_len) {
585 ptr[1] = EIR_NAME_SHORT;
587 ptr[1] = EIR_NAME_COMPLETE;
589 ptr[0] = name_len + 1;
591 memcpy(ptr + 2, hdev->dev_name, name_len);
593 ad_len += (name_len + 2);
594 ptr += (name_len + 2);
600 static void update_scan_rsp_data(struct hci_request *req)
602 struct hci_dev *hdev = req->hdev;
603 struct hci_cp_le_set_scan_rsp_data cp;
606 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
609 memset(&cp, 0, sizeof(cp));
611 len = create_scan_rsp_data(hdev, cp.data);
613 if (hdev->scan_rsp_data_len == len &&
614 memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
617 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
618 hdev->scan_rsp_data_len = len;
622 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
625 static u8 get_adv_discov_flags(struct hci_dev *hdev)
627 struct pending_cmd *cmd;
629 /* If there's a pending mgmt command the flags will not yet have
630 * their final values, so check for this first.
632 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
634 struct mgmt_mode *cp = cmd->param;
636 return LE_AD_GENERAL;
637 else if (cp->val == 0x02)
638 return LE_AD_LIMITED;
640 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
641 return LE_AD_LIMITED;
642 else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
643 return LE_AD_GENERAL;
649 static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
651 u8 ad_len = 0, flags = 0;
653 flags |= get_adv_discov_flags(hdev);
655 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
656 flags |= LE_AD_NO_BREDR;
659 BT_DBG("adv flags 0x%02x", flags);
669 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
671 ptr[1] = EIR_TX_POWER;
672 ptr[2] = (u8) hdev->adv_tx_power;
681 static void update_adv_data(struct hci_request *req)
683 struct hci_dev *hdev = req->hdev;
684 struct hci_cp_le_set_adv_data cp;
687 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
690 memset(&cp, 0, sizeof(cp));
692 len = create_adv_data(hdev, cp.data);
694 if (hdev->adv_data_len == len &&
695 memcmp(cp.data, hdev->adv_data, len) == 0)
698 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
699 hdev->adv_data_len = len;
703 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
706 static void create_eir(struct hci_dev *hdev, u8 *data)
711 name_len = strlen(hdev->dev_name);
717 ptr[1] = EIR_NAME_SHORT;
719 ptr[1] = EIR_NAME_COMPLETE;
721 /* EIR Data length */
722 ptr[0] = name_len + 1;
724 memcpy(ptr + 2, hdev->dev_name, name_len);
726 ptr += (name_len + 2);
729 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
731 ptr[1] = EIR_TX_POWER;
732 ptr[2] = (u8) hdev->inq_tx_power;
737 if (hdev->devid_source > 0) {
739 ptr[1] = EIR_DEVICE_ID;
741 put_unaligned_le16(hdev->devid_source, ptr + 2);
742 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
743 put_unaligned_le16(hdev->devid_product, ptr + 6);
744 put_unaligned_le16(hdev->devid_version, ptr + 8);
749 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
750 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
751 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
754 static void update_eir(struct hci_request *req)
756 struct hci_dev *hdev = req->hdev;
757 struct hci_cp_write_eir cp;
759 if (!hdev_is_powered(hdev))
762 if (!lmp_ext_inq_capable(hdev))
765 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
768 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
771 memset(&cp, 0, sizeof(cp));
773 create_eir(hdev, cp.data);
775 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
778 memcpy(hdev->eir, cp.data, sizeof(cp.data));
780 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
783 static u8 get_service_classes(struct hci_dev *hdev)
785 struct bt_uuid *uuid;
788 list_for_each_entry(uuid, &hdev->uuids, list)
789 val |= uuid->svc_hint;
794 static void update_class(struct hci_request *req)
796 struct hci_dev *hdev = req->hdev;
799 BT_DBG("%s", hdev->name);
801 if (!hdev_is_powered(hdev))
804 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
807 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
810 cod[0] = hdev->minor_class;
811 cod[1] = hdev->major_class;
812 cod[2] = get_service_classes(hdev);
814 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
817 if (memcmp(cod, hdev->dev_class, 3) == 0)
820 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
823 static bool get_connectable(struct hci_dev *hdev)
825 struct pending_cmd *cmd;
827 /* If there's a pending mgmt command the flag will not yet have
828 * it's final value, so check for this first.
830 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
832 struct mgmt_mode *cp = cmd->param;
836 return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
839 static void enable_advertising(struct hci_request *req)
841 struct hci_dev *hdev = req->hdev;
842 struct hci_cp_le_set_adv_param cp;
843 u8 own_addr_type, enable = 0x01;
846 /* Clear the HCI_ADVERTISING bit temporarily so that the
847 * hci_update_random_address knows that it's safe to go ahead
848 * and write a new random address. The flag will be set back on
849 * as soon as the SET_ADV_ENABLE HCI command completes.
851 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
853 connectable = get_connectable(hdev);
855 /* Set require_privacy to true only when non-connectable
856 * advertising is used. In that case it is fine to use a
857 * non-resolvable private address.
859 if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
862 memset(&cp, 0, sizeof(cp));
863 cp.min_interval = cpu_to_le16(0x0800);
864 cp.max_interval = cpu_to_le16(0x0800);
865 cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
866 cp.own_address_type = own_addr_type;
867 cp.channel_map = hdev->le_adv_channel_map;
869 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
871 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
874 static void disable_advertising(struct hci_request *req)
878 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
881 static void service_cache_off(struct work_struct *work)
883 struct hci_dev *hdev = container_of(work, struct hci_dev,
885 struct hci_request req;
887 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
890 hci_req_init(&req, hdev);
897 hci_dev_unlock(hdev);
899 hci_req_run(&req, NULL);
902 static void rpa_expired(struct work_struct *work)
904 struct hci_dev *hdev = container_of(work, struct hci_dev,
906 struct hci_request req;
910 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
912 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
913 hci_conn_num(hdev, LE_LINK) > 0)
916 /* The generation of a new RPA and programming it into the
917 * controller happens in the enable_advertising() function.
920 hci_req_init(&req, hdev);
922 disable_advertising(&req);
923 enable_advertising(&req);
925 hci_req_run(&req, NULL);
928 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
930 if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
933 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
934 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
936 /* Non-mgmt controlled devices get this bit set
937 * implicitly so that pairing works for them, however
938 * for mgmt we require user-space to explicitly enable
941 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
944 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
945 void *data, u16 data_len)
947 struct mgmt_rp_read_info rp;
949 BT_DBG("sock %p %s", sk, hdev->name);
953 memset(&rp, 0, sizeof(rp));
955 bacpy(&rp.bdaddr, &hdev->bdaddr);
957 rp.version = hdev->hci_ver;
958 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
960 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
961 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
963 memcpy(rp.dev_class, hdev->dev_class, 3);
965 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
966 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
968 hci_dev_unlock(hdev);
970 return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
974 static void mgmt_pending_free(struct pending_cmd *cmd)
981 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
982 struct hci_dev *hdev, void *data,
985 struct pending_cmd *cmd;
987 cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
991 cmd->opcode = opcode;
992 cmd->index = hdev->id;
994 cmd->param = kmalloc(len, GFP_KERNEL);
1001 memcpy(cmd->param, data, len);
1006 list_add(&cmd->list, &hdev->mgmt_pending);
1011 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
1012 void (*cb)(struct pending_cmd *cmd,
1016 struct pending_cmd *cmd, *tmp;
1018 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
1019 if (opcode > 0 && cmd->opcode != opcode)
1026 static void mgmt_pending_remove(struct pending_cmd *cmd)
1028 list_del(&cmd->list);
1029 mgmt_pending_free(cmd);
1032 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1034 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1036 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
1040 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status)
1042 BT_DBG("%s status 0x%02x", hdev->name, status);
1044 if (hci_conn_count(hdev) == 0) {
1045 cancel_delayed_work(&hdev->power_off);
1046 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1050 static void hci_stop_discovery(struct hci_request *req)
1052 struct hci_dev *hdev = req->hdev;
1053 struct hci_cp_remote_name_req_cancel cp;
1054 struct inquiry_entry *e;
1056 switch (hdev->discovery.state) {
1057 case DISCOVERY_FINDING:
1058 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
1059 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
1061 cancel_delayed_work(&hdev->le_scan_disable);
1062 hci_req_add_le_scan_disable(req);
1067 case DISCOVERY_RESOLVING:
1068 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
1073 bacpy(&cp.bdaddr, &e->data.bdaddr);
1074 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
1080 /* Passive scanning */
1081 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1082 hci_req_add_le_scan_disable(req);
1087 static int clean_up_hci_state(struct hci_dev *hdev)
1089 struct hci_request req;
1090 struct hci_conn *conn;
1092 hci_req_init(&req, hdev);
1094 if (test_bit(HCI_ISCAN, &hdev->flags) ||
1095 test_bit(HCI_PSCAN, &hdev->flags)) {
1097 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1100 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
1101 disable_advertising(&req);
1103 hci_stop_discovery(&req);
1105 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1106 struct hci_cp_disconnect dc;
1107 struct hci_cp_reject_conn_req rej;
1109 switch (conn->state) {
1112 dc.handle = cpu_to_le16(conn->handle);
1113 dc.reason = 0x15; /* Terminated due to Power Off */
1114 hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1117 if (conn->type == LE_LINK)
1118 hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
1120 else if (conn->type == ACL_LINK)
1121 hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
1125 bacpy(&rej.bdaddr, &conn->dst);
1126 rej.reason = 0x15; /* Terminated due to Power Off */
1127 if (conn->type == ACL_LINK)
1128 hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
1130 else if (conn->type == SCO_LINK)
1131 hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
1137 return hci_req_run(&req, clean_up_hci_complete);
1140 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1143 struct mgmt_mode *cp = data;
1144 struct pending_cmd *cmd;
1147 BT_DBG("request for %s", hdev->name);
1149 if (cp->val != 0x00 && cp->val != 0x01)
1150 return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1151 MGMT_STATUS_INVALID_PARAMS);
1155 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
1156 err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1161 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1162 cancel_delayed_work(&hdev->power_off);
1165 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
1167 err = mgmt_powered(hdev, 1);
1172 if (!!cp->val == hdev_is_powered(hdev)) {
1173 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1177 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1184 queue_work(hdev->req_workqueue, &hdev->power_on);
1187 /* Disconnect connections, stop scans, etc */
1188 err = clean_up_hci_state(hdev);
1190 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1191 HCI_POWER_OFF_TIMEOUT);
1193 /* ENODATA means there were no HCI commands queued */
1194 if (err == -ENODATA) {
1195 cancel_delayed_work(&hdev->power_off);
1196 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1202 hci_dev_unlock(hdev);
1206 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
1207 struct sock *skip_sk)
1209 struct sk_buff *skb;
1210 struct mgmt_hdr *hdr;
1212 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
1216 hdr = (void *) skb_put(skb, sizeof(*hdr));
1217 hdr->opcode = cpu_to_le16(event);
1219 hdr->index = cpu_to_le16(hdev->id);
1221 hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
1222 hdr->len = cpu_to_le16(data_len);
1225 memcpy(skb_put(skb, data_len), data, data_len);
1228 __net_timestamp(skb);
1230 hci_send_to_control(skb, skip_sk);
1236 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1240 ev = cpu_to_le32(get_current_settings(hdev));
1242 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
1247 struct hci_dev *hdev;
1251 static void settings_rsp(struct pending_cmd *cmd, void *data)
1253 struct cmd_lookup *match = data;
1255 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1257 list_del(&cmd->list);
1259 if (match->sk == NULL) {
1260 match->sk = cmd->sk;
1261 sock_hold(match->sk);
1264 mgmt_pending_free(cmd);
1267 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
1271 cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1272 mgmt_pending_remove(cmd);
1275 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1277 if (!lmp_bredr_capable(hdev))
1278 return MGMT_STATUS_NOT_SUPPORTED;
1279 else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1280 return MGMT_STATUS_REJECTED;
1282 return MGMT_STATUS_SUCCESS;
1285 static u8 mgmt_le_support(struct hci_dev *hdev)
1287 if (!lmp_le_capable(hdev))
1288 return MGMT_STATUS_NOT_SUPPORTED;
1289 else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1290 return MGMT_STATUS_REJECTED;
1292 return MGMT_STATUS_SUCCESS;
1295 static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
1297 struct pending_cmd *cmd;
1298 struct mgmt_mode *cp;
1299 struct hci_request req;
1302 BT_DBG("status 0x%02x", status);
1306 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
1311 u8 mgmt_err = mgmt_status(status);
1312 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1313 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1319 changed = !test_and_set_bit(HCI_DISCOVERABLE,
1322 if (hdev->discov_timeout > 0) {
1323 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1324 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1328 changed = test_and_clear_bit(HCI_DISCOVERABLE,
1332 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1335 new_settings(hdev, cmd->sk);
1337 /* When the discoverable mode gets changed, make sure
1338 * that class of device has the limited discoverable
1339 * bit correctly set.
1341 hci_req_init(&req, hdev);
1343 hci_req_run(&req, NULL);
1346 mgmt_pending_remove(cmd);
1349 hci_dev_unlock(hdev);
1352 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1355 struct mgmt_cp_set_discoverable *cp = data;
1356 struct pending_cmd *cmd;
1357 struct hci_request req;
1362 BT_DBG("request for %s", hdev->name);
1364 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1365 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1366 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1367 MGMT_STATUS_REJECTED);
1369 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1370 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1371 MGMT_STATUS_INVALID_PARAMS);
1373 timeout = __le16_to_cpu(cp->timeout);
1375 /* Disabling discoverable requires that no timeout is set,
1376 * and enabling limited discoverable requires a timeout.
1378 if ((cp->val == 0x00 && timeout > 0) ||
1379 (cp->val == 0x02 && timeout == 0))
1380 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1381 MGMT_STATUS_INVALID_PARAMS);
1385 if (!hdev_is_powered(hdev) && timeout > 0) {
1386 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1387 MGMT_STATUS_NOT_POWERED);
1391 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1392 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1393 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1398 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
1399 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1400 MGMT_STATUS_REJECTED);
1404 if (!hdev_is_powered(hdev)) {
1405 bool changed = false;
1407 /* Setting limited discoverable when powered off is
1408 * not a valid operation since it requires a timeout
1409 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1411 if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
1412 change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1416 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1421 err = new_settings(hdev, sk);
1426 /* If the current mode is the same, then just update the timeout
1427 * value with the new value. And if only the timeout gets updated,
1428 * then no need for any HCI transactions.
1430 if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
1431 (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
1432 &hdev->dev_flags)) {
1433 cancel_delayed_work(&hdev->discov_off);
1434 hdev->discov_timeout = timeout;
1436 if (cp->val && hdev->discov_timeout > 0) {
1437 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1438 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1442 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1446 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1452 /* Cancel any potential discoverable timeout that might be
1453 * still active and store new timeout value. The arming of
1454 * the timeout happens in the complete handler.
1456 cancel_delayed_work(&hdev->discov_off);
1457 hdev->discov_timeout = timeout;
1459 /* Limited discoverable mode */
1460 if (cp->val == 0x02)
1461 set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1463 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1465 hci_req_init(&req, hdev);
1467 /* The procedure for LE-only controllers is much simpler - just
1468 * update the advertising data.
1470 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1476 struct hci_cp_write_current_iac_lap hci_cp;
1478 if (cp->val == 0x02) {
1479 /* Limited discoverable mode */
1480 hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
1481 hci_cp.iac_lap[0] = 0x00; /* LIAC */
1482 hci_cp.iac_lap[1] = 0x8b;
1483 hci_cp.iac_lap[2] = 0x9e;
1484 hci_cp.iac_lap[3] = 0x33; /* GIAC */
1485 hci_cp.iac_lap[4] = 0x8b;
1486 hci_cp.iac_lap[5] = 0x9e;
1488 /* General discoverable mode */
1490 hci_cp.iac_lap[0] = 0x33; /* GIAC */
1491 hci_cp.iac_lap[1] = 0x8b;
1492 hci_cp.iac_lap[2] = 0x9e;
1495 hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1496 (hci_cp.num_iac * 3) + 1, &hci_cp);
1498 scan |= SCAN_INQUIRY;
1500 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1503 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1506 update_adv_data(&req);
1508 err = hci_req_run(&req, set_discoverable_complete);
1510 mgmt_pending_remove(cmd);
1513 hci_dev_unlock(hdev);
/* Queue HCI commands on @req that toggle "fast connectable" page-scan
 * parameters: interlaced scan with a 160 ms interval when enabled, the
 * standard scan with the default 1.28 s interval otherwise.  Commands are
 * only queued when the requested values differ from the controller's
 * current settings, so re-running this is cheap.
 * NOTE(review): this extract elides interior lines (braces/else paths);
 * comments describe only what the visible lines establish.
 */
1517 static void write_fast_connectable(struct hci_request *req, bool enable)
1519 	struct hci_dev *hdev = req->hdev;
1520 	struct hci_cp_write_page_scan_activity acp;
/* Page scan is a BR/EDR concept; nothing to do on LE-only setups. */
1523 	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
/* Interlaced page scan requires Bluetooth 1.2 or later. */
1526 	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1530 	type = PAGE_SCAN_TYPE_INTERLACED;
1532 	/* 160 msec page scan interval */
1533 	acp.interval = cpu_to_le16(0x0100);
1535 	type = PAGE_SCAN_TYPE_STANDARD;	/* default */
1537 	/* default 1.28 sec page scan */
1538 	acp.interval = cpu_to_le16(0x0800);
/* Page scan window is the same (11.25 ms) in both modes. */
1541 	acp.window = cpu_to_le16(0x0012);
/* Only send the HCI command if interval or window actually changes. */
1543 	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1544 	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
1545 		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
/* Likewise, only update the scan type when it differs. */
1548 	if (hdev->page_scan_type != type)
1549 		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
/* Request-completion callback for MGMT_OP_SET_CONNECTABLE.  On HCI error
 * it reports the mapped mgmt status to the waiting socket; on success it
 * commits HCI_CONNECTABLE in dev_flags, answers the pending command and
 * broadcasts New Settings if the flag actually changed.
 */
1552 static void set_connectable_complete(struct hci_dev *hdev, u8 status)
1554 	struct pending_cmd *cmd;
1555 	struct mgmt_mode *cp;
1558 	BT_DBG("status 0x%02x", status);
/* The pending command may already be gone (e.g. socket closed). */
1562 	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1567 		u8 mgmt_err = mgmt_status(status);
1568 		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
/* test_and_* returns the previous bit value, so "changed" is true only
 * when the flag transitions. */
1574 		changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1576 		changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1578 	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
/* Broadcast to all other mgmt sockets, skipping the requester. */
1581 		new_settings(hdev, cmd->sk);
1584 	mgmt_pending_remove(cmd);
1587 	hci_dev_unlock(hdev);
/* Update the CONNECTABLE setting purely in dev_flags (no HCI traffic),
 * used when the adapter is powered off or no HCI command was needed.
 * Clearing connectable also clears HCI_DISCOVERABLE, since discoverable
 * implies connectable.  Replies to @sk and, on change, emits New Settings.
 */
1590 static int set_connectable_update_settings(struct hci_dev *hdev,
1591 					   struct sock *sk, u8 val)
1593 	bool changed = false;
/* !!val normalizes 0x01 to bool for comparison with the current bit. */
1596 	if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1600 		set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1602 		clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1603 		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1606 	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1611 	return new_settings(hdev, sk);
/* Handler for MGMT_OP_SET_CONNECTABLE.  Validates the request, and when
 * the adapter is powered queues the HCI commands (scan enable and/or
 * advertising changes) needed to make the controller (non-)connectable;
 * the final state is committed in set_connectable_complete().  When
 * powered off, only dev_flags are updated.
 * NOTE(review): interior lines are elided in this extract; comments are
 * limited to what the visible lines show.
 */
1616 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1619 	struct mgmt_mode *cp = data;
1620 	struct pending_cmd *cmd;
1621 	struct hci_request req;
1625 	BT_DBG("request for %s", hdev->name);
/* Reject if neither transport (LE nor BR/EDR) is enabled. */
1627 	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1628 	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1629 		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1630 				  MGMT_STATUS_REJECTED);
/* Only boolean values are valid for this command. */
1632 	if (cp->val != 0x00 && cp->val != 0x01)
1633 		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1634 				  MGMT_STATUS_INVALID_PARAMS);
/* Powered off: settings-only path, no HCI traffic possible. */
1638 	if (!hdev_is_powered(hdev)) {
1639 		err = set_connectable_update_settings(hdev, sk, cp->val);
/* Serialize against concurrent discoverable/connectable changes. */
1643 	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1644 	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1645 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1650 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1656 	hci_req_init(&req, hdev);
1658 	/* If BR/EDR is not enabled and we disable advertising as a
1659 	 * by-product of disabling connectable, we need to update the
1660 	 * advertising flags.
1662 	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1664 			clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1665 			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1667 		update_adv_data(&req);
/* BR/EDR path: only touch scan enable if page scan state must change. */
1668 	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
/* Dropping connectable also ends any timed discoverable window. */
1674 			if (test_bit(HCI_ISCAN, &hdev->flags) &&
1675 			    hdev->discov_timeout > 0)
1676 				cancel_delayed_work(&hdev->discov_off);
1679 		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1682 	/* If we're going from non-connectable to connectable or
1683 	 * vice-versa when fast connectable is enabled ensure that fast
1684 	 * connectable gets disabled. write_fast_connectable won't do
1685 	 * anything if the page scan parameters are already what they
1688 	if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
1689 		write_fast_connectable(&req, false);
/* Re-program advertising so its connectable property matches, but only
 * when no LE links exist (changing adv with links up is not allowed). */
1691 	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) &&
1692 	    hci_conn_num(hdev, LE_LINK) == 0) {
1693 		disable_advertising(&req);
1694 		enable_advertising(&req);
1697 	err = hci_req_run(&req, set_connectable_complete);
1699 		mgmt_pending_remove(cmd);
/* -ENODATA: the request queued no commands; fall back to the
 * settings-only update path. */
1700 		if (err == -ENODATA)
1701 			err = set_connectable_update_settings(hdev, sk,
1707 	hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_PAIRABLE.  Purely a host-side flag: toggles
 * HCI_PAIRABLE in dev_flags (no HCI command involved), replies with the
 * current settings and emits New Settings when the flag changed.
 */
1711 static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1714 	struct mgmt_mode *cp = data;
1718 	BT_DBG("request for %s", hdev->name);
/* Only boolean values are accepted. */
1720 	if (cp->val != 0x00 && cp->val != 0x01)
1721 		return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1722 				  MGMT_STATUS_INVALID_PARAMS);
/* test_and_* return the old value, so "changed" tracks transitions. */
1727 		changed = !test_and_set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1729 		changed = test_and_clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1731 	err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1736 		err = new_settings(hdev, sk);
1739 	hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_LINK_SECURITY.  When powered, issues HCI
 * Write Authentication Enable (completion handled elsewhere); when
 * powered off, just records the desired state in HCI_LINK_SECURITY.
 * Requires BR/EDR support.
 */
1743 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1746 	struct mgmt_mode *cp = data;
1747 	struct pending_cmd *cmd;
1751 	BT_DBG("request for %s", hdev->name);
/* Link-level security is a BR/EDR feature. */
1753 	status = mgmt_bredr_support(hdev);
1755 		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1758 	if (cp->val != 0x00 && cp->val != 0x01)
1759 		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1760 				  MGMT_STATUS_INVALID_PARAMS);
/* Powered off: flag-only update, notify on change. */
1764 	if (!hdev_is_powered(hdev)) {
1765 		bool changed = false;
1767 		if (!!cp->val != test_bit(HCI_LINK_SECURITY,
1768 					  &hdev->dev_flags)) {
1769 			change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
1773 		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1778 			err = new_settings(hdev, sk);
/* Only one link-security change may be in flight at a time. */
1783 	if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1784 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
/* Controller already in the requested auth state: answer directly. */
1791 	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1792 		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1796 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1802 	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1804 		mgmt_pending_remove(cmd);
1809 	hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_SSP (Secure Simple Pairing).  When powered,
 * sends HCI Write Simple Pairing Mode; when powered off, toggles
 * HCI_SSP_ENABLED directly.  Disabling SSP also drops High Speed, since
 * HS depends on SSP.  Requires BR/EDR and controller SSP support.
 */
1813 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1815 	struct mgmt_mode *cp = data;
1816 	struct pending_cmd *cmd;
1820 	BT_DBG("request for %s", hdev->name);
1822 	status = mgmt_bredr_support(hdev);
1824 		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
/* The controller itself must advertise SSP capability. */
1826 	if (!lmp_ssp_capable(hdev))
1827 		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1828 				  MGMT_STATUS_NOT_SUPPORTED);
1830 	if (cp->val != 0x00 && cp->val != 0x01)
1831 		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1832 				  MGMT_STATUS_INVALID_PARAMS);
/* Powered off: flag-only path; disabling SSP also clears HS. */
1836 	if (!hdev_is_powered(hdev)) {
1840 			changed = !test_and_set_bit(HCI_SSP_ENABLED,
1843 			changed = test_and_clear_bit(HCI_SSP_ENABLED,
1846 				changed = test_and_clear_bit(HCI_HS_ENABLED,
1849 				clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1852 		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1857 			err = new_settings(hdev, sk);
/* Serialize against in-flight SSP or HS changes. */
1862 	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
1863 	    mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
1864 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
/* Already in the requested mode: reply without HCI traffic. */
1869 	if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1870 		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1874 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
1880 	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
1882 		mgmt_pending_remove(cmd);
1887 	hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_HS (High Speed / AMP).  Host-side flag only:
 * toggles HCI_HS_ENABLED without HCI traffic.  Requires BR/EDR, SSP
 * capability and SSP currently enabled.
 */
1891 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1893 	struct mgmt_mode *cp = data;
1898 	BT_DBG("request for %s", hdev->name);
1900 	status = mgmt_bredr_support(hdev);
1902 		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
1904 	if (!lmp_ssp_capable(hdev))
1905 		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1906 				  MGMT_STATUS_NOT_SUPPORTED);
/* HS secure channels rely on SSP; refuse while SSP is disabled. */
1908 	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
1909 		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1910 				  MGMT_STATUS_REJECTED);
1912 	if (cp->val != 0x00 && cp->val != 0x01)
1913 		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1914 				  MGMT_STATUS_INVALID_PARAMS);
1919 		changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
/* NOTE(review): elided context — this REJECTED branch appears to belong
 * to a path visible only partially here; confirm against full source. */
1921 		if (hdev_is_powered(hdev)) {
1922 			err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1923 					 MGMT_STATUS_REJECTED);
1927 		changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1930 	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
1935 		err = new_settings(hdev, sk);
1938 	hci_dev_unlock(hdev);
/* Completion callback for the LE enable/disable request issued from
 * set_le().  On error, fails all pending SET_LE commands with the mapped
 * status; on success, answers them and broadcasts New Settings, then
 * refreshes advertising and scan-response data if LE is now enabled.
 */
1942 static void le_enable_complete(struct hci_dev *hdev, u8 status)
1944 	struct cmd_lookup match = { NULL, hdev };
1947 		u8 mgmt_err = mgmt_status(status);
1949 		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
1954 	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
1956 		new_settings(hdev, match.sk);
1961 	/* Make sure the controller has a good default for
1962 	 * advertising data. Restrict the update to when LE
1963 	 * has actually been enabled. During power on, the
1964 	 * update in powered_update_hci will take care of it.
1966 	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1967 		struct hci_request req;
1971 		hci_req_init(&req, hdev);
1972 		update_adv_data(&req);
1973 		update_scan_rsp_data(&req);
/* Fire-and-forget: no completion callback needed for the refresh. */
1974 		hci_req_run(&req, NULL);
1976 	hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_LE.  Validates support and parameters, then
 * either updates HCI_LE_ENABLED directly (powered off, or host LE state
 * already matches) or issues HCI Write LE Host Supported, completing in
 * le_enable_complete().  LE-only controllers cannot toggle LE off.
 */
1980 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1982 	struct mgmt_mode *cp = data;
1983 	struct hci_cp_write_le_host_supported hci_cp;
1984 	struct pending_cmd *cmd;
1985 	struct hci_request req;
1989 	BT_DBG("request for %s", hdev->name);
1991 	if (!lmp_le_capable(hdev))
1992 		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1993 				  MGMT_STATUS_NOT_SUPPORTED);
1995 	if (cp->val != 0x00 && cp->val != 0x01)
1996 		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1997 				  MGMT_STATUS_INVALID_PARAMS);
1999 	/* LE-only devices do not allow toggling LE on/off */
2000 	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
2001 		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2002 				  MGMT_STATUS_REJECTED);
/* "enabled" reflects what the controller's host features already say. */
2007 	enabled = lmp_host_le_capable(hdev);
/* Flag-only path: powered off, or controller already matches. */
2009 	if (!hdev_is_powered(hdev) || val == enabled) {
2010 		bool changed = false;
2012 		if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
2013 			change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
/* Disabling LE implicitly drops LE advertising as well. */
2017 		if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
2018 			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
2022 		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2027 			err = new_settings(hdev, sk);
/* Serialize against concurrent LE or advertising changes. */
2032 	if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
2033 	    mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2034 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2039 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2045 	hci_req_init(&req, hdev);
2047 	memset(&hci_cp, 0, sizeof(hci_cp));
/* Simultaneous LE+BR/EDR support mirrors the controller capability. */
2051 		hci_cp.simul = lmp_le_br_capable(hdev);
/* Advertising must be stopped before disabling LE host support. */
2053 		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
2054 			disable_advertising(&req);
2057 	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
2060 	err = hci_req_run(&req, le_enable_complete);
2062 		mgmt_pending_remove(cmd);
2065 	hci_dev_unlock(hdev);
2069 /* This is a helper function to test for pending mgmt commands that can
2070  * cause CoD or EIR HCI commands. We can only allow one such pending
2071  * mgmt command at a time since otherwise we cannot easily track what
2072  * the current values are, will be, and based on that calculate if a new
2073  * HCI command needs to be sent and if yes with what value.
/* Returns true when any pending mgmt command could modify the Class of
 * Device or EIR data; callers use this to reject conflicting requests. */
2075 static bool pending_eir_or_class(struct hci_dev *hdev)
2077 	struct pending_cmd *cmd;
/* Scan the per-device pending-command list for the conflicting opcodes. */
2079 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2080 		switch (cmd->opcode) {
2081 		case MGMT_OP_ADD_UUID:
2082 		case MGMT_OP_REMOVE_UUID:
2083 		case MGMT_OP_SET_DEV_CLASS:
2084 		case MGMT_OP_SET_POWERED:
/* The Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; 16- and 32-bit UUIDs are aliases of it. */
2092 static const u8 bluetooth_base_uuid[] = {
2093 			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2094 			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Classify a 128-bit little-endian UUID by size: if its low 12 bytes do
 * not match the Bluetooth Base UUID it is a true 128-bit UUID; otherwise
 * the top 4 bytes decide between the 16- and 32-bit shortened forms. */
2097 static u8 get_uuid_size(const u8 *uuid)
/* Non-base-UUID tail means the UUID cannot be shortened. */
2101 	if (memcmp(uuid, bluetooth_base_uuid, 12))
2104 	val = get_unaligned_le32(&uuid[12]);
/* Shared completion helper for the UUID/class commands: finds the
 * pending command for @mgmt_op, replies with the mapped status plus the
 * current 3-byte device class, and removes the pending entry. */
2111 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2113 	struct pending_cmd *cmd;
2117 	cmd = mgmt_pending_find(mgmt_op, hdev);
/* The reply payload is always the current Class of Device (3 bytes). */
2121 	cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
2122 		     hdev->dev_class, 3);
2124 	mgmt_pending_remove(cmd);
2127 	hci_dev_unlock(hdev);
/* HCI request callback for add_uuid(); delegates to the shared
 * class/EIR completion helper. */
2130 static void add_uuid_complete(struct hci_dev *hdev, u8 status)
2132 	BT_DBG("status 0x%02x", status);
2134 	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
/* Handler for MGMT_OP_ADD_UUID.  Appends the UUID to hdev->uuids and
 * queues the EIR/class update; if no HCI commands were needed
 * (-ENODATA) it replies immediately with the current device class,
 * otherwise the reply comes from add_uuid_complete(). */
2137 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2139 	struct mgmt_cp_add_uuid *cp = data;
2140 	struct pending_cmd *cmd;
2141 	struct hci_request req;
2142 	struct bt_uuid *uuid;
2145 	BT_DBG("request for %s", hdev->name);
/* Only one EIR/class-affecting command may be pending at a time. */
2149 	if (pending_eir_or_class(hdev)) {
2150 		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2155 	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2161 	memcpy(uuid->uuid, cp->uuid, 16);
2162 	uuid->svc_hint = cp->svc_hint;
/* Pre-compute the shortened-UUID size for later EIR generation. */
2163 	uuid->size = get_uuid_size(cp->uuid);
2165 	list_add_tail(&uuid->list, &hdev->uuids);
2167 	hci_req_init(&req, hdev);
2172 	err = hci_req_run(&req, add_uuid_complete);
/* -ENODATA means the request was empty; any other error is fatal. */
2174 		if (err != -ENODATA)
2177 		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2178 				   hdev->dev_class, 3);
2182 	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2191 	hci_dev_unlock(hdev);
/* Arm the service cache: when the adapter is powered, set
 * HCI_SERVICE_CACHE and schedule the delayed flush work.  Returns
 * whether caching is in effect (visible lines; tail elided). */
2195 static bool enable_service_cache(struct hci_dev *hdev)
2197 	if (!hdev_is_powered(hdev))
/* Only schedule the flush when the flag was not already set. */
2200 	if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2201 		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
/* HCI request callback for remove_uuid(); delegates to the shared
 * class/EIR completion helper. */
2209 static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
2211 	BT_DBG("status 0x%02x", status);
2213 	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
/* Handler for MGMT_OP_REMOVE_UUID.  An all-zero UUID clears the whole
 * list (possibly just arming the service cache); otherwise every
 * matching entry is unlinked.  The EIR/class update is then queued,
 * with the same -ENODATA immediate-reply fallback as add_uuid(). */
2216 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2219 	struct mgmt_cp_remove_uuid *cp = data;
2220 	struct pending_cmd *cmd;
2221 	struct bt_uuid *match, *tmp;
/* Wildcard: all-zero UUID requests removal of every registered UUID. */
2222 	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2223 	struct hci_request req;
2226 	BT_DBG("request for %s", hdev->name);
/* Only one EIR/class-affecting command may be pending at a time. */
2230 	if (pending_eir_or_class(hdev)) {
2231 		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2236 	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2237 		hci_uuids_clear(hdev);
/* If the service cache got armed, the EIR flush is deferred. */
2239 		if (enable_service_cache(hdev)) {
2240 			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2241 					   0, hdev->dev_class, 3);
/* Remove all list entries whose UUID matches the request. */
2250 	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2251 		if (memcmp(match->uuid, cp->uuid, 16) != 0)
2254 		list_del(&match->list);
/* No entry matched: the UUID was never registered. */
2260 		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2261 				 MGMT_STATUS_INVALID_PARAMS);
2266 	hci_req_init(&req, hdev);
2271 	err = hci_req_run(&req, remove_uuid_complete);
2273 		if (err != -ENODATA)
2276 		err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2277 				   hdev->dev_class, 3);
2281 	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2290 	hci_dev_unlock(hdev);
/* HCI request callback for set_dev_class(); delegates to the shared
 * class/EIR completion helper. */
2294 static void set_class_complete(struct hci_dev *hdev, u8 status)
2296 	BT_DBG("status 0x%02x", status);
2298 	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* Handler for MGMT_OP_SET_DEV_CLASS.  Stores the requested major/minor
 * class and, when powered, queues the HCI class-of-device update
 * (flushing any pending service cache first).  Powered off, or when no
 * HCI commands were needed, replies immediately with the stored class. */
2301 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2304 	struct mgmt_cp_set_dev_class *cp = data;
2305 	struct pending_cmd *cmd;
2306 	struct hci_request req;
2309 	BT_DBG("request for %s", hdev->name);
/* Class of Device only exists on BR/EDR. */
2311 	if (!lmp_bredr_capable(hdev))
2312 		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2313 				  MGMT_STATUS_NOT_SUPPORTED);
2317 	if (pending_eir_or_class(hdev)) {
2318 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
/* Low 2 bits of minor and high 3 bits of major are reserved. */
2323 	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2324 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2325 				 MGMT_STATUS_INVALID_PARAMS);
2329 	hdev->major_class = cp->major;
2330 	hdev->minor_class = cp->minor;
/* Powered off: remember the values, reply with the current class. */
2332 	if (!hdev_is_powered(hdev)) {
2333 		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2334 				   hdev->dev_class, 3);
2338 	hci_req_init(&req, hdev);
/* Flush a pending service-cache timer synchronously; the lock must be
 * dropped because the work item itself takes it. */
2340 	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2341 		hci_dev_unlock(hdev);
2342 		cancel_delayed_work_sync(&hdev->service_cache);
2349 	err = hci_req_run(&req, set_class_complete);
2351 		if (err != -ENODATA)
2354 		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2355 				   hdev->dev_class, 3);
2359 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2368 	hci_dev_unlock(hdev);
/* Handler for MGMT_OP_LOAD_LINK_KEYS.  Validates the variable-length
 * key list (length must match key_count exactly), replaces the stored
 * BR/EDR link keys wholesale, and updates the debug-keys setting. */
2372 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2375 	struct mgmt_cp_load_link_keys *cp = data;
2376 	u16 key_count, expected_len;
2380 	BT_DBG("request for %s", hdev->name);
/* Link keys are a BR/EDR concept (LE uses LTKs). */
2382 	if (!lmp_bredr_capable(hdev))
2383 		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2384 				  MGMT_STATUS_NOT_SUPPORTED);
2386 	key_count = __le16_to_cpu(cp->key_count);
/* The payload length must exactly match the declared key count. */
2388 	expected_len = sizeof(*cp) + key_count *
2389 					sizeof(struct mgmt_link_key_info);
2390 	if (expected_len != len) {
2391 		BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2393 		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2394 				  MGMT_STATUS_INVALID_PARAMS);
2397 	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2398 		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2399 				  MGMT_STATUS_INVALID_PARAMS);
2401 	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
/* Validate every entry before touching the existing key store. */
2404 	for (i = 0; i < key_count; i++) {
2405 		struct mgmt_link_key_info *key = &cp->keys[i];
/* 0x08 is the highest defined link-key type. */
2407 		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2408 			return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2409 					  MGMT_STATUS_INVALID_PARAMS);
/* The load replaces the whole key store, not a merge. */
2414 	hci_link_keys_clear(hdev);
2417 		changed = !test_and_set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
2419 		changed = test_and_clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
2422 		new_settings(hdev, NULL);
2424 	for (i = 0; i < key_count; i++) {
2425 		struct mgmt_link_key_info *key = &cp->keys[i];
2427 		hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
2428 				 key->type, key->pin_len);
2431 	cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2433 	hci_dev_unlock(hdev);
/* Emit the Device Unpaired mgmt event for @bdaddr/@addr_type to all mgmt
 * sockets except @skip_sk (normally the socket that requested the
 * unpair, which gets a command reply instead). */
2438 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2439 			   u8 addr_type, struct sock *skip_sk)
2441 	struct mgmt_ev_device_unpaired ev;
2443 	bacpy(&ev.addr.bdaddr, bdaddr);
2444 	ev.addr.type = addr_type;
2446 	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* Handler for MGMT_OP_UNPAIR_DEVICE.  Removes the stored keys for the
 * address (link key for BR/EDR; IRK, connection params and LTK for LE)
 * and optionally disconnects an existing link; when a disconnect is
 * issued, the mgmt reply is deferred via a pending command. */
2450 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2453 	struct mgmt_cp_unpair_device *cp = data;
2454 	struct mgmt_rp_unpair_device rp;
2455 	struct hci_cp_disconnect dc;
2456 	struct pending_cmd *cmd;
2457 	struct hci_conn *conn;
/* The reply always echoes the target address. */
2460 	memset(&rp, 0, sizeof(rp));
2461 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2462 	rp.addr.type = cp->addr.type;
2464 	if (!bdaddr_type_is_valid(cp->addr.type))
2465 		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2466 				    MGMT_STATUS_INVALID_PARAMS,
2469 	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2470 		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2471 				    MGMT_STATUS_INVALID_PARAMS,
2476 	if (!hdev_is_powered(hdev)) {
2477 		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2478 				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* BR/EDR: a single stored link key is all there is to remove. */
2482 	if (cp->addr.type == BDADDR_BREDR) {
2483 		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
/* LE: map the mgmt address type to the HCI LE address type. */
2487 		if (cp->addr.type == BDADDR_LE_PUBLIC)
2488 			addr_type = ADDR_LE_DEV_PUBLIC;
2490 			addr_type = ADDR_LE_DEV_RANDOM;
2492 		hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
2494 		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
2496 		err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
/* Key removal failed => the device was never paired. */
2500 		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2501 				   MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
2505 	if (cp->disconnect) {
2506 		if (cp->addr.type == BDADDR_BREDR)
2507 			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2510 			conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
/* No live connection: reply now and broadcast Device Unpaired. */
2517 		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2519 		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2523 	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2530 	dc.handle = cpu_to_le16(conn->handle);
2531 	dc.reason = 0x13; /* Remote User Terminated Connection */
2532 	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2534 		mgmt_pending_remove(cmd);
2537 	hci_dev_unlock(hdev);
/* Handler for MGMT_OP_DISCONNECT.  Looks up the live ACL or LE link for
 * the given address and issues HCI Disconnect with "remote user
 * terminated"; the mgmt reply is deferred until the disconnect event
 * resolves the pending command. */
2541 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2544 	struct mgmt_cp_disconnect *cp = data;
2545 	struct mgmt_rp_disconnect rp;
2546 	struct hci_cp_disconnect dc;
2547 	struct pending_cmd *cmd;
2548 	struct hci_conn *conn;
/* The reply always echoes the target address. */
2553 	memset(&rp, 0, sizeof(rp));
2554 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2555 	rp.addr.type = cp->addr.type;
2557 	if (!bdaddr_type_is_valid(cp->addr.type))
2558 		return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2559 				    MGMT_STATUS_INVALID_PARAMS,
2564 	if (!test_bit(HCI_UP, &hdev->flags)) {
2565 		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2566 				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Only one disconnect command may be in flight per adapter. */
2570 	if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
2571 		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2572 				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
2576 	if (cp->addr.type == BDADDR_BREDR)
2577 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2580 		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
/* BT_OPEN/BT_CLOSED connections are not established links. */
2582 	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2583 		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2584 				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
2588 	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2594 	dc.handle = cpu_to_le16(conn->handle);
2595 	dc.reason = HCI_ERROR_REMOTE_USER_TERM;
2597 	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2599 		mgmt_pending_remove(cmd);
2602 	hci_dev_unlock(hdev);
/* Translate an HCI link type + LE address type pair into the mgmt API
 * BDADDR_* address type used on the wire toward userspace.
 * NOTE(review): case labels between the two switches are elided in this
 * extract; the visible mapping covers LE public/random and BR/EDR. */
2606 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2608 	switch (link_type) {
2610 		switch (addr_type) {
2611 		case ADDR_LE_DEV_PUBLIC:
2612 			return BDADDR_LE_PUBLIC;
2615 			/* Fallback to LE Random address type */
2616 			return BDADDR_LE_RANDOM;
2620 		/* Fallback to BR/EDR type */
2621 		return BDADDR_BREDR;
/* Handler for MGMT_OP_GET_CONNECTIONS.  Builds a reply listing the
 * address of every mgmt-visible connection; SCO/eSCO links are skipped
 * since they always shadow an ACL link to the same peer. */
2625 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2628 	struct mgmt_rp_get_connections *rp;
2638 	if (!hdev_is_powered(hdev)) {
2639 		err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2640 				 MGMT_STATUS_NOT_POWERED);
/* First pass: count the connections to size the reply buffer. */
2645 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2646 		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2650 	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2651 	rp = kmalloc(rp_len, GFP_KERNEL);
/* Second pass: fill in one mgmt_addr_info per qualifying link. */
2658 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2659 		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2661 		bacpy(&rp->addr[i].bdaddr, &c->dst);
2662 		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
/* SCO/eSCO audio links are not reported separately. */
2663 		if (c->type == SCO_LINK || c->type == ESCO_LINK)
2668 	rp->conn_count = cpu_to_le16(i);
2670 	/* Recalculate length in case of filtered SCO connections, etc */
2671 	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2673 	err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2679 	hci_dev_unlock(hdev);
/* Queue an HCI PIN Code Negative Reply for @cp->addr and register a
 * pending MGMT_OP_PIN_CODE_NEG_REPLY command so the eventual HCI
 * completion can answer the requesting socket. */
2683 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2684 				   struct mgmt_cp_pin_code_neg_reply *cp)
2686 	struct pending_cmd *cmd;
2689 	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
/* The HCI command payload is just the peer's BD_ADDR. */
2694 	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2695 			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2697 		mgmt_pending_remove(cmd);
/* Handler for MGMT_OP_PIN_CODE_REPLY.  Forwards the user-supplied PIN
 * to the controller via HCI PIN Code Reply.  If the pending security
 * level requires a 16-digit PIN and the supplied one is shorter, the
 * reply is converted into a negative reply instead. */
2702 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2705 	struct hci_conn *conn;
2706 	struct mgmt_cp_pin_code_reply *cp = data;
2707 	struct hci_cp_pin_code_reply reply;
2708 	struct pending_cmd *cmd;
2715 	if (!hdev_is_powered(hdev)) {
2716 		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2717 				 MGMT_STATUS_NOT_POWERED);
/* PIN pairing only happens on BR/EDR ACL links. */
2721 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2723 		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2724 				 MGMT_STATUS_NOT_CONNECTED);
/* High security requires a full 16-digit PIN; reject shorter ones by
 * sending a negative reply to the controller. */
2728 	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2729 		struct mgmt_cp_pin_code_neg_reply ncp;
2731 		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2733 		BT_ERR("PIN code is not 16 bytes long");
2735 		err = send_pin_code_neg_reply(sk, hdev, &ncp);
2737 			err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2738 					 MGMT_STATUS_INVALID_PARAMS);
2743 	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2749 	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2750 	reply.pin_len = cp->pin_len;
2751 	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2753 	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2755 		mgmt_pending_remove(cmd);
2758 	hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_IO_CAPABILITY.  Simply records the adapter's
 * IO capability for future pairings; no HCI command is needed and the
 * reply is sent synchronously. */
2762 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2765 	struct mgmt_cp_set_io_capability *cp = data;
/* KeyboardDisplay (0x04) is the highest defined IO capability. */
2769 	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2770 		return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
2771 				    MGMT_STATUS_INVALID_PARAMS, NULL, 0);
2775 	hdev->io_capability = cp->io_capability;
2777 	BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2778 	       hdev->io_capability);
2780 	hci_dev_unlock(hdev);
2782 	return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
/* Find the pending MGMT_OP_PAIR_DEVICE command whose user_data points
 * at @conn, i.e. the pairing attempt attached to this connection. */
2786 static struct pending_cmd *find_pairing(struct hci_conn *conn)
2788 	struct hci_dev *hdev = conn->hdev;
2789 	struct pending_cmd *cmd;
2791 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2792 		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
/* user_data was set to the hci_conn in pair_device(). */
2795 		if (cmd->user_data != conn)
/* Finish a Pair Device operation: send the mgmt reply with @status and
 * the peer address, detach all pairing callbacks from the connection,
 * drop the reference taken for pairing and free the pending command. */
2804 static void pairing_complete(struct pending_cmd *cmd, u8 status)
2806 	struct mgmt_rp_pair_device rp;
2807 	struct hci_conn *conn = cmd->user_data;
2809 	bacpy(&rp.addr.bdaddr, &conn->dst);
2810 	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2812 	cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
2815 	/* So we don't get further callbacks for this connection */
2816 	conn->connect_cfm_cb = NULL;
2817 	conn->security_cfm_cb = NULL;
2818 	conn->disconn_cfm_cb = NULL;
/* Drop the reference pair_device() held for the duration of pairing. */
2820 	hci_conn_drop(conn);
2822 	mgmt_pending_remove(cmd);
/* Entry point from the SMP layer when LE pairing ends: resolve the
 * matching pending Pair Device command with success or failure. */
2825 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2827 	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2828 	struct pending_cmd *cmd;
2830 	cmd = find_pairing(conn);
2832 		pairing_complete(cmd, status);
/* BR/EDR connection/security/disconnect callback used during pairing:
 * map the HCI status and complete the pending Pair Device command. */
2835 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2837 	struct pending_cmd *cmd;
2839 	BT_DBG("status %u", status);
2841 	cmd = find_pairing(conn);
2843 		BT_DBG("Unable to find a pending command");
2845 		pairing_complete(cmd, mgmt_status(status));
/* LE variant of pairing_complete_cb().  NOTE(review): an early-return
 * guard (around elided line 2854) is not visible in this extract —
 * presumably it skips the success case, leaving SMP to finish the
 * pairing; confirm against the full source. */
2848 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2850 	struct pending_cmd *cmd;
2852 	BT_DBG("status %u", status);
2857 	cmd = find_pairing(conn);
2859 		BT_DBG("Unable to find a pending command");
2861 		pairing_complete(cmd, mgmt_status(status));
/* Handler for MGMT_OP_PAIR_DEVICE.  Initiates an ACL or LE connection
 * with dedicated bonding, installs pairing callbacks on the connection
 * and parks a pending command whose reply is sent from
 * pairing_complete() once the procedure finishes. */
2864 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2867 	struct mgmt_cp_pair_device *cp = data;
2868 	struct mgmt_rp_pair_device rp;
2869 	struct pending_cmd *cmd;
2870 	u8 sec_level, auth_type;
2871 	struct hci_conn *conn;
/* The reply always echoes the target address. */
2876 	memset(&rp, 0, sizeof(rp));
2877 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2878 	rp.addr.type = cp->addr.type;
2880 	if (!bdaddr_type_is_valid(cp->addr.type))
2881 		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2882 				    MGMT_STATUS_INVALID_PARAMS,
2885 	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
2886 		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2887 				    MGMT_STATUS_INVALID_PARAMS,
2892 	if (!hdev_is_powered(hdev)) {
2893 		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2894 				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Pairing always requests dedicated bonding at medium security. */
2898 	sec_level = BT_SECURITY_MEDIUM;
2899 	auth_type = HCI_AT_DEDICATED_BONDING;
2901 	if (cp->addr.type == BDADDR_BREDR) {
2902 		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
2907 		/* Convert from L2CAP channel address type to HCI address type
2909 		if (cp->addr.type == BDADDR_LE_PUBLIC)
2910 			addr_type = ADDR_LE_DEV_PUBLIC;
2912 			addr_type = ADDR_LE_DEV_RANDOM;
2914 		conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
2915 				      sec_level, auth_type);
/* Connection setup failed: map -EBUSY, otherwise a generic failure. */
2921 		if (PTR_ERR(conn) == -EBUSY)
2922 			status = MGMT_STATUS_BUSY;
2924 			status = MGMT_STATUS_CONNECT_FAILED;
2926 		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
/* Existing connect callback means another pairing owns this link. */
2932 	if (conn->connect_cfm_cb) {
2933 		hci_conn_drop(conn);
2934 		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2935 				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
2939 	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
2942 		hci_conn_drop(conn);
2946 	/* For LE, just connecting isn't a proof that the pairing finished */
2947 	if (cp->addr.type == BDADDR_BREDR) {
2948 		conn->connect_cfm_cb = pairing_complete_cb;
2949 		conn->security_cfm_cb = pairing_complete_cb;
2950 		conn->disconn_cfm_cb = pairing_complete_cb;
2952 		conn->connect_cfm_cb = le_pairing_complete_cb;
2953 		conn->security_cfm_cb = le_pairing_complete_cb;
2954 		conn->disconn_cfm_cb = le_pairing_complete_cb;
2957 	conn->io_capability = cp->io_cap;
/* Link the pending command to the connection for find_pairing(). */
2958 	cmd->user_data = conn;
/* Already connected and secure enough: finish immediately. */
2960 	if (conn->state == BT_CONNECTED &&
2961 	    hci_conn_security(conn, sec_level, auth_type))
2962 		pairing_complete(cmd, 0);
2967 	hci_dev_unlock(hdev);
/* Handler for MGMT_OP_CANCEL_PAIR_DEVICE.  Aborts the in-flight Pair
 * Device command (it must exist and target the same address), failing
 * it with MGMT_STATUS_CANCELLED, then confirms the cancellation. */
2971 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2974 	struct mgmt_addr_info *addr = data;
2975 	struct pending_cmd *cmd;
2976 	struct hci_conn *conn;
2983 	if (!hdev_is_powered(hdev)) {
2984 		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2985 				 MGMT_STATUS_NOT_POWERED);
/* There must be a pairing in progress to cancel. */
2989 	cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
2991 		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2992 				 MGMT_STATUS_INVALID_PARAMS);
2996 	conn = cmd->user_data;
/* The cancel must target the same peer as the pending pairing. */
2998 	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
2999 		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3000 				 MGMT_STATUS_INVALID_PARAMS);
3004 	pairing_complete(cmd, MGMT_STATUS_CANCELLED);
3006 	err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3007 			   addr, sizeof(*addr));
3009 	hci_dev_unlock(hdev);
/* Common backend for all user confirm/passkey (negative) replies.
 * Routes LE addresses to SMP (replying synchronously) and BR/EDR
 * addresses to the corresponding HCI command @hci_op, with a pending
 * command so the HCI completion can answer @sk later. */
3013 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3014 			     struct mgmt_addr_info *addr, u16 mgmt_op,
3015 			     u16 hci_op, __le32 passkey)
3017 	struct pending_cmd *cmd;
3018 	struct hci_conn *conn;
3023 	if (!hdev_is_powered(hdev)) {
3024 		err = cmd_complete(sk, hdev->id, mgmt_op,
3025 				   MGMT_STATUS_NOT_POWERED, addr,
3030 	if (addr->type == BDADDR_BREDR)
3031 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3033 		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
3036 		err = cmd_complete(sk, hdev->id, mgmt_op,
3037 				   MGMT_STATUS_NOT_CONNECTED, addr,
3042 	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3043 		/* Continue with pairing via SMP. The hdev lock must be
3044 		 * released as SMP may try to reacquire it for crypto
3047 		hci_dev_unlock(hdev);
3048 		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
/* SMP handled it (or not): reply to userspace synchronously. */
3052 			err = cmd_complete(sk, hdev->id, mgmt_op,
3053 					   MGMT_STATUS_SUCCESS, addr,
3056 			err = cmd_complete(sk, hdev->id, mgmt_op,
3057 					   MGMT_STATUS_FAILED, addr,
3063 	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3069 	/* Continue with pairing via HCI */
/* Passkey replies carry bdaddr + passkey; all others only bdaddr. */
3070 	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3071 		struct hci_cp_user_passkey_reply cp;
3073 		bacpy(&cp.bdaddr, &addr->bdaddr);
3074 		cp.passkey = passkey;
3075 		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3077 		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3081 		mgmt_pending_remove(cmd);
3084 	hci_dev_unlock(hdev);
/* MGMT_OP_PIN_CODE_NEG_REPLY handler: thin wrapper that rejects a PIN
 * code request via the common user_pairing_resp() helper (no passkey).
 */
3088 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3089 void *data, u16 len)
3091 struct mgmt_cp_pin_code_neg_reply *cp = data;
3095 return user_pairing_resp(sk, hdev, &cp->addr,
3096 MGMT_OP_PIN_CODE_NEG_REPLY,
3097 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_REPLY handler: accept a user confirmation
 * request. Validates the exact command size before delegating to
 * user_pairing_resp() (no passkey involved).
 */
3100 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3103 struct mgmt_cp_user_confirm_reply *cp = data;
3107 if (len != sizeof(*cp))
3108 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3109 MGMT_STATUS_INVALID_PARAMS);
3111 return user_pairing_resp(sk, hdev, &cp->addr,
3112 MGMT_OP_USER_CONFIRM_REPLY,
3113 HCI_OP_USER_CONFIRM_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_NEG_REPLY handler: reject a user confirmation
 * request via the common user_pairing_resp() helper.
 */
3116 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3117 void *data, u16 len)
3119 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3123 return user_pairing_resp(sk, hdev, &cp->addr,
3124 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3125 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* MGMT_OP_USER_PASSKEY_REPLY handler: forward the user-entered passkey
 * to user_pairing_resp(); the only wrapper that passes a real passkey.
 */
3128 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3131 struct mgmt_cp_user_passkey_reply *cp = data;
3135 return user_pairing_resp(sk, hdev, &cp->addr,
3136 MGMT_OP_USER_PASSKEY_REPLY,
3137 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* MGMT_OP_USER_PASSKEY_NEG_REPLY handler: reject a passkey request via
 * the common user_pairing_resp() helper.
 */
3140 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3141 void *data, u16 len)
3143 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3147 return user_pairing_resp(sk, hdev, &cp->addr,
3148 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3149 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Queue an HCI Write Local Name command carrying hdev->dev_name onto
 * the given request; sent when the request is run.
 */
3152 static void update_name(struct hci_request *req)
3154 struct hci_dev *hdev = req->hdev;
3155 struct hci_cp_write_local_name cp;
3157 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3159 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
/* HCI request completion callback for set_local_name(): resolve the
 * pending MGMT_OP_SET_LOCAL_NAME command with either a status (on HCI
 * failure) or a command-complete event.
 */
3162 static void set_name_complete(struct hci_dev *hdev, u8 status)
3164 struct mgmt_cp_set_local_name *cp;
3165 struct pending_cmd *cmd;
3167 BT_DBG("status 0x%02x", status);
3171 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3178 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3179 mgmt_status(status));
3181 cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3184 mgmt_pending_remove(cmd);
3187 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LOCAL_NAME handler: update the adapter's friendly name
 * and short name. Short-circuits when nothing changed or the adapter
 * is powered off; otherwise issues the HCI name/EIR updates via an
 * hci_request completed by set_name_complete().
 */
3190 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3193 struct mgmt_cp_set_local_name *cp = data;
3194 struct pending_cmd *cmd;
3195 struct hci_request req;
3202 /* If the old values are the same as the new ones just return a
3203 * direct command complete event.
3205 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3206 !memcmp(hdev->short_name, cp->short_name,
3207 sizeof(hdev->short_name))) {
3208 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
/* Short name takes effect immediately regardless of power state. */
3213 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3215 if (!hdev_is_powered(hdev)) {
/* Powered off: just store the name and notify; no HCI traffic. */
3216 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3218 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3223 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
3229 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3235 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3237 hci_req_init(&req, hdev);
3239 if (lmp_bredr_capable(hdev)) {
3244 /* The name is stored in the scan response data and so
3245 * no need to update the advertising data here.
3247 if (lmp_le_capable(hdev))
3248 update_scan_rsp_data(&req);
3250 err = hci_req_run(&req, set_name_complete);
3252 mgmt_pending_remove(cmd);
3255 hci_dev_unlock(hdev);
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: ask the controller for its local
 * out-of-band pairing data. Uses the extended OOB HCI command when
 * Secure Connections is enabled, the legacy one otherwise.
 */
3259 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3260 void *data, u16 data_len)
3262 struct pending_cmd *cmd;
3265 BT_DBG("%s", hdev->name);
3269 if (!hdev_is_powered(hdev)) {
3270 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3271 MGMT_STATUS_NOT_POWERED);
/* OOB data only exists when the controller supports SSP. */
3275 if (!lmp_ssp_capable(hdev)) {
3276 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3277 MGMT_STATUS_NOT_SUPPORTED);
/* Only one outstanding OOB read at a time. */
3281 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3282 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3287 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
3293 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
3294 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
3297 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
3300 mgmt_pending_remove(cmd);
3303 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler: store out-of-band pairing data
 * received for a remote device. Accepts two command sizes: the legacy
 * (hash + randomizer) form and the extended (Secure Connections) form;
 * any other length is rejected as invalid.
 */
3307 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3308 void *data, u16 len)
3312 BT_DBG("%s ", hdev->name);
3316 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
3317 struct mgmt_cp_add_remote_oob_data *cp = data;
3320 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3321 cp->hash, cp->randomizer);
3323 status = MGMT_STATUS_FAILED;
3325 status = MGMT_STATUS_SUCCESS;
3327 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3328 status, &cp->addr, sizeof(cp->addr));
3329 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
3330 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
3333 err = hci_add_remote_oob_ext_data(hdev, &cp->addr.bdaddr,
3339 status = MGMT_STATUS_FAILED;
3341 status = MGMT_STATUS_SUCCESS;
3343 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3344 status, &cp->addr, sizeof(cp->addr));
3346 BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
3347 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3348 MGMT_STATUS_INVALID_PARAMS);
3351 hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA handler: delete previously stored OOB
 * data for the given remote address; a lookup miss maps to
 * INVALID_PARAMS.
 */
3355 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3356 void *data, u16 len)
3358 struct mgmt_cp_remove_remote_oob_data *cp = data;
3362 BT_DBG("%s", hdev->name);
3366 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
3368 status = MGMT_STATUS_INVALID_PARAMS;
3370 status = MGMT_STATUS_SUCCESS;
3372 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3373 status, &cp->addr, sizeof(cp->addr));
3375 hci_dev_unlock(hdev);
/* Fail a pending MGMT_OP_START_DISCOVERY: reset discovery state to
 * STOPPED and complete the pending command with the mapped HCI status,
 * echoing the requested discovery type back to userspace.
 */
3379 static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
3381 struct pending_cmd *cmd;
3385 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3387 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3391 type = hdev->discovery.type;
3393 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3394 &type, sizeof(type));
3395 mgmt_pending_remove(cmd);
/* HCI request completion callback for start_discovery(): on failure,
 * report it via mgmt_start_discovery_failed(); on success, enter the
 * FINDING state and, for LE/interleaved discovery, schedule the
 * delayed LE-scan-disable work after the computed timeout.
 */
3400 static void start_discovery_complete(struct hci_dev *hdev, u8 status)
3402 unsigned long timeout = 0;
3404 BT_DBG("status %d", status);
3408 mgmt_start_discovery_failed(hdev, status);
3409 hci_dev_unlock(hdev);
3414 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3415 hci_dev_unlock(hdev);
3417 switch (hdev->discovery.type) {
3418 case DISCOV_TYPE_LE:
3419 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3422 case DISCOV_TYPE_INTERLEAVED:
3423 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
/* BR/EDR inquiry has its own HCI-side timeout; nothing to queue. */
3426 case DISCOV_TYPE_BREDR:
3430 BT_ERR("Invalid discovery type %d", hdev->discovery.type);
3436 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout);
/* MGMT_OP_START_DISCOVERY handler: kick off device discovery of the
 * requested type (BR/EDR inquiry, LE scan, or interleaved). Builds a
 * single hci_request with the needed HCI commands and runs it with
 * start_discovery_complete() as callback.
 * NOTE(review): several error-path lines (gotos/closing braces) are
 * elided from this chunk; documented as-is.
 */
3439 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
3440 void *data, u16 len)
3442 struct mgmt_cp_start_discovery *cp = data;
3443 struct pending_cmd *cmd;
3444 struct hci_cp_le_set_scan_param param_cp;
3445 struct hci_cp_le_set_scan_enable enable_cp;
3446 struct hci_cp_inquiry inq_cp;
3447 struct hci_request req;
3448 /* General inquiry access code (GIAC) */
3449 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3450 u8 status, own_addr_type;
3453 BT_DBG("%s", hdev->name);
3457 if (!hdev_is_powered(hdev)) {
3458 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3459 MGMT_STATUS_NOT_POWERED);
/* Periodic inquiry and regular discovery are mutually exclusive. */
3463 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
3464 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3469 if (hdev->discovery.state != DISCOVERY_STOPPED) {
3470 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3475 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
3481 hdev->discovery.type = cp->type;
3483 hci_req_init(&req, hdev);
3485 switch (hdev->discovery.type) {
3486 case DISCOV_TYPE_BREDR:
3487 status = mgmt_bredr_support(hdev);
3489 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3491 mgmt_pending_remove(cmd);
3495 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
3496 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3498 mgmt_pending_remove(cmd);
3502 hci_inquiry_cache_flush(hdev);
3504 memset(&inq_cp, 0, sizeof(inq_cp));
3505 memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
3506 inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
3507 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
3510 case DISCOV_TYPE_LE:
3511 case DISCOV_TYPE_INTERLEAVED:
3512 status = mgmt_le_support(hdev);
3514 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3516 mgmt_pending_remove(cmd);
/* Interleaved discovery additionally needs BR/EDR enabled. */
3520 if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
3521 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
3522 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3523 MGMT_STATUS_NOT_SUPPORTED);
3524 mgmt_pending_remove(cmd);
/* Cannot scan while advertising. */
3528 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
3529 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3530 MGMT_STATUS_REJECTED);
3531 mgmt_pending_remove(cmd);
3535 /* If controller is scanning, it means the background scanning
3536 * is running. Thus, we should temporarily stop it in order to
3537 * set the discovery scanning parameters.
3539 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
3540 hci_req_add_le_scan_disable(&req);
3542 memset(&param_cp, 0, sizeof(param_cp));
3544 /* All active scans will be done with either a resolvable
3545 * private address (when privacy feature has been enabled)
3546 * or unresolvable private address.
3548 err = hci_update_random_address(&req, true, &own_addr_type);
3550 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3551 MGMT_STATUS_FAILED);
3552 mgmt_pending_remove(cmd);
3556 param_cp.type = LE_SCAN_ACTIVE;
3557 param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
3558 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
3559 param_cp.own_address_type = own_addr_type;
3560 hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
3563 memset(&enable_cp, 0, sizeof(enable_cp));
3564 enable_cp.enable = LE_SCAN_ENABLE;
3565 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3566 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
/* default: unknown discovery type */
3571 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3572 MGMT_STATUS_INVALID_PARAMS);
3573 mgmt_pending_remove(cmd);
3577 err = hci_req_run(&req, start_discovery_complete);
3579 mgmt_pending_remove(cmd);
3581 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
3584 hci_dev_unlock(hdev);
/* Fail a pending MGMT_OP_STOP_DISCOVERY: complete the pending command
 * with the mapped HCI status and the current discovery type.
 */
3588 static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
3590 struct pending_cmd *cmd;
3593 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
3597 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3598 &hdev->discovery.type, sizeof(hdev->discovery.type));
3599 mgmt_pending_remove(cmd);
/* HCI request completion callback for stop_discovery(): report failure
 * via mgmt_stop_discovery_failed(), otherwise mark discovery STOPPED.
 */
3604 static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
3606 BT_DBG("status %d", status);
3611 mgmt_stop_discovery_failed(hdev, status);
3615 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3618 hci_dev_unlock(hdev);
/* MGMT_OP_STOP_DISCOVERY handler: stop an active discovery of the given
 * type. Rejects the request when discovery is not running or the type
 * does not match; otherwise queues hci_stop_discovery() and handles the
 * no-HCI-commands-needed (-ENODATA) case with an immediate completion.
 */
3621 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
3624 struct mgmt_cp_stop_discovery *mgmt_cp = data;
3625 struct pending_cmd *cmd;
3626 struct hci_request req;
3629 BT_DBG("%s", hdev->name);
3633 if (!hci_discovery_active(hdev)) {
3634 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3635 MGMT_STATUS_REJECTED, &mgmt_cp->type,
3636 sizeof(mgmt_cp->type));
3640 if (hdev->discovery.type != mgmt_cp->type) {
3641 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3642 MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
3643 sizeof(mgmt_cp->type));
3647 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
3653 hci_req_init(&req, hdev);
3655 hci_stop_discovery(&req);
3657 err = hci_req_run(&req, stop_discovery_complete);
3659 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
3663 mgmt_pending_remove(cmd);
3665 /* If no HCI commands were sent we're done */
3666 if (err == -ENODATA) {
3667 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
3668 &mgmt_cp->type, sizeof(mgmt_cp->type));
3669 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3673 hci_dev_unlock(hdev);
/* MGMT_OP_CONFIRM_NAME handler: userspace tells us whether the name of
 * a discovered device is already known. Updates the inquiry cache entry
 * so name resolution is skipped (NAME_KNOWN) or scheduled (NAME_NEEDED).
 */
3677 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
3680 struct mgmt_cp_confirm_name *cp = data;
3681 struct inquiry_entry *e;
3684 BT_DBG("%s", hdev->name);
/* Only meaningful while discovery is in progress. */
3688 if (!hci_discovery_active(hdev)) {
3689 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3690 MGMT_STATUS_FAILED, &cp->addr,
3695 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
3697 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3698 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
3703 if (cp->name_known) {
3704 e->name_state = NAME_KNOWN;
3707 e->name_state = NAME_NEEDED;
3708 hci_inquiry_cache_update_resolve(hdev, e);
3711 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
3715 hci_dev_unlock(hdev);
/* MGMT_OP_BLOCK_DEVICE handler: add the given address to the adapter's
 * blacklist after validating the address type.
 */
3719 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
3722 struct mgmt_cp_block_device *cp = data;
3726 BT_DBG("%s", hdev->name);
3728 if (!bdaddr_type_is_valid(cp->addr.type))
3729 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
3730 MGMT_STATUS_INVALID_PARAMS,
3731 &cp->addr, sizeof(cp->addr));
3735 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
3737 status = MGMT_STATUS_FAILED;
3739 status = MGMT_STATUS_SUCCESS;
3741 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
3742 &cp->addr, sizeof(cp->addr));
3744 hci_dev_unlock(hdev);
/* MGMT_OP_UNBLOCK_DEVICE handler: remove the given address from the
 * blacklist; a lookup miss maps to INVALID_PARAMS.
 */
3749 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
3752 struct mgmt_cp_unblock_device *cp = data;
3756 BT_DBG("%s", hdev->name);
3758 if (!bdaddr_type_is_valid(cp->addr.type))
3759 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
3760 MGMT_STATUS_INVALID_PARAMS,
3761 &cp->addr, sizeof(cp->addr));
3765 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
3767 status = MGMT_STATUS_INVALID_PARAMS;
3769 status = MGMT_STATUS_SUCCESS;
3771 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
3772 &cp->addr, sizeof(cp->addr));
3774 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEVICE_ID handler: store the Device ID record (source,
 * vendor, product, version) used in EIR/SDP. Source values above 0x0002
 * are invalid per the DI profile. Runs an hci_request afterwards,
 * presumably to refresh EIR with the new record — the elided lines
 * between req init and hci_req_run() are not visible here.
 */
3779 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
3782 struct mgmt_cp_set_device_id *cp = data;
3783 struct hci_request req;
3787 BT_DBG("%s", hdev->name);
3789 source = __le16_to_cpu(cp->source);
3791 if (source > 0x0002)
3792 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
3793 MGMT_STATUS_INVALID_PARAMS);
3797 hdev->devid_source = source;
3798 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
3799 hdev->devid_product = __le16_to_cpu(cp->product);
3800 hdev->devid_version = __le16_to_cpu(cp->version);
3802 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
3804 hci_req_init(&req, hdev);
3806 hci_req_run(&req, NULL);
3808 hci_dev_unlock(hdev);
/* HCI request completion callback for set_advertising(): on failure,
 * answer every pending SET_ADVERTISING command with the error status;
 * on success, send settings responses and a New Settings event.
 */
3813 static void set_advertising_complete(struct hci_dev *hdev, u8 status)
3815 struct cmd_lookup match = { NULL, hdev };
3818 u8 mgmt_err = mgmt_status(status);
3820 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
3821 cmd_status_rsp, &mgmt_err);
3825 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
3828 new_settings(hdev, match.sk);
/* MGMT_OP_SET_ADVERTISING handler: toggle LE advertising. When no HCI
 * traffic is needed (powered off, no change, or an LE connection is up,
 * which precludes changing advertising), only the flag is flipped and a
 * direct response sent; otherwise enable/disable advertising via an
 * hci_request completed by set_advertising_complete().
 */
3834 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
3837 struct mgmt_mode *cp = data;
3838 struct pending_cmd *cmd;
3839 struct hci_request req;
3840 u8 val, enabled, status;
3843 BT_DBG("request for %s", hdev->name);
3845 status = mgmt_le_support(hdev);
3847 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
3850 if (cp->val != 0x00 && cp->val != 0x01)
3851 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
3852 MGMT_STATUS_INVALID_PARAMS);
3857 enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);
3859 /* The following conditions are ones which mean that we should
3860 * not do any HCI communication but directly send a mgmt
3861 * response to user space (after toggling the flag if
3864 if (!hdev_is_powered(hdev) || val == enabled ||
3865 hci_conn_num(hdev, LE_LINK) > 0) {
3866 bool changed = false;
3868 if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
3869 change_bit(HCI_ADVERTISING, &hdev->dev_flags);
3873 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
3878 err = new_settings(hdev, sk);
/* Refuse while another SET_ADVERTISING or SET_LE is in flight. */
3883 if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
3884 mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
3885 err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
3890 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
3896 hci_req_init(&req, hdev);
3899 enable_advertising(&req);
3901 disable_advertising(&req);
3903 err = hci_req_run(&req, set_advertising_complete);
3905 mgmt_pending_remove(cmd);
3908 hci_dev_unlock(hdev);
/* MGMT_OP_SET_STATIC_ADDRESS handler: configure the LE static random
 * address. Only allowed while powered off; BDADDR_ANY clears it, any
 * other value must be a valid static address (not BDADDR_NONE, and with
 * the two most significant bits set, per the Core Specification).
 */
3912 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
3913 void *data, u16 len)
3915 struct mgmt_cp_set_static_address *cp = data;
3918 BT_DBG("%s", hdev->name);
3920 if (!lmp_le_capable(hdev))
3921 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
3922 MGMT_STATUS_NOT_SUPPORTED);
3924 if (hdev_is_powered(hdev))
3925 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
3926 MGMT_STATUS_REJECTED);
3928 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
3929 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
3930 return cmd_status(sk, hdev->id,
3931 MGMT_OP_SET_STATIC_ADDRESS,
3932 MGMT_STATUS_INVALID_PARAMS);
3934 /* Two most significant bits shall be set */
3935 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
3936 return cmd_status(sk, hdev->id,
3937 MGMT_OP_SET_STATIC_ADDRESS,
3938 MGMT_STATUS_INVALID_PARAMS);
3943 bacpy(&hdev->static_addr, &cp->bdaddr);
3945 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
3947 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SCAN_PARAMS handler: set the LE scan interval and window.
 * Both must lie in [0x0004, 0x4000] and the window must not exceed the
 * interval. If background (passive) scanning is currently active, it is
 * restarted so the new parameters take effect immediately.
 */
3952 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
3953 void *data, u16 len)
3955 struct mgmt_cp_set_scan_params *cp = data;
3956 __u16 interval, window;
3959 BT_DBG("%s", hdev->name);
3961 if (!lmp_le_capable(hdev))
3962 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3963 MGMT_STATUS_NOT_SUPPORTED);
3965 interval = __le16_to_cpu(cp->interval);
3967 if (interval < 0x0004 || interval > 0x4000)
3968 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3969 MGMT_STATUS_INVALID_PARAMS);
3971 window = __le16_to_cpu(cp->window);
3973 if (window < 0x0004 || window > 0x4000)
3974 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3975 MGMT_STATUS_INVALID_PARAMS);
3977 if (window > interval)
3978 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3979 MGMT_STATUS_INVALID_PARAMS);
3983 hdev->le_scan_interval = interval;
3984 hdev->le_scan_window = window;
3986 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
3988 /* If background scan is running, restart it so new parameters are
3991 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
3992 hdev->discovery.state == DISCOVERY_STOPPED) {
3993 struct hci_request req;
3995 hci_req_init(&req, hdev);
3997 hci_req_add_le_scan_disable(&req);
3998 hci_req_add_le_passive_scan(&req);
4000 hci_req_run(&req, NULL);
4003 hci_dev_unlock(hdev);
/* HCI request completion callback for set_fast_connectable(): on
 * failure, send an error status; on success, commit the requested value
 * to HCI_FAST_CONNECTABLE and emit settings response + New Settings.
 */
4008 static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
4010 struct pending_cmd *cmd;
4012 BT_DBG("status 0x%02x", status);
4016 cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4021 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4022 mgmt_status(status));
4024 struct mgmt_mode *cp = cmd->param;
4027 set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4029 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4031 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4032 new_settings(hdev, cmd->sk);
4035 mgmt_pending_remove(cmd);
4038 hci_dev_unlock(hdev);
/* MGMT_OP_SET_FAST_CONNECTABLE handler: toggle fast-connectable page
 * scan parameters. Requires BR/EDR enabled on a >= 1.2 controller, the
 * adapter powered and connectable; no-ops with a direct response when
 * the flag already matches the request.
 */
4041 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
4042 void *data, u16 len)
4044 struct mgmt_mode *cp = data;
4045 struct pending_cmd *cmd;
4046 struct hci_request req;
4049 BT_DBG("%s", hdev->name);
4051 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
4052 hdev->hci_ver < BLUETOOTH_VER_1_2)
4053 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4054 MGMT_STATUS_NOT_SUPPORTED);
4056 if (cp->val != 0x00 && cp->val != 0x01)
4057 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4058 MGMT_STATUS_INVALID_PARAMS);
4060 if (!hdev_is_powered(hdev))
4061 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4062 MGMT_STATUS_NOT_POWERED);
4064 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4065 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4066 MGMT_STATUS_REJECTED);
4070 if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
4071 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4076 if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
4077 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
4082 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
4089 hci_req_init(&req, hdev);
4091 write_fast_connectable(&req, cp->val);
4093 err = hci_req_run(&req, fast_connectable_complete);
4095 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4096 MGMT_STATUS_FAILED);
4097 mgmt_pending_remove(cmd);
4101 hci_dev_unlock(hdev);
/* Queue a Write Scan Enable command reflecting the current connectable/
 * discoverable flags, first forcing fast-connectable page scan
 * parameters off.
 */
4106 static void set_bredr_scan(struct hci_request *req)
4108 struct hci_dev *hdev = req->hdev;
4111 /* Ensure that fast connectable is disabled. This function will
4112 * not do anything if the page scan parameters are already what
4115 write_fast_connectable(req, false);
4117 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4119 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
4120 scan |= SCAN_INQUIRY;
4123 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* HCI request completion callback for set_bredr(): on failure, roll
 * back the optimistically-set HCI_BREDR_ENABLED flag and report the
 * error; on success, emit settings response + New Settings.
 */
4126 static void set_bredr_complete(struct hci_dev *hdev, u8 status)
4128 struct pending_cmd *cmd;
4130 BT_DBG("status 0x%02x", status);
4134 cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
4139 u8 mgmt_err = mgmt_status(status);
4141 /* We need to restore the flag if related HCI commands
4144 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4146 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
4148 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
4149 new_settings(hdev, cmd->sk);
4152 mgmt_pending_remove(cmd);
4155 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BREDR handler: enable/disable BR/EDR on a dual-mode
 * (BR/EDR + LE) controller. Requires LE to stay enabled; disabling
 * BR/EDR while powered is rejected. When powered, enabling flips the
 * flag up-front so update_adv_data() advertises the right flags, then
 * runs the HCI request completed by set_bredr_complete().
 */
4158 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
4160 struct mgmt_mode *cp = data;
4161 struct pending_cmd *cmd;
4162 struct hci_request req;
4165 BT_DBG("request for %s", hdev->name);
4167 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
4168 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4169 MGMT_STATUS_NOT_SUPPORTED);
/* LE must remain enabled; BR/EDR-only operation is configured via LE. */
4171 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
4172 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4173 MGMT_STATUS_REJECTED);
4175 if (cp->val != 0x00 && cp->val != 0x01)
4176 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4177 MGMT_STATUS_INVALID_PARAMS);
4181 if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
4182 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4186 if (!hdev_is_powered(hdev)) {
/* Powered off + disabling: clear all BR/EDR-dependent settings too. */
4188 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4189 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
4190 clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
4191 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4192 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
4195 change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4197 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4201 err = new_settings(hdev, sk);
4205 /* Reject disabling when powered on */
4207 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4208 MGMT_STATUS_REJECTED);
4212 if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
4213 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4218 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
4224 /* We need to flip the bit already here so that update_adv_data
4225 * generates the correct flags.
4227 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4229 hci_req_init(&req, hdev);
4231 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4232 set_bredr_scan(&req);
4234 /* Since only the advertising data flags will change, there
4235 * is no need to update the scan response data.
4237 update_adv_data(&req);
4239 err = hci_req_run(&req, set_bredr_complete);
4241 mgmt_pending_remove(cmd);
4244 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SECURE_CONN handler: configure Secure Connections mode.
 * val is 0x00 (off), 0x01 (on) or 0x02 (SC-only mode). When powered
 * off, only the HCI_SC_ENABLED/HCI_SC_ONLY flags are updated; when
 * powered, HCI Write Secure Connections Host Support is issued.
 */
4248 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
4249 void *data, u16 len)
4251 struct mgmt_mode *cp = data;
4252 struct pending_cmd *cmd;
4256 BT_DBG("request for %s", hdev->name);
4258 status = mgmt_bredr_support(hdev);
4260 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
/* SC requires controller support unless force-enabled for debugging. */
4263 if (!lmp_sc_capable(hdev) &&
4264 !test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
4265 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4266 MGMT_STATUS_NOT_SUPPORTED);
4268 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4269 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4270 MGMT_STATUS_INVALID_PARAMS);
4274 if (!hdev_is_powered(hdev)) {
4278 changed = !test_and_set_bit(HCI_SC_ENABLED,
4280 if (cp->val == 0x02)
4281 set_bit(HCI_SC_ONLY, &hdev->dev_flags);
4283 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4285 changed = test_and_clear_bit(HCI_SC_ENABLED,
4287 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4290 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4295 err = new_settings(hdev, sk);
4300 if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
4301 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
/* No-op when both the enabled and SC-only state already match. */
4308 if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
4309 (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
4310 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4314 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
4320 err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
4322 mgmt_pending_remove(cmd);
4326 if (cp->val == 0x02)
4327 set_bit(HCI_SC_ONLY, &hdev->dev_flags);
4329 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4332 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEBUG_KEYS handler: toggle acceptance of SSP debug keys
 * by flipping HCI_DEBUG_KEYS; purely a flag change, no HCI traffic.
 */
4336 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
4337 void *data, u16 len)
4339 struct mgmt_mode *cp = data;
4343 BT_DBG("request for %s", hdev->name);
4345 if (cp->val != 0x00 && cp->val != 0x01)
4346 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
4347 MGMT_STATUS_INVALID_PARAMS);
4352 changed = !test_and_set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
4354 changed = test_and_clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
4356 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
4361 err = new_settings(hdev, sk);
4364 hci_dev_unlock(hdev);
/* MGMT_OP_SET_PRIVACY handler: enable/disable LE privacy (RPA use).
 * Only allowed while powered off. Enabling stores the supplied IRK and
 * marks the RPA expired so a fresh one is generated on power-up;
 * disabling wipes the IRK.
 */
4368 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4371 struct mgmt_cp_set_privacy *cp = cp_data;
4375 BT_DBG("request for %s", hdev->name);
4377 if (!lmp_le_capable(hdev))
4378 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4379 MGMT_STATUS_NOT_SUPPORTED);
4381 if (cp->privacy != 0x00 && cp->privacy != 0x01)
4382 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4383 MGMT_STATUS_INVALID_PARAMS);
4385 if (hdev_is_powered(hdev))
4386 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4387 MGMT_STATUS_REJECTED);
4391 /* If user space supports this command it is also expected to
4392 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
4394 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4397 changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
4398 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
4399 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
4401 changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
4402 memset(hdev->irk, 0, sizeof(hdev->irk));
4403 clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
4406 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
4411 err = new_settings(hdev, sk);
4414 hci_dev_unlock(hdev);
/* Validate the address carried in an IRK entry: LE public addresses are
 * always acceptable; LE random addresses must be static (two most
 * significant bits set).
 */
4418 static bool irk_is_valid(struct mgmt_irk_info *irk)
4420 switch (irk->addr.type) {
4421 case BDADDR_LE_PUBLIC:
4424 case BDADDR_LE_RANDOM:
4425 /* Two most significant bits shall be set */
4426 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT_OP_LOAD_IRKS handler: replace the adapter's stored Identity
 * Resolving Keys with the list supplied by userspace. Validates the
 * total command length against irk_count and each entry's address
 * before clearing and repopulating the IRK store.
 */
4434 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4437 struct mgmt_cp_load_irks *cp = cp_data;
4438 u16 irk_count, expected_len;
4441 BT_DBG("request for %s", hdev->name);
4443 if (!lmp_le_capable(hdev))
4444 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4445 MGMT_STATUS_NOT_SUPPORTED);
4447 irk_count = __le16_to_cpu(cp->irk_count);
4449 expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
4450 if (expected_len != len) {
4451 BT_ERR("load_irks: expected %u bytes, got %u bytes",
4453 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4454 MGMT_STATUS_INVALID_PARAMS);
4457 BT_DBG("%s irk_count %u", hdev->name, irk_count);
/* Validate every entry before mutating any state. */
4459 for (i = 0; i < irk_count; i++) {
4460 struct mgmt_irk_info *key = &cp->irks[i];
4462 if (!irk_is_valid(key))
4463 return cmd_status(sk, hdev->id,
4465 MGMT_STATUS_INVALID_PARAMS);
4470 hci_smp_irks_clear(hdev);
4472 for (i = 0; i < irk_count; i++) {
4473 struct mgmt_irk_info *irk = &cp->irks[i];
4476 if (irk->addr.type == BDADDR_LE_PUBLIC)
4477 addr_type = ADDR_LE_DEV_PUBLIC;
4479 addr_type = ADDR_LE_DEV_RANDOM;
4481 hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
/* Userspace handles IRKs, so RPA resolving can be enabled. */
4485 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4487 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
4489 hci_dev_unlock(hdev);
/* Validate a Long Term Key entry: master must be 0x00/0x01, and the
 * address must be LE public or an LE static random address (two most
 * significant bits set).
 */
4494 static bool ltk_is_valid(struct mgmt_ltk_info *key)
4496 if (key->master != 0x00 && key->master != 0x01)
4499 switch (key->addr.type) {
4500 case BDADDR_LE_PUBLIC:
4503 case BDADDR_LE_RANDOM:
4504 /* Two most significant bits shall be set */
4505 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT_OP_LOAD_LONG_TERM_KEYS handler: replace the adapter's stored LE
 * Long Term Keys with the list supplied by userspace. Validates total
 * length against key_count and each entry via ltk_is_valid() before
 * clearing and repopulating the LTK store.
 */
4513 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
4514 void *cp_data, u16 len)
4516 struct mgmt_cp_load_long_term_keys *cp = cp_data;
4517 u16 key_count, expected_len;
4520 BT_DBG("request for %s", hdev->name);
4522 if (!lmp_le_capable(hdev))
4523 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4524 MGMT_STATUS_NOT_SUPPORTED);
4526 key_count = __le16_to_cpu(cp->key_count);
4528 expected_len = sizeof(*cp) + key_count *
4529 sizeof(struct mgmt_ltk_info);
4530 if (expected_len != len) {
4531 BT_ERR("load_keys: expected %u bytes, got %u bytes",
4533 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4534 MGMT_STATUS_INVALID_PARAMS);
4537 BT_DBG("%s key_count %u", hdev->name, key_count);
/* Validate every entry before mutating any state. */
4539 for (i = 0; i < key_count; i++) {
4540 struct mgmt_ltk_info *key = &cp->keys[i];
4542 if (!ltk_is_valid(key))
4543 return cmd_status(sk, hdev->id,
4544 MGMT_OP_LOAD_LONG_TERM_KEYS,
4545 MGMT_STATUS_INVALID_PARAMS);
4550 hci_smp_ltks_clear(hdev);
4552 for (i = 0; i < key_count; i++) {
4553 struct mgmt_ltk_info *key = &cp->keys[i];
4554 u8 type, addr_type, authenticated;
4556 if (key->addr.type == BDADDR_LE_PUBLIC)
4557 addr_type = ADDR_LE_DEV_PUBLIC;
4559 addr_type = ADDR_LE_DEV_RANDOM;
4564 type = SMP_LTK_SLAVE;
/* Map the mgmt key type onto the SMP authenticated flag. */
4566 switch (key->type) {
4567 case MGMT_LTK_UNAUTHENTICATED:
4568 authenticated = 0x00;
4570 case MGMT_LTK_AUTHENTICATED:
4571 authenticated = 0x01;
4577 hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
4578 authenticated, key->val, key->enc_size, key->ediv,
4582 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
4585 hci_dev_unlock(hdev);
/* Context passed through mgmt_pending_foreach() when finishing
 * GET_CONN_INFO requests: the connection being reported, whether the
 * TX power values are valid, and (per its use in
 * get_conn_info_complete) a mgmt_status field elided from this view.
 */
4590 struct cmd_conn_lookup {
4591 struct hci_conn *conn;
4592 bool valid_tx_power;
/* Per-pending-command callback run from mgmt_pending_foreach(): if the
 * pending GET_CONN_INFO command is for the matched connection, build
 * the reply (RSSI plus TX power when valid, HCI_TX_POWER_INVALID
 * otherwise), send it, drop the connection reference and remove the
 * pending command.
 */
4596 static void get_conn_info_complete(struct pending_cmd *cmd, void *data)
4598 struct cmd_conn_lookup *match = data;
4599 struct mgmt_cp_get_conn_info *cp;
4600 struct mgmt_rp_get_conn_info rp;
4601 struct hci_conn *conn = cmd->user_data;
/* Skip pending commands that refer to a different connection. */
4603 if (conn != match->conn)
4606 cp = (struct mgmt_cp_get_conn_info *) cmd->param;
4608 memset(&rp, 0, sizeof(rp));
4609 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4610 rp.addr.type = cp->addr.type;
4612 if (!match->mgmt_status) {
4613 rp.rssi = conn->rssi;
4615 if (match->valid_tx_power) {
4616 rp.tx_power = conn->tx_power;
4617 rp.max_tx_power = conn->max_tx_power;
4619 rp.tx_power = HCI_TX_POWER_INVALID;
4620 rp.max_tx_power = HCI_TX_POWER_INVALID;
4624 cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
4625 match->mgmt_status, &rp, sizeof(rp));
4627 hci_conn_drop(conn);
4629 mgmt_pending_remove(cmd);
/* Completion callback for the Read RSSI / Read TX Power request built in
 * get_conn_info(). Recovers the connection handle from whichever command
 * was sent last, then replies to every pending GET_CONN_INFO command that
 * matches the connection.
 * NOTE(review): decimated chunk — braces, gotos and some declarations
 * (e.g. the u16 handle declaration, hci_dev_lock()) are elided.
 */
4632 static void conn_info_refresh_complete(struct hci_dev *hdev, u8 status)
4634 struct hci_cp_read_rssi *cp;
4635 struct hci_conn *conn;
4636 struct cmd_conn_lookup match;
4639 BT_DBG("status 0x%02x", status);
4643 /* TX power data is valid in case request completed successfully,
4644 * otherwise we assume it's not valid. At the moment we assume that
4645 * either both or none of current and max values are valid to keep code
4648 match.valid_tx_power = !status;
4650 /* Commands sent in request are either Read RSSI or Read Transmit Power
4651 * Level so we check which one was last sent to retrieve connection
4652 * handle. Both commands have handle as first parameter so it's safe to
4653 * cast data on the same command struct.
4655 * First command sent is always Read RSSI and we fail only if it fails.
4656 * In other case we simply override error to indicate success as we
4657 * already remembered if TX power value is actually valid.
4659 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
4661 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
4666 BT_ERR("invalid sent_cmd in response");
4670 handle = __le16_to_cpu(cp->handle);
4671 conn = hci_conn_hash_lookup_handle(hdev, handle);
4673 BT_ERR("unknown handle (%d) in response", handle);
4678 match.mgmt_status = mgmt_status(status);
4680 /* Cache refresh is complete, now reply for mgmt request for given
4683 mgmt_pending_foreach(MGMT_OP_GET_CONN_INFO, hdev,
4684 get_conn_info_complete, &match);
4687 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_GET_CONN_INFO: report RSSI and TX power for an
 * existing connection. Validates the address type, the powered state and
 * the connection, then either answers straight from values cached in the
 * hci_conn (if the cache is fresh) or builds an HCI request (Read RSSI,
 * optionally Read Transmit Power Level) to refresh them first, completing
 * asynchronously via conn_info_refresh_complete().
 * NOTE(review): decimated chunk — braces, `else` keywords, goto/unlock
 * error paths and the hci_dev_lock() call are elided.
 */
4690 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
4693 struct mgmt_cp_get_conn_info *cp = data;
4694 struct mgmt_rp_get_conn_info rp;
4695 struct hci_conn *conn;
4696 unsigned long conn_info_age;
4699 BT_DBG("%s", hdev->name);
4701 memset(&rp, 0, sizeof(rp));
4702 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4703 rp.addr.type = cp->addr.type;
4705 if (!bdaddr_type_is_valid(cp->addr.type))
4706 return cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
4707 MGMT_STATUS_INVALID_PARAMS,
4712 if (!hdev_is_powered(hdev)) {
4713 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
4714 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Look up the connection in the right hash (ACL for BR/EDR, LE otherwise). */
4718 if (cp->addr.type == BDADDR_BREDR)
4719 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
4722 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
4724 if (!conn || conn->state != BT_CONNECTED) {
4725 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
4726 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
4730 /* To avoid client trying to guess when to poll again for information we
4731 * calculate conn info age as random value between min/max set in hdev.
4733 conn_info_age = hdev->conn_info_min_age +
4734 prandom_u32_max(hdev->conn_info_max_age -
4735 hdev->conn_info_min_age);
4737 /* Query controller to refresh cached values if they are too old or were
4740 if (time_after(jiffies, conn->conn_info_timestamp +
4741 msecs_to_jiffies(conn_info_age)) ||
4742 !conn->conn_info_timestamp) {
4743 struct hci_request req;
4744 struct hci_cp_read_tx_power req_txp_cp;
4745 struct hci_cp_read_rssi req_rssi_cp;
4746 struct pending_cmd *cmd;
4748 hci_req_init(&req, hdev);
4749 req_rssi_cp.handle = cpu_to_le16(conn->handle);
4750 hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
4753 /* For LE links TX power does not change thus we don't need to
4754 * query for it once value is known.
4756 if (!bdaddr_type_is_le(cp->addr.type) ||
4757 conn->tx_power == HCI_TX_POWER_INVALID) {
4758 req_txp_cp.handle = cpu_to_le16(conn->handle);
4759 req_txp_cp.type = 0x00;
4760 hci_req_add(&req, HCI_OP_READ_TX_POWER,
4761 sizeof(req_txp_cp), &req_txp_cp);
4764 /* Max TX power needs to be read only once per connection */
4765 if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
4766 req_txp_cp.handle = cpu_to_le16(conn->handle);
4767 req_txp_cp.type = 0x01;
4768 hci_req_add(&req, HCI_OP_READ_TX_POWER,
4769 sizeof(req_txp_cp), &req_txp_cp);
4772 err = hci_req_run(&req, conn_info_refresh_complete);
/* Hold the connection while the refresh is in flight; dropped in
 * get_conn_info_complete().
 */
4776 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
4783 hci_conn_hold(conn);
4784 cmd->user_data = conn;
4786 conn->conn_info_timestamp = jiffies;
4788 /* Cache is valid, just reply with values cached in hci_conn */
4789 rp.rssi = conn->rssi;
4790 rp.tx_power = conn->tx_power;
4791 rp.max_tx_power = conn->max_tx_power;
4793 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
4794 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
4798 hci_dev_unlock(hdev);
/* Dispatch table mapping mgmt opcodes (array index == opcode value) to
 * their handler functions. The second initializer appears to be a
 * variable-length flag and the third the expected/minimum parameter size
 * — presumably the var_len/data_len members checked in mgmt_control();
 * their declarations are elided in this decimated chunk.
 */
4802 static const struct mgmt_handler {
4803 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
4807 } mgmt_handlers[] = {
4808 { NULL }, /* 0x0000 (no command) */
4809 { read_version, false, MGMT_READ_VERSION_SIZE },
4810 { read_commands, false, MGMT_READ_COMMANDS_SIZE },
4811 { read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
4812 { read_controller_info, false, MGMT_READ_INFO_SIZE },
4813 { set_powered, false, MGMT_SETTING_SIZE },
4814 { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
4815 { set_connectable, false, MGMT_SETTING_SIZE },
4816 { set_fast_connectable, false, MGMT_SETTING_SIZE },
4817 { set_pairable, false, MGMT_SETTING_SIZE },
4818 { set_link_security, false, MGMT_SETTING_SIZE },
4819 { set_ssp, false, MGMT_SETTING_SIZE },
4820 { set_hs, false, MGMT_SETTING_SIZE },
4821 { set_le, false, MGMT_SETTING_SIZE },
4822 { set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
4823 { set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
4824 { add_uuid, false, MGMT_ADD_UUID_SIZE },
4825 { remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
4826 { load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
4827 { load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
4828 { disconnect, false, MGMT_DISCONNECT_SIZE },
4829 { get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
4830 { pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
4831 { pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
4832 { set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
4833 { pair_device, false, MGMT_PAIR_DEVICE_SIZE },
4834 { cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
4835 { unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
4836 { user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
4837 { user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
4838 { user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
4839 { user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
4840 { read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
4841 { add_remote_oob_data, true, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
4842 { remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
4843 { start_discovery, false, MGMT_START_DISCOVERY_SIZE },
4844 { stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
4845 { confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
4846 { block_device, false, MGMT_BLOCK_DEVICE_SIZE },
4847 { unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
4848 { set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
4849 { set_advertising, false, MGMT_SETTING_SIZE },
4850 { set_bredr, false, MGMT_SETTING_SIZE },
4851 { set_static_address, false, MGMT_SET_STATIC_ADDRESS_SIZE },
4852 { set_scan_params, false, MGMT_SET_SCAN_PARAMS_SIZE },
4853 { set_secure_conn, false, MGMT_SETTING_SIZE },
4854 { set_debug_keys, false, MGMT_SETTING_SIZE },
4855 { set_privacy, false, MGMT_SET_PRIVACY_SIZE },
4856 { load_irks, true, MGMT_LOAD_IRKS_SIZE },
4857 { get_conn_info, false, MGMT_GET_CONN_INFO_SIZE },
/* Entry point for raw mgmt messages received on an HCI control socket.
 * Copies the message from userspace, parses the mgmt_hdr (opcode, index,
 * payload length), resolves and validates the controller index, checks
 * the opcode against the dispatch table and the payload length against
 * the handler's declared size (exact match, or minimum for variable-
 * length commands), then invokes the handler.
 * NOTE(review): decimated chunk — error/goto paths, kfree/hci_dev_put
 * cleanup and several closing braces are elided.
 */
4861 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
4865 struct mgmt_hdr *hdr;
4866 u16 opcode, index, len;
4867 struct hci_dev *hdev = NULL;
4868 const struct mgmt_handler *handler;
4871 BT_DBG("got %zu bytes", msglen);
4873 if (msglen < sizeof(*hdr))
4876 buf = kmalloc(msglen, GFP_KERNEL);
4880 if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
4886 opcode = __le16_to_cpu(hdr->opcode);
4887 index = __le16_to_cpu(hdr->index);
4888 len = __le16_to_cpu(hdr->len);
/* Header length field must match the actual payload size. */
4890 if (len != msglen - sizeof(*hdr)) {
4895 if (index != MGMT_INDEX_NONE) {
4896 hdev = hci_dev_get(index);
4898 err = cmd_status(sk, index, opcode,
4899 MGMT_STATUS_INVALID_INDEX);
/* Controllers still in setup, or bound to a user channel, are not
 * addressable through the management interface.
 */
4903 if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
4904 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4905 err = cmd_status(sk, index, opcode,
4906 MGMT_STATUS_INVALID_INDEX);
4911 if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
4912 mgmt_handlers[opcode].func == NULL) {
4913 BT_DBG("Unknown op %u", opcode);
4914 err = cmd_status(sk, index, opcode,
4915 MGMT_STATUS_UNKNOWN_COMMAND);
/* Opcodes below MGMT_OP_READ_INFO are global and must not carry an
 * index; all others require one.
 */
4919 if ((hdev && opcode < MGMT_OP_READ_INFO) ||
4920 (!hdev && opcode >= MGMT_OP_READ_INFO)) {
4921 err = cmd_status(sk, index, opcode,
4922 MGMT_STATUS_INVALID_INDEX);
4926 handler = &mgmt_handlers[opcode];
4928 if ((handler->var_len && len < handler->data_len) ||
4929 (!handler->var_len && len != handler->data_len)) {
4930 err = cmd_status(sk, index, opcode,
4931 MGMT_STATUS_INVALID_PARAMS);
4936 mgmt_init_hdev(sk, hdev);
4938 cp = buf + sizeof(*hdr);
4940 err = handler->func(sk, hdev, cp, len);
/* Emit MGMT_EV_INDEX_ADDED for a newly registered controller; non-BR/EDR
 * (e.g. AMP) controllers are skipped.
 * NOTE(review): the early-return line between the check and the event is
 * elided in this decimated chunk.
 */
4954 void mgmt_index_added(struct hci_dev *hdev)
4956 if (hdev->dev_type != HCI_BREDR)
4959 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
/* Emit MGMT_EV_INDEX_REMOVED for an unregistered BR/EDR controller,
 * first failing every pending command (opcode 0 == all opcodes) with
 * MGMT_STATUS_INVALID_INDEX.
 * NOTE(review): early-return and brace lines elided in this chunk.
 */
4962 void mgmt_index_removed(struct hci_dev *hdev)
4964 u8 status = MGMT_STATUS_INVALID_INDEX;
4966 if (hdev->dev_type != HCI_BREDR)
4969 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
4971 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
4974 /* This function requires the caller holds hdev->lock */
/* Re-arm pending LE connections for every stored connection parameter
 * entry marked HCI_AUTO_CONN_ALWAYS (used after power-on).
 */
4975 static void restart_le_auto_conns(struct hci_dev *hdev)
4977 struct hci_conn_params *p;
4979 list_for_each_entry(p, &hdev->le_conn_params, list) {
4980 if (p->auto_connect == HCI_AUTO_CONN_ALWAYS)
4981 hci_pend_le_conn_add(hdev, &p->addr, p->addr_type);
/* Completion callback for the power-on HCI request built by
 * powered_update_hci(): restarts LE auto-connections, answers pending
 * SET_POWERED commands and broadcasts the new settings.
 * NOTE(review): the hci_dev_lock() call paired with the visible unlock
 * is elided in this decimated chunk.
 */
4985 static void powered_complete(struct hci_dev *hdev, u8 status)
4987 struct cmd_lookup match = { NULL, hdev };
4989 BT_DBG("status 0x%02x", status);
4993 restart_le_auto_conns(hdev);
4995 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
4997 new_settings(hdev, match.sk);
4999 hci_dev_unlock(hdev);
/* Build and run the HCI request that brings the controller state in line
 * with the mgmt settings after power-on: SSP mode, LE host support,
 * advertising/scan response data, authentication (link security) and
 * BR/EDR page scan. Returns the hci_req_run() result; the request
 * completes via powered_complete().
 * NOTE(review): decimated chunk — some assignments (e.g. cp.le, the ssp
 * variable, link_sec declaration) and closing braces are elided.
 */
5005 static int powered_update_hci(struct hci_dev *hdev)
5007 struct hci_request req;
5010 hci_req_init(&req, hdev);
/* Enable SSP on the controller if mgmt wants it but the host feature
 * bit is not yet set.
 */
5012 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
5013 !lmp_host_ssp_capable(hdev)) {
5016 hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
5019 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
5020 lmp_bredr_capable(hdev)) {
5021 struct hci_cp_write_le_host_supported cp;
5024 cp.simul = lmp_le_br_capable(hdev);
5026 /* Check first if we already have the right
5027 * host state (host features set)
5029 if (cp.le != lmp_host_le_capable(hdev) ||
5030 cp.simul != lmp_host_le_br_capable(hdev))
5031 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
5035 if (lmp_le_capable(hdev)) {
5036 /* Make sure the controller has a good default for
5037 * advertising data. This also applies to the case
5038 * where BR/EDR was toggled during the AUTO_OFF phase.
5040 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
5041 update_adv_data(&req);
5042 update_scan_rsp_data(&req);
5045 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
5046 enable_advertising(&req);
/* Sync the controller's auth-enable setting with the mgmt link
 * security flag.
 */
5049 link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
5050 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
5051 hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
5052 sizeof(link_sec), &link_sec);
5054 if (lmp_bredr_capable(hdev)) {
5055 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
5056 set_bredr_scan(&req);
5062 return hci_req_run(&req, powered_complete);
/* Notify the management interface of a controller power state change.
 * On power-on it kicks off powered_update_hci() (completing pending
 * SET_POWERED commands asynchronously when the request runs); on
 * power-off it fails all other pending commands with NOT_POWERED,
 * announces a zeroed class of device if needed, and emits new settings.
 * NOTE(review): decimated chunk — the powered on/off branch structure,
 * gotos and return are elided.
 */
5065 int mgmt_powered(struct hci_dev *hdev, u8 powered)
5067 struct cmd_lookup match = { NULL, hdev };
5068 u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
5069 u8 zero_cod[] = { 0, 0, 0 };
5072 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
5076 if (powered_update_hci(hdev) == 0)
5079 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
5084 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
5085 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);
5087 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
5088 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
5089 zero_cod, sizeof(zero_cod), NULL);
5092 err = new_settings(hdev, match.sk);
/* Fail the pending SET_POWERED command after a power-on error, mapping
 * -ERFKILL to MGMT_STATUS_RFKILLED and anything else to
 * MGMT_STATUS_FAILED.
 * NOTE(review): decimated chunk — the status declaration, the NULL
 * check's return and the `else` keyword are elided.
 */
5100 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
5102 struct pending_cmd *cmd;
5105 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
5109 if (err == -ERFKILL)
5110 status = MGMT_STATUS_RFKILLED;
5112 status = MGMT_STATUS_FAILED;
5114 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
5116 mgmt_pending_remove(cmd);
/* Handle expiry of the discoverable timeout: clear both discoverable
 * flags, restore page-scan-only on BR/EDR, refresh advertising data,
 * reset the timeout and broadcast the new settings.
 * NOTE(review): the hci_dev_lock() paired with the visible unlock is
 * elided in this decimated chunk.
 */
5119 void mgmt_discoverable_timeout(struct hci_dev *hdev)
5121 struct hci_request req;
5125 /* When discoverable timeout triggers, then just make sure
5126 * the limited discoverable flag is cleared. Even in the case
5127 * of a timeout triggered from general discoverable, it is
5128 * safe to unconditionally clear the flag.
5130 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
5131 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
5133 hci_req_init(&req, hdev);
5134 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
5135 u8 scan = SCAN_PAGE;
5136 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
5137 sizeof(scan), &scan);
5140 update_adv_data(&req);
5141 hci_req_run(&req, NULL);
5143 hdev->discov_timeout = 0;
5145 new_settings(hdev, NULL);
5147 hci_dev_unlock(hdev);
/* Sync the HCI_DISCOVERABLE flag with a scan-mode change reported by the
 * controller, skipping the update when a SET_DISCOVERABLE or power-off
 * command is pending (their own completion paths handle it). When the
 * flag actually changed, refresh advertising data and broadcast the new
 * settings.
 * NOTE(review): decimated chunk — the `changed` declaration, `else`
 * keyword, returns and braces are elided.
 */
5150 void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
5154 /* Nothing needed here if there's a pending command since that
5155 * commands request completion callback takes care of everything
5158 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
5161 /* Powering off may clear the scan mode - don't let that interfere */
5162 if (!discoverable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
5166 changed = !test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
5168 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
5169 changed = test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
5173 struct hci_request req;
5175 /* In case this change in discoverable was triggered by
5176 * a disabling of connectable there could be a need to
5177 * update the advertising flags.
5179 hci_req_init(&req, hdev);
5180 update_adv_data(&req);
5181 hci_req_run(&req, NULL);
5183 new_settings(hdev, NULL);
/* Sync the HCI_CONNECTABLE flag with a scan-mode change reported by the
 * controller, with the same pending-command exclusions as
 * mgmt_discoverable(); broadcasts new settings when the flag changed.
 * NOTE(review): decimated chunk — declaration, `else`, returns and
 * braces are elided.
 */
5187 void mgmt_connectable(struct hci_dev *hdev, u8 connectable)
5191 /* Nothing needed here if there's a pending command since that
5192 * commands request completion callback takes care of everything
5195 if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
5198 /* Powering off may clear the scan mode - don't let that interfere */
5199 if (!connectable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
5203 changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
5205 changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
5208 new_settings(hdev, NULL);
/* Sync the HCI_ADVERTISING flag with the controller's advertising state,
 * ignoring transitions caused by a pending power-off.
 * NOTE(review): the early return and if/else structure lines are elided
 * in this decimated chunk.
 */
5211 void mgmt_advertising(struct hci_dev *hdev, u8 advertising)
5213 /* Powering off may stop advertising - don't let that interfere */
5214 if (!advertising && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
5218 set_bit(HCI_ADVERTISING, &hdev->dev_flags);
5220 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
/* Fail pending SET_CONNECTABLE/SET_DISCOVERABLE commands after a Write
 * Scan Enable command failed; which commands fail depends on which scan
 * bits were being written.
 */
5223 void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
5225 u8 mgmt_err = mgmt_status(status);
5227 if (scan & SCAN_PAGE)
5228 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
5229 cmd_status_rsp, &mgmt_err);
5231 if (scan & SCAN_INQUIRY)
5232 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
5233 cmd_status_rsp, &mgmt_err);
/* Emit MGMT_EV_NEW_LINK_KEY for a newly created BR/EDR link key, with
 * the store hint telling userspace whether the key should be persisted.
 */
5236 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
5239 struct mgmt_ev_new_link_key ev;
5241 memset(&ev, 0, sizeof(ev));
5243 ev.store_hint = persistent;
5244 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
5245 ev.key.addr.type = BDADDR_BREDR;
5246 ev.key.type = key->type;
5247 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
5248 ev.key.pin_len = key->pin_len;
5250 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/* Map an smp_ltk's authenticated flag to the corresponding mgmt LTK
 * type constant.
 */
5253 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
5255 if (ltk->authenticated)
5256 return MGMT_LTK_AUTHENTICATED;
5258 return MGMT_LTK_UNAUTHENTICATED;
/* Emit MGMT_EV_NEW_LONG_TERM_KEY for a newly distributed LE long term
 * key. The store hint is forced to 0 for non-identity random addresses
 * (resolvable/non-resolvable RPAs) since such keys cannot be reused.
 * NOTE(review): decimated chunk — an `else` and the master-bit handling
 * after the SMP_LTK check are elided.
 */
5261 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
5263 struct mgmt_ev_new_long_term_key ev;
5265 memset(&ev, 0, sizeof(ev));
5267 /* Devices using resolvable or non-resolvable random addresses
5268 * without providing an identity resolving key don't require
5269 * to store long term keys. Their addresses will change the
5272 * Only when a remote device provides an identity address
5273 * make sure the long term key is stored. If the remote
5274 * identity is known, the long term keys are internally
5275 * mapped to the identity address. So allow static random
5276 * and public addresses here.
5278 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
5279 (key->bdaddr.b[5] & 0xc0) != 0xc0)
5280 ev.store_hint = 0x00;
5282 ev.store_hint = persistent;
5284 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
5285 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
5286 ev.key.type = mgmt_ltk_type(key);
5287 ev.key.enc_size = key->enc_size;
5288 ev.key.ediv = key->ediv;
5289 ev.key.rand = key->rand;
5291 if (key->type == SMP_LTK)
5294 memcpy(ev.key.val, key->val, sizeof(key->val));
5296 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_IRK for a newly distributed LE identity resolving
 * key; storage is only hinted when the remote actually uses an RPA.
 * NOTE(review): the `else` between the two store_hint assignments is
 * elided in this decimated chunk.
 */
5299 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
5301 struct mgmt_ev_new_irk ev;
5303 memset(&ev, 0, sizeof(ev));
5305 /* For identity resolving keys from devices that are already
5306 * using a public address or static random address, do not
5307 * ask for storing this key. The identity resolving key really
5308 * is only mandatory for devices using resolvable random
5311 * Storing all identity resolving keys has the downside that
5312 * they will be also loaded on next boot of the system. More
5313 * identity resolving keys, means more time during scanning is
5314 * needed to actually resolve these addresses.
5316 if (bacmp(&irk->rpa, BDADDR_ANY))
5317 ev.store_hint = 0x01;
5319 ev.store_hint = 0x00;
5321 bacpy(&ev.rpa, &irk->rpa);
5322 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
5323 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
5324 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
5326 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_CSRK for a newly distributed LE signature resolving
 * key, with the same identity-address store-hint policy as
 * mgmt_new_ltk().
 * NOTE(review): the `else` between the two store_hint assignments is
 * elided in this decimated chunk.
 */
5329 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
5332 struct mgmt_ev_new_csrk ev;
5334 memset(&ev, 0, sizeof(ev));
5336 /* Devices using resolvable or non-resolvable random addresses
5337 * without providing an identity resolving key don't require
5338 * to store signature resolving keys. Their addresses will change
5339 * the next time around.
5341 * Only when a remote device provides an identity address
5342 * make sure the signature resolving key is stored. So allow
5343 * static random and public addresses here.
5345 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
5346 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
5347 ev.store_hint = 0x00;
5349 ev.store_hint = persistent;
5351 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
5352 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
5353 ev.key.master = csrk->master;
5354 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
5356 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
/* Append one EIR field (length byte, type byte, data) at offset eir_len
 * in the eir buffer. Caller guarantees the buffer is large enough.
 * NOTE(review): the `return eir_len;` line is elided in this decimated
 * chunk.
 */
5359 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
5362 eir[eir_len++] = sizeof(type) + data_len;
5363 eir[eir_len++] = type;
5364 memcpy(&eir[eir_len], data, data_len);
5365 eir_len += data_len;
/* Emit MGMT_EV_DEVICE_CONNECTED with the remote address, connection
 * flags, and EIR data containing the remote name (if known) and class
 * of device (if non-zero).
 * NOTE(review): decimated chunk — the buf declaration and the name_len
 * guard around eir_append_data() are elided.
 */
5370 void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5371 u8 addr_type, u32 flags, u8 *name, u8 name_len,
5375 struct mgmt_ev_device_connected *ev = (void *) buf;
5378 bacpy(&ev->addr.bdaddr, bdaddr);
5379 ev->addr.type = link_to_bdaddr(link_type, addr_type);
5381 ev->flags = __cpu_to_le32(flags);
5384 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
5387 if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
5388 eir_len = eir_append_data(ev->eir, eir_len,
5389 EIR_CLASS_OF_DEV, dev_class, 3);
5391 ev->eir_len = cpu_to_le16(eir_len);
5393 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
5394 sizeof(*ev) + eir_len, NULL);
/* mgmt_pending_foreach() callback: complete a pending DISCONNECT command
 * with success and hand its socket back to the caller via *sk so the
 * subsequent DEVICE_DISCONNECTED event skips that socket.
 * NOTE(review): the lines assigning/holding *sk are elided in this
 * decimated chunk.
 */
5397 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
5399 struct mgmt_cp_disconnect *cp = cmd->param;
5400 struct sock **sk = data;
5401 struct mgmt_rp_disconnect rp;
5403 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5404 rp.addr.type = cp->addr.type;
5406 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
5412 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach() callback: complete a pending UNPAIR_DEVICE
 * command with success, emitting the DEVICE_UNPAIRED event first.
 */
5415 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
5417 struct hci_dev *hdev = data;
5418 struct mgmt_cp_unpair_device *cp = cmd->param;
5419 struct mgmt_rp_unpair_device rp;
5421 memset(&rp, 0, sizeof(rp));
5422 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5423 rp.addr.type = cp->addr.type;
5425 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
5427 cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
5429 mgmt_pending_remove(cmd);
/* Emit MGMT_EV_DEVICE_DISCONNECTED for a dropped ACL/LE link. If a
 * power-off is pending and this was the last remaining connection,
 * expedite the power-off work. Also completes any pending DISCONNECT
 * command (skipping its socket for the event) and, afterwards, pending
 * UNPAIR_DEVICE commands.
 * NOTE(review): decimated chunk — braces, returns, the reason
 * assignment and sock_put are elided.
 */
5432 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
5433 u8 link_type, u8 addr_type, u8 reason,
5434 bool mgmt_connected)
5436 struct mgmt_ev_device_disconnected ev;
5437 struct pending_cmd *power_off;
5438 struct sock *sk = NULL;
5440 power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
5442 struct mgmt_mode *cp = power_off->param;
5444 /* The connection is still in hci_conn_hash so test for 1
5445 * instead of 0 to know if this is the last one.
5447 if (!cp->val && hci_conn_count(hdev) == 1) {
5448 cancel_delayed_work(&hdev->power_off);
5449 queue_work(hdev->req_workqueue, &hdev->power_off.work);
5453 if (!mgmt_connected)
5456 if (link_type != ACL_LINK && link_type != LE_LINK)
5459 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
5461 bacpy(&ev.addr.bdaddr, bdaddr);
5462 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5465 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
5470 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
/* Complete the pending DISCONNECT command with the failure status after
 * an HCI Disconnect command failed, but only if the pending command's
 * address matches. Pending UNPAIR_DEVICE commands are answered first.
 * NOTE(review): decimated chunk — NULL-check returns, the cp assignment
 * from cmd->param and goto labels are elided.
 */
5474 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
5475 u8 link_type, u8 addr_type, u8 status)
5477 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
5478 struct mgmt_cp_disconnect *cp;
5479 struct mgmt_rp_disconnect rp;
5480 struct pending_cmd *cmd;
5482 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
5485 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
5491 if (bacmp(bdaddr, &cp->addr.bdaddr))
5494 if (cp->addr.type != bdaddr_type)
5497 bacpy(&rp.addr.bdaddr, bdaddr);
5498 rp.addr.type = bdaddr_type;
5500 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
5501 mgmt_status(status), &rp, sizeof(rp));
5503 mgmt_pending_remove(cmd);
/* Emit MGMT_EV_CONNECT_FAILED for a failed outgoing connection; as in
 * mgmt_device_disconnected(), expedite a pending power-off if this was
 * the last connection in the hash.
 */
5506 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5507 u8 addr_type, u8 status)
5509 struct mgmt_ev_connect_failed ev;
5510 struct pending_cmd *power_off;
5512 power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
5514 struct mgmt_mode *cp = power_off->param;
5516 /* The connection is still in hci_conn_hash so test for 1
5517 * instead of 0 to know if this is the last one.
5519 if (!cp->val && hci_conn_count(hdev) == 1) {
5520 cancel_delayed_work(&hdev->power_off);
5521 queue_work(hdev->req_workqueue, &hdev->power_off.work);
5525 bacpy(&ev.addr.bdaddr, bdaddr);
5526 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5527 ev.status = mgmt_status(status);
5529 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_PIN_CODE_REQUEST for an incoming BR/EDR PIN code request.
 * NOTE(review): the ev.secure assignment is elided in this decimated
 * chunk.
 */
5532 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
5534 struct mgmt_ev_pin_code_request ev;
5536 bacpy(&ev.addr.bdaddr, bdaddr);
5537 ev.addr.type = BDADDR_BREDR;
5540 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
/* Complete the pending PIN_CODE_REPLY command once the controller has
 * acknowledged the PIN code reply.
 * NOTE(review): the NULL-check return after mgmt_pending_find() is
 * elided in this decimated chunk.
 */
5543 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5546 struct pending_cmd *cmd;
5547 struct mgmt_rp_pin_code_reply rp;
5549 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
5553 bacpy(&rp.addr.bdaddr, bdaddr);
5554 rp.addr.type = BDADDR_BREDR;
5556 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
5557 mgmt_status(status), &rp, sizeof(rp));
5559 mgmt_pending_remove(cmd);
/* Complete the pending PIN_CODE_NEG_REPLY command once the controller
 * has acknowledged the negative PIN code reply.
 * NOTE(review): the NULL-check return after mgmt_pending_find() is
 * elided in this decimated chunk.
 */
5562 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5565 struct pending_cmd *cmd;
5566 struct mgmt_rp_pin_code_reply rp;
5568 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
5572 bacpy(&rp.addr.bdaddr, bdaddr);
5573 rp.addr.type = BDADDR_BREDR;
5575 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
5576 mgmt_status(status), &rp, sizeof(rp));
5578 mgmt_pending_remove(cmd);
/* Emit MGMT_EV_USER_CONFIRM_REQUEST asking userspace to confirm the
 * displayed passkey value during SSP/SMP pairing.
 */
5581 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
5582 u8 link_type, u8 addr_type, u32 value,
5585 struct mgmt_ev_user_confirm_request ev;
5587 BT_DBG("%s", hdev->name);
5589 bacpy(&ev.addr.bdaddr, bdaddr);
5590 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5591 ev.confirm_hint = confirm_hint;
5592 ev.value = cpu_to_le32(value);
5594 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
/* Emit MGMT_EV_USER_PASSKEY_REQUEST asking userspace to enter a passkey
 * during pairing.
 */
5598 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
5599 u8 link_type, u8 addr_type)
5601 struct mgmt_ev_user_passkey_request ev;
5603 BT_DBG("%s", hdev->name);
5605 bacpy(&ev.addr.bdaddr, bdaddr);
5606 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5608 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
/* Shared completion helper for the user confirm/passkey (neg-)reply
 * commands: find the pending command for `opcode`, complete it with the
 * translated status and the remote address, and remove it.
 * NOTE(review): the not-found return after mgmt_pending_find() and the
 * final `return err;` are elided in this decimated chunk.
 */
5612 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5613 u8 link_type, u8 addr_type, u8 status,
5616 struct pending_cmd *cmd;
5617 struct mgmt_rp_user_confirm_reply rp;
5620 cmd = mgmt_pending_find(opcode, hdev);
5624 bacpy(&rp.addr.bdaddr, bdaddr);
5625 rp.addr.type = link_to_bdaddr(link_type, addr_type);
5626 err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
5629 mgmt_pending_remove(cmd);
/* Complete a pending USER_CONFIRM_REPLY via the shared helper. */
5634 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5635 u8 link_type, u8 addr_type, u8 status)
5637 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
5638 status, MGMT_OP_USER_CONFIRM_REPLY);
/* Complete a pending USER_CONFIRM_NEG_REPLY via the shared helper. */
5641 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5642 u8 link_type, u8 addr_type, u8 status)
5644 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
5646 MGMT_OP_USER_CONFIRM_NEG_REPLY);
/* Complete a pending USER_PASSKEY_REPLY via the shared helper. */
5649 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5650 u8 link_type, u8 addr_type, u8 status)
5652 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
5653 status, MGMT_OP_USER_PASSKEY_REPLY);
/* Complete a pending USER_PASSKEY_NEG_REPLY via the shared helper. */
5656 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5657 u8 link_type, u8 addr_type, u8 status)
5659 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
5661 MGMT_OP_USER_PASSKEY_NEG_REPLY);
/* Emit MGMT_EV_PASSKEY_NOTIFY so userspace can display the passkey and
 * the number of digits the remote side has entered so far.
 */
5664 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
5665 u8 link_type, u8 addr_type, u32 passkey,
5668 struct mgmt_ev_passkey_notify ev;
5670 BT_DBG("%s", hdev->name);
5672 bacpy(&ev.addr.bdaddr, bdaddr);
5673 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5674 ev.passkey = __cpu_to_le32(passkey);
5675 ev.entered = entered;
5677 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_AUTH_FAILED with the translated HCI status after a
 * pairing/authentication failure.
 */
5680 void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5681 u8 addr_type, u8 status)
5683 struct mgmt_ev_auth_failed ev;
5685 bacpy(&ev.addr.bdaddr, bdaddr);
5686 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5687 ev.status = mgmt_status(status);
5689 mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Handle completion of Write Auth Enable: on error fail all pending
 * SET_LINK_SECURITY commands; on success sync HCI_LINK_SECURITY with the
 * controller's HCI_AUTH flag, answer pending commands and broadcast new
 * settings if the flag changed.
 * NOTE(review): decimated chunk — `changed` declaration, else/return
 * lines and braces are elided.
 */
5692 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
5694 struct cmd_lookup match = { NULL, hdev };
5698 u8 mgmt_err = mgmt_status(status);
5699 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
5700 cmd_status_rsp, &mgmt_err);
5704 if (test_bit(HCI_AUTH, &hdev->flags))
5705 changed = !test_and_set_bit(HCI_LINK_SECURITY,
5708 changed = test_and_clear_bit(HCI_LINK_SECURITY,
5711 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
5715 new_settings(hdev, match.sk);
/* Queue a Write EIR command with all-zero data (and clear the cached
 * copy in hdev->eir); no-op if the controller lacks extended inquiry
 * response support.
 */
5721 static void clear_eir(struct hci_request *req)
5723 struct hci_dev *hdev = req->hdev;
5724 struct hci_cp_write_eir cp;
5726 if (!lmp_ext_inq_capable(hdev))
5729 memset(hdev->eir, 0, sizeof(hdev->eir));
5731 memset(&cp, 0, sizeof(cp));
5733 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* Handle completion of Write SSP Mode: on error roll back the
 * HCI_SSP_ENABLED flag (clearing HS with it) and fail pending SET_SSP
 * commands; on success sync the flag, answer pending commands, broadcast
 * settings, then update or clear the EIR accordingly.
 * NOTE(review): decimated chunk — else branches, returns, update_eir()
 * call and several braces are elided.
 */
5736 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
5738 struct cmd_lookup match = { NULL, hdev };
5739 struct hci_request req;
5740 bool changed = false;
5743 u8 mgmt_err = mgmt_status(status);
5745 if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
5746 &hdev->dev_flags)) {
5747 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
5748 new_settings(hdev, NULL);
5751 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
5757 changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
5759 changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
5761 changed = test_and_clear_bit(HCI_HS_ENABLED,
5764 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
5767 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
5770 new_settings(hdev, match.sk);
5775 hci_req_init(&req, hdev);
5777 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
5782 hci_req_run(&req, NULL);
/* Handle completion of the Secure Connections enable command: on error
 * roll back HCI_SC_ENABLED/HCI_SC_ONLY and fail pending SET_SECURE_CONN
 * commands; on success sync the flags, answer pending commands and
 * broadcast new settings if anything changed.
 * NOTE(review): decimated chunk — else branches, returns and braces are
 * elided.
 */
5785 void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
5787 struct cmd_lookup match = { NULL, hdev };
5788 bool changed = false;
5791 u8 mgmt_err = mgmt_status(status);
5794 if (test_and_clear_bit(HCI_SC_ENABLED,
5796 new_settings(hdev, NULL);
5797 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
5800 mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
5801 cmd_status_rsp, &mgmt_err);
5806 changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
5808 changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
5809 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
5812 mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
5813 settings_rsp, &match);
5816 new_settings(hdev, match.sk);
/* mgmt_pending_foreach() callback: capture (and hold a reference to) the
 * first pending command's socket so later events can be targeted at it.
 */
5822 static void sk_lookup(struct pending_cmd *cmd, void *data)
5824 struct cmd_lookup *match = data;
5826 if (match->sk == NULL) {
5827 match->sk = cmd->sk;
5828 sock_hold(match->sk);
/* After a class-of-device update completes, find the socket of whichever
 * command triggered it (SET_DEV_CLASS / ADD_UUID / REMOVE_UUID) and emit
 * MGMT_EV_CLASS_OF_DEV_CHANGED to everyone else.
 * NOTE(review): the status check guarding the event and the sock_put on
 * match.sk are elided in this decimated chunk.
 */
5832 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
5835 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
5837 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
5838 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
5839 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
5842 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
/* Handle completion of a local name change: store the new name in
 * hdev->dev_name and emit MGMT_EV_LOCAL_NAME_CHANGED — unless the change
 * happened as part of powering on, in which case no event is sent.
 * NOTE(review): decimated chunk — the status check, braces and some
 * control flow between the pending-command lookups are elided.
 */
5849 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
5851 struct mgmt_cp_set_local_name ev;
5852 struct pending_cmd *cmd;
5857 memset(&ev, 0, sizeof(ev));
5858 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
5859 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
5861 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
5863 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
5865 /* If this is a HCI command related to powering on the
5866 * HCI dev don't send any mgmt signals.
5868 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
5872 mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
5873 cmd ? cmd->sk : NULL);
/* Complete the pending READ_LOCAL_OOB_DATA command: on error reply with
 * a status only; on success reply with either the extended response
 * (192- and 256-bit hash/randomizer pairs, when Secure Connections is
 * enabled and 256-bit data is available) or the legacy 192-bit-only
 * response.
 * NOTE(review): decimated chunk — the not-found return, the status
 * branch structure and the response-size arguments to cmd_complete()
 * are elided.
 */
5876 void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
5877 u8 *randomizer192, u8 *hash256,
5878 u8 *randomizer256, u8 status)
5880 struct pending_cmd *cmd;
5882 BT_DBG("%s status %u", hdev->name, status);
5884 cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
5889 cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5890 mgmt_status(status));
5892 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
5893 hash256 && randomizer256) {
5894 struct mgmt_rp_read_local_oob_ext_data rp;
5896 memcpy(rp.hash192, hash192, sizeof(rp.hash192));
5897 memcpy(rp.randomizer192, randomizer192,
5898 sizeof(rp.randomizer192));
5900 memcpy(rp.hash256, hash256, sizeof(rp.hash256));
5901 memcpy(rp.randomizer256, randomizer256,
5902 sizeof(rp.randomizer256));
5904 cmd_complete(cmd->sk, hdev->id,
5905 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
5908 struct mgmt_rp_read_local_oob_data rp;
5910 memcpy(rp.hash, hash192, sizeof(rp.hash));
5911 memcpy(rp.randomizer, randomizer192,
5912 sizeof(rp.randomizer));
5914 cmd_complete(cmd->sk, hdev->id,
5915 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
5920 mgmt_pending_remove(cmd);
5923 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5924 u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name,
5925 u8 ssp, u8 *eir, u16 eir_len, u8 *scan_rsp,
5929 struct mgmt_ev_device_found *ev = (void *) buf;
5930 struct smp_irk *irk;
5933 if (!hci_discovery_active(hdev))
5936 /* Make sure that the buffer is big enough. The 5 extra bytes
5937 * are for the potential CoD field.
5939 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
5942 memset(buf, 0, sizeof(buf));
5944 irk = hci_get_irk(hdev, bdaddr, addr_type);
5946 bacpy(&ev->addr.bdaddr, &irk->bdaddr);
5947 ev->addr.type = link_to_bdaddr(link_type, irk->addr_type);
5949 bacpy(&ev->addr.bdaddr, bdaddr);
5950 ev->addr.type = link_to_bdaddr(link_type, addr_type);
5955 ev->flags |= cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
5957 ev->flags |= cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);
5960 memcpy(ev->eir, eir, eir_len);
5962 if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
5963 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
5966 if (scan_rsp_len > 0)
5967 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
5969 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
5970 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
5972 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
5975 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5976 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
5978 struct mgmt_ev_device_found *ev;
5979 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
5982 ev = (struct mgmt_ev_device_found *) buf;
5984 memset(buf, 0, sizeof(buf));
5986 bacpy(&ev->addr.bdaddr, bdaddr);
5987 ev->addr.type = link_to_bdaddr(link_type, addr_type);
5990 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
5993 ev->eir_len = cpu_to_le16(eir_len);
5995 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
5998 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
6000 struct mgmt_ev_discovering ev;
6001 struct pending_cmd *cmd;
6003 BT_DBG("%s discovering %u", hdev->name, discovering);
6006 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
6008 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
6011 u8 type = hdev->discovery.type;
6013 cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
6015 mgmt_pending_remove(cmd);
6018 memset(&ev, 0, sizeof(ev));
6019 ev.type = hdev->discovery.type;
6020 ev.discovering = discovering;
6022 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
6025 int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
6027 struct pending_cmd *cmd;
6028 struct mgmt_ev_device_blocked ev;
6030 cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
6032 bacpy(&ev.addr.bdaddr, bdaddr);
6033 ev.addr.type = type;
6035 return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
6036 cmd ? cmd->sk : NULL);
6039 int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
6041 struct pending_cmd *cmd;
6042 struct mgmt_ev_device_unblocked ev;
6044 cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
6046 bacpy(&ev.addr.bdaddr, bdaddr);
6047 ev.addr.type = type;
6049 return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
6050 cmd ? cmd->sk : NULL);
6053 static void adv_enable_complete(struct hci_dev *hdev, u8 status)
6055 BT_DBG("%s status %u", hdev->name, status);
6057 /* Clear the advertising mgmt setting if we failed to re-enable it */
6059 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
6060 new_settings(hdev, NULL);
6064 void mgmt_reenable_advertising(struct hci_dev *hdev)
6066 struct hci_request req;
6068 if (hci_conn_num(hdev, LE_LINK) > 0)
6071 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
6074 hci_req_init(&req, hdev);
6075 enable_advertising(&req);
6077 /* If this fails we have no option but to let user space know
6078 * that we've disabled advertising.
6080 if (hci_req_run(&req, adv_enable_complete) < 0) {
6081 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
6082 new_settings(hdev, NULL);