2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/l2cap.h>
33 #include <net/bluetooth/mgmt.h>
37 #define MGMT_VERSION 1
38 #define MGMT_REVISION 7
/* Table of mgmt opcodes this kernel supports; reported verbatim to
 * user space by read_commands() (MGMT_OP_READ_COMMANDS).
 * NOTE(review): this listing is a sampled extract — some entries and the
 * closing brace are elided from view. */
40 static const u16 mgmt_commands[] = {
41 MGMT_OP_READ_INDEX_LIST,
44 MGMT_OP_SET_DISCOVERABLE,
45 MGMT_OP_SET_CONNECTABLE,
46 MGMT_OP_SET_FAST_CONNECTABLE,
48 MGMT_OP_SET_LINK_SECURITY,
52 MGMT_OP_SET_DEV_CLASS,
53 MGMT_OP_SET_LOCAL_NAME,
56 MGMT_OP_LOAD_LINK_KEYS,
57 MGMT_OP_LOAD_LONG_TERM_KEYS,
59 MGMT_OP_GET_CONNECTIONS,
60 MGMT_OP_PIN_CODE_REPLY,
61 MGMT_OP_PIN_CODE_NEG_REPLY,
62 MGMT_OP_SET_IO_CAPABILITY,
64 MGMT_OP_CANCEL_PAIR_DEVICE,
65 MGMT_OP_UNPAIR_DEVICE,
66 MGMT_OP_USER_CONFIRM_REPLY,
67 MGMT_OP_USER_CONFIRM_NEG_REPLY,
68 MGMT_OP_USER_PASSKEY_REPLY,
69 MGMT_OP_USER_PASSKEY_NEG_REPLY,
70 MGMT_OP_READ_LOCAL_OOB_DATA,
71 MGMT_OP_ADD_REMOTE_OOB_DATA,
72 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
73 MGMT_OP_START_DISCOVERY,
74 MGMT_OP_STOP_DISCOVERY,
77 MGMT_OP_UNBLOCK_DEVICE,
78 MGMT_OP_SET_DEVICE_ID,
79 MGMT_OP_SET_ADVERTISING,
81 MGMT_OP_SET_STATIC_ADDRESS,
82 MGMT_OP_SET_SCAN_PARAMS,
83 MGMT_OP_SET_SECURE_CONN,
84 MGMT_OP_SET_DEBUG_KEYS,
87 MGMT_OP_GET_CONN_INFO,
88 MGMT_OP_GET_CLOCK_INFO,
90 MGMT_OP_REMOVE_DEVICE,
91 MGMT_OP_LOAD_CONN_PARAM,
92 MGMT_OP_READ_UNCONF_INDEX_LIST,
93 MGMT_OP_READ_CONFIG_INFO,
94 MGMT_OP_SET_EXTERNAL_CONFIG,
/* Table of mgmt events this kernel can emit; reported to user space by
 * read_commands() alongside mgmt_commands[].
 * NOTE(review): sampled extract — some entries and the closing brace are
 * elided from view. */
97 static const u16 mgmt_events[] = {
98 MGMT_EV_CONTROLLER_ERROR,
100 MGMT_EV_INDEX_REMOVED,
101 MGMT_EV_NEW_SETTINGS,
102 MGMT_EV_CLASS_OF_DEV_CHANGED,
103 MGMT_EV_LOCAL_NAME_CHANGED,
104 MGMT_EV_NEW_LINK_KEY,
105 MGMT_EV_NEW_LONG_TERM_KEY,
106 MGMT_EV_DEVICE_CONNECTED,
107 MGMT_EV_DEVICE_DISCONNECTED,
108 MGMT_EV_CONNECT_FAILED,
109 MGMT_EV_PIN_CODE_REQUEST,
110 MGMT_EV_USER_CONFIRM_REQUEST,
111 MGMT_EV_USER_PASSKEY_REQUEST,
113 MGMT_EV_DEVICE_FOUND,
115 MGMT_EV_DEVICE_BLOCKED,
116 MGMT_EV_DEVICE_UNBLOCKED,
117 MGMT_EV_DEVICE_UNPAIRED,
118 MGMT_EV_PASSKEY_NOTIFY,
121 MGMT_EV_DEVICE_ADDED,
122 MGMT_EV_DEVICE_REMOVED,
123 MGMT_EV_NEW_CONN_PARAM,
124 MGMT_EV_UNCONF_INDEX_ADDED,
125 MGMT_EV_UNCONF_INDEX_REMOVED,
126 MGMT_EV_NEW_CONFIG_OPTIONS,
129 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
131 #define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
132 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
135 struct list_head list;
143 /* HCI to MGMT error code conversion table */
/* Indexed directly by the raw HCI status byte (see mgmt_status() below);
 * each slot's comment names the HCI error it translates. Entries past the
 * end of this table fall back to MGMT_STATUS_FAILED. */
144 static u8 mgmt_status_table[] = {
146 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
147 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
148 MGMT_STATUS_FAILED, /* Hardware Failure */
149 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
150 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
151 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
152 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
153 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
154 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
155 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
156 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
157 MGMT_STATUS_BUSY, /* Command Disallowed */
158 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
159 MGMT_STATUS_REJECTED, /* Rejected Security */
160 MGMT_STATUS_REJECTED, /* Rejected Personal */
161 MGMT_STATUS_TIMEOUT, /* Host Timeout */
162 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
163 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
164 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
165 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
166 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
167 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
168 MGMT_STATUS_BUSY, /* Repeated Attempts */
169 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
170 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
171 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
172 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
173 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
174 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
175 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
176 MGMT_STATUS_FAILED, /* Unspecified Error */
177 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
178 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
179 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
180 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
181 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
182 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
183 MGMT_STATUS_FAILED, /* Unit Link Key Used */
184 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
185 MGMT_STATUS_TIMEOUT, /* Instant Passed */
186 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
187 MGMT_STATUS_FAILED, /* Transaction Collision */
188 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
189 MGMT_STATUS_REJECTED, /* QoS Rejected */
190 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
191 MGMT_STATUS_REJECTED, /* Insufficient Security */
192 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
193 MGMT_STATUS_BUSY, /* Role Switch Pending */
194 MGMT_STATUS_FAILED, /* Slot Violation */
195 MGMT_STATUS_FAILED, /* Role Switch Failed */
196 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
197 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
198 MGMT_STATUS_BUSY, /* Host Busy Pairing */
199 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
200 MGMT_STATUS_BUSY, /* Controller Busy */
201 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
202 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
203 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
204 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
205 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
/* Translate a raw HCI status byte into the mgmt-API status code via
 * mgmt_status_table[]; any code beyond the table maps to
 * MGMT_STATUS_FAILED. */
208 static u8 mgmt_status(u8 hci_status)
210 if (hci_status < ARRAY_SIZE(mgmt_status_table))
211 return mgmt_status_table[hci_status];
213 return MGMT_STATUS_FAILED;
/* Build an mgmt event skb (header + payload) and broadcast it to all
 * mgmt control sockets except @skip_sk. The header index is hdev->id
 * when a device is given, MGMT_INDEX_NONE otherwise.
 * NOTE(review): sampled extract — the skb declaration, the NULL-alloc
 * check and the if/else around the index assignment are elided here. */
216 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
217 struct sock *skip_sk)
220 struct mgmt_hdr *hdr;
222 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
226 hdr = (void *) skb_put(skb, sizeof(*hdr));
227 hdr->opcode = cpu_to_le16(event);
229 hdr->index = cpu_to_le16(hdev->id);
231 hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
232 hdr->len = cpu_to_le16(data_len);
235 memcpy(skb_put(skb, data_len), data, data_len);
238 __net_timestamp(skb);
240 hci_send_to_control(skb, skip_sk);
/* Send an MGMT_EV_CMD_STATUS event for command @cmd with @status back to
 * the requesting socket @sk only (sock_queue_rcv_skb, not broadcast).
 * NOTE(review): sampled extract — skb/err declarations, the NULL-alloc
 * check and the ev->status assignment are elided from view. */
246 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
249 struct mgmt_hdr *hdr;
250 struct mgmt_ev_cmd_status *ev;
253 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
255 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
259 hdr = (void *) skb_put(skb, sizeof(*hdr));
261 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
262 hdr->index = cpu_to_le16(index);
263 hdr->len = cpu_to_le16(sizeof(*ev));
265 ev = (void *) skb_put(skb, sizeof(*ev));
267 ev->opcode = cpu_to_le16(cmd);
269 err = sock_queue_rcv_skb(sk, skb);
/* Send an MGMT_EV_CMD_COMPLETE event for @cmd to socket @sk, with an
 * optional response payload @rp of @rp_len bytes appended after the
 * fixed ev header.
 * NOTE(review): sampled extract — skb/err declarations, NULL-alloc check,
 * ev->status assignment and the "if (rp)" guard before memcpy are elided. */
276 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
277 void *rp, size_t rp_len)
280 struct mgmt_hdr *hdr;
281 struct mgmt_ev_cmd_complete *ev;
284 BT_DBG("sock %p", sk);
286 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
290 hdr = (void *) skb_put(skb, sizeof(*hdr));
292 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
293 hdr->index = cpu_to_le16(index);
294 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
296 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
297 ev->opcode = cpu_to_le16(cmd);
301 memcpy(ev->data, rp, rp_len);
303 err = sock_queue_rcv_skb(sk, skb);
/* MGMT_OP_READ_VERSION handler: reply with the interface's
 * MGMT_VERSION/MGMT_REVISION pair on the global (MGMT_INDEX_NONE) index. */
310 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
313 struct mgmt_rp_read_version rp;
315 BT_DBG("sock %p", sk);
317 rp.version = MGMT_VERSION;
318 rp.revision = cpu_to_le16(MGMT_REVISION);
320 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
/* MGMT_OP_READ_COMMANDS handler: allocate a response holding the counts
 * plus a flat little-endian array of all supported opcodes followed by
 * all supported events, then complete the command.
 * NOTE(review): sampled extract — the rp_size/i/opcode/err declarations,
 * the kmalloc NULL check and the trailing kfree/return are elided. */
324 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
327 struct mgmt_rp_read_commands *rp;
328 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
329 const u16 num_events = ARRAY_SIZE(mgmt_events);
334 BT_DBG("sock %p", sk);
336 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
338 rp = kmalloc(rp_size, GFP_KERNEL);
342 rp->num_commands = cpu_to_le16(num_commands);
343 rp->num_events = cpu_to_le16(num_events);
/* put_unaligned_le16 is used because rp->opcodes may not be 2-byte aligned */
345 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
346 put_unaligned_le16(mgmt_commands[i], opcode);
348 for (i = 0; i < num_events; i++, opcode++)
349 put_unaligned_le16(mgmt_events[i], opcode);
351 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
/* MGMT_OP_READ_INDEX_LIST handler: under hci_dev_list_lock, first count
 * configured BR/EDR controllers to size the reply, then fill in their ids,
 * skipping devices in SETUP/USER_CHANNEL state and raw-only quirk devices.
 * The count is re-taken in the second pass since the skip conditions differ
 * from the sizing pass (sizing is an upper bound).
 * NOTE(review): sampled extract — count/rp_len/err declarations, the
 * "count = 0" resets, kmalloc NULL-check body and kfree are elided. */
358 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
361 struct mgmt_rp_read_index_list *rp;
367 BT_DBG("sock %p", sk);
369 read_lock(&hci_dev_list_lock);
372 list_for_each_entry(d, &hci_dev_list, list) {
373 if (d->dev_type == HCI_BREDR &&
374 !test_bit(HCI_UNCONFIGURED, &d->dev_flags))
/* GFP_ATOMIC: allocation happens while holding hci_dev_list_lock */
378 rp_len = sizeof(*rp) + (2 * count);
379 rp = kmalloc(rp_len, GFP_ATOMIC);
381 read_unlock(&hci_dev_list_lock);
386 list_for_each_entry(d, &hci_dev_list, list) {
387 if (test_bit(HCI_SETUP, &d->dev_flags) ||
388 test_bit(HCI_USER_CHANNEL, &d->dev_flags))
391 /* Devices marked as raw-only are neither configured
392 * nor unconfigured controllers.
394 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
397 if (d->dev_type == HCI_BREDR &&
398 !test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
399 rp->index[count++] = cpu_to_le16(d->id);
400 BT_DBG("Added hci%u", d->id);
404 rp->num_controllers = cpu_to_le16(count);
/* Recompute rp_len from the final count — may be smaller than allocated */
405 rp_len = sizeof(*rp) + (2 * count);
407 read_unlock(&hci_dev_list_lock);
409 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
/* MGMT_OP_READ_UNCONF_INDEX_LIST handler: mirror image of
 * read_index_list(), but selecting BR/EDR controllers that DO have
 * HCI_UNCONFIGURED set. Same two-pass count/fill structure under
 * hci_dev_list_lock, same SETUP/USER_CHANNEL/raw-only exclusions.
 * NOTE(review): sampled extract — declarations, count resets, kmalloc
 * NULL-check body and kfree are elided from view. */
417 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
418 void *data, u16 data_len)
420 struct mgmt_rp_read_unconf_index_list *rp;
426 BT_DBG("sock %p", sk);
428 read_lock(&hci_dev_list_lock);
431 list_for_each_entry(d, &hci_dev_list, list) {
432 if (d->dev_type == HCI_BREDR &&
433 test_bit(HCI_UNCONFIGURED, &d->dev_flags))
437 rp_len = sizeof(*rp) + (2 * count);
438 rp = kmalloc(rp_len, GFP_ATOMIC);
440 read_unlock(&hci_dev_list_lock);
445 list_for_each_entry(d, &hci_dev_list, list) {
446 if (test_bit(HCI_SETUP, &d->dev_flags) ||
447 test_bit(HCI_USER_CHANNEL, &d->dev_flags))
450 /* Devices marked as raw-only are neither configured
451 * nor unconfigured controllers.
453 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
456 if (d->dev_type == HCI_BREDR &&
457 test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
458 rp->index[count++] = cpu_to_le16(d->id);
459 BT_DBG("Added hci%u", d->id);
463 rp->num_controllers = cpu_to_le16(count);
464 rp_len = sizeof(*rp) + (2 * count);
466 read_unlock(&hci_dev_list_lock);
468 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_UNCONF_INDEX_LIST,
/* A controller counts as configured when it needs no external config
 * (or already received it) and, if it ships with an invalid BD_ADDR,
 * a public address has been set. The elided lines return false on
 * either failing check and true at the end — TODO confirm against the
 * full source. */
476 static bool is_configured(struct hci_dev *hdev)
478 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
479 !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
482 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
483 !bacmp(&hdev->public_addr, BDADDR_ANY))
/* Build the little-endian bitmask of configuration options that are
 * still missing for this controller: external config not yet applied,
 * and/or a public address not yet programmed over an invalid one. */
489 static __le32 get_missing_options(struct hci_dev *hdev)
493 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
494 !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
495 options |= MGMT_OPTION_EXTERNAL_CONFIG;
497 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
498 !bacmp(&hdev->public_addr, BDADDR_ANY))
499 options |= MGMT_OPTION_PUBLIC_ADDRESS;
501 return cpu_to_le32(options);
/* Broadcast MGMT_EV_NEW_CONFIG_OPTIONS with the current missing-options
 * mask to all mgmt sockets except @skip. */
504 static int new_options(struct hci_dev *hdev, struct sock *skip)
506 __le32 options = get_missing_options(hdev);
508 return mgmt_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
509 sizeof(options), skip);
/* Complete @opcode for @sk with the current missing-options mask as the
 * response payload. */
512 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
514 __le32 options = get_missing_options(hdev);
516 return cmd_complete(sk, hdev->id, opcode, 0, &options,
/* MGMT_OP_READ_CONFIG_INFO handler: report manufacturer plus which config
 * options this controller supports (external config quirk, settable public
 * address via hdev->set_bdaddr) and which are still missing.
 * NOTE(review): sampled extract — the options declaration and the
 * hci_dev_lock() matching the unlock below are elided from view. */
520 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
521 void *data, u16 data_len)
523 struct mgmt_rp_read_config_info rp;
526 BT_DBG("sock %p %s", sk, hdev->name);
530 memset(&rp, 0, sizeof(rp));
531 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
533 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
534 options |= MGMT_OPTION_EXTERNAL_CONFIG;
536 if (hdev->set_bdaddr)
537 options |= MGMT_OPTION_PUBLIC_ADDRESS;
539 rp.supported_options = cpu_to_le32(options);
540 rp.missing_options = get_missing_options(hdev);
542 hci_dev_unlock(hdev);
544 return cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0, &rp,
/* Compute the MGMT_SETTING_* bitmask this controller could ever support,
 * based on capabilities: always powered/pairable/debug-keys, BR/EDR
 * settings when lmp_bredr_capable (SSP/HS when SSP-capable, secure
 * connections when SC-capable or forced), LE settings when
 * lmp_le_capable, and configuration when externally configurable.
 * NOTE(review): sampled extract — the settings declaration/return and
 * some closing braces are elided. */
548 static u32 get_supported_settings(struct hci_dev *hdev)
552 settings |= MGMT_SETTING_POWERED;
553 settings |= MGMT_SETTING_PAIRABLE;
554 settings |= MGMT_SETTING_DEBUG_KEYS;
556 if (lmp_bredr_capable(hdev)) {
557 settings |= MGMT_SETTING_CONNECTABLE;
/* Fast connectable needs interlaced page scan, introduced in 1.2 */
558 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
559 settings |= MGMT_SETTING_FAST_CONNECTABLE;
560 settings |= MGMT_SETTING_DISCOVERABLE;
561 settings |= MGMT_SETTING_BREDR;
562 settings |= MGMT_SETTING_LINK_SECURITY;
564 if (lmp_ssp_capable(hdev)) {
565 settings |= MGMT_SETTING_SSP;
566 settings |= MGMT_SETTING_HS;
569 if (lmp_sc_capable(hdev) ||
570 test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
571 settings |= MGMT_SETTING_SECURE_CONN;
574 if (lmp_le_capable(hdev)) {
575 settings |= MGMT_SETTING_LE;
576 settings |= MGMT_SETTING_ADVERTISING;
577 settings |= MGMT_SETTING_PRIVACY;
580 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
582 settings |= MGMT_SETTING_CONFIGURATION;
/* Compute the MGMT_SETTING_* bitmask for the controller's CURRENT state:
 * each setting bit mirrors the corresponding hdev->dev_flags bit (or
 * hdev_is_powered() for POWERED).
 * NOTE(review): sampled extract — the settings declaration and the final
 * return are elided from view. */
587 static u32 get_current_settings(struct hci_dev *hdev)
591 if (hdev_is_powered(hdev))
592 settings |= MGMT_SETTING_POWERED;
594 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
595 settings |= MGMT_SETTING_CONNECTABLE;
597 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
598 settings |= MGMT_SETTING_FAST_CONNECTABLE;
600 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
601 settings |= MGMT_SETTING_DISCOVERABLE;
603 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
604 settings |= MGMT_SETTING_PAIRABLE;
606 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
607 settings |= MGMT_SETTING_BREDR;
609 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
610 settings |= MGMT_SETTING_LE;
612 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
613 settings |= MGMT_SETTING_LINK_SECURITY;
615 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
616 settings |= MGMT_SETTING_SSP;
618 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
619 settings |= MGMT_SETTING_HS;
621 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
622 settings |= MGMT_SETTING_ADVERTISING;
624 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
625 settings |= MGMT_SETTING_SECURE_CONN;
627 if (test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags))
628 settings |= MGMT_SETTING_DEBUG_KEYS;
630 if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
631 settings |= MGMT_SETTING_PRIVACY;
636 #define PNP_INFO_SVCLASS_ID 0x1200
/* Append an EIR field listing the device's 16-bit service UUIDs to
 * @data, writing at most @len bytes; returns the advanced write pointer.
 * The field type is downgraded from UUID16_ALL to UUID16_SOME when the
 * buffer cannot hold every UUID. The PnP Information service class is
 * skipped (carried via the dedicated Device ID field instead).
 * NOTE(review): sampled extract — uuid16 declaration, the lazy
 * uuids_start initialisation and the break/return statements are elided. */
638 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
640 u8 *ptr = data, *uuids_start = NULL;
641 struct bt_uuid *uuid;
646 list_for_each_entry(uuid, &hdev->uuids, list) {
649 if (uuid->size != 16)
/* 16-bit alias lives at bytes 12-13 of the stored 128-bit UUID */
652 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
656 if (uuid16 == PNP_INFO_SVCLASS_ID)
662 uuids_start[1] = EIR_UUID16_ALL;
666 /* Stop if not enough space to put next UUID */
667 if ((ptr - data) + sizeof(u16) > len) {
668 uuids_start[1] = EIR_UUID16_SOME;
/* Emit the UUID little-endian, low byte first */
672 *ptr++ = (uuid16 & 0x00ff);
673 *ptr++ = (uuid16 & 0xff00) >> 8;
674 uuids_start[0] += sizeof(uuid16);
/* Append an EIR field of 32-bit service UUIDs to @data (at most @len
 * bytes) and return the advanced pointer; downgrades UUID32_ALL to
 * UUID32_SOME on truncation. Same structure as create_uuid16_list().
 * NOTE(review): sampled extract — the lazy uuids_start initialisation,
 * ptr advance and break/return statements are elided from view. */
680 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
682 u8 *ptr = data, *uuids_start = NULL;
683 struct bt_uuid *uuid;
688 list_for_each_entry(uuid, &hdev->uuids, list) {
689 if (uuid->size != 32)
695 uuids_start[1] = EIR_UUID32_ALL;
699 /* Stop if not enough space to put next UUID */
700 if ((ptr - data) + sizeof(u32) > len) {
701 uuids_start[1] = EIR_UUID32_SOME;
/* 32-bit alias lives at bytes 12-15 of the stored 128-bit UUID */
705 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
707 uuids_start[0] += sizeof(u32);
/* Append an EIR field of full 128-bit service UUIDs to @data (at most
 * @len bytes) and return the advanced pointer; downgrades UUID128_ALL to
 * UUID128_SOME on truncation. Same structure as the 16/32-bit variants.
 * NOTE(review): sampled extract — the lazy uuids_start initialisation,
 * ptr advance and break/return statements are elided from view. */
713 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
715 u8 *ptr = data, *uuids_start = NULL;
716 struct bt_uuid *uuid;
721 list_for_each_entry(uuid, &hdev->uuids, list) {
722 if (uuid->size != 128)
728 uuids_start[1] = EIR_UUID128_ALL;
732 /* Stop if not enough space to put next UUID */
733 if ((ptr - data) + 16 > len) {
734 uuids_start[1] = EIR_UUID128_SOME;
738 memcpy(ptr, uuid->uuid, 16);
740 uuids_start[0] += 16;
/* Look up a pending mgmt command on @hdev by opcode; the elided lines
 * return the matching cmd, or NULL when none is pending. */
746 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
748 struct pending_cmd *cmd;
750 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
751 if (cmd->opcode == opcode)
/* Like mgmt_pending_find(), but additionally require the command's
 * user_data pointer to match @data; non-matching user_data entries are
 * skipped before the opcode is compared. */
758 static struct pending_cmd *mgmt_pending_find_data(u16 opcode,
759 struct hci_dev *hdev,
762 struct pending_cmd *cmd;
764 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
765 if (cmd->user_data != data)
767 if (cmd->opcode == opcode)
/* Build LE scan response data into @ptr: currently just the local name,
 * emitted as EIR_NAME_COMPLETE, or truncated to the available space and
 * marked EIR_NAME_SHORT. Returns the total data length (ad_len).
 * NOTE(review): sampled extract — ad_len/name_len declarations, the
 * "name present" guard and the final return are elided from view. */
774 static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
779 name_len = strlen(hdev->dev_name);
/* 2 bytes of EIR field overhead: length byte + type byte */
781 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
783 if (name_len > max_len) {
785 ptr[1] = EIR_NAME_SHORT;
787 ptr[1] = EIR_NAME_COMPLETE;
/* EIR length byte counts the type byte plus the name payload */
789 ptr[0] = name_len + 1;
791 memcpy(ptr + 2, hdev->dev_name, name_len);
793 ad_len += (name_len + 2);
794 ptr += (name_len + 2);
/* Queue an LE Set Scan Response Data command on @req, but only when LE
 * is enabled and the freshly built data actually differs from what the
 * controller already has (cached in hdev->scan_rsp_data). */
800 static void update_scan_rsp_data(struct hci_request *req)
802 struct hci_dev *hdev = req->hdev;
803 struct hci_cp_le_set_scan_rsp_data cp;
806 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
809 memset(&cp, 0, sizeof(cp));
811 len = create_scan_rsp_data(hdev, cp.data);
/* Skip the HCI round-trip when the controller already has this data */
813 if (hdev->scan_rsp_data_len == len &&
814 memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
817 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
818 hdev->scan_rsp_data_len = len;
822 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
/* Return the LE advertising discoverability flag (LE_AD_GENERAL /
 * LE_AD_LIMITED, or 0) for the current — or pending — discoverable
 * state. A pending SET_DISCOVERABLE command takes precedence over the
 * dev_flags bits since those have not been updated yet.
 * NOTE(review): sampled extract — the "if (cmd)" guard, the cp->val
 * comparison for the general case and the final return 0 are elided. */
825 static u8 get_adv_discov_flags(struct hci_dev *hdev)
827 struct pending_cmd *cmd;
829 /* If there's a pending mgmt command the flags will not yet have
830 * their final values, so check for this first.
832 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
834 struct mgmt_mode *cp = cmd->param;
836 return LE_AD_GENERAL;
837 else if (cp->val == 0x02)
838 return LE_AD_LIMITED;
840 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
841 return LE_AD_LIMITED;
842 else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
843 return LE_AD_GENERAL;
/* Build LE advertising data into @ptr: the flags field (discoverability
 * plus LE_AD_NO_BREDR when BR/EDR is disabled) and, when valid, a TX
 * power field. Returns the total length.
 * NOTE(review): sampled extract — the flags-field emission, ptr/ad_len
 * advances and the final return are elided from view. */
849 static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
851 u8 ad_len = 0, flags = 0;
853 flags |= get_adv_discov_flags(hdev);
855 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
856 flags |= LE_AD_NO_BREDR;
859 BT_DBG("adv flags 0x%02x", flags);
/* HCI_TX_POWER_INVALID means the controller never reported a TX power */
869 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
871 ptr[1] = EIR_TX_POWER;
872 ptr[2] = (u8) hdev->adv_tx_power;
/* Queue an LE Set Advertising Data command on @req, but only when LE is
 * enabled and the freshly built data differs from the cached copy in
 * hdev->adv_data. Mirrors update_scan_rsp_data(). */
881 static void update_adv_data(struct hci_request *req)
883 struct hci_dev *hdev = req->hdev;
884 struct hci_cp_le_set_adv_data cp;
887 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
890 memset(&cp, 0, sizeof(cp));
892 len = create_adv_data(hdev, cp.data);
894 if (hdev->adv_data_len == len &&
895 memcmp(cp.data, hdev->adv_data, len) == 0)
898 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
899 hdev->adv_data_len = len;
903 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
/* Build the BR/EDR Extended Inquiry Response buffer in @data: local name
 * (complete or shortened), inquiry TX power, Device ID record when a
 * devid source is set, then the 16/32/128-bit UUID lists, each bounded
 * by the remaining space in HCI_MAX_EIR_LENGTH.
 * NOTE(review): sampled extract — the ptr/name_len declarations, the
 * name-length truncation branch and several ptr advances are elided. */
906 static void create_eir(struct hci_dev *hdev, u8 *data)
911 name_len = strlen(hdev->dev_name);
917 ptr[1] = EIR_NAME_SHORT;
919 ptr[1] = EIR_NAME_COMPLETE;
921 /* EIR Data length */
922 ptr[0] = name_len + 1;
924 memcpy(ptr + 2, hdev->dev_name, name_len);
926 ptr += (name_len + 2);
929 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
931 ptr[1] = EIR_TX_POWER;
932 ptr[2] = (u8) hdev->inq_tx_power;
937 if (hdev->devid_source > 0) {
939 ptr[1] = EIR_DEVICE_ID;
/* Device ID record: source, vendor, product, version — all LE16 */
941 put_unaligned_le16(hdev->devid_source, ptr + 2);
942 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
943 put_unaligned_le16(hdev->devid_product, ptr + 6);
944 put_unaligned_le16(hdev->devid_version, ptr + 8);
949 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
950 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
951 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
/* Queue a Write Extended Inquiry Response command on @req. Bails out if
 * the device is off, lacks extended-inquiry support, has SSP disabled,
 * or the service cache is active; also skips the write when the new EIR
 * matches the cached copy in hdev->eir. */
954 static void update_eir(struct hci_request *req)
956 struct hci_dev *hdev = req->hdev;
957 struct hci_cp_write_eir cp;
959 if (!hdev_is_powered(hdev))
962 if (!lmp_ext_inq_capable(hdev))
965 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
968 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
971 memset(&cp, 0, sizeof(cp));
973 create_eir(hdev, cp.data);
975 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
978 memcpy(hdev->eir, cp.data, sizeof(cp.data));
980 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* OR together the service-class hints of every registered UUID; the
 * result becomes the service-class octet of the Class of Device. */
983 static u8 get_service_classes(struct hci_dev *hdev)
985 struct bt_uuid *uuid;
988 list_for_each_entry(uuid, &hdev->uuids, list)
989 val |= uuid->svc_hint;
/* Queue a Write Class of Device command on @req built from minor class,
 * major class and the aggregated service classes; skipped when powered
 * off, BR/EDR disabled, the service cache is active, or the class is
 * unchanged. The limited-discoverable branch body (setting the limited
 * bit in cod[1]) is elided from this sampled view. */
994 static void update_class(struct hci_request *req)
996 struct hci_dev *hdev = req->hdev;
999 BT_DBG("%s", hdev->name);
1001 if (!hdev_is_powered(hdev))
1004 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1007 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1010 cod[0] = hdev->minor_class;
1011 cod[1] = hdev->major_class;
1012 cod[2] = get_service_classes(hdev);
1014 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
1017 if (memcmp(cod, hdev->dev_class, 3) == 0)
1020 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
/* Return the (possibly pending) connectable state: a queued
 * SET_CONNECTABLE command's requested value wins over the current
 * HCI_CONNECTABLE flag. The "return cp->val" inside the cmd branch is
 * elided from this sampled view. */
1023 static bool get_connectable(struct hci_dev *hdev)
1025 struct pending_cmd *cmd;
1027 /* If there's a pending mgmt command the flag will not yet have
1028 * it's final value, so check for this first.
1030 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1032 struct mgmt_mode *cp = cmd->param;
1036 return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
/* Queue LE Set Advertising Parameters + Enable on @req. Advertising type
 * follows connectability (ADV_IND vs ADV_NONCONN_IND); the own address
 * type comes from hci_update_random_address(), which is allowed to
 * program a fresh random address because HCI_ADVERTISING is cleared
 * first. Aborts (elided return) if the address update fails. */
1039 static void enable_advertising(struct hci_request *req)
1041 struct hci_dev *hdev = req->hdev;
1042 struct hci_cp_le_set_adv_param cp;
1043 u8 own_addr_type, enable = 0x01;
1046 /* Clear the HCI_ADVERTISING bit temporarily so that the
1047 * hci_update_random_address knows that it's safe to go ahead
1048 * and write a new random address. The flag will be set back on
1049 * as soon as the SET_ADV_ENABLE HCI command completes.
1051 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
1053 connectable = get_connectable(hdev);
1055 /* Set require_privacy to true only when non-connectable
1056 * advertising is used. In that case it is fine to use a
1057 * non-resolvable private address.
1059 if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
1062 memset(&cp, 0, sizeof(cp));
/* 0x0800 * 0.625 ms = 1.28 s advertising interval */
1063 cp.min_interval = cpu_to_le16(0x0800);
1064 cp.max_interval = cpu_to_le16(0x0800);
1065 cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
1066 cp.own_address_type = own_addr_type;
1067 cp.channel_map = hdev->le_adv_channel_map;
1069 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1071 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
/* Queue an LE Set Advertising Enable(0x00) command on @req; the local
 * "enable = 0x00" declaration is elided from this sampled view. */
1074 static void disable_advertising(struct hci_request *req)
1078 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
/* Delayed-work handler that expires the service cache: clears
 * HCI_SERVICE_CACHE and runs an HCI request to push the now-uncached
 * class/EIR to the controller. The update_eir()/update_class() calls
 * and the hci_dev_lock() matching the unlock are elided from this
 * sampled view. */
1081 static void service_cache_off(struct work_struct *work)
1083 struct hci_dev *hdev = container_of(work, struct hci_dev,
1084 service_cache.work);
1085 struct hci_request req;
1087 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1090 hci_req_init(&req, hdev);
1097 hci_dev_unlock(hdev);
1099 hci_req_run(&req, NULL);
/* Delayed-work handler for resolvable-private-address rotation: mark the
 * RPA expired and, when advertising with no active LE connections,
 * restart advertising so enable_advertising() programs a fresh RPA. */
1102 static void rpa_expired(struct work_struct *work)
1104 struct hci_dev *hdev = container_of(work, struct hci_dev,
1106 struct hci_request req;
1110 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
/* Can't rotate the address while connected or not advertising */
1112 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
1113 hci_conn_num(hdev, LE_LINK) > 0)
1116 /* The generation of a new RPA and programming it into the
1117 * controller happens in the enable_advertising() function.
1120 hci_req_init(&req, hdev);
1122 disable_advertising(&req);
1123 enable_advertising(&req);
1125 hci_req_run(&req, NULL);
/* One-time per-device mgmt initialisation, guarded by test_and_set of
 * HCI_MGMT: set up the service-cache and RPA-expiry delayed works and
 * clear the implicit pairable default (mgmt requires explicit enable). */
1128 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1130 if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
1133 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1134 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1136 /* Non-mgmt controlled devices get this bit set
1137 * implicitly so that pairing works for them, however
1138 * for mgmt we require user-space to explicitly enable
1141 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
/* MGMT_OP_READ_INFO handler: fill mgmt_rp_read_info with address, HCI
 * version, manufacturer, supported/current settings masks, class of
 * device and both names, then complete the command. The hci_dev_lock()
 * matching the unlock below is elided from this sampled view. */
1144 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1145 void *data, u16 data_len)
1147 struct mgmt_rp_read_info rp;
1149 BT_DBG("sock %p %s", sk, hdev->name);
1153 memset(&rp, 0, sizeof(rp));
1155 bacpy(&rp.bdaddr, &hdev->bdaddr);
1157 rp.version = hdev->hci_ver;
1158 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1160 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1161 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1163 memcpy(rp.dev_class, hdev->dev_class, 3);
1165 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1166 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1168 hci_dev_unlock(hdev);
1170 return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1174 static void mgmt_pending_free(struct pending_cmd *cmd)
/* Allocate and enqueue a pending mgmt command on hdev->mgmt_pending,
 * copying @len bytes of @data into cmd->param.
 * NOTE(review): sampled extract — the NULL checks after both
 * allocations (with kfree(cmd) on param failure), the sk reference
 * handling and the final return are elided from view. */
1181 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
1182 struct hci_dev *hdev, void *data,
1185 struct pending_cmd *cmd;
1187 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1191 cmd->opcode = opcode;
1192 cmd->index = hdev->id;
1194 cmd->param = kmalloc(len, GFP_KERNEL);
1201 memcpy(cmd->param, data, len);
1206 list_add(&cmd->list, &hdev->mgmt_pending);
/* Invoke @cb on every pending command whose opcode matches (opcode 0
 * matches all). Uses the _safe iterator because callbacks may unlink
 * the entry; the cb(cmd, data) call itself is elided from this view. */
1211 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
1212 void (*cb)(struct pending_cmd *cmd,
1216 struct pending_cmd *cmd, *tmp;
1218 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
1219 if (opcode > 0 && cmd->opcode != opcode)
/* Unlink a pending command from its device list and free it. */
1226 static void mgmt_pending_remove(struct pending_cmd *cmd)
1228 list_del(&cmd->list);
1229 mgmt_pending_free(cmd);
/* Complete @opcode for @sk with the current settings mask as payload —
 * the standard success response for Set-* commands. */
1232 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1234 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1236 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
/* Completion callback for clean_up_hci_state(): once no connections
 * remain, cancel the delayed power-off and run it immediately. */
1240 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status)
1242 BT_DBG("%s status 0x%02x", hdev->name, status);
1244 if (hci_conn_count(hdev) == 0) {
1245 cancel_delayed_work(&hdev->power_off);
1246 queue_work(hdev->req_workqueue, &hdev->power_off.work);
/* Queue whatever HCI commands are needed on @req to abort the current
 * discovery: inquiry cancel or LE scan disable while FINDING, remote
 * name request cancel while RESOLVING, and LE scan disable for plain
 * passive scanning. The break statements, the else branch pairing of
 * the LE-scan path and the lookup's NAME_PENDING argument are elided
 * from this sampled view. */
1250 static void hci_stop_discovery(struct hci_request *req)
1252 struct hci_dev *hdev = req->hdev;
1253 struct hci_cp_remote_name_req_cancel cp;
1254 struct inquiry_entry *e;
1256 switch (hdev->discovery.state) {
1257 case DISCOVERY_FINDING:
1258 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
1259 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
1261 cancel_delayed_work(&hdev->le_scan_disable);
1262 hci_req_add_le_scan_disable(req);
1267 case DISCOVERY_RESOLVING:
1268 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
1273 bacpy(&cp.bdaddr, &e->data.bdaddr);
1274 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
1280 /* Passive scanning */
1281 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1282 hci_req_add_le_scan_disable(req);
/* Build and run one HCI request that quiesces the controller before
 * power-off: disable page/inquiry scan, stop advertising, abort
 * discovery, then tear down every connection — disconnect established
 * links (reason 0x15, Power Off), cancel in-flight connection attempts,
 * and reject incoming requests.
 * NOTE(review): sampled extract — the "u8 scan = 0x00" local, the
 * switch-case labels for connected/connecting/incoming states and the
 * sizeof arguments of some hci_req_add calls are elided from view. */
1287 static int clean_up_hci_state(struct hci_dev *hdev)
1289 struct hci_request req;
1290 struct hci_conn *conn;
1292 hci_req_init(&req, hdev);
1294 if (test_bit(HCI_ISCAN, &hdev->flags) ||
1295 test_bit(HCI_PSCAN, &hdev->flags)) {
1297 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1300 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
1301 disable_advertising(&req);
1303 hci_stop_discovery(&req);
1305 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1306 struct hci_cp_disconnect dc;
1307 struct hci_cp_reject_conn_req rej;
1309 switch (conn->state) {
1312 dc.handle = cpu_to_le16(conn->handle);
1313 dc.reason = 0x15; /* Terminated due to Power Off */
1314 hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1317 if (conn->type == LE_LINK)
1318 hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
1320 else if (conn->type == ACL_LINK)
1321 hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
1325 bacpy(&rej.bdaddr, &conn->dst);
1326 rej.reason = 0x15; /* Terminated due to Power Off */
1327 if (conn->type == ACL_LINK)
1328 hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
1330 else if (conn->type == SCO_LINK)
1331 hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
1337 return hci_req_run(&req, clean_up_hci_complete);
/* MGMT_OP_SET_POWERED handler: validate the on/off value, reject a
 * duplicate pending command, short-circuit the auto-off case and the
 * no-change case, then register a pending command and either queue
 * power_on work or cleanly shut the controller down (with a timeout
 * fallback to forced power-off; -ENODATA means nothing was queued so
 * power off immediately).
 * NOTE(review): sampled extract — the hci_dev_lock(), MGMT_STATUS_BUSY
 * argument, NULL-cmd/-ENOMEM handling, the cp->val branch and failed/
 * goto labels are elided from view. */
1340 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1343 struct mgmt_mode *cp = data;
1344 struct pending_cmd *cmd;
1347 BT_DBG("request for %s", hdev->name);
1349 if (cp->val != 0x00 && cp->val != 0x01)
1350 return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1351 MGMT_STATUS_INVALID_PARAMS);
1355 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
1356 err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
/* Auto-off pending: the device is already up, just cancel the timer */
1361 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1362 cancel_delayed_work(&hdev->power_off);
1365 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
1367 err = mgmt_powered(hdev, 1);
/* Requested state already matches current power state — reply now */
1372 if (!!cp->val == hdev_is_powered(hdev)) {
1373 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1377 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1384 queue_work(hdev->req_workqueue, &hdev->power_on);
1387 /* Disconnect connections, stop scans, etc */
1388 err = clean_up_hci_state(hdev);
1390 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1391 HCI_POWER_OFF_TIMEOUT);
1393 /* ENODATA means there were no HCI commands queued */
1394 if (err == -ENODATA) {
1395 cancel_delayed_work(&hdev->power_off);
1396 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1402 hci_dev_unlock(hdev);
/* Broadcast MGMT_EV_NEW_SETTINGS with the current settings mask to all
 * mgmt sockets except @skip. */
1406 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1410 ev = cpu_to_le32(get_current_settings(hdev));
1412 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
1417 struct hci_dev *hdev;
/* mgmt_pending_foreach() callback: answer each pending command with a
 * settings response, remember the first socket in the cmd_lookup match
 * (taking a reference) so the caller can skip it when broadcasting, and
 * free the command. */
1421 static void settings_rsp(struct pending_cmd *cmd, void *data)
1423 struct cmd_lookup *match = data;
1425 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1427 list_del(&cmd->list);
1429 if (match->sk == NULL) {
1430 match->sk = cmd->sk;
1431 sock_hold(match->sk);
1434 mgmt_pending_free(cmd);
/* mgmt_pending_foreach() callback: fail the pending command with the
 * status pointed to by @data, then remove it. The "u8 *status = data"
 * local is elided from this sampled view. */
1437 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
1441 cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1442 mgmt_pending_remove(cmd);
/* Map the BR/EDR availability of @hdev onto a mgmt status:
 * NOT_SUPPORTED when the hardware can't do BR/EDR, REJECTED when it can
 * but BR/EDR is administratively disabled, SUCCESS otherwise. */
1445 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1447 if (!lmp_bredr_capable(hdev))
1448 return MGMT_STATUS_NOT_SUPPORTED;
1449 else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1450 return MGMT_STATUS_REJECTED;
1452 return MGMT_STATUS_SUCCESS;
/* LE counterpart of mgmt_bredr_support(): NOT_SUPPORTED without LE
 * hardware, REJECTED when LE is disabled, SUCCESS otherwise. */
1455 static u8 mgmt_le_support(struct hci_dev *hdev)
1457 if (!lmp_le_capable(hdev))
1458 return MGMT_STATUS_NOT_SUPPORTED;
1459 else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1460 return MGMT_STATUS_REJECTED;
1462 return MGMT_STATUS_SUCCESS;
/* HCI completion callback for SET_DISCOVERABLE: on error, report it and
 * clear the limited-discoverable flag; on success, update
 * HCI_DISCOVERABLE per the requested value, arm the discoverable
 * timeout when one was given, send the settings response and (when the
 * flag actually changed) broadcast new settings, then refresh class of
 * device so the limited-discoverable CoD bit stays in sync.
 * NOTE(review): sampled extract — the hci_dev_lock(), the "if (!cmd)"
 * and "if (status)" guards, the cp->val branch, the "if (changed)"
 * guard, the update_class(&req) call and the goto/remove labels are
 * elided from view. */
1465 static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
1467 struct pending_cmd *cmd;
1468 struct mgmt_mode *cp;
1469 struct hci_request req;
1472 BT_DBG("status 0x%02x", status);
1476 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
1481 u8 mgmt_err = mgmt_status(status);
1482 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1483 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1489 changed = !test_and_set_bit(HCI_DISCOVERABLE,
1492 if (hdev->discov_timeout > 0) {
1493 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1494 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1498 changed = test_and_clear_bit(HCI_DISCOVERABLE,
1502 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1505 new_settings(hdev, cmd->sk);
1507 /* When the discoverable mode gets changed, make sure
1508 * that class of device has the limited discoverable
1509 * bit correctly set.
1511 hci_req_init(&req, hdev);
1513 hci_req_run(&req, NULL);
1516 mgmt_pending_remove(cmd);
1519 hci_dev_unlock(hdev);
/* MGMT Set Discoverable command handler.
 *
 * Validates the requested mode (0x00 off, 0x01 general, 0x02 limited)
 * and its timeout combination, handles the powered-off and
 * timeout-only-change shortcuts without any HCI traffic, and otherwise
 * builds an HCI request: IAC LAP + inquiry-scan enable for BR/EDR, or
 * just an advertising-data update for LE-only controllers. The request
 * is finished by set_discoverable_complete().
 */
1522 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1525 struct mgmt_cp_set_discoverable *cp = data;
1526 struct pending_cmd *cmd;
1527 struct hci_request req;
1532 BT_DBG("request for %s", hdev->name);
1534 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1535 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1536 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1537 MGMT_STATUS_REJECTED);
1539 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1540 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1541 MGMT_STATUS_INVALID_PARAMS);
1543 timeout = __le16_to_cpu(cp->timeout);
1545 /* Disabling discoverable requires that no timeout is set,
1546 * and enabling limited discoverable requires a timeout.
1548 if ((cp->val == 0x00 && timeout > 0) ||
1549 (cp->val == 0x02 && timeout == 0))
1550 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1551 MGMT_STATUS_INVALID_PARAMS);
/* A timeout only makes sense on a powered adapter, since the
 * timer is armed when the controller enters discoverable mode.
 */
1555 if (!hdev_is_powered(hdev) && timeout > 0) {
1556 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1557 MGMT_STATUS_NOT_POWERED);
1561 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1562 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1563 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
/* Discoverable mode requires the adapter to be connectable. */
1568 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
1569 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1570 MGMT_STATUS_REJECTED);
1574 if (!hdev_is_powered(hdev)) {
1575 bool changed = false;
1577 /* Setting limited discoverable when powered off is
1578 * not a valid operation since it requires a timeout
1579 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1581 if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
1582 change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1586 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1591 err = new_settings(hdev, sk);
1596 /* If the current mode is the same, then just update the timeout
1597 * value with the new value. And if only the timeout gets updated,
1598 * then no need for any HCI transactions.
1600 if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
1601 (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
1602 &hdev->dev_flags)) {
1603 cancel_delayed_work(&hdev->discov_off);
1604 hdev->discov_timeout = timeout;
1606 if (cp->val && hdev->discov_timeout > 0) {
1607 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1608 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1612 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1616 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1622 /* Cancel any potential discoverable timeout that might be
1623 * still active and store new timeout value. The arming of
1624 * the timeout happens in the complete handler.
1626 cancel_delayed_work(&hdev->discov_off);
1627 hdev->discov_timeout = timeout;
/* Optimistically set the limited flag; set_discoverable_complete()
 * rolls it back if the HCI request fails.
 */
1629 /* Limited discoverable mode */
1630 if (cp->val == 0x02)
1631 set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1633 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1635 hci_req_init(&req, hdev);
1637 /* The procedure for LE-only controllers is much simpler - just
1638 * update the advertising data.
1640 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1646 struct hci_cp_write_current_iac_lap hci_cp;
1648 if (cp->val == 0x02) {
/* LIAC (0x9e8b00) + GIAC (0x9e8b33), little-endian. */
1649 /* Limited discoverable mode */
1650 hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
1651 hci_cp.iac_lap[0] = 0x00; /* LIAC */
1652 hci_cp.iac_lap[1] = 0x8b;
1653 hci_cp.iac_lap[2] = 0x9e;
1654 hci_cp.iac_lap[3] = 0x33; /* GIAC */
1655 hci_cp.iac_lap[4] = 0x8b;
1656 hci_cp.iac_lap[5] = 0x9e;
1658 /* General discoverable mode */
1660 hci_cp.iac_lap[0] = 0x33; /* GIAC */
1661 hci_cp.iac_lap[1] = 0x8b;
1662 hci_cp.iac_lap[2] = 0x9e;
1665 hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1666 (hci_cp.num_iac * 3) + 1, &hci_cp);
1668 scan |= SCAN_INQUIRY;
1670 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1673 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1676 update_adv_data(&req);
1678 err = hci_req_run(&req, set_discoverable_complete);
1680 mgmt_pending_remove(cmd);
1683 hci_dev_unlock(hdev);
/* Append page-scan parameter commands to @req: interlaced scan with a
 * 160 msec interval when @enable is set, standard scan at the default
 * 1.28 sec interval otherwise. Commands are only added when the values
 * differ from the controller's current settings. No-op for non-BR/EDR
 * adapters and for controllers older than Bluetooth 1.2.
 */
1687 static void write_fast_connectable(struct hci_request *req, bool enable)
1689 struct hci_dev *hdev = req->hdev;
1690 struct hci_cp_write_page_scan_activity acp;
1693 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1696 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1700 type = PAGE_SCAN_TYPE_INTERLACED;
/* 0x0100 in 0.625 ms slots = 160 msec. */
1702 /* 160 msec page scan interval */
1703 acp.interval = cpu_to_le16(0x0100)
1705 type = PAGE_SCAN_TYPE_STANDARD; /* default */
1707 /* default 1.28 sec page scan */
1708 acp.interval = cpu_to_le16(0x0800);
1711 acp.window = cpu_to_le16(0x0012);
/* Skip the HCI round-trips when nothing would actually change. */
1713 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1714 __cpu_to_le16(hdev->page_scan_window) != acp.window)
1715 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1718 if (hdev->page_scan_type != type)
1719 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
/* Completion handler for the Set Connectable HCI request: on failure,
 * report the HCI error to the pending mgmt command; on success, update
 * HCI_CONNECTABLE, reply to the command and broadcast New Settings if
 * the flag actually changed.
 */
1722 static void set_connectable_complete(struct hci_dev *hdev, u8 status)
1724 struct pending_cmd *cmd;
1725 struct mgmt_mode *cp;
1728 BT_DBG("status 0x%02x", status);
1732 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1737 u8 mgmt_err = mgmt_status(status);
1738 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1744 changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1746 changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1748 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1751 new_settings(hdev, cmd->sk);
1754 mgmt_pending_remove(cmd);
1757 hci_dev_unlock(hdev);
/* Apply a Set Connectable request purely in host state (used when the
 * adapter is powered off, or when no HCI commands turned out to be
 * needed): toggle HCI_CONNECTABLE, reply to the requester, and
 * broadcast New Settings when something changed.
 */
1760 static int set_connectable_update_settings(struct hci_dev *hdev,
1761 struct sock *sk, u8 val)
1763 bool changed = false;
1766 if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1770 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1772 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
/* Discoverable requires connectable, so clear it together. */
1773 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1776 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1781 return new_settings(hdev, sk);
/* MGMT Set Connectable command handler: validates the mode (0x00/0x01),
 * applies it directly via set_connectable_update_settings() when
 * powered off, and otherwise issues the necessary HCI commands (scan
 * enable and/or advertising updates), finished by
 * set_connectable_complete().
 */
1786 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1789 struct mgmt_mode *cp = data;
1790 struct pending_cmd *cmd;
1791 struct hci_request req;
1795 BT_DBG("request for %s", hdev->name);
1797 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1798 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1799 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1800 MGMT_STATUS_REJECTED);
1802 if (cp->val != 0x00 && cp->val != 0x01)
1803 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1804 MGMT_STATUS_INVALID_PARAMS);
1808 if (!hdev_is_powered(hdev)) {
1809 err = set_connectable_update_settings(hdev, sk, cp->val);
/* Serialize against in-flight discoverable/connectable changes. */
1813 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1814 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1815 err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1820 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1826 hci_req_init(&req, hdev);
1828 /* If BR/EDR is not enabled and we disable advertising as a
1829 * by-product of disabling connectable, we need to update the
1830 * advertising flags.
1832 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1834 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1835 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1837 update_adv_data(&req);
1838 } else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
/* Going non-connectable makes a pending discoverable timeout
 * meaningless, so cancel it.
 */
1844 if (test_bit(HCI_ISCAN, &hdev->flags) &&
1845 hdev->discov_timeout > 0)
1846 cancel_delayed_work(&hdev->discov_off);
1849 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1852 /* If we're going from non-connectable to connectable or
1853 * vice-versa when fast connectable is enabled ensure that fast
1854 * connectable gets disabled. write_fast_connectable won't do
1855 * anything if the page scan parameters are already what they
1858 if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
1859 write_fast_connectable(&req, false);
/* Restart advertising (when active and no LE links are up) so it
 * picks up the new connectable setting.
 */
1861 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) &&
1862 hci_conn_num(hdev, LE_LINK) == 0) {
1863 disable_advertising(&req);
1864 enable_advertising(&req);
1867 err = hci_req_run(&req, set_connectable_complete);
1869 mgmt_pending_remove(cmd);
/* ENODATA: no HCI commands were queued - fall back to the pure
 * host-state update path.
 */
1870 if (err == -ENODATA)
1871 err = set_connectable_update_settings(hdev, sk,
1877 hci_dev_unlock(hdev);
/* MGMT Set Pairable command handler: HCI_PAIRABLE is a pure host-side
 * flag, so the request completes synchronously with no HCI traffic.
 * Broadcasts New Settings when the flag actually changed.
 */
1881 static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1884 struct mgmt_mode *cp = data;
1888 BT_DBG("request for %s", hdev->name);
1890 if (cp->val != 0x00 && cp->val != 0x01)
1891 return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1892 MGMT_STATUS_INVALID_PARAMS);
1897 changed = !test_and_set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1899 changed = test_and_clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1901 err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1906 err = new_settings(hdev, sk);
1909 hci_dev_unlock(hdev);
/* MGMT Set Link Security command handler: toggles BR/EDR link-level
 * security (authentication). When powered off only the host flag is
 * changed; when powered the setting is written to the controller via
 * HCI Write Auth Enable.
 */
1913 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1916 struct mgmt_mode *cp = data;
1917 struct pending_cmd *cmd;
1921 BT_DBG("request for %s", hdev->name);
1923 status = mgmt_bredr_support(hdev);
1925 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1928 if (cp->val != 0x00 && cp->val != 0x01)
1929 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1930 MGMT_STATUS_INVALID_PARAMS);
1934 if (!hdev_is_powered(hdev)) {
1935 bool changed = false;
1937 if (!!cp->val != test_bit(HCI_LINK_SECURITY,
1938 &hdev->dev_flags)) {
1939 change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
1943 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1948 err = new_settings(hdev, sk);
1953 if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1954 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
/* NOTE(review): 'val' appears to be derived from cp->val on an
 * elided line - confirm against the full source. If the
 * controller already matches, reply without HCI traffic.
 */
1961 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1962 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1966 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1972 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1974 mgmt_pending_remove(cmd);
1979 hci_dev_unlock(hdev);
/* MGMT Set Secure Simple Pairing command handler: toggles SSP support.
 * Disabling SSP also clears High Speed (HS depends on SSP). When
 * powered the change is written via HCI Write SSP Mode; SSP debug mode
 * is switched off first when it was in use and SSP is being disabled.
 */
1983 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1985 struct mgmt_mode *cp = data;
1986 struct pending_cmd *cmd;
1990 BT_DBG("request for %s", hdev->name);
1992 status = mgmt_bredr_support(hdev);
1994 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1996 if (!lmp_ssp_capable(hdev))
1997 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1998 MGMT_STATUS_NOT_SUPPORTED);
2000 if (cp->val != 0x00 && cp->val != 0x01)
2001 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2002 MGMT_STATUS_INVALID_PARAMS);
/* Powered-off path: adjust host flags only. */
2006 if (!hdev_is_powered(hdev)) {
2010 changed = !test_and_set_bit(HCI_SSP_ENABLED,
2013 changed = test_and_clear_bit(HCI_SSP_ENABLED,
2016 changed = test_and_clear_bit(HCI_HS_ENABLED,
2019 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
2022 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2027 err = new_settings(hdev, sk);
2032 if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
2033 mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
2034 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2039 if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
2040 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2044 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
/* cp->val is 0x00 here, so this conveniently disables debug mode. */
2050 if (!cp->val && test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
2051 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
2052 sizeof(cp->val), &cp->val);
2054 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
2056 mgmt_pending_remove(cmd);
2061 hci_dev_unlock(hdev);
/* MGMT Set High Speed command handler: HS is a pure host flag layered
 * on SSP (which must be enabled). Disabling HS while the adapter is
 * powered is rejected; otherwise the flag is toggled synchronously and
 * New Settings is broadcast on change.
 */
2065 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2067 struct mgmt_mode *cp = data;
2072 BT_DBG("request for %s", hdev->name);
2074 status = mgmt_bredr_support(hdev);
2076 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
2078 if (!lmp_ssp_capable(hdev))
2079 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2080 MGMT_STATUS_NOT_SUPPORTED);
2082 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
2083 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2084 MGMT_STATUS_REJECTED);
2086 if (cp->val != 0x00 && cp->val != 0x01)
2087 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2088 MGMT_STATUS_INVALID_PARAMS);
2093 changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
/* Disable path: only allowed while powered off. */
2095 if (hdev_is_powered(hdev)) {
2096 err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2097 MGMT_STATUS_REJECTED);
2101 changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
2104 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
2109 err = new_settings(hdev, sk);
2112 hci_dev_unlock(hdev);
/* Completion handler for the Set LE HCI request: on failure, fail all
 * pending Set LE commands with the translated HCI error; on success,
 * answer them with the current settings, broadcast New Settings, and
 * refresh advertising and scan-response data when LE ended up enabled.
 */
2116 static void le_enable_complete(struct hci_dev *hdev, u8 status)
2118 struct cmd_lookup match = { NULL, hdev };
2121 u8 mgmt_err = mgmt_status(status);
2123 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2128 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
/* match.sk was populated by settings_rsp(); skip that socket. */
2130 new_settings(hdev, match.sk);
2135 /* Make sure the controller has a good default for
2136 * advertising data. Restrict the update to when LE
2137 * has actually been enabled. During power on, the
2138 * update in powered_update_hci will take care of it.
2140 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
2141 struct hci_request req;
2145 hci_req_init(&req, hdev);
2146 update_adv_data(&req);
2147 update_scan_rsp_data(&req);
2148 hci_req_run(&req, NULL);
2150 hci_dev_unlock(hdev);
/* MGMT Set Low Energy command handler: toggles host LE support. LE-only
 * controllers cannot turn LE off. When the adapter is powered off, or
 * the controller already matches the requested value, only host flags
 * change; otherwise HCI Write LE Host Supported is issued (disabling
 * advertising first when needed), finished by le_enable_complete().
 */
2154 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2156 struct mgmt_mode *cp = data;
2157 struct hci_cp_write_le_host_supported hci_cp;
2158 struct pending_cmd *cmd;
2159 struct hci_request req;
2163 BT_DBG("request for %s", hdev->name);
2165 if (!lmp_le_capable(hdev))
2166 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2167 MGMT_STATUS_NOT_SUPPORTED);
2169 if (cp->val != 0x00 && cp->val != 0x01)
2170 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2171 MGMT_STATUS_INVALID_PARAMS);
2173 /* LE-only devices do not allow toggling LE on/off */
2174 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
2175 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2176 MGMT_STATUS_REJECTED);
2181 enabled = lmp_host_le_capable(hdev);
/* Flags-only path: nothing to tell the controller. */
2183 if (!hdev_is_powered(hdev) || val == enabled) {
2184 bool changed = false;
2186 if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
2187 change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
/* Turning LE off implies advertising goes away too. */
2191 if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
2192 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
2196 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2201 err = new_settings(hdev, sk);
2206 if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
2207 mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2208 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2213 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2219 hci_req_init(&req, hdev);
2221 memset(&hci_cp, 0, sizeof(hci_cp));
2225 hci_cp.simul = lmp_le_br_capable(hdev);
2227 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
2228 disable_advertising(&req);
2231 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
2234 err = hci_req_run(&req, le_enable_complete);
2236 mgmt_pending_remove(cmd);
2239 hci_dev_unlock(hdev);
2243 /* This is a helper function to test for pending mgmt commands that can
2244 * cause CoD or EIR HCI commands. We can only allow one such pending
2245 * mgmt command at a time since otherwise we cannot easily track what
2246 * the current values are, will be, and based on that calculate if a new
2247 * HCI command needs to be sent and if yes with what value.
/* Scan the pending mgmt command list for any command that can trigger
 * CoD or EIR HCI updates (see the block comment above). The return
 * statements fall on lines elided from this excerpt.
 */
2249 static bool pending_eir_or_class(struct hci_dev *hdev)
2251 struct pending_cmd *cmd;
2253 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2254 switch (cmd->opcode) {
2255 case MGMT_OP_ADD_UUID:
2256 case MGMT_OP_REMOVE_UUID:
2257 case MGMT_OP_SET_DEV_CLASS:
2258 case MGMT_OP_SET_POWERED:
/* The Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB)
 * stored little-endian; 16- and 32-bit UUIDs are values rooted in it.
 */
2266 static const u8 bluetooth_base_uuid[] = {
2267 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2268 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Return the canonical size of @uuid (a 128-bit little-endian UUID):
 * not rooted in the Bluetooth Base UUID means a true 128-bit UUID;
 * otherwise the distinguishing 32-bit word decides between the shorter
 * forms. NOTE(review): the return statements fall on elided lines -
 * confirm the exact values (128/32/16) against the full source.
 */
2271 static u8 get_uuid_size(const u8 *uuid)
2275 if (memcmp(uuid, bluetooth_base_uuid, 12))
2278 val = get_unaligned_le32(&uuid[12]);
/* Shared completion helper for class-of-device affecting commands
 * (Add/Remove UUID, Set Device Class): completes the pending @mgmt_op
 * command with the translated status and the current dev_class.
 */
2285 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2287 struct pending_cmd *cmd;
2291 cmd = mgmt_pending_find(mgmt_op, hdev);
2295 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
2296 hdev->dev_class, 3);
2298 mgmt_pending_remove(cmd);
2301 hci_dev_unlock(hdev);
/* HCI request completion callback for Add UUID: delegate to the shared
 * class-command completion helper.
 */
2304 static void add_uuid_complete(struct hci_dev *hdev, u8 status)
2306 BT_DBG("status 0x%02x", status);
2308 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
/* MGMT Add UUID command handler: appends the UUID to hdev->uuids and
 * runs an HCI request to refresh CoD/EIR. If no HCI commands were
 * needed (-ENODATA) the command completes immediately; otherwise a
 * pending entry is queued and add_uuid_complete() finishes it.
 */
2311 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2313 struct mgmt_cp_add_uuid *cp = data;
2314 struct pending_cmd *cmd;
2315 struct hci_request req;
2316 struct bt_uuid *uuid;
2319 BT_DBG("request for %s", hdev->name);
/* Only one EIR/CoD-affecting command may be in flight at a time. */
2323 if (pending_eir_or_class(hdev)) {
2324 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2329 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2335 memcpy(uuid->uuid, cp->uuid, 16);
2336 uuid->svc_hint = cp->svc_hint;
2337 uuid->size = get_uuid_size(cp->uuid);
2339 list_add_tail(&uuid->list, &hdev->uuids);
2341 hci_req_init(&req, hdev);
2346 err = hci_req_run(&req, add_uuid_complete);
/* ENODATA means there were no HCI commands queued (see the Set
 * Powered handler for the same convention).
 */
2348 if (err != -ENODATA)
2351 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2352 hdev->dev_class, 3);
2356 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2365 hci_dev_unlock(hdev);
/* Arm the service-cache timer (when powered and not already armed) so
 * that EIR/CoD updates are batched. NOTE(review): the return statements
 * fall on elided lines - presumably false when not powered or already
 * cached, true after arming; confirm against the full source.
 */
2369 static bool enable_service_cache(struct hci_dev *hdev)
2371 if (!hdev_is_powered(hdev))
2374 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2375 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
/* HCI request completion callback for Remove UUID: delegate to the
 * shared class-command completion helper.
 */
2383 static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
2385 BT_DBG("status 0x%02x", status);
2387 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
/* MGMT Remove UUID command handler: an all-zero UUID clears the whole
 * list (re-arming the service cache when possible); otherwise every
 * matching entry is unlinked. An HCI request then refreshes CoD/EIR,
 * completing immediately on -ENODATA or via remove_uuid_complete().
 */
2390 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2393 struct mgmt_cp_remove_uuid *cp = data;
2394 struct pending_cmd *cmd;
2395 struct bt_uuid *match, *tmp;
2396 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2397 struct hci_request req;
2400 BT_DBG("request for %s", hdev->name);
2404 if (pending_eir_or_class(hdev)) {
2405 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
/* Wildcard: remove everything. */
2410 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2411 hci_uuids_clear(hdev);
2413 if (enable_service_cache(hdev)) {
2414 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2415 0, hdev->dev_class, 3);
2424 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2425 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2428 list_del(&match->list);
/* No entry matched the requested UUID. */
2434 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2435 MGMT_STATUS_INVALID_PARAMS);
2440 hci_req_init(&req, hdev);
2445 err = hci_req_run(&req, remove_uuid_complete);
2447 if (err != -ENODATA)
2450 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2451 hdev->dev_class, 3);
2455 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2464 hci_dev_unlock(hdev);
/* HCI request completion callback for Set Device Class: delegate to
 * the shared class-command completion helper.
 */
2468 static void set_class_complete(struct hci_dev *hdev, u8 status)
2470 BT_DBG("status 0x%02x", status);
2472 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* MGMT Set Device Class command handler: validates the major/minor
 * class bits (low two minor bits and top three major bits must be
 * zero), stores them, and when powered flushes any pending service
 * cache before running the HCI request to write the new class,
 * finished by set_class_complete().
 */
2475 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2478 struct mgmt_cp_set_dev_class *cp = data;
2479 struct pending_cmd *cmd;
2480 struct hci_request req;
2483 BT_DBG("request for %s", hdev->name);
2485 if (!lmp_bredr_capable(hdev))
2486 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2487 MGMT_STATUS_NOT_SUPPORTED);
2491 if (pending_eir_or_class(hdev)) {
2492 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2497 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2498 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2499 MGMT_STATUS_INVALID_PARAMS);
2503 hdev->major_class = cp->major;
2504 hdev->minor_class = cp->minor;
2506 if (!hdev_is_powered(hdev)) {
2507 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2508 hdev->dev_class, 3);
2512 hci_req_init(&req, hdev);
/* cancel_delayed_work_sync() may sleep, so the hdev lock is
 * dropped around it to avoid deadlocking against the work item.
 */
2514 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2515 hci_dev_unlock(hdev);
2516 cancel_delayed_work_sync(&hdev->service_cache);
2523 err = hci_req_run(&req, set_class_complete);
2525 if (err != -ENODATA)
2528 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2529 hdev->dev_class, 3);
2533 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2542 hci_dev_unlock(hdev);
/* MGMT Load Link Keys command handler: validates the packet length and
 * every key entry, replaces the stored BR/EDR link keys wholesale,
 * updates the keep-debug-keys policy flag, and completes synchronously.
 * Debug-combination keys are never loaded.
 */
2546 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
/* Upper bound that keeps key_count * sizeof(key) from overflowing
 * the u16 expected_len computation below.
 */
2549 struct mgmt_cp_load_link_keys *cp = data;
2550 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2551 sizeof(struct mgmt_link_key_info));
2552 u16 key_count, expected_len;
2556 BT_DBG("request for %s", hdev->name);
2558 if (!lmp_bredr_capable(hdev))
2559 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2560 MGMT_STATUS_NOT_SUPPORTED);
2562 key_count = __le16_to_cpu(cp->key_count);
2563 if (key_count > max_key_count) {
2564 BT_ERR("load_link_keys: too big key_count value %u",
2566 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2567 MGMT_STATUS_INVALID_PARAMS);
2570 expected_len = sizeof(*cp) + key_count *
2571 sizeof(struct mgmt_link_key_info);
2572 if (expected_len != len) {
2573 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2575 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2576 MGMT_STATUS_INVALID_PARAMS);
2579 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2580 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2581 MGMT_STATUS_INVALID_PARAMS);
2583 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
/* Validate all entries up front so the key store is never left
 * half-replaced.
 */
2586 for (i = 0; i < key_count; i++) {
2587 struct mgmt_link_key_info *key = &cp->keys[i];
2589 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2590 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2591 MGMT_STATUS_INVALID_PARAMS);
2596 hci_link_keys_clear(hdev);
2599 changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
2602 changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
2606 new_settings(hdev, NULL);
2608 for (i = 0; i < key_count; i++) {
2609 struct mgmt_link_key_info *key = &cp->keys[i];
2611 /* Always ignore debug keys and require a new pairing if
2612 * the user wants to use them.
2614 if (key->type == HCI_LK_DEBUG_COMBINATION)
2617 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2618 key->type, key->pin_len, NULL);
2621 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2623 hci_dev_unlock(hdev);
2628 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2629 u8 addr_type, struct sock *skip_sk)
2631 struct mgmt_ev_device_unpaired ev;
2633 bacpy(&ev.addr.bdaddr, bdaddr);
2634 ev.addr.type = addr_type;
2636 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* MGMT Unpair Device command handler: removes the link key (BR/EDR) or
 * the IRK, connection parameters and LTK (LE) for the given address.
 * When disconnection is requested and a matching connection exists,
 * HCI Disconnect is sent and the command completes later; otherwise it
 * completes immediately and a Device Unpaired event is emitted.
 */
2640 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2643 struct mgmt_cp_unpair_device *cp = data;
2644 struct mgmt_rp_unpair_device rp;
2645 struct hci_cp_disconnect dc;
2646 struct pending_cmd *cmd;
2647 struct hci_conn *conn;
2650 memset(&rp, 0, sizeof(rp));
2651 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2652 rp.addr.type = cp->addr.type;
2654 if (!bdaddr_type_is_valid(cp->addr.type))
2655 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2656 MGMT_STATUS_INVALID_PARAMS,
2659 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2660 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2661 MGMT_STATUS_INVALID_PARAMS,
2666 if (!hdev_is_powered(hdev)) {
2667 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2668 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2672 if (cp->addr.type == BDADDR_BREDR) {
2673 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
/* LE path: map the mgmt address type onto the HCI one, then drop
 * identity, parameters, and long term key for the device.
 */
2677 if (cp->addr.type == BDADDR_LE_PUBLIC)
2678 addr_type = ADDR_LE_DEV_PUBLIC;
2680 addr_type = ADDR_LE_DEV_RANDOM;
2682 hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
2684 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
2686 err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
2690 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2691 MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
2695 if (cp->disconnect) {
2696 if (cp->addr.type == BDADDR_BREDR)
2697 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2700 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
/* No live connection: complete now and notify other sockets. */
2707 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2709 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2713 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2720 dc.handle = cpu_to_le16(conn->handle);
2721 dc.reason = 0x13; /* Remote User Terminated Connection */
2722 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2724 mgmt_pending_remove(cmd);
2727 hci_dev_unlock(hdev);
/* MGMT Disconnect command handler: validates the address, refuses when
 * the adapter is down or another disconnect is already pending, then
 * issues HCI Disconnect (Remote User Terminated) for the matching ACL
 * or LE connection. The pending command is completed from the
 * disconnect-complete event path.
 */
2731 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2734 struct mgmt_cp_disconnect *cp = data;
2735 struct mgmt_rp_disconnect rp;
2736 struct hci_cp_disconnect dc;
2737 struct pending_cmd *cmd;
2738 struct hci_conn *conn;
2743 memset(&rp, 0, sizeof(rp));
2744 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2745 rp.addr.type = cp->addr.type;
2747 if (!bdaddr_type_is_valid(cp->addr.type))
2748 return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2749 MGMT_STATUS_INVALID_PARAMS,
2754 if (!test_bit(HCI_UP, &hdev->flags)) {
2755 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2756 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2760 if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
2761 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2762 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2766 if (cp->addr.type == BDADDR_BREDR)
2767 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2770 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
/* BT_OPEN/BT_CLOSED means no established link to tear down. */
2772 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2773 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2774 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
2778 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2784 dc.handle = cpu_to_le16(conn->handle);
2785 dc.reason = HCI_ERROR_REMOTE_USER_TERM;
2787 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2789 mgmt_pending_remove(cmd);
2792 hci_dev_unlock(hdev);
2796 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2798 switch (link_type) {
2800 switch (addr_type) {
2801 case ADDR_LE_DEV_PUBLIC:
2802 return BDADDR_LE_PUBLIC;
2805 /* Fallback to LE Random address type */
2806 return BDADDR_LE_RANDOM;
2810 /* Fallback to BR/EDR type */
2811 return BDADDR_BREDR;
/* MGMT Get Connections command handler: builds a reply listing the
 * addresses of all mgmt-visible connections. SCO/eSCO links are
 * filtered out, which is why the reply length is recomputed after the
 * fill loop.
 */
2815 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2818 struct mgmt_rp_get_connections *rp;
2828 if (!hdev_is_powered(hdev)) {
2829 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2830 MGMT_STATUS_NOT_POWERED);
/* First pass: count the connections to size the reply buffer. */
2835 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2836 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2840 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2841 rp = kmalloc(rp_len, GFP_KERNEL);
/* Second pass: fill in addresses, skipping SCO/eSCO links. */
2848 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2849 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2851 bacpy(&rp->addr[i].bdaddr, &c->dst);
2852 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2853 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2858 rp->conn_count = cpu_to_le16(i);
2860 /* Recalculate length in case of filtered SCO connections, etc */
2861 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2863 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2869 hci_dev_unlock(hdev);
/* Queue a pending PIN Code Neg Reply mgmt command and send the HCI
 * negative reply for the given address; the pending entry is dropped
 * again if the HCI send fails.
 */
2873 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2874 struct mgmt_cp_pin_code_neg_reply *cp)
2876 struct pending_cmd *cmd;
2879 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2884 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2885 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2887 mgmt_pending_remove(cmd);
/* MGMT PIN Code Reply command handler: forwards the user-supplied PIN
 * to the controller for the matching ACL connection. When the pending
 * security level demands a 16-digit PIN and a shorter one was given, a
 * negative reply is sent instead and the command fails.
 */
2892 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2895 struct hci_conn *conn;
2896 struct mgmt_cp_pin_code_reply *cp = data;
2897 struct hci_cp_pin_code_reply reply;
2898 struct pending_cmd *cmd;
2905 if (!hdev_is_powered(hdev)) {
2906 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2907 MGMT_STATUS_NOT_POWERED);
2911 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2913 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2914 MGMT_STATUS_NOT_CONNECTED);
/* High security requires a full 16-digit PIN; reject anything
 * shorter by negatively replying to the controller.
 */
2918 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2919 struct mgmt_cp_pin_code_neg_reply ncp;
2921 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2923 BT_ERR("PIN code is not 16 bytes long");
2925 err = send_pin_code_neg_reply(sk, hdev, &ncp);
2927 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2928 MGMT_STATUS_INVALID_PARAMS);
2933 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2939 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2940 reply.pin_len = cp->pin_len;
2941 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2943 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2945 mgmt_pending_remove(cmd);
2948 hci_dev_unlock(hdev);
/* MGMT Set IO Capability command handler: range-checks and stores the
 * IO capability used for future pairing; completes synchronously with
 * no HCI traffic.
 */
2952 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2955 struct mgmt_cp_set_io_capability *cp = data;
2959 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2960 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
2961 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
2965 hdev->io_capability = cp->io_capability;
2967 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2968 hdev->io_capability);
2970 hci_dev_unlock(hdev);
2972 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
/* Search the pending mgmt commands for a Pair Device command whose
 * user_data is @conn. NOTE(review): the return statements fall on
 * elided lines - presumably the matching cmd, or NULL when none.
 */
2976 static struct pending_cmd *find_pairing(struct hci_conn *conn)
2978 struct hci_dev *hdev = conn->hdev;
2979 struct pending_cmd *cmd;
2981 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2982 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2985 if (cmd->user_data != conn)
/* Finish a pending Pair Device command with @status: reply with the
 * peer address, unregister the connection callbacks so no further
 * notifications arrive, drop the connection reference held for the
 * pairing, and remove the pending entry.
 */
2994 static void pairing_complete(struct pending_cmd *cmd, u8 status)
2996 struct mgmt_rp_pair_device rp;
2997 struct hci_conn *conn = cmd->user_data;
2999 bacpy(&rp.addr.bdaddr, &conn->dst);
3000 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3002 cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
3005 /* So we don't get further callbacks for this connection */
3006 conn->connect_cfm_cb = NULL;
3007 conn->security_cfm_cb = NULL;
3008 conn->disconn_cfm_cb = NULL;
3010 hci_conn_drop(conn);
3012 mgmt_pending_remove(cmd);
/* SMP completion entry point: translate the SMP outcome into a mgmt
 * status and finish the pending pairing command for @conn, if any.
 */
3015 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3017 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3018 struct pending_cmd *cmd;
3020 cmd = find_pairing(conn);
3022 pairing_complete(cmd, status);
/* Connection callback (connect/security/disconnect) for BR/EDR
 * pairings: finish the matching pending Pair Device command with the
 * translated HCI status.
 */
3025 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3027 struct pending_cmd *cmd;
3029 BT_DBG("status %u", status);
3031 cmd = find_pairing(conn);
3033 BT_DBG("Unable to find a pending command");
3035 pairing_complete(cmd, mgmt_status(status));
/* LE variant of pairing_complete_cb(): finish the matching pending
 * Pair Device command with the translated HCI status.
 * NOTE(review): behavior for status == 0 falls on elided lines -
 * confirm against the full source.
 */
3038 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3040 struct pending_cmd *cmd;
3042 BT_DBG("status %u", status);
3047 cmd = find_pairing(conn);
3049 BT_DBG("Unable to find a pending command");
3051 pairing_complete(cmd, mgmt_status(status));
/* MGMT Pair Device command handler.
 *
 * Validates the address type and IO capability, requires the controller
 * to be powered, then creates either an ACL (BR/EDR) or LE connection
 * with BT_SECURITY_MEDIUM / dedicated-bonding authentication. A pending
 * command is registered and the per-transport pairing callbacks are
 * attached; if the link is already connected and secure, the pairing is
 * completed immediately.
 */
3054 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3057 struct mgmt_cp_pair_device *cp = data;
3058 struct mgmt_rp_pair_device rp;
3059 struct pending_cmd *cmd;
3060 u8 sec_level, auth_type;
3061 struct hci_conn *conn;
3066 memset(&rp, 0, sizeof(rp));
3067 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3068 rp.addr.type = cp->addr.type;
3070 if (!bdaddr_type_is_valid(cp->addr.type))
3071 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3072 MGMT_STATUS_INVALID_PARAMS,
3075 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3076 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3077 MGMT_STATUS_INVALID_PARAMS,
3082 if (!hdev_is_powered(hdev)) {
3083 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3084 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
3088 sec_level = BT_SECURITY_MEDIUM;
3089 auth_type = HCI_AT_DEDICATED_BONDING;
3091 if (cp->addr.type == BDADDR_BREDR) {
3092 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3097 /* Convert from L2CAP channel address type to HCI address type
3099 if (cp->addr.type == BDADDR_LE_PUBLIC)
3100 addr_type = ADDR_LE_DEV_PUBLIC;
3102 addr_type = ADDR_LE_DEV_RANDOM;
3104 /* When pairing a new device, it is expected to remember
3105 * this device for future connections. Adding the connection
3106 * parameter information ahead of time allows tracking
3107 * of the slave preferred values and will speed up any
3108 * further connection establishment.
3110 * If connection parameters already exist, then they
3111 * will be kept and this function does nothing.
3113 hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3115 conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
3116 sec_level, auth_type);
/* Map connection-creation failures onto mgmt status codes. */
3122 if (PTR_ERR(conn) == -EBUSY)
3123 status = MGMT_STATUS_BUSY;
3125 status = MGMT_STATUS_CONNECT_FAILED;
3127 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
/* A callback already installed means some other pairing owns this
 * connection -- back off with MGMT_STATUS_BUSY.
 */
3133 if (conn->connect_cfm_cb) {
3134 hci_conn_drop(conn);
3135 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3136 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3140 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3143 hci_conn_drop(conn);
3147 /* For LE, just connecting isn't a proof that the pairing finished */
3148 if (cp->addr.type == BDADDR_BREDR) {
3149 conn->connect_cfm_cb = pairing_complete_cb;
3150 conn->security_cfm_cb = pairing_complete_cb;
3151 conn->disconn_cfm_cb = pairing_complete_cb;
3153 conn->connect_cfm_cb = le_pairing_complete_cb;
3154 conn->security_cfm_cb = le_pairing_complete_cb;
3155 conn->disconn_cfm_cb = le_pairing_complete_cb;
3158 conn->io_capability = cp->io_cap;
3159 cmd->user_data = conn;
3161 if (conn->state == BT_CONNECTED &&
3162 hci_conn_security(conn, sec_level, auth_type))
3163 pairing_complete(cmd, 0);
3168 hci_dev_unlock(hdev);
3172 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3175 struct mgmt_addr_info *addr = data;
3176 struct pending_cmd *cmd;
3177 struct hci_conn *conn;
3184 if (!hdev_is_powered(hdev)) {
3185 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3186 MGMT_STATUS_NOT_POWERED);
3190 cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3192 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3193 MGMT_STATUS_INVALID_PARAMS);
3197 conn = cmd->user_data;
3199 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3200 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3201 MGMT_STATUS_INVALID_PARAMS);
3205 pairing_complete(cmd, MGMT_STATUS_CANCELLED);
3207 err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3208 addr, sizeof(*addr));
3210 hci_dev_unlock(hdev);
/* Common helper for all user pairing responses (PIN code, user confirm,
 * passkey -- positive and negative variants).
 *
 * For LE connections the reply is routed through SMP
 * (smp_user_confirm_reply); for BR/EDR a pending command is registered
 * and the corresponding HCI command (hci_op) is sent, with the passkey
 * appended for HCI_OP_USER_PASSKEY_REPLY.
 */
3214 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3215 struct mgmt_addr_info *addr, u16 mgmt_op,
3216 u16 hci_op, __le32 passkey)
3218 struct pending_cmd *cmd;
3219 struct hci_conn *conn;
3224 if (!hdev_is_powered(hdev)) {
3225 err = cmd_complete(sk, hdev->id, mgmt_op,
3226 MGMT_STATUS_NOT_POWERED, addr,
3231 if (addr->type == BDADDR_BREDR)
3232 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3234 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
3237 err = cmd_complete(sk, hdev->id, mgmt_op,
3238 MGMT_STATUS_NOT_CONNECTED, addr,
/* LE pairing responses go through SMP rather than HCI. */
3243 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3244 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3246 err = cmd_complete(sk, hdev->id, mgmt_op,
3247 MGMT_STATUS_SUCCESS, addr,
3250 err = cmd_complete(sk, hdev->id, mgmt_op,
3251 MGMT_STATUS_FAILED, addr,
3257 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3263 /* Continue with pairing via HCI */
3264 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3265 struct hci_cp_user_passkey_reply cp;
3267 bacpy(&cp.bdaddr, &addr->bdaddr);
3268 cp.passkey = passkey;
3269 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3271 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
/* On send failure the pending command is discarded. */
3275 mgmt_pending_remove(cmd);
3278 hci_dev_unlock(hdev);
/* MGMT PIN Code Negative Reply: thin wrapper over user_pairing_resp()
 * with a zero passkey.
 */
3282 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3283 void *data, u16 len)
3285 struct mgmt_cp_pin_code_neg_reply *cp = data;
3289 return user_pairing_resp(sk, hdev, &cp->addr,
3290 MGMT_OP_PIN_CODE_NEG_REPLY,
3291 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* MGMT User Confirmation Reply: validates the payload length, then
 * delegates to user_pairing_resp() with a zero passkey.
 */
3294 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3297 struct mgmt_cp_user_confirm_reply *cp = data;
3301 if (len != sizeof(*cp))
3302 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3303 MGMT_STATUS_INVALID_PARAMS);
3305 return user_pairing_resp(sk, hdev, &cp->addr,
3306 MGMT_OP_USER_CONFIRM_REPLY,
3307 HCI_OP_USER_CONFIRM_REPLY, 0);
/* MGMT User Confirmation Negative Reply: thin wrapper over
 * user_pairing_resp() with a zero passkey.
 */
3310 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3311 void *data, u16 len)
3313 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3317 return user_pairing_resp(sk, hdev, &cp->addr,
3318 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3319 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* MGMT User Passkey Reply: forwards the user-supplied passkey through
 * user_pairing_resp().
 */
3322 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3325 struct mgmt_cp_user_passkey_reply *cp = data;
3329 return user_pairing_resp(sk, hdev, &cp->addr,
3330 MGMT_OP_USER_PASSKEY_REPLY,
3331 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* MGMT User Passkey Negative Reply: thin wrapper over
 * user_pairing_resp() with a zero passkey.
 */
3334 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3335 void *data, u16 len)
3337 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3341 return user_pairing_resp(sk, hdev, &cp->addr,
3342 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3343 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Queue an HCI Write Local Name command carrying hdev->dev_name onto
 * the given request.
 */
3346 static void update_name(struct hci_request *req)
3348 struct hci_dev *hdev = req->hdev;
3349 struct hci_cp_write_local_name cp;
3351 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3353 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
/* Request-completion callback for Set Local Name: report the HCI status
 * back to the pending mgmt command (cmd_status on failure, cmd_complete
 * on success) and remove the pending entry.
 */
3356 static void set_name_complete(struct hci_dev *hdev, u8 status)
3358 struct mgmt_cp_set_local_name *cp;
3359 struct pending_cmd *cmd;
3361 BT_DBG("status 0x%02x", status);
3365 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3372 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3373 mgmt_status(status));
3375 cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3378 mgmt_pending_remove(cmd);
3381 hci_dev_unlock(hdev);
/* MGMT Set Local Name command handler.
 *
 * Short-circuits with a direct command complete when the names are
 * unchanged or the controller is unpowered (in the latter case the name
 * is stored and a Local Name Changed event is emitted). Otherwise a
 * pending command is registered and an HCI request is built to push the
 * new name (and, on LE-capable controllers, refreshed scan response
 * data) to the controller.
 */
3384 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3387 struct mgmt_cp_set_local_name *cp = data;
3388 struct pending_cmd *cmd;
3389 struct hci_request req;
3396 /* If the old values are the same as the new ones just return a
3397 * direct command complete event.
3399 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3400 !memcmp(hdev->short_name, cp->short_name,
3401 sizeof(hdev->short_name))) {
3402 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3407 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3409 if (!hdev_is_powered(hdev)) {
3410 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3412 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3417 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
3423 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3429 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3431 hci_req_init(&req, hdev);
3433 if (lmp_bredr_capable(hdev)) {
3438 /* The name is stored in the scan response data and so
3439 * no need to update the advertising data here.
3441 if (lmp_le_capable(hdev))
3442 update_scan_rsp_data(&req);
3444 err = hci_req_run(&req, set_name_complete);
3446 mgmt_pending_remove(cmd);
3449 hci_dev_unlock(hdev);
/* MGMT Read Local OOB Data command handler.
 *
 * Requires a powered, SSP-capable controller with no duplicate request
 * in flight; then issues either the extended (when Secure Connections
 * is enabled) or regular HCI Read Local OOB Data command.
 */
3453 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3454 void *data, u16 data_len)
3456 struct pending_cmd *cmd;
3459 BT_DBG("%s", hdev->name);
3463 if (!hdev_is_powered(hdev)) {
3464 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3465 MGMT_STATUS_NOT_POWERED);
3469 if (!lmp_ssp_capable(hdev)) {
3470 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3471 MGMT_STATUS_NOT_SUPPORTED);
3475 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3476 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3481 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
3487 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
3488 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
3491 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
3494 mgmt_pending_remove(cmd);
3497 hci_dev_unlock(hdev);
/* MGMT Add Remote OOB Data command handler.
 *
 * The payload length selects the variant: the legacy size carries a
 * single hash/randomizer pair, the extended size carries the Secure
 * Connections pairs. Any other length is rejected as invalid.
 */
3501 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3502 void *data, u16 len)
3506 BT_DBG("%s ", hdev->name);
3510 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
3511 struct mgmt_cp_add_remote_oob_data *cp = data;
3514 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3515 cp->hash, cp->randomizer);
3517 status = MGMT_STATUS_FAILED;
3519 status = MGMT_STATUS_SUCCESS;
3521 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3522 status, &cp->addr, sizeof(cp->addr));
3523 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
3524 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
3527 err = hci_add_remote_oob_ext_data(hdev, &cp->addr.bdaddr,
3533 status = MGMT_STATUS_FAILED;
3535 status = MGMT_STATUS_SUCCESS;
3537 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3538 status, &cp->addr, sizeof(cp->addr));
3540 BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
3541 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3542 MGMT_STATUS_INVALID_PARAMS);
3545 hci_dev_unlock(hdev);
/* MGMT Remove Remote OOB Data command handler: delete the stored OOB
 * data for the given address and report success or invalid-params.
 */
3549 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3550 void *data, u16 len)
3552 struct mgmt_cp_remove_remote_oob_data *cp = data;
3556 BT_DBG("%s", hdev->name);
3560 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
3562 status = MGMT_STATUS_INVALID_PARAMS;
3564 status = MGMT_STATUS_SUCCESS;
3566 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3567 status, &cp->addr, sizeof(cp->addr));
3569 hci_dev_unlock(hdev);
/* Abort a failed Start Discovery: reset discovery state to STOPPED and
 * complete the pending command with the translated HCI status, echoing
 * the requested discovery type back to user space.
 */
3573 static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
3575 struct pending_cmd *cmd;
3579 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3581 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3585 type = hdev->discovery.type;
3587 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3588 &type, sizeof(type));
3589 mgmt_pending_remove(cmd);
/* Request-completion callback for Start Discovery.
 *
 * On failure delegate to mgmt_start_discovery_failed(); on success move
 * to DISCOVERY_FINDING and, for LE-based discovery types, schedule the
 * delayed work that disables LE scanning after the discovery timeout.
 */
3594 static void start_discovery_complete(struct hci_dev *hdev, u8 status)
3596 unsigned long timeout = 0;
3598 BT_DBG("status %d", status);
3602 mgmt_start_discovery_failed(hdev, status);
3603 hci_dev_unlock(hdev);
3608 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3609 hci_dev_unlock(hdev);
3611 switch (hdev->discovery.type) {
3612 case DISCOV_TYPE_LE:
3613 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3616 case DISCOV_TYPE_INTERLEAVED:
3617 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
3620 case DISCOV_TYPE_BREDR:
3624 BT_ERR("Invalid discovery type %d", hdev->discovery.type);
3630 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout);
/* MGMT Start Discovery command handler.
 *
 * After power / periodic-inquiry / state checks, a pending command is
 * registered and an HCI request built according to the requested
 * discovery type: BR/EDR issues an Inquiry with the GIAC access code;
 * LE (and interleaved) discovery configures and enables an active LE
 * scan using a resolvable or unresolvable private own address. On
 * success the discovery state moves to STARTING.
 */
3633 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
3634 void *data, u16 len)
3636 struct mgmt_cp_start_discovery *cp = data;
3637 struct pending_cmd *cmd;
3638 struct hci_cp_le_set_scan_param param_cp;
3639 struct hci_cp_le_set_scan_enable enable_cp;
3640 struct hci_cp_inquiry inq_cp;
3641 struct hci_request req;
3642 /* General inquiry access code (GIAC) */
3643 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3644 u8 status, own_addr_type;
3647 BT_DBG("%s", hdev->name);
3651 if (!hdev_is_powered(hdev)) {
3652 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3653 MGMT_STATUS_NOT_POWERED);
3657 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
3658 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3663 if (hdev->discovery.state != DISCOVERY_STOPPED) {
3664 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3669 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
3675 hdev->discovery.type = cp->type;
3677 hci_req_init(&req, hdev);
3679 switch (hdev->discovery.type) {
3680 case DISCOV_TYPE_BREDR:
3681 status = mgmt_bredr_support(hdev);
3683 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3685 mgmt_pending_remove(cmd);
3689 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
3690 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3692 mgmt_pending_remove(cmd);
3696 hci_inquiry_cache_flush(hdev);
3698 memset(&inq_cp, 0, sizeof(inq_cp));
3699 memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
3700 inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
3701 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
3704 case DISCOV_TYPE_LE:
3705 case DISCOV_TYPE_INTERLEAVED:
3706 status = mgmt_le_support(hdev);
3708 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3710 mgmt_pending_remove(cmd);
3714 if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
3715 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
3716 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3717 MGMT_STATUS_NOT_SUPPORTED);
3718 mgmt_pending_remove(cmd);
3722 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
3723 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3724 MGMT_STATUS_REJECTED);
3725 mgmt_pending_remove(cmd);
3729 /* If controller is scanning, it means the background scanning
3730 * is running. Thus, we should temporarily stop it in order to
3731 * set the discovery scanning parameters.
3733 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
3734 hci_req_add_le_scan_disable(&req);
3736 memset(&param_cp, 0, sizeof(param_cp));
3738 /* All active scans will be done with either a resolvable
3739 * private address (when privacy feature has been enabled)
3740 * or unresolvable private address.
3742 err = hci_update_random_address(&req, true, &own_addr_type);
3744 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3745 MGMT_STATUS_FAILED);
3746 mgmt_pending_remove(cmd);
3750 param_cp.type = LE_SCAN_ACTIVE;
3751 param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
3752 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
3753 param_cp.own_address_type = own_addr_type;
3754 hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
3757 memset(&enable_cp, 0, sizeof(enable_cp));
3758 enable_cp.enable = LE_SCAN_ENABLE;
3759 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3760 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
/* default case: unrecognised discovery type. */
3765 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3766 MGMT_STATUS_INVALID_PARAMS);
3767 mgmt_pending_remove(cmd);
3771 err = hci_req_run(&req, start_discovery_complete);
3773 mgmt_pending_remove(cmd);
3775 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
3778 hci_dev_unlock(hdev);
/* Complete a failed Stop Discovery: report the translated HCI status to
 * the pending command (echoing the active discovery type) and remove it.
 */
3782 static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
3784 struct pending_cmd *cmd;
3787 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
3791 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3792 &hdev->discovery.type, sizeof(hdev->discovery.type));
3793 mgmt_pending_remove(cmd);
/* Request-completion callback for Stop Discovery: on failure report it
 * via mgmt_stop_discovery_failed(), otherwise mark discovery STOPPED.
 */
3798 static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
3800 BT_DBG("status %d", status);
3805 mgmt_stop_discovery_failed(hdev, status);
3809 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3812 hci_dev_unlock(hdev);
/* MGMT Stop Discovery command handler.
 *
 * Rejects the request when discovery is not active or the supplied type
 * does not match the one in progress; otherwise registers a pending
 * command and runs an HCI request that tears discovery down. If the
 * request queued no HCI commands (-ENODATA), the stop is acknowledged
 * immediately and the state set to STOPPED.
 */
3815 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
3818 struct mgmt_cp_stop_discovery *mgmt_cp = data;
3819 struct pending_cmd *cmd;
3820 struct hci_request req;
3823 BT_DBG("%s", hdev->name);
3827 if (!hci_discovery_active(hdev)) {
3828 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3829 MGMT_STATUS_REJECTED, &mgmt_cp->type,
3830 sizeof(mgmt_cp->type));
3834 if (hdev->discovery.type != mgmt_cp->type) {
3835 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3836 MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
3837 sizeof(mgmt_cp->type));
3841 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
3847 hci_req_init(&req, hdev);
3849 hci_stop_discovery(&req);
3851 err = hci_req_run(&req, stop_discovery_complete);
3853 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
3857 mgmt_pending_remove(cmd);
3859 /* If no HCI commands were sent we're done */
3860 if (err == -ENODATA) {
3861 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
3862 &mgmt_cp->type, sizeof(mgmt_cp->type));
3863 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3867 hci_dev_unlock(hdev);
/* MGMT Confirm Name command handler.
 *
 * Only valid while discovery is active. Looks up the inquiry-cache
 * entry with an unknown name; user space either confirms the name is
 * known (NAME_KNOWN) or requests resolution (NAME_NEEDED, which
 * re-queues the entry for name resolution).
 */
3871 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
3874 struct mgmt_cp_confirm_name *cp = data;
3875 struct inquiry_entry *e;
3878 BT_DBG("%s", hdev->name);
3882 if (!hci_discovery_active(hdev)) {
3883 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3884 MGMT_STATUS_FAILED, &cp->addr,
3889 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
3891 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3892 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
3897 if (cp->name_known) {
3898 e->name_state = NAME_KNOWN;
3901 e->name_state = NAME_NEEDED;
3902 hci_inquiry_cache_update_resolve(hdev, e);
3905 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
3909 hci_dev_unlock(hdev);
/* MGMT Block Device command handler: validate the address type, add the
 * device to the blacklist and, on success, broadcast a Device Blocked
 * event before acknowledging the command.
 */
3913 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
3916 struct mgmt_cp_block_device *cp = data;
3920 BT_DBG("%s", hdev->name);
3922 if (!bdaddr_type_is_valid(cp->addr.type))
3923 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
3924 MGMT_STATUS_INVALID_PARAMS,
3925 &cp->addr, sizeof(cp->addr));
3929 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
3931 status = MGMT_STATUS_FAILED;
3935 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
3937 status = MGMT_STATUS_SUCCESS;
3940 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
3941 &cp->addr, sizeof(cp->addr));
3943 hci_dev_unlock(hdev);
/* MGMT Unblock Device command handler: mirror of block_device() --
 * remove the device from the blacklist and emit a Device Unblocked
 * event on success.
 */
3948 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
3951 struct mgmt_cp_unblock_device *cp = data;
3955 BT_DBG("%s", hdev->name);
3957 if (!bdaddr_type_is_valid(cp->addr.type))
3958 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
3959 MGMT_STATUS_INVALID_PARAMS,
3960 &cp->addr, sizeof(cp->addr));
3964 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
3966 status = MGMT_STATUS_INVALID_PARAMS;
3970 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
3972 status = MGMT_STATUS_SUCCESS;
3975 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
3976 &cp->addr, sizeof(cp->addr));
3978 hci_dev_unlock(hdev);
/* MGMT Set Device ID command handler.
 *
 * Accepts source values 0x0000-0x0002 (unset / Bluetooth SIG / USB IF),
 * stores the DI record fields on the hdev and runs an HCI request --
 * NOTE(review): the elided lines between init and run presumably queue
 * an EIR update; confirm against the full source.
 */
3983 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
3986 struct mgmt_cp_set_device_id *cp = data;
3987 struct hci_request req;
3991 BT_DBG("%s", hdev->name);
3993 source = __le16_to_cpu(cp->source);
3995 if (source > 0x0002)
3996 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
3997 MGMT_STATUS_INVALID_PARAMS);
4001 hdev->devid_source = source;
4002 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
4003 hdev->devid_product = __le16_to_cpu(cp->product);
4004 hdev->devid_version = __le16_to_cpu(cp->version);
4006 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
4008 hci_req_init(&req, hdev);
4010 hci_req_run(&req, NULL);
4012 hci_dev_unlock(hdev);
/* Request-completion callback for Set Advertising: on failure send
 * cmd_status to every pending SET_ADVERTISING command; on success send
 * the settings response to each and emit New Settings once.
 */
4017 static void set_advertising_complete(struct hci_dev *hdev, u8 status)
4019 struct cmd_lookup match = { NULL, hdev };
4022 u8 mgmt_err = mgmt_status(status);
4024 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
4025 cmd_status_rsp, &mgmt_err);
4029 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
4032 new_settings(hdev, match.sk);
/* MGMT Set Advertising command handler.
 *
 * Requires LE support and a 0x00/0x01 value. When the controller is
 * unpowered, the setting is already in the requested state, or LE
 * connections exist, only the HCI_ADVERTISING flag is toggled and a
 * direct response (plus New Settings) is sent. Otherwise, provided no
 * conflicting SET_ADVERTISING/SET_LE command is pending, an HCI request
 * enabling or disabling advertising is run.
 */
4038 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
4041 struct mgmt_mode *cp = data;
4042 struct pending_cmd *cmd;
4043 struct hci_request req;
4044 u8 val, enabled, status;
4047 BT_DBG("request for %s", hdev->name);
4049 status = mgmt_le_support(hdev);
4051 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4054 if (cp->val != 0x00 && cp->val != 0x01)
4055 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4056 MGMT_STATUS_INVALID_PARAMS);
4061 enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);
4063 /* The following conditions are ones which mean that we should
4064 * not do any HCI communication but directly send a mgmt
4065 * response to user space (after toggling the flag if
4068 if (!hdev_is_powered(hdev) || val == enabled ||
4069 hci_conn_num(hdev, LE_LINK) > 0) {
4070 bool changed = false;
4072 if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
4073 change_bit(HCI_ADVERTISING, &hdev->dev_flags);
4077 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
4082 err = new_settings(hdev, sk);
4087 if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
4088 mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
4089 err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4094 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
4100 hci_req_init(&req, hdev);
4103 enable_advertising(&req);
4105 disable_advertising(&req);
4107 err = hci_req_run(&req, set_advertising_complete);
4109 mgmt_pending_remove(cmd);
4112 hci_dev_unlock(hdev);
/* MGMT Set Static Address command handler.
 *
 * Only allowed on LE-capable, unpowered controllers. A non-zero address
 * must not be BDADDR_NONE and must have the two most significant bits
 * set (static random address format per the Core Specification); the
 * address is then stored in hdev->static_addr.
 */
4116 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
4117 void *data, u16 len)
4119 struct mgmt_cp_set_static_address *cp = data;
4122 BT_DBG("%s", hdev->name);
4124 if (!lmp_le_capable(hdev))
4125 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4126 MGMT_STATUS_NOT_SUPPORTED);
4128 if (hdev_is_powered(hdev))
4129 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4130 MGMT_STATUS_REJECTED);
4132 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
4133 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
4134 return cmd_status(sk, hdev->id,
4135 MGMT_OP_SET_STATIC_ADDRESS,
4136 MGMT_STATUS_INVALID_PARAMS);
4138 /* Two most significant bits shall be set */
4139 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
4140 return cmd_status(sk, hdev->id,
4141 MGMT_OP_SET_STATIC_ADDRESS,
4142 MGMT_STATUS_INVALID_PARAMS);
4147 bacpy(&hdev->static_addr, &cp->bdaddr);
4149 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
4151 hci_dev_unlock(hdev);
/* MGMT Set Scan Parameters command handler.
 *
 * Validates interval and window against the HCI-defined range
 * 0x0004-0x4000 and requires window <= interval, then stores them as
 * the LE scan parameters. If a background (passive) scan is currently
 * running while discovery is stopped, it is restarted so the new
 * parameters take effect.
 */
4156 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
4157 void *data, u16 len)
4159 struct mgmt_cp_set_scan_params *cp = data;
4160 __u16 interval, window;
4163 BT_DBG("%s", hdev->name);
4165 if (!lmp_le_capable(hdev))
4166 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4167 MGMT_STATUS_NOT_SUPPORTED);
4169 interval = __le16_to_cpu(cp->interval);
4171 if (interval < 0x0004 || interval > 0x4000)
4172 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4173 MGMT_STATUS_INVALID_PARAMS);
4175 window = __le16_to_cpu(cp->window);
4177 if (window < 0x0004 || window > 0x4000)
4178 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4179 MGMT_STATUS_INVALID_PARAMS);
4181 if (window > interval)
4182 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4183 MGMT_STATUS_INVALID_PARAMS);
4187 hdev->le_scan_interval = interval;
4188 hdev->le_scan_window = window;
4190 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
4192 /* If background scan is running, restart it so new parameters are
4195 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
4196 hdev->discovery.state == DISCOVERY_STOPPED) {
4197 struct hci_request req;
4199 hci_req_init(&req, hdev);
4201 hci_req_add_le_scan_disable(&req);
4202 hci_req_add_le_passive_scan(&req);
4204 hci_req_run(&req, NULL);
4207 hci_dev_unlock(hdev);
/* Request-completion callback for Set Fast Connectable: on failure
 * report cmd_status; on success commit the HCI_FAST_CONNECTABLE flag
 * from the stored request parameter, reply with the settings response
 * and emit New Settings.
 */
4212 static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
4214 struct pending_cmd *cmd;
4216 BT_DBG("status 0x%02x", status);
4220 cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4225 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4226 mgmt_status(status));
4228 struct mgmt_mode *cp = cmd->param;
4231 set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4233 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4235 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4236 new_settings(hdev, cmd->sk);
4239 mgmt_pending_remove(cmd);
4242 hci_dev_unlock(hdev);
/* MGMT Set Fast Connectable command handler.
 *
 * Requires BR/EDR enabled on a >= 1.2 controller that is powered and
 * connectable, with no duplicate request pending. A no-op change is
 * acknowledged directly; otherwise an HCI request adjusting the page
 * scan parameters (write_fast_connectable) is run.
 */
4245 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
4246 void *data, u16 len)
4248 struct mgmt_mode *cp = data;
4249 struct pending_cmd *cmd;
4250 struct hci_request req;
4253 BT_DBG("%s", hdev->name);
4255 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
4256 hdev->hci_ver < BLUETOOTH_VER_1_2)
4257 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4258 MGMT_STATUS_NOT_SUPPORTED);
4260 if (cp->val != 0x00 && cp->val != 0x01)
4261 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4262 MGMT_STATUS_INVALID_PARAMS);
4264 if (!hdev_is_powered(hdev))
4265 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4266 MGMT_STATUS_NOT_POWERED);
4268 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4269 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4270 MGMT_STATUS_REJECTED);
4274 if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
4275 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4280 if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
4281 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
4286 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
4293 hci_req_init(&req, hdev);
4295 write_fast_connectable(&req, cp->val);
4297 err = hci_req_run(&req, fast_connectable_complete);
4299 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4300 MGMT_STATUS_FAILED);
4301 mgmt_pending_remove(cmd);
4305 hci_dev_unlock(hdev);
/* Queue a Write Scan Enable onto the request with page scan (if
 * connectable) and inquiry scan (if discoverable) enabled, first making
 * sure fast connectable page-scan parameters are reset.
 */
4310 static void set_bredr_scan(struct hci_request *req)
4312 struct hci_dev *hdev = req->hdev;
4315 /* Ensure that fast connectable is disabled. This function will
4316 * not do anything if the page scan parameters are already what
4319 write_fast_connectable(req, false);
4321 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4323 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
4324 scan |= SCAN_INQUIRY;
4327 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* Request-completion callback for Set BR/EDR: on failure roll back the
 * optimistically-set HCI_BREDR_ENABLED flag and report cmd_status; on
 * success send the settings response and New Settings.
 */
4330 static void set_bredr_complete(struct hci_dev *hdev, u8 status)
4332 struct pending_cmd *cmd;
4334 BT_DBG("status 0x%02x", status);
4338 cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
4343 u8 mgmt_err = mgmt_status(status);
4345 /* We need to restore the flag if related HCI commands
4348 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4350 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
4352 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
4353 new_settings(hdev, cmd->sk);
4356 mgmt_pending_remove(cmd);
4359 hci_dev_unlock(hdev);
/* MGMT Set BR/EDR command handler (dual-mode controllers only).
 *
 * Requires both BR/EDR and LE capability with LE enabled, and a
 * 0x00/0x01 value. Unpowered: disabling also clears the dependent
 * flags (discoverable, SSP, link security, fast connectable, HS) before
 * toggling HCI_BREDR_ENABLED. Powered: disabling is rejected; enabling
 * sets the flag up front (so update_adv_data() generates correct
 * advertising flags), then runs an HCI request that enables BR/EDR
 * scanning (when connectable) and refreshes the advertising data.
 */
4362 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
4364 struct mgmt_mode *cp = data;
4365 struct pending_cmd *cmd;
4366 struct hci_request req;
4369 BT_DBG("request for %s", hdev->name);
4371 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
4372 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4373 MGMT_STATUS_NOT_SUPPORTED);
4375 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
4376 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4377 MGMT_STATUS_REJECTED);
4379 if (cp->val != 0x00 && cp->val != 0x01)
4380 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4381 MGMT_STATUS_INVALID_PARAMS);
4385 if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
4386 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4390 if (!hdev_is_powered(hdev)) {
4392 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4393 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
4394 clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
4395 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4396 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
4399 change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4401 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4405 err = new_settings(hdev, sk);
4409 /* Reject disabling when powered on */
4411 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4412 MGMT_STATUS_REJECTED);
4416 if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
4417 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4422 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
4428 /* We need to flip the bit already here so that update_adv_data
4429 * generates the correct flags.
4431 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4433 hci_req_init(&req, hdev);
4435 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4436 set_bredr_scan(&req);
4438 /* Since only the advertising data flags will change, there
4439 * is no need to update the scan response data.
4441 update_adv_data(&req);
4443 err = hci_req_run(&req, set_bredr_complete);
4445 mgmt_pending_remove(cmd);
4448 hci_dev_unlock(hdev);
/* MGMT Set Secure Connections command handler.
 *
 * Accepts 0x00 (off), 0x01 (on) and 0x02 (SC-only mode); requires
 * BR/EDR support and either SC controller capability or the force-SC
 * debug flag. When unpowered, only the HCI_SC_ENABLED / HCI_SC_ONLY
 * flags are toggled and a direct response sent. When powered, an HCI
 * Write Secure Connections Support command is issued and the SC-only
 * flag updated on success.
 */
4452 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
4453 void *data, u16 len)
4455 struct mgmt_mode *cp = data;
4456 struct pending_cmd *cmd;
4460 BT_DBG("request for %s", hdev->name);
4462 status = mgmt_bredr_support(hdev);
4464 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4467 if (!lmp_sc_capable(hdev) &&
4468 !test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
4469 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4470 MGMT_STATUS_NOT_SUPPORTED);
4472 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4473 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4474 MGMT_STATUS_INVALID_PARAMS);
4478 if (!hdev_is_powered(hdev)) {
4482 changed = !test_and_set_bit(HCI_SC_ENABLED,
4484 if (cp->val == 0x02)
4485 set_bit(HCI_SC_ONLY, &hdev->dev_flags);
4487 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4489 changed = test_and_clear_bit(HCI_SC_ENABLED,
4491 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4494 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4499 err = new_settings(hdev, sk);
4504 if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
4505 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
/* No-op when both the enable and SC-only state already match. */
4512 if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
4513 (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
4514 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4518 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
4524 err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
4526 mgmt_pending_remove(cmd);
4530 if (cp->val == 0x02)
4531 set_bit(HCI_SC_ONLY, &hdev->dev_flags);
4533 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4536 hci_dev_unlock(hdev);
/* MGMT Set Debug Keys command handler.
 *
 * Accepts 0x00/0x01/0x02: non-zero keeps received debug keys
 * (HCI_KEEP_DEBUG_KEYS); 0x02 additionally generates debug keys locally
 * (HCI_USE_DEBUG_KEYS). When the use-flag changes on a powered,
 * SSP-enabled controller, HCI Write SSP Debug Mode is sent to sync the
 * controller. A New Settings event follows any flag change.
 */
4540 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
4541 void *data, u16 len)
4543 struct mgmt_mode *cp = data;
4544 bool changed, use_changed;
4547 BT_DBG("request for %s", hdev->name);
4549 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4550 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
4551 MGMT_STATUS_INVALID_PARAMS);
4556 changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
4559 changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
4562 if (cp->val == 0x02)
4563 use_changed = !test_and_set_bit(HCI_USE_DEBUG_KEYS,
4566 use_changed = test_and_clear_bit(HCI_USE_DEBUG_KEYS,
4569 if (hdev_is_powered(hdev) && use_changed &&
4570 test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
4571 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
4572 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
4573 sizeof(mode), &mode);
4576 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
4581 err = new_settings(hdev, sk);
4584 hci_dev_unlock(hdev);
4588 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4591 struct mgmt_cp_set_privacy *cp = cp_data;
4595 BT_DBG("request for %s", hdev->name);
4597 if (!lmp_le_capable(hdev))
4598 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4599 MGMT_STATUS_NOT_SUPPORTED);
4601 if (cp->privacy != 0x00 && cp->privacy != 0x01)
4602 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4603 MGMT_STATUS_INVALID_PARAMS);
4605 if (hdev_is_powered(hdev))
4606 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4607 MGMT_STATUS_REJECTED);
4611 /* If user space supports this command it is also expected to
4612 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
4614 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4617 changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
4618 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
4619 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
4621 changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
4622 memset(hdev->irk, 0, sizeof(hdev->irk));
4623 clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
4626 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
4631 err = new_settings(hdev, sk);
4634 hci_dev_unlock(hdev);
4638 static bool irk_is_valid(struct mgmt_irk_info *irk)
4640 switch (irk->addr.type) {
4641 case BDADDR_LE_PUBLIC:
4644 case BDADDR_LE_RANDOM:
4645 /* Two most significant bits shall be set */
4646 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4654 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4657 struct mgmt_cp_load_irks *cp = cp_data;
4658 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
4659 sizeof(struct mgmt_irk_info));
4660 u16 irk_count, expected_len;
4663 BT_DBG("request for %s", hdev->name);
4665 if (!lmp_le_capable(hdev))
4666 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4667 MGMT_STATUS_NOT_SUPPORTED);
4669 irk_count = __le16_to_cpu(cp->irk_count);
4670 if (irk_count > max_irk_count) {
4671 BT_ERR("load_irks: too big irk_count value %u", irk_count);
4672 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4673 MGMT_STATUS_INVALID_PARAMS);
4676 expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
4677 if (expected_len != len) {
4678 BT_ERR("load_irks: expected %u bytes, got %u bytes",
4680 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4681 MGMT_STATUS_INVALID_PARAMS);
4684 BT_DBG("%s irk_count %u", hdev->name, irk_count);
4686 for (i = 0; i < irk_count; i++) {
4687 struct mgmt_irk_info *key = &cp->irks[i];
4689 if (!irk_is_valid(key))
4690 return cmd_status(sk, hdev->id,
4692 MGMT_STATUS_INVALID_PARAMS);
4697 hci_smp_irks_clear(hdev);
4699 for (i = 0; i < irk_count; i++) {
4700 struct mgmt_irk_info *irk = &cp->irks[i];
4703 if (irk->addr.type == BDADDR_LE_PUBLIC)
4704 addr_type = ADDR_LE_DEV_PUBLIC;
4706 addr_type = ADDR_LE_DEV_RANDOM;
4708 hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
4712 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4714 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
4716 hci_dev_unlock(hdev);
4721 static bool ltk_is_valid(struct mgmt_ltk_info *key)
4723 if (key->master != 0x00 && key->master != 0x01)
4726 switch (key->addr.type) {
4727 case BDADDR_LE_PUBLIC:
4730 case BDADDR_LE_RANDOM:
4731 /* Two most significant bits shall be set */
4732 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4740 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
4741 void *cp_data, u16 len)
4743 struct mgmt_cp_load_long_term_keys *cp = cp_data;
4744 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
4745 sizeof(struct mgmt_ltk_info));
4746 u16 key_count, expected_len;
4749 BT_DBG("request for %s", hdev->name);
4751 if (!lmp_le_capable(hdev))
4752 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4753 MGMT_STATUS_NOT_SUPPORTED);
4755 key_count = __le16_to_cpu(cp->key_count);
4756 if (key_count > max_key_count) {
4757 BT_ERR("load_ltks: too big key_count value %u", key_count);
4758 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4759 MGMT_STATUS_INVALID_PARAMS);
4762 expected_len = sizeof(*cp) + key_count *
4763 sizeof(struct mgmt_ltk_info);
4764 if (expected_len != len) {
4765 BT_ERR("load_keys: expected %u bytes, got %u bytes",
4767 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4768 MGMT_STATUS_INVALID_PARAMS);
4771 BT_DBG("%s key_count %u", hdev->name, key_count);
4773 for (i = 0; i < key_count; i++) {
4774 struct mgmt_ltk_info *key = &cp->keys[i];
4776 if (!ltk_is_valid(key))
4777 return cmd_status(sk, hdev->id,
4778 MGMT_OP_LOAD_LONG_TERM_KEYS,
4779 MGMT_STATUS_INVALID_PARAMS);
4784 hci_smp_ltks_clear(hdev);
4786 for (i = 0; i < key_count; i++) {
4787 struct mgmt_ltk_info *key = &cp->keys[i];
4788 u8 type, addr_type, authenticated;
4790 if (key->addr.type == BDADDR_LE_PUBLIC)
4791 addr_type = ADDR_LE_DEV_PUBLIC;
4793 addr_type = ADDR_LE_DEV_RANDOM;
4798 type = SMP_LTK_SLAVE;
4800 switch (key->type) {
4801 case MGMT_LTK_UNAUTHENTICATED:
4802 authenticated = 0x00;
4804 case MGMT_LTK_AUTHENTICATED:
4805 authenticated = 0x01;
4811 hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
4812 authenticated, key->val, key->enc_size, key->ediv,
4816 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
4819 hci_dev_unlock(hdev);
4824 struct cmd_conn_lookup {
4825 struct hci_conn *conn;
4826 bool valid_tx_power;
4830 static void get_conn_info_complete(struct pending_cmd *cmd, void *data)
4832 struct cmd_conn_lookup *match = data;
4833 struct mgmt_cp_get_conn_info *cp;
4834 struct mgmt_rp_get_conn_info rp;
4835 struct hci_conn *conn = cmd->user_data;
4837 if (conn != match->conn)
4840 cp = (struct mgmt_cp_get_conn_info *) cmd->param;
4842 memset(&rp, 0, sizeof(rp));
4843 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4844 rp.addr.type = cp->addr.type;
4846 if (!match->mgmt_status) {
4847 rp.rssi = conn->rssi;
4849 if (match->valid_tx_power) {
4850 rp.tx_power = conn->tx_power;
4851 rp.max_tx_power = conn->max_tx_power;
4853 rp.tx_power = HCI_TX_POWER_INVALID;
4854 rp.max_tx_power = HCI_TX_POWER_INVALID;
4858 cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
4859 match->mgmt_status, &rp, sizeof(rp));
4861 hci_conn_drop(conn);
4863 mgmt_pending_remove(cmd);
4866 static void conn_info_refresh_complete(struct hci_dev *hdev, u8 status)
4868 struct hci_cp_read_rssi *cp;
4869 struct hci_conn *conn;
4870 struct cmd_conn_lookup match;
4873 BT_DBG("status 0x%02x", status);
4877 /* TX power data is valid in case request completed successfully,
4878 * otherwise we assume it's not valid. At the moment we assume that
4879 * either both or none of current and max values are valid to keep code
4882 match.valid_tx_power = !status;
4884 /* Commands sent in request are either Read RSSI or Read Transmit Power
4885 * Level so we check which one was last sent to retrieve connection
4886 * handle. Both commands have handle as first parameter so it's safe to
4887 * cast data on the same command struct.
4889 * First command sent is always Read RSSI and we fail only if it fails.
4890 * In other case we simply override error to indicate success as we
4891 * already remembered if TX power value is actually valid.
4893 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
4895 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
4900 BT_ERR("invalid sent_cmd in response");
4904 handle = __le16_to_cpu(cp->handle);
4905 conn = hci_conn_hash_lookup_handle(hdev, handle);
4907 BT_ERR("unknown handle (%d) in response", handle);
4912 match.mgmt_status = mgmt_status(status);
4914 /* Cache refresh is complete, now reply for mgmt request for given
4917 mgmt_pending_foreach(MGMT_OP_GET_CONN_INFO, hdev,
4918 get_conn_info_complete, &match);
4921 hci_dev_unlock(hdev);
4924 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
4927 struct mgmt_cp_get_conn_info *cp = data;
4928 struct mgmt_rp_get_conn_info rp;
4929 struct hci_conn *conn;
4930 unsigned long conn_info_age;
4933 BT_DBG("%s", hdev->name);
4935 memset(&rp, 0, sizeof(rp));
4936 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4937 rp.addr.type = cp->addr.type;
4939 if (!bdaddr_type_is_valid(cp->addr.type))
4940 return cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
4941 MGMT_STATUS_INVALID_PARAMS,
4946 if (!hdev_is_powered(hdev)) {
4947 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
4948 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
4952 if (cp->addr.type == BDADDR_BREDR)
4953 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
4956 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
4958 if (!conn || conn->state != BT_CONNECTED) {
4959 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
4960 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
4964 /* To avoid client trying to guess when to poll again for information we
4965 * calculate conn info age as random value between min/max set in hdev.
4967 conn_info_age = hdev->conn_info_min_age +
4968 prandom_u32_max(hdev->conn_info_max_age -
4969 hdev->conn_info_min_age);
4971 /* Query controller to refresh cached values if they are too old or were
4974 if (time_after(jiffies, conn->conn_info_timestamp +
4975 msecs_to_jiffies(conn_info_age)) ||
4976 !conn->conn_info_timestamp) {
4977 struct hci_request req;
4978 struct hci_cp_read_tx_power req_txp_cp;
4979 struct hci_cp_read_rssi req_rssi_cp;
4980 struct pending_cmd *cmd;
4982 hci_req_init(&req, hdev);
4983 req_rssi_cp.handle = cpu_to_le16(conn->handle);
4984 hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
4987 /* For LE links TX power does not change thus we don't need to
4988 * query for it once value is known.
4990 if (!bdaddr_type_is_le(cp->addr.type) ||
4991 conn->tx_power == HCI_TX_POWER_INVALID) {
4992 req_txp_cp.handle = cpu_to_le16(conn->handle);
4993 req_txp_cp.type = 0x00;
4994 hci_req_add(&req, HCI_OP_READ_TX_POWER,
4995 sizeof(req_txp_cp), &req_txp_cp);
4998 /* Max TX power needs to be read only once per connection */
4999 if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
5000 req_txp_cp.handle = cpu_to_le16(conn->handle);
5001 req_txp_cp.type = 0x01;
5002 hci_req_add(&req, HCI_OP_READ_TX_POWER,
5003 sizeof(req_txp_cp), &req_txp_cp);
5006 err = hci_req_run(&req, conn_info_refresh_complete);
5010 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
5017 hci_conn_hold(conn);
5018 cmd->user_data = conn;
5020 conn->conn_info_timestamp = jiffies;
5022 /* Cache is valid, just reply with values cached in hci_conn */
5023 rp.rssi = conn->rssi;
5024 rp.tx_power = conn->tx_power;
5025 rp.max_tx_power = conn->max_tx_power;
5027 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5028 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
5032 hci_dev_unlock(hdev);
5036 static void get_clock_info_complete(struct hci_dev *hdev, u8 status)
5038 struct mgmt_cp_get_clock_info *cp;
5039 struct mgmt_rp_get_clock_info rp;
5040 struct hci_cp_read_clock *hci_cp;
5041 struct pending_cmd *cmd;
5042 struct hci_conn *conn;
5044 BT_DBG("%s status %u", hdev->name, status);
5048 hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
5052 if (hci_cp->which) {
5053 u16 handle = __le16_to_cpu(hci_cp->handle);
5054 conn = hci_conn_hash_lookup_handle(hdev, handle);
5059 cmd = mgmt_pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
5065 memset(&rp, 0, sizeof(rp));
5066 memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));
5071 rp.local_clock = cpu_to_le32(hdev->clock);
5074 rp.piconet_clock = cpu_to_le32(conn->clock);
5075 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
5079 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
5081 mgmt_pending_remove(cmd);
5083 hci_conn_drop(conn);
5086 hci_dev_unlock(hdev);
5089 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
5092 struct mgmt_cp_get_clock_info *cp = data;
5093 struct mgmt_rp_get_clock_info rp;
5094 struct hci_cp_read_clock hci_cp;
5095 struct pending_cmd *cmd;
5096 struct hci_request req;
5097 struct hci_conn *conn;
5100 BT_DBG("%s", hdev->name);
5102 memset(&rp, 0, sizeof(rp));
5103 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5104 rp.addr.type = cp->addr.type;
5106 if (cp->addr.type != BDADDR_BREDR)
5107 return cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5108 MGMT_STATUS_INVALID_PARAMS,
5113 if (!hdev_is_powered(hdev)) {
5114 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5115 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
5119 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5120 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5122 if (!conn || conn->state != BT_CONNECTED) {
5123 err = cmd_complete(sk, hdev->id,
5124 MGMT_OP_GET_CLOCK_INFO,
5125 MGMT_STATUS_NOT_CONNECTED,
5133 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
5139 hci_req_init(&req, hdev);
5141 memset(&hci_cp, 0, sizeof(hci_cp));
5142 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5145 hci_conn_hold(conn);
5146 cmd->user_data = conn;
5148 hci_cp.handle = cpu_to_le16(conn->handle);
5149 hci_cp.which = 0x01; /* Piconet clock */
5150 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5153 err = hci_req_run(&req, get_clock_info_complete);
5155 mgmt_pending_remove(cmd);
5158 hci_dev_unlock(hdev);
5162 static void device_added(struct sock *sk, struct hci_dev *hdev,
5163 bdaddr_t *bdaddr, u8 type, u8 action)
5165 struct mgmt_ev_device_added ev;
5167 bacpy(&ev.addr.bdaddr, bdaddr);
5168 ev.addr.type = type;
5171 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
5174 static int add_device(struct sock *sk, struct hci_dev *hdev,
5175 void *data, u16 len)
5177 struct mgmt_cp_add_device *cp = data;
5178 u8 auto_conn, addr_type;
5181 BT_DBG("%s", hdev->name);
5183 if (!bdaddr_type_is_le(cp->addr.type) ||
5184 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
5185 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5186 MGMT_STATUS_INVALID_PARAMS,
5187 &cp->addr, sizeof(cp->addr));
5189 if (cp->action != 0x00 && cp->action != 0x01)
5190 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5191 MGMT_STATUS_INVALID_PARAMS,
5192 &cp->addr, sizeof(cp->addr));
5196 if (cp->addr.type == BDADDR_LE_PUBLIC)
5197 addr_type = ADDR_LE_DEV_PUBLIC;
5199 addr_type = ADDR_LE_DEV_RANDOM;
5202 auto_conn = HCI_AUTO_CONN_ALWAYS;
5204 auto_conn = HCI_AUTO_CONN_REPORT;
5206 /* If the connection parameters don't exist for this device,
5207 * they will be created and configured with defaults.
5209 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
5211 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5213 &cp->addr, sizeof(cp->addr));
5217 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
5219 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5220 MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));
5223 hci_dev_unlock(hdev);
5227 static void device_removed(struct sock *sk, struct hci_dev *hdev,
5228 bdaddr_t *bdaddr, u8 type)
5230 struct mgmt_ev_device_removed ev;
5232 bacpy(&ev.addr.bdaddr, bdaddr);
5233 ev.addr.type = type;
5235 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
5238 static int remove_device(struct sock *sk, struct hci_dev *hdev,
5239 void *data, u16 len)
5241 struct mgmt_cp_remove_device *cp = data;
5244 BT_DBG("%s", hdev->name);
5248 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5249 struct hci_conn_params *params;
5252 if (!bdaddr_type_is_le(cp->addr.type)) {
5253 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5254 MGMT_STATUS_INVALID_PARAMS,
5255 &cp->addr, sizeof(cp->addr));
5259 if (cp->addr.type == BDADDR_LE_PUBLIC)
5260 addr_type = ADDR_LE_DEV_PUBLIC;
5262 addr_type = ADDR_LE_DEV_RANDOM;
5264 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5267 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5268 MGMT_STATUS_INVALID_PARAMS,
5269 &cp->addr, sizeof(cp->addr));
5273 if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
5274 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5275 MGMT_STATUS_INVALID_PARAMS,
5276 &cp->addr, sizeof(cp->addr));
5280 list_del(¶ms->action);
5281 list_del(¶ms->list);
5283 hci_update_background_scan(hdev);
5285 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
5287 struct hci_conn_params *p, *tmp;
5289 if (cp->addr.type) {
5290 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5291 MGMT_STATUS_INVALID_PARAMS,
5292 &cp->addr, sizeof(cp->addr));
5296 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
5297 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
5299 device_removed(sk, hdev, &p->addr, p->addr_type);
5300 list_del(&p->action);
5305 BT_DBG("All LE connection parameters were removed");
5307 hci_update_background_scan(hdev);
5310 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5311 MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));
5314 hci_dev_unlock(hdev);
5318 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
5321 struct mgmt_cp_load_conn_param *cp = data;
5322 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
5323 sizeof(struct mgmt_conn_param));
5324 u16 param_count, expected_len;
5327 if (!lmp_le_capable(hdev))
5328 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5329 MGMT_STATUS_NOT_SUPPORTED);
5331 param_count = __le16_to_cpu(cp->param_count);
5332 if (param_count > max_param_count) {
5333 BT_ERR("load_conn_param: too big param_count value %u",
5335 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5336 MGMT_STATUS_INVALID_PARAMS);
5339 expected_len = sizeof(*cp) + param_count *
5340 sizeof(struct mgmt_conn_param);
5341 if (expected_len != len) {
5342 BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
5344 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5345 MGMT_STATUS_INVALID_PARAMS);
5348 BT_DBG("%s param_count %u", hdev->name, param_count);
5352 hci_conn_params_clear_disabled(hdev);
5354 for (i = 0; i < param_count; i++) {
5355 struct mgmt_conn_param *param = &cp->params[i];
5356 struct hci_conn_params *hci_param;
5357 u16 min, max, latency, timeout;
5360 BT_DBG("Adding %pMR (type %u)", ¶m->addr.bdaddr,
5363 if (param->addr.type == BDADDR_LE_PUBLIC) {
5364 addr_type = ADDR_LE_DEV_PUBLIC;
5365 } else if (param->addr.type == BDADDR_LE_RANDOM) {
5366 addr_type = ADDR_LE_DEV_RANDOM;
5368 BT_ERR("Ignoring invalid connection parameters");
5372 min = le16_to_cpu(param->min_interval);
5373 max = le16_to_cpu(param->max_interval);
5374 latency = le16_to_cpu(param->latency);
5375 timeout = le16_to_cpu(param->timeout);
5377 BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
5378 min, max, latency, timeout);
5380 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
5381 BT_ERR("Ignoring invalid connection parameters");
5385 hci_param = hci_conn_params_add(hdev, ¶m->addr.bdaddr,
5388 BT_ERR("Failed to add connection parameters");
5392 hci_param->conn_min_interval = min;
5393 hci_param->conn_max_interval = max;
5394 hci_param->conn_latency = latency;
5395 hci_param->supervision_timeout = timeout;
5398 hci_dev_unlock(hdev);
5400 return cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0, NULL, 0);
5403 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
5404 void *data, u16 len)
5406 struct mgmt_cp_set_external_config *cp = data;
5410 BT_DBG("%s", hdev->name);
5412 if (hdev_is_powered(hdev))
5413 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5414 MGMT_STATUS_REJECTED);
5416 if (cp->config != 0x00 && cp->config != 0x01)
5417 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5418 MGMT_STATUS_INVALID_PARAMS);
5420 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
5421 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5422 MGMT_STATUS_NOT_SUPPORTED);
5427 changed = !test_and_set_bit(HCI_EXT_CONFIGURED,
5430 changed = test_and_clear_bit(HCI_EXT_CONFIGURED,
5433 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
5440 err = new_options(hdev, sk);
5442 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) == is_configured(hdev)) {
5443 mgmt_index_removed(hdev);
5444 change_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
5445 mgmt_index_added(hdev);
5449 hci_dev_unlock(hdev);
5453 static const struct mgmt_handler {
5454 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
5458 } mgmt_handlers[] = {
5459 { NULL }, /* 0x0000 (no command) */
5460 { read_version, false, MGMT_READ_VERSION_SIZE },
5461 { read_commands, false, MGMT_READ_COMMANDS_SIZE },
5462 { read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
5463 { read_controller_info, false, MGMT_READ_INFO_SIZE },
5464 { set_powered, false, MGMT_SETTING_SIZE },
5465 { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
5466 { set_connectable, false, MGMT_SETTING_SIZE },
5467 { set_fast_connectable, false, MGMT_SETTING_SIZE },
5468 { set_pairable, false, MGMT_SETTING_SIZE },
5469 { set_link_security, false, MGMT_SETTING_SIZE },
5470 { set_ssp, false, MGMT_SETTING_SIZE },
5471 { set_hs, false, MGMT_SETTING_SIZE },
5472 { set_le, false, MGMT_SETTING_SIZE },
5473 { set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
5474 { set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
5475 { add_uuid, false, MGMT_ADD_UUID_SIZE },
5476 { remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
5477 { load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
5478 { load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
5479 { disconnect, false, MGMT_DISCONNECT_SIZE },
5480 { get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
5481 { pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
5482 { pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
5483 { set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
5484 { pair_device, false, MGMT_PAIR_DEVICE_SIZE },
5485 { cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
5486 { unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
5487 { user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
5488 { user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
5489 { user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
5490 { user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
5491 { read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
5492 { add_remote_oob_data, true, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
5493 { remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
5494 { start_discovery, false, MGMT_START_DISCOVERY_SIZE },
5495 { stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
5496 { confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
5497 { block_device, false, MGMT_BLOCK_DEVICE_SIZE },
5498 { unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
5499 { set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
5500 { set_advertising, false, MGMT_SETTING_SIZE },
5501 { set_bredr, false, MGMT_SETTING_SIZE },
5502 { set_static_address, false, MGMT_SET_STATIC_ADDRESS_SIZE },
5503 { set_scan_params, false, MGMT_SET_SCAN_PARAMS_SIZE },
5504 { set_secure_conn, false, MGMT_SETTING_SIZE },
5505 { set_debug_keys, false, MGMT_SETTING_SIZE },
5506 { set_privacy, false, MGMT_SET_PRIVACY_SIZE },
5507 { load_irks, true, MGMT_LOAD_IRKS_SIZE },
5508 { get_conn_info, false, MGMT_GET_CONN_INFO_SIZE },
5509 { get_clock_info, false, MGMT_GET_CLOCK_INFO_SIZE },
5510 { add_device, false, MGMT_ADD_DEVICE_SIZE },
5511 { remove_device, false, MGMT_REMOVE_DEVICE_SIZE },
5512 { load_conn_param, true, MGMT_LOAD_CONN_PARAM_SIZE },
5513 { read_unconf_index_list, false, MGMT_READ_UNCONF_INDEX_LIST_SIZE },
5514 { read_config_info, false, MGMT_READ_CONFIG_INFO_SIZE },
5515 { set_external_config, false, MGMT_SET_EXTERNAL_CONFIG_SIZE },
5518 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
5522 struct mgmt_hdr *hdr;
5523 u16 opcode, index, len;
5524 struct hci_dev *hdev = NULL;
5525 const struct mgmt_handler *handler;
5528 BT_DBG("got %zu bytes", msglen);
5530 if (msglen < sizeof(*hdr))
5533 buf = kmalloc(msglen, GFP_KERNEL);
5537 if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
5543 opcode = __le16_to_cpu(hdr->opcode);
5544 index = __le16_to_cpu(hdr->index);
5545 len = __le16_to_cpu(hdr->len);
5547 if (len != msglen - sizeof(*hdr)) {
5552 if (index != MGMT_INDEX_NONE) {
5553 hdev = hci_dev_get(index);
5555 err = cmd_status(sk, index, opcode,
5556 MGMT_STATUS_INVALID_INDEX);
5560 if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
5561 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5562 err = cmd_status(sk, index, opcode,
5563 MGMT_STATUS_INVALID_INDEX);
5567 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
5568 opcode != MGMT_OP_READ_CONFIG_INFO &&
5569 opcode != MGMT_OP_SET_EXTERNAL_CONFIG) {
5570 err = cmd_status(sk, index, opcode,
5571 MGMT_STATUS_INVALID_INDEX);
5576 if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
5577 mgmt_handlers[opcode].func == NULL) {
5578 BT_DBG("Unknown op %u", opcode);
5579 err = cmd_status(sk, index, opcode,
5580 MGMT_STATUS_UNKNOWN_COMMAND);
5584 if (hdev && (opcode <= MGMT_OP_READ_INDEX_LIST ||
5585 opcode == MGMT_OP_READ_UNCONF_INDEX_LIST)) {
5586 err = cmd_status(sk, index, opcode,
5587 MGMT_STATUS_INVALID_INDEX);
5591 if (!hdev && (opcode > MGMT_OP_READ_INDEX_LIST &&
5592 opcode != MGMT_OP_READ_UNCONF_INDEX_LIST)) {
5593 err = cmd_status(sk, index, opcode,
5594 MGMT_STATUS_INVALID_INDEX);
5598 handler = &mgmt_handlers[opcode];
5600 if ((handler->var_len && len < handler->data_len) ||
5601 (!handler->var_len && len != handler->data_len)) {
5602 err = cmd_status(sk, index, opcode,
5603 MGMT_STATUS_INVALID_PARAMS);
5608 mgmt_init_hdev(sk, hdev);
5610 cp = buf + sizeof(*hdr);
5612 err = handler->func(sk, hdev, cp, len);
5626 void mgmt_index_added(struct hci_dev *hdev)
5628 if (hdev->dev_type != HCI_BREDR)
5631 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
5634 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
5635 mgmt_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0, NULL);
5637 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
5640 void mgmt_index_removed(struct hci_dev *hdev)
5642 u8 status = MGMT_STATUS_INVALID_INDEX;
5644 if (hdev->dev_type != HCI_BREDR)
5647 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
5650 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
5652 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
5653 mgmt_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0, NULL);
5655 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
5658 /* This function requires the caller holds hdev->lock */
5659 static void restart_le_actions(struct hci_dev *hdev)
5661 struct hci_conn_params *p;
5663 list_for_each_entry(p, &hdev->le_conn_params, list) {
5664 /* Needed for AUTO_OFF case where might not "really"
5665 * have been powered off.
5667 list_del_init(&p->action);
5669 switch (p->auto_connect) {
5670 case HCI_AUTO_CONN_ALWAYS:
5671 list_add(&p->action, &hdev->pend_le_conns);
5673 case HCI_AUTO_CONN_REPORT:
5674 list_add(&p->action, &hdev->pend_le_reports);
5681 hci_update_background_scan(hdev);
5684 static void powered_complete(struct hci_dev *hdev, u8 status)
5686 struct cmd_lookup match = { NULL, hdev };
5688 BT_DBG("status 0x%02x", status);
5692 restart_le_actions(hdev);
5694 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
5696 new_settings(hdev, match.sk);
5698 hci_dev_unlock(hdev);
5704 static int powered_update_hci(struct hci_dev *hdev)
5706 struct hci_request req;
5709 hci_req_init(&req, hdev);
5711 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
5712 !lmp_host_ssp_capable(hdev)) {
5715 hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
5718 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
5719 lmp_bredr_capable(hdev)) {
5720 struct hci_cp_write_le_host_supported cp;
5723 cp.simul = lmp_le_br_capable(hdev);
5725 /* Check first if we already have the right
5726 * host state (host features set)
5728 if (cp.le != lmp_host_le_capable(hdev) ||
5729 cp.simul != lmp_host_le_br_capable(hdev))
5730 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
5734 if (lmp_le_capable(hdev)) {
5735 /* Make sure the controller has a good default for
5736 * advertising data. This also applies to the case
5737 * where BR/EDR was toggled during the AUTO_OFF phase.
5739 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
5740 update_adv_data(&req);
5741 update_scan_rsp_data(&req);
5744 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
5745 enable_advertising(&req);
5748 link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
5749 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
5750 hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
5751 sizeof(link_sec), &link_sec);
5753 if (lmp_bredr_capable(hdev)) {
5754 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
5755 set_bredr_scan(&req);
5761 return hci_req_run(&req, powered_complete);
5764 int mgmt_powered(struct hci_dev *hdev, u8 powered)
5766 struct cmd_lookup match = { NULL, hdev };
5767 u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
5768 u8 zero_cod[] = { 0, 0, 0 };
5771 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
5775 if (powered_update_hci(hdev) == 0)
5778 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
5783 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
5784 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);
5786 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
5787 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
5788 zero_cod, sizeof(zero_cod), NULL);
5791 err = new_settings(hdev, match.sk);
5799 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
5801 struct pending_cmd *cmd;
5804 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
5808 if (err == -ERFKILL)
5809 status = MGMT_STATUS_RFKILLED;
5811 status = MGMT_STATUS_FAILED;
5813 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
5815 mgmt_pending_remove(cmd);
5818 void mgmt_discoverable_timeout(struct hci_dev *hdev)
5820 struct hci_request req;
5824 /* When discoverable timeout triggers, then just make sure
5825 * the limited discoverable flag is cleared. Even in the case
5826 * of a timeout triggered from general discoverable, it is
5827 * safe to unconditionally clear the flag.
5829 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
5830 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
5832 hci_req_init(&req, hdev);
5833 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
5834 u8 scan = SCAN_PAGE;
5835 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
5836 sizeof(scan), &scan);
5839 update_adv_data(&req);
5840 hci_req_run(&req, NULL);
5842 hdev->discov_timeout = 0;
5844 new_settings(hdev, NULL);
5846 hci_dev_unlock(hdev);
5849 void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
5853 /* Nothing needed here if there's a pending command since that
5854 * commands request completion callback takes care of everything
5857 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
5860 /* Powering off may clear the scan mode - don't let that interfere */
5861 if (!discoverable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
5865 changed = !test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
5867 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
5868 changed = test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
5872 struct hci_request req;
5874 /* In case this change in discoverable was triggered by
5875 * a disabling of connectable there could be a need to
5876 * update the advertising flags.
5878 hci_req_init(&req, hdev);
5879 update_adv_data(&req);
5880 hci_req_run(&req, NULL);
5882 new_settings(hdev, NULL);
5886 void mgmt_connectable(struct hci_dev *hdev, u8 connectable)
5890 /* Nothing needed here if there's a pending command since that
5891 * commands request completion callback takes care of everything
5894 if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
5897 /* Powering off may clear the scan mode - don't let that interfere */
5898 if (!connectable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
5902 changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
5904 changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
5907 new_settings(hdev, NULL);
5910 void mgmt_advertising(struct hci_dev *hdev, u8 advertising)
5912 /* Powering off may stop advertising - don't let that interfere */
5913 if (!advertising && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
5917 set_bit(HCI_ADVERTISING, &hdev->dev_flags);
5919 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
5922 void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
5924 u8 mgmt_err = mgmt_status(status);
5926 if (scan & SCAN_PAGE)
5927 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
5928 cmd_status_rsp, &mgmt_err);
5930 if (scan & SCAN_INQUIRY)
5931 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
5932 cmd_status_rsp, &mgmt_err);
5935 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
5938 struct mgmt_ev_new_link_key ev;
5940 memset(&ev, 0, sizeof(ev));
5942 ev.store_hint = persistent;
5943 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
5944 ev.key.addr.type = BDADDR_BREDR;
5945 ev.key.type = key->type;
5946 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
5947 ev.key.pin_len = key->pin_len;
5949 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
5952 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
5954 if (ltk->authenticated)
5955 return MGMT_LTK_AUTHENTICATED;
5957 return MGMT_LTK_UNAUTHENTICATED;
5960 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
5962 struct mgmt_ev_new_long_term_key ev;
5964 memset(&ev, 0, sizeof(ev));
5966 /* Devices using resolvable or non-resolvable random addresses
5967 * without providing an indentity resolving key don't require
5968 * to store long term keys. Their addresses will change the
5971 * Only when a remote device provides an identity address
5972 * make sure the long term key is stored. If the remote
5973 * identity is known, the long term keys are internally
5974 * mapped to the identity address. So allow static random
5975 * and public addresses here.
5977 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
5978 (key->bdaddr.b[5] & 0xc0) != 0xc0)
5979 ev.store_hint = 0x00;
5981 ev.store_hint = persistent;
5983 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
5984 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
5985 ev.key.type = mgmt_ltk_type(key);
5986 ev.key.enc_size = key->enc_size;
5987 ev.key.ediv = key->ediv;
5988 ev.key.rand = key->rand;
5990 if (key->type == SMP_LTK)
5993 memcpy(ev.key.val, key->val, sizeof(key->val));
5995 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
5998 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
6000 struct mgmt_ev_new_irk ev;
6002 memset(&ev, 0, sizeof(ev));
6004 /* For identity resolving keys from devices that are already
6005 * using a public address or static random address, do not
6006 * ask for storing this key. The identity resolving key really
6007 * is only mandatory for devices using resovlable random
6010 * Storing all identity resolving keys has the downside that
6011 * they will be also loaded on next boot of they system. More
6012 * identity resolving keys, means more time during scanning is
6013 * needed to actually resolve these addresses.
6015 if (bacmp(&irk->rpa, BDADDR_ANY))
6016 ev.store_hint = 0x01;
6018 ev.store_hint = 0x00;
6020 bacpy(&ev.rpa, &irk->rpa);
6021 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
6022 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
6023 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
6025 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
6028 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
6031 struct mgmt_ev_new_csrk ev;
6033 memset(&ev, 0, sizeof(ev));
6035 /* Devices using resolvable or non-resolvable random addresses
6036 * without providing an indentity resolving key don't require
6037 * to store signature resolving keys. Their addresses will change
6038 * the next time around.
6040 * Only when a remote device provides an identity address
6041 * make sure the signature resolving key is stored. So allow
6042 * static random and public addresses here.
6044 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6045 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
6046 ev.store_hint = 0x00;
6048 ev.store_hint = persistent;
6050 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
6051 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
6052 ev.key.master = csrk->master;
6053 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
6055 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
6058 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
6059 u8 bdaddr_type, u8 store_hint, u16 min_interval,
6060 u16 max_interval, u16 latency, u16 timeout)
6062 struct mgmt_ev_new_conn_param ev;
6064 if (!hci_is_identity_address(bdaddr, bdaddr_type))
6067 memset(&ev, 0, sizeof(ev));
6068 bacpy(&ev.addr.bdaddr, bdaddr);
6069 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
6070 ev.store_hint = store_hint;
6071 ev.min_interval = cpu_to_le16(min_interval);
6072 ev.max_interval = cpu_to_le16(max_interval);
6073 ev.latency = cpu_to_le16(latency);
6074 ev.timeout = cpu_to_le16(timeout);
6076 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
6079 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
6082 eir[eir_len++] = sizeof(type) + data_len;
6083 eir[eir_len++] = type;
6084 memcpy(&eir[eir_len], data, data_len);
6085 eir_len += data_len;
6090 void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6091 u8 addr_type, u32 flags, u8 *name, u8 name_len,
6095 struct mgmt_ev_device_connected *ev = (void *) buf;
6098 bacpy(&ev->addr.bdaddr, bdaddr);
6099 ev->addr.type = link_to_bdaddr(link_type, addr_type);
6101 ev->flags = __cpu_to_le32(flags);
6104 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
6107 if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
6108 eir_len = eir_append_data(ev->eir, eir_len,
6109 EIR_CLASS_OF_DEV, dev_class, 3);
6111 ev->eir_len = cpu_to_le16(eir_len);
6113 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
6114 sizeof(*ev) + eir_len, NULL);
6117 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
6119 struct mgmt_cp_disconnect *cp = cmd->param;
6120 struct sock **sk = data;
6121 struct mgmt_rp_disconnect rp;
6123 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6124 rp.addr.type = cp->addr.type;
6126 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
6132 mgmt_pending_remove(cmd);
6135 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
6137 struct hci_dev *hdev = data;
6138 struct mgmt_cp_unpair_device *cp = cmd->param;
6139 struct mgmt_rp_unpair_device rp;
6141 memset(&rp, 0, sizeof(rp));
6142 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6143 rp.addr.type = cp->addr.type;
6145 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
6147 cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
6149 mgmt_pending_remove(cmd);
6152 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
6153 u8 link_type, u8 addr_type, u8 reason,
6154 bool mgmt_connected)
6156 struct mgmt_ev_device_disconnected ev;
6157 struct pending_cmd *power_off;
6158 struct sock *sk = NULL;
6160 power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
6162 struct mgmt_mode *cp = power_off->param;
6164 /* The connection is still in hci_conn_hash so test for 1
6165 * instead of 0 to know if this is the last one.
6167 if (!cp->val && hci_conn_count(hdev) == 1) {
6168 cancel_delayed_work(&hdev->power_off);
6169 queue_work(hdev->req_workqueue, &hdev->power_off.work);
6173 if (!mgmt_connected)
6176 if (link_type != ACL_LINK && link_type != LE_LINK)
6179 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
6181 bacpy(&ev.addr.bdaddr, bdaddr);
6182 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6185 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
6190 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
6194 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
6195 u8 link_type, u8 addr_type, u8 status)
6197 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
6198 struct mgmt_cp_disconnect *cp;
6199 struct mgmt_rp_disconnect rp;
6200 struct pending_cmd *cmd;
6202 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
6205 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
6211 if (bacmp(bdaddr, &cp->addr.bdaddr))
6214 if (cp->addr.type != bdaddr_type)
6217 bacpy(&rp.addr.bdaddr, bdaddr);
6218 rp.addr.type = bdaddr_type;
6220 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
6221 mgmt_status(status), &rp, sizeof(rp));
6223 mgmt_pending_remove(cmd);
6226 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6227 u8 addr_type, u8 status)
6229 struct mgmt_ev_connect_failed ev;
6230 struct pending_cmd *power_off;
6232 power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
6234 struct mgmt_mode *cp = power_off->param;
6236 /* The connection is still in hci_conn_hash so test for 1
6237 * instead of 0 to know if this is the last one.
6239 if (!cp->val && hci_conn_count(hdev) == 1) {
6240 cancel_delayed_work(&hdev->power_off);
6241 queue_work(hdev->req_workqueue, &hdev->power_off.work);
6245 bacpy(&ev.addr.bdaddr, bdaddr);
6246 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6247 ev.status = mgmt_status(status);
6249 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
6252 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
6254 struct mgmt_ev_pin_code_request ev;
6256 bacpy(&ev.addr.bdaddr, bdaddr);
6257 ev.addr.type = BDADDR_BREDR;
6260 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
6263 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6266 struct pending_cmd *cmd;
6267 struct mgmt_rp_pin_code_reply rp;
6269 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
6273 bacpy(&rp.addr.bdaddr, bdaddr);
6274 rp.addr.type = BDADDR_BREDR;
6276 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
6277 mgmt_status(status), &rp, sizeof(rp));
6279 mgmt_pending_remove(cmd);
6282 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6285 struct pending_cmd *cmd;
6286 struct mgmt_rp_pin_code_reply rp;
6288 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
6292 bacpy(&rp.addr.bdaddr, bdaddr);
6293 rp.addr.type = BDADDR_BREDR;
6295 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
6296 mgmt_status(status), &rp, sizeof(rp));
6298 mgmt_pending_remove(cmd);
6301 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6302 u8 link_type, u8 addr_type, u32 value,
6305 struct mgmt_ev_user_confirm_request ev;
6307 BT_DBG("%s", hdev->name);
6309 bacpy(&ev.addr.bdaddr, bdaddr);
6310 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6311 ev.confirm_hint = confirm_hint;
6312 ev.value = cpu_to_le32(value);
6314 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
6318 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6319 u8 link_type, u8 addr_type)
6321 struct mgmt_ev_user_passkey_request ev;
6323 BT_DBG("%s", hdev->name);
6325 bacpy(&ev.addr.bdaddr, bdaddr);
6326 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6328 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
6332 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6333 u8 link_type, u8 addr_type, u8 status,
6336 struct pending_cmd *cmd;
6337 struct mgmt_rp_user_confirm_reply rp;
6340 cmd = mgmt_pending_find(opcode, hdev);
6344 bacpy(&rp.addr.bdaddr, bdaddr);
6345 rp.addr.type = link_to_bdaddr(link_type, addr_type);
6346 err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
6349 mgmt_pending_remove(cmd);
6354 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6355 u8 link_type, u8 addr_type, u8 status)
6357 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6358 status, MGMT_OP_USER_CONFIRM_REPLY);
6361 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6362 u8 link_type, u8 addr_type, u8 status)
6364 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6366 MGMT_OP_USER_CONFIRM_NEG_REPLY);
6369 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6370 u8 link_type, u8 addr_type, u8 status)
6372 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6373 status, MGMT_OP_USER_PASSKEY_REPLY);
6376 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6377 u8 link_type, u8 addr_type, u8 status)
6379 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6381 MGMT_OP_USER_PASSKEY_NEG_REPLY);
6384 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
6385 u8 link_type, u8 addr_type, u32 passkey,
6388 struct mgmt_ev_passkey_notify ev;
6390 BT_DBG("%s", hdev->name);
6392 bacpy(&ev.addr.bdaddr, bdaddr);
6393 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6394 ev.passkey = __cpu_to_le32(passkey);
6395 ev.entered = entered;
6397 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
6400 void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6401 u8 addr_type, u8 status)
6403 struct mgmt_ev_auth_failed ev;
6405 bacpy(&ev.addr.bdaddr, bdaddr);
6406 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6407 ev.status = mgmt_status(status);
6409 mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
6412 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
6414 struct cmd_lookup match = { NULL, hdev };
6418 u8 mgmt_err = mgmt_status(status);
6419 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
6420 cmd_status_rsp, &mgmt_err);
6424 if (test_bit(HCI_AUTH, &hdev->flags))
6425 changed = !test_and_set_bit(HCI_LINK_SECURITY,
6428 changed = test_and_clear_bit(HCI_LINK_SECURITY,
6431 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
6435 new_settings(hdev, match.sk);
6441 static void clear_eir(struct hci_request *req)
6443 struct hci_dev *hdev = req->hdev;
6444 struct hci_cp_write_eir cp;
6446 if (!lmp_ext_inq_capable(hdev))
6449 memset(hdev->eir, 0, sizeof(hdev->eir));
6451 memset(&cp, 0, sizeof(cp));
6453 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* Completion handler for enabling/disabling SSP (Simple Secure Pairing).
 * On failure the SSP/HS flags are rolled back and pending Set SSP commands
 * receive an error status; on success the flags are updated, pending
 * commands completed, and the SSP debug mode / EIR state refreshed.
 *
 * NOTE(review): this chunk is an elided paste (original line numbers kept,
 * several branch/brace lines missing); code intentionally left untouched.
 */
6456 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
6458 struct cmd_lookup match = { NULL, hdev };
6459 struct hci_request req;
6460 bool changed = false;
/* Failure path: undo an optimistic SSP enable and notify waiters */
6463 u8 mgmt_err = mgmt_status(status);
6465 if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
6466 &hdev->dev_flags)) {
6467 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
6468 new_settings(hdev, NULL);
6471 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
/* Success path: track whether the SSP setting actually changed */
6477 changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
6479 changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
6481 changed = test_and_clear_bit(HCI_HS_ENABLED,
6484 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
6487 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
6490 new_settings(hdev, match.sk);
/* Re-sync SSP debug mode and EIR with the new SSP state */
6495 hci_req_init(&req, hdev);
6497 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
6498 if (test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
6499 hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
6500 sizeof(enable), &enable);
6506 hci_req_run(&req, NULL);
/* Completion handler for enabling/disabling Secure Connections.
 * On failure the SC/SC-only flags are rolled back and pending
 * Set Secure Connections commands get an error; on success the flags
 * are updated and pending commands completed.
 *
 * NOTE(review): elided paste (original line numbers kept, some branch
 * lines missing); code intentionally left untouched.
 */
6509 void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
6511 struct cmd_lookup match = { NULL, hdev };
6512 bool changed = false;
/* Failure path: undo optimistic flag changes and notify waiters */
6515 u8 mgmt_err = mgmt_status(status);
6518 if (test_and_clear_bit(HCI_SC_ENABLED,
6520 new_settings(hdev, NULL);
6521 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
6524 mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
6525 cmd_status_rsp, &mgmt_err);
/* Success path: record whether the SC setting actually changed */
6530 changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
6532 changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
6533 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
6536 mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
6537 settings_rsp, &match);
6540 new_settings(hdev, match.sk);
6546 static void sk_lookup(struct pending_cmd *cmd, void *data)
6548 struct cmd_lookup *match = data;
6550 if (match->sk == NULL) {
6551 match->sk = cmd->sk;
6552 sock_hold(match->sk);
6556 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
6559 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
6561 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
6562 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
6563 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
6566 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
6573 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
6575 struct mgmt_cp_set_local_name ev;
6576 struct pending_cmd *cmd;
6581 memset(&ev, 0, sizeof(ev));
6582 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
6583 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
6585 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
6587 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
6589 /* If this is a HCI command related to powering on the
6590 * HCI dev don't send any mgmt signals.
6592 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
6596 mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
6597 cmd ? cmd->sk : NULL);
/* Completion of Read Local OOB (Extended) Data: answer the pending mgmt
 * command with either the extended (SC, hash256/randomizer256 present)
 * or the legacy P-192-only response.
 *
 * NOTE(review): elided paste (original line numbers kept, status/else
 * structure lines missing); code intentionally left untouched.
 */
6600 void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
6601 u8 *randomizer192, u8 *hash256,
6602 u8 *randomizer256, u8 status)
6604 struct pending_cmd *cmd;
6606 BT_DBG("%s status %u", hdev->name, status);
6608 cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
/* Failure: forward the translated HCI error */
6613 cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
6614 mgmt_status(status));
/* Extended response only when SC is enabled and P-256 data was provided */
6616 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
6617 hash256 && randomizer256) {
6618 struct mgmt_rp_read_local_oob_ext_data rp;
6620 memcpy(rp.hash192, hash192, sizeof(rp.hash192));
6621 memcpy(rp.randomizer192, randomizer192,
6622 sizeof(rp.randomizer192));
6624 memcpy(rp.hash256, hash256, sizeof(rp.hash256));
6625 memcpy(rp.randomizer256, randomizer256,
6626 sizeof(rp.randomizer256));
6628 cmd_complete(cmd->sk, hdev->id,
6629 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
/* Legacy response: P-192 hash/randomizer only */
6632 struct mgmt_rp_read_local_oob_data rp;
6634 memcpy(rp.hash, hash192, sizeof(rp.hash));
6635 memcpy(rp.randomizer, randomizer192,
6636 sizeof(rp.randomizer));
6638 cmd_complete(cmd->sk, hdev->id,
6639 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
6644 mgmt_pending_remove(cmd);
/* Emit a Device Found event for an inquiry result / LE advertising
 * report, resolving the address via a stored IRK when possible and
 * packing EIR, class of device and scan response data into one buffer.
 *
 * NOTE(review): elided paste (original line numbers kept, guard/brace
 * lines missing); code intentionally left untouched.
 */
6647 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6648 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
6649 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
6652 struct mgmt_ev_device_found *ev = (void *) buf;
6653 struct smp_irk *irk;
6656 /* Don't send events for a non-kernel initiated discovery. With
6657 * LE one exception is if we have pend_le_reports > 0 in which
6658 * case we're doing passive scanning and want these events.
6660 if (!hci_discovery_active(hdev)) {
6661 if (link_type == ACL_LINK)
6663 if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
6667 /* Make sure that the buffer is big enough. The 5 extra bytes
6668 * are for the potential CoD field.
6670 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
6673 memset(buf, 0, sizeof(buf));
/* Prefer the identity address when an IRK resolves this device */
6675 irk = hci_get_irk(hdev, bdaddr, addr_type);
6677 bacpy(&ev->addr.bdaddr, &irk->bdaddr);
6678 ev->addr.type = link_to_bdaddr(link_type, irk->addr_type);
6680 bacpy(&ev->addr.bdaddr, bdaddr);
6681 ev->addr.type = link_to_bdaddr(link_type, addr_type);
6685 ev->flags = cpu_to_le32(flags);
6688 memcpy(ev->eir, eir, eir_len);
/* Append class of device only if the EIR data doesn't carry one */
6690 if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
6691 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
6694 if (scan_rsp_len > 0)
6695 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len)
6697 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
6698 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
6700 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
6703 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6704 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
6706 struct mgmt_ev_device_found *ev;
6707 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
6710 ev = (struct mgmt_ev_device_found *) buf;
6712 memset(buf, 0, sizeof(buf));
6714 bacpy(&ev->addr.bdaddr, bdaddr);
6715 ev->addr.type = link_to_bdaddr(link_type, addr_type);
6718 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
6721 ev->eir_len = cpu_to_le16(eir_len);
6723 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
6726 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
6728 struct mgmt_ev_discovering ev;
6729 struct pending_cmd *cmd;
6731 BT_DBG("%s discovering %u", hdev->name, discovering);
6734 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
6736 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
6739 u8 type = hdev->discovery.type;
6741 cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
6743 mgmt_pending_remove(cmd);
6746 memset(&ev, 0, sizeof(ev));
6747 ev.type = hdev->discovery.type;
6748 ev.discovering = discovering;
6750 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
6753 static void adv_enable_complete(struct hci_dev *hdev, u8 status)
6755 BT_DBG("%s status %u", hdev->name, status);
6757 /* Clear the advertising mgmt setting if we failed to re-enable it */
6759 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
6760 new_settings(hdev, NULL);
6764 void mgmt_reenable_advertising(struct hci_dev *hdev)
6766 struct hci_request req;
6768 if (hci_conn_num(hdev, LE_LINK) > 0)
6771 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
6774 hci_req_init(&req, hdev);
6775 enable_advertising(&req);
6777 /* If this fails we have no option but to let user space know
6778 * that we've disabled advertising.
6780 if (hci_req_run(&req, adv_enable_complete) < 0) {
6781 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
6782 new_settings(hdev, NULL);