2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/l2cap.h>
33 #include <net/bluetooth/mgmt.h>
37 #define MGMT_VERSION 1
38 #define MGMT_REVISION 7
/* Table of supported mgmt opcodes, reported back to user space by
 * read_commands(). NOTE(review): line numbering in this excerpt is
 * non-contiguous — several entries (and the closing brace) appear to
 * have been elided by extraction.
 */
40 static const u16 mgmt_commands[] = {
41 MGMT_OP_READ_INDEX_LIST,
44 MGMT_OP_SET_DISCOVERABLE,
45 MGMT_OP_SET_CONNECTABLE,
46 MGMT_OP_SET_FAST_CONNECTABLE,
48 MGMT_OP_SET_LINK_SECURITY,
52 MGMT_OP_SET_DEV_CLASS,
53 MGMT_OP_SET_LOCAL_NAME,
56 MGMT_OP_LOAD_LINK_KEYS,
57 MGMT_OP_LOAD_LONG_TERM_KEYS,
59 MGMT_OP_GET_CONNECTIONS,
60 MGMT_OP_PIN_CODE_REPLY,
61 MGMT_OP_PIN_CODE_NEG_REPLY,
62 MGMT_OP_SET_IO_CAPABILITY,
64 MGMT_OP_CANCEL_PAIR_DEVICE,
65 MGMT_OP_UNPAIR_DEVICE,
66 MGMT_OP_USER_CONFIRM_REPLY,
67 MGMT_OP_USER_CONFIRM_NEG_REPLY,
68 MGMT_OP_USER_PASSKEY_REPLY,
69 MGMT_OP_USER_PASSKEY_NEG_REPLY,
70 MGMT_OP_READ_LOCAL_OOB_DATA,
71 MGMT_OP_ADD_REMOTE_OOB_DATA,
72 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
73 MGMT_OP_START_DISCOVERY,
74 MGMT_OP_STOP_DISCOVERY,
77 MGMT_OP_UNBLOCK_DEVICE,
78 MGMT_OP_SET_DEVICE_ID,
79 MGMT_OP_SET_ADVERTISING,
81 MGMT_OP_SET_STATIC_ADDRESS,
82 MGMT_OP_SET_SCAN_PARAMS,
83 MGMT_OP_SET_SECURE_CONN,
84 MGMT_OP_SET_DEBUG_KEYS,
87 MGMT_OP_GET_CONN_INFO,
88 MGMT_OP_GET_CLOCK_INFO,
90 MGMT_OP_REMOVE_DEVICE,
91 MGMT_OP_LOAD_CONN_PARAM,
92 MGMT_OP_READ_UNCONF_INDEX_LIST,
93 MGMT_OP_READ_CONFIG_INFO,
94 MGMT_OP_SET_EXTERNAL_CONFIG,
95 MGMT_OP_SET_PUBLIC_ADDRESS,
/* Table of supported mgmt events, reported back to user space by
 * read_commands(). NOTE(review): some entries and the closing brace
 * are missing from this excerpt (non-contiguous numbering).
 */
98 static const u16 mgmt_events[] = {
99 MGMT_EV_CONTROLLER_ERROR,
101 MGMT_EV_INDEX_REMOVED,
102 MGMT_EV_NEW_SETTINGS,
103 MGMT_EV_CLASS_OF_DEV_CHANGED,
104 MGMT_EV_LOCAL_NAME_CHANGED,
105 MGMT_EV_NEW_LINK_KEY,
106 MGMT_EV_NEW_LONG_TERM_KEY,
107 MGMT_EV_DEVICE_CONNECTED,
108 MGMT_EV_DEVICE_DISCONNECTED,
109 MGMT_EV_CONNECT_FAILED,
110 MGMT_EV_PIN_CODE_REQUEST,
111 MGMT_EV_USER_CONFIRM_REQUEST,
112 MGMT_EV_USER_PASSKEY_REQUEST,
114 MGMT_EV_DEVICE_FOUND,
116 MGMT_EV_DEVICE_BLOCKED,
117 MGMT_EV_DEVICE_UNBLOCKED,
118 MGMT_EV_DEVICE_UNPAIRED,
119 MGMT_EV_PASSKEY_NOTIFY,
122 MGMT_EV_DEVICE_ADDED,
123 MGMT_EV_DEVICE_REMOVED,
124 MGMT_EV_NEW_CONN_PARAM,
125 MGMT_EV_UNCONF_INDEX_ADDED,
126 MGMT_EV_UNCONF_INDEX_REMOVED,
127 MGMT_EV_NEW_CONFIG_OPTIONS,
130 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
132 #define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
133 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
136 struct list_head list;
144 /* HCI to MGMT error code conversion table */
/* Indexed directly by the HCI status code; consumed by mgmt_status()
 * below, which falls back to MGMT_STATUS_FAILED for codes past the
 * end of the table.
 */
145 static u8 mgmt_status_table[] = {
147 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
148 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
149 MGMT_STATUS_FAILED, /* Hardware Failure */
150 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
151 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
152 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
153 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
154 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
155 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
156 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
157 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
158 MGMT_STATUS_BUSY, /* Command Disallowed */
159 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
160 MGMT_STATUS_REJECTED, /* Rejected Security */
161 MGMT_STATUS_REJECTED, /* Rejected Personal */
162 MGMT_STATUS_TIMEOUT, /* Host Timeout */
163 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
164 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
165 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
166 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
167 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
168 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
169 MGMT_STATUS_BUSY, /* Repeated Attempts */
170 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
171 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
172 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
173 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
174 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
175 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
176 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
177 MGMT_STATUS_FAILED, /* Unspecified Error */
178 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
179 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
180 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
181 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
182 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
183 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
184 MGMT_STATUS_FAILED, /* Unit Link Key Used */
185 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
186 MGMT_STATUS_TIMEOUT, /* Instant Passed */
187 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
188 MGMT_STATUS_FAILED, /* Transaction Collision */
189 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
190 MGMT_STATUS_REJECTED, /* QoS Rejected */
191 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
192 MGMT_STATUS_REJECTED, /* Insufficient Security */
193 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
194 MGMT_STATUS_BUSY, /* Role Switch Pending */
195 MGMT_STATUS_FAILED, /* Slot Violation */
196 MGMT_STATUS_FAILED, /* Role Switch Failed */
197 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
198 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
199 MGMT_STATUS_BUSY, /* Host Busy Pairing */
200 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
201 MGMT_STATUS_BUSY, /* Controller Busy */
202 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
203 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
204 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
205 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
206 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
209 static u8 mgmt_status(u8 hci_status)
211 if (hci_status < ARRAY_SIZE(mgmt_status_table))
212 return mgmt_status_table[hci_status];
214 return MGMT_STATUS_FAILED;
/* Build an skb carrying a mgmt event (header + payload) and broadcast
 * it to all mgmt control sockets, except @skip_sk if non-NULL.
 * NOTE(review): several lines are missing from this excerpt (skb NULL
 * check, the branch around the two hdr->index assignments, return).
 */
217 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
218 struct sock *skip_sk)
221 struct mgmt_hdr *hdr;
223 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
227 hdr = (void *) skb_put(skb, sizeof(*hdr));
228 hdr->opcode = cpu_to_le16(event);
/* presumably hdev->id is used when hdev != NULL, MGMT_INDEX_NONE
 * otherwise — the conditional itself is elided; verify against the
 * full source.
 */
230 hdr->index = cpu_to_le16(hdev->id);
232 hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
233 hdr->len = cpu_to_le16(data_len);
236 memcpy(skb_put(skb, data_len), data, data_len);
/* Timestamp so user space sees when the event was generated */
239 __net_timestamp(skb);
241 hci_send_to_control(skb, skip_sk);
/* Queue a MGMT_EV_CMD_STATUS event on @sk reporting @status for
 * command @cmd on controller @index. NOTE(review): skb NULL check,
 * the ev->status assignment and the return path are elided from this
 * excerpt.
 */
247 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
250 struct mgmt_hdr *hdr;
251 struct mgmt_ev_cmd_status *ev;
254 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
256 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
260 hdr = (void *) skb_put(skb, sizeof(*hdr));
262 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
263 hdr->index = cpu_to_le16(index);
264 hdr->len = cpu_to_le16(sizeof(*ev));
266 ev = (void *) skb_put(skb, sizeof(*ev));
268 ev->opcode = cpu_to_le16(cmd);
270 err = sock_queue_rcv_skb(sk, skb);
/* Queue a MGMT_EV_CMD_COMPLETE event on @sk for command @cmd, with an
 * optional response payload of @rp_len bytes copied after the event
 * header. NOTE(review): skb NULL check, ev->status assignment and the
 * return path are elided from this excerpt.
 */
277 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
278 void *rp, size_t rp_len)
281 struct mgmt_hdr *hdr;
282 struct mgmt_ev_cmd_complete *ev;
285 BT_DBG("sock %p", sk);
287 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
291 hdr = (void *) skb_put(skb, sizeof(*hdr));
293 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
294 hdr->index = cpu_to_le16(index);
295 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
297 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
298 ev->opcode = cpu_to_le16(cmd);
/* rp may be NULL when rp_len == 0 — the guarding condition around
 * this memcpy is elided; verify against the full source.
 */
302 memcpy(ev->data, rp, rp_len);
304 err = sock_queue_rcv_skb(sk, skb);
311 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
314 struct mgmt_rp_read_version rp;
316 BT_DBG("sock %p", sk);
318 rp.version = MGMT_VERSION;
319 rp.revision = cpu_to_le16(MGMT_REVISION);
321 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
/* MGMT_OP_READ_COMMANDS handler: return the mgmt_commands[] and
 * mgmt_events[] tables as little-endian u16 arrays. NOTE(review):
 * kmalloc NULL check, kfree and the final return are elided from this
 * excerpt.
 */
325 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
328 struct mgmt_rp_read_commands *rp;
329 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
330 const u16 num_events = ARRAY_SIZE(mgmt_events);
335 BT_DBG("sock %p", sk);
337 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
339 rp = kmalloc(rp_size, GFP_KERNEL);
343 rp->num_commands = cpu_to_le16(num_commands);
344 rp->num_events = cpu_to_le16(num_events);
/* put_unaligned_le16: opcodes[] in the reply is not guaranteed to be
 * 2-byte aligned.
 */
346 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
347 put_unaligned_le16(mgmt_commands[i], opcode);
/* opcode continues directly after the command list */
349 for (i = 0; i < num_events; i++, opcode++)
350 put_unaligned_le16(mgmt_events[i], opcode);
352 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
/* MGMT_OP_READ_INDEX_LIST handler: report the ids of all configured
 * BR/EDR controllers. Devices in SETUP/CONFIG/USER_CHANNEL state,
 * raw-only devices and unconfigured devices are filtered out.
 * NOTE(review): the first loop visibly only counts matching devices;
 * its body (count++) and the kmalloc NULL handling are elided.
 */
359 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
362 struct mgmt_rp_read_index_list *rp;
368 BT_DBG("sock %p", sk);
370 read_lock(&hci_dev_list_lock);
/* First pass: size the reply buffer */
373 list_for_each_entry(d, &hci_dev_list, list) {
374 if (d->dev_type == HCI_BREDR &&
375 !test_bit(HCI_UNCONFIGURED, &d->dev_flags))
/* 2 bytes per index (__le16) */
379 rp_len = sizeof(*rp) + (2 * count);
/* GFP_ATOMIC: allocation happens under hci_dev_list_lock */
380 rp = kmalloc(rp_len, GFP_ATOMIC);
382 read_unlock(&hci_dev_list_lock);
/* Second pass: fill in the indexes */
387 list_for_each_entry(d, &hci_dev_list, list) {
388 if (test_bit(HCI_SETUP, &d->dev_flags) ||
389 test_bit(HCI_CONFIG, &d->dev_flags) ||
390 test_bit(HCI_USER_CHANNEL, &d->dev_flags))
393 /* Devices marked as raw-only are neither configured
394 * nor unconfigured controllers.
396 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
399 if (d->dev_type == HCI_BREDR &&
400 !test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
401 rp->index[count++] = cpu_to_le16(d->id);
402 BT_DBG("Added hci%u", d->id);
/* Recompute length: the second pass may have skipped devices */
406 rp->num_controllers = cpu_to_le16(count);
407 rp_len = sizeof(*rp) + (2 * count);
409 read_unlock(&hci_dev_list_lock);
411 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
/* MGMT_OP_READ_UNCONF_INDEX_LIST handler: mirror image of
 * read_index_list() — reports only BR/EDR controllers that still have
 * HCI_UNCONFIGURED set. Same SETUP/CONFIG/USER_CHANNEL and raw-only
 * filtering. NOTE(review): same elisions as read_index_list (count
 * increment of the first pass, kmalloc NULL handling).
 */
419 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
420 void *data, u16 data_len)
422 struct mgmt_rp_read_unconf_index_list *rp;
428 BT_DBG("sock %p", sk);
430 read_lock(&hci_dev_list_lock);
/* First pass: size the reply buffer */
433 list_for_each_entry(d, &hci_dev_list, list) {
434 if (d->dev_type == HCI_BREDR &&
435 test_bit(HCI_UNCONFIGURED, &d->dev_flags))
439 rp_len = sizeof(*rp) + (2 * count);
/* GFP_ATOMIC: allocation happens under hci_dev_list_lock */
440 rp = kmalloc(rp_len, GFP_ATOMIC);
442 read_unlock(&hci_dev_list_lock);
/* Second pass: fill in the indexes */
447 list_for_each_entry(d, &hci_dev_list, list) {
448 if (test_bit(HCI_SETUP, &d->dev_flags) ||
449 test_bit(HCI_CONFIG, &d->dev_flags) ||
450 test_bit(HCI_USER_CHANNEL, &d->dev_flags))
453 /* Devices marked as raw-only are neither configured
454 * nor unconfigured controllers.
456 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
459 if (d->dev_type == HCI_BREDR &&
460 test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
461 rp->index[count++] = cpu_to_le16(d->id);
462 BT_DBG("Added hci%u", d->id);
466 rp->num_controllers = cpu_to_le16(count);
467 rp_len = sizeof(*rp) + (2 * count);
469 read_unlock(&hci_dev_list_lock);
471 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_UNCONF_INDEX_LIST,
479 static bool is_configured(struct hci_dev *hdev)
481 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
482 !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
485 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
486 !bacmp(&hdev->public_addr, BDADDR_ANY))
492 static __le32 get_missing_options(struct hci_dev *hdev)
496 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
497 !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
498 options |= MGMT_OPTION_EXTERNAL_CONFIG;
500 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
501 !bacmp(&hdev->public_addr, BDADDR_ANY))
502 options |= MGMT_OPTION_PUBLIC_ADDRESS;
504 return cpu_to_le32(options);
507 static int new_options(struct hci_dev *hdev, struct sock *skip)
509 __le32 options = get_missing_options(hdev);
511 return mgmt_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
512 sizeof(options), skip);
515 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
517 __le32 options = get_missing_options(hdev);
519 return cmd_complete(sk, hdev->id, opcode, 0, &options,
/* MGMT_OP_READ_CONFIG_INFO handler: report manufacturer plus the
 * supported and currently-missing configuration options.
 * NOTE(review): hci_dev_unlock() below has no visible matching
 * hci_dev_lock() — presumably it sits on an elided line; confirm
 * against the full source. The options declaration is also elided.
 */
523 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
524 void *data, u16 data_len)
526 struct mgmt_rp_read_config_info rp;
529 BT_DBG("sock %p %s", sk, hdev->name);
533 memset(&rp, 0, sizeof(rp));
534 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
/* External configuration is an option only if the quirk is set */
536 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
537 options |= MGMT_OPTION_EXTERNAL_CONFIG;
/* Setting a public address requires a driver set_bdaddr hook */
539 if (hdev->set_bdaddr)
540 options |= MGMT_OPTION_PUBLIC_ADDRESS;
542 rp.supported_options = cpu_to_le32(options);
543 rp.missing_options = get_missing_options(hdev);
545 hci_dev_unlock(hdev);
547 return cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0, &rp,
/* Build the bitmask of settings this controller can support, derived
 * from its LMP feature bits and quirks. POWERED/PAIRABLE/DEBUG_KEYS
 * are always supported. NOTE(review): the settings declaration,
 * closing braces and the second half of the CONFIGURATION condition
 * (line 584) are elided from this excerpt.
 */
551 static u32 get_supported_settings(struct hci_dev *hdev)
555 settings |= MGMT_SETTING_POWERED;
556 settings |= MGMT_SETTING_PAIRABLE;
557 settings |= MGMT_SETTING_DEBUG_KEYS;
559 if (lmp_bredr_capable(hdev)) {
560 settings |= MGMT_SETTING_CONNECTABLE;
/* Fast connectable (page scan interval tuning) needs >= BT 1.2 */
561 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
562 settings |= MGMT_SETTING_FAST_CONNECTABLE;
563 settings |= MGMT_SETTING_DISCOVERABLE;
564 settings |= MGMT_SETTING_BREDR;
565 settings |= MGMT_SETTING_LINK_SECURITY;
567 if (lmp_ssp_capable(hdev)) {
568 settings |= MGMT_SETTING_SSP;
569 settings |= MGMT_SETTING_HS;
/* Secure Connections: real controller support or debugfs override */
572 if (lmp_sc_capable(hdev) ||
573 test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
574 settings |= MGMT_SETTING_SECURE_CONN;
577 if (lmp_le_capable(hdev)) {
578 settings |= MGMT_SETTING_LE;
579 settings |= MGMT_SETTING_ADVERTISING;
580 settings |= MGMT_SETTING_PRIVACY;
583 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
585 settings |= MGMT_SETTING_CONFIGURATION;
/* Build the bitmask of settings that are currently active, by
 * sampling hdev->flags / hdev->dev_flags bit by bit. NOTE(review):
 * the settings declaration and the final return are elided from this
 * excerpt.
 */
590 static u32 get_current_settings(struct hci_dev *hdev)
594 if (hdev_is_powered(hdev))
595 settings |= MGMT_SETTING_POWERED;
597 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
598 settings |= MGMT_SETTING_CONNECTABLE;
600 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
601 settings |= MGMT_SETTING_FAST_CONNECTABLE;
603 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
604 settings |= MGMT_SETTING_DISCOVERABLE;
606 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
607 settings |= MGMT_SETTING_PAIRABLE;
609 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
610 settings |= MGMT_SETTING_BREDR;
612 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
613 settings |= MGMT_SETTING_LE;
615 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
616 settings |= MGMT_SETTING_LINK_SECURITY;
618 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
619 settings |= MGMT_SETTING_SSP;
621 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
622 settings |= MGMT_SETTING_HS;
624 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
625 settings |= MGMT_SETTING_ADVERTISING;
627 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
628 settings |= MGMT_SETTING_SECURE_CONN;
630 if (test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags))
631 settings |= MGMT_SETTING_DEBUG_KEYS;
633 if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
634 settings |= MGMT_SETTING_PRIVACY;
639 #define PNP_INFO_SVCLASS_ID 0x1200
/* Append an EIR 16-bit UUID list built from hdev->uuids to @data,
 * bounded by @len. Emits EIR_UUID16_ALL, downgraded to
 * EIR_UUID16_SOME when the list is truncated for lack of space.
 * NOTE(review): loop braces, the skip/continue paths and the return
 * are elided from this excerpt.
 */
641 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
643 u8 *ptr = data, *uuids_start = NULL;
644 struct bt_uuid *uuid;
649 list_for_each_entry(uuid, &hdev->uuids, list) {
652 if (uuid->size != 16)
/* The 16-bit service class lives in bytes 12..13 of the 128-bit form */
655 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
/* PnP Info service class is deliberately excluded from EIR */
659 if (uuid16 == PNP_INFO_SVCLASS_ID)
665 uuids_start[1] = EIR_UUID16_ALL;
669 /* Stop if not enough space to put next UUID */
670 if ((ptr - data) + sizeof(u16) > len) {
671 uuids_start[1] = EIR_UUID16_SOME;
/* Little-endian encoding, byte by byte */
675 *ptr++ = (uuid16 & 0x00ff);
676 *ptr++ = (uuid16 & 0xff00) >> 8;
677 uuids_start[0] += sizeof(uuid16);
/* Append an EIR 32-bit UUID list built from hdev->uuids to @data,
 * bounded by @len; EIR_UUID32_ALL downgraded to EIR_UUID32_SOME on
 * truncation. NOTE(review): loop braces, skip paths and the return
 * are elided from this excerpt.
 */
683 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
685 u8 *ptr = data, *uuids_start = NULL;
686 struct bt_uuid *uuid;
691 list_for_each_entry(uuid, &hdev->uuids, list) {
692 if (uuid->size != 32)
698 uuids_start[1] = EIR_UUID32_ALL;
702 /* Stop if not enough space to put next UUID */
703 if ((ptr - data) + sizeof(u32) > len) {
704 uuids_start[1] = EIR_UUID32_SOME;
/* 32-bit value stored in bytes 12..15 of the 128-bit form */
708 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
710 uuids_start[0] += sizeof(u32);
/* Append an EIR 128-bit UUID list built from hdev->uuids to @data,
 * bounded by @len; EIR_UUID128_ALL downgraded to EIR_UUID128_SOME on
 * truncation. NOTE(review): loop braces, skip paths and the return
 * are elided from this excerpt.
 */
716 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
718 u8 *ptr = data, *uuids_start = NULL;
719 struct bt_uuid *uuid;
724 list_for_each_entry(uuid, &hdev->uuids, list) {
725 if (uuid->size != 128)
731 uuids_start[1] = EIR_UUID128_ALL;
735 /* Stop if not enough space to put next UUID */
736 if ((ptr - data) + 16 > len) {
737 uuids_start[1] = EIR_UUID128_SOME;
/* Full 16-byte UUID copied verbatim */
741 memcpy(ptr, uuid->uuid, 16);
743 uuids_start[0] += 16;
749 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
751 struct pending_cmd *cmd;
753 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
754 if (cmd->opcode == opcode)
/* Like mgmt_pending_find(), but additionally match on cmd->user_data.
 * NOTE(review): the final parameter line of the signature, the
 * continue/return statements and closing braces are elided from this
 * excerpt.
 */
761 static struct pending_cmd *mgmt_pending_find_data(u16 opcode,
762 struct hci_dev *hdev,
765 struct pending_cmd *cmd;
767 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
/* Skip entries whose user_data doesn't match before checking opcode */
768 if (cmd->user_data != data)
770 if (cmd->opcode == opcode)
/* Fill @ptr with LE scan response data containing the local name
 * (shortened to EIR_NAME_SHORT when it exceeds the space left in an
 * advertising PDU) and return the number of bytes written.
 * NOTE(review): the ad_len/name_len declarations, the branch
 * structure and the return are elided from this excerpt.
 */
777 static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
782 name_len = strlen(hdev->dev_name);
/* -2 accounts for the EIR length and type bytes */
784 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
786 if (name_len > max_len) {
788 ptr[1] = EIR_NAME_SHORT;
790 ptr[1] = EIR_NAME_COMPLETE;
/* EIR length byte covers the type byte plus the name */
792 ptr[0] = name_len + 1;
794 memcpy(ptr + 2, hdev->dev_name, name_len);
796 ad_len += (name_len + 2);
797 ptr += (name_len + 2);
/* Queue an HCI_OP_LE_SET_SCAN_RSP_DATA command on @req if LE is
 * enabled and the freshly built scan response data differs from what
 * the controller already has. NOTE(review): early returns and the
 * cp.length assignment are elided from this excerpt.
 */
803 static void update_scan_rsp_data(struct hci_request *req)
805 struct hci_dev *hdev = req->hdev;
806 struct hci_cp_le_set_scan_rsp_data cp;
809 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
812 memset(&cp, 0, sizeof(cp));
814 len = create_scan_rsp_data(hdev, cp.data);
/* Skip the HCI command if the controller data is already current */
816 if (hdev->scan_rsp_data_len == len &&
817 memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
/* Cache what we are about to program into the controller */
820 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
821 hdev->scan_rsp_data_len = len;
825 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
/* Return the LE advertising discoverability flag (LE_AD_GENERAL /
 * LE_AD_LIMITED) that should currently be advertised, preferring the
 * value of any pending SET_DISCOVERABLE command over the device
 * flags. NOTE(review): the if (cmd) brace, the cp->val == 0x01 test
 * and the final return 0 are elided from this excerpt.
 */
828 static u8 get_adv_discov_flags(struct hci_dev *hdev)
830 struct pending_cmd *cmd;
832 /* If there's a pending mgmt command the flags will not yet have
833 * their final values, so check for this first.
835 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
837 struct mgmt_mode *cp = cmd->param;
839 return LE_AD_GENERAL;
840 else if (cp->val == 0x02)
841 return LE_AD_LIMITED;
843 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
844 return LE_AD_LIMITED;
845 else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
846 return LE_AD_GENERAL;
/* Fill @ptr with LE advertising data (flags field and, when valid,
 * the TX power level) and return the number of bytes written.
 * NOTE(review): the flags-field emission, ad_len bookkeeping and the
 * return are elided from this excerpt.
 */
852 static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
854 u8 ad_len = 0, flags = 0;
856 flags |= get_adv_discov_flags(hdev);
/* Advertise BR/EDR-not-supported when BR/EDR is disabled */
858 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
859 flags |= LE_AD_NO_BREDR;
862 BT_DBG("adv flags 0x%02x", flags);
872 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
874 ptr[1] = EIR_TX_POWER;
875 ptr[2] = (u8) hdev->adv_tx_power;
/* Queue an HCI_OP_LE_SET_ADV_DATA command on @req if LE is enabled
 * and the freshly built advertising data differs from the cached
 * copy. Same structure as update_scan_rsp_data(). NOTE(review):
 * early returns and the cp.length assignment are elided from this
 * excerpt.
 */
884 static void update_adv_data(struct hci_request *req)
886 struct hci_dev *hdev = req->hdev;
887 struct hci_cp_le_set_adv_data cp;
890 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
893 memset(&cp, 0, sizeof(cp));
895 len = create_adv_data(hdev, cp.data);
/* Skip the HCI command if the controller data is already current */
897 if (hdev->adv_data_len == len &&
898 memcmp(cp.data, hdev->adv_data, len) == 0)
/* Cache what we are about to program into the controller */
901 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
902 hdev->adv_data_len = len;
906 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
/* Fill @data with BR/EDR extended inquiry response data: local name,
 * inquiry TX power, device ID record, then 16/32/128-bit UUID lists
 * bounded by the remaining EIR space. NOTE(review): the ptr/name_len
 * declarations and several branch/length lines are elided from this
 * excerpt.
 */
909 static void create_eir(struct hci_dev *hdev, u8 *data)
914 name_len = strlen(hdev->dev_name);
920 ptr[1] = EIR_NAME_SHORT;
922 ptr[1] = EIR_NAME_COMPLETE;
924 /* EIR Data length */
925 ptr[0] = name_len + 1;
927 memcpy(ptr + 2, hdev->dev_name, name_len);
929 ptr += (name_len + 2);
932 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
934 ptr[1] = EIR_TX_POWER;
935 ptr[2] = (u8) hdev->inq_tx_power;
/* devid_source > 0 means a Device ID record has been configured */
940 if (hdev->devid_source > 0) {
942 ptr[1] = EIR_DEVICE_ID;
944 put_unaligned_le16(hdev->devid_source, ptr + 2);
945 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
946 put_unaligned_le16(hdev->devid_product, ptr + 6);
947 put_unaligned_le16(hdev->devid_version, ptr + 8);
/* Each helper appends its list and returns the advanced pointer */
952 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
953 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
954 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
/* Queue an HCI_OP_WRITE_EIR command on @req when the device is
 * powered, supports extended inquiry, has SSP enabled and the service
 * cache is not active — and only if the rebuilt EIR differs from the
 * cached copy. NOTE(review): the early-return statements after each
 * guard are elided from this excerpt.
 */
957 static void update_eir(struct hci_request *req)
959 struct hci_dev *hdev = req->hdev;
960 struct hci_cp_write_eir cp;
962 if (!hdev_is_powered(hdev))
965 if (!lmp_ext_inq_capable(hdev))
968 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
971 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
974 memset(&cp, 0, sizeof(cp));
976 create_eir(hdev, cp.data);
/* Skip the HCI command if the controller data is already current */
978 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
981 memcpy(hdev->eir, cp.data, sizeof(cp.data));
983 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
986 static u8 get_service_classes(struct hci_dev *hdev)
988 struct bt_uuid *uuid;
991 list_for_each_entry(uuid, &hdev->uuids, list)
992 val |= uuid->svc_hint;
/* Queue an HCI_OP_WRITE_CLASS_OF_DEV command on @req with minor/major
 * class plus service-class hints, when powered, BR/EDR-enabled and
 * the service cache is inactive. NOTE(review): the cod declaration,
 * early returns and the limited-discoverable bit manipulation are
 * elided from this excerpt.
 */
999 static void update_class(struct hci_request *req)
1002 BT_DBG("%s", hdev->name);
1004 if (!hdev_is_powered(hdev))
1007 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1010 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1013 cod[0] = hdev->minor_class;
1014 cod[1] = hdev->major_class;
1015 cod[2] = get_service_classes(hdev);
/* presumably sets the limited-discoverable bit in cod[1]; the
 * statement under this condition is elided — verify against the full
 * source.
 */
1017 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
/* Skip the HCI command if the class is already current */
1020 if (memcmp(cod, hdev->dev_class, 3) == 0)
1023 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
/* Report whether the device should currently be treated as
 * connectable, preferring a pending SET_CONNECTABLE command's value
 * over the HCI_CONNECTABLE flag. NOTE(review): the if (cmd) brace and
 * the return of cp->val are elided from this excerpt.
 */
1026 static bool get_connectable(struct hci_dev *hdev)
1028 struct pending_cmd *cmd;
1030 /* If there's a pending mgmt command the flag will not yet have
1031 * it's final value, so check for this first.
1033 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1035 struct mgmt_mode *cp = cmd->param;
1039 return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
/* Queue HCI commands on @req that configure LE advertising parameters
 * (interval, type derived from connectability, own address type,
 * channel map) and then enable advertising. NOTE(review): the
 * connectable declaration and the early-return after the random
 * address update are elided from this excerpt.
 */
1042 static void enable_advertising(struct hci_request *req)
1044 struct hci_dev *hdev = req->hdev;
1045 struct hci_cp_le_set_adv_param cp;
1046 u8 own_addr_type, enable = 0x01;
1049 /* Clear the HCI_ADVERTISING bit temporarily so that the
1050 * hci_update_random_address knows that it's safe to go ahead
1051 * and write a new random address. The flag will be set back on
1052 * as soon as the SET_ADV_ENABLE HCI command completes.
1054 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
1056 connectable = get_connectable(hdev);
1058 /* Set require_privacy to true only when non-connectable
1059 * advertising is used. In that case it is fine to use a
1060 * non-resolvable private address.
1062 if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
1065 memset(&cp, 0, sizeof(cp));
1066 cp.min_interval = cpu_to_le16(0x0800);
1067 cp.max_interval = cpu_to_le16(0x0800);
/* Connectable undirected vs. non-connectable advertising */
1068 cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
1069 cp.own_address_type = own_addr_type;
1070 cp.channel_map = hdev->le_adv_channel_map;
1072 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1074 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1077 static void disable_advertising(struct hci_request *req)
1081 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
/* Delayed work: when the service-cache window expires, flush the
 * cached state to the controller via an hci_request. NOTE(review):
 * the hci_dev_lock and the update_class()/update_eir() calls between
 * hci_req_init and hci_dev_unlock are elided from this excerpt.
 */
1084 static void service_cache_off(struct work_struct *work)
1086 struct hci_dev *hdev = container_of(work, struct hci_dev,
1087 service_cache.work);
1088 struct hci_request req;
/* Nothing to do unless the cache flag was actually set */
1090 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1093 hci_req_init(&req, hdev);
1100 hci_dev_unlock(hdev);
1102 hci_req_run(&req, NULL);
/* Delayed work: mark the resolvable private address as expired and,
 * if we are currently advertising with no LE connections, restart
 * advertising so enable_advertising() programs a fresh RPA.
 * NOTE(review): the container_of second line and the early return
 * under the advertising/connection check are elided from this
 * excerpt.
 */
1105 static void rpa_expired(struct work_struct *work)
1107 struct hci_dev *hdev = container_of(work, struct hci_dev,
1109 struct hci_request req;
1113 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
1115 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
1116 hci_conn_num(hdev, LE_LINK) > 0)
1119 /* The generation of a new RPA and programming it into the
1120 * controller happens in the enable_advertising() function.
1123 hci_req_init(&req, hdev);
1125 disable_advertising(&req);
1126 enable_advertising(&req);
1128 hci_req_run(&req, NULL);
/* One-time per-device mgmt initialisation, run on the first mgmt
 * access: set HCI_MGMT (idempotent via test_and_set_bit), wire up the
 * service-cache and RPA-expiry work items, and clear HCI_PAIRABLE so
 * user space must enable pairing explicitly.
 */
1131 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
/* Already initialised if HCI_MGMT was set before */
1133 if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
1136 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1137 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1139 /* Non-mgmt controlled devices get this bit set
1140 * implicitly so that pairing works for them, however
1141 * for mgmt we require user-space to explicitly enable
1144 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
/* MGMT_OP_READ_INFO handler: report address, version, manufacturer,
 * supported/current settings, class of device and names for @hdev.
 * NOTE(review): the hci_dev_lock matching the unlock below is on an
 * elided line (presumably around line 1154) — verify against the full
 * source.
 */
1147 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1148 void *data, u16 data_len)
1150 struct mgmt_rp_read_info rp;
1152 BT_DBG("sock %p %s", sk, hdev->name);
1156 memset(&rp, 0, sizeof(rp));
1158 bacpy(&rp.bdaddr, &hdev->bdaddr);
1160 rp.version = hdev->hci_ver;
1161 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1163 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1164 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1166 memcpy(rp.dev_class, hdev->dev_class, 3);
1168 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1169 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1171 hci_dev_unlock(hdev);
1173 return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1177 static void mgmt_pending_free(struct pending_cmd *cmd)
/* Allocate a pending_cmd for @opcode on @hdev, copy @len bytes of
 * @data as its parameters, and append it to hdev->mgmt_pending.
 * NOTE(review): NULL checks after both allocations, the sk
 * assignment/sock_hold and the final return are elided from this
 * excerpt.
 */
1184 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
1185 struct hci_dev *hdev, void *data,
1188 struct pending_cmd *cmd;
1190 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1194 cmd->opcode = opcode;
1195 cmd->index = hdev->id;
1197 cmd->param = kmalloc(len, GFP_KERNEL);
1204 memcpy(cmd->param, data, len);
1209 list_add(&cmd->list, &hdev->mgmt_pending);
/* Invoke @cb on every pending command, or only on those matching
 * @opcode when opcode > 0. Uses the _safe iterator because callbacks
 * may remove entries from the list. NOTE(review): the cb(cmd, data)
 * invocation inside the loop is elided from this excerpt.
 */
1214 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
1215 void (*cb)(struct pending_cmd *cmd,
1219 struct pending_cmd *cmd, *tmp;
1221 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
/* opcode == 0 acts as a wildcard matching every entry */
1222 if (opcode > 0 && cmd->opcode != opcode)
1229 static void mgmt_pending_remove(struct pending_cmd *cmd)
1231 list_del(&cmd->list);
1232 mgmt_pending_free(cmd);
1235 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1237 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1239 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
1243 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status)
1245 BT_DBG("%s status 0x%02x", hdev->name, status);
1247 if (hci_conn_count(hdev) == 0) {
1248 cancel_delayed_work(&hdev->power_off);
1249 queue_work(hdev->req_workqueue, &hdev->power_off.work);
/* Queue the HCI commands needed to abort whatever discovery phase is
 * in progress: inquiry cancel or LE scan disable while finding,
 * remote-name-request cancel while resolving, and passive LE scan
 * disable in the default case. NOTE(review): the else branch pairing,
 * break statements and the lookup's NAME_PENDING argument lines are
 * elided from this excerpt.
 */
1253 static void hci_stop_discovery(struct hci_request *req)
1255 struct hci_dev *hdev = req->hdev;
1256 struct hci_cp_remote_name_req_cancel cp;
1257 struct inquiry_entry *e;
1259 switch (hdev->discovery.state) {
1260 case DISCOVERY_FINDING:
/* BR/EDR inquiry vs. LE scan need different abort commands */
1261 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
1262 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
1264 cancel_delayed_work(&hdev->le_scan_disable);
1265 hci_req_add_le_scan_disable(req);
1270 case DISCOVERY_RESOLVING:
1271 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
1276 bacpy(&cp.bdaddr, &e->data.bdaddr);
1277 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
1283 /* Passive scanning */
1284 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1285 hci_req_add_le_scan_disable(req);
/* Build and run one hci_request that quiesces the controller for
 * power-off: disable page/inquiry scan, stop advertising, abort
 * discovery, then disconnect/cancel/reject every connection according
 * to its state. Returns the hci_req_run() result (-ENODATA when
 * nothing was queued). NOTE(review): the scan variable, several case
 * labels and break statements are elided from this excerpt.
 */
1290 static int clean_up_hci_state(struct hci_dev *hdev)
1292 struct hci_request req;
1293 struct hci_conn *conn;
1295 hci_req_init(&req, hdev);
1297 if (test_bit(HCI_ISCAN, &hdev->flags) ||
1298 test_bit(HCI_PSCAN, &hdev->flags)) {
1300 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1303 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
1304 disable_advertising(&req);
1306 hci_stop_discovery(&req);
1308 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1309 struct hci_cp_disconnect dc;
1310 struct hci_cp_reject_conn_req rej;
1312 switch (conn->state) {
/* Established connections: disconnect */
1315 dc.handle = cpu_to_le16(conn->handle);
1316 dc.reason = 0x15; /* Terminated due to Power Off */
1317 hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
/* Outgoing connection attempts: cancel */
1320 if (conn->type == LE_LINK)
1321 hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
1323 else if (conn->type == ACL_LINK)
1324 hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
/* Incoming connection requests: reject */
1328 bacpy(&rej.bdaddr, &conn->dst);
1329 rej.reason = 0x15; /* Terminated due to Power Off */
1330 if (conn->type == ACL_LINK)
1331 hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
1333 else if (conn->type == SCO_LINK)
1334 hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
1340 return hci_req_run(&req, clean_up_hci_complete);
/* MGMT_OP_SET_POWERED handler: validate the mode value, reject when a
 * SET_POWERED command is already pending, short-circuit when the
 * requested state already matches, then either queue power-on work or
 * cleanly shut the controller down (with a delayed power-off as a
 * backstop). NOTE(review): hci_dev_lock, the busy-status argument,
 * several goto-failed paths and the final return are elided from this
 * excerpt.
 */
1343 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1346 struct mgmt_mode *cp = data;
1347 struct pending_cmd *cmd;
1350 BT_DBG("request for %s", hdev->name);
1352 if (cp->val != 0x00 && cp->val != 0x01)
1353 return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1354 MGMT_STATUS_INVALID_PARAMS);
/* Only one SET_POWERED may be in flight at a time */
1358 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
1359 err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
/* Auto-off pending: cancel it and complete the request directly */
1364 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1365 cancel_delayed_work(&hdev->power_off);
1368 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
1370 err = mgmt_powered(hdev, 1);
/* Requested state already matches the current state */
1375 if (!!cp->val == hdev_is_powered(hdev)) {
1376 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1380 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1387 queue_work(hdev->req_workqueue, &hdev->power_on);
1390 /* Disconnect connections, stop scans, etc */
1391 err = clean_up_hci_state(hdev);
/* Backstop: force power-off even if the cleanup request stalls */
1393 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1394 HCI_POWER_OFF_TIMEOUT);
1396 /* ENODATA means there were no HCI commands queued */
1397 if (err == -ENODATA) {
1398 cancel_delayed_work(&hdev->power_off);
1399 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1405 hci_dev_unlock(hdev);
1409 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1413 ev = cpu_to_le32(get_current_settings(hdev));
1415 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
1420 struct hci_dev *hdev;
/* mgmt_pending_foreach() callback: answer each pending command with
 * the current settings, remember the first socket in the cmd_lookup
 * match (holding a reference), and free the command.
 */
1424 static void settings_rsp(struct pending_cmd *cmd, void *data)
1426 struct cmd_lookup *match = data;
1428 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1430 list_del(&cmd->list);
/* Keep one referenced socket so the caller can skip it when
 * broadcasting new_settings afterwards.
 */
1432 if (match->sk == NULL) {
1433 match->sk = cmd->sk;
1434 sock_hold(match->sk);
1437 mgmt_pending_free(cmd);
1440 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
1444 cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1445 mgmt_pending_remove(cmd);
1448 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1450 if (!lmp_bredr_capable(hdev))
1451 return MGMT_STATUS_NOT_SUPPORTED;
1452 else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1453 return MGMT_STATUS_REJECTED;
1455 return MGMT_STATUS_SUCCESS;
1458 static u8 mgmt_le_support(struct hci_dev *hdev)
1460 if (!lmp_le_capable(hdev))
1461 return MGMT_STATUS_NOT_SUPPORTED;
1462 else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1463 return MGMT_STATUS_REJECTED;
1465 return MGMT_STATUS_SUCCESS;
1468 static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
1470 struct pending_cmd *cmd;
1471 struct mgmt_mode *cp;
1472 struct hci_request req;
1475 BT_DBG("status 0x%02x", status);
1479 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
1484 u8 mgmt_err = mgmt_status(status);
1485 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1486 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1492 changed = !test_and_set_bit(HCI_DISCOVERABLE,
1495 if (hdev->discov_timeout > 0) {
1496 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1497 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1501 changed = test_and_clear_bit(HCI_DISCOVERABLE,
1505 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1508 new_settings(hdev, cmd->sk);
1510 /* When the discoverable mode gets changed, make sure
1511 * that class of device has the limited discoverable
1512 * bit correctly set.
1514 hci_req_init(&req, hdev);
1516 hci_req_run(&req, NULL);
1519 mgmt_pending_remove(cmd);
1522 hci_dev_unlock(hdev);
/* Handler for the MGMT Set Discoverable command (val: 0x00 off,
 * 0x01 general, 0x02 limited; plus an optional timeout in seconds).
 * NOTE(review): this excerpt is elided — braces, declarations and some
 * statements are missing, and each line carries a pasted-in original
 * line number; documented as-is, do not treat as compilable.
 */
1525 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1528 struct mgmt_cp_set_discoverable *cp = data;
1529 struct pending_cmd *cmd;
1530 struct hci_request req;
1535 BT_DBG("request for %s", hdev->name);
/* Reject when neither LE nor BR/EDR is enabled */
1537 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1538 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1539 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1540 MGMT_STATUS_REJECTED);
1542 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1543 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1544 MGMT_STATUS_INVALID_PARAMS);
1546 timeout = __le16_to_cpu(cp->timeout);
1548 /* Disabling discoverable requires that no timeout is set,
1549 * and enabling limited discoverable requires a timeout.
1551 if ((cp->val == 0x00 && timeout > 0) ||
1552 (cp->val == 0x02 && timeout == 0))
1553 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1554 MGMT_STATUS_INVALID_PARAMS);
/* A timeout cannot be armed while the adapter is powered off */
1558 if (!hdev_is_powered(hdev) && timeout > 0) {
1559 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1560 MGMT_STATUS_NOT_POWERED);
/* Only one discoverable/connectable transition may be in flight */
1564 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1565 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1566 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1571 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
1572 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1573 MGMT_STATUS_REJECTED);
/* Powered-off case: just toggle the flag, no HCI traffic needed */
1577 if (!hdev_is_powered(hdev)) {
1578 bool changed = false;
1580 /* Setting limited discoverable when powered off is
1581 * not a valid operation since it requires a timeout
1582 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1584 if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
1585 change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1589 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1594 err = new_settings(hdev, sk);
1599 /* If the current mode is the same, then just update the timeout
1600 * value with the new value. And if only the timeout gets updated,
1601 * then no need for any HCI transactions.
1603 if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
1604 (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
1605 &hdev->dev_flags)) {
1606 cancel_delayed_work(&hdev->discov_off);
1607 hdev->discov_timeout = timeout;
1609 if (cp->val && hdev->discov_timeout > 0) {
1610 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1611 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1615 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1619 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1625 /* Cancel any potential discoverable timeout that might be
1626 * still active and store new timeout value. The arming of
1627 * the timeout happens in the complete handler.
1629 cancel_delayed_work(&hdev->discov_off);
1630 hdev->discov_timeout = timeout;
1632 /* Limited discoverable mode */
1633 if (cp->val == 0x02)
1634 set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1636 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1638 hci_req_init(&req, hdev);
1640 /* The procedure for LE-only controllers is much simpler - just
1641 * update the advertising data.
1643 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
/* BR/EDR path: program the inquiry access codes (LIAC/GIAC LAPs) */
1649 struct hci_cp_write_current_iac_lap hci_cp;
1651 if (cp->val == 0x02) {
1652 /* Limited discoverable mode */
1653 hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
1654 hci_cp.iac_lap[0] = 0x00; /* LIAC */
1655 hci_cp.iac_lap[1] = 0x8b;
1656 hci_cp.iac_lap[2] = 0x9e;
1657 hci_cp.iac_lap[3] = 0x33; /* GIAC */
1658 hci_cp.iac_lap[4] = 0x8b;
1659 hci_cp.iac_lap[5] = 0x9e;
1661 /* General discoverable mode */
1663 hci_cp.iac_lap[0] = 0x33; /* GIAC */
1664 hci_cp.iac_lap[1] = 0x8b;
1665 hci_cp.iac_lap[2] = 0x9e;
1668 hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1669 (hci_cp.num_iac * 3) + 1, &hci_cp);
1671 scan |= SCAN_INQUIRY;
1673 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1676 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
/* Advertising data carries the discoverable flags too */
1679 update_adv_data(&req);
1681 err = hci_req_run(&req, set_discoverable_complete);
1683 mgmt_pending_remove(cmd);
1686 hci_dev_unlock(hdev);
1690 static void write_fast_connectable(struct hci_request *req, bool enable)
1692 struct hci_dev *hdev = req->hdev;
1693 struct hci_cp_write_page_scan_activity acp;
1696 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1699 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1703 type = PAGE_SCAN_TYPE_INTERLACED;
1705 /* 160 msec page scan interval */
1706 acp.interval = cpu_to_le16(0x0100);
1708 type = PAGE_SCAN_TYPE_STANDARD; /* default */
1710 /* default 1.28 sec page scan */
1711 acp.interval = cpu_to_le16(0x0800);
1714 acp.window = cpu_to_le16(0x0012);
1716 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1717 __cpu_to_le16(hdev->page_scan_window) != acp.window)
1718 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1721 if (hdev->page_scan_type != type)
1722 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
1725 static void set_connectable_complete(struct hci_dev *hdev, u8 status)
1727 struct pending_cmd *cmd;
1728 struct mgmt_mode *cp;
1731 BT_DBG("status 0x%02x", status);
1735 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1740 u8 mgmt_err = mgmt_status(status);
1741 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1747 changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1749 changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1751 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1754 new_settings(hdev, cmd->sk);
1755 hci_update_background_scan(hdev);
1759 mgmt_pending_remove(cmd);
1762 hci_dev_unlock(hdev);
1765 static int set_connectable_update_settings(struct hci_dev *hdev,
1766 struct sock *sk, u8 val)
1768 bool changed = false;
1771 if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1775 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1777 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1778 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1781 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1786 return new_settings(hdev, sk);
/* Handler for the MGMT Set Connectable command (val: 0x00/0x01).
 * NOTE(review): elided excerpt — braces, declarations (err, scan) and
 * some statements are missing, and each line carries a pasted-in
 * original line number; documented as-is.
 */
1791 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1794 struct mgmt_mode *cp = data;
1795 struct pending_cmd *cmd;
1796 struct hci_request req;
1800 BT_DBG("request for %s", hdev->name);
1802 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1803 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1804 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1805 MGMT_STATUS_REJECTED);
1807 if (cp->val != 0x00 && cp->val != 0x01)
1808 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1809 MGMT_STATUS_INVALID_PARAMS);
/* Powered-off: no HCI traffic, just update the stored settings */
1813 if (!hdev_is_powered(hdev)) {
1814 err = set_connectable_update_settings(hdev, sk, cp->val);
1818 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1819 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1820 err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1825 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1831 hci_req_init(&req, hdev);
1833 /* If BR/EDR is not enabled and we disable advertising as a
1834 * by-product of disabling connectable, we need to update the
1835 * advertising flags.
1837 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1839 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1840 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1842 update_adv_data(&req);
1843 } else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
/* Dropping page scan also cancels a running discoverable timeout */
1849 if (test_bit(HCI_ISCAN, &hdev->flags) &&
1850 hdev->discov_timeout > 0)
1851 cancel_delayed_work(&hdev->discov_off);
1854 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1857 /* If we're going from non-connectable to connectable or
1858 * vice-versa when fast connectable is enabled ensure that fast
1859 * connectable gets disabled. write_fast_connectable won't do
1860 * anything if the page scan parameters are already what they
1863 if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
1864 write_fast_connectable(&req, false);
/* Restart advertising so its flags reflect the new connectable mode */
1866 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) &&
1867 hci_conn_num(hdev, LE_LINK) == 0) {
1868 disable_advertising(&req);
1869 enable_advertising(&req);
1872 err = hci_req_run(&req, set_connectable_complete);
1874 mgmt_pending_remove(cmd);
/* ENODATA: nothing was queued — fall back to the flag-only path */
1875 if (err == -ENODATA)
1876 err = set_connectable_update_settings(hdev, sk,
1882 hci_dev_unlock(hdev);
1886 static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1889 struct mgmt_mode *cp = data;
1893 BT_DBG("request for %s", hdev->name);
1895 if (cp->val != 0x00 && cp->val != 0x01)
1896 return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1897 MGMT_STATUS_INVALID_PARAMS);
1902 changed = !test_and_set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1904 changed = test_and_clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1906 err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1911 err = new_settings(hdev, sk);
1914 hci_dev_unlock(hdev);
/* Handler for the MGMT Set Link Security (authentication) command.
 * NOTE(review): elided excerpt — braces, declarations (val, status,
 * err) and some statements are missing, and each line carries a
 * pasted-in original line number; documented as-is.
 */
1918 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1921 struct mgmt_mode *cp = data;
1922 struct pending_cmd *cmd;
1926 BT_DBG("request for %s", hdev->name);
/* Link security is a BR/EDR-only concept */
1928 status = mgmt_bredr_support(hdev);
1930 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1933 if (cp->val != 0x00 && cp->val != 0x01)
1934 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1935 MGMT_STATUS_INVALID_PARAMS);
/* Powered-off: toggle the stored flag only */
1939 if (!hdev_is_powered(hdev)) {
1940 bool changed = false;
1942 if (!!cp->val != test_bit(HCI_LINK_SECURITY,
1943 &hdev->dev_flags)) {
1944 change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
1948 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1953 err = new_settings(hdev, sk);
1958 if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1959 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
/* Controller already in the requested auth state: reply directly */
1966 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1967 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1971 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1977 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1979 mgmt_pending_remove(cmd);
1984 hci_dev_unlock(hdev);
/* Handler for the MGMT Set Secure Simple Pairing command.
 * NOTE(review): elided excerpt — braces, declarations and some
 * statements are missing, and each line carries a pasted-in original
 * line number; documented as-is.
 */
1988 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1990 struct mgmt_mode *cp = data;
1991 struct pending_cmd *cmd;
1995 BT_DBG("request for %s", hdev->name);
1997 status = mgmt_bredr_support(hdev);
1999 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
2001 if (!lmp_ssp_capable(hdev))
2002 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2003 MGMT_STATUS_NOT_SUPPORTED);
2005 if (cp->val != 0x00 && cp->val != 0x01)
2006 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2007 MGMT_STATUS_INVALID_PARAMS);
/* Powered-off: adjust flags only; disabling SSP also drops HS */
2011 if (!hdev_is_powered(hdev)) {
2015 changed = !test_and_set_bit(HCI_SSP_ENABLED,
2018 changed = test_and_clear_bit(HCI_SSP_ENABLED,
2021 changed = test_and_clear_bit(HCI_HS_ENABLED,
2024 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
2027 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2032 err = new_settings(hdev, sk);
2037 if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
2038 mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
2039 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2044 if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
2045 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2049 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
/* Turning SSP off also turns SSP debug mode off */
2055 if (!cp->val && test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
2056 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
2057 sizeof(cp->val), &cp->val);
2059 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
2061 mgmt_pending_remove(cmd);
2066 hci_dev_unlock(hdev);
/* Handler for the MGMT Set High Speed (802.11 AMP) command — a pure
 * host-side flag that additionally requires SSP to be enabled.
 * NOTE(review): elided excerpt — braces, declarations and some
 * statements are missing, and each line carries a pasted-in original
 * line number; documented as-is.
 */
2070 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2072 struct mgmt_mode *cp = data;
2077 BT_DBG("request for %s", hdev->name);
2079 status = mgmt_bredr_support(hdev);
2081 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
2083 if (!lmp_ssp_capable(hdev))
2084 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2085 MGMT_STATUS_NOT_SUPPORTED);
/* HS requires SSP to be enabled first */
2087 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
2088 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2089 MGMT_STATUS_REJECTED);
2091 if (cp->val != 0x00 && cp->val != 0x01)
2092 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2093 MGMT_STATUS_INVALID_PARAMS);
2098 changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
/* NOTE(review): in this elided view, disabling HS appears to be
 * rejected while powered — confirm against the complete source.
 */
2100 if (hdev_is_powered(hdev)) {
2101 err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2102 MGMT_STATUS_REJECTED);
2106 changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
2109 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
2114 err = new_settings(hdev, sk);
2117 hci_dev_unlock(hdev);
2121 static void le_enable_complete(struct hci_dev *hdev, u8 status)
2123 struct cmd_lookup match = { NULL, hdev };
2126 u8 mgmt_err = mgmt_status(status);
2128 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2133 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
2135 new_settings(hdev, match.sk);
2140 /* Make sure the controller has a good default for
2141 * advertising data. Restrict the update to when LE
2142 * has actually been enabled. During power on, the
2143 * update in powered_update_hci will take care of it.
2145 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
2146 struct hci_request req;
2150 hci_req_init(&req, hdev);
2151 update_adv_data(&req);
2152 update_scan_rsp_data(&req);
2153 hci_req_run(&req, NULL);
2155 hci_dev_unlock(hdev);
/* Handler for the MGMT Set Low Energy command.
 * NOTE(review): elided excerpt — braces, declarations (val, enabled,
 * err) and some statements are missing, and each line carries a
 * pasted-in original line number; documented as-is.
 */
2159 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2161 struct mgmt_mode *cp = data;
2162 struct hci_cp_write_le_host_supported hci_cp;
2163 struct pending_cmd *cmd;
2164 struct hci_request req;
2168 BT_DBG("request for %s", hdev->name);
2170 if (!lmp_le_capable(hdev))
2171 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2172 MGMT_STATUS_NOT_SUPPORTED);
2174 if (cp->val != 0x00 && cp->val != 0x01)
2175 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2176 MGMT_STATUS_INVALID_PARAMS);
2178 /* LE-only devices do not allow toggling LE on/off */
2179 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
2180 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2181 MGMT_STATUS_REJECTED);
/* If the controller's host LE support already matches, flags only */
2186 enabled = lmp_host_le_capable(hdev);
2188 if (!hdev_is_powered(hdev) || val == enabled) {
2189 bool changed = false;
2191 if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
2192 change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
/* Disabling LE implicitly stops advertising */
2196 if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
2197 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
2201 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2206 err = new_settings(hdev, sk);
2211 if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
2212 mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2213 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2218 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2224 hci_req_init(&req, hdev);
2226 memset(&hci_cp, 0, sizeof(hci_cp));
2230 hci_cp.simul = lmp_le_br_capable(hdev);
/* Turn off advertising before disabling LE host support */
2232 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
2233 disable_advertising(&req);
2236 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
2239 err = hci_req_run(&req, le_enable_complete);
2241 mgmt_pending_remove(cmd);
2244 hci_dev_unlock(hdev);
2248 /* This is a helper function to test for pending mgmt commands that can
2249 * cause CoD or EIR HCI commands. We can only allow one such pending
2250 * mgmt command at a time since otherwise we cannot easily track what
2251 * the current values are, will be, and based on that calculate if a new
2252 * HCI command needs to be sent and if yes with what value.
2254 static bool pending_eir_or_class(struct hci_dev *hdev)
2256 struct pending_cmd *cmd;
2258 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2259 switch (cmd->opcode) {
2260 case MGMT_OP_ADD_UUID:
2261 case MGMT_OP_REMOVE_UUID:
2262 case MGMT_OP_SET_DEV_CLASS:
2263 case MGMT_OP_SET_POWERED:
2271 static const u8 bluetooth_base_uuid[] = {
2272 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2273 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2276 static u8 get_uuid_size(const u8 *uuid)
2280 if (memcmp(uuid, bluetooth_base_uuid, 12))
2283 val = get_unaligned_le32(&uuid[12]);
2290 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2292 struct pending_cmd *cmd;
2296 cmd = mgmt_pending_find(mgmt_op, hdev);
2300 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
2301 hdev->dev_class, 3);
2303 mgmt_pending_remove(cmd);
2306 hci_dev_unlock(hdev);
2309 static void add_uuid_complete(struct hci_dev *hdev, u8 status)
2311 BT_DBG("status 0x%02x", status);
2313 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
/* Handler for the MGMT Add UUID command: record the UUID, then refresh
 * class of device and EIR via an HCI request.
 * NOTE(review): elided excerpt — braces, error paths and some
 * statements are missing, and each line carries a pasted-in original
 * line number; documented as-is.
 */
2316 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2318 struct mgmt_cp_add_uuid *cp = data;
2319 struct pending_cmd *cmd;
2320 struct hci_request req;
2321 struct bt_uuid *uuid;
2324 BT_DBG("request for %s", hdev->name);
/* Only one CoD/EIR-affecting command may be pending at a time */
2328 if (pending_eir_or_class(hdev)) {
2329 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2334 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2340 memcpy(uuid->uuid, cp->uuid, 16);
2341 uuid->svc_hint = cp->svc_hint;
2342 uuid->size = get_uuid_size(cp->uuid);
2344 list_add_tail(&uuid->list, &hdev->uuids);
2346 hci_req_init(&req, hdev);
2351 err = hci_req_run(&req, add_uuid_complete);
/* ENODATA: nothing queued — complete immediately with current class */
2353 if (err != -ENODATA)
2356 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2357 hdev->dev_class, 3);
2361 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2370 hci_dev_unlock(hdev);
2374 static bool enable_service_cache(struct hci_dev *hdev)
2376 if (!hdev_is_powered(hdev))
2379 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2380 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2388 static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
2390 BT_DBG("status 0x%02x", status);
2392 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
/* Handler for the MGMT Remove UUID command; the all-zero UUID clears
 * every registered UUID at once.
 * NOTE(review): elided excerpt — braces, error paths and some
 * statements are missing, and each line carries a pasted-in original
 * line number; documented as-is.
 */
2395 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2398 struct mgmt_cp_remove_uuid *cp = data;
2399 struct pending_cmd *cmd;
2400 struct bt_uuid *match, *tmp;
2401 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2402 struct hci_request req;
2405 BT_DBG("request for %s", hdev->name);
2409 if (pending_eir_or_class(hdev)) {
2410 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
/* Wildcard: drop the whole UUID list */
2415 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2416 hci_uuids_clear(hdev);
/* If the service cache re-armed, the EIR update is deferred */
2418 if (enable_service_cache(hdev)) {
2419 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2420 0, hdev->dev_class, 3);
/* Otherwise remove every list entry matching the given UUID */
2429 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2430 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2433 list_del(&match->list);
2439 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2440 MGMT_STATUS_INVALID_PARAMS);
2445 hci_req_init(&req, hdev);
2450 err = hci_req_run(&req, remove_uuid_complete);
2452 if (err != -ENODATA)
2455 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2456 hdev->dev_class, 3);
2460 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2469 hci_dev_unlock(hdev);
2473 static void set_class_complete(struct hci_dev *hdev, u8 status)
2475 BT_DBG("status 0x%02x", status);
2477 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* Handler for the MGMT Set Device Class command.
 * NOTE(review): elided excerpt — braces, error paths and some
 * statements are missing, and each line carries a pasted-in original
 * line number; documented as-is.
 */
2480 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2483 struct mgmt_cp_set_dev_class *cp = data;
2484 struct pending_cmd *cmd;
2485 struct hci_request req;
2488 BT_DBG("request for %s", hdev->name);
2490 if (!lmp_bredr_capable(hdev))
2491 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2492 MGMT_STATUS_NOT_SUPPORTED);
2496 if (pending_eir_or_class(hdev)) {
2497 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
/* Lower two minor bits and upper three major bits are reserved */
2502 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2503 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2504 MGMT_STATUS_INVALID_PARAMS);
2508 hdev->major_class = cp->major;
2509 hdev->minor_class = cp->minor;
/* Powered-off: stored values take effect at next power on */
2511 if (!hdev_is_powered(hdev)) {
2512 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2513 hdev->dev_class, 3);
2517 hci_req_init(&req, hdev);
/* Flush the service cache synchronously (lock dropped for the sync
 * cancel of the delayed work)
 */
2519 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2520 hci_dev_unlock(hdev);
2521 cancel_delayed_work_sync(&hdev->service_cache);
2528 err = hci_req_run(&req, set_class_complete);
2530 if (err != -ENODATA)
2533 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2534 hdev->dev_class, 3);
2538 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2547 hci_dev_unlock(hdev);
/* Handler for the MGMT Load Link Keys command: validate and install a
 * batch of stored BR/EDR link keys.
 * NOTE(review): elided excerpt — braces, declarations (changed, i) and
 * some statements are missing, and each line carries a pasted-in
 * original line number; documented as-is.
 */
2551 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2554 struct mgmt_cp_load_link_keys *cp = data;
/* Upper bound keeps expected_len below U16_MAX (guards overflow) */
2555 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2556 sizeof(struct mgmt_link_key_info));
2557 u16 key_count, expected_len;
2561 BT_DBG("request for %s", hdev->name);
2563 if (!lmp_bredr_capable(hdev))
2564 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2565 MGMT_STATUS_NOT_SUPPORTED);
2567 key_count = __le16_to_cpu(cp->key_count);
2568 if (key_count > max_key_count) {
2569 BT_ERR("load_link_keys: too big key_count value %u",
2571 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2572 MGMT_STATUS_INVALID_PARAMS);
/* The wire length must match the declared key count exactly */
2575 expected_len = sizeof(*cp) + key_count *
2576 sizeof(struct mgmt_link_key_info);
2577 if (expected_len != len) {
2578 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2580 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2581 MGMT_STATUS_INVALID_PARAMS);
2584 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2585 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2586 MGMT_STATUS_INVALID_PARAMS);
2588 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
/* Pre-validate every entry before touching the key store */
2591 for (i = 0; i < key_count; i++) {
2592 struct mgmt_link_key_info *key = &cp->keys[i];
2594 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2595 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2596 MGMT_STATUS_INVALID_PARAMS);
2601 hci_link_keys_clear(hdev);
2604 changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
2607 changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
2611 new_settings(hdev, NULL);
2613 for (i = 0; i < key_count; i++) {
2614 struct mgmt_link_key_info *key = &cp->keys[i];
2616 /* Always ignore debug keys and require a new pairing if
2617 * the user wants to use them.
2619 if (key->type == HCI_LK_DEBUG_COMBINATION)
2622 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2623 key->type, key->pin_len, NULL);
2626 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2628 hci_dev_unlock(hdev);
2633 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2634 u8 addr_type, struct sock *skip_sk)
2636 struct mgmt_ev_device_unpaired ev;
2638 bacpy(&ev.addr.bdaddr, bdaddr);
2639 ev.addr.type = addr_type;
2641 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* Handler for the MGMT Unpair Device command: delete stored keys for
 * the address and optionally disconnect the existing link.
 * NOTE(review): elided excerpt — braces, declarations (addr_type, err)
 * and some statements are missing, and each line carries a pasted-in
 * original line number; documented as-is.
 */
2645 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2648 struct mgmt_cp_unpair_device *cp = data;
2649 struct mgmt_rp_unpair_device rp;
2650 struct hci_cp_disconnect dc;
2651 struct pending_cmd *cmd;
2652 struct hci_conn *conn;
2655 memset(&rp, 0, sizeof(rp));
2656 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2657 rp.addr.type = cp->addr.type;
2659 if (!bdaddr_type_is_valid(cp->addr.type))
2660 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2661 MGMT_STATUS_INVALID_PARAMS,
2664 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2665 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2666 MGMT_STATUS_INVALID_PARAMS,
2671 if (!hdev_is_powered(hdev)) {
2672 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2673 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* BR/EDR: remove the link key; LE: remove IRK, conn params and LTKs */
2677 if (cp->addr.type == BDADDR_BREDR) {
2678 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2682 if (cp->addr.type == BDADDR_LE_PUBLIC)
2683 addr_type = ADDR_LE_DEV_PUBLIC;
2685 addr_type = ADDR_LE_DEV_RANDOM;
2687 hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
2689 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
2691 err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
/* No key was stored for this address */
2695 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2696 MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
2700 if (cp->disconnect) {
2701 if (cp->addr.type == BDADDR_BREDR)
2702 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2705 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
/* No connection to tear down: complete and notify immediately */
2712 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2714 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2718 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2725 dc.handle = cpu_to_le16(conn->handle);
2726 dc.reason = 0x13; /* Remote User Terminated Connection */
2727 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2729 mgmt_pending_remove(cmd);
2732 hci_dev_unlock(hdev);
/* Handler for the MGMT Disconnect command.
 * NOTE(review): elided excerpt — braces, declarations (err) and some
 * statements are missing, and each line carries a pasted-in original
 * line number; documented as-is.
 */
2736 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2739 struct mgmt_cp_disconnect *cp = data;
2740 struct mgmt_rp_disconnect rp;
2741 struct hci_cp_disconnect dc;
2742 struct pending_cmd *cmd;
2743 struct hci_conn *conn;
2748 memset(&rp, 0, sizeof(rp));
2749 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2750 rp.addr.type = cp->addr.type;
2752 if (!bdaddr_type_is_valid(cp->addr.type))
2753 return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2754 MGMT_STATUS_INVALID_PARAMS,
2759 if (!test_bit(HCI_UP, &hdev->flags)) {
2760 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2761 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Only one disconnect may be pending at a time */
2765 if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
2766 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2767 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2771 if (cp->addr.type == BDADDR_BREDR)
2772 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2775 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
2777 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2778 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2779 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
2783 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2789 dc.handle = cpu_to_le16(conn->handle);
2790 dc.reason = HCI_ERROR_REMOTE_USER_TERM;
2792 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2794 mgmt_pending_remove(cmd);
2797 hci_dev_unlock(hdev);
2801 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2803 switch (link_type) {
2805 switch (addr_type) {
2806 case ADDR_LE_DEV_PUBLIC:
2807 return BDADDR_LE_PUBLIC;
2810 /* Fallback to LE Random address type */
2811 return BDADDR_LE_RANDOM;
2815 /* Fallback to BR/EDR type */
2816 return BDADDR_BREDR;
/* Handler for the MGMT Get Connections command: report the addresses
 * of all mgmt-visible (non-SCO) connections.
 * NOTE(review): elided excerpt — braces, declarations (c, i, rp_len,
 * err) and some statements are missing, and each line carries a
 * pasted-in original line number; documented as-is.
 */
2820 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2823 struct mgmt_rp_get_connections *rp;
2833 if (!hdev_is_powered(hdev)) {
2834 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2835 MGMT_STATUS_NOT_POWERED);
/* First pass: count connections to size the response buffer */
2840 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2841 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2845 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2846 rp = kmalloc(rp_len, GFP_KERNEL);
/* Second pass: fill in addresses, skipping SCO/eSCO links */
2853 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2854 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2856 bacpy(&rp->addr[i].bdaddr, &c->dst);
2857 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2858 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2863 rp->conn_count = cpu_to_le16(i);
2865 /* Recalculate length in case of filtered SCO connections, etc */
2866 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2868 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2874 hci_dev_unlock(hdev);
2878 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2879 struct mgmt_cp_pin_code_neg_reply *cp)
2881 struct pending_cmd *cmd;
2884 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2889 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2890 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2892 mgmt_pending_remove(cmd);
/* Handler for the MGMT PIN Code Reply command.
 * NOTE(review): elided excerpt — braces, declarations (err) and some
 * statements are missing, and each line carries a pasted-in original
 * line number; documented as-is.
 */
2897 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2900 struct hci_conn *conn;
2901 struct mgmt_cp_pin_code_reply *cp = data;
2902 struct hci_cp_pin_code_reply reply;
2903 struct pending_cmd *cmd;
2910 if (!hdev_is_powered(hdev)) {
2911 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2912 MGMT_STATUS_NOT_POWERED);
2916 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2918 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2919 MGMT_STATUS_NOT_CONNECTED);
/* High security requires a full 16-byte PIN; otherwise NAK the pairing */
2923 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2924 struct mgmt_cp_pin_code_neg_reply ncp;
2926 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2928 BT_ERR("PIN code is not 16 bytes long");
2930 err = send_pin_code_neg_reply(sk, hdev, &ncp);
2932 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2933 MGMT_STATUS_INVALID_PARAMS);
2938 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2944 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2945 reply.pin_len = cp->pin_len;
2946 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2948 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2950 mgmt_pending_remove(cmd);
2953 hci_dev_unlock(hdev);
2957 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2960 struct mgmt_cp_set_io_capability *cp = data;
2964 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2965 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
2966 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
2970 hdev->io_capability = cp->io_capability;
2972 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2973 hdev->io_capability);
2975 hci_dev_unlock(hdev);
2977 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
2981 static struct pending_cmd *find_pairing(struct hci_conn *conn)
2983 struct hci_dev *hdev = conn->hdev;
2984 struct pending_cmd *cmd;
2986 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2987 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2990 if (cmd->user_data != conn)
2999 static void pairing_complete(struct pending_cmd *cmd, u8 status)
3001 struct mgmt_rp_pair_device rp;
3002 struct hci_conn *conn = cmd->user_data;
3004 bacpy(&rp.addr.bdaddr, &conn->dst);
3005 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3007 cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
3010 /* So we don't get further callbacks for this connection */
3011 conn->connect_cfm_cb = NULL;
3012 conn->security_cfm_cb = NULL;
3013 conn->disconn_cfm_cb = NULL;
3015 hci_conn_drop(conn);
3017 mgmt_pending_remove(cmd);
3020 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3022 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3023 struct pending_cmd *cmd;
3025 cmd = find_pairing(conn);
3027 pairing_complete(cmd, status);
3030 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3032 struct pending_cmd *cmd;
3034 BT_DBG("status %u", status);
3036 cmd = find_pairing(conn);
3038 BT_DBG("Unable to find a pending command");
3040 pairing_complete(cmd, mgmt_status(status));
/* LE counterpart of pairing_complete_cb: completes the pending Pair
 * Device command with the translated HCI status.
 * NOTE(review): elided lines (orig 3048-3051) presumably filter out the
 * success case for LE, where SMP signals completion separately — confirm.
 */
3043 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3045 struct pending_cmd *cmd;
3047 BT_DBG("status %u", status);
3052 cmd = find_pairing(conn);
3054 BT_DBG("Unable to find a pending command");
3056 pairing_complete(cmd, mgmt_status(status));
/* Pair Device (MGMT_OP_PAIR_DEVICE) handler.
 * Validates the address type and IO capability, establishes an ACL (for
 * BR/EDR) or LE connection to the remote, registers pairing callbacks on
 * the connection, and tracks the request as a pending command that is
 * completed by the callbacks above.
 */
3059 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3062 struct mgmt_cp_pair_device *cp = data;
3063 struct mgmt_rp_pair_device rp;
3064 struct pending_cmd *cmd;
3065 u8 sec_level, auth_type;
3066 struct hci_conn *conn;
/* Echo the target address back in every response. */
3071 memset(&rp, 0, sizeof(rp));
3072 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3073 rp.addr.type = cp->addr.type;
3075 if (!bdaddr_type_is_valid(cp->addr.type))
3076 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3077 MGMT_STATUS_INVALID_PARAMS,
3080 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3081 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3082 MGMT_STATUS_INVALID_PARAMS,
3087 if (!hdev_is_powered(hdev)) {
3088 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3089 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Dedicated bonding at medium security is the default pairing policy. */
3093 sec_level = BT_SECURITY_MEDIUM;
3094 auth_type = HCI_AT_DEDICATED_BONDING;
3096 if (cp->addr.type == BDADDR_BREDR) {
3097 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3102 /* Convert from L2CAP channel address type to HCI address type
3104 if (cp->addr.type == BDADDR_LE_PUBLIC)
3105 addr_type = ADDR_LE_DEV_PUBLIC;
3107 addr_type = ADDR_LE_DEV_RANDOM;
3109 /* When pairing a new device, it is expected to remember
3110 * this device for future connections. Adding the connection
3111 * parameter information ahead of time allows tracking
3112 * of the slave preferred values and will speed up any
3113 * further connection establishment.
3115 * If connection parameters already exist, then they
3116 * will be kept and this function does nothing.
3118 hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3120 conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
3121 sec_level, HCI_LE_CONN_TIMEOUT);
/* Connection attempt failed: map -EBUSY to BUSY, anything else to
 * CONNECT_FAILED. (The IS_ERR(conn) guard is in elided lines.)
 */
3127 if (PTR_ERR(conn) == -EBUSY)
3128 status = MGMT_STATUS_BUSY;
3130 status = MGMT_STATUS_CONNECT_FAILED;
3132 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
/* A connection that already has callbacks is being paired elsewhere. */
3138 if (conn->connect_cfm_cb) {
3139 hci_conn_drop(conn);
3140 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3141 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3145 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3148 hci_conn_drop(conn);
3152 /* For LE, just connecting isn't a proof that the pairing finished */
3153 if (cp->addr.type == BDADDR_BREDR) {
3154 conn->connect_cfm_cb = pairing_complete_cb;
3155 conn->security_cfm_cb = pairing_complete_cb;
3156 conn->disconn_cfm_cb = pairing_complete_cb;
3158 conn->connect_cfm_cb = le_pairing_complete_cb;
3159 conn->security_cfm_cb = le_pairing_complete_cb;
3160 conn->disconn_cfm_cb = le_pairing_complete_cb;
3163 conn->io_capability = cp->io_cap;
3164 cmd->user_data = conn;
/* If already connected and security is satisfied, complete at once. */
3166 if (conn->state == BT_CONNECTED &&
3167 hci_conn_security(conn, sec_level, auth_type))
3168 pairing_complete(cmd, 0);
3173 hci_dev_unlock(hdev);
/* Cancel Pair Device (MGMT_OP_CANCEL_PAIR_DEVICE) handler.
 * Finds the pending Pair Device command for the given address and
 * completes it with CANCELLED, then acks the cancel request itself.
 */
3177 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3180 struct mgmt_addr_info *addr = data;
3181 struct pending_cmd *cmd;
3182 struct hci_conn *conn;
3189 if (!hdev_is_powered(hdev)) {
3190 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3191 MGMT_STATUS_NOT_POWERED);
3195 cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
/* No pairing in progress (NULL-check guard elided in this view). */
3197 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3198 MGMT_STATUS_INVALID_PARAMS);
3202 conn = cmd->user_data;
/* The cancel must target the same remote the pairing was started for. */
3204 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3205 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3206 MGMT_STATUS_INVALID_PARAMS);
3210 pairing_complete(cmd, MGMT_STATUS_CANCELLED);
3212 err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3213 addr, sizeof(*addr));
3215 hci_dev_unlock(hdev);
/* Common helper for all user pairing responses (PIN code, user confirm,
 * passkey, and their negative variants). For LE links the reply is
 * handled entirely by SMP; for BR/EDR it is forwarded to the controller
 * via the given HCI opcode, tracked as a pending mgmt command.
 */
3219 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3220 struct mgmt_addr_info *addr, u16 mgmt_op,
3221 u16 hci_op, __le32 passkey)
3223 struct pending_cmd *cmd;
3224 struct hci_conn *conn;
3229 if (!hdev_is_powered(hdev)) {
3230 err = cmd_complete(sk, hdev->id, mgmt_op,
3231 MGMT_STATUS_NOT_POWERED, addr,
/* Look the connection up on the link type implied by the address type. */
3236 if (addr->type == BDADDR_BREDR)
3237 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3239 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
3242 err = cmd_complete(sk, hdev->id, mgmt_op,
3243 MGMT_STATUS_NOT_CONNECTED, addr,
/* LE: hand the reply to SMP and answer immediately; no HCI command. */
3248 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3249 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3251 err = cmd_complete(sk, hdev->id, mgmt_op,
3252 MGMT_STATUS_SUCCESS, addr,
3255 err = cmd_complete(sk, hdev->id, mgmt_op,
3256 MGMT_STATUS_FAILED, addr,
3262 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3268 /* Continue with pairing via HCI */
/* Passkey replies carry the passkey; all other ops send just the bdaddr. */
3269 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3270 struct hci_cp_user_passkey_reply cp;
3272 bacpy(&cp.bdaddr, &addr->bdaddr);
3273 cp.passkey = passkey;
3274 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3276 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3280 mgmt_pending_remove(cmd);
3283 hci_dev_unlock(hdev);
/* PIN Code Negative Reply handler: thin wrapper around user_pairing_resp
 * with the corresponding mgmt/HCI opcodes and no passkey.
 */
3287 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3288 void *data, u16 len)
3290 struct mgmt_cp_pin_code_neg_reply *cp = data;
3294 return user_pairing_resp(sk, hdev, &cp->addr,
3295 MGMT_OP_PIN_CODE_NEG_REPLY,
3296 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* User Confirmation Reply handler: validates the parameter length, then
 * delegates to user_pairing_resp with the confirm opcodes.
 */
3299 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3302 struct mgmt_cp_user_confirm_reply *cp = data;
/* Fixed-size command: reject anything but an exact-length payload. */
3306 if (len != sizeof(*cp))
3307 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3308 MGMT_STATUS_INVALID_PARAMS);
3310 return user_pairing_resp(sk, hdev, &cp->addr,
3311 MGMT_OP_USER_CONFIRM_REPLY,
3312 HCI_OP_USER_CONFIRM_REPLY, 0);
/* User Confirmation Negative Reply handler: delegates to
 * user_pairing_resp with the negative-confirm opcodes.
 */
3315 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3316 void *data, u16 len)
3318 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3322 return user_pairing_resp(sk, hdev, &cp->addr,
3323 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3324 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* User Passkey Reply handler: forwards the user-entered passkey through
 * user_pairing_resp with the passkey-reply opcodes.
 */
3327 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3330 struct mgmt_cp_user_passkey_reply *cp = data;
3334 return user_pairing_resp(sk, hdev, &cp->addr,
3335 MGMT_OP_USER_PASSKEY_REPLY,
3336 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* User Passkey Negative Reply handler: delegates to user_pairing_resp
 * with the negative-passkey opcodes and no passkey value.
 */
3339 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3340 void *data, u16 len)
3342 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3346 return user_pairing_resp(sk, hdev, &cp->addr,
3347 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3348 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Queue an HCI Write Local Name command carrying hdev->dev_name onto the
 * given request.
 */
3351 static void update_name(struct hci_request *req)
3353 struct hci_dev *hdev = req->hdev;
3354 struct hci_cp_write_local_name cp;
3356 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3358 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
/* Request-completion callback for Set Local Name: answers the pending
 * mgmt command with failure status or a command complete carrying the
 * accepted name, then removes it.
 */
3361 static void set_name_complete(struct hci_dev *hdev, u8 status)
3363 struct mgmt_cp_set_local_name *cp;
3364 struct pending_cmd *cmd;
3366 BT_DBG("status 0x%02x", status);
3370 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
/* NOTE(review): NULL check and cp = cmd->param are in elided lines. */
3377 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3378 mgmt_status(status));
3380 cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3383 mgmt_pending_remove(cmd);
3386 hci_dev_unlock(hdev);
/* Set Local Name (MGMT_OP_SET_LOCAL_NAME) handler.
 * Short-circuits when the names are unchanged or the device is powered
 * off (stored directly, event emitted); otherwise builds an HCI request
 * to write the name and, on LE, refresh the scan response data.
 */
3389 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3392 struct mgmt_cp_set_local_name *cp = data;
3393 struct pending_cmd *cmd;
3394 struct hci_request req;
3401 /* If the old values are the same as the new ones just return a
3402 * direct command complete event.
3404 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3405 !memcmp(hdev->short_name, cp->short_name,
3406 sizeof(hdev->short_name))) {
3407 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
/* Short name is never written to the controller; store it directly. */
3412 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3414 if (!hdev_is_powered(hdev)) {
3415 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3417 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3422 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
3428 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3434 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3436 hci_req_init(&req, hdev);
/* NOTE(review): the update_name()/EIR call inside this branch is elided. */
3438 if (lmp_bredr_capable(hdev)) {
3443 /* The name is stored in the scan response data and so
3444 * no need to udpate the advertising data here.
3446 if (lmp_le_capable(hdev))
3447 update_scan_rsp_data(&req);
3449 err = hci_req_run(&req, set_name_complete);
3451 mgmt_pending_remove(cmd);
3454 hci_dev_unlock(hdev);
/* Read Local OOB Data (MGMT_OP_READ_LOCAL_OOB_DATA) handler.
 * Requires a powered, SSP-capable controller with no identical request
 * already pending; issues the extended-OOB HCI command when Secure
 * Connections is enabled, the classic one otherwise.
 */
3458 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3459 void *data, u16 data_len)
3461 struct pending_cmd *cmd;
3464 BT_DBG("%s", hdev->name);
3468 if (!hdev_is_powered(hdev)) {
3469 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3470 MGMT_STATUS_NOT_POWERED);
3474 if (!lmp_ssp_capable(hdev)) {
3475 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3476 MGMT_STATUS_NOT_SUPPORTED);
/* Only one outstanding OOB read at a time. */
3480 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3481 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3486 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
3492 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
3493 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
3496 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
3499 mgmt_pending_remove(cmd);
3502 hci_dev_unlock(hdev);
/* Add Remote OOB Data (MGMT_OP_ADD_REMOTE_OOB_DATA) handler.
 * Dispatches on the payload length: the basic size stores hash +
 * randomizer, the extended size stores the Secure Connections variant,
 * and any other length is rejected as invalid parameters.
 */
3506 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3507 void *data, u16 len)
3511 BT_DBG("%s ", hdev->name);
3515 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
3516 struct mgmt_cp_add_remote_oob_data *cp = data;
3519 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3520 cp->hash, cp->randomizer);
3522 status = MGMT_STATUS_FAILED;
3524 status = MGMT_STATUS_SUCCESS;
3526 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3527 status, &cp->addr, sizeof(cp->addr));
3528 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
3529 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
3532 err = hci_add_remote_oob_ext_data(hdev, &cp->addr.bdaddr,
3538 status = MGMT_STATUS_FAILED;
3540 status = MGMT_STATUS_SUCCESS;
3542 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3543 status, &cp->addr, sizeof(cp->addr));
3545 BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
3546 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3547 MGMT_STATUS_INVALID_PARAMS);
3550 hci_dev_unlock(hdev);
/* Remove Remote OOB Data handler: deletes any stored OOB data for the
 * address and reports success, or INVALID_PARAMS when deletion failed.
 */
3554 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3555 void *data, u16 len)
3557 struct mgmt_cp_remove_remote_oob_data *cp = data;
3561 BT_DBG("%s", hdev->name);
3565 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
3567 status = MGMT_STATUS_INVALID_PARAMS;
3569 status = MGMT_STATUS_SUCCESS;
3571 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3572 status, &cp->addr, sizeof(cp->addr));
3574 hci_dev_unlock(hdev);
/* Report a failed Start Discovery: resets discovery state to STOPPED and
 * completes the pending command with the translated HCI status, echoing
 * the requested discovery type back to the caller.
 */
3578 static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
3580 struct pending_cmd *cmd;
3584 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3586 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3590 type = hdev->discovery.type;
3592 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3593 &type, sizeof(type));
3594 mgmt_pending_remove(cmd);
/* Request-completion callback for Start Discovery: on failure report it
 * and bail; on success mark discovery as FINDING and, for LE-based
 * discovery types, arm the delayed work that disables the LE scan after
 * the discovery timeout.
 */
3599 static void start_discovery_complete(struct hci_dev *hdev, u8 status)
3601 unsigned long timeout = 0;
3603 BT_DBG("status %d", status);
3607 mgmt_start_discovery_failed(hdev, status);
3608 hci_dev_unlock(hdev);
3613 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3614 hci_dev_unlock(hdev);
3616 switch (hdev->discovery.type) {
3617 case DISCOV_TYPE_LE:
3618 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3621 case DISCOV_TYPE_INTERLEAVED:
3622 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
/* BR/EDR inquiry times out on its own; no LE-scan-disable timer needed. */
3625 case DISCOV_TYPE_BREDR:
3629 BT_ERR("Invalid discovery type %d", hdev->discovery.type);
/* NOTE(review): the timeout guard before queueing is elided — confirm. */
3635 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout);
/* Start Discovery (MGMT_OP_START_DISCOVERY) handler.
 * Validates power/state preconditions, then builds an HCI request per
 * discovery type: a classic Inquiry for BR/EDR, or an active LE scan
 * (with random-address handling and background-scan suspension) for LE
 * and interleaved discovery. Runs the request with
 * start_discovery_complete as the completion callback.
 */
3638 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
3639 void *data, u16 len)
3641 struct mgmt_cp_start_discovery *cp = data;
3642 struct pending_cmd *cmd;
3643 struct hci_cp_le_set_scan_param param_cp;
3644 struct hci_cp_le_set_scan_enable enable_cp;
3645 struct hci_cp_inquiry inq_cp;
3646 struct hci_request req;
3647 /* General inquiry access code (GIAC) */
3648 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3649 u8 status, own_addr_type;
3652 BT_DBG("%s", hdev->name);
3656 if (!hdev_is_powered(hdev)) {
3657 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3658 MGMT_STATUS_NOT_POWERED);
/* Periodic inquiry and discovery are mutually exclusive. */
3662 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
3663 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3668 if (hdev->discovery.state != DISCOVERY_STOPPED) {
3669 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3674 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
3680 hdev->discovery.type = cp->type;
3682 hci_req_init(&req, hdev);
3684 switch (hdev->discovery.type) {
3685 case DISCOV_TYPE_BREDR:
3686 status = mgmt_bredr_support(hdev);
3688 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3690 mgmt_pending_remove(cmd);
3694 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
3695 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3697 mgmt_pending_remove(cmd);
/* Drop stale results before starting a fresh inquiry. */
3701 hci_inquiry_cache_flush(hdev);
3703 memset(&inq_cp, 0, sizeof(inq_cp));
3704 memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
3705 inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
3706 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
3709 case DISCOV_TYPE_LE:
3710 case DISCOV_TYPE_INTERLEAVED:
3711 status = mgmt_le_support(hdev);
3713 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3715 mgmt_pending_remove(cmd);
/* Interleaved discovery additionally needs BR/EDR enabled. */
3719 if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
3720 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
3721 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3722 MGMT_STATUS_NOT_SUPPORTED);
3723 mgmt_pending_remove(cmd);
/* Cannot scan while advertising. */
3727 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
3728 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3729 MGMT_STATUS_REJECTED);
3730 mgmt_pending_remove(cmd);
3734 /* If controller is scanning, it means the background scanning
3735 * is running. Thus, we should temporarily stop it in order to
3736 * set the discovery scanning parameters.
3738 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
3739 hci_req_add_le_scan_disable(&req);
/* NOTE(review): "¶m_cp" below looks like mojibake for "&param_cp"
 * (HTML-entity damage in this copy) — restore before compiling.
 */
3741 memset(¶m_cp, 0, sizeof(param_cp));
3743 /* All active scans will be done with either a resolvable
3744 * private address (when privacy feature has been enabled)
3745 * or unresolvable private address.
3747 err = hci_update_random_address(&req, true, &own_addr_type);
3749 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3750 MGMT_STATUS_FAILED);
3751 mgmt_pending_remove(cmd);
3755 param_cp.type = LE_SCAN_ACTIVE;
3756 param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
3757 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
3758 param_cp.own_address_type = own_addr_type;
3759 hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
3762 memset(&enable_cp, 0, sizeof(enable_cp));
3763 enable_cp.enable = LE_SCAN_ENABLE;
3764 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3765 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
/* default: unknown discovery type. */
3770 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3771 MGMT_STATUS_INVALID_PARAMS);
3772 mgmt_pending_remove(cmd);
3776 err = hci_req_run(&req, start_discovery_complete);
3778 mgmt_pending_remove(cmd);
3780 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
3783 hci_dev_unlock(hdev);
/* Report a failed Stop Discovery: completes the pending command with the
 * translated HCI status and the current discovery type, then removes it.
 */
3787 static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
3789 struct pending_cmd *cmd;
3792 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
3796 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3797 &hdev->discovery.type, sizeof(hdev->discovery.type));
3798 mgmt_pending_remove(cmd);
/* Request-completion callback for Stop Discovery: on failure report it,
 * otherwise transition discovery state to STOPPED.
 */
3803 static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
3805 BT_DBG("status %d", status);
3810 mgmt_stop_discovery_failed(hdev, status);
3814 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3817 hci_dev_unlock(hdev);
/* Stop Discovery (MGMT_OP_STOP_DISCOVERY) handler.
 * Rejects the request when no discovery is active or the type does not
 * match the one in progress; otherwise queues hci_stop_discovery() into
 * a request. If no HCI commands were needed (-ENODATA) it completes
 * immediately, else it waits for stop_discovery_complete.
 */
3820 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
3823 struct mgmt_cp_stop_discovery *mgmt_cp = data;
3824 struct pending_cmd *cmd;
3825 struct hci_request req;
3828 BT_DBG("%s", hdev->name);
3832 if (!hci_discovery_active(hdev)) {
3833 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3834 MGMT_STATUS_REJECTED, &mgmt_cp->type,
3835 sizeof(mgmt_cp->type));
3839 if (hdev->discovery.type != mgmt_cp->type) {
3840 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3841 MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
3842 sizeof(mgmt_cp->type));
3846 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
3852 hci_req_init(&req, hdev);
3854 hci_stop_discovery(&req);
3856 err = hci_req_run(&req, stop_discovery_complete);
3858 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
3862 mgmt_pending_remove(cmd);
3864 /* If no HCI commands were sent we're done */
3865 if (err == -ENODATA) {
3866 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
3867 &mgmt_cp->type, sizeof(mgmt_cp->type));
3868 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3872 hci_dev_unlock(hdev);
/* Confirm Name (MGMT_OP_CONFIRM_NAME) handler.
 * During active discovery, marks an inquiry-cache entry as name-known or
 * name-needed (triggering name resolution ordering update) based on the
 * caller's name_known flag.
 */
3876 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
3879 struct mgmt_cp_confirm_name *cp = data;
3880 struct inquiry_entry *e;
3883 BT_DBG("%s", hdev->name);
3887 if (!hci_discovery_active(hdev)) {
3888 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3889 MGMT_STATUS_FAILED, &cp->addr,
/* Only entries whose name is still unknown can be confirmed. */
3894 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
3896 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3897 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
3902 if (cp->name_known) {
3903 e->name_state = NAME_KNOWN;
3906 e->name_state = NAME_NEEDED;
3907 hci_inquiry_cache_update_resolve(hdev, e);
3910 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
3914 hci_dev_unlock(hdev);
/* Block Device (MGMT_OP_BLOCK_DEVICE) handler: adds the address to the
 * blacklist, emits Device Blocked to other mgmt sockets on success, and
 * completes with the resulting status.
 */
3918 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
3921 struct mgmt_cp_block_device *cp = data;
3925 BT_DBG("%s", hdev->name);
3927 if (!bdaddr_type_is_valid(cp->addr.type))
3928 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
3929 MGMT_STATUS_INVALID_PARAMS,
3930 &cp->addr, sizeof(cp->addr));
3934 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
3936 status = MGMT_STATUS_FAILED;
3940 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
3942 status = MGMT_STATUS_SUCCESS;
3945 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
3946 &cp->addr, sizeof(cp->addr));
3948 hci_dev_unlock(hdev);
/* Unblock Device (MGMT_OP_UNBLOCK_DEVICE) handler: removes the address
 * from the blacklist, emits Device Unblocked on success; a failed delete
 * maps to INVALID_PARAMS (address was not blocked).
 */
3953 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
3956 struct mgmt_cp_unblock_device *cp = data;
3960 BT_DBG("%s", hdev->name);
3962 if (!bdaddr_type_is_valid(cp->addr.type))
3963 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
3964 MGMT_STATUS_INVALID_PARAMS,
3965 &cp->addr, sizeof(cp->addr));
3969 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
3971 status = MGMT_STATUS_INVALID_PARAMS;
3975 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
3977 status = MGMT_STATUS_SUCCESS;
3980 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
3981 &cp->addr, sizeof(cp->addr));
3983 hci_dev_unlock(hdev);
/* Set Device ID (MGMT_OP_SET_DEVICE_ID) handler.
 * Validates the source field (0x0000-0x0002), stores the DI record
 * fields on hdev, acks the command, and runs an HCI request so the
 * updated EIR (built in elided lines) reaches the controller.
 */
3988 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
3991 struct mgmt_cp_set_device_id *cp = data;
3992 struct hci_request req;
3996 BT_DBG("%s", hdev->name);
3998 source = __le16_to_cpu(cp->source);
/* 0x0000 = disabled, 0x0001 = Bluetooth SIG, 0x0002 = USB IF. */
4000 if (source > 0x0002)
4001 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
4002 MGMT_STATUS_INVALID_PARAMS);
4006 hdev->devid_source = source;
4007 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
4008 hdev->devid_product = __le16_to_cpu(cp->product);
4009 hdev->devid_version = __le16_to_cpu(cp->version);
4011 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
4013 hci_req_init(&req, hdev);
4015 hci_req_run(&req, NULL);
4017 hci_dev_unlock(hdev);
/* Request-completion callback for Set Advertising: on failure answers
 * every pending SET_ADVERTISING command with the error; on success sends
 * each a settings response and emits New Settings.
 */
4022 static void set_advertising_complete(struct hci_dev *hdev, u8 status)
4024 struct cmd_lookup match = { NULL, hdev };
4027 u8 mgmt_err = mgmt_status(status);
4029 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
4030 cmd_status_rsp, &mgmt_err);
4034 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
4037 new_settings(hdev, match.sk);
/* Set Advertising (MGMT_OP_SET_ADVERTISING) handler.
 * When the device is unpowered, the value is unchanged, or LE links
 * exist, only the flag is toggled and responses sent directly; otherwise
 * an HCI request enables/disables advertising on the controller.
 */
4043 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
4046 struct mgmt_mode *cp = data;
4047 struct pending_cmd *cmd;
4048 struct hci_request req;
4049 u8 val, enabled, status;
4052 BT_DBG("request for %s", hdev->name);
4054 status = mgmt_le_support(hdev);
4056 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4059 if (cp->val != 0x00 && cp->val != 0x01)
4060 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4061 MGMT_STATUS_INVALID_PARAMS);
4066 enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);
4068 /* The following conditions are ones which mean that we should
4069 * not do any HCI communication but directly send a mgmt
4070 * response to user space (after toggling the flag if
4073 if (!hdev_is_powered(hdev) || val == enabled ||
4074 hci_conn_num(hdev, LE_LINK) > 0) {
4075 bool changed = false;
4077 if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
4078 change_bit(HCI_ADVERTISING, &hdev->dev_flags);
4082 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
4087 err = new_settings(hdev, sk);
/* Serialize against in-flight SET_ADVERTISING / SET_LE requests. */
4092 if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
4093 mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
4094 err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4099 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
4105 hci_req_init(&req, hdev);
4108 enable_advertising(&req);
4110 disable_advertising(&req);
4112 err = hci_req_run(&req, set_advertising_complete);
4114 mgmt_pending_remove(cmd);
4117 hci_dev_unlock(hdev);
/* Set Static Address (MGMT_OP_SET_STATIC_ADDRESS) handler.
 * Only allowed on LE-capable, powered-off controllers. A non-ANY address
 * must be a valid static random address: not BDADDR_NONE and with the
 * two most significant bits set, per the Core spec.
 */
4121 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
4122 void *data, u16 len)
4124 struct mgmt_cp_set_static_address *cp = data;
4127 BT_DBG("%s", hdev->name);
4129 if (!lmp_le_capable(hdev))
4130 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4131 MGMT_STATUS_NOT_SUPPORTED);
/* Changing the static address while powered would confuse peers. */
4133 if (hdev_is_powered(hdev))
4134 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4135 MGMT_STATUS_REJECTED);
4137 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
4138 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
4139 return cmd_status(sk, hdev->id,
4140 MGMT_OP_SET_STATIC_ADDRESS,
4141 MGMT_STATUS_INVALID_PARAMS);
4143 /* Two most significant bits shall be set */
4144 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
4145 return cmd_status(sk, hdev->id,
4146 MGMT_OP_SET_STATIC_ADDRESS,
4147 MGMT_STATUS_INVALID_PARAMS);
4152 bacpy(&hdev->static_addr, &cp->bdaddr);
4154 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
4156 hci_dev_unlock(hdev);
/* Set Scan Parameters (MGMT_OP_SET_SCAN_PARAMS) handler.
 * Validates interval and window against the HCI-allowed range
 * (0x0004-0x4000, window <= interval), stores them, and restarts the
 * passive background scan if it is currently running so the new values
 * take effect.
 */
4161 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
4162 void *data, u16 len)
4164 struct mgmt_cp_set_scan_params *cp = data;
4165 __u16 interval, window;
4168 BT_DBG("%s", hdev->name);
4170 if (!lmp_le_capable(hdev))
4171 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4172 MGMT_STATUS_NOT_SUPPORTED);
4174 interval = __le16_to_cpu(cp->interval);
4176 if (interval < 0x0004 || interval > 0x4000)
4177 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4178 MGMT_STATUS_INVALID_PARAMS);
4180 window = __le16_to_cpu(cp->window);
4182 if (window < 0x0004 || window > 0x4000)
4183 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4184 MGMT_STATUS_INVALID_PARAMS);
4186 if (window > interval)
4187 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4188 MGMT_STATUS_INVALID_PARAMS);
4192 hdev->le_scan_interval = interval;
4193 hdev->le_scan_window = window;
4195 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
4197 /* If background scan is running, restart it so new parameters are
4200 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
4201 hdev->discovery.state == DISCOVERY_STOPPED) {
4202 struct hci_request req;
4204 hci_req_init(&req, hdev);
4206 hci_req_add_le_scan_disable(&req);
4207 hci_req_add_le_passive_scan(&req);
4209 hci_req_run(&req, NULL);
4212 hci_dev_unlock(hdev);
/* Request-completion callback for Set Fast Connectable: on failure sends
 * the error; on success syncs HCI_FAST_CONNECTABLE to the requested
 * value and emits a settings response plus New Settings.
 */
4217 static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
4219 struct pending_cmd *cmd;
4221 BT_DBG("status 0x%02x", status);
4225 cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4230 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4231 mgmt_status(status));
4233 struct mgmt_mode *cp = cmd->param;
4236 set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4238 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4240 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4241 new_settings(hdev, cmd->sk);
4244 mgmt_pending_remove(cmd);
4247 hci_dev_unlock(hdev);
/* Set Fast Connectable (MGMT_OP_SET_FAST_CONNECTABLE) handler.
 * Requires BR/EDR enabled on a >= 1.2 controller that is powered and
 * connectable. No-ops with a settings response if the flag already
 * matches; otherwise issues the page-scan parameter change via an HCI
 * request completed by fast_connectable_complete.
 */
4250 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
4251 void *data, u16 len)
4253 struct mgmt_mode *cp = data;
4254 struct pending_cmd *cmd;
4255 struct hci_request req;
4258 BT_DBG("%s", hdev->name);
4260 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
4261 hdev->hci_ver < BLUETOOTH_VER_1_2)
4262 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4263 MGMT_STATUS_NOT_SUPPORTED);
4265 if (cp->val != 0x00 && cp->val != 0x01)
4266 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4267 MGMT_STATUS_INVALID_PARAMS);
4269 if (!hdev_is_powered(hdev))
4270 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4271 MGMT_STATUS_NOT_POWERED);
4273 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4274 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4275 MGMT_STATUS_REJECTED);
/* Only one fast-connectable change may be in flight. */
4279 if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
4280 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
/* Already in the requested state: answer without HCI traffic. */
4285 if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
4286 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
4291 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
4298 hci_req_init(&req, hdev);
4300 write_fast_connectable(&req, cp->val);
4302 err = hci_req_run(&req, fast_connectable_complete);
4304 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4305 MGMT_STATUS_FAILED);
4306 mgmt_pending_remove(cmd);
4310 hci_dev_unlock(hdev);
/* Queue a Write Scan Enable reflecting the current connectable and
 * discoverable flags, after first forcing fast connectable off.
 */
4315 static void set_bredr_scan(struct hci_request *req)
4317 struct hci_dev *hdev = req->hdev;
4320 /* Ensure that fast connectable is disabled. This function will
4321 * not do anything if the page scan parameters are already what
4324 write_fast_connectable(req, false);
/* NOTE(review): the "scan |= SCAN_PAGE" line is elided in this view. */
4326 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4328 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
4329 scan |= SCAN_INQUIRY;
4332 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* Request-completion callback for Set BR/EDR: on failure rolls the
 * HCI_BREDR_ENABLED flag back and reports the error; on success sends
 * the settings response and New Settings.
 */
4335 static void set_bredr_complete(struct hci_dev *hdev, u8 status)
4337 struct pending_cmd *cmd;
4339 BT_DBG("status 0x%02x", status);
4343 cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
4348 u8 mgmt_err = mgmt_status(status);
4350 /* We need to restore the flag if related HCI commands
4353 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4355 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
4357 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
4358 new_settings(hdev, cmd->sk);
4361 mgmt_pending_remove(cmd);
4364 hci_dev_unlock(hdev);
/* Set BR/EDR (MGMT_OP_SET_BREDR) handler.
 * Allows toggling BR/EDR support on dual-mode controllers with LE
 * enabled. While unpowered the flag (and, on disable, the dependent
 * BR/EDR-only flags) is just toggled; while powered only enabling is
 * permitted, and it is applied via an HCI request that updates scan
 * state and advertising data flags.
 */
4367 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
4369 struct mgmt_mode *cp = data;
4370 struct pending_cmd *cmd;
4371 struct hci_request req;
4374 BT_DBG("request for %s", hdev->name);
4376 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
4377 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4378 MGMT_STATUS_NOT_SUPPORTED);
/* BR/EDR cannot be the only transport left off: LE must stay enabled. */
4380 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
4381 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4382 MGMT_STATUS_REJECTED);
4384 if (cp->val != 0x00 && cp->val != 0x01)
4385 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4386 MGMT_STATUS_INVALID_PARAMS);
4390 if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
4391 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4395 if (!hdev_is_powered(hdev)) {
/* Disabling BR/EDR also clears every BR/EDR-dependent setting. */
4397 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4398 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
4399 clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
4400 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4401 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
4404 change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4406 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4410 err = new_settings(hdev, sk);
4414 /* Reject disabling when powered on */
4416 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4417 MGMT_STATUS_REJECTED);
4421 if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
4422 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4427 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
4433 /* We need to flip the bit already here so that update_adv_data
4434 * generates the correct flags.
4436 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4438 hci_req_init(&req, hdev);
4440 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4441 set_bredr_scan(&req);
4443 /* Since only the advertising data flags will change, there
4444 * is no need to update the scan response data.
4446 update_adv_data(&req);
4448 err = hci_req_run(&req, set_bredr_complete);
4450 mgmt_pending_remove(cmd);
4453 hci_dev_unlock(hdev);
/* Set Secure Connections (MGMT_OP_SET_SECURE_CONN) handler.
 * Accepts 0x00 (off), 0x01 (on) or 0x02 (SC-only mode). While unpowered
 * the flags are toggled directly; while powered the change is sent to
 * the controller via Write Secure Connections Support and the SC-only
 * flag is updated afterwards.
 */
4457 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
4458 void *data, u16 len)
4460 struct mgmt_mode *cp = data;
4461 struct pending_cmd *cmd;
4465 BT_DBG("request for %s", hdev->name);
4467 status = mgmt_bredr_support(hdev);
4469 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
/* Allow SC on non-capable hardware only with the debugfs force flag. */
4472 if (!lmp_sc_capable(hdev) &&
4473 !test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
4474 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4475 MGMT_STATUS_NOT_SUPPORTED);
4477 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4478 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4479 MGMT_STATUS_INVALID_PARAMS);
4483 if (!hdev_is_powered(hdev)) {
4487 changed = !test_and_set_bit(HCI_SC_ENABLED,
4489 if (cp->val == 0x02)
4490 set_bit(HCI_SC_ONLY, &hdev->dev_flags);
4492 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4494 changed = test_and_clear_bit(HCI_SC_ENABLED,
4496 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4499 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4504 err = new_settings(hdev, sk);
4509 if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
4510 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
/* No change in either the enabled or SC-only state: just respond. */
4517 if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
4518 (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
4519 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4523 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
4529 err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
4531 mgmt_pending_remove(cmd);
4535 if (cp->val == 0x02)
4536 set_bit(HCI_SC_ONLY, &hdev->dev_flags);
4538 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4541 hci_dev_unlock(hdev);
/* Set Debug Keys (MGMT_OP_SET_DEBUG_KEYS) handler.
 * 0x00 disables, 0x01 keeps debug keys, 0x02 additionally enables SSP
 * debug mode on the controller. Toggles HCI_KEEP_DEBUG_KEYS and
 * HCI_USE_DEBUG_KEYS and, when powered with SSP on and the use-flag
 * changed, writes the SSP debug mode to the controller.
 */
4545 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
4546 void *data, u16 len)
4548 struct mgmt_mode *cp = data;
4549 bool changed, use_changed;
4552 BT_DBG("request for %s", hdev->name);
4554 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4555 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
4556 MGMT_STATUS_INVALID_PARAMS);
4561 changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
4564 changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
4567 if (cp->val == 0x02)
4568 use_changed = !test_and_set_bit(HCI_USE_DEBUG_KEYS,
4571 use_changed = test_and_clear_bit(HCI_USE_DEBUG_KEYS,
4574 if (hdev_is_powered(hdev) && use_changed &&
4575 test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
4576 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
4577 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
4578 sizeof(mode), &mode);
4581 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
4586 err = new_settings(hdev, sk);
4589 hci_dev_unlock(hdev);
/* MGMT Set Privacy command handler.
 * Requires LE support, privacy value 0x00/0x01, and a powered-off
 * controller (changing the IRK while powered is REJECTED).
 */
4593 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4596 struct mgmt_cp_set_privacy *cp = cp_data;
4600 BT_DBG("request for %s", hdev->name);
4602 if (!lmp_le_capable(hdev))
4603 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4604 MGMT_STATUS_NOT_SUPPORTED);
4606 if (cp->privacy != 0x00 && cp->privacy != 0x01)
4607 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4608 MGMT_STATUS_INVALID_PARAMS);
4610 if (hdev_is_powered(hdev))
4611 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4612 MGMT_STATUS_REJECTED);
4616 /* If user space supports this command it is also expected to
4617 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
4619 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
/* Enabling: store the supplied IRK and force RPA regeneration. */
4622 changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
4623 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
4624 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
/* Disabling: wipe the IRK so no stale key lingers. */
4626 changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
4627 memset(hdev->irk, 0, sizeof(hdev->irk));
4628 clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
4631 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
4636 err = new_settings(hdev, sk);
4639 hci_dev_unlock(hdev);
/* Validate the address type of an IRK entry supplied by user space.
 * Public LE addresses are always acceptable; random addresses must be
 * static random (two most significant bits set).
 */
4643 static bool irk_is_valid(struct mgmt_irk_info *irk)
4645 switch (irk->addr.type) {
4646 case BDADDR_LE_PUBLIC:
4649 case BDADDR_LE_RANDOM:
4650 /* Two most significant bits shall be set */
4651 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT Load IRKs command handler: atomically replaces the whole set of
 * Identity Resolving Keys with the list supplied by user space.
 */
4659 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4662 struct mgmt_cp_load_irks *cp = cp_data;
/* Upper bound chosen so expected_len below cannot overflow u16. */
4663 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
4664 sizeof(struct mgmt_irk_info));
4665 u16 irk_count, expected_len;
4668 BT_DBG("request for %s", hdev->name);
4670 if (!lmp_le_capable(hdev))
4671 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4672 MGMT_STATUS_NOT_SUPPORTED);
4674 irk_count = __le16_to_cpu(cp->irk_count);
4675 if (irk_count > max_irk_count) {
4676 BT_ERR("load_irks: too big irk_count value %u", irk_count);
4677 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4678 MGMT_STATUS_INVALID_PARAMS);
/* The payload length must exactly match the declared key count. */
4681 expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
4682 if (expected_len != len) {
4683 BT_ERR("load_irks: expected %u bytes, got %u bytes",
4685 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4686 MGMT_STATUS_INVALID_PARAMS);
4689 BT_DBG("%s irk_count %u", hdev->name, irk_count);
/* Reject the whole request before clearing anything if any single
 * entry is invalid.
 */
4691 for (i = 0; i < irk_count; i++) {
4692 struct mgmt_irk_info *key = &cp->irks[i];
4694 if (!irk_is_valid(key))
4695 return cmd_status(sk, hdev->id,
4697 MGMT_STATUS_INVALID_PARAMS);
/* Full replacement: drop all existing IRKs, then add the new set. */
4702 hci_smp_irks_clear(hdev);
4704 for (i = 0; i < irk_count; i++) {
4705 struct mgmt_irk_info *irk = &cp->irks[i];
4708 if (irk->addr.type == BDADDR_LE_PUBLIC)
4709 addr_type = ADDR_LE_DEV_PUBLIC;
4711 addr_type = ADDR_LE_DEV_RANDOM;
4713 hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
/* User space that loads IRKs is expected to resolve RPAs. */
4717 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4719 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
4721 hci_dev_unlock(hdev);
/* Validate one Long Term Key entry from user space: master must be a
 * boolean (0x00/0x01) and random addresses must be static random.
 */
4726 static bool ltk_is_valid(struct mgmt_ltk_info *key)
4728 if (key->master != 0x00 && key->master != 0x01)
4731 switch (key->addr.type) {
4732 case BDADDR_LE_PUBLIC:
4735 case BDADDR_LE_RANDOM:
4736 /* Two most significant bits shall be set */
4737 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT Load Long Term Keys command handler: atomically replaces all
 * stored SMP LTKs with the list supplied by user space.
 */
4745 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
4746 void *cp_data, u16 len)
4748 struct mgmt_cp_load_long_term_keys *cp = cp_data;
/* Upper bound chosen so expected_len below cannot overflow u16. */
4749 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
4750 sizeof(struct mgmt_ltk_info));
4751 u16 key_count, expected_len;
4754 BT_DBG("request for %s", hdev->name);
4756 if (!lmp_le_capable(hdev))
4757 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4758 MGMT_STATUS_NOT_SUPPORTED);
4760 key_count = __le16_to_cpu(cp->key_count);
4761 if (key_count > max_key_count) {
4762 BT_ERR("load_ltks: too big key_count value %u", key_count);
4763 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4764 MGMT_STATUS_INVALID_PARAMS);
/* The payload length must exactly match the declared key count. */
4767 expected_len = sizeof(*cp) + key_count *
4768 sizeof(struct mgmt_ltk_info);
4769 if (expected_len != len) {
4770 BT_ERR("load_keys: expected %u bytes, got %u bytes",
4772 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4773 MGMT_STATUS_INVALID_PARAMS);
4776 BT_DBG("%s key_count %u", hdev->name, key_count);
/* Validate every entry before destroying the current key store. */
4778 for (i = 0; i < key_count; i++) {
4779 struct mgmt_ltk_info *key = &cp->keys[i];
4781 if (!ltk_is_valid(key))
4782 return cmd_status(sk, hdev->id,
4783 MGMT_OP_LOAD_LONG_TERM_KEYS,
4784 MGMT_STATUS_INVALID_PARAMS);
4789 hci_smp_ltks_clear(hdev);
4791 for (i = 0; i < key_count; i++) {
4792 struct mgmt_ltk_info *key = &cp->keys[i];
4793 u8 type, addr_type, authenticated;
4795 if (key->addr.type == BDADDR_LE_PUBLIC)
4796 addr_type = ADDR_LE_DEV_PUBLIC;
4798 addr_type = ADDR_LE_DEV_RANDOM;
4803 type = SMP_LTK_SLAVE;
/* Map the mgmt key type onto the SMP authentication level. */
4805 switch (key->type) {
4806 case MGMT_LTK_UNAUTHENTICATED:
4807 authenticated = 0x00;
4809 case MGMT_LTK_AUTHENTICATED:
4810 authenticated = 0x01;
4816 hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
4817 authenticated, key->val, key->enc_size, key->ediv,
4821 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
4824 hci_dev_unlock(hdev);
/* Match context passed to get_conn_info_complete() via
 * mgmt_pending_foreach(): identifies the connection whose pending
 * Get Conn Info commands should be answered, and whether the TX power
 * values read from the controller are valid.
 */
4829 struct cmd_conn_lookup {
4830 struct hci_conn *conn;
4831 bool valid_tx_power;
/* mgmt_pending_foreach() callback: completes one pending Get Conn Info
 * command once the RSSI/TX-power cache refresh has finished.
 */
4835 static void get_conn_info_complete(struct pending_cmd *cmd, void *data)
4837 struct cmd_conn_lookup *match = data;
4838 struct mgmt_cp_get_conn_info *cp;
4839 struct mgmt_rp_get_conn_info rp;
4840 struct hci_conn *conn = cmd->user_data;
/* Only answer commands that were waiting on this connection. */
4842 if (conn != match->conn)
4845 cp = (struct mgmt_cp_get_conn_info *) cmd->param;
4847 memset(&rp, 0, sizeof(rp));
4848 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4849 rp.addr.type = cp->addr.type;
4851 if (!match->mgmt_status) {
4852 rp.rssi = conn->rssi;
/* TX power is reported only if the refresh read it successfully;
 * otherwise both values are marked invalid.
 */
4854 if (match->valid_tx_power) {
4855 rp.tx_power = conn->tx_power;
4856 rp.max_tx_power = conn->max_tx_power;
4858 rp.tx_power = HCI_TX_POWER_INVALID;
4859 rp.max_tx_power = HCI_TX_POWER_INVALID;
4863 cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
4864 match->mgmt_status, &rp, sizeof(rp));
/* Release the reference taken when the command was queued. */
4866 hci_conn_drop(conn);
4868 mgmt_pending_remove(cmd);
/* HCI request completion callback for the Get Conn Info cache refresh:
 * resolves which connection the request was for and answers all mgmt
 * commands pending on it.
 */
4871 static void conn_info_refresh_complete(struct hci_dev *hdev, u8 status)
4873 struct hci_cp_read_rssi *cp;
4874 struct hci_conn *conn;
4875 struct cmd_conn_lookup match;
4878 BT_DBG("status 0x%02x", status);
4882 /* TX power data is valid in case request completed successfully,
4883 * otherwise we assume it's not valid. At the moment we assume that
4884 * either both or none of current and max values are valid to keep code
4887 match.valid_tx_power = !status;
4889 /* Commands sent in request are either Read RSSI or Read Transmit Power
4890 * Level so we check which one was last sent to retrieve connection
4891 * handle. Both commands have handle as first parameter so it's safe to
4892 * cast data on the same command struct.
4894 * First command sent is always Read RSSI and we fail only if it fails.
4895 * In other case we simply override error to indicate success as we
4896 * already remembered if TX power value is actually valid.
4898 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
4900 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
4905 BT_ERR("invalid sent_cmd in response");
4909 handle = __le16_to_cpu(cp->handle);
4910 conn = hci_conn_hash_lookup_handle(hdev, handle);
4912 BT_ERR("unknown handle (%d) in response", handle);
4917 match.mgmt_status = mgmt_status(status);
4919 /* Cache refresh is complete, now reply for mgmt request for given
4922 mgmt_pending_foreach(MGMT_OP_GET_CONN_INFO, hdev,
4923 get_conn_info_complete, &match);
4926 hci_dev_unlock(hdev);
/* MGMT Get Connection Information command handler.
 * Replies immediately from the per-connection cache when it is fresh;
 * otherwise issues Read RSSI / Read TX Power HCI commands and defers
 * the reply to conn_info_refresh_complete().
 */
4929 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
4932 struct mgmt_cp_get_conn_info *cp = data;
4933 struct mgmt_rp_get_conn_info rp;
4934 struct hci_conn *conn;
4935 unsigned long conn_info_age;
4938 BT_DBG("%s", hdev->name);
4940 memset(&rp, 0, sizeof(rp));
4941 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4942 rp.addr.type = cp->addr.type;
4944 if (!bdaddr_type_is_valid(cp->addr.type))
4945 return cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
4946 MGMT_STATUS_INVALID_PARAMS,
4951 if (!hdev_is_powered(hdev)) {
4952 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
4953 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* BR/EDR addresses map to ACL links, everything else to LE links. */
4957 if (cp->addr.type == BDADDR_BREDR)
4958 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
4961 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
4963 if (!conn || conn->state != BT_CONNECTED) {
4964 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
4965 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
4969 /* To avoid client trying to guess when to poll again for information we
4970 * calculate conn info age as random value between min/max set in hdev.
4972 conn_info_age = hdev->conn_info_min_age +
4973 prandom_u32_max(hdev->conn_info_max_age -
4974 hdev->conn_info_min_age);
4976 /* Query controller to refresh cached values if they are too old or were
4979 if (time_after(jiffies, conn->conn_info_timestamp +
4980 msecs_to_jiffies(conn_info_age)) ||
4981 !conn->conn_info_timestamp) {
4982 struct hci_request req;
4983 struct hci_cp_read_tx_power req_txp_cp;
4984 struct hci_cp_read_rssi req_rssi_cp;
4985 struct pending_cmd *cmd;
4987 hci_req_init(&req, hdev);
4988 req_rssi_cp.handle = cpu_to_le16(conn->handle);
4989 hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
4992 /* For LE links TX power does not change thus we don't need to
4993 * query for it once value is known.
4995 if (!bdaddr_type_is_le(cp->addr.type) ||
4996 conn->tx_power == HCI_TX_POWER_INVALID) {
4997 req_txp_cp.handle = cpu_to_le16(conn->handle);
4998 req_txp_cp.type = 0x00;
4999 hci_req_add(&req, HCI_OP_READ_TX_POWER,
5000 sizeof(req_txp_cp), &req_txp_cp);
5003 /* Max TX power needs to be read only once per connection */
5004 if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
5005 req_txp_cp.handle = cpu_to_le16(conn->handle);
5006 req_txp_cp.type = 0x01;
5007 hci_req_add(&req, HCI_OP_READ_TX_POWER,
5008 sizeof(req_txp_cp), &req_txp_cp);
5011 err = hci_req_run(&req, conn_info_refresh_complete);
5015 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
/* Hold the connection until the deferred completion runs. */
5022 hci_conn_hold(conn);
5023 cmd->user_data = conn;
5025 conn->conn_info_timestamp = jiffies;
5027 /* Cache is valid, just reply with values cached in hci_conn */
5028 rp.rssi = conn->rssi;
5029 rp.tx_power = conn->tx_power;
5030 rp.max_tx_power = conn->max_tx_power;
5032 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5033 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
5037 hci_dev_unlock(hdev);
/* HCI request completion callback for Get Clock Information: fills the
 * local (and, for a connection-specific query, piconet) clock values
 * and completes the pending mgmt command.
 */
5041 static void get_clock_info_complete(struct hci_dev *hdev, u8 status)
5043 struct mgmt_cp_get_clock_info *cp;
5044 struct mgmt_rp_get_clock_info rp;
5045 struct hci_cp_read_clock *hci_cp;
5046 struct pending_cmd *cmd;
5047 struct hci_conn *conn;
5049 BT_DBG("%s status %u", hdev->name, status);
5053 hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
/* "which" != 0 means a piconet clock read tied to a connection
 * handle; look up the corresponding hci_conn.
 */
5057 if (hci_cp->which) {
5058 u16 handle = __le16_to_cpu(hci_cp->handle);
5059 conn = hci_conn_hash_lookup_handle(hdev, handle);
/* The pending command was queued with conn as its user data. */
5064 cmd = mgmt_pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
5070 memset(&rp, 0, sizeof(rp));
5071 memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));
5076 rp.local_clock = cpu_to_le32(hdev->clock);
5079 rp.piconet_clock = cpu_to_le32(conn->clock);
5080 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
5084 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
5086 mgmt_pending_remove(cmd);
/* Drop the reference taken when the command was queued. */
5088 hci_conn_drop(conn);
5091 hci_dev_unlock(hdev);
/* MGMT Get Clock Information command handler (BR/EDR only).
 * BDADDR_ANY queries just the local clock; a peer address additionally
 * reads the piconet clock of that ACL connection.
 */
5094 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
5097 struct mgmt_cp_get_clock_info *cp = data;
5098 struct mgmt_rp_get_clock_info rp;
5099 struct hci_cp_read_clock hci_cp;
5100 struct pending_cmd *cmd;
5101 struct hci_request req;
5102 struct hci_conn *conn;
5105 BT_DBG("%s", hdev->name);
5107 memset(&rp, 0, sizeof(rp));
5108 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5109 rp.addr.type = cp->addr.type;
5111 if (cp->addr.type != BDADDR_BREDR)
5112 return cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5113 MGMT_STATUS_INVALID_PARAMS,
5118 if (!hdev_is_powered(hdev)) {
5119 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5120 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* A non-ANY address must refer to an existing ACL connection. */
5124 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5125 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5127 if (!conn || conn->state != BT_CONNECTED) {
5128 err = cmd_complete(sk, hdev->id,
5129 MGMT_OP_GET_CLOCK_INFO,
5130 MGMT_STATUS_NOT_CONNECTED,
5138 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
5144 hci_req_init(&req, hdev);
/* First read: local clock (hci_cp zeroed, which = 0x00). */
5146 memset(&hci_cp, 0, sizeof(hci_cp));
5147 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
/* Hold the connection for the deferred completion, then also read
 * the piconet clock for that handle.
 */
5150 hci_conn_hold(conn);
5151 cmd->user_data = conn;
5153 hci_cp.handle = cpu_to_le16(conn->handle);
5154 hci_cp.which = 0x01; /* Piconet clock */
5155 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5158 err = hci_req_run(&req, get_clock_info_complete);
5160 mgmt_pending_remove(cmd);
5163 hci_dev_unlock(hdev);
/* Emit a Device Added mgmt event to all sockets except the originator. */
5167 static void device_added(struct sock *sk, struct hci_dev *hdev,
5168 bdaddr_t *bdaddr, u8 type, u8 action)
5170 struct mgmt_ev_device_added ev;
5172 bacpy(&ev.addr.bdaddr, bdaddr);
5173 ev.addr.type = type;
5176 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
/* MGMT Add Device command handler (LE only in this path).
 * action 0x00 maps to report-on-discovery auto-connect policy, 0x01 to
 * always auto-connect; other values are INVALID_PARAMS.
 */
5179 static int add_device(struct sock *sk, struct hci_dev *hdev,
5180 void *data, u16 len)
5182 struct mgmt_cp_add_device *cp = data;
5183 u8 auto_conn, addr_type;
5186 BT_DBG("%s", hdev->name);
/* Only concrete LE addresses may be added here. */
5188 if (!bdaddr_type_is_le(cp->addr.type) ||
5189 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
5190 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5191 MGMT_STATUS_INVALID_PARAMS,
5192 &cp->addr, sizeof(cp->addr));
5194 if (cp->action != 0x00 && cp->action != 0x01)
5195 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5196 MGMT_STATUS_INVALID_PARAMS,
5197 &cp->addr, sizeof(cp->addr));
5201 if (cp->addr.type == BDADDR_LE_PUBLIC)
5202 addr_type = ADDR_LE_DEV_PUBLIC;
5204 addr_type = ADDR_LE_DEV_RANDOM;
5207 auto_conn = HCI_AUTO_CONN_ALWAYS;
5209 auto_conn = HCI_AUTO_CONN_REPORT;
5211 /* If the connection parameters don't exist for this device,
5212 * they will be created and configured with defaults.
5214 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
5216 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5218 &cp->addr, sizeof(cp->addr));
/* Notify other mgmt sockets about the newly added device. */
5222 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
5224 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5225 MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));
5228 hci_dev_unlock(hdev);
/* Emit a Device Removed mgmt event to all sockets except the originator. */
5232 static void device_removed(struct sock *sk, struct hci_dev *hdev,
5233 bdaddr_t *bdaddr, u8 type)
5235 struct mgmt_ev_device_removed ev;
5237 bacpy(&ev.addr.bdaddr, bdaddr);
5238 ev.addr.type = type;
5240 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
/* MGMT Remove Device command handler.
 * A concrete LE address removes that device's connection parameters;
 * BDADDR_ANY (with type 0) removes all non-disabled LE parameter
 * entries at once.
 */
5243 static int remove_device(struct sock *sk, struct hci_dev *hdev,
5244 void *data, u16 len)
5246 struct mgmt_cp_remove_device *cp = data;
5249 BT_DBG("%s", hdev->name);
5253 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5254 struct hci_conn_params *params;
5257 if (!bdaddr_type_is_le(cp->addr.type)) {
5258 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5259 MGMT_STATUS_INVALID_PARAMS,
5260 &cp->addr, sizeof(cp->addr));
5264 if (cp->addr.type == BDADDR_LE_PUBLIC)
5265 addr_type = ADDR_LE_DEV_PUBLIC;
5267 addr_type = ADDR_LE_DEV_RANDOM;
5269 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5272 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5273 MGMT_STATUS_INVALID_PARAMS,
5274 &cp->addr, sizeof(cp->addr));
/* Disabled entries were never "added" via Add Device, so removing
 * them through this command is an invalid request.
 */
5278 if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
5279 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5280 MGMT_STATUS_INVALID_PARAMS,
5281 &cp->addr, sizeof(cp->addr));
5285 list_del(&params->action);
5286 list_del(&params->list);
/* Parameters changed, so the passive scan list must be rebuilt. */
5288 hci_update_background_scan(hdev);
5290 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
5292 struct hci_conn_params *p, *tmp;
/* For the wildcard form only address type 0 is valid. */
5294 if (cp->addr.type) {
5295 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5296 MGMT_STATUS_INVALID_PARAMS,
5297 &cp->addr, sizeof(cp->addr));
5301 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
5302 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
5304 device_removed(sk, hdev, &p->addr, p->addr_type);
5305 list_del(&p->action);
5310 BT_DBG("All LE connection parameters were removed");
5312 hci_update_background_scan(hdev);
5315 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5316 MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));
5319 hci_dev_unlock(hdev);
/* MGMT Load Connection Parameters command handler: bulk-loads LE
 * connection parameters. Invalid individual entries are logged and
 * skipped rather than failing the whole command.
 */
5323 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
5326 struct mgmt_cp_load_conn_param *cp = data;
/* Upper bound chosen so expected_len below cannot overflow u16. */
5327 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
5328 sizeof(struct mgmt_conn_param));
5329 u16 param_count, expected_len;
5332 if (!lmp_le_capable(hdev))
5333 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5334 MGMT_STATUS_NOT_SUPPORTED);
5336 param_count = __le16_to_cpu(cp->param_count);
5337 if (param_count > max_param_count) {
5338 BT_ERR("load_conn_param: too big param_count value %u",
5340 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5341 MGMT_STATUS_INVALID_PARAMS);
/* The payload length must exactly match the declared entry count. */
5344 expected_len = sizeof(*cp) + param_count *
5345 sizeof(struct mgmt_conn_param);
5346 if (expected_len != len) {
5347 BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
5349 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5350 MGMT_STATUS_INVALID_PARAMS);
5353 BT_DBG("%s param_count %u", hdev->name, param_count);
/* Start from a clean slate of disabled (unreferenced) entries. */
5357 hci_conn_params_clear_disabled(hdev);
5359 for (i = 0; i < param_count; i++) {
5360 struct mgmt_conn_param *param = &cp->params[i];
5361 struct hci_conn_params *hci_param;
5362 u16 min, max, latency, timeout;
5365 BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
5368 if (param->addr.type == BDADDR_LE_PUBLIC) {
5369 addr_type = ADDR_LE_DEV_PUBLIC;
5370 } else if (param->addr.type == BDADDR_LE_RANDOM) {
5371 addr_type = ADDR_LE_DEV_RANDOM;
5373 BT_ERR("Ignoring invalid connection parameters");
5377 min = le16_to_cpu(param->min_interval);
5378 max = le16_to_cpu(param->max_interval);
5379 latency = le16_to_cpu(param->latency);
5380 timeout = le16_to_cpu(param->timeout);
5382 BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
5383 min, max, latency, timeout);
/* Range-check the four values against the LE spec limits. */
5385 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
5386 BT_ERR("Ignoring invalid connection parameters");
5390 hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
5393 BT_ERR("Failed to add connection parameters");
5397 hci_param->conn_min_interval = min;
5398 hci_param->conn_max_interval = max;
5399 hci_param->conn_latency = latency;
5400 hci_param->supervision_timeout = timeout;
5403 hci_dev_unlock(hdev);
5405 return cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0, NULL, 0);
/* MGMT Set External Configuration command handler: toggles whether the
 * controller is configured externally. Only allowed while powered off
 * and only on controllers with the EXTERNAL_CONFIG quirk.
 */
5408 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
5409 void *data, u16 len)
5411 struct mgmt_cp_set_external_config *cp = data;
5415 BT_DBG("%s", hdev->name);
5417 if (hdev_is_powered(hdev))
5418 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5419 MGMT_STATUS_REJECTED);
5421 if (cp->config != 0x00 && cp->config != 0x01)
5422 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5423 MGMT_STATUS_INVALID_PARAMS);
5425 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
5426 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5427 MGMT_STATUS_NOT_SUPPORTED);
5432 changed = !test_and_set_bit(HCI_EXT_CONFIGURED,
5435 changed = test_and_clear_bit(HCI_EXT_CONFIGURED,
5438 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
5445 err = new_options(hdev, sk);
/* When the configured state flips, the controller migrates between
 * the configured and unconfigured index lists.
 */
5447 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) == is_configured(hdev)) {
5448 mgmt_index_removed(hdev);
5450 if (test_and_change_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
5451 set_bit(HCI_CONFIG, &hdev->dev_flags);
5452 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
/* Re-run power-on setup so the new configuration takes effect. */
5454 queue_work(hdev->req_workqueue, &hdev->power_on);
5456 set_bit(HCI_RAW, &hdev->flags);
5457 mgmt_index_added(hdev);
5462 hci_dev_unlock(hdev);
/* MGMT Set Public Address command handler: stores the address to be
 * programmed via the driver's set_bdaddr hook. Only allowed while
 * powered off, with a non-ANY address, on drivers providing the hook.
 */
5466 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
5467 void *data, u16 len)
5469 struct mgmt_cp_set_public_address *cp = data;
5473 BT_DBG("%s", hdev->name);
5475 if (hdev_is_powered(hdev))
5476 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5477 MGMT_STATUS_REJECTED);
5479 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
5480 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5481 MGMT_STATUS_INVALID_PARAMS);
5483 if (!hdev->set_bdaddr)
5484 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5485 MGMT_STATUS_NOT_SUPPORTED);
5489 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
5490 bacpy(&hdev->public_addr, &cp->bdaddr);
5492 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
5499 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
5500 err = new_options(hdev, sk);
/* Setting the address may complete configuration: move the index
 * from the unconfigured to the configured list and power-cycle.
 */
5502 if (is_configured(hdev)) {
5503 mgmt_index_removed(hdev);
5505 clear_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
5507 set_bit(HCI_CONFIG, &hdev->dev_flags);
5508 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
5510 queue_work(hdev->req_workqueue, &hdev->power_on);
5514 hci_dev_unlock(hdev);
/* Dispatch table for mgmt commands, indexed by opcode. The second
 * field marks variable-length commands (payload may exceed the base
 * size); the third is the minimum/exact expected payload size.
 * Slot order must match the MGMT_OP_* opcode numbering.
 */
5518 static const struct mgmt_handler {
5519 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
5523 } mgmt_handlers[] = {
5524 { NULL }, /* 0x0000 (no command) */
5525 { read_version, false, MGMT_READ_VERSION_SIZE },
5526 { read_commands, false, MGMT_READ_COMMANDS_SIZE },
5527 { read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
5528 { read_controller_info, false, MGMT_READ_INFO_SIZE },
5529 { set_powered, false, MGMT_SETTING_SIZE },
5530 { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
5531 { set_connectable, false, MGMT_SETTING_SIZE },
5532 { set_fast_connectable, false, MGMT_SETTING_SIZE },
5533 { set_pairable, false, MGMT_SETTING_SIZE },
5534 { set_link_security, false, MGMT_SETTING_SIZE },
5535 { set_ssp, false, MGMT_SETTING_SIZE },
5536 { set_hs, false, MGMT_SETTING_SIZE },
5537 { set_le, false, MGMT_SETTING_SIZE },
5538 { set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
5539 { set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
5540 { add_uuid, false, MGMT_ADD_UUID_SIZE },
5541 { remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
5542 { load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
5543 { load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
5544 { disconnect, false, MGMT_DISCONNECT_SIZE },
5545 { get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
5546 { pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
5547 { pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
5548 { set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
5549 { pair_device, false, MGMT_PAIR_DEVICE_SIZE },
5550 { cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
5551 { unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
5552 { user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
5553 { user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
5554 { user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
5555 { user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
5556 { read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
5557 { add_remote_oob_data, true, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
5558 { remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
5559 { start_discovery, false, MGMT_START_DISCOVERY_SIZE },
5560 { stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
5561 { confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
5562 { block_device, false, MGMT_BLOCK_DEVICE_SIZE },
5563 { unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
5564 { set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
5565 { set_advertising, false, MGMT_SETTING_SIZE },
5566 { set_bredr, false, MGMT_SETTING_SIZE },
5567 { set_static_address, false, MGMT_SET_STATIC_ADDRESS_SIZE },
5568 { set_scan_params, false, MGMT_SET_SCAN_PARAMS_SIZE },
5569 { set_secure_conn, false, MGMT_SETTING_SIZE },
5570 { set_debug_keys, false, MGMT_SETTING_SIZE },
5571 { set_privacy, false, MGMT_SET_PRIVACY_SIZE },
5572 { load_irks, true, MGMT_LOAD_IRKS_SIZE },
5573 { get_conn_info, false, MGMT_GET_CONN_INFO_SIZE },
5574 { get_clock_info, false, MGMT_GET_CLOCK_INFO_SIZE },
5575 { add_device, false, MGMT_ADD_DEVICE_SIZE },
5576 { remove_device, false, MGMT_REMOVE_DEVICE_SIZE },
5577 { load_conn_param, true, MGMT_LOAD_CONN_PARAM_SIZE },
5578 { read_unconf_index_list, false, MGMT_READ_UNCONF_INDEX_LIST_SIZE },
5579 { read_config_info, false, MGMT_READ_CONFIG_INFO_SIZE },
5580 { set_external_config, false, MGMT_SET_EXTERNAL_CONFIG_SIZE },
5581 { set_public_address, false, MGMT_SET_PUBLIC_ADDRESS_SIZE },
/* Entry point for mgmt messages arriving on an HCI control socket.
 * Copies the message from user space, validates header, index, opcode
 * and payload length, then dispatches to the handler table above.
 */
5584 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
5588 struct mgmt_hdr *hdr;
5589 u16 opcode, index, len;
5590 struct hci_dev *hdev = NULL;
5591 const struct mgmt_handler *handler;
5594 BT_DBG("got %zu bytes", msglen);
5596 if (msglen < sizeof(*hdr))
5599 buf = kmalloc(msglen, GFP_KERNEL);
5603 if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
/* Header fields arrive little-endian on the wire. */
5609 opcode = __le16_to_cpu(hdr->opcode);
5610 index = __le16_to_cpu(hdr->index);
5611 len = __le16_to_cpu(hdr->len);
5613 if (len != msglen - sizeof(*hdr)) {
5618 if (index != MGMT_INDEX_NONE) {
5619 hdev = hci_dev_get(index);
5621 err = cmd_status(sk, index, opcode,
5622 MGMT_STATUS_INVALID_INDEX);
/* Controllers still in setup/config or claimed by a user channel
 * are not visible through the mgmt interface.
 */
5626 if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
5627 test_bit(HCI_CONFIG, &hdev->dev_flags) ||
5628 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5629 err = cmd_status(sk, index, opcode,
5630 MGMT_STATUS_INVALID_INDEX);
/* Unconfigured controllers accept only the config-related ops. */
5634 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
5635 opcode != MGMT_OP_READ_CONFIG_INFO &&
5636 opcode != MGMT_OP_SET_EXTERNAL_CONFIG &&
5637 opcode != MGMT_OP_SET_PUBLIC_ADDRESS) {
5638 err = cmd_status(sk, index, opcode,
5639 MGMT_STATUS_INVALID_INDEX);
5644 if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
5645 mgmt_handlers[opcode].func == NULL) {
5646 BT_DBG("Unknown op %u", opcode);
5647 err = cmd_status(sk, index, opcode,
5648 MGMT_STATUS_UNKNOWN_COMMAND);
/* Index-list style commands must not carry a controller index... */
5652 if (hdev && (opcode <= MGMT_OP_READ_INDEX_LIST ||
5653 opcode == MGMT_OP_READ_UNCONF_INDEX_LIST)) {
5654 err = cmd_status(sk, index, opcode,
5655 MGMT_STATUS_INVALID_INDEX);
/* ...and all other commands require one. */
5659 if (!hdev && (opcode > MGMT_OP_READ_INDEX_LIST &&
5660 opcode != MGMT_OP_READ_UNCONF_INDEX_LIST)) {
5661 err = cmd_status(sk, index, opcode,
5662 MGMT_STATUS_INVALID_INDEX);
5666 handler = &mgmt_handlers[opcode];
/* Variable-length commands need at least data_len bytes, fixed-size
 * commands exactly data_len bytes.
 */
5668 if ((handler->var_len && len < handler->data_len) ||
5669 (!handler->var_len && len != handler->data_len)) {
5670 err = cmd_status(sk, index, opcode,
5671 MGMT_STATUS_INVALID_PARAMS);
5676 mgmt_init_hdev(sk, hdev);
5678 cp = buf + sizeof(*hdr);
5680 err = handler->func(sk, hdev, cp, len);
/* Announce a new controller index; unconfigured controllers get the
 * dedicated UNCONF event. Non-BR/EDR and raw-quirk devices are not
 * exposed via mgmt at all.
 */
5694 void mgmt_index_added(struct hci_dev *hdev)
5696 if (hdev->dev_type != HCI_BREDR)
5699 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
5702 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
5703 mgmt_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0, NULL);
5705 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
/* Announce removal of a controller index, first failing every pending
 * mgmt command for it with INVALID_INDEX.
 */
5708 void mgmt_index_removed(struct hci_dev *hdev)
5710 u8 status = MGMT_STATUS_INVALID_INDEX;
5712 if (hdev->dev_type != HCI_BREDR)
5715 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
/* Opcode 0 matches all pending commands. */
5718 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
5720 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
5721 mgmt_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0, NULL);
5723 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
5726 /* This function requires the caller holds hdev->lock */
/* Re-sort every LE connection parameter entry onto the correct action
 * list (pending connections vs. reports) after power-on, then refresh
 * the passive background scan.
 */
5727 static void restart_le_actions(struct hci_dev *hdev)
5729 struct hci_conn_params *p;
5731 list_for_each_entry(p, &hdev->le_conn_params, list) {
5732 /* Needed for AUTO_OFF case where might not "really"
5733 * have been powered off.
5735 list_del_init(&p->action);
5737 switch (p->auto_connect) {
5738 case HCI_AUTO_CONN_ALWAYS:
5739 list_add(&p->action, &hdev->pend_le_conns);
5741 case HCI_AUTO_CONN_REPORT:
5742 list_add(&p->action, &hdev->pend_le_reports);
5749 hci_update_background_scan(hdev);
/* HCI request completion callback for the power-on init sequence:
 * restarts LE auto-connect actions, answers all pending Set Powered
 * commands and broadcasts the new settings.
 */
5752 static void powered_complete(struct hci_dev *hdev, u8 status)
5754 struct cmd_lookup match = { NULL, hdev };
5756 BT_DBG("status 0x%02x", status);
5760 restart_le_actions(hdev);
5762 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
5764 new_settings(hdev, match.sk);
5766 hci_dev_unlock(hdev);
/* Build and run the HCI request that synchronizes controller state
 * with the mgmt settings after power-on: SSP mode, LE host support,
 * advertising/scan-response data, auth enable and page scan.
 * Returns the hci_req_run() result (0 when the request was queued).
 */
5772 static int powered_update_hci(struct hci_dev *hdev)
5774 struct hci_request req;
5777 hci_req_init(&req, hdev);
/* Enable SSP in the controller if mgmt says on but host cap is off. */
5779 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
5780 !lmp_host_ssp_capable(hdev)) {
5783 hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
5786 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
5787 lmp_bredr_capable(hdev)) {
5788 struct hci_cp_write_le_host_supported cp;
5791 cp.simul = lmp_le_br_capable(hdev);
5793 /* Check first if we already have the right
5794 * host state (host features set)
5796 if (cp.le != lmp_host_le_capable(hdev) ||
5797 cp.simul != lmp_host_le_br_capable(hdev))
5798 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
5802 if (lmp_le_capable(hdev)) {
5803 /* Make sure the controller has a good default for
5804 * advertising data. This also applies to the case
5805 * where BR/EDR was toggled during the AUTO_OFF phase.
5807 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
5808 update_adv_data(&req);
5809 update_scan_rsp_data(&req);
5812 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
5813 enable_advertising(&req);
/* Sync authentication requirement with the link-security setting. */
5816 link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
5817 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
5818 hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
5819 sizeof(link_sec), &link_sec);
5821 if (lmp_bredr_capable(hdev)) {
5822 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
5823 set_bredr_scan(&req);
5829 return hci_req_run(&req, powered_complete);
/* Called by the HCI core on a power state change. On power-up it kicks
 * off the controller-sync request; on power-down it fails all pending
 * commands with NOT_POWERED and announces a zero class of device.
 */
5832 int mgmt_powered(struct hci_dev *hdev, u8 powered)
5834 struct cmd_lookup match = { NULL, hdev };
5835 u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
5836 u8 zero_cod[] = { 0, 0, 0 };
5839 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
/* powered_update_hci() returning 0 means its completion callback
 * will answer the pending Set Powered commands later.
 */
5843 if (powered_update_hci(hdev) == 0)
5846 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
5851 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
5852 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);
5854 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
5855 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
5856 zero_cod, sizeof(zero_cod), NULL);
5859 err = new_settings(hdev, match.sk);
/* Fail a pending Set Powered command: RFKILLED when blocked by rfkill,
 * generic FAILED otherwise.
 */
5867 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
5869 struct pending_cmd *cmd;
5872 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
5876 if (err == -ERFKILL)
5877 status = MGMT_STATUS_RFKILLED;
5879 status = MGMT_STATUS_FAILED;
5881 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
5883 mgmt_pending_remove(cmd);
/* Timer callback fired when the discoverable timeout expires: clears
 * the discoverable flags, disables inquiry scan (keeping page scan),
 * refreshes advertising data and broadcasts the new settings.
 */
5886 void mgmt_discoverable_timeout(struct hci_dev *hdev)
5888 struct hci_request req;
5892 /* When discoverable timeout triggers, then just make sure
5893 * the limited discoverable flag is cleared. Even in the case
5894 * of a timeout triggered from general discoverable, it is
5895 * safe to unconditionally clear the flag.
5897 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
5898 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
5900 hci_req_init(&req, hdev);
5901 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
/* SCAN_PAGE only: stay connectable while no longer discoverable. */
5902 u8 scan = SCAN_PAGE;
5903 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
5904 sizeof(scan), &scan);
5907 update_adv_data(&req);
5908 hci_req_run(&req, NULL);
5910 hdev->discov_timeout = 0;
5912 new_settings(hdev, NULL);
5914 hci_dev_unlock(hdev);
/* Called by the HCI core when the controller's discoverable state
 * changes outside of a mgmt command (e.g. via raw HCI).
 */
5917 void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
5921 /* Nothing needed here if there's a pending command since that
5922 * commands request completion callback takes care of everything
5925 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
5928 /* Powering off may clear the scan mode - don't let that interfere */
5929 if (!discoverable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
5933 changed = !test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
5935 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
5936 changed = test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
5940 struct hci_request req;
5942 /* In case this change in discoverable was triggered by
5943 * a disabling of connectable there could be a need to
5944 * update the advertising flags.
5946 hci_req_init(&req, hdev);
5947 update_adv_data(&req);
5948 hci_req_run(&req, NULL);
5950 new_settings(hdev, NULL);
5954 void mgmt_connectable(struct hci_dev *hdev, u8 connectable)
5958 /* Nothing needed here if there's a pending command since that
5959 * commands request completion callback takes care of everything
5962 if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
5965 /* Powering off may clear the scan mode - don't let that interfere */
5966 if (!connectable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
5970 changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
5972 changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
5975 new_settings(hdev, NULL);
5978 void mgmt_advertising(struct hci_dev *hdev, u8 advertising)
5980 /* Powering off may stop advertising - don't let that interfere */
5981 if (!advertising && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
5985 set_bit(HCI_ADVERTISING, &hdev->dev_flags);
5987 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
5990 void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
5992 u8 mgmt_err = mgmt_status(status);
5994 if (scan & SCAN_PAGE)
5995 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
5996 cmd_status_rsp, &mgmt_err);
5998 if (scan & SCAN_INQUIRY)
5999 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
6000 cmd_status_rsp, &mgmt_err);
6003 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
6006 struct mgmt_ev_new_link_key ev;
6008 memset(&ev, 0, sizeof(ev));
6010 ev.store_hint = persistent;
6011 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6012 ev.key.addr.type = BDADDR_BREDR;
6013 ev.key.type = key->type;
6014 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
6015 ev.key.pin_len = key->pin_len;
6017 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
6020 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
6022 if (ltk->authenticated)
6023 return MGMT_LTK_AUTHENTICATED;
6025 return MGMT_LTK_UNAUTHENTICATED;
6028 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
6030 struct mgmt_ev_new_long_term_key ev;
6032 memset(&ev, 0, sizeof(ev));
6034 /* Devices using resolvable or non-resolvable random addresses
6035 * without providing an indentity resolving key don't require
6036 * to store long term keys. Their addresses will change the
6039 * Only when a remote device provides an identity address
6040 * make sure the long term key is stored. If the remote
6041 * identity is known, the long term keys are internally
6042 * mapped to the identity address. So allow static random
6043 * and public addresses here.
6045 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6046 (key->bdaddr.b[5] & 0xc0) != 0xc0)
6047 ev.store_hint = 0x00;
6049 ev.store_hint = persistent;
6051 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6052 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
6053 ev.key.type = mgmt_ltk_type(key);
6054 ev.key.enc_size = key->enc_size;
6055 ev.key.ediv = key->ediv;
6056 ev.key.rand = key->rand;
6058 if (key->type == SMP_LTK)
6061 memcpy(ev.key.val, key->val, sizeof(key->val));
6063 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
6066 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
6068 struct mgmt_ev_new_irk ev;
6070 memset(&ev, 0, sizeof(ev));
6072 /* For identity resolving keys from devices that are already
6073 * using a public address or static random address, do not
6074 * ask for storing this key. The identity resolving key really
6075 * is only mandatory for devices using resovlable random
6078 * Storing all identity resolving keys has the downside that
6079 * they will be also loaded on next boot of they system. More
6080 * identity resolving keys, means more time during scanning is
6081 * needed to actually resolve these addresses.
6083 if (bacmp(&irk->rpa, BDADDR_ANY))
6084 ev.store_hint = 0x01;
6086 ev.store_hint = 0x00;
6088 bacpy(&ev.rpa, &irk->rpa);
6089 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
6090 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
6091 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
6093 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
6096 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
6099 struct mgmt_ev_new_csrk ev;
6101 memset(&ev, 0, sizeof(ev));
6103 /* Devices using resolvable or non-resolvable random addresses
6104 * without providing an indentity resolving key don't require
6105 * to store signature resolving keys. Their addresses will change
6106 * the next time around.
6108 * Only when a remote device provides an identity address
6109 * make sure the signature resolving key is stored. So allow
6110 * static random and public addresses here.
6112 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6113 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
6114 ev.store_hint = 0x00;
6116 ev.store_hint = persistent;
6118 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
6119 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
6120 ev.key.master = csrk->master;
6121 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
6123 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
6126 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
6127 u8 bdaddr_type, u8 store_hint, u16 min_interval,
6128 u16 max_interval, u16 latency, u16 timeout)
6130 struct mgmt_ev_new_conn_param ev;
6132 if (!hci_is_identity_address(bdaddr, bdaddr_type))
6135 memset(&ev, 0, sizeof(ev));
6136 bacpy(&ev.addr.bdaddr, bdaddr);
6137 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
6138 ev.store_hint = store_hint;
6139 ev.min_interval = cpu_to_le16(min_interval);
6140 ev.max_interval = cpu_to_le16(max_interval);
6141 ev.latency = cpu_to_le16(latency);
6142 ev.timeout = cpu_to_le16(timeout);
6144 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
6147 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
6150 eir[eir_len++] = sizeof(type) + data_len;
6151 eir[eir_len++] = type;
6152 memcpy(&eir[eir_len], data, data_len);
6153 eir_len += data_len;
6158 void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6159 u8 addr_type, u32 flags, u8 *name, u8 name_len,
6163 struct mgmt_ev_device_connected *ev = (void *) buf;
6166 bacpy(&ev->addr.bdaddr, bdaddr);
6167 ev->addr.type = link_to_bdaddr(link_type, addr_type);
6169 ev->flags = __cpu_to_le32(flags);
6172 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
6175 if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
6176 eir_len = eir_append_data(ev->eir, eir_len,
6177 EIR_CLASS_OF_DEV, dev_class, 3);
6179 ev->eir_len = cpu_to_le16(eir_len);
6181 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
6182 sizeof(*ev) + eir_len, NULL);
6185 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
6187 struct mgmt_cp_disconnect *cp = cmd->param;
6188 struct sock **sk = data;
6189 struct mgmt_rp_disconnect rp;
6191 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6192 rp.addr.type = cp->addr.type;
6194 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
6200 mgmt_pending_remove(cmd);
6203 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
6205 struct hci_dev *hdev = data;
6206 struct mgmt_cp_unpair_device *cp = cmd->param;
6207 struct mgmt_rp_unpair_device rp;
6209 memset(&rp, 0, sizeof(rp));
6210 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6211 rp.addr.type = cp->addr.type;
6213 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
6215 cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
6217 mgmt_pending_remove(cmd);
6220 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
6221 u8 link_type, u8 addr_type, u8 reason,
6222 bool mgmt_connected)
6224 struct mgmt_ev_device_disconnected ev;
6225 struct pending_cmd *power_off;
6226 struct sock *sk = NULL;
6228 power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
6230 struct mgmt_mode *cp = power_off->param;
6232 /* The connection is still in hci_conn_hash so test for 1
6233 * instead of 0 to know if this is the last one.
6235 if (!cp->val && hci_conn_count(hdev) == 1) {
6236 cancel_delayed_work(&hdev->power_off);
6237 queue_work(hdev->req_workqueue, &hdev->power_off.work);
6241 if (!mgmt_connected)
6244 if (link_type != ACL_LINK && link_type != LE_LINK)
6247 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
6249 bacpy(&ev.addr.bdaddr, bdaddr);
6250 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6253 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
6258 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
6262 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
6263 u8 link_type, u8 addr_type, u8 status)
6265 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
6266 struct mgmt_cp_disconnect *cp;
6267 struct mgmt_rp_disconnect rp;
6268 struct pending_cmd *cmd;
6270 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
6273 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
6279 if (bacmp(bdaddr, &cp->addr.bdaddr))
6282 if (cp->addr.type != bdaddr_type)
6285 bacpy(&rp.addr.bdaddr, bdaddr);
6286 rp.addr.type = bdaddr_type;
6288 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
6289 mgmt_status(status), &rp, sizeof(rp));
6291 mgmt_pending_remove(cmd);
6294 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6295 u8 addr_type, u8 status)
6297 struct mgmt_ev_connect_failed ev;
6298 struct pending_cmd *power_off;
6300 power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
6302 struct mgmt_mode *cp = power_off->param;
6304 /* The connection is still in hci_conn_hash so test for 1
6305 * instead of 0 to know if this is the last one.
6307 if (!cp->val && hci_conn_count(hdev) == 1) {
6308 cancel_delayed_work(&hdev->power_off);
6309 queue_work(hdev->req_workqueue, &hdev->power_off.work);
6313 bacpy(&ev.addr.bdaddr, bdaddr);
6314 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6315 ev.status = mgmt_status(status);
6317 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
6320 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
6322 struct mgmt_ev_pin_code_request ev;
6324 bacpy(&ev.addr.bdaddr, bdaddr);
6325 ev.addr.type = BDADDR_BREDR;
6328 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
6331 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6334 struct pending_cmd *cmd;
6335 struct mgmt_rp_pin_code_reply rp;
6337 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
6341 bacpy(&rp.addr.bdaddr, bdaddr);
6342 rp.addr.type = BDADDR_BREDR;
6344 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
6345 mgmt_status(status), &rp, sizeof(rp));
6347 mgmt_pending_remove(cmd);
6350 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6353 struct pending_cmd *cmd;
6354 struct mgmt_rp_pin_code_reply rp;
6356 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
6360 bacpy(&rp.addr.bdaddr, bdaddr);
6361 rp.addr.type = BDADDR_BREDR;
6363 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
6364 mgmt_status(status), &rp, sizeof(rp));
6366 mgmt_pending_remove(cmd);
6369 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6370 u8 link_type, u8 addr_type, u32 value,
6373 struct mgmt_ev_user_confirm_request ev;
6375 BT_DBG("%s", hdev->name);
6377 bacpy(&ev.addr.bdaddr, bdaddr);
6378 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6379 ev.confirm_hint = confirm_hint;
6380 ev.value = cpu_to_le32(value);
6382 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
6386 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6387 u8 link_type, u8 addr_type)
6389 struct mgmt_ev_user_passkey_request ev;
6391 BT_DBG("%s", hdev->name);
6393 bacpy(&ev.addr.bdaddr, bdaddr);
6394 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6396 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
6400 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6401 u8 link_type, u8 addr_type, u8 status,
6404 struct pending_cmd *cmd;
6405 struct mgmt_rp_user_confirm_reply rp;
6408 cmd = mgmt_pending_find(opcode, hdev);
6412 bacpy(&rp.addr.bdaddr, bdaddr);
6413 rp.addr.type = link_to_bdaddr(link_type, addr_type);
6414 err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
6417 mgmt_pending_remove(cmd);
6422 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6423 u8 link_type, u8 addr_type, u8 status)
6425 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6426 status, MGMT_OP_USER_CONFIRM_REPLY);
6429 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6430 u8 link_type, u8 addr_type, u8 status)
6432 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6434 MGMT_OP_USER_CONFIRM_NEG_REPLY);
6437 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6438 u8 link_type, u8 addr_type, u8 status)
6440 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6441 status, MGMT_OP_USER_PASSKEY_REPLY);
6444 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6445 u8 link_type, u8 addr_type, u8 status)
6447 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6449 MGMT_OP_USER_PASSKEY_NEG_REPLY);
6452 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
6453 u8 link_type, u8 addr_type, u32 passkey,
6456 struct mgmt_ev_passkey_notify ev;
6458 BT_DBG("%s", hdev->name);
6460 bacpy(&ev.addr.bdaddr, bdaddr);
6461 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6462 ev.passkey = __cpu_to_le32(passkey);
6463 ev.entered = entered;
6465 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
6468 void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6469 u8 addr_type, u8 status)
6471 struct mgmt_ev_auth_failed ev;
6473 bacpy(&ev.addr.bdaddr, bdaddr);
6474 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6475 ev.status = mgmt_status(status);
6477 mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
6480 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
6482 struct cmd_lookup match = { NULL, hdev };
6486 u8 mgmt_err = mgmt_status(status);
6487 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
6488 cmd_status_rsp, &mgmt_err);
6492 if (test_bit(HCI_AUTH, &hdev->flags))
6493 changed = !test_and_set_bit(HCI_LINK_SECURITY,
6496 changed = test_and_clear_bit(HCI_LINK_SECURITY,
6499 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
6503 new_settings(hdev, match.sk);
6509 static void clear_eir(struct hci_request *req)
6511 struct hci_dev *hdev = req->hdev;
6512 struct hci_cp_write_eir cp;
6514 if (!lmp_ext_inq_capable(hdev))
6517 memset(hdev->eir, 0, sizeof(hdev->eir));
6519 memset(&cp, 0, sizeof(cp));
6521 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
6524 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
6526 struct cmd_lookup match = { NULL, hdev };
6527 struct hci_request req;
6528 bool changed = false;
6531 u8 mgmt_err = mgmt_status(status);
6533 if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
6534 &hdev->dev_flags)) {
6535 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
6536 new_settings(hdev, NULL);
6539 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
6545 changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
6547 changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
6549 changed = test_and_clear_bit(HCI_HS_ENABLED,
6552 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
6555 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
6558 new_settings(hdev, match.sk);
6563 hci_req_init(&req, hdev);
6565 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
6566 if (test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
6567 hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
6568 sizeof(enable), &enable);
6574 hci_req_run(&req, NULL);
6577 void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
6579 struct cmd_lookup match = { NULL, hdev };
6580 bool changed = false;
6583 u8 mgmt_err = mgmt_status(status);
6586 if (test_and_clear_bit(HCI_SC_ENABLED,
6588 new_settings(hdev, NULL);
6589 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
6592 mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
6593 cmd_status_rsp, &mgmt_err);
6598 changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
6600 changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
6601 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
6604 mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
6605 settings_rsp, &match);
6608 new_settings(hdev, match.sk);
6614 static void sk_lookup(struct pending_cmd *cmd, void *data)
6616 struct cmd_lookup *match = data;
6618 if (match->sk == NULL) {
6619 match->sk = cmd->sk;
6620 sock_hold(match->sk);
6624 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
6627 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
6629 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
6630 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
6631 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
6634 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
6641 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
6643 struct mgmt_cp_set_local_name ev;
6644 struct pending_cmd *cmd;
6649 memset(&ev, 0, sizeof(ev));
6650 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
6651 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
6653 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
6655 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
6657 /* If this is a HCI command related to powering on the
6658 * HCI dev don't send any mgmt signals.
6660 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
6664 mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
6665 cmd ? cmd->sk : NULL);
6668 void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
6669 u8 *randomizer192, u8 *hash256,
6670 u8 *randomizer256, u8 status)
6672 struct pending_cmd *cmd;
6674 BT_DBG("%s status %u", hdev->name, status);
6676 cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
6681 cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
6682 mgmt_status(status));
6684 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
6685 hash256 && randomizer256) {
6686 struct mgmt_rp_read_local_oob_ext_data rp;
6688 memcpy(rp.hash192, hash192, sizeof(rp.hash192));
6689 memcpy(rp.randomizer192, randomizer192,
6690 sizeof(rp.randomizer192));
6692 memcpy(rp.hash256, hash256, sizeof(rp.hash256));
6693 memcpy(rp.randomizer256, randomizer256,
6694 sizeof(rp.randomizer256));
6696 cmd_complete(cmd->sk, hdev->id,
6697 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
6700 struct mgmt_rp_read_local_oob_data rp;
6702 memcpy(rp.hash, hash192, sizeof(rp.hash));
6703 memcpy(rp.randomizer, randomizer192,
6704 sizeof(rp.randomizer));
6706 cmd_complete(cmd->sk, hdev->id,
6707 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
6712 mgmt_pending_remove(cmd);
6715 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6716 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
6717 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
6720 struct mgmt_ev_device_found *ev = (void *) buf;
6723 /* Don't send events for a non-kernel initiated discovery. With
6724 * LE one exception is if we have pend_le_reports > 0 in which
6725 * case we're doing passive scanning and want these events.
6727 if (!hci_discovery_active(hdev)) {
6728 if (link_type == ACL_LINK)
6730 if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
6734 /* Make sure that the buffer is big enough. The 5 extra bytes
6735 * are for the potential CoD field.
6737 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
6740 memset(buf, 0, sizeof(buf));
6742 bacpy(&ev->addr.bdaddr, bdaddr);
6743 ev->addr.type = link_to_bdaddr(link_type, addr_type);
6745 ev->flags = cpu_to_le32(flags);
6748 memcpy(ev->eir, eir, eir_len);
6750 if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
6751 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
6754 if (scan_rsp_len > 0)
6755 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
6757 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
6758 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
6760 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
6763 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6764 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
6766 struct mgmt_ev_device_found *ev;
6767 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
6770 ev = (struct mgmt_ev_device_found *) buf;
6772 memset(buf, 0, sizeof(buf));
6774 bacpy(&ev->addr.bdaddr, bdaddr);
6775 ev->addr.type = link_to_bdaddr(link_type, addr_type);
6778 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
6781 ev->eir_len = cpu_to_le16(eir_len);
6783 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
6786 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
6788 struct mgmt_ev_discovering ev;
6789 struct pending_cmd *cmd;
6791 BT_DBG("%s discovering %u", hdev->name, discovering);
6794 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
6796 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
6799 u8 type = hdev->discovery.type;
6801 cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
6803 mgmt_pending_remove(cmd);
6806 memset(&ev, 0, sizeof(ev));
6807 ev.type = hdev->discovery.type;
6808 ev.discovering = discovering;
6810 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
6813 static void adv_enable_complete(struct hci_dev *hdev, u8 status)
6815 BT_DBG("%s status %u", hdev->name, status);
6817 /* Clear the advertising mgmt setting if we failed to re-enable it */
6819 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
6820 new_settings(hdev, NULL);
6824 void mgmt_reenable_advertising(struct hci_dev *hdev)
6826 struct hci_request req;
6828 if (hci_conn_num(hdev, LE_LINK) > 0)
6831 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
6834 hci_req_init(&req, hdev);
6835 enable_advertising(&req);
6837 /* If this fails we have no option but to let user space know
6838 * that we've disabled advertising.
6840 if (hci_req_run(&req, adv_enable_complete) < 0) {
6841 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
6842 new_settings(hdev, NULL);