Bluetooth: Add identity address check in param lookup functions
net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
                return -EALREADY;

        hci_req_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);

        if (err < 0)
                return err;

        change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};
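
/* Example: with debugfs mounted (typically at /sys/kernel/debug), DUT
 * mode for hci0 can be toggled from userspace through this entry:
 *
 *      echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 */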

static int features_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        u8 p;

        hci_dev_lock(hdev);
        for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
                           hdev->features[p][0], hdev->features[p][1],
                           hdev->features[p][2], hdev->features[p][3],
                           hdev->features[p][4], hdev->features[p][5],
                           hdev->features[p][6], hdev->features[p][7]);
        }
        if (lmp_le_capable(hdev))
                seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
                           hdev->le_features[0], hdev->le_features[1],
                           hdev->le_features[2], hdev->le_features[3],
                           hdev->le_features[4], hdev->le_features[5],
                           hdev->le_features[6], hdev->le_features[7]);
        hci_dev_unlock(hdev);

        return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
        return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
        .open           = features_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->blacklist, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
        return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
        .open           = blacklist_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bt_uuid *uuid;

        hci_dev_lock(hdev);
        list_for_each_entry(uuid, &hdev->uuids, list) {
                u8 i, val[16];

                /* The Bluetooth UUID values are stored in big endian,
                 * but with reversed byte order. So convert them into
                 * the right order for the %pUb modifier.
                 */
                for (i = 0; i < 16; i++)
                        val[i] = uuid->uuid[15 - i];

                seq_printf(f, "%pUb\n", val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
        return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
        .open           = uuids_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        hci_dev_lock(hdev);

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;
                seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
                           &data->bdaddr,
                           data->pscan_rep_mode, data->pscan_period_mode,
                           data->pscan_mode, data->dev_class[2],
                           data->dev_class[1], data->dev_class[0],
                           __le16_to_cpu(data->clock_offset),
                           data->rssi, data->ssp_mode, e->timestamp);
        }

        hci_dev_unlock(hdev);

        return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
        return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
        .open           = inquiry_cache_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key = list_entry(p, struct link_key, list);
                seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
                           HCI_LINK_KEY_SIZE, key->val, key->pin_len);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
        .open           = link_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
                   hdev->dev_class[1], hdev->dev_class[0]);
        hci_dev_unlock(hdev);

        return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
        return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
        .open           = dev_class_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->voice_setting;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
                        NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        hdev->auto_accept_delay = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->auto_accept_delay;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
                        auto_accept_delay_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
                                     size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
                                      const char __user *user_buf,
                                      size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

        return count;
}

static const struct file_operations force_sc_support_fops = {
        .open           = simple_open,
        .read           = force_sc_support_read,
        .write          = force_sc_support_write,
        .llseek         = default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
                                 size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
        .open           = simple_open,
        .read           = sc_only_mode_read,
        .llseek         = default_llseek,
};

static int idle_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val != 0 && (val < 500 || val > 3600000))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->idle_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->idle_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
                        idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        /* Require the RPA timeout to be at least 30 seconds and at most
         * 24 hours.
         */
        if (val < 30 || val > (60 * 60 * 24))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->rpa_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->rpa_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
                        rpa_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

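        /* Sniff intervals are expressed in baseband slots of 0.625 ms and
         * the specification only allows even, non-zero values, which is
         * what the checks below enforce.
         */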
        if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
                        sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
                        sniff_max_interval_set, "%llu\n");

static int conn_info_min_age_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val > hdev->conn_info_max_age)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->conn_info_min_age = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->conn_info_min_age;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
                        conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val < hdev->conn_info_min_age)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->conn_info_max_age = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->conn_info_max_age;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
                        conn_info_max_age_set, "%llu\n");

static int identity_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        bdaddr_t addr;
        u8 addr_type;

        hci_dev_lock(hdev);

        hci_copy_identity_address(hdev, &addr, &addr_type);

        seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
                   16, hdev->irk, &hdev->rpa);

        hci_dev_unlock(hdev);

        return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
        .open           = identity_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->random_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
        .open           = random_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->static_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
        .open           = static_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static ssize_t force_static_address_read(struct file *file,
                                         char __user *user_buf,
                                         size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
                                          const char __user *user_buf,
                                          size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

        return count;
}

static const struct file_operations force_static_address_fops = {
        .open           = simple_open,
        .read           = force_static_address_read,
        .write          = force_static_address_write,
        .llseek         = default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->le_white_list, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
        return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
        .open           = white_list_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
                struct smp_irk *irk = list_entry(p, struct smp_irk, list);
                seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
                           &irk->bdaddr, irk->addr_type,
                           16, irk->val, &irk->rpa);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_resolving_keys_show,
                           inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
        .open           = identity_resolving_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->long_term_keys) {
                struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
                seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
                           &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
                           ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
                           __le64_to_cpu(ltk->rand), 16, ltk->val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
        .open           = long_term_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

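/* LE connection intervals are expressed in units of 1.25 ms. The range
 * allowed by the specification is 0x0006 (7.5 ms) to 0x0c80 (4 s), which
 * is what the setters below enforce.
 */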
static int conn_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
                        conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
                        conn_max_interval_set, "%llu\n");

static int conn_latency_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val > 0x01f3)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_latency = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_latency_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_latency;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
                        conn_latency_set, "%llu\n");

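/* The LE link supervision timeout is expressed in units of 10 ms; the
 * specification allows values from 0x000a (100 ms) to 0x0c80 (32 s).
 */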
static int supervision_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x000a || val > 0x0c80)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_supv_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int supervision_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_supv_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
                        supervision_timeout_set, "%llu\n");

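/* The advertising channel map is a 3-bit field selecting the LE
 * advertising channels: bit 0 enables channel 37, bit 1 channel 38 and
 * bit 2 channel 39. At least one channel must be enabled, hence the
 * 0x01-0x07 range check.
 */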
static int adv_channel_map_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x01 || val > 0x07)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_adv_channel_map = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_adv_channel_map;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
                        adv_channel_map_set, "%llu\n");

static int device_list_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct hci_conn_params *p;

        hci_dev_lock(hdev);
        list_for_each_entry(p, &hdev->le_conn_params, list) {
                seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
                           p->auto_connect);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int device_list_open(struct inode *inode, struct file *file)
{
        return single_open(file, device_list_show, inode->i_private);
}

static const struct file_operations device_list_fops = {
        .open           = device_list_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
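
/* Example usage, mirroring dut_mode_write() above: a synchronous command
 * round-trip that hands back the Command Complete parameters, with the
 * request lock held around the call:
 *
 *      hci_req_lock(hdev);
 *      skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
 *      hci_req_unlock(hdev);
 *      if (IS_ERR(skb))
 *              return PTR_ERR(skb);
 *      err = -bt_to_errno(skb->data[0]);
 *      kfree_skb(skb);
 */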

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                      unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}
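
/* The request builders below only queue HCI commands on a request; they
 * run when handed to hci_req_sync() or __hci_req_sync(), e.g.:
 *
 *      __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
 */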

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
         * but it does not support page scan related HCI commands.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* Read LE Advertising Channel TX Power */
        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Clear LE White List */
        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

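/* Pick the best inquiry mode the controller can handle: 0x02 selects
 * extended inquiry results, 0x01 inquiry results with RSSI and 0x00 the
 * standard format. The manufacturer/revision special cases match
 * controllers that handle RSSI results without advertising the feature.
 */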
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[0] |= 0x80; /* Encryption Change */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */
                events[5] |= 0x80; /* Encryption Key Refresh Complete */
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

        if (lmp_le_capable(hdev))
                le_setup(req);

        hci_setup_event_mask(req);

        /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
         * local supported commands HCI command.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, the host features page should be
                 * available as well. However, some
                 * controllers list the max_page as 0 as long as SSP
                 * has not been enabled. To achieve proper debugging
                 * output, force the minimum max_page to 1 at least.
                 */
                hdev->max_page = 0x01;

                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                hci_setup_inquiry_mode(req);

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = lmp_le_br_capable(hdev);
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If Connectionless Slave Broadcast master role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Slave Page Response Timeout */
                events[2] |= 0x20;      /* CSB Channel Map Change */
        }

        /* If Connectionless Slave Broadcast slave role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CSB Receive */
                events[2] |= 0x04;      /* CSB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev))
                events[2] |= 0x80;

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and send the command only if it is
         * marked as supported. If not, assume that the controller does not
         * have actual support for stored link keys, which makes this
         * command redundant anyway.
         *
         * Some controllers indicate that they support handling deleting
         * stored link keys, but they don't. The quirk lets a driver
         * just disable this command.
         */
        if (hdev->commands[6] & 0x80 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (lmp_le_capable(hdev)) {
                u8 events[8];

                memset(events, 0, sizeof(events));
                events[0] = 0x1f;

                /* If controller supports the Connection Parameters Request
                 * Link Layer Procedure, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
                        events[0] |= 0x20;      /* LE Remote Connection
                                                 * Parameter Request
                                                 */

                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
                            events);

                hci_set_le_support(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Check for Synchronization Train support */
        if (lmp_sync_train_capable(hdev))
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

        /* Enable Secure Connections if supported and configured */
        if ((lmp_sc_capable(hdev) ||
             test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
            test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
                u8 support = 0x01;
                hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                            sizeof(support), &support);
        }
}

static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* The Device Under Test (DUT) mode is special and available for
         * all controller types. So just create it early on.
         */
        if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
                debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
                                    &dut_mode_fops);
        }

1677         /* The HCI_BREDR device type covers single-mode LE, single-mode
1678          * BR/EDR and dual-mode BR/EDR/LE controllers alike. AMP
1679          * controllers only need the first stage init.
1680          */
1681         if (hdev->dev_type != HCI_BREDR)
1682                 return 0;
1683
1684         err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1685         if (err < 0)
1686                 return err;
1687
1688         err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1689         if (err < 0)
1690                 return err;
1691
1692         err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1693         if (err < 0)
1694                 return err;
1695
1696         /* Only create debugfs entries during the initial setup
1697          * phase and not every time the controller gets powered on.
1698          */
1699         if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1700                 return 0;
1701
1702         debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1703                             &features_fops);
1704         debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1705                            &hdev->manufacturer);
1706         debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1707         debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1708         debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1709                             &blacklist_fops);
1710         debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1711
1712         debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1713                             &conn_info_min_age_fops);
1714         debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1715                             &conn_info_max_age_fops);
1716
1717         if (lmp_bredr_capable(hdev)) {
1718                 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1719                                     hdev, &inquiry_cache_fops);
1720                 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1721                                     hdev, &link_keys_fops);
1722                 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1723                                     hdev, &dev_class_fops);
1724                 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1725                                     hdev, &voice_setting_fops);
1726         }
1727
1728         if (lmp_ssp_capable(hdev)) {
1729                 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1730                                     hdev, &auto_accept_delay_fops);
1731                 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1732                                     hdev, &force_sc_support_fops);
1733                 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1734                                     hdev, &sc_only_mode_fops);
1735         }
1736
1737         if (lmp_sniff_capable(hdev)) {
1738                 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1739                                     hdev, &idle_timeout_fops);
1740                 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1741                                     hdev, &sniff_min_interval_fops);
1742                 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1743                                     hdev, &sniff_max_interval_fops);
1744         }
1745
1746         if (lmp_le_capable(hdev)) {
1747                 debugfs_create_file("identity", 0400, hdev->debugfs,
1748                                     hdev, &identity_fops);
1749                 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1750                                     hdev, &rpa_timeout_fops);
1751                 debugfs_create_file("random_address", 0444, hdev->debugfs,
1752                                     hdev, &random_address_fops);
1753                 debugfs_create_file("static_address", 0444, hdev->debugfs,
1754                                     hdev, &static_address_fops);
1755
1756                 /* For controllers with a public address, provide a debug
1757                  * option to force the usage of the configured static
1758                  * address. By default the public address is used.
1759                  */
1760                 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1761                         debugfs_create_file("force_static_address", 0644,
1762                                             hdev->debugfs, hdev,
1763                                             &force_static_address_fops);
1764
1765                 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1766                                   &hdev->le_white_list_size);
1767                 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1768                                     &white_list_fops);
1769                 debugfs_create_file("identity_resolving_keys", 0400,
1770                                     hdev->debugfs, hdev,
1771                                     &identity_resolving_keys_fops);
1772                 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1773                                     hdev, &long_term_keys_fops);
1774                 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1775                                     hdev, &conn_min_interval_fops);
1776                 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1777                                     hdev, &conn_max_interval_fops);
1778                 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1779                                     hdev, &conn_latency_fops);
1780                 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1781                                     hdev, &supervision_timeout_fops);
1782                 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1783                                     hdev, &adv_channel_map_fops);
1784                 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1785                                     &device_list_fops);
1786                 debugfs_create_u16("discov_interleaved_timeout", 0644,
1787                                    hdev->debugfs,
1788                                    &hdev->discov_interleaved_timeout);
1789         }
1790
1791         return 0;
1792 }
1793
1794 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1795 {
1796         __u8 scan = opt;
1797
1798         BT_DBG("%s %x", req->hdev->name, scan);
1799
1800         /* Inquiry and Page scans */
1801         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1802 }
1803
1804 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1805 {
1806         __u8 auth = opt;
1807
1808         BT_DBG("%s %x", req->hdev->name, auth);
1809
1810         /* Authentication */
1811         hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1812 }
1813
1814 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1815 {
1816         __u8 encrypt = opt;
1817
1818         BT_DBG("%s %x", req->hdev->name, encrypt);
1819
1820         /* Encryption */
1821         hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1822 }
1823
1824 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1825 {
1826         __le16 policy = cpu_to_le16(opt);
1827
1828         BT_DBG("%s %x", req->hdev->name, policy);
1829
1830         /* Default link policy */
1831         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1832 }
1833
1834 /* Get HCI device by index.
1835  * Device is held on return. */
1836 struct hci_dev *hci_dev_get(int index)
1837 {
1838         struct hci_dev *hdev = NULL, *d;
1839
1840         BT_DBG("%d", index);
1841
1842         if (index < 0)
1843                 return NULL;
1844
1845         read_lock(&hci_dev_list_lock);
1846         list_for_each_entry(d, &hci_dev_list, list) {
1847                 if (d->id == index) {
1848                         hdev = hci_dev_hold(d);
1849                         break;
1850                 }
1851         }
1852         read_unlock(&hci_dev_list_lock);
1853         return hdev;
1854 }
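
/* Illustrative usage sketch: hci_dev_get() returns the device with a
 * reference held, so every successful lookup must be balanced with a
 * matching hci_dev_put().
 */
#if 0
static int example_dump_dev_flags(int index)
{
        struct hci_dev *hdev = hci_dev_get(index);

        if (!hdev)
                return -ENODEV;

        BT_DBG("%s flags 0x%lx", hdev->name, hdev->flags);

        hci_dev_put(hdev);
        return 0;
}
#endif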
1855
1856 /* ---- Inquiry support ---- */
1857
1858 bool hci_discovery_active(struct hci_dev *hdev)
1859 {
1860         struct discovery_state *discov = &hdev->discovery;
1861
1862         switch (discov->state) {
1863         case DISCOVERY_FINDING:
1864         case DISCOVERY_RESOLVING:
1865                 return true;
1866
1867         default:
1868                 return false;
1869         }
1870 }
1871
1872 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1873 {
1874         BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1875
1876         if (hdev->discovery.state == state)
1877                 return;
1878
1879         switch (state) {
1880         case DISCOVERY_STOPPED:
1881                 hci_update_background_scan(hdev);
1882
1883                 if (hdev->discovery.state != DISCOVERY_STARTING)
1884                         mgmt_discovering(hdev, 0);
1885                 break;
1886         case DISCOVERY_STARTING:
1887                 break;
1888         case DISCOVERY_FINDING:
1889                 mgmt_discovering(hdev, 1);
1890                 break;
1891         case DISCOVERY_RESOLVING:
1892                 break;
1893         case DISCOVERY_STOPPING:
1894                 break;
1895         }
1896
1897         hdev->discovery.state = state;
1898 }
1899
1900 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1901 {
1902         struct discovery_state *cache = &hdev->discovery;
1903         struct inquiry_entry *p, *n;
1904
1905         list_for_each_entry_safe(p, n, &cache->all, all) {
1906                 list_del(&p->all);
1907                 kfree(p);
1908         }
1909
1910         INIT_LIST_HEAD(&cache->unknown);
1911         INIT_LIST_HEAD(&cache->resolve);
1912 }
1913
1914 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1915                                                bdaddr_t *bdaddr)
1916 {
1917         struct discovery_state *cache = &hdev->discovery;
1918         struct inquiry_entry *e;
1919
1920         BT_DBG("cache %p, %pMR", cache, bdaddr);
1921
1922         list_for_each_entry(e, &cache->all, all) {
1923                 if (!bacmp(&e->data.bdaddr, bdaddr))
1924                         return e;
1925         }
1926
1927         return NULL;
1928 }
1929
1930 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1931                                                        bdaddr_t *bdaddr)
1932 {
1933         struct discovery_state *cache = &hdev->discovery;
1934         struct inquiry_entry *e;
1935
1936         BT_DBG("cache %p, %pMR", cache, bdaddr);
1937
1938         list_for_each_entry(e, &cache->unknown, list) {
1939                 if (!bacmp(&e->data.bdaddr, bdaddr))
1940                         return e;
1941         }
1942
1943         return NULL;
1944 }
1945
1946 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1947                                                        bdaddr_t *bdaddr,
1948                                                        int state)
1949 {
1950         struct discovery_state *cache = &hdev->discovery;
1951         struct inquiry_entry *e;
1952
1953         BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1954
1955         list_for_each_entry(e, &cache->resolve, list) {
1956                 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1957                         return e;
1958                 if (!bacmp(&e->data.bdaddr, bdaddr))
1959                         return e;
1960         }
1961
1962         return NULL;
1963 }
1964
1965 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
1966                                       struct inquiry_entry *ie)
1967 {
1968         struct discovery_state *cache = &hdev->discovery;
1969         struct list_head *pos = &cache->resolve;
1970         struct inquiry_entry *p;
1971
1972         list_del(&ie->list);
1973
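        /* Keep the resolve list ordered by ascending |RSSI| so that names
         * are resolved for the strongest devices first; entries whose name
         * request is already pending are never displaced.
         */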
1974         list_for_each_entry(p, &cache->resolve, list) {
1975                 if (p->name_state != NAME_PENDING &&
1976                     abs(p->data.rssi) >= abs(ie->data.rssi))
1977                         break;
1978                 pos = &p->list;
1979         }
1980
1981         list_add(&ie->list, pos);
1982 }
1983
1984 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1985                              bool name_known)
1986 {
1987         struct discovery_state *cache = &hdev->discovery;
1988         struct inquiry_entry *ie;
1989         u32 flags = 0;
1990
1991         BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1992
1993         hci_remove_remote_oob_data(hdev, &data->bdaddr);
1994
1995         if (!data->ssp_mode)
1996                 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1997
1998         ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
1999         if (ie) {
2000                 if (!ie->data.ssp_mode)
2001                         flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2002
2003                 if (ie->name_state == NAME_NEEDED &&
2004                     data->rssi != ie->data.rssi) {
2005                         ie->data.rssi = data->rssi;
2006                         hci_inquiry_cache_update_resolve(hdev, ie);
2007                 }
2008
2009                 goto update;
2010         }
2011
2012         /* Entry not in the cache. Add new one. */
2013         ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
2014         if (!ie) {
2015                 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2016                 goto done;
2017         }
2018
2019         list_add(&ie->all, &cache->all);
2020
2021         if (name_known) {
2022                 ie->name_state = NAME_KNOWN;
2023         } else {
2024                 ie->name_state = NAME_NOT_KNOWN;
2025                 list_add(&ie->list, &cache->unknown);
2026         }
2027
2028 update:
2029         if (name_known && ie->name_state != NAME_KNOWN &&
2030             ie->name_state != NAME_PENDING) {
2031                 ie->name_state = NAME_KNOWN;
2032                 list_del(&ie->list);
2033         }
2034
2035         memcpy(&ie->data, data, sizeof(*data));
2036         ie->timestamp = jiffies;
2037         cache->timestamp = jiffies;
2038
2039         if (ie->name_state == NAME_NOT_KNOWN)
2040                 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2041
2042 done:
2043         return flags;
2044 }
2045
2046 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2047 {
2048         struct discovery_state *cache = &hdev->discovery;
2049         struct inquiry_info *info = (struct inquiry_info *) buf;
2050         struct inquiry_entry *e;
2051         int copied = 0;
2052
2053         list_for_each_entry(e, &cache->all, all) {
2054                 struct inquiry_data *data = &e->data;
2055
2056                 if (copied >= num)
2057                         break;
2058
2059                 bacpy(&info->bdaddr, &data->bdaddr);
2060                 info->pscan_rep_mode    = data->pscan_rep_mode;
2061                 info->pscan_period_mode = data->pscan_period_mode;
2062                 info->pscan_mode        = data->pscan_mode;
2063                 memcpy(info->dev_class, data->dev_class, 3);
2064                 info->clock_offset      = data->clock_offset;
2065
2066                 info++;
2067                 copied++;
2068         }
2069
2070         BT_DBG("cache %p, copied %d", cache, copied);
2071         return copied;
2072 }
2073
2074 static void hci_inq_req(struct hci_request *req, unsigned long opt)
2075 {
2076         struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
2077         struct hci_dev *hdev = req->hdev;
2078         struct hci_cp_inquiry cp;
2079
2080         BT_DBG("%s", hdev->name);
2081
2082         if (test_bit(HCI_INQUIRY, &hdev->flags))
2083                 return;
2084
2085         /* Start Inquiry */
2086         memcpy(&cp.lap, &ir->lap, 3);
2087         cp.length  = ir->length;
2088         cp.num_rsp = ir->num_rsp;
2089         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2090 }
2091
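/* Action function for wait_on_bit(): sleep until woken up and report back
 * whether a pending signal interrupted the wait.
 */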
2092 static int wait_inquiry(void *word)
2093 {
2094         schedule();
2095         return signal_pending(current);
2096 }
2097
2098 int hci_inquiry(void __user *arg)
2099 {
2100         __u8 __user *ptr = arg;
2101         struct hci_inquiry_req ir;
2102         struct hci_dev *hdev;
2103         int err = 0, do_inquiry = 0, max_rsp;
2104         long timeo;
2105         __u8 *buf;
2106
2107         if (copy_from_user(&ir, ptr, sizeof(ir)))
2108                 return -EFAULT;
2109
2110         hdev = hci_dev_get(ir.dev_id);
2111         if (!hdev)
2112                 return -ENODEV;
2113
2114         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2115                 err = -EBUSY;
2116                 goto done;
2117         }
2118
2119         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2120                 err = -EOPNOTSUPP;
2121                 goto done;
2122         }
2123
2124         if (hdev->dev_type != HCI_BREDR) {
2125                 err = -EOPNOTSUPP;
2126                 goto done;
2127         }
2128
2129         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2130                 err = -EOPNOTSUPP;
2131                 goto done;
2132         }
2133
2134         hci_dev_lock(hdev);
2135         if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
2136             inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
2137                 hci_inquiry_cache_flush(hdev);
2138                 do_inquiry = 1;
2139         }
2140         hci_dev_unlock(hdev);
2141
2142         timeo = ir.length * msecs_to_jiffies(2000);
2143
2144         if (do_inquiry) {
2145                 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2146                                    timeo);
2147                 if (err < 0)
2148                         goto done;
2149
2150                 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2151                  * cleared). If it is interrupted by a signal, return -EINTR.
2152                  */
2153                 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2154                                 TASK_INTERRUPTIBLE))
2155                         return -EINTR;
2156         }
2157
2158         /* For an unlimited number of responses we use a buffer with
2159          * 255 entries.
2160          */
2161         max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2162
2163         /* inquiry_cache_dump() can't sleep, so we allocate a temporary
2164          * buffer here and then copy it to user space.
2165          */
2166         buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
2167         if (!buf) {
2168                 err = -ENOMEM;
2169                 goto done;
2170         }
2171
2172         hci_dev_lock(hdev);
2173         ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
2174         hci_dev_unlock(hdev);
2175
2176         BT_DBG("num_rsp %d", ir.num_rsp);
2177
2178         if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2179                 ptr += sizeof(ir);
2180                 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
2181                                  ir.num_rsp))
2182                         err = -EFAULT;
2183         } else
2184                 err = -EFAULT;
2185
2186         kfree(buf);
2187
2188 done:
2189         hci_dev_put(hdev);
2190         return err;
2191 }
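
/* For reference, a hedged sketch of the matching userspace side. This is
 * not kernel code; it assumes a raw HCI socket and the HCIINQUIRY ioctl
 * as used by BlueZ's libbluetooth.
 */
#if 0
static void example_inquiry(void)
{
        struct {
                struct hci_inquiry_req ir;
                struct inquiry_info info[255];
        } req;
        int sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);

        memset(&req, 0, sizeof(req));
        req.ir.dev_id  = 0;                /* hci0 */
        req.ir.flags   = IREQ_CACHE_FLUSH; /* bypass the inquiry cache */
        req.ir.lap[0]  = 0x33;             /* GIAC 0x9e8b33, LSB first */
        req.ir.lap[1]  = 0x8b;
        req.ir.lap[2]  = 0x9e;
        req.ir.length  = 8;                /* 8 * 1.28 s inquiry */
        req.ir.num_rsp = 255;              /* 0 would also mean "maximum" */

        if (ioctl(sk, HCIINQUIRY, &req) < 0)
                perror("HCIINQUIRY");
        close(sk);
}
#endif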
2192
2193 static int hci_dev_do_open(struct hci_dev *hdev)
2194 {
2195         int ret = 0;
2196
2197         BT_DBG("%s %p", hdev->name, hdev);
2198
2199         hci_req_lock(hdev);
2200
2201         if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2202                 ret = -ENODEV;
2203                 goto done;
2204         }
2205
2206         if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
2207                 /* Check for rfkill but allow the HCI setup stage to
2208                  * proceed (which in itself doesn't cause any RF activity).
2209                  */
2210                 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2211                         ret = -ERFKILL;
2212                         goto done;
2213                 }
2214
2215                 /* Check for valid public address or a configured static
2216                  * random address, but let the HCI setup proceed to
2217                  * be able to determine if there is a public address
2218                  * or not.
2219                  *
2220                  * In case of user channel usage, it is not important
2221                  * if a public address or static random address is
2222                  * available.
2223                  *
2224                  * This check is only valid for BR/EDR controllers
2225                  * since AMP controllers do not have an address.
2226                  */
2227                 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2228                     hdev->dev_type == HCI_BREDR &&
2229                     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2230                     !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2231                         ret = -EADDRNOTAVAIL;
2232                         goto done;
2233                 }
2234         }
2235
2236         if (test_bit(HCI_UP, &hdev->flags)) {
2237                 ret = -EALREADY;
2238                 goto done;
2239         }
2240
2241         if (hdev->open(hdev)) {
2242                 ret = -EIO;
2243                 goto done;
2244         }
2245
2246         atomic_set(&hdev->cmd_cnt, 1);
2247         set_bit(HCI_INIT, &hdev->flags);
2248
2249         if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
2250                 ret = hdev->setup(hdev);
2251
2252         /* If public address change is configured, ensure that the
2253          * address gets programmed. If the driver does not support
2254          * changing the public address, fail the power on procedure.
2255          */
2256         if (!ret && bacmp(&hdev->public_addr, BDADDR_ANY)) {
2257                 if (hdev->set_bdaddr)
2258                         ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2259                 else
2260                         ret = -EADDRNOTAVAIL;
2261         }
2262
2263         if (!ret) {
2264                 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2265                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2266                         ret = __hci_init(hdev);
2267         }
2268
2269         clear_bit(HCI_INIT, &hdev->flags);
2270
2271         if (!ret) {
2272                 hci_dev_hold(hdev);
2273                 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
2274                 set_bit(HCI_UP, &hdev->flags);
2275                 hci_notify(hdev, HCI_DEV_UP);
2276                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2277                     !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2278                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2279                     hdev->dev_type == HCI_BREDR) {
2280                         hci_dev_lock(hdev);
2281                         mgmt_powered(hdev, 1);
2282                         hci_dev_unlock(hdev);
2283                 }
2284         } else {
2285                 /* Init failed, cleanup */
2286                 flush_work(&hdev->tx_work);
2287                 flush_work(&hdev->cmd_work);
2288                 flush_work(&hdev->rx_work);
2289
2290                 skb_queue_purge(&hdev->cmd_q);
2291                 skb_queue_purge(&hdev->rx_q);
2292
2293                 if (hdev->flush)
2294                         hdev->flush(hdev);
2295
2296                 if (hdev->sent_cmd) {
2297                         kfree_skb(hdev->sent_cmd);
2298                         hdev->sent_cmd = NULL;
2299                 }
2300
2301                 hdev->close(hdev);
2302                 hdev->flags &= BIT(HCI_RAW);
2303         }
2304
2305 done:
2306         hci_req_unlock(hdev);
2307         return ret;
2308 }
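
/* A minimal sketch of the set_bdaddr driver hook consumed above, assuming
 * a vendor-specific command; the 0xfc06 opcode is a made-up placeholder,
 * not any particular vendor's command.
 */
#if 0
static int example_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
{
        struct sk_buff *skb;

        skb = __hci_cmd_sync(hdev, 0xfc06, sizeof(*bdaddr), bdaddr,
                             HCI_INIT_TIMEOUT);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        kfree_skb(skb);
        return 0;
}
#endif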
2309
2310 /* ---- HCI ioctl helpers ---- */
2311
2312 int hci_dev_open(__u16 dev)
2313 {
2314         struct hci_dev *hdev;
2315         int err;
2316
2317         hdev = hci_dev_get(dev);
2318         if (!hdev)
2319                 return -ENODEV;
2320
2321         /* Devices that are marked as unconfigured can only be powered
2322          * up as user channel. Trying to bring them up as normal devices
2323          * will result in a failure. Only user channel operation is
2324          * possible.
2325          *
2326          * When this function is called for a user channel, the flag
2327          * HCI_USER_CHANNEL will be set first before attempting to
2328          * open the device.
2329          */
2330         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2331             !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2332                 err = -EOPNOTSUPP;
2333                 goto done;
2334         }
2335
2336         /* We need to ensure that no other power on/off work is pending
2337          * before proceeding to call hci_dev_do_open. This is
2338          * particularly important if the setup procedure has not yet
2339          * completed.
2340          */
2341         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2342                 cancel_delayed_work(&hdev->power_off);
2343
2344         /* After this call it is guaranteed that the setup procedure
2345          * has finished, which means that error conditions such as
2346          * RFKILL or a missing public/static random address now apply.
2347          */
2348         flush_workqueue(hdev->req_workqueue);
2349
2350         err = hci_dev_do_open(hdev);
2351
2352 done:
2353         hci_dev_put(hdev);
2354         return err;
2355 }
2356
2357 static int hci_dev_do_close(struct hci_dev *hdev)
2358 {
2359         BT_DBG("%s %p", hdev->name, hdev);
2360
2361         cancel_delayed_work(&hdev->power_off);
2362
2363         hci_req_cancel(hdev, ENODEV);
2364         hci_req_lock(hdev);
2365
2366         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
2367                 cancel_delayed_work_sync(&hdev->cmd_timer);
2368                 hci_req_unlock(hdev);
2369                 return 0;
2370         }
2371
2372         /* Flush RX and TX works */
2373         flush_work(&hdev->tx_work);
2374         flush_work(&hdev->rx_work);
2375
2376         if (hdev->discov_timeout > 0) {
2377                 cancel_delayed_work(&hdev->discov_off);
2378                 hdev->discov_timeout = 0;
2379                 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
2380                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2381         }
2382
2383         if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
2384                 cancel_delayed_work(&hdev->service_cache);
2385
2386         cancel_delayed_work_sync(&hdev->le_scan_disable);
2387
2388         if (test_bit(HCI_MGMT, &hdev->dev_flags))
2389                 cancel_delayed_work_sync(&hdev->rpa_expired);
2390
2391         hci_dev_lock(hdev);
2392         hci_inquiry_cache_flush(hdev);
2393         hci_conn_hash_flush(hdev);
2394         hci_pend_le_conns_clear(hdev);
2395         hci_dev_unlock(hdev);
2396
2397         hci_notify(hdev, HCI_DEV_DOWN);
2398
2399         if (hdev->flush)
2400                 hdev->flush(hdev);
2401
2402         /* Reset device */
2403         skb_queue_purge(&hdev->cmd_q);
2404         atomic_set(&hdev->cmd_cnt, 1);
2405         if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2406             !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2407             test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
2408                 set_bit(HCI_INIT, &hdev->flags);
2409                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
2410                 clear_bit(HCI_INIT, &hdev->flags);
2411         }
2412
2413         /* flush cmd  work */
2414         flush_work(&hdev->cmd_work);
2415
2416         /* Drop queues */
2417         skb_queue_purge(&hdev->rx_q);
2418         skb_queue_purge(&hdev->cmd_q);
2419         skb_queue_purge(&hdev->raw_q);
2420
2421         /* Drop last sent command */
2422         if (hdev->sent_cmd) {
2423                 cancel_delayed_work_sync(&hdev->cmd_timer);
2424                 kfree_skb(hdev->sent_cmd);
2425                 hdev->sent_cmd = NULL;
2426         }
2427
2428         kfree_skb(hdev->recv_evt);
2429         hdev->recv_evt = NULL;
2430
2431         /* After this point our queues are empty
2432          * and no tasks are scheduled. */
2433         hdev->close(hdev);
2434
2435         /* Clear flags */
2436         hdev->flags &= BIT(HCI_RAW);
2437         hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2438
2439         if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2440                 if (hdev->dev_type == HCI_BREDR) {
2441                         hci_dev_lock(hdev);
2442                         mgmt_powered(hdev, 0);
2443                         hci_dev_unlock(hdev);
2444                 }
2445         }
2446
2447         /* Controller radio is available but is currently powered down */
2448         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
2449
2450         memset(hdev->eir, 0, sizeof(hdev->eir));
2451         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
2452         bacpy(&hdev->random_addr, BDADDR_ANY);
2453
2454         hci_req_unlock(hdev);
2455
2456         hci_dev_put(hdev);
2457         return 0;
2458 }
2459
2460 int hci_dev_close(__u16 dev)
2461 {
2462         struct hci_dev *hdev;
2463         int err;
2464
2465         hdev = hci_dev_get(dev);
2466         if (!hdev)
2467                 return -ENODEV;
2468
2469         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2470                 err = -EBUSY;
2471                 goto done;
2472         }
2473
2474         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2475                 cancel_delayed_work(&hdev->power_off);
2476
2477         err = hci_dev_do_close(hdev);
2478
2479 done:
2480         hci_dev_put(hdev);
2481         return err;
2482 }
2483
2484 int hci_dev_reset(__u16 dev)
2485 {
2486         struct hci_dev *hdev;
2487         int ret = 0;
2488
2489         hdev = hci_dev_get(dev);
2490         if (!hdev)
2491                 return -ENODEV;
2492
2493         hci_req_lock(hdev);
2494
2495         if (!test_bit(HCI_UP, &hdev->flags)) {
2496                 ret = -ENETDOWN;
2497                 goto done;
2498         }
2499
2500         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2501                 ret = -EBUSY;
2502                 goto done;
2503         }
2504
2505         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2506                 ret = -EOPNOTSUPP;
2507                 goto done;
2508         }
2509
2510         /* Drop queues */
2511         skb_queue_purge(&hdev->rx_q);
2512         skb_queue_purge(&hdev->cmd_q);
2513
2514         hci_dev_lock(hdev);
2515         hci_inquiry_cache_flush(hdev);
2516         hci_conn_hash_flush(hdev);
2517         hci_dev_unlock(hdev);
2518
2519         if (hdev->flush)
2520                 hdev->flush(hdev);
2521
2522         atomic_set(&hdev->cmd_cnt, 1);
2523         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
2524
2525         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2526
2527 done:
2528         hci_req_unlock(hdev);
2529         hci_dev_put(hdev);
2530         return ret;
2531 }
2532
2533 int hci_dev_reset_stat(__u16 dev)
2534 {
2535         struct hci_dev *hdev;
2536         int ret = 0;
2537
2538         hdev = hci_dev_get(dev);
2539         if (!hdev)
2540                 return -ENODEV;
2541
2542         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2543                 ret = -EBUSY;
2544                 goto done;
2545         }
2546
2547         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2548                 ret = -EOPNOTSUPP;
2549                 goto done;
2550         }
2551
2552         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2553
2554 done:
2555         hci_dev_put(hdev);
2556         return ret;
2557 }
2558
2559 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2560 {
2561         struct hci_dev *hdev;
2562         struct hci_dev_req dr;
2563         int err = 0;
2564
2565         if (copy_from_user(&dr, arg, sizeof(dr)))
2566                 return -EFAULT;
2567
2568         hdev = hci_dev_get(dr.dev_id);
2569         if (!hdev)
2570                 return -ENODEV;
2571
2572         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2573                 err = -EBUSY;
2574                 goto done;
2575         }
2576
2577         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2578                 err = -EOPNOTSUPP;
2579                 goto done;
2580         }
2581
2582         if (hdev->dev_type != HCI_BREDR) {
2583                 err = -EOPNOTSUPP;
2584                 goto done;
2585         }
2586
2587         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2588                 err = -EOPNOTSUPP;
2589                 goto done;
2590         }
2591
2592         switch (cmd) {
2593         case HCISETAUTH:
2594                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2595                                    HCI_INIT_TIMEOUT);
2596                 break;
2597
2598         case HCISETENCRYPT:
2599                 if (!lmp_encrypt_capable(hdev)) {
2600                         err = -EOPNOTSUPP;
2601                         break;
2602                 }
2603
2604                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2605                         /* Auth must be enabled first */
2606                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2607                                            HCI_INIT_TIMEOUT);
2608                         if (err)
2609                                 break;
2610                 }
2611
2612                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2613                                    HCI_INIT_TIMEOUT);
2614                 break;
2615
2616         case HCISETSCAN:
2617                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2618                                    HCI_INIT_TIMEOUT);
2619                 break;
2620
2621         case HCISETLINKPOL:
2622                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2623                                    HCI_INIT_TIMEOUT);
2624                 break;
2625
2626         case HCISETLINKMODE:
2627                 hdev->link_mode = ((__u16) dr.dev_opt) &
2628                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
2629                 break;
2630
2631         case HCISETPTYPE:
2632                 hdev->pkt_type = (__u16) dr.dev_opt;
2633                 break;
2634
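        /* For the two MTU ioctls below, dev_opt carries two 16-bit halves:
         * the first __u16 in memory is the packet count, the second the
         * MTU.
         */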
2635         case HCISETACLMTU:
2636                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
2637                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2638                 break;
2639
2640         case HCISETSCOMTU:
2641                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
2642                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2643                 break;
2644
2645         default:
2646                 err = -EINVAL;
2647                 break;
2648         }
2649
2650 done:
2651         hci_dev_put(hdev);
2652         return err;
2653 }
2654
2655 int hci_get_dev_list(void __user *arg)
2656 {
2657         struct hci_dev *hdev;
2658         struct hci_dev_list_req *dl;
2659         struct hci_dev_req *dr;
2660         int n = 0, size, err;
2661         __u16 dev_num;
2662
2663         if (get_user(dev_num, (__u16 __user *) arg))
2664                 return -EFAULT;
2665
2666         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2667                 return -EINVAL;
2668
2669         size = sizeof(*dl) + dev_num * sizeof(*dr);
2670
2671         dl = kzalloc(size, GFP_KERNEL);
2672         if (!dl)
2673                 return -ENOMEM;
2674
2675         dr = dl->dev_req;
2676
2677         read_lock(&hci_dev_list_lock);
2678         list_for_each_entry(hdev, &hci_dev_list, list) {
2679                 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2680                         cancel_delayed_work(&hdev->power_off);
2681
2682                 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2683                         set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2684
2685                 (dr + n)->dev_id  = hdev->id;
2686                 (dr + n)->dev_opt = hdev->flags;
2687
2688                 if (++n >= dev_num)
2689                         break;
2690         }
2691         read_unlock(&hci_dev_list_lock);
2692
2693         dl->dev_num = n;
2694         size = sizeof(*dl) + n * sizeof(*dr);
2695
2696         err = copy_to_user(arg, dl, size);
2697         kfree(dl);
2698
2699         return err ? -EFAULT : 0;
2700 }
2701
2702 int hci_get_dev_info(void __user *arg)
2703 {
2704         struct hci_dev *hdev;
2705         struct hci_dev_info di;
2706         int err = 0;
2707
2708         if (copy_from_user(&di, arg, sizeof(di)))
2709                 return -EFAULT;
2710
2711         hdev = hci_dev_get(di.dev_id);
2712         if (!hdev)
2713                 return -ENODEV;
2714
2715         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2716                 cancel_delayed_work_sync(&hdev->power_off);
2717
2718         if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2719                 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2720
2721         strcpy(di.name, hdev->name);
2722         di.bdaddr   = hdev->bdaddr;
2723         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2724         di.flags    = hdev->flags;
2725         di.pkt_type = hdev->pkt_type;
2726         if (lmp_bredr_capable(hdev)) {
2727                 di.acl_mtu  = hdev->acl_mtu;
2728                 di.acl_pkts = hdev->acl_pkts;
2729                 di.sco_mtu  = hdev->sco_mtu;
2730                 di.sco_pkts = hdev->sco_pkts;
2731         } else {
2732                 di.acl_mtu  = hdev->le_mtu;
2733                 di.acl_pkts = hdev->le_pkts;
2734                 di.sco_mtu  = 0;
2735                 di.sco_pkts = 0;
2736         }
2737         di.link_policy = hdev->link_policy;
2738         di.link_mode   = hdev->link_mode;
2739
2740         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2741         memcpy(&di.features, &hdev->features, sizeof(di.features));
2742
2743         if (copy_to_user(arg, &di, sizeof(di)))
2744                 err = -EFAULT;
2745
2746         hci_dev_put(hdev);
2747
2748         return err;
2749 }
2750
2751 /* ---- Interface to HCI drivers ---- */
2752
2753 static int hci_rfkill_set_block(void *data, bool blocked)
2754 {
2755         struct hci_dev *hdev = data;
2756
2757         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2758
2759         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2760                 return -EBUSY;
2761
2762         if (blocked) {
2763                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2764                 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2765                         hci_dev_do_close(hdev);
2766         } else {
2767                 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
2768         }
2769
2770         return 0;
2771 }
2772
2773 static const struct rfkill_ops hci_rfkill_ops = {
2774         .set_block = hci_rfkill_set_block,
2775 };
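
/* For context, a sketch of how this ops table gets wired up; the actual
 * registration in hci_register_dev() later in this file does roughly:
 */
#if 0
        hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
                                    RFKILL_TYPE_BLUETOOTH,
                                    &hci_rfkill_ops, hdev);
        if (hdev->rfkill && rfkill_register(hdev->rfkill) < 0) {
                rfkill_destroy(hdev->rfkill);
                hdev->rfkill = NULL;
        }
#endif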
2776
2777 static void hci_power_on(struct work_struct *work)
2778 {
2779         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2780         int err;
2781
2782         BT_DBG("%s", hdev->name);
2783
2784         err = hci_dev_do_open(hdev);
2785         if (err < 0) {
2786                 mgmt_set_powered_failed(hdev, err);
2787                 return;
2788         }
2789
2790         /* During the HCI setup phase, a few error conditions are
2791          * ignored and they need to be checked now. If they are still
2792          * valid, it is important to turn the device back off.
2793          */
2794         if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2795             test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
2796             (hdev->dev_type == HCI_BREDR &&
2797              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2798              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2799                 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2800                 hci_dev_do_close(hdev);
2801         } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2802                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2803                                    HCI_AUTO_OFF_TIMEOUT);
2804         }
2805
2806         if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
2807                 /* For unconfigured devices, set the HCI_RAW flag
2808                  * so that userspace can easily identify them.
2809                  */
2810                 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2811                         set_bit(HCI_RAW, &hdev->flags);
2812
2813                 /* For fully configured devices, this will send
2814                  * the Index Added event. For unconfigured devices,
2815          * it will send an Unconfigured Index Added event.
2816                  *
2817                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2818          * and no event will be sent.
2819                  */
2820                 mgmt_index_added(hdev);
2821         }
2822 }
2823
2824 static void hci_power_off(struct work_struct *work)
2825 {
2826         struct hci_dev *hdev = container_of(work, struct hci_dev,
2827                                             power_off.work);
2828
2829         BT_DBG("%s", hdev->name);
2830
2831         hci_dev_do_close(hdev);
2832 }
2833
2834 static void hci_discov_off(struct work_struct *work)
2835 {
2836         struct hci_dev *hdev;
2837
2838         hdev = container_of(work, struct hci_dev, discov_off.work);
2839
2840         BT_DBG("%s", hdev->name);
2841
2842         mgmt_discoverable_timeout(hdev);
2843 }
2844
2845 void hci_uuids_clear(struct hci_dev *hdev)
2846 {
2847         struct bt_uuid *uuid, *tmp;
2848
2849         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2850                 list_del(&uuid->list);
2851                 kfree(uuid);
2852         }
2853 }
2854
2855 void hci_link_keys_clear(struct hci_dev *hdev)
2856 {
2857         struct list_head *p, *n;
2858
2859         list_for_each_safe(p, n, &hdev->link_keys) {
2860                 struct link_key *key;
2861
2862                 key = list_entry(p, struct link_key, list);
2863
2864                 list_del(p);
2865                 kfree(key);
2866         }
2867 }
2868
2869 void hci_smp_ltks_clear(struct hci_dev *hdev)
2870 {
2871         struct smp_ltk *k, *tmp;
2872
2873         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2874                 list_del(&k->list);
2875                 kfree(k);
2876         }
2877 }
2878
2879 void hci_smp_irks_clear(struct hci_dev *hdev)
2880 {
2881         struct smp_irk *k, *tmp;
2882
2883         list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2884                 list_del(&k->list);
2885                 kfree(k);
2886         }
2887 }
2888
2889 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2890 {
2891         struct link_key *k;
2892
2893         list_for_each_entry(k, &hdev->link_keys, list)
2894                 if (bacmp(bdaddr, &k->bdaddr) == 0)
2895                         return k;
2896
2897         return NULL;
2898 }
2899
2900 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2901                                u8 key_type, u8 old_key_type)
2902 {
2903         /* Legacy key */
2904         if (key_type < 0x03)
2905                 return true;
2906
2907         /* Debug keys are insecure so don't store them persistently */
2908         if (key_type == HCI_LK_DEBUG_COMBINATION)
2909                 return false;
2910
2911         /* Changed combination key and there's no previous one */
2912         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2913                 return false;
2914
2915         /* Security mode 3 case */
2916         if (!conn)
2917                 return true;
2918
2919         /* Neither local nor remote side had no-bonding as a requirement */
2920         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2921                 return true;
2922
2923         /* Local side had dedicated bonding as requirement */
2924         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2925                 return true;
2926
2927         /* Remote side had dedicated bonding as requirement */
2928         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2929                 return true;
2930
2931         /* If none of the above criteria match, then don't store the key
2932          * persistently */
2933         return false;
2934 }
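
/* For reference, the auth_type / remote_auth values tested above are the
 * authentication requirements exchanged during pairing: 0x00/0x01 no
 * bonding (without/with MITM), 0x02/0x03 dedicated bonding, 0x04/0x05
 * general bonding.
 */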
2935
2936 static bool ltk_type_master(u8 type)
2937 {
2938         return (type == SMP_LTK);
2939 }
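
/* An LTK of type SMP_LTK was generated while acting as master and is only
 * valid for that role; slave-role keys are stored under a separate type
 * (SMP_LTK_SLAVE in this tree) and looked up with master == false.
 */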
2940
2941 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
2942                              bool master)
2943 {
2944         struct smp_ltk *k;
2945
2946         list_for_each_entry(k, &hdev->long_term_keys, list) {
2947                 if (k->ediv != ediv || k->rand != rand)
2948                         continue;
2949
2950                 if (ltk_type_master(k->type) != master)
2951                         continue;
2952
2953                 return k;
2954         }
2955
2956         return NULL;
2957 }
2958
2959 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2960                                      u8 addr_type, bool master)
2961 {
2962         struct smp_ltk *k;
2963
2964         list_for_each_entry(k, &hdev->long_term_keys, list)
2965                 if (addr_type == k->bdaddr_type &&
2966                     bacmp(bdaddr, &k->bdaddr) == 0 &&
2967                     ltk_type_master(k->type) == master)
2968                         return k;
2969
2970         return NULL;
2971 }
2972
2973 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2974 {
2975         struct smp_irk *irk;
2976
2977         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2978                 if (!bacmp(&irk->rpa, rpa))
2979                         return irk;
2980         }
2981
2982         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2983                 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
2984                         bacpy(&irk->rpa, rpa);
2985                         return irk;
2986                 }
2987         }
2988
2989         return NULL;
2990 }
2991
2992 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2993                                      u8 addr_type)
2994 {
2995         struct smp_irk *irk;
2996
2997         /* Identity Address must be public or static random */
2998         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2999                 return NULL;
3000
3001         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3002                 if (addr_type == irk->addr_type &&
3003                     bacmp(bdaddr, &irk->bdaddr) == 0)
3004                         return irk;
3005         }
3006
3007         return NULL;
3008 }
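
/* Worked example for the identity address check above: a static random
 * address must have its two most significant bits set to 0b11, so
 * c4:xx:xx:xx:xx:xx (b[5] = 0xc4) is accepted, while a resolvable private
 * address such as 7b:xx:xx:xx:xx:xx (top bits 0b01) is rejected before
 * the list walk.
 */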
3009
3010 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
3011                                   bdaddr_t *bdaddr, u8 *val, u8 type,
3012                                   u8 pin_len, bool *persistent)
3013 {
3014         struct link_key *key, *old_key;
3015         u8 old_key_type;
3016
3017         old_key = hci_find_link_key(hdev, bdaddr);
3018         if (old_key) {
3019                 old_key_type = old_key->type;
3020                 key = old_key;
3021         } else {
3022                 old_key_type = conn ? conn->key_type : 0xff;
3023                 key = kzalloc(sizeof(*key), GFP_KERNEL);
3024                 if (!key)
3025                         return NULL;
3026                 list_add(&key->list, &hdev->link_keys);
3027         }
3028
3029         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
3030
3031         /* Some buggy controller combinations generate a changed
3032          * combination key for legacy pairing even when there's no
3033          * previous key */
3034         if (type == HCI_LK_CHANGED_COMBINATION &&
3035             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
3036                 type = HCI_LK_COMBINATION;
3037                 if (conn)
3038                         conn->key_type = type;
3039         }
3040
3041         bacpy(&key->bdaddr, bdaddr);
3042         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
3043         key->pin_len = pin_len;
3044
3045         if (type == HCI_LK_CHANGED_COMBINATION)
3046                 key->type = old_key_type;
3047         else
3048                 key->type = type;
3049
3050         if (persistent)
3051                 *persistent = hci_persistent_key(hdev, conn, type,
3052                                                  old_key_type);
3053
3054         return key;
3055 }
3056
3057 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3058                             u8 addr_type, u8 type, u8 authenticated,
3059                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
3060 {
3061         struct smp_ltk *key, *old_key;
3062         bool master = ltk_type_master(type);
3063
3064         old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
3065         if (old_key)
3066                 key = old_key;
3067         else {
3068                 key = kzalloc(sizeof(*key), GFP_KERNEL);
3069                 if (!key)
3070                         return NULL;
3071                 list_add(&key->list, &hdev->long_term_keys);
3072         }
3073
3074         bacpy(&key->bdaddr, bdaddr);
3075         key->bdaddr_type = addr_type;
3076         memcpy(key->val, tk, sizeof(key->val));
3077         key->authenticated = authenticated;
3078         key->ediv = ediv;
3079         key->rand = rand;
3080         key->enc_size = enc_size;
3081         key->type = type;
3082
3083         return key;
3084 }
3085
3086 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3087                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
3088 {
3089         struct smp_irk *irk;
3090
3091         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3092         if (!irk) {
3093                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3094                 if (!irk)
3095                         return NULL;
3096
3097                 bacpy(&irk->bdaddr, bdaddr);
3098                 irk->addr_type = addr_type;
3099
3100                 list_add(&irk->list, &hdev->identity_resolving_keys);
3101         }
3102
3103         memcpy(irk->val, val, 16);
3104         bacpy(&irk->rpa, rpa);
3105
3106         return irk;
3107 }
3108
3109 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3110 {
3111         struct link_key *key;
3112
3113         key = hci_find_link_key(hdev, bdaddr);
3114         if (!key)
3115                 return -ENOENT;
3116
3117         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3118
3119         list_del(&key->list);
3120         kfree(key);
3121
3122         return 0;
3123 }
3124
3125 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
3126 {
3127         struct smp_ltk *k, *tmp;
3128         int removed = 0;
3129
3130         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3131                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
3132                         continue;
3133
3134                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3135
3136                 list_del(&k->list);
3137                 kfree(k);
3138                 removed++;
3139         }
3140
3141         return removed ? 0 : -ENOENT;
3142 }
3143
3144 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3145 {
3146         struct smp_irk *k, *tmp;
3147
3148         list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3149                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3150                         continue;
3151
3152                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3153
3154                 list_del(&k->list);
3155                 kfree(k);
3156         }
3157 }
3158
3159 /* HCI command timer function */
3160 static void hci_cmd_timeout(struct work_struct *work)
3161 {
3162         struct hci_dev *hdev = container_of(work, struct hci_dev,
3163                                             cmd_timer.work);
3164
3165         if (hdev->sent_cmd) {
3166                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3167                 u16 opcode = __le16_to_cpu(sent->opcode);
3168
3169                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3170         } else {
3171                 BT_ERR("%s command tx timeout", hdev->name);
3172         }
3173
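        /* Restore the command credit so the work scheduled below can send
         * the next queued command, since the Command Complete/Status event
         * for the timed-out command will never arrive.
         */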
3174         atomic_set(&hdev->cmd_cnt, 1);
3175         queue_work(hdev->workqueue, &hdev->cmd_work);
3176 }
3177
3178 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
3179                                           bdaddr_t *bdaddr)
3180 {
3181         struct oob_data *data;
3182
3183         list_for_each_entry(data, &hdev->remote_oob_data, list)
3184                 if (bacmp(bdaddr, &data->bdaddr) == 0)
3185                         return data;
3186
3187         return NULL;
3188 }
3189
3190 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3191 {
3192         struct oob_data *data;
3193
3194         data = hci_find_remote_oob_data(hdev, bdaddr);
3195         if (!data)
3196                 return -ENOENT;
3197
3198         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3199
3200         list_del(&data->list);
3201         kfree(data);
3202
3203         return 0;
3204 }
3205
3206 void hci_remote_oob_data_clear(struct hci_dev *hdev)
3207 {
3208         struct oob_data *data, *n;
3209
3210         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3211                 list_del(&data->list);
3212                 kfree(data);
3213         }
3214 }
3215
3216 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3217                             u8 *hash, u8 *randomizer)
3218 {
3219         struct oob_data *data;
3220
3221         data = hci_find_remote_oob_data(hdev, bdaddr);
3222         if (!data) {
3223                 data = kmalloc(sizeof(*data), GFP_KERNEL);
3224                 if (!data)
3225                         return -ENOMEM;
3226
3227                 bacpy(&data->bdaddr, bdaddr);
3228                 list_add(&data->list, &hdev->remote_oob_data);
3229         }
3230
3231         memcpy(data->hash192, hash, sizeof(data->hash192));
3232         memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
3233
3234         memset(data->hash256, 0, sizeof(data->hash256));
3235         memset(data->randomizer256, 0, sizeof(data->randomizer256));
3236
3237         BT_DBG("%s for %pMR", hdev->name, bdaddr);
3238
3239         return 0;
3240 }
3241
3242 int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3243                                 u8 *hash192, u8 *randomizer192,
3244                                 u8 *hash256, u8 *randomizer256)
3245 {
3246         struct oob_data *data;
3247
3248         data = hci_find_remote_oob_data(hdev, bdaddr);
3249         if (!data) {
3250                 data = kmalloc(sizeof(*data), GFP_KERNEL);
3251                 if (!data)
3252                         return -ENOMEM;
3253
3254                 bacpy(&data->bdaddr, bdaddr);
3255                 list_add(&data->list, &hdev->remote_oob_data);
3256         }
3257
3258         memcpy(data->hash192, hash192, sizeof(data->hash192));
3259         memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3260
3261         memcpy(data->hash256, hash256, sizeof(data->hash256));
3262         memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3263
3264         BT_DBG("%s for %pMR", hdev->name, bdaddr);
3265
3266         return 0;
3267 }
3268
3269 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
3270                                          bdaddr_t *bdaddr, u8 type)
3271 {
3272         struct bdaddr_list *b;
3273
3274         list_for_each_entry(b, &hdev->blacklist, list) {
3275                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3276                         return b;
3277         }
3278
3279         return NULL;
3280 }
3281
3282 static void hci_blacklist_clear(struct hci_dev *hdev)
3283 {
3284         struct list_head *p, *n;
3285
3286         list_for_each_safe(p, n, &hdev->blacklist) {
3287                 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3288
3289                 list_del(p);
3290                 kfree(b);
3291         }
3292 }
3293
3294 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3295 {
3296         struct bdaddr_list *entry;
3297
3298         if (!bacmp(bdaddr, BDADDR_ANY))
3299                 return -EBADF;
3300
3301         if (hci_blacklist_lookup(hdev, bdaddr, type))
3302                 return -EEXIST;
3303
3304         entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3305         if (!entry)
3306                 return -ENOMEM;
3307
3308         bacpy(&entry->bdaddr, bdaddr);
3309         entry->bdaddr_type = type;
3310
3311         list_add(&entry->list, &hdev->blacklist);
3312
3313         return 0;
3314 }
3315
3316 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3317 {
3318         struct bdaddr_list *entry;
3319
3320         if (!bacmp(bdaddr, BDADDR_ANY)) {
3321                 hci_blacklist_clear(hdev);
3322                 return 0;
3323         }
3324
3325         entry = hci_blacklist_lookup(hdev, bdaddr, type);
3326         if (!entry)
3327                 return -ENOENT;
3328
3329         list_del(&entry->list);
3330         kfree(entry);
3331
3332         return 0;
3333 }
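
/* Flushing convention: passing BDADDR_ANY to hci_blacklist_del() clears
 * the whole list instead of failing. Illustrative sketch only:
 *
 *	hci_dev_lock(hdev);
 *	hci_blacklist_del(hdev, BDADDR_ANY, BDADDR_BREDR);
 *	hci_dev_unlock(hdev);
 */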
3334
3335 struct bdaddr_list *hci_white_list_lookup(struct hci_dev *hdev,
3336                                           bdaddr_t *bdaddr, u8 type)
3337 {
3338         struct bdaddr_list *b;
3339
3340         list_for_each_entry(b, &hdev->le_white_list, list) {
3341                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3342                         return b;
3343         }
3344
3345         return NULL;
3346 }
3347
3348 void hci_white_list_clear(struct hci_dev *hdev)
3349 {
3350         struct list_head *p, *n;
3351
3352         list_for_each_safe(p, n, &hdev->le_white_list) {
3353                 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3354
3355                 list_del(p);
3356                 kfree(b);
3357         }
3358 }
3359
3360 int hci_white_list_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3361 {
3362         struct bdaddr_list *entry;
3363
3364         if (!bacmp(bdaddr, BDADDR_ANY))
3365                 return -EBADF;
3366
3367         entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3368         if (!entry)
3369                 return -ENOMEM;
3370
3371         bacpy(&entry->bdaddr, bdaddr);
3372         entry->bdaddr_type = type;
3373
3374         list_add(&entry->list, &hdev->le_white_list);
3375
3376         return 0;
3377 }
3378
3379 int hci_white_list_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3380 {
3381         struct bdaddr_list *entry;
3382
3383         if (!bacmp(bdaddr, BDADDR_ANY))
3384                 return -EBADF;
3385
3386         entry = hci_white_list_lookup(hdev, bdaddr, type);
3387         if (!entry)
3388                 return -ENOENT;
3389
3390         list_del(&entry->list);
3391         kfree(entry);
3392
3393         return 0;
3394 }
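
/* Unlike the blacklist helper, hci_white_list_del() rejects BDADDR_ANY
 * with -EBADF; the LE white list is flushed explicitly through
 * hci_white_list_clear() instead.
 */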
3395
3396 /* This function requires the caller holds hdev->lock */
3397 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3398                                                bdaddr_t *addr, u8 addr_type)
3399 {
3400         struct hci_conn_params *params;
3401
3402         /* The conn params list only contains identity addresses */
3403         if (!hci_is_identity_address(addr, addr_type))
3404                 return NULL;
3405
3406         list_for_each_entry(params, &hdev->le_conn_params, list) {
3407                 if (bacmp(&params->addr, addr) == 0 &&
3408                     params->addr_type == addr_type) {
3409                         return params;
3410                 }
3411         }
3412
3413         return NULL;
3414 }
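
/* Because the list is keyed by identity addresses only, looking up a
 * resolvable private address deliberately returns NULL; an RPA has to
 * be resolved to its identity address before this lookup can succeed.
 */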
3415
3416 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3417 {
3418         struct hci_conn *conn;
3419
3420         conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3421         if (!conn)
3422                 return false;
3423
3424         if (conn->dst_type != type)
3425                 return false;
3426
3427         if (conn->state != BT_CONNECTED)
3428                 return false;
3429
3430         return true;
3431 }
3432
3433 /* This function requires the caller holds hdev->lock */
3434 struct hci_conn_params *hci_pend_le_conn_lookup(struct hci_dev *hdev,
3435                                                 bdaddr_t *addr, u8 addr_type)
3436 {
3437         struct hci_conn_params *param;
3438
3439         /* The list only contains identity addresses */
3440         if (!hci_is_identity_address(addr, addr_type))
3441                 return NULL;
3442
3443         list_for_each_entry(param, &hdev->pend_le_conns, pend_le_conn) {
3444                 if (bacmp(&param->addr, addr) == 0 &&
3445                     param->addr_type == addr_type)
3446                         return param;
3447         }
3448
3449         return NULL;
3450 }
3451
3452 /* This function requires the caller holds hdev->lock */
3453 void hci_pend_le_conn_add(struct hci_dev *hdev, struct hci_conn_params *params)
3454 {
3455         list_del_init(&params->pend_le_conn);
3456         list_add(&params->pend_le_conn, &hdev->pend_le_conns);
3457
3458         BT_DBG("addr %pMR (type %u)", &params->addr, params->addr_type);
3459
3460         hci_update_background_scan(hdev);
3461 }
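
/* The list_del_init()/list_add() pair above makes the add idempotent:
 * re-adding params that are already pending just re-queues them, and
 * hci_pend_le_conn_del() below is safe even for entries that were
 * never queued, since pend_le_conn starts out as an empty list head
 * (see hci_conn_params_add()).
 */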
3462
3463 /* This function requires the caller holds hdev->lock */
3464 void hci_pend_le_conn_del(struct hci_dev *hdev, struct hci_conn_params *params)
3465 {
3466         list_del_init(&params->pend_le_conn);
3467
3468         BT_DBG("addr %pMR (type %u)", &params->addr, params->addr_type);
3469
3470         hci_update_background_scan(hdev);
3471 }
3472
3473 /* This function requires the caller holds hdev->lock */
3474 void hci_pend_le_conns_clear(struct hci_dev *hdev)
3475 {
3476         while (!list_empty(&hdev->pend_le_conns))
3477                 list_del_init(hdev->pend_le_conns.next);
3478
3479         BT_DBG("All LE pending connections cleared");
3480
3481         hci_update_background_scan(hdev);
3482 }
3483
3484 /* This function requires the caller holds hdev->lock */
3485 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3486                                             bdaddr_t *addr, u8 addr_type)
3487 {
3488         struct hci_conn_params *params;
3489
3490         if (!hci_is_identity_address(addr, addr_type))
3491                 return NULL;
3492
3493         params = hci_conn_params_lookup(hdev, addr, addr_type);
3494         if (params)
3495                 return params;
3496
3497         params = kzalloc(sizeof(*params), GFP_KERNEL);
3498         if (!params) {
3499                 BT_ERR("Out of memory");
3500                 return NULL;
3501         }
3502
3503         bacpy(&params->addr, addr);
3504         params->addr_type = addr_type;
3505
3506         list_add(&params->list, &hdev->le_conn_params);
3507         INIT_LIST_HEAD(&params->pend_le_conn);
3508
3509         params->conn_min_interval = hdev->le_conn_min_interval;
3510         params->conn_max_interval = hdev->le_conn_max_interval;
3511         params->conn_latency = hdev->le_conn_latency;
3512         params->supervision_timeout = hdev->le_supv_timeout;
3513         params->auto_connect = HCI_AUTO_CONN_DISABLED;
3514
3515         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3516
3517         return params;
3518 }
3519
3520 /* This function requires the caller holds hdev->lock */
3521 int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3522                         u8 auto_connect)
3523 {
3524         struct hci_conn_params *params;
3525
3526         params = hci_conn_params_add(hdev, addr, addr_type);
3527         if (!params)
3528                 return -EIO;
3529
3530         if (params->auto_connect == HCI_AUTO_CONN_REPORT &&
3531             auto_connect != HCI_AUTO_CONN_REPORT)
3532                 hdev->pend_le_reports--;
3533
3534         switch (auto_connect) {
3535         case HCI_AUTO_CONN_DISABLED:
3536         case HCI_AUTO_CONN_LINK_LOSS:
3537                 hci_pend_le_conn_del(hdev, params);
3538                 break;
3539         case HCI_AUTO_CONN_REPORT:
3540                 if (params->auto_connect != HCI_AUTO_CONN_REPORT)
3541                         hdev->pend_le_reports++;
3542                 hci_pend_le_conn_del(hdev, params);
3543                 break;
3544         case HCI_AUTO_CONN_ALWAYS:
3545                 if (!is_connected(hdev, addr, addr_type))
3546                         hci_pend_le_conn_add(hdev, params);
3547                 break;
3548         }
3549
3550         params->auto_connect = auto_connect;
3551
3552         BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3553                auto_connect);
3554
3555         return 0;
3556 }
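
/* Illustrative caller (sketch; assumes hdev->lock is held and that
 * bdaddr is an identity address):
 *
 *	err = hci_conn_params_set(hdev, &bdaddr, ADDR_LE_DEV_PUBLIC,
 *				  HCI_AUTO_CONN_ALWAYS);
 *
 * A return value of -EIO means no parameter entry could be created,
 * either because the address is not an identity address or because
 * the allocation failed.
 */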
3557
3558 /* This function requires the caller holds hdev->lock */
3559 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3560 {
3561         struct hci_conn_params *params;
3562
3563         params = hci_conn_params_lookup(hdev, addr, addr_type);
3564         if (!params)
3565                 return;
3566
3567         if (params->auto_connect == HCI_AUTO_CONN_REPORT)
3568                 hdev->pend_le_reports--;
3569
3570         hci_pend_le_conn_del(hdev, params);
3571
3572         list_del(&params->list);
3573         kfree(params);
3574
3575         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3576 }
3577
3578 /* This function requires the caller holds hdev->lock */
3579 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3580 {
3581         struct hci_conn_params *params, *tmp;
3582
3583         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3584                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3585                         continue;
3586                 list_del(&params->list);
3587                 kfree(params);
3588         }
3589
3590         BT_DBG("All disabled LE connection parameters were removed");
3591 }
3592
3593 /* This function requires the caller holds hdev->lock */
3594 void hci_conn_params_clear_enabled(struct hci_dev *hdev)
3595 {
3596         struct hci_conn_params *params, *tmp;
3597
3598         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3599                 if (params->auto_connect == HCI_AUTO_CONN_DISABLED)
3600                         continue;
3601                 if (params->auto_connect == HCI_AUTO_CONN_REPORT)
3602                         hdev->pend_le_reports--;
3603                 list_del(&params->list);
3604                 kfree(params);
3605         }
3606
3607         hci_pend_le_conns_clear(hdev);
3608
3609         BT_DBG("All enabled LE connection parameters were removed");
3610 }
3611
3612 /* This function requires the caller holds hdev->lock */
3613 void hci_conn_params_clear_all(struct hci_dev *hdev)
3614 {
3615         struct hci_conn_params *params, *tmp;
3616
3617         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3618                 list_del(&params->list);
3619                 kfree(params);
3620         }
3621
3622         hci_pend_le_conns_clear(hdev);
3623
3624         BT_DBG("All LE connection parameters were removed");
3625 }
3626
3627 static void inquiry_complete(struct hci_dev *hdev, u8 status)
3628 {
3629         if (status) {
3630                 BT_ERR("Failed to start inquiry: status %d", status);
3631
3632                 hci_dev_lock(hdev);
3633                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3634                 hci_dev_unlock(hdev);
3635                 return;
3636         }
3637 }
3638
3639 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
3640 {
3641         /* General inquiry access code (GIAC) */
3642         u8 lap[3] = { 0x33, 0x8b, 0x9e };
3643         struct hci_request req;
3644         struct hci_cp_inquiry cp;
3645         int err;
3646
3647         if (status) {
3648                 BT_ERR("Failed to disable LE scanning: status %d", status);
3649                 return;
3650         }
3651
3652         switch (hdev->discovery.type) {
3653         case DISCOV_TYPE_LE:
3654                 hci_dev_lock(hdev);
3655                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3656                 hci_dev_unlock(hdev);
3657                 break;
3658
3659         case DISCOV_TYPE_INTERLEAVED:
3660                 hci_req_init(&req, hdev);
3661
3662                 memset(&cp, 0, sizeof(cp));
3663                 memcpy(&cp.lap, lap, sizeof(cp.lap));
3664                 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3665                 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3666
3667                 hci_dev_lock(hdev);
3668
3669                 hci_inquiry_cache_flush(hdev);
3670
3671                 err = hci_req_run(&req, inquiry_complete);
3672                 if (err) {
3673                         BT_ERR("Inquiry request failed: err %d", err);
3674                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3675                 }
3676
3677                 hci_dev_unlock(hdev);
3678                 break;
3679         }
3680 }
3681
3682 static void le_scan_disable_work(struct work_struct *work)
3683 {
3684         struct hci_dev *hdev = container_of(work, struct hci_dev,
3685                                             le_scan_disable.work);
3686         struct hci_request req;
3687         int err;
3688
3689         BT_DBG("%s", hdev->name);
3690
3691         hci_req_init(&req, hdev);
3692
3693         hci_req_add_le_scan_disable(&req);
3694
3695         err = hci_req_run(&req, le_scan_disable_work_complete);
3696         if (err)
3697                 BT_ERR("Disable LE scanning request failed: err %d", err);
3698 }
3699
3700 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3701 {
3702         struct hci_dev *hdev = req->hdev;
3703
3704         /* If we're advertising or initiating an LE connection we can't
3705          * go ahead and change the random address at this time. This is
3706          * because the eventual initiator address used for the
3707          * subsequently created connection will be undefined (some
3708          * controllers use the new address and others the one we had
3709          * when the operation started).
3710          *
3711          * In this kind of scenario skip the update and let the random
3712          * address be updated at the next cycle.
3713          */
3714         if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
3715             hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3716                 BT_DBG("Deferring random address update");
3717                 return;
3718         }
3719
3720         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3721 }
3722
3723 int hci_update_random_address(struct hci_request *req, bool require_privacy,
3724                               u8 *own_addr_type)
3725 {
3726         struct hci_dev *hdev = req->hdev;
3727         int err;
3728
3729         /* If privacy is enabled use a resolvable private address. If
3730          * the current RPA has expired or an address other than the
3731          * current RPA is in use, generate a new one.
3732          */
3733         if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
3734                 int to;
3735
3736                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3737
3738                 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
3739                     !bacmp(&hdev->random_addr, &hdev->rpa))
3740                         return 0;
3741
3742                 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
3743                 if (err < 0) {
3744                         BT_ERR("%s failed to generate new RPA", hdev->name);
3745                         return err;
3746                 }
3747
3748                 set_random_addr(req, &hdev->rpa);
3749
3750                 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3751                 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3752
3753                 return 0;
3754         }
3755
3756         /* In case privacy is required but a resolvable private address
3757          * is not available, use an unresolvable private address. This
3758          * is useful for active scanning and non-connectable advertising.
3759          */
3760         if (require_privacy) {
3761                 bdaddr_t urpa;
3762
3763                 get_random_bytes(&urpa, 6);
3764                 urpa.b[5] &= 0x3f;      /* Clear two most significant bits */
3765
3766                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3767                 set_random_addr(req, &urpa);
3768                 return 0;
3769         }
3770
3771         /* If forcing static address is in use or there is no public
3772          * address, use the static address as the random address (but
3773          * skip the HCI command if the current random address is already
3774          * the static one).
3775          */
3776         if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3777             !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3778                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3779                 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3780                         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3781                                     &hdev->static_addr);
3782                 return 0;
3783         }
3784
3785         /* Neither privacy nor static address is being used so use a
3786          * public address.
3787          */
3788         *own_addr_type = ADDR_LE_DEV_PUBLIC;
3789
3790         return 0;
3791 }
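
/* Summary of the decision above: a resolvable private address when
 * HCI_PRIVACY is set, an unresolvable private address when privacy is
 * required without it, the static random address when it is forced or
 * no public address exists, and the public address otherwise.
 */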
3792
3793 /* Copy the Identity Address of the controller.
3794  *
3795  * If the controller has a public BD_ADDR, then by default use that one.
3796  * If this is an LE-only controller without a public address, default to
3797  * the static random address.
3798  *
3799  * For debugging purposes it is possible to force controllers with a
3800  * public address to use the static random address instead.
3801  */
3802 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3803                                u8 *bdaddr_type)
3804 {
3805         if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3806             !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3807                 bacpy(bdaddr, &hdev->static_addr);
3808                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3809         } else {
3810                 bacpy(bdaddr, &hdev->bdaddr);
3811                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3812         }
3813 }
3814
3815 /* Alloc HCI device */
3816 struct hci_dev *hci_alloc_dev(void)
3817 {
3818         struct hci_dev *hdev;
3819
3820         hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3821         if (!hdev)
3822                 return NULL;
3823
3824         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3825         hdev->esco_type = (ESCO_HV1);
3826         hdev->link_mode = (HCI_LM_ACCEPT);
3827         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
3828         hdev->io_capability = 0x03;     /* No Input No Output */
3829         hdev->manufacturer = 0xffff;    /* Default to internal use */
3830         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3831         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3832
3833         hdev->sniff_max_interval = 800;
3834         hdev->sniff_min_interval = 80;
3835
3836         hdev->le_adv_channel_map = 0x07;
3837         hdev->le_scan_interval = 0x0060;
3838         hdev->le_scan_window = 0x0030;
3839         hdev->le_conn_min_interval = 0x0028;
3840         hdev->le_conn_max_interval = 0x0038;
3841         hdev->le_conn_latency = 0x0000;
3842         hdev->le_supv_timeout = 0x002a;
3843
3844         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3845         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3846         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3847         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3848
3849         mutex_init(&hdev->lock);
3850         mutex_init(&hdev->req_lock);
3851
3852         INIT_LIST_HEAD(&hdev->mgmt_pending);
3853         INIT_LIST_HEAD(&hdev->blacklist);
3854         INIT_LIST_HEAD(&hdev->uuids);
3855         INIT_LIST_HEAD(&hdev->link_keys);
3856         INIT_LIST_HEAD(&hdev->long_term_keys);
3857         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3858         INIT_LIST_HEAD(&hdev->remote_oob_data);
3859         INIT_LIST_HEAD(&hdev->le_white_list);
3860         INIT_LIST_HEAD(&hdev->le_conn_params);
3861         INIT_LIST_HEAD(&hdev->pend_le_conns);
3862         INIT_LIST_HEAD(&hdev->conn_hash.list);
3863
3864         INIT_WORK(&hdev->rx_work, hci_rx_work);
3865         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3866         INIT_WORK(&hdev->tx_work, hci_tx_work);
3867         INIT_WORK(&hdev->power_on, hci_power_on);
3868
3869         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3870         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3871         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3872
3873         skb_queue_head_init(&hdev->rx_q);
3874         skb_queue_head_init(&hdev->cmd_q);
3875         skb_queue_head_init(&hdev->raw_q);
3876
3877         init_waitqueue_head(&hdev->req_wait_q);
3878
3879         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3880
3881         hci_init_sysfs(hdev);
3882         discovery_init(hdev);
3883
3884         return hdev;
3885 }
3886 EXPORT_SYMBOL(hci_alloc_dev);
3887
3888 /* Free HCI device */
3889 void hci_free_dev(struct hci_dev *hdev)
3890 {
3891         /* Will be freed via the device release function */
3892         put_device(&hdev->dev);
3893 }
3894 EXPORT_SYMBOL(hci_free_dev);
3895
3896 /* Register HCI device */
3897 int hci_register_dev(struct hci_dev *hdev)
3898 {
3899         int id, error;
3900
3901         if (!hdev->open || !hdev->close)
3902                 return -EINVAL;
3903
3904         /* Do not allow HCI_AMP devices to register at index 0,
3905          * so the index can be used as the AMP controller ID.
3906          */
3907         switch (hdev->dev_type) {
3908         case HCI_BREDR:
3909                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3910                 break;
3911         case HCI_AMP:
3912                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3913                 break;
3914         default:
3915                 return -EINVAL;
3916         }
3917
3918         if (id < 0)
3919                 return id;
3920
3921         sprintf(hdev->name, "hci%d", id);
3922         hdev->id = id;
3923
3924         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3925
3926         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3927                                           WQ_MEM_RECLAIM, 1, hdev->name);
3928         if (!hdev->workqueue) {
3929                 error = -ENOMEM;
3930                 goto err;
3931         }
3932
3933         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3934                                               WQ_MEM_RECLAIM, 1, hdev->name);
3935         if (!hdev->req_workqueue) {
3936                 destroy_workqueue(hdev->workqueue);
3937                 error = -ENOMEM;
3938                 goto err;
3939         }
3940
3941         if (!IS_ERR_OR_NULL(bt_debugfs))
3942                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3943
3944         dev_set_name(&hdev->dev, "%s", hdev->name);
3945
3946         hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3947                                                CRYPTO_ALG_ASYNC);
3948         if (IS_ERR(hdev->tfm_aes)) {
3949                 BT_ERR("Unable to create crypto context");
3950                 error = PTR_ERR(hdev->tfm_aes);
3951                 hdev->tfm_aes = NULL;
3952                 goto err_wqueue;
3953         }
3954
3955         error = device_add(&hdev->dev);
3956         if (error < 0)
3957                 goto err_tfm;
3958
3959         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3960                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3961                                     hdev);
3962         if (hdev->rfkill) {
3963                 if (rfkill_register(hdev->rfkill) < 0) {
3964                         rfkill_destroy(hdev->rfkill);
3965                         hdev->rfkill = NULL;
3966                 }
3967         }
3968
3969         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3970                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3971
3972         set_bit(HCI_SETUP, &hdev->dev_flags);
3973         set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3974
3975         if (hdev->dev_type == HCI_BREDR) {
3976                 /* Assume BR/EDR support until proven otherwise (such as
3977                  * through reading supported features during init).
3978                  */
3979                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3980         }
3981
3982         write_lock(&hci_dev_list_lock);
3983         list_add(&hdev->list, &hci_dev_list);
3984         write_unlock(&hci_dev_list_lock);
3985
3986         /* Devices that are marked for raw-only usage are unconfigured
3987          * and should not be included in normal operation.
3988          */
3989         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3990                 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
3991
3992         hci_notify(hdev, HCI_DEV_REG);
3993         hci_dev_hold(hdev);
3994
3995         queue_work(hdev->req_workqueue, &hdev->power_on);
3996
3997         return id;
3998
3999 err_tfm:
4000         crypto_free_blkcipher(hdev->tfm_aes);
4001 err_wqueue:
4002         destroy_workqueue(hdev->workqueue);
4003         destroy_workqueue(hdev->req_workqueue);
4004 err:
4005         ida_simple_remove(&hci_index_ida, hdev->id);
4006
4007         return error;
4008 }
4009 EXPORT_SYMBOL(hci_register_dev);
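
/* Minimal driver-side sketch (illustrative; my_open, my_close and
 * my_send are hypothetical driver callbacks):
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *
 *	if (!hdev)
 *		return -ENOMEM;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */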
4010
4011 /* Unregister HCI device */
4012 void hci_unregister_dev(struct hci_dev *hdev)
4013 {
4014         int i, id;
4015
4016         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4017
4018         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4019
4020         id = hdev->id;
4021
4022         write_lock(&hci_dev_list_lock);
4023         list_del(&hdev->list);
4024         write_unlock(&hci_dev_list_lock);
4025
4026         hci_dev_do_close(hdev);
4027
4028         for (i = 0; i < NUM_REASSEMBLY; i++)
4029                 kfree_skb(hdev->reassembly[i]);
4030
4031         cancel_work_sync(&hdev->power_on);
4032
4033         if (!test_bit(HCI_INIT, &hdev->flags) &&
4034             !test_bit(HCI_SETUP, &hdev->dev_flags)) {
4035                 hci_dev_lock(hdev);
4036                 mgmt_index_removed(hdev);
4037                 hci_dev_unlock(hdev);
4038         }
4039
4040         /* mgmt_index_removed should take care of emptying the
4041          * pending list */
4042         BUG_ON(!list_empty(&hdev->mgmt_pending));
4043
4044         hci_notify(hdev, HCI_DEV_UNREG);
4045
4046         if (hdev->rfkill) {
4047                 rfkill_unregister(hdev->rfkill);
4048                 rfkill_destroy(hdev->rfkill);
4049         }
4050
4051         if (hdev->tfm_aes)
4052                 crypto_free_blkcipher(hdev->tfm_aes);
4053
4054         device_del(&hdev->dev);
4055
4056         debugfs_remove_recursive(hdev->debugfs);
4057
4058         destroy_workqueue(hdev->workqueue);
4059         destroy_workqueue(hdev->req_workqueue);
4060
4061         hci_dev_lock(hdev);
4062         hci_blacklist_clear(hdev);
4063         hci_uuids_clear(hdev);
4064         hci_link_keys_clear(hdev);
4065         hci_smp_ltks_clear(hdev);
4066         hci_smp_irks_clear(hdev);
4067         hci_remote_oob_data_clear(hdev);
4068         hci_white_list_clear(hdev);
4069         hci_conn_params_clear_all(hdev);
4070         hci_dev_unlock(hdev);
4071
4072         hci_dev_put(hdev);
4073
4074         ida_simple_remove(&hci_index_ida, id);
4075 }
4076 EXPORT_SYMBOL(hci_unregister_dev);
4077
4078 /* Suspend HCI device */
4079 int hci_suspend_dev(struct hci_dev *hdev)
4080 {
4081         hci_notify(hdev, HCI_DEV_SUSPEND);
4082         return 0;
4083 }
4084 EXPORT_SYMBOL(hci_suspend_dev);
4085
4086 /* Resume HCI device */
4087 int hci_resume_dev(struct hci_dev *hdev)
4088 {
4089         hci_notify(hdev, HCI_DEV_RESUME);
4090         return 0;
4091 }
4092 EXPORT_SYMBOL(hci_resume_dev);
4093
4094 /* Receive frame from HCI drivers */
4095 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4096 {
4097         if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
4098                       && !test_bit(HCI_INIT, &hdev->flags))) {
4099                 kfree_skb(skb);
4100                 return -ENXIO;
4101         }
4102
4103         /* Incoming skb */
4104         bt_cb(skb)->incoming = 1;
4105
4106         /* Time stamp */
4107         __net_timestamp(skb);
4108
4109         skb_queue_tail(&hdev->rx_q, skb);
4110         queue_work(hdev->workqueue, &hdev->rx_work);
4111
4112         return 0;
4113 }
4114 EXPORT_SYMBOL(hci_recv_frame);
4115
4116 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
4117                           int count, __u8 index)
4118 {
4119         int len = 0;
4120         int hlen = 0;
4121         int remain = count;
4122         struct sk_buff *skb;
4123         struct bt_skb_cb *scb;
4124
4125         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
4126             index >= NUM_REASSEMBLY)
4127                 return -EILSEQ;
4128
4129         skb = hdev->reassembly[index];
4130
4131         if (!skb) {
4132                 switch (type) {
4133                 case HCI_ACLDATA_PKT:
4134                         len = HCI_MAX_FRAME_SIZE;
4135                         hlen = HCI_ACL_HDR_SIZE;
4136                         break;
4137                 case HCI_EVENT_PKT:
4138                         len = HCI_MAX_EVENT_SIZE;
4139                         hlen = HCI_EVENT_HDR_SIZE;
4140                         break;
4141                 case HCI_SCODATA_PKT:
4142                         len = HCI_MAX_SCO_SIZE;
4143                         hlen = HCI_SCO_HDR_SIZE;
4144                         break;
4145                 }
4146
4147                 skb = bt_skb_alloc(len, GFP_ATOMIC);
4148                 if (!skb)
4149                         return -ENOMEM;
4150
4151                 scb = (void *) skb->cb;
4152                 scb->expect = hlen;
4153                 scb->pkt_type = type;
4154
4155                 hdev->reassembly[index] = skb;
4156         }
4157
4158         while (count) {
4159                 scb = (void *) skb->cb;
4160                 len = min_t(uint, scb->expect, count);
4161
4162                 memcpy(skb_put(skb, len), data, len);
4163
4164                 count -= len;
4165                 data += len;
4166                 scb->expect -= len;
4167                 remain = count;
4168
4169                 switch (type) {
4170                 case HCI_EVENT_PKT:
4171                         if (skb->len == HCI_EVENT_HDR_SIZE) {
4172                                 struct hci_event_hdr *h = hci_event_hdr(skb);
4173                                 scb->expect = h->plen;
4174
4175                                 if (skb_tailroom(skb) < scb->expect) {
4176                                         kfree_skb(skb);
4177                                         hdev->reassembly[index] = NULL;
4178                                         return -ENOMEM;
4179                                 }
4180                         }
4181                         break;
4182
4183                 case HCI_ACLDATA_PKT:
4184                         if (skb->len == HCI_ACL_HDR_SIZE) {
4185                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4186                                 scb->expect = __le16_to_cpu(h->dlen);
4187
4188                                 if (skb_tailroom(skb) < scb->expect) {
4189                                         kfree_skb(skb);
4190                                         hdev->reassembly[index] = NULL;
4191                                         return -ENOMEM;
4192                                 }
4193                         }
4194                         break;
4195
4196                 case HCI_SCODATA_PKT:
4197                         if (skb->len == HCI_SCO_HDR_SIZE) {
4198                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4199                                 scb->expect = h->dlen;
4200
4201                                 if (skb_tailroom(skb) < scb->expect) {
4202                                         kfree_skb(skb);
4203                                         hdev->reassembly[index] = NULL;
4204                                         return -ENOMEM;
4205                                 }
4206                         }
4207                         break;
4208                 }
4209
4210                 if (scb->expect == 0) {
4211                         /* Complete frame */
4212
4213                         bt_cb(skb)->pkt_type = type;
4214                         hci_recv_frame(hdev, skb);
4215
4216                         hdev->reassembly[index] = NULL;
4217                         return remain;
4218                 }
4219         }
4220
4221         return remain;
4222 }
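
/* The reassembler works in two phases per packet: it first collects
 * exactly the header (scb->expect starts at the header size), then
 * reads the payload length from that header and collects the payload.
 * A positive return value is the number of input bytes left over after
 * a complete frame has been handed to hci_recv_frame().
 */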
4223
4224 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4225 {
4226         int rem = 0;
4227
4228         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4229                 return -EILSEQ;
4230
4231         while (count) {
4232                 rem = hci_reassembly(hdev, type, data, count, type - 1);
4233                 if (rem < 0)
4234                         return rem;
4235
4236                 data += (count - rem);
4237                 count = rem;
4238         }
4239
4240         return rem;
4241 }
4242 EXPORT_SYMBOL(hci_recv_fragment);
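
/* Driver-side sketch (illustrative): a transport that already knows the
 * packet type can feed raw bytes straight to the reassembler and only
 * needs to check for errors:
 *
 *	if (hci_recv_fragment(hdev, HCI_EVENT_PKT, buf, len) < 0)
 *		BT_ERR("%s corrupted event packet", hdev->name);
 */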
4243
4244 #define STREAM_REASSEMBLY 0
4245
4246 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4247 {
4248         int type;
4249         int rem = 0;
4250
4251         while (count) {
4252                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4253
4254                 if (!skb) {
4255                         struct { char type; } *pkt;
4256
4257                         /* Start of the frame */
4258                         pkt = data;
4259                         type = pkt->type;
4260
4261                         data++;
4262                         count--;
4263                 } else
4264                         type = bt_cb(skb)->pkt_type;
4265
4266                 rem = hci_reassembly(hdev, type, data, count,
4267                                      STREAM_REASSEMBLY);
4268                 if (rem < 0)
4269                         return rem;
4270
4271                 data += (count - rem);
4272                 count = rem;
4273         }
4274
4275         return rem;
4276 }
4277 EXPORT_SYMBOL(hci_recv_stream_fragment);
4278
4279 /* ---- Interface to upper protocols ---- */
4280
4281 int hci_register_cb(struct hci_cb *cb)
4282 {
4283         BT_DBG("%p name %s", cb, cb->name);
4284
4285         write_lock(&hci_cb_list_lock);
4286         list_add(&cb->list, &hci_cb_list);
4287         write_unlock(&hci_cb_list_lock);
4288
4289         return 0;
4290 }
4291 EXPORT_SYMBOL(hci_register_cb);
4292
4293 int hci_unregister_cb(struct hci_cb *cb)
4294 {
4295         BT_DBG("%p name %s", cb, cb->name);
4296
4297         write_lock(&hci_cb_list_lock);
4298         list_del(&cb->list);
4299         write_unlock(&hci_cb_list_lock);
4300
4301         return 0;
4302 }
4303 EXPORT_SYMBOL(hci_unregister_cb);
4304
4305 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4306 {
4307         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
4308
4309         /* Time stamp */
4310         __net_timestamp(skb);
4311
4312         /* Send copy to monitor */
4313         hci_send_to_monitor(hdev, skb);
4314
4315         if (atomic_read(&hdev->promisc)) {
4316                 /* Send copy to the sockets */
4317                 hci_send_to_sock(hdev, skb);
4318         }
4319
4320         /* Get rid of skb owner, prior to sending to the driver. */
4321         skb_orphan(skb);
4322
4323         if (hdev->send(hdev, skb) < 0)
4324                 BT_ERR("%s sending frame failed", hdev->name);
4325 }
4326
4327 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4328 {
4329         skb_queue_head_init(&req->cmd_q);
4330         req->hdev = hdev;
4331         req->err = 0;
4332 }
4333
4334 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4335 {
4336         struct hci_dev *hdev = req->hdev;
4337         struct sk_buff *skb;
4338         unsigned long flags;
4339
4340         BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4341
4342         /* If an error occurred during request building, remove all HCI
4343          * commands queued on the HCI request queue.
4344          */
4345         if (req->err) {
4346                 skb_queue_purge(&req->cmd_q);
4347                 return req->err;
4348         }
4349
4350         /* Do not allow empty requests */
4351         if (skb_queue_empty(&req->cmd_q))
4352                 return -ENODATA;
4353
4354         skb = skb_peek_tail(&req->cmd_q);
4355         bt_cb(skb)->req.complete = complete;
4356
4357         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4358         skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4359         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4360
4361         queue_work(hdev->workqueue, &hdev->cmd_work);
4362
4363         return 0;
4364 }
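
/* Typical request usage (sketch; read_name_complete stands in for a
 * hypothetical hci_req_complete_t callback):
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
 *	err = hci_req_run(&req, read_name_complete);
 */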
4365
4366 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
4367                                        u32 plen, const void *param)
4368 {
4369         int len = HCI_COMMAND_HDR_SIZE + plen;
4370         struct hci_command_hdr *hdr;
4371         struct sk_buff *skb;
4372
4373         skb = bt_skb_alloc(len, GFP_ATOMIC);
4374         if (!skb)
4375                 return NULL;
4376
4377         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
4378         hdr->opcode = cpu_to_le16(opcode);
4379         hdr->plen   = plen;
4380
4381         if (plen)
4382                 memcpy(skb_put(skb, plen), param, plen);
4383
4384         BT_DBG("skb len %d", skb->len);
4385
4386         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
4387
4388         return skb;
4389 }
4390
4391 /* Send HCI command */
4392 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4393                  const void *param)
4394 {
4395         struct sk_buff *skb;
4396
4397         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4398
4399         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4400         if (!skb) {
4401                 BT_ERR("%s no memory for command", hdev->name);
4402                 return -ENOMEM;
4403         }
4404
4405         /* Stand-alone HCI commands must be flagged as
4406          * single-command requests.
4407          */
4408         bt_cb(skb)->req.start = true;
4409
4410         skb_queue_tail(&hdev->cmd_q, skb);
4411         queue_work(hdev->workqueue, &hdev->cmd_work);
4412
4413         return 0;
4414 }
4415
4416 /* Queue a command to an asynchronous HCI request */
4417 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4418                     const void *param, u8 event)
4419 {
4420         struct hci_dev *hdev = req->hdev;
4421         struct sk_buff *skb;
4422
4423         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4424
4425         /* If an error occurred during request building, there is no point in
4426          * queueing the HCI command. We can simply return.
4427          */
4428         if (req->err)
4429                 return;
4430
4431         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4432         if (!skb) {
4433                 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4434                        hdev->name, opcode);
4435                 req->err = -ENOMEM;
4436                 return;
4437         }
4438
4439         if (skb_queue_empty(&req->cmd_q))
4440                 bt_cb(skb)->req.start = true;
4441
4442         bt_cb(skb)->req.event = event;
4443
4444         skb_queue_tail(&req->cmd_q, skb);
4445 }
4446
4447 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4448                  const void *param)
4449 {
4450         hci_req_add_ev(req, opcode, plen, param, 0);
4451 }
4452
4453 /* Get data from the previously sent command */
4454 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4455 {
4456         struct hci_command_hdr *hdr;
4457
4458         if (!hdev->sent_cmd)
4459                 return NULL;
4460
4461         hdr = (void *) hdev->sent_cmd->data;
4462
4463         if (hdr->opcode != cpu_to_le16(opcode))
4464                 return NULL;
4465
4466         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4467
4468         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4469 }
4470
4471 /* Send ACL data */
4472 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4473 {
4474         struct hci_acl_hdr *hdr;
4475         int len = skb->len;
4476
4477         skb_push(skb, HCI_ACL_HDR_SIZE);
4478         skb_reset_transport_header(skb);
4479         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4480         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4481         hdr->dlen   = cpu_to_le16(len);
4482 }
4483
4484 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4485                           struct sk_buff *skb, __u16 flags)
4486 {
4487         struct hci_conn *conn = chan->conn;
4488         struct hci_dev *hdev = conn->hdev;
4489         struct sk_buff *list;
4490
4491         skb->len = skb_headlen(skb);
4492         skb->data_len = 0;
4493
4494         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4495
4496         switch (hdev->dev_type) {
4497         case HCI_BREDR:
4498                 hci_add_acl_hdr(skb, conn->handle, flags);
4499                 break;
4500         case HCI_AMP:
4501                 hci_add_acl_hdr(skb, chan->handle, flags);
4502                 break;
4503         default:
4504                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4505                 return;
4506         }
4507
4508         list = skb_shinfo(skb)->frag_list;
4509         if (!list) {
4510                 /* Non-fragmented */
4511                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4512
4513                 skb_queue_tail(queue, skb);
4514         } else {
4515                 /* Fragmented */
4516                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4517
4518                 skb_shinfo(skb)->frag_list = NULL;
4519
4520                 /* Queue all fragments atomically */
4521                 spin_lock(&queue->lock);
4522
4523                 __skb_queue_tail(queue, skb);
4524
4525                 flags &= ~ACL_START;
4526                 flags |= ACL_CONT;
4527                 do {
4528                         skb = list; list = list->next;
4529
4530                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4531                         hci_add_acl_hdr(skb, conn->handle, flags);
4532
4533                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4534
4535                         __skb_queue_tail(queue, skb);
4536                 } while (list);
4537
4538                 spin_unlock(&queue->lock);
4539         }
4540 }
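
/* For fragmented skbs the first fragment keeps the caller's flags while
 * every following fragment is re-flagged with ACL_CONT, letting the
 * controller stitch the PDU back together.
 */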
4541
4542 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4543 {
4544         struct hci_dev *hdev = chan->conn->hdev;
4545
4546         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4547
4548         hci_queue_acl(chan, &chan->data_q, skb, flags);
4549
4550         queue_work(hdev->workqueue, &hdev->tx_work);
4551 }
4552
4553 /* Send SCO data */
4554 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4555 {
4556         struct hci_dev *hdev = conn->hdev;
4557         struct hci_sco_hdr hdr;
4558
4559         BT_DBG("%s len %d", hdev->name, skb->len);
4560
4561         hdr.handle = cpu_to_le16(conn->handle);
4562         hdr.dlen   = skb->len;
4563
4564         skb_push(skb, HCI_SCO_HDR_SIZE);
4565         skb_reset_transport_header(skb);
4566         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4567
4568         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
4569
4570         skb_queue_tail(&conn->data_q, skb);
4571         queue_work(hdev->workqueue, &hdev->tx_work);
4572 }
4573
4574 /* ---- HCI TX task (outgoing data) ---- */
4575
4576 /* HCI Connection scheduler */
4577 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4578                                      int *quote)
4579 {
4580         struct hci_conn_hash *h = &hdev->conn_hash;
4581         struct hci_conn *conn = NULL, *c;
4582         unsigned int num = 0, min = ~0;
4583
4584         /* We don't have to lock the device here. Connections are always
4585          * added and removed with the TX task disabled. */
4586
4587         rcu_read_lock();
4588
4589         list_for_each_entry_rcu(c, &h->list, list) {
4590                 if (c->type != type || skb_queue_empty(&c->data_q))
4591                         continue;
4592
4593                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4594                         continue;
4595
4596                 num++;
4597
4598                 if (c->sent < min) {
4599                         min  = c->sent;
4600                         conn = c;
4601                 }
4602
4603                 if (hci_conn_num(hdev, type) == num)
4604                         break;
4605         }
4606
4607         rcu_read_unlock();
4608
4609         if (conn) {
4610                 int cnt, q;
4611
4612                 switch (conn->type) {
4613                 case ACL_LINK:
4614                         cnt = hdev->acl_cnt;
4615                         break;
4616                 case SCO_LINK:
4617                 case ESCO_LINK:
4618                         cnt = hdev->sco_cnt;
4619                         break;
4620                 case LE_LINK:
4621                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4622                         break;
4623                 default:
4624                         cnt = 0;
4625                         BT_ERR("Unknown link type");
4626                 }
4627
4628                 q = cnt / num;
4629                 *quote = q ? q : 1;
4630         } else
4631                 *quote = 0;
4632
4633         BT_DBG("conn %p quote %d", conn, *quote);
4634         return conn;
4635 }
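
/* The scheduler above is a simple fairness pass: among connections of
 * the requested type with queued data it picks the one with the fewest
 * packets in flight (lowest ->sent) and grants it an equal share of the
 * remaining controller credits, with a minimum quote of one.
 */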
4636
4637 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4638 {
4639         struct hci_conn_hash *h = &hdev->conn_hash;
4640         struct hci_conn *c;
4641
4642         BT_ERR("%s link tx timeout", hdev->name);
4643
4644         rcu_read_lock();
4645
4646         /* Kill stalled connections */
4647         list_for_each_entry_rcu(c, &h->list, list) {
4648                 if (c->type == type && c->sent) {
4649                         BT_ERR("%s killing stalled connection %pMR",
4650                                hdev->name, &c->dst);
4651                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4652                 }
4653         }
4654
4655         rcu_read_unlock();
4656 }
4657
4658 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4659                                       int *quote)
4660 {
4661         struct hci_conn_hash *h = &hdev->conn_hash;
4662         struct hci_chan *chan = NULL;
4663         unsigned int num = 0, min = ~0, cur_prio = 0;
4664         struct hci_conn *conn;
4665         int cnt, q, conn_num = 0;
4666
4667         BT_DBG("%s", hdev->name);
4668
4669         rcu_read_lock();
4670
4671         list_for_each_entry_rcu(conn, &h->list, list) {
4672                 struct hci_chan *tmp;
4673
4674                 if (conn->type != type)
4675                         continue;
4676
4677                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4678                         continue;
4679
4680                 conn_num++;
4681
4682                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4683                         struct sk_buff *skb;
4684
4685                         if (skb_queue_empty(&tmp->data_q))
4686                                 continue;
4687
4688                         skb = skb_peek(&tmp->data_q);
4689                         if (skb->priority < cur_prio)
4690                                 continue;
4691
4692                         if (skb->priority > cur_prio) {
4693                                 num = 0;
4694                                 min = ~0;
4695                                 cur_prio = skb->priority;
4696                         }
4697
4698                         num++;
4699
4700                         if (conn->sent < min) {
4701                                 min  = conn->sent;
4702                                 chan = tmp;
4703                         }
4704                 }
4705
4706                 if (hci_conn_num(hdev, type) == conn_num)
4707                         break;
4708         }
4709
4710         rcu_read_unlock();
4711
4712         if (!chan)
4713                 return NULL;
4714
4715         switch (chan->conn->type) {
4716         case ACL_LINK:
4717                 cnt = hdev->acl_cnt;
4718                 break;
4719         case AMP_LINK:
4720                 cnt = hdev->block_cnt;
4721                 break;
4722         case SCO_LINK:
4723         case ESCO_LINK:
4724                 cnt = hdev->sco_cnt;
4725                 break;
4726         case LE_LINK:
4727                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4728                 break;
4729         default:
4730                 cnt = 0;
4731                 BT_ERR("Unknown link type");
4732         }
4733
4734         q = cnt / num;
4735         *quote = q ? q : 1;
4736         BT_DBG("chan %p quote %d", chan, *quote);
4737         return chan;
4738 }
4739
4740 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4741 {
4742         struct hci_conn_hash *h = &hdev->conn_hash;
4743         struct hci_conn *conn;
4744         int num = 0;
4745
4746         BT_DBG("%s", hdev->name);
4747
4748         rcu_read_lock();
4749
4750         list_for_each_entry_rcu(conn, &h->list, list) {
4751                 struct hci_chan *chan;
4752
4753                 if (conn->type != type)
4754                         continue;
4755
4756                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4757                         continue;
4758
4759                 num++;
4760
4761                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4762                         struct sk_buff *skb;
4763
4764                         if (chan->sent) {
4765                                 chan->sent = 0;
4766                                 continue;
4767                         }
4768
4769                         if (skb_queue_empty(&chan->data_q))
4770                                 continue;
4771
4772                         skb = skb_peek(&chan->data_q);
4773                         if (skb->priority >= HCI_PRIO_MAX - 1)
4774                                 continue;
4775
4776                         skb->priority = HCI_PRIO_MAX - 1;
4777
4778                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4779                                skb->priority);
4780                 }
4781
4782                 if (hci_conn_num(hdev, type) == num)
4783                         break;
4784         }
4785
4786         rcu_read_unlock();
4787
4788 }
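
/* Starvation avoidance: channels that transmitted in the last round get
 * their per-round counter reset, while channels that stayed queued have
 * the priority of their head skb raised to HCI_PRIO_MAX - 1 so that
 * hci_chan_sent() will eventually select them.
 */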
4789
4790 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4791 {
4792         /* Calculate count of blocks used by this packet */
4793         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4794 }
4795
4796 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4797 {
4798         if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
4799                 /* ACL tx timeout must be longer than maximum
4800                  * link supervision timeout (40.9 seconds) */
4801                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4802                                        HCI_ACL_TX_TIMEOUT))
4803                         hci_link_tx_to(hdev, ACL_LINK);
4804         }
4805 }
4806
4807 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4808 {
4809         unsigned int cnt = hdev->acl_cnt;
4810         struct hci_chan *chan;
4811         struct sk_buff *skb;
4812         int quote;
4813
4814         __check_timeout(hdev, cnt);
4815
4816         while (hdev->acl_cnt &&
4817                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4818                 u32 priority = (skb_peek(&chan->data_q))->priority;
4819                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4820                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4821                                skb->len, skb->priority);
4822
4823                         /* Stop if priority has changed */
4824                         if (skb->priority < priority)
4825                                 break;
4826
4827                         skb = skb_dequeue(&chan->data_q);
4828
4829                         hci_conn_enter_active_mode(chan->conn,
4830                                                    bt_cb(skb)->force_active);
4831
4832                         hci_send_frame(hdev, skb);
4833                         hdev->acl_last_tx = jiffies;
4834
4835                         hdev->acl_cnt--;
4836                         chan->sent++;
4837                         chan->conn->sent++;
4838                 }
4839         }
4840
4841         if (cnt != hdev->acl_cnt)
4842                 hci_prio_recalculate(hdev, ACL_LINK);
4843 }
4844
4845 static void hci_sched_acl_blk(struct hci_dev *hdev)
4846 {
4847         unsigned int cnt = hdev->block_cnt;
4848         struct hci_chan *chan;
4849         struct sk_buff *skb;
4850         int quote;
4851         u8 type;
4852
4853         __check_timeout(hdev, cnt);
4854
4855         BT_DBG("%s", hdev->name);
4856
4857         if (hdev->dev_type == HCI_AMP)
4858                 type = AMP_LINK;
4859         else
4860                 type = ACL_LINK;
4861
4862         while (hdev->block_cnt > 0 &&
4863                (chan = hci_chan_sent(hdev, type, &quote))) {
4864                 u32 priority = (skb_peek(&chan->data_q))->priority;
4865                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4866                         int blocks;
4867
4868                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4869                                skb->len, skb->priority);
4870
4871                         /* Stop if priority has changed */
4872                         if (skb->priority < priority)
4873                                 break;
4874
4875                         skb = skb_dequeue(&chan->data_q);
4876
4877                         blocks = __get_blocks(hdev, skb);
4878                         if (blocks > hdev->block_cnt)
4879                                 return;
4880
4881                         hci_conn_enter_active_mode(chan->conn,
4882                                                    bt_cb(skb)->force_active);
4883
4884                         hci_send_frame(hdev, skb);
4885                         hdev->acl_last_tx = jiffies;
4886
4887                         hdev->block_cnt -= blocks;
4888                         quote -= blocks;
4889
4890                         chan->sent += blocks;
4891                         chan->conn->sent += blocks;
4892                 }
4893         }
4894
4895         if (cnt != hdev->block_cnt)
4896                 hci_prio_recalculate(hdev, type);
4897 }
4898
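/* Which scheduler runs below depends on the flow control mode the
 * controller operates in: packet-based accounting (one credit per ACL
 * packet) or the block-based accounting used with AMP controllers,
 * where a single packet may consume several data blocks (see
 * __get_blocks() above).
 */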
4899 static void hci_sched_acl(struct hci_dev *hdev)
4900 {
4901         BT_DBG("%s", hdev->name);
4902
4903         /* No ACL link over BR/EDR controller */
4904         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4905                 return;
4906
4907         /* No AMP link over AMP controller */
4908         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4909                 return;
4910
4911         switch (hdev->flow_ctl_mode) {
4912         case HCI_FLOW_CTL_MODE_PACKET_BASED:
4913                 hci_sched_acl_pkt(hdev);
4914                 break;
4915
4916         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4917                 hci_sched_acl_blk(hdev);
4918                 break;
4919         }
4920 }
4921
4922 /* Schedule SCO */
4923 static void hci_sched_sco(struct hci_dev *hdev)
4924 {
4925         struct hci_conn *conn;
4926         struct sk_buff *skb;
4927         int quote;
4928
4929         BT_DBG("%s", hdev->name);
4930
4931         if (!hci_conn_num(hdev, SCO_LINK))
4932                 return;
4933
4934         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4935                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4936                         BT_DBG("skb %p len %d", skb, skb->len);
4937                         hci_send_frame(hdev, skb);
4938
4939                         conn->sent++;
4940                         if (conn->sent == ~0)
4941                                 conn->sent = 0;
4942                 }
4943         }
4944 }
4945
4946 static void hci_sched_esco(struct hci_dev *hdev)
4947 {
4948         struct hci_conn *conn;
4949         struct sk_buff *skb;
4950         int quote;
4951
4952         BT_DBG("%s", hdev->name);
4953
4954         if (!hci_conn_num(hdev, ESCO_LINK))
4955                 return;
4956
4957         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4958                                                      &quote))) {
4959                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4960                         BT_DBG("skb %p len %d", skb, skb->len);
4961                         hci_send_frame(hdev, skb);
4962
4963                         conn->sent++;
4964                         if (conn->sent == ~0)
4965                                 conn->sent = 0;
4966                 }
4967         }
4968 }
4969
4970 static void hci_sched_le(struct hci_dev *hdev)
4971 {
4972         struct hci_chan *chan;
4973         struct sk_buff *skb;
4974         int quote, cnt, tmp;
4975
4976         BT_DBG("%s", hdev->name);
4977
4978         if (!hci_conn_num(hdev, LE_LINK))
4979                 return;
4980
4981         if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
4982                 /* LE tx timeout must be longer than maximum
4983                  * link supervision timeout (40.9 seconds) */
4984                 if (!hdev->le_cnt && hdev->le_pkts &&
4985                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
4986                         hci_link_tx_to(hdev, LE_LINK);
4987         }
4988
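        /* Controllers that report no dedicated LE buffers (le_pkts == 0)
         * share the ACL buffer pool for LE traffic, so the credit count
         * is taken from, and written back to, acl_cnt in that case.
         */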
4989         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4990         tmp = cnt;
4991         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4992                 u32 priority = (skb_peek(&chan->data_q))->priority;
4993                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4994                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4995                                skb->len, skb->priority);
4996
4997                         /* Stop if priority has changed */
4998                         if (skb->priority < priority)
4999                                 break;
5000
5001                         skb = skb_dequeue(&chan->data_q);
5002
5003                         hci_send_frame(hdev, skb);
5004                         hdev->le_last_tx = jiffies;
5005
5006                         cnt--;
5007                         chan->sent++;
5008                         chan->conn->sent++;
5009                 }
5010         }
5011
5012         if (hdev->le_pkts)
5013                 hdev->le_cnt = cnt;
5014         else
5015                 hdev->acl_cnt = cnt;
5016
5017         if (cnt != tmp)
5018                 hci_prio_recalculate(hdev, LE_LINK);
5019 }
5020
5021 static void hci_tx_work(struct work_struct *work)
5022 {
5023         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
5024         struct sk_buff *skb;
5025
5026         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
5027                hdev->sco_cnt, hdev->le_cnt);
5028
5029         if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5030                 /* Schedule queues and send pending frames to HCI driver */
5031                 hci_sched_acl(hdev);
5032                 hci_sched_sco(hdev);
5033                 hci_sched_esco(hdev);
5034                 hci_sched_le(hdev);
5035         }
5036
5037         /* Send next queued raw (unknown type) packet */
5038         while ((skb = skb_dequeue(&hdev->raw_q)))
5039                 hci_send_frame(hdev, skb);
5040 }
5041
5042 /* ----- HCI RX task (incoming data processing) ----- */
5043
5044 /* ACL data packet */
5045 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5046 {
5047         struct hci_acl_hdr *hdr = (void *) skb->data;
5048         struct hci_conn *conn;
5049         __u16 handle, flags;
5050
5051         skb_pull(skb, HCI_ACL_HDR_SIZE);
5052
5053         handle = __le16_to_cpu(hdr->handle);
5054         flags  = hci_flags(handle);
5055         handle = hci_handle(handle);
5056
5057         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
5058                handle, flags);
5059
5060         hdev->stat.acl_rx++;
5061
5062         hci_dev_lock(hdev);
5063         conn = hci_conn_hash_lookup_handle(hdev, handle);
5064         hci_dev_unlock(hdev);
5065
5066         if (conn) {
5067                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
5068
5069                 /* Send to upper protocol */
5070                 l2cap_recv_acldata(conn, skb, flags);
5071                 return;
5072         }
5073
5074         BT_ERR("%s ACL packet for unknown connection handle %d",
5075                hdev->name, handle);
5076
5077         kfree_skb(skb);
5078 }
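
/* A sketch of the handle field layout unpacked above: the 16-bit value
 * from the ACL header carries a 12-bit connection handle plus the
 * Packet Boundary and Broadcast flags in the top four bits, which is
 * what hci_handle()/hci_flags() separate. Open-coded equivalent, for
 * illustration only:
 */
#if 0
static void example_split_acl_handle(__u16 raw)
{
        __u16 handle = raw & 0x0fff;    /* bits 0-11: connection handle */
        __u16 flags  = raw >> 12;       /* bits 12-15: PB and BC flags */
}
#endif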
5079
5080 /* SCO data packet */
5081 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5082 {
5083         struct hci_sco_hdr *hdr = (void *) skb->data;
5084         struct hci_conn *conn;
5085         __u16 handle;
5086
5087         skb_pull(skb, HCI_SCO_HDR_SIZE);
5088
5089         handle = __le16_to_cpu(hdr->handle);
5090
5091         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
5092
5093         hdev->stat.sco_rx++;
5094
5095         hci_dev_lock(hdev);
5096         conn = hci_conn_hash_lookup_handle(hdev, handle);
5097         hci_dev_unlock(hdev);
5098
5099         if (conn) {
5100                 /* Send to upper protocol */
5101                 sco_recv_scodata(conn, skb);
5102                 return;
5103         }
5104
5105         BT_ERR("%s SCO packet for unknown connection handle %d",
5106                hdev->name, handle);
5107
5108         kfree_skb(skb);
5109 }
5110
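/* Commands belonging to one request sit back-to-back on cmd_q, and
 * bt_cb(skb)->req.start is set only on the first command of each
 * request. The current request is therefore complete once the queue is
 * empty or its head starts a new request.
 */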
5111 static bool hci_req_is_complete(struct hci_dev *hdev)
5112 {
5113         struct sk_buff *skb;
5114
5115         skb = skb_peek(&hdev->cmd_q);
5116         if (!skb)
5117                 return true;
5118
5119         return bt_cb(skb)->req.start;
5120 }
5121
5122 static void hci_resend_last(struct hci_dev *hdev)
5123 {
5124         struct hci_command_hdr *sent;
5125         struct sk_buff *skb;
5126         u16 opcode;
5127
5128         if (!hdev->sent_cmd)
5129                 return;
5130
5131         sent = (void *) hdev->sent_cmd->data;
5132         opcode = __le16_to_cpu(sent->opcode);
5133         if (opcode == HCI_OP_RESET)
5134                 return;
5135
5136         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5137         if (!skb)
5138                 return;
5139
5140         skb_queue_head(&hdev->cmd_q, skb);
5141         queue_work(hdev->workqueue, &hdev->cmd_work);
5142 }
5143
5144 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5145 {
5146         hci_req_complete_t req_complete = NULL;
5147         struct sk_buff *skb;
5148         unsigned long flags;
5149
5150         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5151
5152         /* If the completed command doesn't match the last one that was
5153          * sent, we need to handle it specially.
5154          */
5155         if (!hci_sent_cmd_data(hdev, opcode)) {
5156                 /* Some CSR-based controllers generate a spontaneous
5157                  * reset complete event during init, and any pending
5158                  * command will never be completed. In such a case we
5159                  * need to resend whatever the last sent command
5160                  * was.
5161                  */
5162                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5163                         hci_resend_last(hdev);
5164
5165                 return;
5166         }
5167
5168         /* If the command succeeded and there are still more commands in
5169          * this request, the request is not yet complete.
5170          */
5171         if (!status && !hci_req_is_complete(hdev))
5172                 return;
5173
5174         /* If this was the last command in a request, the complete
5175          * callback is found in hdev->sent_cmd instead of the
5176          * command queue (hdev->cmd_q).
5177          */
5178         if (hdev->sent_cmd) {
5179                 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
5180
5181                 if (req_complete) {
5182                         /* We must set the complete callback to NULL to
5183                          * avoid calling the callback more than once if
5184                          * this function gets called again.
5185                          */
5186                         bt_cb(hdev->sent_cmd)->req.complete = NULL;
5187
5188                         goto call_complete;
5189                 }
5190         }
5191
5192         /* Remove all pending commands belonging to this request */
5193         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5194         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5195                 if (bt_cb(skb)->req.start) {
5196                         __skb_queue_head(&hdev->cmd_q, skb);
5197                         break;
5198                 }
5199
5200                 req_complete = bt_cb(skb)->req.complete;
5201                 kfree_skb(skb);
5202         }
5203         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5204
5205 call_complete:
5206         if (req_complete)
5207                 req_complete(hdev, status);
5208 }
5209
5210 static void hci_rx_work(struct work_struct *work)
5211 {
5212         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
5213         struct sk_buff *skb;
5214
5215         BT_DBG("%s", hdev->name);
5216
5217         while ((skb = skb_dequeue(&hdev->rx_q))) {
5218                 /* Send copy to monitor */
5219                 hci_send_to_monitor(hdev, skb);
5220
5221                 if (atomic_read(&hdev->promisc)) {
5222                         /* Send copy to the sockets */
5223                         hci_send_to_sock(hdev, skb);
5224                 }
5225
5226                 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5227                         kfree_skb(skb);
5228                         continue;
5229                 }
5230
5231                 if (test_bit(HCI_INIT, &hdev->flags)) {
5232                         /* Don't process data packets in this state. */
5233                         switch (bt_cb(skb)->pkt_type) {
5234                         case HCI_ACLDATA_PKT:
5235                         case HCI_SCODATA_PKT:
5236                                 kfree_skb(skb);
5237                                 continue;
5238                         }
5239                 }
5240
5241                 /* Process frame */
5242                 switch (bt_cb(skb)->pkt_type) {
5243                 case HCI_EVENT_PKT:
5244                         BT_DBG("%s Event packet", hdev->name);
5245                         hci_event_packet(hdev, skb);
5246                         break;
5247
5248                 case HCI_ACLDATA_PKT:
5249                         BT_DBG("%s ACL data packet", hdev->name);
5250                         hci_acldata_packet(hdev, skb);
5251                         break;
5252
5253                 case HCI_SCODATA_PKT:
5254                         BT_DBG("%s SCO data packet", hdev->name);
5255                         hci_scodata_packet(hdev, skb);
5256                         break;
5257
5258                 default:
5259                         kfree_skb(skb);
5260                         break;
5261                 }
5262         }
5263 }
5264
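/* hdev->cmd_cnt acts as the HCI command flow control credit: it is
 * decremented for every command handed to the driver below and
 * replenished when the controller reports free command slots in its
 * Command Complete/Command Status events (handled in hci_event.c).
 */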
5265 static void hci_cmd_work(struct work_struct *work)
5266 {
5267         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
5268         struct sk_buff *skb;
5269
5270         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5271                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
5272
5273         /* Send queued commands */
5274         if (atomic_read(&hdev->cmd_cnt)) {
5275                 skb = skb_dequeue(&hdev->cmd_q);
5276                 if (!skb)
5277                         return;
5278
5279                 kfree_skb(hdev->sent_cmd);
5280
5281                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
5282                 if (hdev->sent_cmd) {
5283                         atomic_dec(&hdev->cmd_cnt);
5284                         hci_send_frame(hdev, skb);
5285                         if (test_bit(HCI_RESET, &hdev->flags))
5286                                 cancel_delayed_work(&hdev->cmd_timer);
5287                         else
5288                                 schedule_delayed_work(&hdev->cmd_timer,
5289                                                       HCI_CMD_TIMEOUT);
5290                 } else {
5291                         skb_queue_head(&hdev->cmd_q, skb);
5292                         queue_work(hdev->workqueue, &hdev->cmd_work);
5293                 }
5294         }
5295 }
5296
5297 void hci_req_add_le_scan_disable(struct hci_request *req)
5298 {
5299         struct hci_cp_le_set_scan_enable cp;
5300
5301         memset(&cp, 0, sizeof(cp));
5302         cp.enable = LE_SCAN_DISABLE;
5303         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5304 }
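
/* A minimal usage sketch for the request helpers, assuming the
 * hypothetical callback and function names below; it mirrors what
 * hci_update_background_scan() does at the end of this file.
 */
#if 0
static void example_scan_off_complete(struct hci_dev *hdev, u8 status)
{
        if (status)
                BT_ERR("Disabling LE scan failed: status 0x%2.2x", status);
}

static void example_disable_le_scan(struct hci_dev *hdev)
{
        struct hci_request req;
        int err;

        hci_req_init(&req, hdev);
        hci_req_add_le_scan_disable(&req);

        err = hci_req_run(&req, example_scan_off_complete);
        if (err)
                BT_ERR("Failed to run HCI request: err %d", err);
}
#endif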
5305
5306 void hci_req_add_le_passive_scan(struct hci_request *req)
5307 {
5308         struct hci_cp_le_set_scan_param param_cp;
5309         struct hci_cp_le_set_scan_enable enable_cp;
5310         struct hci_dev *hdev = req->hdev;
5311         u8 own_addr_type;
5312
5313         /* Set require_privacy to false since no SCAN_REQ are sent
5314          * during passive scanning. Not using an unresolvable address
5315          * here is important so that peer devices using direct
5316          * advertising with our address will be correctly reported
5317          * by the controller.
5318          */
5319         if (hci_update_random_address(req, false, &own_addr_type))
5320                 return;
5321
5322         memset(&param_cp, 0, sizeof(param_cp));
5323         param_cp.type = LE_SCAN_PASSIVE;
5324         param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5325         param_cp.window = cpu_to_le16(hdev->le_scan_window);
5326         param_cp.own_address_type = own_addr_type;
5327         hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5328                     &param_cp);
5329
5330         memset(&enable_cp, 0, sizeof(enable_cp));
5331         enable_cp.enable = LE_SCAN_ENABLE;
5332         enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
5333         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5334                     &enable_cp);
5335 }
5336
5337 static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5338 {
5339         if (status)
5340                 BT_DBG("HCI request failed to update background scanning: status 0x%2.2x",
5341                        status);
5342 }
5343
5344 /* This function controls the background scanning based on hdev->pend_le_conns
5345  * list. If there are pending LE connections, we start the background scanning;
5346  * otherwise we stop it.
5347  *
5348  * This function requires that the caller holds hdev->lock.
5349  */
5350 void hci_update_background_scan(struct hci_dev *hdev)
5351 {
5352         struct hci_request req;
5353         struct hci_conn *conn;
5354         int err;
5355
5356         if (!test_bit(HCI_UP, &hdev->flags) ||
5357             test_bit(HCI_INIT, &hdev->flags) ||
5358             test_bit(HCI_SETUP, &hdev->dev_flags) ||
5359             test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
5360             test_bit(HCI_UNREGISTER, &hdev->dev_flags))
5361                 return;
5362
5363         hci_req_init(&req, hdev);
5364
5365         if (list_empty(&hdev->pend_le_conns) && !hdev->pend_le_reports) {
5366                 /* If there are no pending LE connections and no devices
5367                  * to be scanned for, we should stop the background
5368                  * scanning.
5369                  */
5370
5371                 /* If controller is not scanning we are done. */
5372                 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5373                         return;
5374
5375                 hci_req_add_le_scan_disable(&req);
5376
5377                 BT_DBG("%s stopping background scanning", hdev->name);
5378         } else {
5379                 /* If there is at least one pending LE connection or device
5380                  * to be scanned for, we keep the background scan running.
5381                  */
5382
5383                 /* If controller is connecting, we should not start scanning
5384                  * since some controllers are not able to scan and connect at
5385                  * the same time.
5386                  */
5387                 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5388                 if (conn)
5389                         return;
5390
5391                 /* If controller is currently scanning, we stop it to ensure
5392                  * we don't miss any advertising (due to the duplicates filter).
5393                  */
5394                 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5395                         hci_req_add_le_scan_disable(&req);
5396
5397                 hci_req_add_le_passive_scan(&req);
5398
5399                 BT_DBG("%s starting background scanning", hdev->name);
5400         }
5401
5402         err = hci_req_run(&req, update_background_scan_complete);
5403         if (err)
5404                 BT_ERR("Failed to run HCI request: err %d", err);
5405 }