/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf) - 1));
        bool enable;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
                return -EALREADY;

        hci_req_lock(hdev);
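        /* There is no dedicated HCI command for leaving Device Under
         * Test mode, so disabling it is done with a full HCI reset
         * instead.
         */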
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);

        if (err < 0)
                return err;

        change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};
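
/* Usage sketch (assuming debugfs is mounted at the usual
 * /sys/kernel/debug location):
 *
 *      echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *      cat /sys/kernel/debug/bluetooth/hci0/dut_mode
 *
 * The controller must be powered on, and writing the value that is
 * already set returns -EALREADY.
 */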

static int features_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        u8 p;

        hci_dev_lock(hdev);
        for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
                           hdev->features[p][0], hdev->features[p][1],
                           hdev->features[p][2], hdev->features[p][3],
                           hdev->features[p][4], hdev->features[p][5],
                           hdev->features[p][6], hdev->features[p][7]);
        }
        if (lmp_le_capable(hdev))
                seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
                           hdev->le_features[0], hdev->le_features[1],
                           hdev->le_features[2], hdev->le_features[3],
                           hdev->le_features[4], hdev->le_features[5],
                           hdev->le_features[6], hdev->le_features[7]);
        hci_dev_unlock(hdev);

        return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
        return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
        .open           = features_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->blacklist, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
        return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
        .open           = blacklist_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bt_uuid *uuid;

        hci_dev_lock(hdev);
        list_for_each_entry(uuid, &hdev->uuids, list) {
                u8 i, val[16];
                /* The Bluetooth UUID values are stored with reversed
                 * (little endian) byte order, while the %pUb modifier
                 * expects big endian. So reverse the bytes before
                 * printing.
                 */
                for (i = 0; i < 16; i++)
                        val[i] = uuid->uuid[15 - i];

                seq_printf(f, "%pUb\n", val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
        return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
        .open           = uuids_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        hci_dev_lock(hdev);

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;
                seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
                           &data->bdaddr,
                           data->pscan_rep_mode, data->pscan_period_mode,
                           data->pscan_mode, data->dev_class[2],
                           data->dev_class[1], data->dev_class[0],
                           __le16_to_cpu(data->clock_offset),
                           data->rssi, data->ssp_mode, e->timestamp);
        }

        hci_dev_unlock(hdev);

        return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
        return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
        .open           = inquiry_cache_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key = list_entry(p, struct link_key, list);
                seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
                           HCI_LINK_KEY_SIZE, key->val, key->pin_len);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
        .open           = link_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
                   hdev->dev_class[1], hdev->dev_class[0]);
        hci_dev_unlock(hdev);

        return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
        return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
        .open           = dev_class_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->voice_setting;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
                        NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        hdev->auto_accept_delay = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->auto_accept_delay;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
                        auto_accept_delay_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
                                     size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
                                      const char __user *user_buf,
                                      size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf) - 1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

        return count;
}

static const struct file_operations force_sc_support_fops = {
        .open           = simple_open,
        .read           = force_sc_support_read,
        .write          = force_sc_support_write,
        .llseek         = default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
                                 size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
        .open           = simple_open,
        .read           = sc_only_mode_read,
        .llseek         = default_llseek,
};

static int idle_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val != 0 && (val < 500 || val > 3600000))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->idle_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->idle_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
                        idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        /* Require the RPA timeout to be at least 30 seconds and at most
         * 24 hours.
         */
        if (val < 30 || val > (60 * 60 * 24))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->rpa_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->rpa_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
                        rpa_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
                        sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
                        sniff_max_interval_set, "%llu\n");

static int conn_info_min_age_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val > hdev->conn_info_max_age)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->conn_info_min_age = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->conn_info_min_age;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
                        conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val < hdev->conn_info_min_age)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->conn_info_max_age = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->conn_info_max_age;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
                        conn_info_max_age_set, "%llu\n");

static int identity_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        bdaddr_t addr;
        u8 addr_type;

        hci_dev_lock(hdev);

        hci_copy_identity_address(hdev, &addr, &addr_type);

        seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
                   16, hdev->irk, &hdev->rpa);

        hci_dev_unlock(hdev);

        return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
        .open           = identity_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->random_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
        .open           = random_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->static_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
        .open           = static_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static ssize_t force_static_address_read(struct file *file,
                                         char __user *user_buf,
                                         size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
                                          const char __user *user_buf,
                                          size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf) - 1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

        return count;
}

static const struct file_operations force_static_address_fops = {
        .open           = simple_open,
        .read           = force_static_address_read,
        .write          = force_static_address_write,
        .llseek         = default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->le_white_list, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
        return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
        .open           = white_list_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
                struct smp_irk *irk = list_entry(p, struct smp_irk, list);
                seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
                           &irk->bdaddr, irk->addr_type,
                           16, irk->val, &irk->rpa);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_resolving_keys_show,
                           inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
        .open           = identity_resolving_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->long_term_keys) {
                struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
                seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
                           &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
                           ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
                           __le64_to_cpu(ltk->rand), 16, ltk->val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
        .open           = long_term_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int conn_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
                        conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
                        conn_max_interval_set, "%llu\n");

static int adv_channel_map_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x01 || val > 0x07)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_adv_channel_map = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_adv_channel_map;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
                        adv_channel_map_set, "%llu\n");

static int le_auto_conn_show(struct seq_file *sf, void *ptr)
{
        struct hci_dev *hdev = sf->private;
        struct hci_conn_params *p;

        hci_dev_lock(hdev);

        list_for_each_entry(p, &hdev->le_conn_params, list) {
                seq_printf(sf, "%pMR %u %u\n", &p->addr, p->addr_type,
                           p->auto_connect);
        }

        hci_dev_unlock(hdev);

        return 0;
}

static int le_auto_conn_open(struct inode *inode, struct file *file)
{
        return single_open(file, le_auto_conn_show, inode->i_private);
}

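/* The le_auto_conn debugfs file accepts three commands:
 *
 *      add <bdaddr> <addr_type> [<auto_connect>]
 *      del <bdaddr> <addr_type>
 *      clr
 *
 * where <bdaddr> is written as aa:bb:cc:dd:ee:ff (hex) and the
 * remaining fields are decimal. When <auto_connect> is omitted on
 * "add", it defaults to 0.
 */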
static ssize_t le_auto_conn_write(struct file *file, const char __user *data,
                                  size_t count, loff_t *offset)
{
        struct seq_file *sf = file->private_data;
        struct hci_dev *hdev = sf->private;
        u8 auto_connect = 0;
        bdaddr_t addr;
        u8 addr_type;
        char *buf;
        int err = 0;
        int n;

        /* Don't allow partial write */
        if (*offset != 0)
                return -EINVAL;

        if (count < 3)
                return -EINVAL;

        buf = memdup_user(data, count);
        if (IS_ERR(buf))
                return PTR_ERR(buf);

        if (memcmp(buf, "add", 3) == 0) {
                n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu %hhu",
                           &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2],
                           &addr.b[1], &addr.b[0], &addr_type,
                           &auto_connect);

                if (n < 7) {
                        err = -EINVAL;
                        goto done;
                }

                hci_dev_lock(hdev);
                err = hci_conn_params_add(hdev, &addr, addr_type, auto_connect,
                                          hdev->le_conn_min_interval,
                                          hdev->le_conn_max_interval);
                hci_dev_unlock(hdev);

                if (err)
                        goto done;
        } else if (memcmp(buf, "del", 3) == 0) {
                n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu",
                           &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2],
                           &addr.b[1], &addr.b[0], &addr_type);

                if (n < 7) {
                        err = -EINVAL;
                        goto done;
                }

                hci_dev_lock(hdev);
                hci_conn_params_del(hdev, &addr, addr_type);
                hci_dev_unlock(hdev);
        } else if (memcmp(buf, "clr", 3) == 0) {
                hci_dev_lock(hdev);
                hci_conn_params_clear(hdev);
                hci_dev_unlock(hdev);
        } else {
                err = -EINVAL;
        }

done:
        kfree(buf);

        if (err)
                return err;
        else
                return count;
}

static const struct file_operations le_auto_conn_fops = {
        .open           = le_auto_conn_open,
        .read           = seq_read,
        .write          = le_auto_conn_write,
        .llseek         = seq_lseek,
        .release        = single_release,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

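/* Take ownership of the last received event and return it if it matches
 * the requested event, or the Command Complete for the given opcode
 * when no specific event was requested.
 */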
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

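/* Send a single HCI command and sleep until the matching event arrives
 * or the timeout expires. Callers are expected to serialize through
 * hci_req_lock(), as dut_mode_write() above does.
 */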
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
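
/* Usage sketch (a minimal example, not taken from this file): issue a
 * synchronous HCI Reset under the request lock and discard the returned
 * Command Complete skb.
 *
 *      struct sk_buff *skb;
 *
 *      hci_req_lock(hdev);
 *      skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
 *      hci_req_unlock(hdev);
 *      if (!IS_ERR(skb))
 *              kfree_skb(skb);
 */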

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                      unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
         * but it does not support page scan related HCI commands.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* Read LE Advertising Channel TX Power */
        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Clear LE White List */
        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

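/* Pick the inquiry mode to request: 0x02 (extended) or 0x01 (with RSSI)
 * when the feature bits advertise it, plus a few controllers that are
 * known to support RSSI inquiry results without announcing the feature.
 */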
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

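/* Build the Set Event Mask command from the controller's capabilities,
 * enabling only the events that the controller can actually generate.
 */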
static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[0] |= 0x80; /* Encryption Change */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */
                events[5] |= 0x80; /* Encryption Key Refresh Complete */
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

        if (lmp_le_capable(hdev)) {
                memset(events, 0, sizeof(events));
                events[0] = 0x1f;
                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
                            sizeof(events), events);
        }
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

        if (lmp_le_capable(hdev))
                le_setup(req);

        hci_setup_event_mask(req);

        /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
         * local supported commands HCI command.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, the host features page should
                 * be available as well. However, some controllers list
                 * max_page as 0 as long as SSP has not been enabled. To
                 * achieve proper debugging output, force max_page to a
                 * minimum of 1.
                 */
                hdev->max_page = 0x01;

                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                hci_setup_inquiry_mode(req);

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

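/* Compose the default link policy from whatever LMP features the
 * controller advertises (role switch, hold, sniff, park).
 */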
static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = lmp_le_br_capable(hdev);
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If Connectionless Slave Broadcast master role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Slave Page Response Timeout */
                events[2] |= 0x20;      /* CSB Channel Map Change */
        }

        /* If Connectionless Slave Broadcast slave role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CSB Receive */
                events[2] |= 0x04;      /* CSB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev))
                events[2] |= 0x80;

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and send the command only if it
         * is marked as supported. If not supported, assume that the
         * controller does not have actual support for stored link keys,
         * which makes this command redundant anyway.
         *
         * Some controllers indicate that they support handling deleting
         * stored link keys, but they don't. The quirk lets a driver
         * just disable this command.
         */
        if (hdev->commands[6] & 0x80 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (lmp_le_capable(hdev))
                hci_set_le_support(req);

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Check for Synchronization Train support */
        if (lmp_sync_train_capable(hdev))
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

        /* Enable Secure Connections if supported and configured */
        if ((lmp_sc_capable(hdev) ||
             test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
            test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
                u8 support = 0x01;
                hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                            sizeof(support), &support);
        }
}

1669 static int __hci_init(struct hci_dev *hdev)
1670 {
1671         int err;
1672
1673         err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1674         if (err < 0)
1675                 return err;
1676
1677         /* The Device Under Test (DUT) mode is special and available for
1678          * all controller types. So just create it early on.
1679          */
1680         if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1681                 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1682                                     &dut_mode_fops);
1683         }
1684
1685         /* HCI_BREDR covers single-mode LE, single-mode BR/EDR and
1686          * dual-mode BR/EDR/LE controllers. AMP controllers only need
1687          * the first stage init.
1688          */
1689         if (hdev->dev_type != HCI_BREDR)
1690                 return 0;
1691
1692         err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1693         if (err < 0)
1694                 return err;
1695
1696         err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1697         if (err < 0)
1698                 return err;
1699
1700         err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1701         if (err < 0)
1702                 return err;
1703
1704         /* Only create debugfs entries during the initial setup
1705          * phase and not every time the controller gets powered on.
1706          */
1707         if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1708                 return 0;
1709
1710         debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1711                             &features_fops);
1712         debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1713                            &hdev->manufacturer);
1714         debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1715         debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1716         debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1717                             &blacklist_fops);
1718         debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1719
1720         debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1721                             &conn_info_min_age_fops);
1722         debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1723                             &conn_info_max_age_fops);
1724
1725         if (lmp_bredr_capable(hdev)) {
1726                 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1727                                     hdev, &inquiry_cache_fops);
1728                 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1729                                     hdev, &link_keys_fops);
1730                 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1731                                     hdev, &dev_class_fops);
1732                 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1733                                     hdev, &voice_setting_fops);
1734         }
1735
1736         if (lmp_ssp_capable(hdev)) {
1737                 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1738                                     hdev, &auto_accept_delay_fops);
1739                 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1740                                     hdev, &force_sc_support_fops);
1741                 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1742                                     hdev, &sc_only_mode_fops);
1743         }
1744
1745         if (lmp_sniff_capable(hdev)) {
1746                 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1747                                     hdev, &idle_timeout_fops);
1748                 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1749                                     hdev, &sniff_min_interval_fops);
1750                 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1751                                     hdev, &sniff_max_interval_fops);
1752         }
1753
1754         if (lmp_le_capable(hdev)) {
1755                 debugfs_create_file("identity", 0400, hdev->debugfs,
1756                                     hdev, &identity_fops);
1757                 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1758                                     hdev, &rpa_timeout_fops);
1759                 debugfs_create_file("random_address", 0444, hdev->debugfs,
1760                                     hdev, &random_address_fops);
1761                 debugfs_create_file("static_address", 0444, hdev->debugfs,
1762                                     hdev, &static_address_fops);
1763
1764                 /* For controllers with a public address, provide a debug
1765                  * option to force the usage of the configured static
1766                  * address. By default the public address is used.
1767                  */
1768                 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1769                         debugfs_create_file("force_static_address", 0644,
1770                                             hdev->debugfs, hdev,
1771                                             &force_static_address_fops);
1772
1773                 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1774                                   &hdev->le_white_list_size);
1775                 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1776                                     &white_list_fops);
1777                 debugfs_create_file("identity_resolving_keys", 0400,
1778                                     hdev->debugfs, hdev,
1779                                     &identity_resolving_keys_fops);
1780                 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1781                                     hdev, &long_term_keys_fops);
1782                 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1783                                     hdev, &conn_min_interval_fops);
1784                 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1785                                     hdev, &conn_max_interval_fops);
1786                 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1787                                     hdev, &adv_channel_map_fops);
1788                 debugfs_create_file("le_auto_conn", 0644, hdev->debugfs, hdev,
1789                                     &le_auto_conn_fops);
1790                 debugfs_create_u16("discov_interleaved_timeout", 0644,
1791                                    hdev->debugfs,
1792                                    &hdev->discov_interleaved_timeout);
1793         }
1794
1795         return 0;
1796 }
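/* Runtime note (illustrative): with debugfs mounted, the entries created
 * above appear under /sys/kernel/debug/bluetooth/hciX/, e.g.
 *
 *	cat /sys/kernel/debug/bluetooth/hci0/features
 *
 * They are created once while HCI_SETUP is still set and then persist
 * across later power cycles of the controller.
 */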
1797
1798 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1799 {
1800         __u8 scan = opt;
1801
1802         BT_DBG("%s %x", req->hdev->name, scan);
1803
1804         /* Inquiry and Page scans */
1805         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1806 }
1807
1808 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1809 {
1810         __u8 auth = opt;
1811
1812         BT_DBG("%s %x", req->hdev->name, auth);
1813
1814         /* Authentication */
1815         hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1816 }
1817
1818 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1819 {
1820         __u8 encrypt = opt;
1821
1822         BT_DBG("%s %x", req->hdev->name, encrypt);
1823
1824         /* Encryption */
1825         hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1826 }
1827
1828 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1829 {
1830         __le16 policy = cpu_to_le16(opt);
1831
1832         BT_DBG("%s %x", req->hdev->name, policy);
1833
1834         /* Default link policy */
1835         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1836 }
1837
1838 /* Get HCI device by index.
1839  * Device is held on return. */
1840 struct hci_dev *hci_dev_get(int index)
1841 {
1842         struct hci_dev *hdev = NULL, *d;
1843
1844         BT_DBG("%d", index);
1845
1846         if (index < 0)
1847                 return NULL;
1848
1849         read_lock(&hci_dev_list_lock);
1850         list_for_each_entry(d, &hci_dev_list, list) {
1851                 if (d->id == index) {
1852                         hdev = hci_dev_hold(d);
1853                         break;
1854                 }
1855         }
1856         read_unlock(&hci_dev_list_lock);
1857         return hdev;
1858 }
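/* Usage sketch: every successful hci_dev_get() must be balanced with a
 * hci_dev_put() once the reference is no longer needed:
 *
 *	struct hci_dev *hdev = hci_dev_get(0);
 *	if (hdev) {
 *		BT_DBG("%s flags 0x%lx", hdev->name, hdev->flags);
 *		hci_dev_put(hdev);
 *	}
 */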
1859
1860 /* ---- Inquiry support ---- */
1861
1862 bool hci_discovery_active(struct hci_dev *hdev)
1863 {
1864         struct discovery_state *discov = &hdev->discovery;
1865
1866         switch (discov->state) {
1867         case DISCOVERY_FINDING:
1868         case DISCOVERY_RESOLVING:
1869                 return true;
1870
1871         default:
1872                 return false;
1873         }
1874 }
1875
1876 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1877 {
1878         BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1879
1880         if (hdev->discovery.state == state)
1881                 return;
1882
1883         switch (state) {
1884         case DISCOVERY_STOPPED:
1885                 hci_update_background_scan(hdev);
1886
1887                 if (hdev->discovery.state != DISCOVERY_STARTING)
1888                         mgmt_discovering(hdev, 0);
1889                 break;
1890         case DISCOVERY_STARTING:
1891                 break;
1892         case DISCOVERY_FINDING:
1893                 mgmt_discovering(hdev, 1);
1894                 break;
1895         case DISCOVERY_RESOLVING:
1896                 break;
1897         case DISCOVERY_STOPPING:
1898                 break;
1899         }
1900
1901         hdev->discovery.state = state;
1902 }
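/* State-flow sketch (assumed mgmt usage): a discovery session typically
 * walks STOPPED -> STARTING (request queued) -> FINDING (inquiry or LE
 * scan running) -> optionally RESOLVING (remote names being fetched) ->
 * STOPPED, with mgmt_discovering() signalling userspace only on the
 * FINDING and STOPPED edges handled above.
 */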
1903
1904 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1905 {
1906         struct discovery_state *cache = &hdev->discovery;
1907         struct inquiry_entry *p, *n;
1908
1909         list_for_each_entry_safe(p, n, &cache->all, all) {
1910                 list_del(&p->all);
1911                 kfree(p);
1912         }
1913
1914         INIT_LIST_HEAD(&cache->unknown);
1915         INIT_LIST_HEAD(&cache->resolve);
1916 }
1917
1918 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1919                                                bdaddr_t *bdaddr)
1920 {
1921         struct discovery_state *cache = &hdev->discovery;
1922         struct inquiry_entry *e;
1923
1924         BT_DBG("cache %p, %pMR", cache, bdaddr);
1925
1926         list_for_each_entry(e, &cache->all, all) {
1927                 if (!bacmp(&e->data.bdaddr, bdaddr))
1928                         return e;
1929         }
1930
1931         return NULL;
1932 }
1933
1934 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1935                                                        bdaddr_t *bdaddr)
1936 {
1937         struct discovery_state *cache = &hdev->discovery;
1938         struct inquiry_entry *e;
1939
1940         BT_DBG("cache %p, %pMR", cache, bdaddr);
1941
1942         list_for_each_entry(e, &cache->unknown, list) {
1943                 if (!bacmp(&e->data.bdaddr, bdaddr))
1944                         return e;
1945         }
1946
1947         return NULL;
1948 }
1949
1950 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1951                                                        bdaddr_t *bdaddr,
1952                                                        int state)
1953 {
1954         struct discovery_state *cache = &hdev->discovery;
1955         struct inquiry_entry *e;
1956
1957         BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1958
1959         list_for_each_entry(e, &cache->resolve, list) {
1960                 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1961                         return e;
1962                 if (!bacmp(&e->data.bdaddr, bdaddr))
1963                         return e;
1964         }
1965
1966         return NULL;
1967 }
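/* Note: BDADDR_ANY acts as a wildcard here; callers can pass it to
 * fetch the next entry in the given name_state (e.g. NAME_NEEDED)
 * rather than looking up a specific device.
 */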
1968
1969 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
1970                                       struct inquiry_entry *ie)
1971 {
1972         struct discovery_state *cache = &hdev->discovery;
1973         struct list_head *pos = &cache->resolve;
1974         struct inquiry_entry *p;
1975
1976         list_del(&ie->list);
1977
1978         list_for_each_entry(p, &cache->resolve, list) {
1979                 if (p->name_state != NAME_PENDING &&
1980                     abs(p->data.rssi) >= abs(ie->data.rssi))
1981                         break;
1982                 pos = &p->list;
1983         }
1984
1985         list_add(&ie->list, pos);
1986 }
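/* Ordering note: the loop above keeps the resolve list sorted by
 * ascending |RSSI|, so devices heard with the strongest signal are
 * name-resolved first; entries whose resolution is already in progress
 * (NAME_PENDING) are skipped in the comparison.
 */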
1987
1988 bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1989                               bool name_known, bool *ssp)
1990 {
1991         struct discovery_state *cache = &hdev->discovery;
1992         struct inquiry_entry *ie;
1993
1994         BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1995
1996         hci_remove_remote_oob_data(hdev, &data->bdaddr);
1997
1998         *ssp = data->ssp_mode;
1999
2000         ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
2001         if (ie) {
2002                 if (ie->data.ssp_mode)
2003                         *ssp = true;
2004
2005                 if (ie->name_state == NAME_NEEDED &&
2006                     data->rssi != ie->data.rssi) {
2007                         ie->data.rssi = data->rssi;
2008                         hci_inquiry_cache_update_resolve(hdev, ie);
2009                 }
2010
2011                 goto update;
2012         }
2013
2014         /* Entry not in the cache. Add new one. */
2015         ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
2016         if (!ie)
2017                 return false;
2018
2019         list_add(&ie->all, &cache->all);
2020
2021         if (name_known) {
2022                 ie->name_state = NAME_KNOWN;
2023         } else {
2024                 ie->name_state = NAME_NOT_KNOWN;
2025                 list_add(&ie->list, &cache->unknown);
2026         }
2027
2028 update:
2029         if (name_known && ie->name_state != NAME_KNOWN &&
2030             ie->name_state != NAME_PENDING) {
2031                 ie->name_state = NAME_KNOWN;
2032                 list_del(&ie->list);
2033         }
2034
2035         memcpy(&ie->data, data, sizeof(*data));
2036         ie->timestamp = jiffies;
2037         cache->timestamp = jiffies;
2038
2039         if (ie->name_state == NAME_NOT_KNOWN)
2040                 return false;
2041
2042         return true;
2043 }
2044
2045 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2046 {
2047         struct discovery_state *cache = &hdev->discovery;
2048         struct inquiry_info *info = (struct inquiry_info *) buf;
2049         struct inquiry_entry *e;
2050         int copied = 0;
2051
2052         list_for_each_entry(e, &cache->all, all) {
2053                 struct inquiry_data *data = &e->data;
2054
2055                 if (copied >= num)
2056                         break;
2057
2058                 bacpy(&info->bdaddr, &data->bdaddr);
2059                 info->pscan_rep_mode    = data->pscan_rep_mode;
2060                 info->pscan_period_mode = data->pscan_period_mode;
2061                 info->pscan_mode        = data->pscan_mode;
2062                 memcpy(info->dev_class, data->dev_class, 3);
2063                 info->clock_offset      = data->clock_offset;
2064
2065                 info++;
2066                 copied++;
2067         }
2068
2069         BT_DBG("cache %p, copied %d", cache, copied);
2070         return copied;
2071 }
2072
2073 static void hci_inq_req(struct hci_request *req, unsigned long opt)
2074 {
2075         struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
2076         struct hci_dev *hdev = req->hdev;
2077         struct hci_cp_inquiry cp;
2078
2079         BT_DBG("%s", hdev->name);
2080
2081         if (test_bit(HCI_INQUIRY, &hdev->flags))
2082                 return;
2083
2084         /* Start Inquiry */
2085         memcpy(&cp.lap, &ir->lap, 3);
2086         cp.length  = ir->length;
2087         cp.num_rsp = ir->num_rsp;
2088         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2089 }
2090
2091 static int wait_inquiry(void *word)
2092 {
2093         schedule();
2094         return signal_pending(current);
2095 }
2096
2097 int hci_inquiry(void __user *arg)
2098 {
2099         __u8 __user *ptr = arg;
2100         struct hci_inquiry_req ir;
2101         struct hci_dev *hdev;
2102         int err = 0, do_inquiry = 0, max_rsp;
2103         long timeo;
2104         __u8 *buf;
2105
2106         if (copy_from_user(&ir, ptr, sizeof(ir)))
2107                 return -EFAULT;
2108
2109         hdev = hci_dev_get(ir.dev_id);
2110         if (!hdev)
2111                 return -ENODEV;
2112
2113         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2114                 err = -EBUSY;
2115                 goto done;
2116         }
2117
2118         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
2119                 err = -EOPNOTSUPP;
2120                 goto done;
2121         }
2122
2123         if (hdev->dev_type != HCI_BREDR) {
2124                 err = -EOPNOTSUPP;
2125                 goto done;
2126         }
2127
2128         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2129                 err = -EOPNOTSUPP;
2130                 goto done;
2131         }
2132
2133         hci_dev_lock(hdev);
2134         if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
2135             inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
2136                 hci_inquiry_cache_flush(hdev);
2137                 do_inquiry = 1;
2138         }
2139         hci_dev_unlock(hdev);
2140
2141         timeo = ir.length * msecs_to_jiffies(2000);
2142
2143         if (do_inquiry) {
2144                 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2145                                    timeo);
2146                 if (err < 0)
2147                         goto done;
2148
2149                 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2150                  * cleared). If it is interrupted by a signal, return -EINTR.
2151                  */
2152                 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2153                                 TASK_INTERRUPTIBLE))
2154                         return -EINTR;
2155         }
2156
2157         /* For an unlimited number of responses, use a buffer with
2158          * 255 entries.
2159          */
2160         max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2161
2162         /* inquiry_cache_dump() can't sleep, so dump into a temporary
2163          * buffer here and copy it to user space afterwards.
2164          */
2165         buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
2166         if (!buf) {
2167                 err = -ENOMEM;
2168                 goto done;
2169         }
2170
2171         hci_dev_lock(hdev);
2172         ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
2173         hci_dev_unlock(hdev);
2174
2175         BT_DBG("num_rsp %d", ir.num_rsp);
2176
2177         if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2178                 ptr += sizeof(ir);
2179                 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
2180                                  ir.num_rsp))
2181                         err = -EFAULT;
2182         } else
2183                 err = -EFAULT;
2184
2185         kfree(buf);
2186
2187 done:
2188         hci_dev_put(hdev);
2189         return err;
2190 }
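/* Userspace sketch (illustrative; struct names from <bluetooth/hci.h>):
 * this handler backs the HCIINQUIRY ioctl on a raw HCI socket, with the
 * inquiry_info results written directly after the request header:
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[8];
 *	} req = {
 *		.ir = {
 *			.dev_id  = 0,
 *			.length  = 8,
 *			.num_rsp = 8,
 *			.lap     = { 0x33, 0x8b, 0x9e },
 *		},
 *	};
 *	ioctl(dd, HCIINQUIRY, &req);
 *
 * where dd is the socket descriptor and 0x9e8b33 is the General Inquiry
 * Access Code.
 */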
2191
2192 static int hci_dev_do_open(struct hci_dev *hdev)
2193 {
2194         int ret = 0;
2195
2196         BT_DBG("%s %p", hdev->name, hdev);
2197
2198         hci_req_lock(hdev);
2199
2200         if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2201                 ret = -ENODEV;
2202                 goto done;
2203         }
2204
2205         if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
2206                 /* Check for rfkill but allow the HCI setup stage to
2207                  * proceed (which in itself doesn't cause any RF activity).
2208                  */
2209                 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2210                         ret = -ERFKILL;
2211                         goto done;
2212                 }
2213
2214                 /* Check for a valid public address or a configured static
2215                  * random address, but let the HCI setup proceed to
2216                  * be able to determine if there is a public address
2217                  * or not.
2218                  *
2219                  * In case of user channel usage, it is not important
2220                  * if a public address or static random address is
2221                  * available.
2222                  *
2223                  * This check is only valid for BR/EDR controllers
2224                  * since AMP controllers do not have an address.
2225                  */
2226                 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2227                     hdev->dev_type == HCI_BREDR &&
2228                     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2229                     !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2230                         ret = -EADDRNOTAVAIL;
2231                         goto done;
2232                 }
2233         }
2234
2235         if (test_bit(HCI_UP, &hdev->flags)) {
2236                 ret = -EALREADY;
2237                 goto done;
2238         }
2239
2240         if (hdev->open(hdev)) {
2241                 ret = -EIO;
2242                 goto done;
2243         }
2244
2245         atomic_set(&hdev->cmd_cnt, 1);
2246         set_bit(HCI_INIT, &hdev->flags);
2247
2248         if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
2249                 ret = hdev->setup(hdev);
2250
2251         if (!ret) {
2252                 if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks) &&
2253                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2254                         ret = __hci_init(hdev);
2255         }
2256
2257         clear_bit(HCI_INIT, &hdev->flags);
2258
2259         if (!ret) {
2260                 hci_dev_hold(hdev);
2261                 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
2262                 set_bit(HCI_UP, &hdev->flags);
2263                 hci_notify(hdev, HCI_DEV_UP);
2264                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2265                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2266                     hdev->dev_type == HCI_BREDR) {
2267                         hci_dev_lock(hdev);
2268                         mgmt_powered(hdev, 1);
2269                         hci_dev_unlock(hdev);
2270                 }
2271         } else {
2272                 /* Init failed, cleanup */
2273                 flush_work(&hdev->tx_work);
2274                 flush_work(&hdev->cmd_work);
2275                 flush_work(&hdev->rx_work);
2276
2277                 skb_queue_purge(&hdev->cmd_q);
2278                 skb_queue_purge(&hdev->rx_q);
2279
2280                 if (hdev->flush)
2281                         hdev->flush(hdev);
2282
2283                 if (hdev->sent_cmd) {
2284                         kfree_skb(hdev->sent_cmd);
2285                         hdev->sent_cmd = NULL;
2286                 }
2287
2288                 hdev->close(hdev);
2289                 hdev->flags &= BIT(HCI_RAW);
2290         }
2291
2292 done:
2293         hci_req_unlock(hdev);
2294         return ret;
2295 }
2296
2297 /* ---- HCI ioctl helpers ---- */
2298
2299 int hci_dev_open(__u16 dev)
2300 {
2301         struct hci_dev *hdev;
2302         int err;
2303
2304         hdev = hci_dev_get(dev);
2305         if (!hdev)
2306                 return -ENODEV;
2307
2308         /* Devices that are marked for raw-only usage can only be powered
2309          * up as user channel. Trying to bring them up as normal devices
2310          * will result in a failure. Only user channel operation is
2311          * possible.
2312          *
2313          * When this function is called for a user channel, the flag
2314          * HCI_USER_CHANNEL will be set first before attempting to
2315          * open the device.
2316          */
2317         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks) &&
2318             !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2319                 err = -EOPNOTSUPP;
2320                 goto done;
2321         }
2322
2323         /* We need to ensure that no other power on/off work is pending
2324          * before proceeding to call hci_dev_do_open. This is
2325          * particularly important if the setup procedure has not yet
2326          * completed.
2327          */
2328         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2329                 cancel_delayed_work(&hdev->power_off);
2330
2331         /* After this call it is guaranteed that the setup procedure
2332          * has finished. This means that error conditions like RFKILL
2333          * or no valid public or static random address apply.
2334          */
2335         flush_workqueue(hdev->req_workqueue);
2336
2337         err = hci_dev_do_open(hdev);
2338
2339 done:
2340         hci_dev_put(hdev);
2341         return err;
2342 }
2343
2344 static int hci_dev_do_close(struct hci_dev *hdev)
2345 {
2346         BT_DBG("%s %p", hdev->name, hdev);
2347
2348         cancel_delayed_work(&hdev->power_off);
2349
2350         hci_req_cancel(hdev, ENODEV);
2351         hci_req_lock(hdev);
2352
2353         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
2354                 cancel_delayed_work_sync(&hdev->cmd_timer);
2355                 hci_req_unlock(hdev);
2356                 return 0;
2357         }
2358
2359         /* Flush RX and TX works */
2360         flush_work(&hdev->tx_work);
2361         flush_work(&hdev->rx_work);
2362
2363         if (hdev->discov_timeout > 0) {
2364                 cancel_delayed_work(&hdev->discov_off);
2365                 hdev->discov_timeout = 0;
2366                 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
2367                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2368         }
2369
2370         if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
2371                 cancel_delayed_work(&hdev->service_cache);
2372
2373         cancel_delayed_work_sync(&hdev->le_scan_disable);
2374
2375         if (test_bit(HCI_MGMT, &hdev->dev_flags))
2376                 cancel_delayed_work_sync(&hdev->rpa_expired);
2377
2378         hci_dev_lock(hdev);
2379         hci_inquiry_cache_flush(hdev);
2380         hci_conn_hash_flush(hdev);
2381         hci_pend_le_conns_clear(hdev);
2382         hci_dev_unlock(hdev);
2383
2384         hci_notify(hdev, HCI_DEV_DOWN);
2385
2386         if (hdev->flush)
2387                 hdev->flush(hdev);
2388
2389         /* Reset device */
2390         skb_queue_purge(&hdev->cmd_q);
2391         atomic_set(&hdev->cmd_cnt, 1);
2392         if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks) &&
2393             !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2394             test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
2395                 set_bit(HCI_INIT, &hdev->flags);
2396                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
2397                 clear_bit(HCI_INIT, &hdev->flags);
2398         }
2399
2400         /* Flush cmd work */
2401         flush_work(&hdev->cmd_work);
2402
2403         /* Drop queues */
2404         skb_queue_purge(&hdev->rx_q);
2405         skb_queue_purge(&hdev->cmd_q);
2406         skb_queue_purge(&hdev->raw_q);
2407
2408         /* Drop last sent command */
2409         if (hdev->sent_cmd) {
2410                 cancel_delayed_work_sync(&hdev->cmd_timer);
2411                 kfree_skb(hdev->sent_cmd);
2412                 hdev->sent_cmd = NULL;
2413         }
2414
2415         kfree_skb(hdev->recv_evt);
2416         hdev->recv_evt = NULL;
2417
2418         /* After this point our queues are empty
2419          * and no tasks are scheduled. */
2420         hdev->close(hdev);
2421
2422         /* Clear flags */
2423         hdev->flags &= BIT(HCI_RAW);
2424         hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2425
2426         if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2427                 if (hdev->dev_type == HCI_BREDR) {
2428                         hci_dev_lock(hdev);
2429                         mgmt_powered(hdev, 0);
2430                         hci_dev_unlock(hdev);
2431                 }
2432         }
2433
2434         /* Controller radio is available but is currently powered down */
2435         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
2436
2437         memset(hdev->eir, 0, sizeof(hdev->eir));
2438         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
2439         bacpy(&hdev->random_addr, BDADDR_ANY);
2440
2441         hci_req_unlock(hdev);
2442
2443         hci_dev_put(hdev);
2444         return 0;
2445 }
2446
2447 int hci_dev_close(__u16 dev)
2448 {
2449         struct hci_dev *hdev;
2450         int err;
2451
2452         hdev = hci_dev_get(dev);
2453         if (!hdev)
2454                 return -ENODEV;
2455
2456         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2457                 err = -EBUSY;
2458                 goto done;
2459         }
2460
2461         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2462                 cancel_delayed_work(&hdev->power_off);
2463
2464         err = hci_dev_do_close(hdev);
2465
2466 done:
2467         hci_dev_put(hdev);
2468         return err;
2469 }
2470
2471 int hci_dev_reset(__u16 dev)
2472 {
2473         struct hci_dev *hdev;
2474         int ret = 0;
2475
2476         hdev = hci_dev_get(dev);
2477         if (!hdev)
2478                 return -ENODEV;
2479
2480         hci_req_lock(hdev);
2481
2482         if (!test_bit(HCI_UP, &hdev->flags)) {
2483                 ret = -ENETDOWN;
2484                 goto done;
2485         }
2486
2487         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2488                 ret = -EBUSY;
2489                 goto done;
2490         }
2491
2492         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
2493                 ret = -EOPNOTSUPP;
2494                 goto done;
2495         }
2496
2497         /* Drop queues */
2498         skb_queue_purge(&hdev->rx_q);
2499         skb_queue_purge(&hdev->cmd_q);
2500
2501         hci_dev_lock(hdev);
2502         hci_inquiry_cache_flush(hdev);
2503         hci_conn_hash_flush(hdev);
2504         hci_dev_unlock(hdev);
2505
2506         if (hdev->flush)
2507                 hdev->flush(hdev);
2508
2509         atomic_set(&hdev->cmd_cnt, 1);
2510         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
2511
2512         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2513
2514 done:
2515         hci_req_unlock(hdev);
2516         hci_dev_put(hdev);
2517         return ret;
2518 }
2519
2520 int hci_dev_reset_stat(__u16 dev)
2521 {
2522         struct hci_dev *hdev;
2523         int ret = 0;
2524
2525         hdev = hci_dev_get(dev);
2526         if (!hdev)
2527                 return -ENODEV;
2528
2529         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2530                 ret = -EBUSY;
2531                 goto done;
2532         }
2533
2534         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
2535                 ret = -EOPNOTSUPP;
2536                 goto done;
2537         }
2538
2539         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2540
2541 done:
2542         hci_dev_put(hdev);
2543         return ret;
2544 }
2545
2546 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2547 {
2548         struct hci_dev *hdev;
2549         struct hci_dev_req dr;
2550         int err = 0;
2551
2552         if (copy_from_user(&dr, arg, sizeof(dr)))
2553                 return -EFAULT;
2554
2555         hdev = hci_dev_get(dr.dev_id);
2556         if (!hdev)
2557                 return -ENODEV;
2558
2559         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2560                 err = -EBUSY;
2561                 goto done;
2562         }
2563
2564         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
2565                 err = -EOPNOTSUPP;
2566                 goto done;
2567         }
2568
2569         if (hdev->dev_type != HCI_BREDR) {
2570                 err = -EOPNOTSUPP;
2571                 goto done;
2572         }
2573
2574         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2575                 err = -EOPNOTSUPP;
2576                 goto done;
2577         }
2578
2579         switch (cmd) {
2580         case HCISETAUTH:
2581                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2582                                    HCI_INIT_TIMEOUT);
2583                 break;
2584
2585         case HCISETENCRYPT:
2586                 if (!lmp_encrypt_capable(hdev)) {
2587                         err = -EOPNOTSUPP;
2588                         break;
2589                 }
2590
2591                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2592                         /* Auth must be enabled first */
2593                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2594                                            HCI_INIT_TIMEOUT);
2595                         if (err)
2596                                 break;
2597                 }
2598
2599                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2600                                    HCI_INIT_TIMEOUT);
2601                 break;
2602
2603         case HCISETSCAN:
2604                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2605                                    HCI_INIT_TIMEOUT);
2606                 break;
2607
2608         case HCISETLINKPOL:
2609                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2610                                    HCI_INIT_TIMEOUT);
2611                 break;
2612
2613         case HCISETLINKMODE:
2614                 hdev->link_mode = ((__u16) dr.dev_opt) &
2615                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
2616                 break;
2617
2618         case HCISETPTYPE:
2619                 hdev->pkt_type = (__u16) dr.dev_opt;
2620                 break;
2621
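        /* Note for the two MTU cases below: dev_opt carries two packed
         * __u16 values, the first halfword in memory being the packet
         * count and the second the MTU.
         */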
2622         case HCISETACLMTU:
2623                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
2624                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2625                 break;
2626
2627         case HCISETSCOMTU:
2628                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
2629                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2630                 break;
2631
2632         default:
2633                 err = -EINVAL;
2634                 break;
2635         }
2636
2637 done:
2638         hci_dev_put(hdev);
2639         return err;
2640 }
2641
2642 int hci_get_dev_list(void __user *arg)
2643 {
2644         struct hci_dev *hdev;
2645         struct hci_dev_list_req *dl;
2646         struct hci_dev_req *dr;
2647         int n = 0, size, err;
2648         __u16 dev_num;
2649
2650         if (get_user(dev_num, (__u16 __user *) arg))
2651                 return -EFAULT;
2652
2653         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2654                 return -EINVAL;
2655
2656         size = sizeof(*dl) + dev_num * sizeof(*dr);
2657
2658         dl = kzalloc(size, GFP_KERNEL);
2659         if (!dl)
2660                 return -ENOMEM;
2661
2662         dr = dl->dev_req;
2663
2664         read_lock(&hci_dev_list_lock);
2665         list_for_each_entry(hdev, &hci_dev_list, list) {
2666                 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2667                         cancel_delayed_work(&hdev->power_off);
2668
2669                 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2670                         set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2671
2672                 (dr + n)->dev_id  = hdev->id;
2673                 (dr + n)->dev_opt = hdev->flags;
2674
2675                 if (++n >= dev_num)
2676                         break;
2677         }
2678         read_unlock(&hci_dev_list_lock);
2679
2680         dl->dev_num = n;
2681         size = sizeof(*dl) + n * sizeof(*dr);
2682
2683         err = copy_to_user(arg, dl, size);
2684         kfree(dl);
2685
2686         return err ? -EFAULT : 0;
2687 }
2688
2689 int hci_get_dev_info(void __user *arg)
2690 {
2691         struct hci_dev *hdev;
2692         struct hci_dev_info di;
2693         int err = 0;
2694
2695         if (copy_from_user(&di, arg, sizeof(di)))
2696                 return -EFAULT;
2697
2698         hdev = hci_dev_get(di.dev_id);
2699         if (!hdev)
2700                 return -ENODEV;
2701
2702         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2703                 cancel_delayed_work_sync(&hdev->power_off);
2704
2705         if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2706                 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2707
2708         strcpy(di.name, hdev->name);
2709         di.bdaddr   = hdev->bdaddr;
2710         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2711         di.flags    = hdev->flags;
2712         di.pkt_type = hdev->pkt_type;
2713         if (lmp_bredr_capable(hdev)) {
2714                 di.acl_mtu  = hdev->acl_mtu;
2715                 di.acl_pkts = hdev->acl_pkts;
2716                 di.sco_mtu  = hdev->sco_mtu;
2717                 di.sco_pkts = hdev->sco_pkts;
2718         } else {
2719                 di.acl_mtu  = hdev->le_mtu;
2720                 di.acl_pkts = hdev->le_pkts;
2721                 di.sco_mtu  = 0;
2722                 di.sco_pkts = 0;
2723         }
2724         di.link_policy = hdev->link_policy;
2725         di.link_mode   = hdev->link_mode;
2726
2727         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2728         memcpy(&di.features, &hdev->features, sizeof(di.features));
2729
2730         if (copy_to_user(arg, &di, sizeof(di)))
2731                 err = -EFAULT;
2732
2733         hci_dev_put(hdev);
2734
2735         return err;
2736 }
2737
2738 /* ---- Interface to HCI drivers ---- */
2739
2740 static int hci_rfkill_set_block(void *data, bool blocked)
2741 {
2742         struct hci_dev *hdev = data;
2743
2744         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2745
2746         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2747                 return -EBUSY;
2748
2749         if (blocked) {
2750                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2751                 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2752                         hci_dev_do_close(hdev);
2753         } else {
2754                 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
2755         }
2756
2757         return 0;
2758 }
2759
2760 static const struct rfkill_ops hci_rfkill_ops = {
2761         .set_block = hci_rfkill_set_block,
2762 };
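/* Registration sketch (happens in hci_register_dev(), shown here only
 * for context; assumed call shape):
 *
 *	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
 *				    RFKILL_TYPE_BLUETOOTH,
 *				    &hci_rfkill_ops, hdev);
 *
 * so hci_rfkill_set_block() above runs whenever userspace or a hardware
 * switch (un)blocks the adapter.
 */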
2763
2764 static void hci_power_on(struct work_struct *work)
2765 {
2766         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2767         int err;
2768
2769         BT_DBG("%s", hdev->name);
2770
2771         err = hci_dev_do_open(hdev);
2772         if (err < 0) {
2773                 mgmt_set_powered_failed(hdev, err);
2774                 return;
2775         }
2776
2777         /* During the HCI setup phase, a few error conditions are
2778          * ignored and they need to be checked now. If they are still
2779          * valid, it is important to turn the device back off.
2780          */
2781         if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2782             (hdev->dev_type == HCI_BREDR &&
2783              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2784              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2785                 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2786                 hci_dev_do_close(hdev);
2787         } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2788                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2789                                    HCI_AUTO_OFF_TIMEOUT);
2790         }
2791
2792         if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
2793                 if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2794                         mgmt_index_added(hdev);
2795         }
2796 }
2797
2798 static void hci_power_off(struct work_struct *work)
2799 {
2800         struct hci_dev *hdev = container_of(work, struct hci_dev,
2801                                             power_off.work);
2802
2803         BT_DBG("%s", hdev->name);
2804
2805         hci_dev_do_close(hdev);
2806 }
2807
2808 static void hci_discov_off(struct work_struct *work)
2809 {
2810         struct hci_dev *hdev;
2811
2812         hdev = container_of(work, struct hci_dev, discov_off.work);
2813
2814         BT_DBG("%s", hdev->name);
2815
2816         mgmt_discoverable_timeout(hdev);
2817 }
2818
2819 void hci_uuids_clear(struct hci_dev *hdev)
2820 {
2821         struct bt_uuid *uuid, *tmp;
2822
2823         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2824                 list_del(&uuid->list);
2825                 kfree(uuid);
2826         }
2827 }
2828
2829 void hci_link_keys_clear(struct hci_dev *hdev)
2830 {
2831         struct list_head *p, *n;
2832
2833         list_for_each_safe(p, n, &hdev->link_keys) {
2834                 struct link_key *key;
2835
2836                 key = list_entry(p, struct link_key, list);
2837
2838                 list_del(p);
2839                 kfree(key);
2840         }
2841 }
2842
2843 void hci_smp_ltks_clear(struct hci_dev *hdev)
2844 {
2845         struct smp_ltk *k, *tmp;
2846
2847         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2848                 list_del(&k->list);
2849                 kfree(k);
2850         }
2851 }
2852
2853 void hci_smp_irks_clear(struct hci_dev *hdev)
2854 {
2855         struct smp_irk *k, *tmp;
2856
2857         list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2858                 list_del(&k->list);
2859                 kfree(k);
2860         }
2861 }
2862
2863 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2864 {
2865         struct link_key *k;
2866
2867         list_for_each_entry(k, &hdev->link_keys, list)
2868                 if (bacmp(bdaddr, &k->bdaddr) == 0)
2869                         return k;
2870
2871         return NULL;
2872 }
2873
2874 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2875                                u8 key_type, u8 old_key_type)
2876 {
2877         /* Legacy key */
2878         if (key_type < 0x03)
2879                 return true;
2880
2881         /* Debug keys are insecure so don't store them persistently */
2882         if (key_type == HCI_LK_DEBUG_COMBINATION)
2883                 return false;
2884
2885         /* Changed combination key and there's no previous one */
2886         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2887                 return false;
2888
2889         /* Security mode 3 case */
2890         if (!conn)
2891                 return true;
2892
2893         /* Neither local nor remote side had no-bonding as a requirement */
2894         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2895                 return true;
2896
2897         /* Local side had dedicated bonding as requirement */
2898         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2899                 return true;
2900
2901         /* Remote side had dedicated bonding as requirement */
2902         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2903                 return true;
2904
2905         /* If none of the above criteria match, then don't store the key
2906          * persistently */
2907         return false;
2908 }
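/* Reference values for the checks above (Core Spec authentication
 * requirements): 0x00/0x01 = no bonding, 0x02/0x03 = dedicated bonding,
 * 0x04/0x05 = general bonding; odd values additionally require MITM
 * protection. Example: local dedicated bonding (0x02) against an
 * unknown remote (0xff) still yields a persistent key.
 */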
2909
2910 static bool ltk_type_master(u8 type)
2911 {
2912         return (type == SMP_LTK);
2913 }
2914
2915 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
2916                              bool master)
2917 {
2918         struct smp_ltk *k;
2919
2920         list_for_each_entry(k, &hdev->long_term_keys, list) {
2921                 if (k->ediv != ediv || k->rand != rand)
2922                         continue;
2923
2924                 if (ltk_type_master(k->type) != master)
2925                         continue;
2926
2927                 return k;
2928         }
2929
2930         return NULL;
2931 }
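/* Lookup sketch (assumed call shape; the real caller lives in the event
 * handling code): on an LE Long Term Key Request event the (EDiv, Rand)
 * pair supplied by the remote master selects the key, along the lines
 * of:
 *
 *	key = hci_find_ltk(hdev, ev->ediv, ev->rand, false);
 */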
2932
2933 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2934                                      u8 addr_type, bool master)
2935 {
2936         struct smp_ltk *k;
2937
2938         list_for_each_entry(k, &hdev->long_term_keys, list)
2939                 if (addr_type == k->bdaddr_type &&
2940                     bacmp(bdaddr, &k->bdaddr) == 0 &&
2941                     ltk_type_master(k->type) == master)
2942                         return k;
2943
2944         return NULL;
2945 }
2946
2947 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2948 {
2949         struct smp_irk *irk;
2950
2951         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2952                 if (!bacmp(&irk->rpa, rpa))
2953                         return irk;
2954         }
2955
2956         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2957                 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
2958                         bacpy(&irk->rpa, rpa);
2959                         return irk;
2960                 }
2961         }
2962
2963         return NULL;
2964 }
2965
2966 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2967                                      u8 addr_type)
2968 {
2969         struct smp_irk *irk;
2970
2971         /* Identity Address must be public or static random */
2972         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2973                 return NULL;
2974
2975         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2976                 if (addr_type == irk->addr_type &&
2977                     bacmp(bdaddr, &irk->bdaddr) == 0)
2978                         return irk;
2979         }
2980
2981         return NULL;
2982 }
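/* Address-layout note: bdaddr_t is stored little-endian, so b[5] is the
 * most significant byte. Static random identity addresses must have the
 * two top bits set (0b11, hence the 0xc0 mask above); addresses with
 * 0b01 there are RPAs and are resolved via hci_find_irk_by_rpa()
 * instead.
 */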
2983
2984 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2985                                   bdaddr_t *bdaddr, u8 *val, u8 type,
2986                                   u8 pin_len, bool *persistent)
2987 {
2988         struct link_key *key, *old_key;
2989         u8 old_key_type;
2990
2991         old_key = hci_find_link_key(hdev, bdaddr);
2992         if (old_key) {
2993                 old_key_type = old_key->type;
2994                 key = old_key;
2995         } else {
2996                 old_key_type = conn ? conn->key_type : 0xff;
2997                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2998                 if (!key)
2999                         return NULL;
3000                 list_add(&key->list, &hdev->link_keys);
3001         }
3002
3003         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
3004
3005         /* Some buggy controller combinations generate a changed
3006          * combination key for legacy pairing even when there's no
3007          * previous key */
3008         if (type == HCI_LK_CHANGED_COMBINATION &&
3009             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
3010                 type = HCI_LK_COMBINATION;
3011                 if (conn)
3012                         conn->key_type = type;
3013         }
3014
3015         bacpy(&key->bdaddr, bdaddr);
3016         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
3017         key->pin_len = pin_len;
3018
3019         if (type == HCI_LK_CHANGED_COMBINATION)
3020                 key->type = old_key_type;
3021         else
3022                 key->type = type;
3023
3024         if (persistent)
3025                 *persistent = hci_persistent_key(hdev, conn, type,
3026                                                  old_key_type);
3027
3028         return key;
3029 }
3030
3031 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3032                             u8 addr_type, u8 type, u8 authenticated,
3033                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
3034 {
3035         struct smp_ltk *key, *old_key;
3036         bool master = ltk_type_master(type);
3037
3038         old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
3039         if (old_key)
3040                 key = old_key;
3041         else {
3042                 key = kzalloc(sizeof(*key), GFP_KERNEL);
3043                 if (!key)
3044                         return NULL;
3045                 list_add(&key->list, &hdev->long_term_keys);
3046         }
3047
3048         bacpy(&key->bdaddr, bdaddr);
3049         key->bdaddr_type = addr_type;
3050         memcpy(key->val, tk, sizeof(key->val));
3051         key->authenticated = authenticated;
3052         key->ediv = ediv;
3053         key->rand = rand;
3054         key->enc_size = enc_size;
3055         key->type = type;
3056
3057         return key;
3058 }
3059
3060 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3061                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
3062 {
3063         struct smp_irk *irk;
3064
3065         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3066         if (!irk) {
3067                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3068                 if (!irk)
3069                         return NULL;
3070
3071                 bacpy(&irk->bdaddr, bdaddr);
3072                 irk->addr_type = addr_type;
3073
3074                 list_add(&irk->list, &hdev->identity_resolving_keys);
3075         }
3076
3077         memcpy(irk->val, val, 16);
3078         bacpy(&irk->rpa, rpa);
3079
3080         return irk;
3081 }
3082
3083 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3084 {
3085         struct link_key *key;
3086
3087         key = hci_find_link_key(hdev, bdaddr);
3088         if (!key)
3089                 return -ENOENT;
3090
3091         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3092
3093         list_del(&key->list);
3094         kfree(key);
3095
3096         return 0;
3097 }
3098
3099 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
3100 {
3101         struct smp_ltk *k, *tmp;
3102         int removed = 0;
3103
3104         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3105                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
3106                         continue;
3107
3108                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3109
3110                 list_del(&k->list);
3111                 kfree(k);
3112                 removed++;
3113         }
3114
3115         return removed ? 0 : -ENOENT;
3116 }
3117
3118 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3119 {
3120         struct smp_irk *k, *tmp;
3121
3122         list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3123                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3124                         continue;
3125
3126                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3127
3128                 list_del(&k->list);
3129                 kfree(k);
3130         }
3131 }
3132
3133 /* HCI command timer function */
3134 static void hci_cmd_timeout(struct work_struct *work)
3135 {
3136         struct hci_dev *hdev = container_of(work, struct hci_dev,
3137                                             cmd_timer.work);
3138
3139         if (hdev->sent_cmd) {
3140                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3141                 u16 opcode = __le16_to_cpu(sent->opcode);
3142
3143                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3144         } else {
3145                 BT_ERR("%s command tx timeout", hdev->name);
3146         }
3147
3148         atomic_set(&hdev->cmd_cnt, 1);
3149         queue_work(hdev->workqueue, &hdev->cmd_work);
3150 }
3151
3152 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
3153                                           bdaddr_t *bdaddr)
3154 {
3155         struct oob_data *data;
3156
3157         list_for_each_entry(data, &hdev->remote_oob_data, list)
3158                 if (bacmp(bdaddr, &data->bdaddr) == 0)
3159                         return data;
3160
3161         return NULL;
3162 }
3163
3164 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3165 {
3166         struct oob_data *data;
3167
3168         data = hci_find_remote_oob_data(hdev, bdaddr);
3169         if (!data)
3170                 return -ENOENT;
3171
3172         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3173
3174         list_del(&data->list);
3175         kfree(data);
3176
3177         return 0;
3178 }
3179
3180 void hci_remote_oob_data_clear(struct hci_dev *hdev)
3181 {
3182         struct oob_data *data, *n;
3183
3184         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3185                 list_del(&data->list);
3186                 kfree(data);
3187         }
3188 }
3189
3190 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3191                             u8 *hash, u8 *randomizer)
3192 {
3193         struct oob_data *data;
3194
3195         data = hci_find_remote_oob_data(hdev, bdaddr);
3196         if (!data) {
3197                 data = kmalloc(sizeof(*data), GFP_KERNEL);
3198                 if (!data)
3199                         return -ENOMEM;
3200
3201                 bacpy(&data->bdaddr, bdaddr);
3202                 list_add(&data->list, &hdev->remote_oob_data);
3203         }
3204
3205         memcpy(data->hash192, hash, sizeof(data->hash192));
3206         memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
3207
3208         memset(data->hash256, 0, sizeof(data->hash256));
3209         memset(data->randomizer256, 0, sizeof(data->randomizer256));
3210
3211         BT_DBG("%s for %pMR", hdev->name, bdaddr);
3212
3213         return 0;
3214 }
3215
3216 int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3217                                 u8 *hash192, u8 *randomizer192,
3218                                 u8 *hash256, u8 *randomizer256)
3219 {
3220         struct oob_data *data;
3221
3222         data = hci_find_remote_oob_data(hdev, bdaddr);
3223         if (!data) {
3224                 data = kmalloc(sizeof(*data), GFP_KERNEL);
3225                 if (!data)
3226                         return -ENOMEM;
3227
3228                 bacpy(&data->bdaddr, bdaddr);
3229                 list_add(&data->list, &hdev->remote_oob_data);
3230         }
3231
3232         memcpy(data->hash192, hash192, sizeof(data->hash192));
3233         memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3234
3235         memcpy(data->hash256, hash256, sizeof(data->hash256));
3236         memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3237
3238         BT_DBG("%s for %pMR", hdev->name, bdaddr);
3239
3240         return 0;
3241 }
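/* Note: the 192-bit hash/randomizer pair comes from P-192 based legacy
 * Secure Simple Pairing OOB data, while the 256-bit variants carry the
 * P-256 values used for Secure Connections; the non-ext helper above
 * zeroes the 256-bit fields since its caller only has legacy OOB data.
 */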
3242
3243 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
3244                                          bdaddr_t *bdaddr, u8 type)
3245 {
3246         struct bdaddr_list *b;
3247
3248         list_for_each_entry(b, &hdev->blacklist, list) {
3249                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3250                         return b;
3251         }
3252
3253         return NULL;
3254 }
3255
3256 static void hci_blacklist_clear(struct hci_dev *hdev)
3257 {
3258         struct list_head *p, *n;
3259
3260         list_for_each_safe(p, n, &hdev->blacklist) {
3261                 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3262
3263                 list_del(p);
3264                 kfree(b);
3265         }
3266 }
3267
3268 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3269 {
3270         struct bdaddr_list *entry;
3271
3272         if (!bacmp(bdaddr, BDADDR_ANY))
3273                 return -EBADF;
3274
3275         if (hci_blacklist_lookup(hdev, bdaddr, type))
3276                 return -EEXIST;
3277
3278         entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3279         if (!entry)
3280                 return -ENOMEM;
3281
3282         bacpy(&entry->bdaddr, bdaddr);
3283         entry->bdaddr_type = type;
3284
3285         list_add(&entry->list, &hdev->blacklist);
3286
3287         return mgmt_device_blocked(hdev, bdaddr, type);
3288 }
3289
3290 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3291 {
3292         struct bdaddr_list *entry;
3293
3294         if (!bacmp(bdaddr, BDADDR_ANY)) {
3295                 hci_blacklist_clear(hdev);
3296                 return 0;
3297         }
3298
3299         entry = hci_blacklist_lookup(hdev, bdaddr, type);
3300         if (!entry)
3301                 return -ENOENT;
3302
3303         list_del(&entry->list);
3304         kfree(entry);
3305
3306         return mgmt_device_unblocked(hdev, bdaddr, type);
3307 }
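
/* Usage sketch (illustrative only): blocking a BR/EDR device and later
 * flushing the list. Passing BDADDR_ANY to hci_blacklist_del() clears
 * every entry, mirroring the wildcard handling above; add and per-address
 * del also notify userspace via mgmt_device_blocked()/mgmt_device_unblocked():
 *
 *	int err;
 *
 *	err = hci_blacklist_add(hdev, &bdaddr, BDADDR_BREDR);
 *	if (err == -EEXIST)
 *		return err;
 *
 *	err = hci_blacklist_del(hdev, BDADDR_ANY, 0);
 */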
3308
3309 struct bdaddr_list *hci_white_list_lookup(struct hci_dev *hdev,
3310                                           bdaddr_t *bdaddr, u8 type)
3311 {
3312         struct bdaddr_list *b;
3313
3314         list_for_each_entry(b, &hdev->le_white_list, list) {
3315                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3316                         return b;
3317         }
3318
3319         return NULL;
3320 }
3321
3322 void hci_white_list_clear(struct hci_dev *hdev)
3323 {
3324         struct list_head *p, *n;
3325
3326         list_for_each_safe(p, n, &hdev->le_white_list) {
3327                 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3328
3329                 list_del(p);
3330                 kfree(b);
3331         }
3332 }
3333
3334 int hci_white_list_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3335 {
3336         struct bdaddr_list *entry;
3337
3338         if (!bacmp(bdaddr, BDADDR_ANY))
3339                 return -EBADF;
3340
3341         entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3342         if (!entry)
3343                 return -ENOMEM;
3344
3345         bacpy(&entry->bdaddr, bdaddr);
3346         entry->bdaddr_type = type;
3347
3348         list_add(&entry->list, &hdev->le_white_list);
3349
3350         return 0;
3351 }
3352
3353 int hci_white_list_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3354 {
3355         struct bdaddr_list *entry;
3356
3357         if (!bacmp(bdaddr, BDADDR_ANY))
3358                 return -EBADF;
3359
3360         entry = hci_white_list_lookup(hdev, bdaddr, type);
3361         if (!entry)
3362                 return -ENOENT;
3363
3364         list_del(&entry->list);
3365         kfree(entry);
3366
3367         return 0;
3368 }
3369
3370 /* This function requires the caller holds hdev->lock */
3371 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3372                                                bdaddr_t *addr, u8 addr_type)
3373 {
3374         struct hci_conn_params *params;
3375
3376         list_for_each_entry(params, &hdev->le_conn_params, list) {
3377                 if (bacmp(&params->addr, addr) == 0 &&
3378                     params->addr_type == addr_type) {
3379                         return params;
3380                 }
3381         }
3382
3383         return NULL;
3384 }
3385
3386 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3387 {
3388         struct hci_conn *conn;
3389
3390         conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3391         if (!conn)
3392                 return false;
3393
3394         if (conn->dst_type != type)
3395                 return false;
3396
3397         if (conn->state != BT_CONNECTED)
3398                 return false;
3399
3400         return true;
3401 }
3402
3403 static bool is_identity_address(bdaddr_t *addr, u8 addr_type)
3404 {
3405         if (addr_type == ADDR_LE_DEV_PUBLIC)
3406                 return true;
3407
3408         /* Check for Random Static address type */
3409         if ((addr->b[5] & 0xc0) == 0xc0)
3410                 return true;
3411
3412         return false;
3413 }
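
/* The static-random test above relies on bdaddr_t being stored
 * little-endian, so b[5] is the most significant byte of the printed
 * address. Per the Core specification a static random address has its
 * two top bits set to 0b11, hence the 0xc0 mask. Illustrative values:
 *
 *	C0:11:22:33:44:55  ->  b[5] = 0xc0, static random: identity
 *	7B:11:22:33:44:55  ->  b[5] = 0x7b, resolvable private: not identity
 */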
3414
3415 /* This function requires the caller holds hdev->lock */
3416 struct bdaddr_list *hci_pend_le_conn_lookup(struct hci_dev *hdev,
3417                                             bdaddr_t *addr, u8 addr_type)
3418 {
3419         struct bdaddr_list *entry;
3420
3421         list_for_each_entry(entry, &hdev->pend_le_conns, list) {
3422                 if (bacmp(&entry->bdaddr, addr) == 0 &&
3423                     entry->bdaddr_type == addr_type)
3424                         return entry;
3425         }
3426
3427         return NULL;
3428 }
3429
3430 /* This function requires the caller holds hdev->lock */
3431 void hci_pend_le_conn_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3432 {
3433         struct bdaddr_list *entry;
3434
3435         entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3436         if (entry)
3437                 goto done;
3438
3439         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3440         if (!entry) {
3441                 BT_ERR("Out of memory");
3442                 return;
3443         }
3444
3445         bacpy(&entry->bdaddr, addr);
3446         entry->bdaddr_type = addr_type;
3447
3448         list_add(&entry->list, &hdev->pend_le_conns);
3449
3450         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3451
3452 done:
3453         hci_update_background_scan(hdev);
3454 }
3455
3456 /* This function requires the caller holds hdev->lock */
3457 void hci_pend_le_conn_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3458 {
3459         struct bdaddr_list *entry;
3460
3461         entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3462         if (!entry)
3463                 goto done;
3464
3465         list_del(&entry->list);
3466         kfree(entry);
3467
3468         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3469
3470 done:
3471         hci_update_background_scan(hdev);
3472 }
3473
3474 /* This function requires the caller holds hdev->lock */
3475 void hci_pend_le_conns_clear(struct hci_dev *hdev)
3476 {
3477         struct bdaddr_list *entry, *tmp;
3478
3479         list_for_each_entry_safe(entry, tmp, &hdev->pend_le_conns, list) {
3480                 list_del(&entry->list);
3481                 kfree(entry);
3482         }
3483
3484         BT_DBG("All LE pending connections cleared");
3485
3486         hci_update_background_scan(hdev);
3487 }
3488
3489 /* This function requires the caller holds hdev->lock */
3490 int hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3491                         u8 auto_connect, u16 conn_min_interval,
3492                         u16 conn_max_interval)
3493 {
3494         struct hci_conn_params *params;
3495
3496         if (!is_identity_address(addr, addr_type))
3497                 return -EINVAL;
3498
3499         params = hci_conn_params_lookup(hdev, addr, addr_type);
3500         if (params)
3501                 goto update;
3502
3503         params = kzalloc(sizeof(*params), GFP_KERNEL);
3504         if (!params) {
3505                 BT_ERR("Out of memory");
3506                 return -ENOMEM;
3507         }
3508
3509         bacpy(&params->addr, addr);
3510         params->addr_type = addr_type;
3511
3512         list_add(&params->list, &hdev->le_conn_params);
3513
3514 update:
3515         params->conn_min_interval = conn_min_interval;
3516         params->conn_max_interval = conn_max_interval;
3517         params->conn_latency = 0x0000;
3518         params->supervision_timeout = 0x002a;
3519         params->auto_connect = auto_connect;
3520
3521         switch (auto_connect) {
3522         case HCI_AUTO_CONN_DISABLED:
3523         case HCI_AUTO_CONN_LINK_LOSS:
3524                 hci_pend_le_conn_del(hdev, addr, addr_type);
3525                 break;
3526         case HCI_AUTO_CONN_ALWAYS:
3527                 if (!is_connected(hdev, addr, addr_type))
3528                         hci_pend_le_conn_add(hdev, addr, addr_type);
3529                 break;
3530         }
3531
3532         BT_DBG("addr %pMR (type %u) auto_connect %u conn_min_interval 0x%.4x "
3533                "conn_max_interval 0x%.4x", addr, addr_type, auto_connect,
3534                conn_min_interval, conn_max_interval);
3535
3536         return 0;
3537 }
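
/* Usage sketch (illustrative only): callers are expected to hold
 * hdev->lock around the connection-parameter helpers, e.g. to
 * auto-connect a public LE device using the 50-70 ms default window
 * configured in hci_alloc_dev():
 *
 *	hci_dev_lock(hdev);
 *	err = hci_conn_params_add(hdev, &addr, ADDR_LE_DEV_PUBLIC,
 *				  HCI_AUTO_CONN_ALWAYS, 0x0028, 0x0038);
 *	hci_dev_unlock(hdev);
 *
 * With HCI_AUTO_CONN_ALWAYS the address is also put on pend_le_conns,
 * which in turn triggers hci_update_background_scan().
 */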
3538
3539 /* This function requires the caller holds hdev->lock */
3540 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3541 {
3542         struct hci_conn_params *params;
3543
3544         params = hci_conn_params_lookup(hdev, addr, addr_type);
3545         if (!params)
3546                 return;
3547
3548         hci_pend_le_conn_del(hdev, addr, addr_type);
3549
3550         list_del(&params->list);
3551         kfree(params);
3552
3553         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3554 }
3555
3556 /* This function requires the caller holds hdev->lock */
3557 void hci_conn_params_clear(struct hci_dev *hdev)
3558 {
3559         struct hci_conn_params *params, *tmp;
3560
3561         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3562                 list_del(&params->list);
3563                 kfree(params);
3564         }
3565
3566         hci_pend_le_conns_clear(hdev);
3567
3568         BT_DBG("All LE connection parameters were removed");
3569 }
3570
3571 static void inquiry_complete(struct hci_dev *hdev, u8 status)
3572 {
3573         if (status) {
3574                 BT_ERR("Failed to start inquiry: status %d", status);
3575
3576                 hci_dev_lock(hdev);
3577                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3578                 hci_dev_unlock(hdev);
3579                 return;
3580         }
3581 }
3582
3583 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
3584 {
3585         /* General inquiry access code (GIAC) */
3586         u8 lap[3] = { 0x33, 0x8b, 0x9e };
3587         struct hci_request req;
3588         struct hci_cp_inquiry cp;
3589         int err;
3590
3591         if (status) {
3592                 BT_ERR("Failed to disable LE scanning: status %d", status);
3593                 return;
3594         }
3595
3596         switch (hdev->discovery.type) {
3597         case DISCOV_TYPE_LE:
3598                 hci_dev_lock(hdev);
3599                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3600                 hci_dev_unlock(hdev);
3601                 break;
3602
3603         case DISCOV_TYPE_INTERLEAVED:
3604                 hci_req_init(&req, hdev);
3605
3606                 memset(&cp, 0, sizeof(cp));
3607                 memcpy(&cp.lap, lap, sizeof(cp.lap));
3608                 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3609                 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3610
3611                 hci_dev_lock(hdev);
3612
3613                 hci_inquiry_cache_flush(hdev);
3614
3615                 err = hci_req_run(&req, inquiry_complete);
3616                 if (err) {
3617                         BT_ERR("Inquiry request failed: err %d", err);
3618                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3619                 }
3620
3621                 hci_dev_unlock(hdev);
3622                 break;
3623         }
3624 }
3625
3626 static void le_scan_disable_work(struct work_struct *work)
3627 {
3628         struct hci_dev *hdev = container_of(work, struct hci_dev,
3629                                             le_scan_disable.work);
3630         struct hci_request req;
3631         int err;
3632
3633         BT_DBG("%s", hdev->name);
3634
3635         hci_req_init(&req, hdev);
3636
3637         hci_req_add_le_scan_disable(&req);
3638
3639         err = hci_req_run(&req, le_scan_disable_work_complete);
3640         if (err)
3641                 BT_ERR("Disable LE scanning request failed: err %d", err);
3642 }
3643
3644 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3645 {
3646         struct hci_dev *hdev = req->hdev;
3647
3648         /* If we're advertising or initiating an LE connection we can't
3649          * go ahead and change the random address at this time. This is
3650          * because the eventual initiator address used for the
3651          * subsequently created connection will be undefined (some
3652          * controllers use the new address and others the one we had
3653          * when the operation started).
3654          *
3655          * In this kind of scenario skip the update and let the random
3656          * address be updated at the next cycle.
3657          */
3658         if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
3659             hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3660                 BT_DBG("Deferring random address update");
3661                 return;
3662         }
3663
3664         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3665 }
3666
3667 int hci_update_random_address(struct hci_request *req, bool require_privacy,
3668                               u8 *own_addr_type)
3669 {
3670         struct hci_dev *hdev = req->hdev;
3671         int err;
3672
3673         /* If privacy is enabled use a resolvable private address. If
3674          * the current RPA has expired or there is something other than
3675          * the current RPA in use, then generate a new one.
3676          */
3677         if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
3678                 int to;
3679
3680                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3681
3682                 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
3683                     !bacmp(&hdev->random_addr, &hdev->rpa))
3684                         return 0;
3685
3686                 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
3687                 if (err < 0) {
3688                         BT_ERR("%s failed to generate new RPA", hdev->name);
3689                         return err;
3690                 }
3691
3692                 set_random_addr(req, &hdev->rpa);
3693
3694                 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3695                 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3696
3697                 return 0;
3698         }
3699
3700         /* In case of required privacy without resolvable private address,
3701          * use an unresolvable private address. This is useful for active
3702          * scanning and non-connectable advertising.
3703          */
3704         if (require_privacy) {
3705                 bdaddr_t urpa;
3706
3707                 get_random_bytes(&urpa, 6);
3708                 urpa.b[5] &= 0x3f;      /* Clear two most significant bits */
3709
3710                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3711                 set_random_addr(req, &urpa);
3712                 return 0;
3713         }
3714
3715         /* If forcing static address is in use or there is no public
3716          * address use the static address as random address (but skip
3717          * the HCI command if the current random address is already the
3718          * static one).
3719          */
3720         if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3721             !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3722                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3723                 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3724                         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3725                                     &hdev->static_addr);
3726                 return 0;
3727         }
3728
3729         /* Neither privacy nor static address is being used so use a
3730          * public address.
3731          */
3732         *own_addr_type = ADDR_LE_DEV_PUBLIC;
3733
3734         return 0;
3735 }
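
/* Usage sketch (illustrative only): request builders call this helper
 * before queueing advertising or scan parameter commands:
 *
 *	u8 own_addr_type;
 *	int err;
 *
 *	err = hci_update_random_address(req, false, &own_addr_type);
 *	if (err)
 *		return err;
 *	... queue e.g. HCI_OP_LE_SET_ADV_PARAM using own_addr_type ...
 *
 * The decision tree above resolves to: privacy -> RPA, require_privacy
 * alone -> non-resolvable address, forced or implicit static address ->
 * hdev->static_addr, otherwise the public address with no extra command.
 */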
3736
3737 /* Copy the Identity Address of the controller.
3738  *
3739  * If the controller has a public BD_ADDR, then by default use that one.
3740  * If this is a LE only controller without a public address, default to
3741  * the static random address.
3742  *
3743  * For debugging purposes it is possible to force controllers with a
3744  * public address to use the static random address instead.
3745  */
3746 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3747                                u8 *bdaddr_type)
3748 {
3749         if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3750             !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3751                 bacpy(bdaddr, &hdev->static_addr);
3752                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3753         } else {
3754                 bacpy(bdaddr, &hdev->bdaddr);
3755                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3756         }
3757 }
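
/* Usage sketch (illustrative only): code that reports the controller's
 * own address, such as mgmt replies, uses the identity address rather
 * than whatever RPA happens to be active:
 *
 *	bdaddr_t id_addr;
 *	u8 id_addr_type;
 *
 *	hci_copy_identity_address(hdev, &id_addr, &id_addr_type);
 *	BT_DBG("identity %pMR (type %u)", &id_addr, id_addr_type);
 */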
3758
3759 /* Alloc HCI device */
3760 struct hci_dev *hci_alloc_dev(void)
3761 {
3762         struct hci_dev *hdev;
3763
3764         hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3765         if (!hdev)
3766                 return NULL;
3767
3768         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3769         hdev->esco_type = (ESCO_HV1);
3770         hdev->link_mode = (HCI_LM_ACCEPT);
3771         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
3772         hdev->io_capability = 0x03;     /* No Input No Output */
3773         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3774         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3775
3776         hdev->sniff_max_interval = 800;
3777         hdev->sniff_min_interval = 80;
3778
3779         hdev->le_adv_channel_map = 0x07;
3780         hdev->le_scan_interval = 0x0060;
3781         hdev->le_scan_window = 0x0030;
3782         hdev->le_conn_min_interval = 0x0028;
3783         hdev->le_conn_max_interval = 0x0038;
3784
3785         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3786         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3787         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3788         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3789
3790         mutex_init(&hdev->lock);
3791         mutex_init(&hdev->req_lock);
3792
3793         INIT_LIST_HEAD(&hdev->mgmt_pending);
3794         INIT_LIST_HEAD(&hdev->blacklist);
3795         INIT_LIST_HEAD(&hdev->uuids);
3796         INIT_LIST_HEAD(&hdev->link_keys);
3797         INIT_LIST_HEAD(&hdev->long_term_keys);
3798         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3799         INIT_LIST_HEAD(&hdev->remote_oob_data);
3800         INIT_LIST_HEAD(&hdev->le_white_list);
3801         INIT_LIST_HEAD(&hdev->le_conn_params);
3802         INIT_LIST_HEAD(&hdev->pend_le_conns);
3803         INIT_LIST_HEAD(&hdev->conn_hash.list);
3804
3805         INIT_WORK(&hdev->rx_work, hci_rx_work);
3806         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3807         INIT_WORK(&hdev->tx_work, hci_tx_work);
3808         INIT_WORK(&hdev->power_on, hci_power_on);
3809
3810         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3811         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3812         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3813
3814         skb_queue_head_init(&hdev->rx_q);
3815         skb_queue_head_init(&hdev->cmd_q);
3816         skb_queue_head_init(&hdev->raw_q);
3817
3818         init_waitqueue_head(&hdev->req_wait_q);
3819
3820         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3821
3822         hci_init_sysfs(hdev);
3823         discovery_init(hdev);
3824
3825         return hdev;
3826 }
3827 EXPORT_SYMBOL(hci_alloc_dev);
3828
3829 /* Free HCI device */
3830 void hci_free_dev(struct hci_dev *hdev)
3831 {
3832         /* will free via device release */
3833         put_device(&hdev->dev);
3834 }
3835 EXPORT_SYMBOL(hci_free_dev);
3836
3837 /* Register HCI device */
3838 int hci_register_dev(struct hci_dev *hdev)
3839 {
3840         int id, error;
3841
3842         if (!hdev->open || !hdev->close)
3843                 return -EINVAL;
3844
3845         /* Do not allow HCI_AMP devices to register at index 0,
3846          * so the index can be used as the AMP controller ID.
3847          */
3848         switch (hdev->dev_type) {
3849         case HCI_BREDR:
3850                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3851                 break;
3852         case HCI_AMP:
3853                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3854                 break;
3855         default:
3856                 return -EINVAL;
3857         }
3858
3859         if (id < 0)
3860                 return id;
3861
3862         sprintf(hdev->name, "hci%d", id);
3863         hdev->id = id;
3864
3865         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3866
3867         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3868                                           WQ_MEM_RECLAIM, 1, hdev->name);
3869         if (!hdev->workqueue) {
3870                 error = -ENOMEM;
3871                 goto err;
3872         }
3873
3874         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3875                                               WQ_MEM_RECLAIM, 1, hdev->name);
3876         if (!hdev->req_workqueue) {
3877                 destroy_workqueue(hdev->workqueue);
3878                 error = -ENOMEM;
3879                 goto err;
3880         }
3881
3882         if (!IS_ERR_OR_NULL(bt_debugfs))
3883                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3884
3885         dev_set_name(&hdev->dev, "%s", hdev->name);
3886
3887         hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3888                                                CRYPTO_ALG_ASYNC);
3889         if (IS_ERR(hdev->tfm_aes)) {
3890                 BT_ERR("Unable to create crypto context");
3891                 error = PTR_ERR(hdev->tfm_aes);
3892                 hdev->tfm_aes = NULL;
3893                 goto err_wqueue;
3894         }
3895
3896         error = device_add(&hdev->dev);
3897         if (error < 0)
3898                 goto err_tfm;
3899
3900         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3901                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3902                                     hdev);
3903         if (hdev->rfkill) {
3904                 if (rfkill_register(hdev->rfkill) < 0) {
3905                         rfkill_destroy(hdev->rfkill);
3906                         hdev->rfkill = NULL;
3907                 }
3908         }
3909
3910         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3911                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3912
3913         set_bit(HCI_SETUP, &hdev->dev_flags);
3914         set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3915
3916         if (hdev->dev_type == HCI_BREDR) {
3917                 /* Assume BR/EDR support until proven otherwise (such as
3918                  * through reading supported features during init).
3919                  */
3920                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3921         }
3922
3923         write_lock(&hci_dev_list_lock);
3924         list_add(&hdev->list, &hci_dev_list);
3925         write_unlock(&hci_dev_list_lock);
3926
3927         /* Devices that are marked for raw-only usage need to set
3928          * the HCI_RAW flag to indicate that only the user channel is
3929          * supported.
3930          */
3931         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3932                 set_bit(HCI_RAW, &hdev->flags);
3933
3934         hci_notify(hdev, HCI_DEV_REG);
3935         hci_dev_hold(hdev);
3936
3937         queue_work(hdev->req_workqueue, &hdev->power_on);
3938
3939         return id;
3940
3941 err_tfm:
3942         crypto_free_blkcipher(hdev->tfm_aes);
3943 err_wqueue:
3944         destroy_workqueue(hdev->workqueue);
3945         destroy_workqueue(hdev->req_workqueue);
3946 err:
3947         ida_simple_remove(&hci_index_ida, hdev->id);
3948
3949         return error;
3950 }
3951 EXPORT_SYMBOL(hci_register_dev);
3952
3953 /* Unregister HCI device */
3954 void hci_unregister_dev(struct hci_dev *hdev)
3955 {
3956         int i, id;
3957
3958         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3959
3960         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3961
3962         id = hdev->id;
3963
3964         write_lock(&hci_dev_list_lock);
3965         list_del(&hdev->list);
3966         write_unlock(&hci_dev_list_lock);
3967
3968         hci_dev_do_close(hdev);
3969
3970         for (i = 0; i < NUM_REASSEMBLY; i++)
3971                 kfree_skb(hdev->reassembly[i]);
3972
3973         cancel_work_sync(&hdev->power_on);
3974
3975         if (!test_bit(HCI_INIT, &hdev->flags) &&
3976             !test_bit(HCI_SETUP, &hdev->dev_flags) &&
3977             !test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
3978                 hci_dev_lock(hdev);
3979                 mgmt_index_removed(hdev);
3980                 hci_dev_unlock(hdev);
3981         }
3982
3983         /* mgmt_index_removed should take care of emptying the
3984          * pending list */
3985         BUG_ON(!list_empty(&hdev->mgmt_pending));
3986
3987         hci_notify(hdev, HCI_DEV_UNREG);
3988
3989         if (hdev->rfkill) {
3990                 rfkill_unregister(hdev->rfkill);
3991                 rfkill_destroy(hdev->rfkill);
3992         }
3993
3994         if (hdev->tfm_aes)
3995                 crypto_free_blkcipher(hdev->tfm_aes);
3996
3997         device_del(&hdev->dev);
3998
3999         debugfs_remove_recursive(hdev->debugfs);
4000
4001         destroy_workqueue(hdev->workqueue);
4002         destroy_workqueue(hdev->req_workqueue);
4003
4004         hci_dev_lock(hdev);
4005         hci_blacklist_clear(hdev);
4006         hci_uuids_clear(hdev);
4007         hci_link_keys_clear(hdev);
4008         hci_smp_ltks_clear(hdev);
4009         hci_smp_irks_clear(hdev);
4010         hci_remote_oob_data_clear(hdev);
4011         hci_white_list_clear(hdev);
4012         hci_conn_params_clear(hdev);
4013         hci_dev_unlock(hdev);
4014
4015         hci_dev_put(hdev);
4016
4017         ida_simple_remove(&hci_index_ida, id);
4018 }
4019 EXPORT_SYMBOL(hci_unregister_dev);
4020
4021 /* Suspend HCI device */
4022 int hci_suspend_dev(struct hci_dev *hdev)
4023 {
4024         hci_notify(hdev, HCI_DEV_SUSPEND);
4025         return 0;
4026 }
4027 EXPORT_SYMBOL(hci_suspend_dev);
4028
4029 /* Resume HCI device */
4030 int hci_resume_dev(struct hci_dev *hdev)
4031 {
4032         hci_notify(hdev, HCI_DEV_RESUME);
4033         return 0;
4034 }
4035 EXPORT_SYMBOL(hci_resume_dev);
4036
4037 /* Receive frame from HCI drivers */
4038 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4039 {
4040         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
4041                       !test_bit(HCI_INIT, &hdev->flags))) {
4042                 kfree_skb(skb);
4043                 return -ENXIO;
4044         }
4045
4046         /* Incoming skb */
4047         bt_cb(skb)->incoming = 1;
4048
4049         /* Time stamp */
4050         __net_timestamp(skb);
4051
4052         skb_queue_tail(&hdev->rx_q, skb);
4053         queue_work(hdev->workqueue, &hdev->rx_work);
4054
4055         return 0;
4056 }
4057 EXPORT_SYMBOL(hci_recv_frame);
4058
4059 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
4060                           int count, __u8 index)
4061 {
4062         int len = 0;
4063         int hlen = 0;
4064         int remain = count;
4065         struct sk_buff *skb;
4066         struct bt_skb_cb *scb;
4067
4068         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
4069             index >= NUM_REASSEMBLY)
4070                 return -EILSEQ;
4071
4072         skb = hdev->reassembly[index];
4073
4074         if (!skb) {
4075                 switch (type) {
4076                 case HCI_ACLDATA_PKT:
4077                         len = HCI_MAX_FRAME_SIZE;
4078                         hlen = HCI_ACL_HDR_SIZE;
4079                         break;
4080                 case HCI_EVENT_PKT:
4081                         len = HCI_MAX_EVENT_SIZE;
4082                         hlen = HCI_EVENT_HDR_SIZE;
4083                         break;
4084                 case HCI_SCODATA_PKT:
4085                         len = HCI_MAX_SCO_SIZE;
4086                         hlen = HCI_SCO_HDR_SIZE;
4087                         break;
4088                 }
4089
4090                 skb = bt_skb_alloc(len, GFP_ATOMIC);
4091                 if (!skb)
4092                         return -ENOMEM;
4093
4094                 scb = (void *) skb->cb;
4095                 scb->expect = hlen;
4096                 scb->pkt_type = type;
4097
4098                 hdev->reassembly[index] = skb;
4099         }
4100
4101         while (count) {
4102                 scb = (void *) skb->cb;
4103                 len = min_t(uint, scb->expect, count);
4104
4105                 memcpy(skb_put(skb, len), data, len);
4106
4107                 count -= len;
4108                 data += len;
4109                 scb->expect -= len;
4110                 remain = count;
4111
4112                 switch (type) {
4113                 case HCI_EVENT_PKT:
4114                         if (skb->len == HCI_EVENT_HDR_SIZE) {
4115                                 struct hci_event_hdr *h = hci_event_hdr(skb);
4116                                 scb->expect = h->plen;
4117
4118                                 if (skb_tailroom(skb) < scb->expect) {
4119                                         kfree_skb(skb);
4120                                         hdev->reassembly[index] = NULL;
4121                                         return -ENOMEM;
4122                                 }
4123                         }
4124                         break;
4125
4126                 case HCI_ACLDATA_PKT:
4127                         if (skb->len == HCI_ACL_HDR_SIZE) {
4128                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4129                                 scb->expect = __le16_to_cpu(h->dlen);
4130
4131                                 if (skb_tailroom(skb) < scb->expect) {
4132                                         kfree_skb(skb);
4133                                         hdev->reassembly[index] = NULL;
4134                                         return -ENOMEM;
4135                                 }
4136                         }
4137                         break;
4138
4139                 case HCI_SCODATA_PKT:
4140                         if (skb->len == HCI_SCO_HDR_SIZE) {
4141                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4142                                 scb->expect = h->dlen;
4143
4144                                 if (skb_tailroom(skb) < scb->expect) {
4145                                         kfree_skb(skb);
4146                                         hdev->reassembly[index] = NULL;
4147                                         return -ENOMEM;
4148                                 }
4149                         }
4150                         break;
4151                 }
4152
4153                 if (scb->expect == 0) {
4154                         /* Complete frame */
4155
4156                         bt_cb(skb)->pkt_type = type;
4157                         hci_recv_frame(hdev, skb);
4158
4159                         hdev->reassembly[index] = NULL;
4160                         return remain;
4161                 }
4162         }
4163
4164         return remain;
4165 }
4166
4167 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4168 {
4169         int rem = 0;
4170
4171         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4172                 return -EILSEQ;
4173
4174         while (count) {
4175                 rem = hci_reassembly(hdev, type, data, count, type - 1);
4176                 if (rem < 0)
4177                         return rem;
4178
4179                 data += (count - rem);
4180                 count = rem;
4181         }
4182
4183         return rem;
4184 }
4185 EXPORT_SYMBOL(hci_recv_fragment);
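
/* Usage sketch (illustrative only): a driver whose transport carries the
 * packet type out of band can feed arbitrarily sized chunks here. The
 * helper loops over hci_reassembly() until the buffer is consumed and
 * returns 0, or a negative error such as -EILSEQ for a bad type:
 *
 *	err = hci_recv_fragment(hdev, HCI_EVENT_PKT, buf, len);
 *	if (err < 0)
 *		... tear down the transport and resync ...
 *
 * Complete frames are passed to hci_recv_frame() internally, using one
 * reassembly slot per packet type (index type - 1).
 */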
4186
4187 #define STREAM_REASSEMBLY 0
4188
4189 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4190 {
4191         int type;
4192         int rem = 0;
4193
4194         while (count) {
4195                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4196
4197                 if (!skb) {
4198                         struct { char type; } *pkt;
4199
4200                         /* Start of the frame */
4201                         pkt = data;
4202                         type = pkt->type;
4203
4204                         data++;
4205                         count--;
4206                 } else
4207                         type = bt_cb(skb)->pkt_type;
4208
4209                 rem = hci_reassembly(hdev, type, data, count,
4210                                      STREAM_REASSEMBLY);
4211                 if (rem < 0)
4212                         return rem;
4213
4214                 data += (count - rem);
4215                 count = rem;
4216         }
4217
4218         return rem;
4219 }
4220 EXPORT_SYMBOL(hci_recv_stream_fragment);
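
/* Usage sketch (illustrative only): the stream variant serves transports
 * such as H:4 UART where the packet type byte is in-band as the first
 * octet of every frame:
 *
 *	err = hci_recv_stream_fragment(hdev, buf, len);
 *
 * It shares the single STREAM_REASSEMBLY slot across all packet types and
 * re-reads the type byte whenever a frame completes.
 */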
4221
4222 /* ---- Interface to upper protocols ---- */
4223
4224 int hci_register_cb(struct hci_cb *cb)
4225 {
4226         BT_DBG("%p name %s", cb, cb->name);
4227
4228         write_lock(&hci_cb_list_lock);
4229         list_add(&cb->list, &hci_cb_list);
4230         write_unlock(&hci_cb_list_lock);
4231
4232         return 0;
4233 }
4234 EXPORT_SYMBOL(hci_register_cb);
4235
4236 int hci_unregister_cb(struct hci_cb *cb)
4237 {
4238         BT_DBG("%p name %s", cb, cb->name);
4239
4240         write_lock(&hci_cb_list_lock);
4241         list_del(&cb->list);
4242         write_unlock(&hci_cb_list_lock);
4243
4244         return 0;
4245 }
4246 EXPORT_SYMBOL(hci_unregister_cb);
4247
4248 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4249 {
4250         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
4251
4252         /* Time stamp */
4253         __net_timestamp(skb);
4254
4255         /* Send copy to monitor */
4256         hci_send_to_monitor(hdev, skb);
4257
4258         if (atomic_read(&hdev->promisc)) {
4259                 /* Send copy to the sockets */
4260                 hci_send_to_sock(hdev, skb);
4261         }
4262
4263         /* Get rid of skb owner, prior to sending to the driver. */
4264         skb_orphan(skb);
4265
4266         if (hdev->send(hdev, skb) < 0)
4267                 BT_ERR("%s sending frame failed", hdev->name);
4268 }
4269
4270 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4271 {
4272         skb_queue_head_init(&req->cmd_q);
4273         req->hdev = hdev;
4274         req->err = 0;
4275 }
4276
4277 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4278 {
4279         struct hci_dev *hdev = req->hdev;
4280         struct sk_buff *skb;
4281         unsigned long flags;
4282
4283         BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4284
4285         /* If an error occurred during request building, remove all HCI
4286          * commands queued on the HCI request queue.
4287          */
4288         if (req->err) {
4289                 skb_queue_purge(&req->cmd_q);
4290                 return req->err;
4291         }
4292
4293         /* Do not allow empty requests */
4294         if (skb_queue_empty(&req->cmd_q))
4295                 return -ENODATA;
4296
4297         skb = skb_peek_tail(&req->cmd_q);
4298         bt_cb(skb)->req.complete = complete;
4299
4300         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4301         skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4302         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4303
4304         queue_work(hdev->workqueue, &hdev->cmd_work);
4305
4306         return 0;
4307 }
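
/* Usage sketch (illustrative only): the request API batches several HCI
 * commands behind a single completion, as le_scan_disable_work() above
 * demonstrates. A minimal caller looks like:
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
 *	err = hci_req_run(&req, inquiry_complete);
 *
 * Only the last queued command carries the complete callback, and any
 * error recorded while building (req->err) purges the whole batch here.
 */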
4308
4309 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
4310                                        u32 plen, const void *param)
4311 {
4312         int len = HCI_COMMAND_HDR_SIZE + plen;
4313         struct hci_command_hdr *hdr;
4314         struct sk_buff *skb;
4315
4316         skb = bt_skb_alloc(len, GFP_ATOMIC);
4317         if (!skb)
4318                 return NULL;
4319
4320         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
4321         hdr->opcode = cpu_to_le16(opcode);
4322         hdr->plen   = plen;
4323
4324         if (plen)
4325                 memcpy(skb_put(skb, plen), param, plen);
4326
4327         BT_DBG("skb len %d", skb->len);
4328
4329         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
4330
4331         return skb;
4332 }
4333
4334 /* Send HCI command */
4335 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4336                  const void *param)
4337 {
4338         struct sk_buff *skb;
4339
4340         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4341
4342         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4343         if (!skb) {
4344                 BT_ERR("%s no memory for command", hdev->name);
4345                 return -ENOMEM;
4346         }
4347
4348         /* Stand-alone HCI commands must be flagged as
4349          * single-command requests.
4350          */
4351         bt_cb(skb)->req.start = true;
4352
4353         skb_queue_tail(&hdev->cmd_q, skb);
4354         queue_work(hdev->workqueue, &hdev->cmd_work);
4355
4356         return 0;
4357 }
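
/* Usage sketch (illustrative only): a stand-alone command outside any
 * request, e.g. clearing the LE white list maintained earlier in this
 * file:
 *
 *	err = hci_send_cmd(hdev, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
 *
 * Because req.start is set, the command scheduler treats the skb as its
 * own single-command request when matching completion events.
 */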
4358
4359 /* Queue a command to an asynchronous HCI request */
4360 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4361                     const void *param, u8 event)
4362 {
4363         struct hci_dev *hdev = req->hdev;
4364         struct sk_buff *skb;
4365
4366         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4367
4368         /* If an error occurred during request building, there is no point in
4369          * queueing the HCI command. We can simply return.
4370          */
4371         if (req->err)
4372                 return;
4373
4374         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4375         if (!skb) {
4376                 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4377                        hdev->name, opcode);
4378                 req->err = -ENOMEM;
4379                 return;
4380         }
4381
4382         if (skb_queue_empty(&req->cmd_q))
4383                 bt_cb(skb)->req.start = true;
4384
4385         bt_cb(skb)->req.event = event;
4386
4387         skb_queue_tail(&req->cmd_q, skb);
4388 }
4389
4390 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4391                  const void *param)
4392 {
4393         hci_req_add_ev(req, opcode, plen, param, 0);
4394 }
4395
4396 /* Get data from the previously sent command */
4397 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4398 {
4399         struct hci_command_hdr *hdr;
4400
4401         if (!hdev->sent_cmd)
4402                 return NULL;
4403
4404         hdr = (void *) hdev->sent_cmd->data;
4405
4406         if (hdr->opcode != cpu_to_le16(opcode))
4407                 return NULL;
4408
4409         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4410
4411         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4412 }
4413
4414 /* Send ACL data */
4415 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4416 {
4417         struct hci_acl_hdr *hdr;
4418         int len = skb->len;
4419
4420         skb_push(skb, HCI_ACL_HDR_SIZE);
4421         skb_reset_transport_header(skb);
4422         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4423         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4424         hdr->dlen   = cpu_to_le16(len);
4425 }
4426
4427 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4428                           struct sk_buff *skb, __u16 flags)
4429 {
4430         struct hci_conn *conn = chan->conn;
4431         struct hci_dev *hdev = conn->hdev;
4432         struct sk_buff *list;
4433
4434         skb->len = skb_headlen(skb);
4435         skb->data_len = 0;
4436
4437         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4438
4439         switch (hdev->dev_type) {
4440         case HCI_BREDR:
4441                 hci_add_acl_hdr(skb, conn->handle, flags);
4442                 break;
4443         case HCI_AMP:
4444                 hci_add_acl_hdr(skb, chan->handle, flags);
4445                 break;
4446         default:
4447                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4448                 return;
4449         }
4450
4451         list = skb_shinfo(skb)->frag_list;
4452         if (!list) {
4453                 /* Non fragmented */
4454                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4455
4456                 skb_queue_tail(queue, skb);
4457         } else {
4458                 /* Fragmented */
4459                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4460
4461                 skb_shinfo(skb)->frag_list = NULL;
4462
4463                 /* Queue all fragments atomically */
4464                 spin_lock(&queue->lock);
4465
4466                 __skb_queue_tail(queue, skb);
4467
4468                 flags &= ~ACL_START;
4469                 flags |= ACL_CONT;
4470                 do {
4471                         skb = list; list = list->next;
4472
4473                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4474                         hci_add_acl_hdr(skb, conn->handle, flags);
4475
4476                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4477
4478                         __skb_queue_tail(queue, skb);
4479                 } while (list);
4480
4481                 spin_unlock(&queue->lock);
4482         }
4483 }
4484
4485 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4486 {
4487         struct hci_dev *hdev = chan->conn->hdev;
4488
4489         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4490
4491         hci_queue_acl(chan, &chan->data_q, skb, flags);
4492
4493         queue_work(hdev->workqueue, &hdev->tx_work);
4494 }
4495
4496 /* Send SCO data */
4497 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4498 {
4499         struct hci_dev *hdev = conn->hdev;
4500         struct hci_sco_hdr hdr;
4501
4502         BT_DBG("%s len %d", hdev->name, skb->len);
4503
4504         hdr.handle = cpu_to_le16(conn->handle);
4505         hdr.dlen   = skb->len;
4506
4507         skb_push(skb, HCI_SCO_HDR_SIZE);
4508         skb_reset_transport_header(skb);
4509         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4510
4511         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
4512
4513         skb_queue_tail(&conn->data_q, skb);
4514         queue_work(hdev->workqueue, &hdev->tx_work);
4515 }
4516
4517 /* ---- HCI TX task (outgoing data) ---- */
4518
4519 /* HCI Connection scheduler */
4520 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4521                                      int *quote)
4522 {
4523         struct hci_conn_hash *h = &hdev->conn_hash;
4524         struct hci_conn *conn = NULL, *c;
4525         unsigned int num = 0, min = ~0;
4526
4527         /* We don't have to lock device here. Connections are always
4528          * added and removed with TX task disabled. */
4529
4530         rcu_read_lock();
4531
4532         list_for_each_entry_rcu(c, &h->list, list) {
4533                 if (c->type != type || skb_queue_empty(&c->data_q))
4534                         continue;
4535
4536                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4537                         continue;
4538
4539                 num++;
4540
4541                 if (c->sent < min) {
4542                         min  = c->sent;
4543                         conn = c;
4544                 }
4545
4546                 if (hci_conn_num(hdev, type) == num)
4547                         break;
4548         }
4549
4550         rcu_read_unlock();
4551
4552         if (conn) {
4553                 int cnt, q;
4554
4555                 switch (conn->type) {
4556                 case ACL_LINK:
4557                         cnt = hdev->acl_cnt;
4558                         break;
4559                 case SCO_LINK:
4560                 case ESCO_LINK:
4561                         cnt = hdev->sco_cnt;
4562                         break;
4563                 case LE_LINK:
4564                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4565                         break;
4566                 default:
4567                         cnt = 0;
4568                         BT_ERR("Unknown link type");
4569                 }
4570
4571                 q = cnt / num;
4572                 *quote = q ? q : 1;
4573         } else
4574                 *quote = 0;
4575
4576         BT_DBG("conn %p quote %d", conn, *quote);
4577         return conn;
4578 }
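
/* Worked example for the quota above (illustrative numbers): with
 * hdev->acl_cnt = 8 free controller buffers shared by num = 3 busy ACL
 * links, q = 8 / 3 = 2, so the least-used connection (lowest sent count)
 * may transmit two frames this round; q = 0 still yields a quote of 1,
 * so no connection is ever starved outright.
 */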
4579
4580 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4581 {
4582         struct hci_conn_hash *h = &hdev->conn_hash;
4583         struct hci_conn *c;
4584
4585         BT_ERR("%s link tx timeout", hdev->name);
4586
4587         rcu_read_lock();
4588
4589         /* Kill stalled connections */
4590         list_for_each_entry_rcu(c, &h->list, list) {
4591                 if (c->type == type && c->sent) {
4592                         BT_ERR("%s killing stalled connection %pMR",
4593                                hdev->name, &c->dst);
4594                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4595                 }
4596         }
4597
4598         rcu_read_unlock();
4599 }
4600
4601 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4602                                       int *quote)
4603 {
4604         struct hci_conn_hash *h = &hdev->conn_hash;
4605         struct hci_chan *chan = NULL;
4606         unsigned int num = 0, min = ~0, cur_prio = 0;
4607         struct hci_conn *conn;
4608         int cnt, q, conn_num = 0;
4609
4610         BT_DBG("%s", hdev->name);
4611
4612         rcu_read_lock();
4613
4614         list_for_each_entry_rcu(conn, &h->list, list) {
4615                 struct hci_chan *tmp;
4616
4617                 if (conn->type != type)
4618                         continue;
4619
4620                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4621                         continue;
4622
4623                 conn_num++;
4624
4625                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4626                         struct sk_buff *skb;
4627
4628                         if (skb_queue_empty(&tmp->data_q))
4629                                 continue;
4630
4631                         skb = skb_peek(&tmp->data_q);
4632                         if (skb->priority < cur_prio)
4633                                 continue;
4634
4635                         if (skb->priority > cur_prio) {
4636                                 num = 0;
4637                                 min = ~0;
4638                                 cur_prio = skb->priority;
4639                         }
4640
4641                         num++;
4642
4643                         if (conn->sent < min) {
4644                                 min  = conn->sent;
4645                                 chan = tmp;
4646                         }
4647                 }
4648
4649                 if (hci_conn_num(hdev, type) == conn_num)
4650                         break;
4651         }
4652
4653         rcu_read_unlock();
4654
4655         if (!chan)
4656                 return NULL;
4657
4658         switch (chan->conn->type) {
4659         case ACL_LINK:
4660                 cnt = hdev->acl_cnt;
4661                 break;
4662         case AMP_LINK:
4663                 cnt = hdev->block_cnt;
4664                 break;
4665         case SCO_LINK:
4666         case ESCO_LINK:
4667                 cnt = hdev->sco_cnt;
4668                 break;
4669         case LE_LINK:
4670                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4671                 break;
4672         default:
4673                 cnt = 0;
4674                 BT_ERR("Unknown link type");
4675         }
4676
4677         q = cnt / num;
4678         *quote = q ? q : 1;
4679         BT_DBG("chan %p quote %d", chan, *quote);
4680         return chan;
4681 }
4682
4683 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4684 {
4685         struct hci_conn_hash *h = &hdev->conn_hash;
4686         struct hci_conn *conn;
4687         int num = 0;
4688
4689         BT_DBG("%s", hdev->name);
4690
4691         rcu_read_lock();
4692
4693         list_for_each_entry_rcu(conn, &h->list, list) {
4694                 struct hci_chan *chan;
4695
4696                 if (conn->type != type)
4697                         continue;
4698
4699                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4700                         continue;
4701
4702                 num++;
4703
4704                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4705                         struct sk_buff *skb;
4706
4707                         if (chan->sent) {
4708                                 chan->sent = 0;
4709                                 continue;
4710                         }
4711
4712                         if (skb_queue_empty(&chan->data_q))
4713                                 continue;
4714
4715                         skb = skb_peek(&chan->data_q);
4716                         if (skb->priority >= HCI_PRIO_MAX - 1)
4717                                 continue;
4718
4719                         skb->priority = HCI_PRIO_MAX - 1;
4720
4721                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4722                                skb->priority);
4723                 }
4724
4725                 if (hci_conn_num(hdev, type) == num)
4726                         break;
4727         }
4728
4729         rcu_read_unlock();
4730
4731 }
4732
4733 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4734 {
4735         /* Calculate count of blocks used by this packet */
4736         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4737 }
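
/* Worked example for the block math above (illustrative numbers): with
 * hdev->block_len = 256 and an ACL frame of skb->len = 1004 bytes, the
 * 4 byte ACL header is not counted, so DIV_ROUND_UP(1000, 256) = 4
 * controller buffer blocks are charged against hdev->block_cnt.
 */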
4738
4739 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4740 {
4741         if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
4742                 /* ACL tx timeout must be longer than maximum
4743                  * link supervision timeout (40.9 seconds) */
4744                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4745                                        HCI_ACL_TX_TIMEOUT))
4746                         hci_link_tx_to(hdev, ACL_LINK);
4747         }
4748 }
4749
4750 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4751 {
4752         unsigned int cnt = hdev->acl_cnt;
4753         struct hci_chan *chan;
4754         struct sk_buff *skb;
4755         int quote;
4756
4757         __check_timeout(hdev, cnt);
4758
4759         while (hdev->acl_cnt &&
4760                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4761                 u32 priority = (skb_peek(&chan->data_q))->priority;
4762                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4763                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4764                                skb->len, skb->priority);
4765
4766                         /* Stop if priority has changed */
4767                         if (skb->priority < priority)
4768                                 break;
4769
4770                         skb = skb_dequeue(&chan->data_q);
4771
4772                         hci_conn_enter_active_mode(chan->conn,
4773                                                    bt_cb(skb)->force_active);
4774
4775                         hci_send_frame(hdev, skb);
4776                         hdev->acl_last_tx = jiffies;
4777
4778                         hdev->acl_cnt--;
4779                         chan->sent++;
4780                         chan->conn->sent++;
4781                 }
4782         }
4783
4784         if (cnt != hdev->acl_cnt)
4785                 hci_prio_recalculate(hdev, ACL_LINK);
4786 }
4787
4788 static void hci_sched_acl_blk(struct hci_dev *hdev)
4789 {
4790         unsigned int cnt = hdev->block_cnt;
4791         struct hci_chan *chan;
4792         struct sk_buff *skb;
4793         int quote;
4794         u8 type;
4795
4796         __check_timeout(hdev, cnt);
4797
4798         BT_DBG("%s", hdev->name);
4799
4800         if (hdev->dev_type == HCI_AMP)
4801                 type = AMP_LINK;
4802         else
4803                 type = ACL_LINK;
4804
4805         while (hdev->block_cnt > 0 &&
4806                (chan = hci_chan_sent(hdev, type, &quote))) {
4807                 u32 priority = (skb_peek(&chan->data_q))->priority;
4808                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4809                         int blocks;
4810
4811                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4812                                skb->len, skb->priority);
4813
4814                         /* Stop if priority has changed */
4815                         if (skb->priority < priority)
4816                                 break;
4817
4818                         skb = skb_dequeue(&chan->data_q);
4819
4820                         blocks = __get_blocks(hdev, skb);
4821                         if (blocks > hdev->block_cnt)
4822                                 return;
4823
4824                         hci_conn_enter_active_mode(chan->conn,
4825                                                    bt_cb(skb)->force_active);
4826
4827                         hci_send_frame(hdev, skb);
4828                         hdev->acl_last_tx = jiffies;
4829
4830                         hdev->block_cnt -= blocks;
4831                         quote -= blocks;
4832
4833                         chan->sent += blocks;
4834                         chan->conn->sent += blocks;
4835                 }
4836         }
4837
4838         if (cnt != hdev->block_cnt)
4839                 hci_prio_recalculate(hdev, type);
4840 }
4841
4842 static void hci_sched_acl(struct hci_dev *hdev)
4843 {
4844         BT_DBG("%s", hdev->name);
4845
4846         /* No ACL link over BR/EDR controller */
4847         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4848                 return;
4849
4850         /* No AMP link over AMP controller */
4851         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4852                 return;
4853
4854         switch (hdev->flow_ctl_mode) {
4855         case HCI_FLOW_CTL_MODE_PACKET_BASED:
4856                 hci_sched_acl_pkt(hdev);
4857                 break;
4858
4859         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4860                 hci_sched_acl_blk(hdev);
4861                 break;
4862         }
4863 }
4864
4865 /* Schedule SCO */
4866 static void hci_sched_sco(struct hci_dev *hdev)
4867 {
4868         struct hci_conn *conn;
4869         struct sk_buff *skb;
4870         int quote;
4871
4872         BT_DBG("%s", hdev->name);
4873
4874         if (!hci_conn_num(hdev, SCO_LINK))
4875                 return;
4876
4877         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4878                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4879                         BT_DBG("skb %p len %d", skb, skb->len);
4880                         hci_send_frame(hdev, skb);
4881
4882                         conn->sent++;
4883                         if (conn->sent == ~0)
4884                                 conn->sent = 0;
4885                 }
4886         }
4887 }

static void hci_sched_esco(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        if (!hci_conn_num(hdev, ESCO_LINK))
                return;

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
                                                     &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(hdev, skb);

                        conn->sent++;
                        if (conn->sent == ~0)
                                conn->sent = 0;
                }
        }
}

static void hci_sched_le(struct hci_dev *hdev)
{
        struct hci_chan *chan;
        struct sk_buff *skb;
        int quote, cnt, tmp;

        BT_DBG("%s", hdev->name);

        if (!hci_conn_num(hdev, LE_LINK))
                return;

        if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
                /* LE tx timeout must be longer than maximum
                 * link supervision timeout (40.9 seconds) */
                if (!hdev->le_cnt && hdev->le_pkts &&
                    time_after(jiffies, hdev->le_last_tx + HZ * 45))
                        hci_link_tx_to(hdev, LE_LINK);
        }

        cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
        tmp = cnt;
        while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
                u32 priority = (skb_peek(&chan->data_q))->priority;
                while (quote-- && (skb = skb_peek(&chan->data_q))) {
                        BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
                               skb->len, skb->priority);

                        /* Stop if priority has changed */
                        if (skb->priority < priority)
                                break;

                        skb = skb_dequeue(&chan->data_q);

                        hci_send_frame(hdev, skb);
                        hdev->le_last_tx = jiffies;

                        cnt--;
                        chan->sent++;
                        chan->conn->sent++;
                }
        }

        if (hdev->le_pkts)
                hdev->le_cnt = cnt;
        else
                hdev->acl_cnt = cnt;

        if (cnt != tmp)
                hci_prio_recalculate(hdev, LE_LINK);
}
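
/* Note: controllers without a dedicated LE buffer pool report zero
 * buffers in LE Read Buffer Size, leaving hdev->le_pkts at 0; LE
 * traffic then borrows the ACL credits, which is why cnt falls back
 * to hdev->acl_cnt above and is written back to acl_cnt afterwards.
 */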

static void hci_tx_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
        struct sk_buff *skb;

        BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
               hdev->sco_cnt, hdev->le_cnt);

        if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                /* Schedule queues and send stuff to HCI driver */
                hci_sched_acl(hdev);
                hci_sched_sco(hdev);
                hci_sched_esco(hdev);
                hci_sched_le(hdev);
        }

        /* Send next queued raw (unknown type) packet */
        while ((skb = skb_dequeue(&hdev->raw_q)))
                hci_send_frame(hdev, skb);
}
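
/* Example (illustrative sketch): producers do not call
 * hci_send_frame() directly for data; they append the skb to the
 * relevant queue and schedule this work item, roughly:
 *
 *      skb_queue_tail(&chan->data_q, skb);
 *      queue_work(hdev->workqueue, &hdev->tx_work);
 *
 * hci_tx_work() then drains the queues subject to the controller's
 * flow-control credits.
 */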

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_acl_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle, flags;

        skb_pull(skb, HCI_ACL_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);
        flags  = hci_flags(handle);
        handle = hci_handle(handle);

        BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
               handle, flags);

        hdev->stat.acl_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

                /* Send to upper protocol */
                l2cap_recv_acldata(conn, skb, flags);
                return;
        } else {
                BT_ERR("%s ACL packet for unknown connection handle %d",
                       hdev->name, handle);
        }

        kfree_skb(skb);
}
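
/* Note: the 16-bit handle field of the ACL header packs a 12-bit
 * connection handle together with 4 bits of packet boundary and
 * broadcast flags; hci_handle() and hci_flags() (see hci.h) split it
 * roughly as
 *
 *      handle = raw & 0x0fff;
 *      flags  = raw >> 12;
 */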

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_sco_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle;

        skb_pull(skb, HCI_SCO_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);

        BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

        hdev->stat.sco_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                /* Send to upper protocol */
                sco_recv_scodata(conn, skb);
                return;
        } else {
                BT_ERR("%s SCO packet for unknown connection handle %d",
                       hdev->name, handle);
        }

        kfree_skb(skb);
}

static bool hci_req_is_complete(struct hci_dev *hdev)
{
        struct sk_buff *skb;

        skb = skb_peek(&hdev->cmd_q);
        if (!skb)
                return true;

        return bt_cb(skb)->req.start;
}
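
/* Note: bt_cb(skb)->req.start tags the first command of each queued
 * request, so an empty queue and a queue whose head starts a new
 * request both mean the current request has been fully dequeued.
 */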

static void hci_resend_last(struct hci_dev *hdev)
{
        struct hci_command_hdr *sent;
        struct sk_buff *skb;
        u16 opcode;

        if (!hdev->sent_cmd)
                return;

        sent = (void *) hdev->sent_cmd->data;
        opcode = __le16_to_cpu(sent->opcode);
        if (opcode == HCI_OP_RESET)
                return;

        skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
        if (!skb)
                return;

        skb_queue_head(&hdev->cmd_q, skb);
        queue_work(hdev->workqueue, &hdev->cmd_work);
}

void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
        hci_req_complete_t req_complete = NULL;
        struct sk_buff *skb;
        unsigned long flags;

        BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

        /* If the completed command doesn't match the last one that was
         * sent, we need to do special handling of it.
         */
        if (!hci_sent_cmd_data(hdev, opcode)) {
                /* Some CSR based controllers generate a spontaneous
                 * reset complete event during init and any pending
                 * command will never be completed. In such a case we
                 * need to resend whatever was the last sent
                 * command.
                 */
                if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
                        hci_resend_last(hdev);

                return;
        }

        /* If the command succeeded and there are still more commands in
         * this request, the request is not yet complete.
         */
        if (!status && !hci_req_is_complete(hdev))
                return;

        /* If this was the last command in a request, the complete
         * callback is found in hdev->sent_cmd instead of the
         * command queue (hdev->cmd_q).
         */
        if (hdev->sent_cmd) {
                req_complete = bt_cb(hdev->sent_cmd)->req.complete;

                if (req_complete) {
                        /* We must set the complete callback to NULL to
                         * avoid calling the callback more than once if
                         * this function gets called again.
                         */
                        bt_cb(hdev->sent_cmd)->req.complete = NULL;

                        goto call_complete;
                }
        }

        /* Remove all pending commands belonging to this request */
        spin_lock_irqsave(&hdev->cmd_q.lock, flags);
        while ((skb = __skb_dequeue(&hdev->cmd_q))) {
                if (bt_cb(skb)->req.start) {
                        __skb_queue_head(&hdev->cmd_q, skb);
                        break;
                }

                req_complete = bt_cb(skb)->req.complete;
                kfree_skb(skb);
        }
        spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
        if (req_complete)
                req_complete(hdev, status);
}
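
/* Example (illustrative walk-through): for a request of three queued
 * commands where the second fails with a non-zero status, the
 * hci_req_is_complete() check above no longer short-circuits, the
 * third command is flushed from cmd_q, and the request's complete
 * callback runs exactly once with the error status.
 */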

static void hci_rx_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
        struct sk_buff *skb;

        BT_DBG("%s", hdev->name);

        while ((skb = skb_dequeue(&hdev->rx_q))) {
                /* Send copy to monitor */
                hci_send_to_monitor(hdev, skb);

                if (atomic_read(&hdev->promisc)) {
                        /* Send copy to the sockets */
                        hci_send_to_sock(hdev, skb);
                }

                if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                        kfree_skb(skb);
                        continue;
                }

                if (test_bit(HCI_INIT, &hdev->flags)) {
                        /* Don't process data packets in these states. */
                        switch (bt_cb(skb)->pkt_type) {
                        case HCI_ACLDATA_PKT:
                        case HCI_SCODATA_PKT:
                                kfree_skb(skb);
                                continue;
                        }
                }

                /* Process frame */
                switch (bt_cb(skb)->pkt_type) {
                case HCI_EVENT_PKT:
                        BT_DBG("%s Event packet", hdev->name);
                        hci_event_packet(hdev, skb);
                        break;

                case HCI_ACLDATA_PKT:
                        BT_DBG("%s ACL data packet", hdev->name);
                        hci_acldata_packet(hdev, skb);
                        break;

                case HCI_SCODATA_PKT:
                        BT_DBG("%s SCO data packet", hdev->name);
                        hci_scodata_packet(hdev, skb);
                        break;

                default:
                        kfree_skb(skb);
                        break;
                }
        }
}

static void hci_cmd_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
        struct sk_buff *skb;

        BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
               atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

        /* Send queued commands */
        if (atomic_read(&hdev->cmd_cnt)) {
                skb = skb_dequeue(&hdev->cmd_q);
                if (!skb)
                        return;

                kfree_skb(hdev->sent_cmd);

                hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
                if (hdev->sent_cmd) {
                        atomic_dec(&hdev->cmd_cnt);
                        hci_send_frame(hdev, skb);
                        if (test_bit(HCI_RESET, &hdev->flags))
                                cancel_delayed_work(&hdev->cmd_timer);
                        else
                                schedule_delayed_work(&hdev->cmd_timer,
                                                      HCI_CMD_TIMEOUT);
                } else {
                        skb_queue_head(&hdev->cmd_q, skb);
                        queue_work(hdev->workqueue, &hdev->cmd_work);
                }
        }
}
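
/* Example (illustrative sketch): commands reach cmd_q through helpers
 * such as hci_send_cmd(), which builds the command skb, queues it and
 * kicks this work item, e.g.
 *
 *      hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
 *
 * hdev->cmd_cnt then throttles transmission until the controller
 * indicates, via a command complete/status event, that it can accept
 * more commands.
 */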

void hci_req_add_le_scan_disable(struct hci_request *req)
{
        struct hci_cp_le_set_scan_enable cp;

        memset(&cp, 0, sizeof(cp));
        cp.enable = LE_SCAN_DISABLE;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
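
/* Typical usage (see hci_update_background_scan() below): batch the
 * command into a request and run it asynchronously, roughly:
 *
 *      struct hci_request req;
 *
 *      hci_req_init(&req, hdev);
 *      hci_req_add_le_scan_disable(&req);
 *      hci_req_run(&req, complete_cb);
 *
 * where complete_cb is a hypothetical hci_req_complete_t callback.
 */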

void hci_req_add_le_passive_scan(struct hci_request *req)
{
        struct hci_cp_le_set_scan_param param_cp;
        struct hci_cp_le_set_scan_enable enable_cp;
        struct hci_dev *hdev = req->hdev;
        u8 own_addr_type;

        /* Set require_privacy to false since no SCAN_REQ is sent
         * during passive scanning. Not using an unresolvable address
         * here is important so that peer devices using direct
         * advertising with our address will be correctly reported
         * by the controller.
         */
        if (hci_update_random_address(req, false, &own_addr_type))
                return;

        memset(&param_cp, 0, sizeof(param_cp));
        param_cp.type = LE_SCAN_PASSIVE;
        param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
        param_cp.window = cpu_to_le16(hdev->le_scan_window);
        param_cp.own_address_type = own_addr_type;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
                    &param_cp);

        memset(&enable_cp, 0, sizeof(enable_cp));
        enable_cp.enable = LE_SCAN_ENABLE;
        enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
                    &enable_cp);
}
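
/* Note: per the LE Set Scan Parameters command, le_scan_interval and
 * le_scan_window are expressed in units of 0.625 ms, and the scan
 * window must not exceed the scan interval.
 */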

static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
{
        if (status)
                BT_DBG("HCI request failed to update background scanning: status 0x%2.2x",
                       status);
}

/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires that the caller holds hdev->lock.
 */
void hci_update_background_scan(struct hci_dev *hdev)
{
        struct hci_request req;
        struct hci_conn *conn;
        int err;

        if (test_bit(HCI_UNREGISTER, &hdev->dev_flags))
                return;

        hci_req_init(&req, hdev);

        if (list_empty(&hdev->pend_le_conns)) {
                /* If there are no pending LE connections, we should stop
                 * the background scanning.
                 */

                /* If controller is not scanning we are done. */
                if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
                        return;

                hci_req_add_le_scan_disable(&req);

                BT_DBG("%s stopping background scanning", hdev->name);
        } else {
                /* If there is at least one pending LE connection, we should
                 * keep the background scan running.
                 */

                /* If controller is connecting, we should not start scanning
                 * since some controllers are not able to scan and connect at
                 * the same time.
                 */
                conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
                if (conn)
                        return;

                /* If controller is currently scanning, we stop it to ensure we
                 * don't miss any advertising (due to duplicates filter).
                 */
                if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
                        hci_req_add_le_scan_disable(&req);

                hci_req_add_le_passive_scan(&req);

                BT_DBG("%s starting background scanning", hdev->name);
        }

        err = hci_req_run(&req, update_background_scan_complete);
        if (err)
                BT_ERR("Failed to run HCI request: err %d", err);
}