Bluetooth: Add workaround for buggy max_page features page value
net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

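/* Dump all locally supported feature pages. The iteration is bounded by
 * both HCI_MAX_PAGES and hdev->max_page, so a controller reporting a
 * bogus max_page value can never push the loop past the end of the
 * features array.
 */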
static int features_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        u8 p;

        hci_dev_lock(hdev);
        for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                seq_printf(f, "Page %u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
                           hdev->features[p][0], hdev->features[p][1],
                           hdev->features[p][2], hdev->features[p][3],
                           hdev->features[p][4], hdev->features[p][5],
                           hdev->features[p][6], hdev->features[p][7]);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
        return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
        .open           = features_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->blacklist, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
        return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
        .open           = blacklist_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bt_uuid *uuid;

        hci_dev_lock(hdev);
        list_for_each_entry(uuid, &hdev->uuids, list) {
                u32 data0, data5;
                u16 data1, data2, data3, data4;

                data5 = get_unaligned_le32(uuid->uuid);
                data4 = get_unaligned_le16(uuid->uuid + 4);
                data3 = get_unaligned_le16(uuid->uuid + 6);
                data2 = get_unaligned_le16(uuid->uuid + 8);
                data1 = get_unaligned_le16(uuid->uuid + 10);
                data0 = get_unaligned_le32(uuid->uuid + 12);

                seq_printf(f, "%.8x-%.4x-%.4x-%.4x-%.4x%.8x\n",
                           data0, data1, data2, data3, data4, data5);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
        return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
        .open           = uuids_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        hci_dev_lock(hdev);

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;
                seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
                           &data->bdaddr,
                           data->pscan_rep_mode, data->pscan_period_mode,
                           data->pscan_mode, data->dev_class[2],
                           data->dev_class[1], data->dev_class[0],
                           __le16_to_cpu(data->clock_offset),
                           data->rssi, data->ssp_mode, e->timestamp);
        }

        hci_dev_unlock(hdev);

        return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
        return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
        .open           = inquiry_cache_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->voice_setting;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
                        NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        hdev->auto_accept_delay = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->auto_accept_delay;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
                        auto_accept_delay_set, "%llu\n");

static int idle_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val != 0 && (val < 500 || val > 3600000))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->idle_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->idle_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
                        idle_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
                        sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
                        sniff_max_interval_set, "%llu\n");

static int static_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->static_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
        .open           = static_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

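/* Fetch the last event received during a synchronous request and check
 * that it is the event being waited for; with event == 0 it must be a
 * Command Complete matching the given opcode. The skb is consumed on
 * any mismatch.
 */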
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

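/* Send a single HCI command and sleep until the matching Command
 * Complete (or the explicitly requested event) arrives or the timeout
 * expires. Returns the response skb on success or an ERR_PTR.
 */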
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                       unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = __constant_cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
         * but it does not support page scan related HCI commands.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Advertising Channel TX Power */
        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

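/* Build the page 1 event mask from the controller's feature bits so
 * that only events the controller can actually generate are unmasked.
 */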
static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to
         * set any event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[0] |= 0x80; /* Encryption Change */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */
                events[5] |= 0x80; /* Encryption Key Refresh Complete */
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

        if (lmp_le_capable(hdev)) {
                memset(events, 0, sizeof(events));
                events[0] = 0x1f;
                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
                            sizeof(events), events);
        }
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

        if (lmp_le_capable(hdev))
                le_setup(req);

        hci_setup_event_mask(req);

        /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
         * local supported commands HCI command.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, the host features page should
                 * be available as well. However some controllers list
                 * max_page as 0 as long as SSP has not been enabled. To
                 * achieve proper debugging output, force the minimum
                 * max_page to 1 at least.
                 */
                hdev->max_page = 0x01;

                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                hci_setup_inquiry_mode(req);

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = lmp_le_br_capable(hdev);
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

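/* Page 2 of the event mask covers the Connectionless Slave Broadcast
 * events; unmask them only for the roles the controller supports.
 */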
static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If Connectionless Slave Broadcast master role is supported,
         * enable all necessary events for it.
         */
        if (hdev->features[2][0] & 0x01) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Slave Page Response Timeout */
                events[2] |= 0x20;      /* CSB Channel Map Change */
        }

        /* If Connectionless Slave Broadcast slave role is supported,
         * enable all necessary events for it.
         */
        if (hdev->features[2][0] & 0x02) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CSB Receive */
                events[2] |= 0x04;      /* CSB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
        }

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

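/* Third stage init: commands that depend on the feature bits and the
 * supported-commands list read during the earlier stages.
 */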
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating
         * its absence in the bit mask of supported commands.
         *
         * Check the supported commands and only if the command is marked
         * as supported send it. If not supported, assume that the
         * controller does not have actual support for stored link keys
         * which makes this command redundant anyway.
         */
        if (hdev->commands[6] & 0x80) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (lmp_le_capable(hdev))
                hci_set_le_support(req);

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Check for Synchronization Train support */
        if (hdev->features[2][0] & 0x04)
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
}

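/* Run the staged initialization sequence and, during the initial setup
 * phase only, create the debugfs entries for the controller.
 */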
static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* HCI_BREDR covers single-mode LE, single-mode BR/EDR and
         * dual-mode BR/EDR/LE controllers. AMP controllers only need
         * the first stage init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* Only create debugfs entries during the initial setup
         * phase and not every time the controller gets powered on.
         */
        if (!test_bit(HCI_SETUP, &hdev->dev_flags))
                return 0;

        debugfs_create_file("features", 0444, hdev->debugfs, hdev,
                            &features_fops);
        debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
                            &blacklist_fops);
        debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

        if (lmp_bredr_capable(hdev)) {
                debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
                                    hdev, &inquiry_cache_fops);
                debugfs_create_file("voice_setting", 0444, hdev->debugfs,
                                    hdev, &voice_setting_fops);
        }

        if (lmp_ssp_capable(hdev))
                debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
                                    hdev, &auto_accept_delay_fops);

        if (lmp_sniff_capable(hdev)) {
                debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
                                    hdev, &idle_timeout_fops);
                debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
                                    hdev, &sniff_min_interval_fops);
                debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
                                    hdev, &sniff_max_interval_fops);
        }

        if (lmp_le_capable(hdev))
                debugfs_create_file("static_address", 0444, hdev->debugfs,
                                    hdev, &static_address_fops);

        return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (hdev->discovery.state == state)
                return;

        switch (state) {
        case DISCOVERY_STOPPED:
                if (hdev->discovery.state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }

        hdev->discovery.state = state;
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

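/* Re-insert the entry into the resolve list, keeping the list ordered
 * by ascending abs(RSSI) so that the strongest devices have their names
 * resolved first.
 */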
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

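/* Add a new inquiry cache entry or refresh an existing one. Returns
 * false if the remote name is still completely unknown (or on
 * allocation failure), true otherwise.
 */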
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                              bool name_known, bool *ssp)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr);

        if (ssp)
                *ssp = data->ssp_mode;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (ie->data.ssp_mode && ssp)
                        *ssp = true;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
        if (!ie)
                return false;

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                return false;

        return true;
}

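/* Copy up to num entries from the inquiry cache into buf as an array of
 * struct inquiry_info; the caller holds hci_dev_lock.
 */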
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
        schedule();
        return signal_pending(current);
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                err = -EBUSY;
                goto done;
        }

        if (hdev->dev_type != HCI_BREDR) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
                                TASK_INTERRUPTIBLE))
                        return -EINTR;
        }

        /* For an unlimited number of responses, use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* inquiry_cache_dump() can't sleep, so allocate a temporary
         * buffer and copy it to user space afterwards.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}

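/* Bring the controller up: open the driver, run the vendor setup and
 * the HCI init sequence, holding the request lock for the whole
 * transition.
 */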
static int hci_dev_do_open(struct hci_dev *hdev)
{
        int ret = 0;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
                ret = -ENODEV;
                goto done;
        }

        if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
                /* Check for rfkill but allow the HCI setup stage to
                 * proceed (which in itself doesn't cause any RF activity).
                 */
                if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
                        ret = -ERFKILL;
                        goto done;
                }

                /* Check for a valid public address or a configured static
                 * random address, but let the HCI setup proceed to
                 * be able to determine if there is a public address
                 * or not.
                 *
                 * This check is only valid for BR/EDR controllers
                 * since AMP controllers do not have an address.
                 */
                if (hdev->dev_type == HCI_BREDR &&
                    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
                    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
                        ret = -EADDRNOTAVAIL;
                        goto done;
                }
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        atomic_set(&hdev->cmd_cnt, 1);
        set_bit(HCI_INIT, &hdev->flags);

        if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
                ret = hdev->setup(hdev);

        if (!ret) {
                if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                        set_bit(HCI_RAW, &hdev->flags);

                if (!test_bit(HCI_RAW, &hdev->flags) &&
                    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
                        ret = __hci_init(hdev);
        }

        clear_bit(HCI_INIT, &hdev->flags);

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
                    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
                    hdev->dev_type == HCI_BREDR) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 1);
                        hci_dev_unlock(hdev);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        /* We need to ensure that no other power on/off work is pending
         * before proceeding to call hci_dev_do_open. This is
         * particularly important if the setup procedure has not yet
         * completed.
         */
        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work(&hdev->power_off);

        /* After this call it is guaranteed that the setup procedure
         * has finished. This means that error conditions like RFKILL
         * or no valid public or static random address apply.
         */
        flush_workqueue(hdev->req_workqueue);

        err = hci_dev_do_open(hdev);

        hci_dev_put(hdev);

        return err;
}

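/* Tear the controller down: cancel pending work, flush every queue and
 * leave the device in a clean powered-off state.
 */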
1588 static int hci_dev_do_close(struct hci_dev *hdev)
1589 {
1590         BT_DBG("%s %p", hdev->name, hdev);
1591
1592         cancel_delayed_work(&hdev->power_off);
1593
1594         hci_req_cancel(hdev, ENODEV);
1595         hci_req_lock(hdev);
1596
1597         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1598                 del_timer_sync(&hdev->cmd_timer);
1599                 hci_req_unlock(hdev);
1600                 return 0;
1601         }
1602
1603         /* Flush RX and TX works */
1604         flush_work(&hdev->tx_work);
1605         flush_work(&hdev->rx_work);
1606
1607         if (hdev->discov_timeout > 0) {
1608                 cancel_delayed_work(&hdev->discov_off);
1609                 hdev->discov_timeout = 0;
1610                 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1611                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1612         }
1613
1614         if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1615                 cancel_delayed_work(&hdev->service_cache);
1616
1617         cancel_delayed_work_sync(&hdev->le_scan_disable);
1618
1619         hci_dev_lock(hdev);
1620         hci_inquiry_cache_flush(hdev);
1621         hci_conn_hash_flush(hdev);
1622         hci_dev_unlock(hdev);
1623
1624         hci_notify(hdev, HCI_DEV_DOWN);
1625
1626         if (hdev->flush)
1627                 hdev->flush(hdev);
1628
1629         /* Reset device */
1630         skb_queue_purge(&hdev->cmd_q);
1631         atomic_set(&hdev->cmd_cnt, 1);
1632         if (!test_bit(HCI_RAW, &hdev->flags) &&
1633             !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
1634             test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1635                 set_bit(HCI_INIT, &hdev->flags);
1636                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1637                 clear_bit(HCI_INIT, &hdev->flags);
1638         }
1639
1640         /* Flush the cmd work item */
1641         flush_work(&hdev->cmd_work);
1642
1643         /* Drop queues */
1644         skb_queue_purge(&hdev->rx_q);
1645         skb_queue_purge(&hdev->cmd_q);
1646         skb_queue_purge(&hdev->raw_q);
1647
1648         /* Drop last sent command */
1649         if (hdev->sent_cmd) {
1650                 del_timer_sync(&hdev->cmd_timer);
1651                 kfree_skb(hdev->sent_cmd);
1652                 hdev->sent_cmd = NULL;
1653         }
1654
1655         kfree_skb(hdev->recv_evt);
1656         hdev->recv_evt = NULL;
1657
1658         /* After this point our queues are empty
1659          * and no tasks are scheduled. */
1660         hdev->close(hdev);
1661
1662         /* Clear flags */
1663         hdev->flags = 0;
1664         hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
1665
1666         if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1667                 if (hdev->dev_type == HCI_BREDR) {
1668                         hci_dev_lock(hdev);
1669                         mgmt_powered(hdev, 0);
1670                         hci_dev_unlock(hdev);
1671                 }
1672         }
1673
1674         /* Controller radio is available but is currently powered down */
1675         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1676
1677         memset(hdev->eir, 0, sizeof(hdev->eir));
1678         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1679
1680         hci_req_unlock(hdev);
1681
1682         hci_dev_put(hdev);
1683         return 0;
1684 }
1685
1686 int hci_dev_close(__u16 dev)
1687 {
1688         struct hci_dev *hdev;
1689         int err;
1690
1691         hdev = hci_dev_get(dev);
1692         if (!hdev)
1693                 return -ENODEV;
1694
1695         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1696                 err = -EBUSY;
1697                 goto done;
1698         }
1699
1700         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1701                 cancel_delayed_work(&hdev->power_off);
1702
1703         err = hci_dev_do_close(hdev);
1704
1705 done:
1706         hci_dev_put(hdev);
1707         return err;
1708 }
1709
1710 int hci_dev_reset(__u16 dev)
1711 {
1712         struct hci_dev *hdev;
1713         int ret = 0;
1714
1715         hdev = hci_dev_get(dev);
1716         if (!hdev)
1717                 return -ENODEV;
1718
1719         hci_req_lock(hdev);
1720
1721         if (!test_bit(HCI_UP, &hdev->flags)) {
1722                 ret = -ENETDOWN;
1723                 goto done;
1724         }
1725
1726         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1727                 ret = -EBUSY;
1728                 goto done;
1729         }
1730
1731         /* Drop queues */
1732         skb_queue_purge(&hdev->rx_q);
1733         skb_queue_purge(&hdev->cmd_q);
1734
1735         hci_dev_lock(hdev);
1736         hci_inquiry_cache_flush(hdev);
1737         hci_conn_hash_flush(hdev);
1738         hci_dev_unlock(hdev);
1739
1740         if (hdev->flush)
1741                 hdev->flush(hdev);
1742
1743         atomic_set(&hdev->cmd_cnt, 1);
1744         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1745
1746         if (!test_bit(HCI_RAW, &hdev->flags))
1747                 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1748
1749 done:
1750         hci_req_unlock(hdev);
1751         hci_dev_put(hdev);
1752         return ret;
1753 }
1754
1755 int hci_dev_reset_stat(__u16 dev)
1756 {
1757         struct hci_dev *hdev;
1758         int ret = 0;
1759
1760         hdev = hci_dev_get(dev);
1761         if (!hdev)
1762                 return -ENODEV;
1763
1764         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1765                 ret = -EBUSY;
1766                 goto done;
1767         }
1768
1769         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1770
1771 done:
1772         hci_dev_put(hdev);
1773         return ret;
1774 }
1775
1776 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1777 {
1778         struct hci_dev *hdev;
1779         struct hci_dev_req dr;
1780         int err = 0;
1781
1782         if (copy_from_user(&dr, arg, sizeof(dr)))
1783                 return -EFAULT;
1784
1785         hdev = hci_dev_get(dr.dev_id);
1786         if (!hdev)
1787                 return -ENODEV;
1788
1789         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1790                 err = -EBUSY;
1791                 goto done;
1792         }
1793
1794         if (hdev->dev_type != HCI_BREDR) {
1795                 err = -EOPNOTSUPP;
1796                 goto done;
1797         }
1798
1799         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1800                 err = -EOPNOTSUPP;
1801                 goto done;
1802         }
1803
1804         switch (cmd) {
1805         case HCISETAUTH:
1806                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1807                                    HCI_INIT_TIMEOUT);
1808                 break;
1809
1810         case HCISETENCRYPT:
1811                 if (!lmp_encrypt_capable(hdev)) {
1812                         err = -EOPNOTSUPP;
1813                         break;
1814                 }
1815
1816                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1817                         /* Auth must be enabled first */
1818                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1819                                            HCI_INIT_TIMEOUT);
1820                         if (err)
1821                                 break;
1822                 }
1823
1824                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1825                                    HCI_INIT_TIMEOUT);
1826                 break;
1827
1828         case HCISETSCAN:
1829                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1830                                    HCI_INIT_TIMEOUT);
1831                 break;
1832
1833         case HCISETLINKPOL:
1834                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1835                                    HCI_INIT_TIMEOUT);
1836                 break;
1837
1838         case HCISETLINKMODE:
1839                 hdev->link_mode = ((__u16) dr.dev_opt) &
1840                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
1841                 break;
1842
1843         case HCISETPTYPE:
1844                 hdev->pkt_type = (__u16) dr.dev_opt;
1845                 break;
1846
1847         case HCISETACLMTU:
1848                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
1849                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1850                 break;
1851
1852         case HCISETSCOMTU:
1853                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
1854                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1855                 break;
1856
1857         default:
1858                 err = -EINVAL;
1859                 break;
1860         }
1861
1862 done:
1863         hci_dev_put(hdev);
1864         return err;
1865 }
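
/* Illustrative sketch (an assumption, not part of this file): for the
 * HCISETACLMTU and HCISETSCOMTU cases above, dev_opt carries two 16-bit
 * values, the packet count in the low half and the MTU in the high half
 * (host-endian). On a little-endian host a caller would pack it as:
 *
 *	struct hci_dev_req dr;
 *
 *	dr.dev_id  = 0;
 *	dr.dev_opt = ((__u32) mtu << 16) | pkts;
 *	ioctl(ctl, HCISETACLMTU, &dr);
 */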
1866
1867 int hci_get_dev_list(void __user *arg)
1868 {
1869         struct hci_dev *hdev;
1870         struct hci_dev_list_req *dl;
1871         struct hci_dev_req *dr;
1872         int n = 0, size, err;
1873         __u16 dev_num;
1874
1875         if (get_user(dev_num, (__u16 __user *) arg))
1876                 return -EFAULT;
1877
1878         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1879                 return -EINVAL;
1880
1881         size = sizeof(*dl) + dev_num * sizeof(*dr);
1882
1883         dl = kzalloc(size, GFP_KERNEL);
1884         if (!dl)
1885                 return -ENOMEM;
1886
1887         dr = dl->dev_req;
1888
1889         read_lock(&hci_dev_list_lock);
1890         list_for_each_entry(hdev, &hci_dev_list, list) {
1891                 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1892                         cancel_delayed_work(&hdev->power_off);
1893
1894                 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1895                         set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1896
1897                 (dr + n)->dev_id  = hdev->id;
1898                 (dr + n)->dev_opt = hdev->flags;
1899
1900                 if (++n >= dev_num)
1901                         break;
1902         }
1903         read_unlock(&hci_dev_list_lock);
1904
1905         dl->dev_num = n;
1906         size = sizeof(*dl) + n * sizeof(*dr);
1907
1908         err = copy_to_user(arg, dl, size);
1909         kfree(dl);
1910
1911         return err ? -EFAULT : 0;
1912 }
1913
1914 int hci_get_dev_info(void __user *arg)
1915 {
1916         struct hci_dev *hdev;
1917         struct hci_dev_info di;
1918         int err = 0;
1919
1920         if (copy_from_user(&di, arg, sizeof(di)))
1921                 return -EFAULT;
1922
1923         hdev = hci_dev_get(di.dev_id);
1924         if (!hdev)
1925                 return -ENODEV;
1926
1927         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1928                 cancel_delayed_work_sync(&hdev->power_off);
1929
1930         if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1931                 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1932
1933         strcpy(di.name, hdev->name);
1934         di.bdaddr   = hdev->bdaddr;
1935         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
1936         di.flags    = hdev->flags;
1937         di.pkt_type = hdev->pkt_type;
1938         if (lmp_bredr_capable(hdev)) {
1939                 di.acl_mtu  = hdev->acl_mtu;
1940                 di.acl_pkts = hdev->acl_pkts;
1941                 di.sco_mtu  = hdev->sco_mtu;
1942                 di.sco_pkts = hdev->sco_pkts;
1943         } else {
1944                 di.acl_mtu  = hdev->le_mtu;
1945                 di.acl_pkts = hdev->le_pkts;
1946                 di.sco_mtu  = 0;
1947                 di.sco_pkts = 0;
1948         }
1949         di.link_policy = hdev->link_policy;
1950         di.link_mode   = hdev->link_mode;
1951
1952         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1953         memcpy(&di.features, &hdev->features, sizeof(di.features));
1954
1955         if (copy_to_user(arg, &di, sizeof(di)))
1956                 err = -EFAULT;
1957
1958         hci_dev_put(hdev);
1959
1960         return err;
1961 }
1962
1963 /* ---- Interface to HCI drivers ---- */
1964
1965 static int hci_rfkill_set_block(void *data, bool blocked)
1966 {
1967         struct hci_dev *hdev = data;
1968
1969         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1970
1971         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
1972                 return -EBUSY;
1973
1974         if (blocked) {
1975                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
1976                 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1977                         hci_dev_do_close(hdev);
1978         } else {
1979                 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
1980         }
1981
1982         return 0;
1983 }
1984
1985 static const struct rfkill_ops hci_rfkill_ops = {
1986         .set_block = hci_rfkill_set_block,
1987 };
1988
1989 static void hci_power_on(struct work_struct *work)
1990 {
1991         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1992         int err;
1993
1994         BT_DBG("%s", hdev->name);
1995
1996         err = hci_dev_do_open(hdev);
1997         if (err < 0) {
1998                 mgmt_set_powered_failed(hdev, err);
1999                 return;
2000         }
2001
2002         /* During the HCI setup phase, a few error conditions are
2003          * ignored and they need to be checked now. If they are still
2004          * valid, it is important to turn the device back off.
2005          */
2006         if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2007             (hdev->dev_type == HCI_BREDR &&
2008              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2009              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2010                 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2011                 hci_dev_do_close(hdev);
2012         } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2013                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2014                                    HCI_AUTO_OFF_TIMEOUT);
2015         }
2016
2017         if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
2018                 mgmt_index_added(hdev);
2019 }
2020
2021 static void hci_power_off(struct work_struct *work)
2022 {
2023         struct hci_dev *hdev = container_of(work, struct hci_dev,
2024                                             power_off.work);
2025
2026         BT_DBG("%s", hdev->name);
2027
2028         hci_dev_do_close(hdev);
2029 }
2030
2031 static void hci_discov_off(struct work_struct *work)
2032 {
2033         struct hci_dev *hdev;
2034
2035         hdev = container_of(work, struct hci_dev, discov_off.work);
2036
2037         BT_DBG("%s", hdev->name);
2038
2039         mgmt_discoverable_timeout(hdev);
2040 }
2041
2042 int hci_uuids_clear(struct hci_dev *hdev)
2043 {
2044         struct bt_uuid *uuid, *tmp;
2045
2046         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2047                 list_del(&uuid->list);
2048                 kfree(uuid);
2049         }
2050
2051         return 0;
2052 }
2053
2054 int hci_link_keys_clear(struct hci_dev *hdev)
2055 {
2056         struct list_head *p, *n;
2057
2058         list_for_each_safe(p, n, &hdev->link_keys) {
2059                 struct link_key *key;
2060
2061                 key = list_entry(p, struct link_key, list);
2062
2063                 list_del(p);
2064                 kfree(key);
2065         }
2066
2067         return 0;
2068 }
2069
2070 int hci_smp_ltks_clear(struct hci_dev *hdev)
2071 {
2072         struct smp_ltk *k, *tmp;
2073
2074         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2075                 list_del(&k->list);
2076                 kfree(k);
2077         }
2078
2079         return 0;
2080 }
2081
2082 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2083 {
2084         struct link_key *k;
2085
2086         list_for_each_entry(k, &hdev->link_keys, list)
2087                 if (bacmp(bdaddr, &k->bdaddr) == 0)
2088                         return k;
2089
2090         return NULL;
2091 }
2092
2093 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2094                                u8 key_type, u8 old_key_type)
2095 {
2096         /* Legacy key */
2097         if (key_type < 0x03)
2098                 return true;
2099
2100         /* Debug keys are insecure so don't store them persistently */
2101         if (key_type == HCI_LK_DEBUG_COMBINATION)
2102                 return false;
2103
2104         /* Changed combination key and there's no previous one */
2105         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2106                 return false;
2107
2108         /* Security mode 3 case */
2109         if (!conn)
2110                 return true;
2111
2112         /* Neither the local nor the remote side requested no-bonding */
2113         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2114                 return true;
2115
2116         /* Local side had dedicated bonding as requirement */
2117         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2118                 return true;
2119
2120         /* Remote side had dedicated bonding as requirement */
2121         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2122                 return true;
2123
2124         /* If none of the above criteria match, then don't store the key
2125          * persistently */
2126         return false;
2127 }
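
/* Worked examples for the checks above, using the HCI_LK_* constants from
 * net/bluetooth/hci.h: HCI_LK_COMBINATION (0x00) falls below 0x03 and is
 * always stored; HCI_LK_DEBUG_COMBINATION (0x03) is never stored; an
 * HCI_LK_UNAUTH_COMBINATION (0x04) key survives only if one of the bonding
 * checks on conn->auth_type / conn->remote_auth above succeeds.
 */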
2128
2129 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
2130 {
2131         struct smp_ltk *k;
2132
2133         list_for_each_entry(k, &hdev->long_term_keys, list) {
2134                 if (k->ediv != ediv ||
2135                     memcmp(rand, k->rand, sizeof(k->rand)))
2136                         continue;
2137
2138                 return k;
2139         }
2140
2141         return NULL;
2142 }
2143
2144 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2145                                      u8 addr_type)
2146 {
2147         struct smp_ltk *k;
2148
2149         list_for_each_entry(k, &hdev->long_term_keys, list)
2150                 if (addr_type == k->bdaddr_type &&
2151                     bacmp(bdaddr, &k->bdaddr) == 0)
2152                         return k;
2153
2154         return NULL;
2155 }
2156
2157 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
2158                      bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
2159 {
2160         struct link_key *key, *old_key;
2161         u8 old_key_type;
2162         bool persistent;
2163
2164         old_key = hci_find_link_key(hdev, bdaddr);
2165         if (old_key) {
2166                 old_key_type = old_key->type;
2167                 key = old_key;
2168         } else {
2169                 old_key_type = conn ? conn->key_type : 0xff;
2170                 key = kzalloc(sizeof(*key), GFP_ATOMIC);
2171                 if (!key)
2172                         return -ENOMEM;
2173                 list_add(&key->list, &hdev->link_keys);
2174         }
2175
2176         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2177
2178         /* Some buggy controller combinations generate a changed
2179          * combination key for legacy pairing even when there's no
2180          * previous key */
2181         if (type == HCI_LK_CHANGED_COMBINATION &&
2182             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2183                 type = HCI_LK_COMBINATION;
2184                 if (conn)
2185                         conn->key_type = type;
2186         }
2187
2188         bacpy(&key->bdaddr, bdaddr);
2189         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2190         key->pin_len = pin_len;
2191
2192         if (type == HCI_LK_CHANGED_COMBINATION)
2193                 key->type = old_key_type;
2194         else
2195                 key->type = type;
2196
2197         if (!new_key)
2198                 return 0;
2199
2200         persistent = hci_persistent_key(hdev, conn, type, old_key_type);
2201
2202         mgmt_new_link_key(hdev, key, persistent);
2203
2204         if (conn)
2205                 conn->flush_key = !persistent;
2206
2207         return 0;
2208 }
2209
2210 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
2211                 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
2212                 ediv, u8 rand[8])
2213 {
2214         struct smp_ltk *key, *old_key;
2215
2216         if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
2217                 return 0;
2218
2219         old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
2220         if (old_key)
2221                 key = old_key;
2222         else {
2223                 key = kzalloc(sizeof(*key), GFP_ATOMIC);
2224                 if (!key)
2225                         return -ENOMEM;
2226                 list_add(&key->list, &hdev->long_term_keys);
2227         }
2228
2229         bacpy(&key->bdaddr, bdaddr);
2230         key->bdaddr_type = addr_type;
2231         memcpy(key->val, tk, sizeof(key->val));
2232         key->authenticated = authenticated;
2233         key->ediv = ediv;
2234         key->enc_size = enc_size;
2235         key->type = type;
2236         memcpy(key->rand, rand, sizeof(key->rand));
2237
2238         if (!new_key)
2239                 return 0;
2240
2241         if (type & HCI_SMP_LTK)
2242                 mgmt_new_ltk(hdev, key, 1);
2243
2244         return 0;
2245 }
2246
2247 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2248 {
2249         struct link_key *key;
2250
2251         key = hci_find_link_key(hdev, bdaddr);
2252         if (!key)
2253                 return -ENOENT;
2254
2255         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2256
2257         list_del(&key->list);
2258         kfree(key);
2259
2260         return 0;
2261 }
2262
2263 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
2264 {
2265         struct smp_ltk *k, *tmp;
2266
2267         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2268                 if (bacmp(bdaddr, &k->bdaddr))
2269                         continue;
2270
2271                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2272
2273                 list_del(&k->list);
2274                 kfree(k);
2275         }
2276
2277         return 0;
2278 }
2279
2280 /* HCI command timer function */
2281 static void hci_cmd_timeout(unsigned long arg)
2282 {
2283         struct hci_dev *hdev = (void *) arg;
2284
2285         if (hdev->sent_cmd) {
2286                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2287                 u16 opcode = __le16_to_cpu(sent->opcode);
2288
2289                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2290         } else {
2291                 BT_ERR("%s command tx timeout", hdev->name);
2292         }
2293
2294         atomic_set(&hdev->cmd_cnt, 1);
2295         queue_work(hdev->workqueue, &hdev->cmd_work);
2296 }
2297
2298 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2299                                           bdaddr_t *bdaddr)
2300 {
2301         struct oob_data *data;
2302
2303         list_for_each_entry(data, &hdev->remote_oob_data, list)
2304                 if (bacmp(bdaddr, &data->bdaddr) == 0)
2305                         return data;
2306
2307         return NULL;
2308 }
2309
2310 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
2311 {
2312         struct oob_data *data;
2313
2314         data = hci_find_remote_oob_data(hdev, bdaddr);
2315         if (!data)
2316                 return -ENOENT;
2317
2318         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2319
2320         list_del(&data->list);
2321         kfree(data);
2322
2323         return 0;
2324 }
2325
2326 int hci_remote_oob_data_clear(struct hci_dev *hdev)
2327 {
2328         struct oob_data *data, *n;
2329
2330         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2331                 list_del(&data->list);
2332                 kfree(data);
2333         }
2334
2335         return 0;
2336 }
2337
2338 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
2339                             u8 *randomizer)
2340 {
2341         struct oob_data *data;
2342
2343         data = hci_find_remote_oob_data(hdev, bdaddr);
2344
2345         if (!data) {
2346                 data = kmalloc(sizeof(*data), GFP_ATOMIC);
2347                 if (!data)
2348                         return -ENOMEM;
2349
2350                 bacpy(&data->bdaddr, bdaddr);
2351                 list_add(&data->list, &hdev->remote_oob_data);
2352         }
2353
2354         memcpy(data->hash, hash, sizeof(data->hash));
2355         memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
2356
2357         BT_DBG("%s for %pMR", hdev->name, bdaddr);
2358
2359         return 0;
2360 }
2361
2362 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
2363                                          bdaddr_t *bdaddr, u8 type)
2364 {
2365         struct bdaddr_list *b;
2366
2367         list_for_each_entry(b, &hdev->blacklist, list) {
2368                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2369                         return b;
2370         }
2371
2372         return NULL;
2373 }
2374
2375 int hci_blacklist_clear(struct hci_dev *hdev)
2376 {
2377         struct list_head *p, *n;
2378
2379         list_for_each_safe(p, n, &hdev->blacklist) {
2380                 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
2381
2382                 list_del(p);
2383                 kfree(b);
2384         }
2385
2386         return 0;
2387 }
2388
2389 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2390 {
2391         struct bdaddr_list *entry;
2392
2393         if (!bacmp(bdaddr, BDADDR_ANY))
2394                 return -EBADF;
2395
2396         if (hci_blacklist_lookup(hdev, bdaddr, type))
2397                 return -EEXIST;
2398
2399         entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
2400         if (!entry)
2401                 return -ENOMEM;
2402
2403         bacpy(&entry->bdaddr, bdaddr);
2404         entry->bdaddr_type = type;
2405
2406         list_add(&entry->list, &hdev->blacklist);
2407
2408         return mgmt_device_blocked(hdev, bdaddr, type);
2409 }
2410
2411 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2412 {
2413         struct bdaddr_list *entry;
2414
2415         if (!bacmp(bdaddr, BDADDR_ANY))
2416                 return hci_blacklist_clear(hdev);
2417
2418         entry = hci_blacklist_lookup(hdev, bdaddr, type);
2419         if (!entry)
2420                 return -ENOENT;
2421
2422         list_del(&entry->list);
2423         kfree(entry);
2424
2425         return mgmt_device_unblocked(hdev, bdaddr, type);
2426 }
2427
2428 static void inquiry_complete(struct hci_dev *hdev, u8 status)
2429 {
2430         if (status) {
2431                 BT_ERR("Failed to start inquiry: status %d", status);
2432
2433                 hci_dev_lock(hdev);
2434                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2435                 hci_dev_unlock(hdev);
2436                 return;
2437         }
2438 }
2439
2440 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
2441 {
2442         /* General inquiry access code (GIAC) */
2443         u8 lap[3] = { 0x33, 0x8b, 0x9e };
2444         struct hci_request req;
2445         struct hci_cp_inquiry cp;
2446         int err;
2447
2448         if (status) {
2449                 BT_ERR("Failed to disable LE scanning: status %d", status);
2450                 return;
2451         }
2452
2453         switch (hdev->discovery.type) {
2454         case DISCOV_TYPE_LE:
2455                 hci_dev_lock(hdev);
2456                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2457                 hci_dev_unlock(hdev);
2458                 break;
2459
2460         case DISCOV_TYPE_INTERLEAVED:
2461                 hci_req_init(&req, hdev);
2462
2463                 memset(&cp, 0, sizeof(cp));
2464                 memcpy(&cp.lap, lap, sizeof(cp.lap));
2465                 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2466                 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2467
2468                 hci_dev_lock(hdev);
2469
2470                 hci_inquiry_cache_flush(hdev);
2471
2472                 err = hci_req_run(&req, inquiry_complete);
2473                 if (err) {
2474                         BT_ERR("Inquiry request failed: err %d", err);
2475                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2476                 }
2477
2478                 hci_dev_unlock(hdev);
2479                 break;
2480         }
2481 }
2482
2483 static void le_scan_disable_work(struct work_struct *work)
2484 {
2485         struct hci_dev *hdev = container_of(work, struct hci_dev,
2486                                             le_scan_disable.work);
2487         struct hci_cp_le_set_scan_enable cp;
2488         struct hci_request req;
2489         int err;
2490
2491         BT_DBG("%s", hdev->name);
2492
2493         hci_req_init(&req, hdev);
2494
2495         memset(&cp, 0, sizeof(cp));
2496         cp.enable = LE_SCAN_DISABLE;
2497         hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2498
2499         err = hci_req_run(&req, le_scan_disable_work_complete);
2500         if (err)
2501                 BT_ERR("Disable LE scanning request failed: err %d", err);
2502 }
2503
2504 /* Alloc HCI device */
2505 struct hci_dev *hci_alloc_dev(void)
2506 {
2507         struct hci_dev *hdev;
2508
2509         hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
2510         if (!hdev)
2511                 return NULL;
2512
2513         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2514         hdev->esco_type = (ESCO_HV1);
2515         hdev->link_mode = (HCI_LM_ACCEPT);
2516         hdev->num_iac = 0x01;           /* Support for one IAC is mandatory */
2517         hdev->io_capability = 0x03;     /* No Input No Output */
2518         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2519         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2520
2521         hdev->sniff_max_interval = 800;
2522         hdev->sniff_min_interval = 80;
2523
2524         hdev->le_scan_interval = 0x0060;
2525         hdev->le_scan_window = 0x0030;
2526
2527         mutex_init(&hdev->lock);
2528         mutex_init(&hdev->req_lock);
2529
2530         INIT_LIST_HEAD(&hdev->mgmt_pending);
2531         INIT_LIST_HEAD(&hdev->blacklist);
2532         INIT_LIST_HEAD(&hdev->uuids);
2533         INIT_LIST_HEAD(&hdev->link_keys);
2534         INIT_LIST_HEAD(&hdev->long_term_keys);
2535         INIT_LIST_HEAD(&hdev->remote_oob_data);
2536         INIT_LIST_HEAD(&hdev->conn_hash.list);
2537
2538         INIT_WORK(&hdev->rx_work, hci_rx_work);
2539         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2540         INIT_WORK(&hdev->tx_work, hci_tx_work);
2541         INIT_WORK(&hdev->power_on, hci_power_on);
2542
2543         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2544         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2545         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2546
2547         skb_queue_head_init(&hdev->rx_q);
2548         skb_queue_head_init(&hdev->cmd_q);
2549         skb_queue_head_init(&hdev->raw_q);
2550
2551         init_waitqueue_head(&hdev->req_wait_q);
2552
2553         setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
2554
2555         hci_init_sysfs(hdev);
2556         discovery_init(hdev);
2557
2558         return hdev;
2559 }
2560 EXPORT_SYMBOL(hci_alloc_dev);
2561
2562 /* Free HCI device */
2563 void hci_free_dev(struct hci_dev *hdev)
2564 {
2565         /* Will be freed via the device release function */
2566         put_device(&hdev->dev);
2567 }
2568 EXPORT_SYMBOL(hci_free_dev);
2569
2570 /* Register HCI device */
2571 int hci_register_dev(struct hci_dev *hdev)
2572 {
2573         int id, error;
2574
2575         if (!hdev->open || !hdev->close)
2576                 return -EINVAL;
2577
2578         /* Do not allow HCI_AMP devices to register at index 0,
2579          * so the index can be used as the AMP controller ID.
2580          */
2581         switch (hdev->dev_type) {
2582         case HCI_BREDR:
2583                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2584                 break;
2585         case HCI_AMP:
2586                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2587                 break;
2588         default:
2589                 return -EINVAL;
2590         }
2591
2592         if (id < 0)
2593                 return id;
2594
2595         sprintf(hdev->name, "hci%d", id);
2596         hdev->id = id;
2597
2598         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2599
2600         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2601                                           WQ_MEM_RECLAIM, 1, hdev->name);
2602         if (!hdev->workqueue) {
2603                 error = -ENOMEM;
2604                 goto err;
2605         }
2606
2607         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2608                                               WQ_MEM_RECLAIM, 1, hdev->name);
2609         if (!hdev->req_workqueue) {
2610                 destroy_workqueue(hdev->workqueue);
2611                 error = -ENOMEM;
2612                 goto err;
2613         }
2614
2615         if (!IS_ERR_OR_NULL(bt_debugfs))
2616                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
2617
2618         dev_set_name(&hdev->dev, "%s", hdev->name);
2619
2620         error = device_add(&hdev->dev);
2621         if (error < 0)
2622                 goto err_wqueue;
2623
2624         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2625                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2626                                     hdev);
2627         if (hdev->rfkill) {
2628                 if (rfkill_register(hdev->rfkill) < 0) {
2629                         rfkill_destroy(hdev->rfkill);
2630                         hdev->rfkill = NULL;
2631                 }
2632         }
2633
2634         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2635                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2636
2637         set_bit(HCI_SETUP, &hdev->dev_flags);
2638         set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2639
2640         if (hdev->dev_type == HCI_BREDR) {
2641                 /* Assume BR/EDR support until proven otherwise (such as
2642          * through reading supported features during init).
2643                  */
2644                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2645         }
2646
2647         write_lock(&hci_dev_list_lock);
2648         list_add(&hdev->list, &hci_dev_list);
2649         write_unlock(&hci_dev_list_lock);
2650
2651         hci_notify(hdev, HCI_DEV_REG);
2652         hci_dev_hold(hdev);
2653
2654         queue_work(hdev->req_workqueue, &hdev->power_on);
2655
2656         return id;
2657
2658 err_wqueue:
2659         destroy_workqueue(hdev->workqueue);
2660         destroy_workqueue(hdev->req_workqueue);
2661 err:
2662         ida_simple_remove(&hci_index_ida, hdev->id);
2663
2664         return error;
2665 }
2666 EXPORT_SYMBOL(hci_register_dev);
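
/* Illustrative driver-side sketch (foo_open, foo_close and foo_send are
 * hypothetical callbacks, not part of this file): a transport driver pairs
 * hci_alloc_dev() with hci_register_dev() roughly as follows:
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *
 *	if (!hdev)
 *		return -ENOMEM;
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = foo_open;
 *	hdev->close = foo_close;
 *	hdev->send  = foo_send;
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */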
2667
2668 /* Unregister HCI device */
2669 void hci_unregister_dev(struct hci_dev *hdev)
2670 {
2671         int i, id;
2672
2673         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2674
2675         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2676
2677         id = hdev->id;
2678
2679         write_lock(&hci_dev_list_lock);
2680         list_del(&hdev->list);
2681         write_unlock(&hci_dev_list_lock);
2682
2683         hci_dev_do_close(hdev);
2684
2685         for (i = 0; i < NUM_REASSEMBLY; i++)
2686                 kfree_skb(hdev->reassembly[i]);
2687
2688         cancel_work_sync(&hdev->power_on);
2689
2690         if (!test_bit(HCI_INIT, &hdev->flags) &&
2691             !test_bit(HCI_SETUP, &hdev->dev_flags)) {
2692                 hci_dev_lock(hdev);
2693                 mgmt_index_removed(hdev);
2694                 hci_dev_unlock(hdev);
2695         }
2696
2697         /* mgmt_index_removed should take care of emptying the
2698          * pending list */
2699         BUG_ON(!list_empty(&hdev->mgmt_pending));
2700
2701         hci_notify(hdev, HCI_DEV_UNREG);
2702
2703         if (hdev->rfkill) {
2704                 rfkill_unregister(hdev->rfkill);
2705                 rfkill_destroy(hdev->rfkill);
2706         }
2707
2708         device_del(&hdev->dev);
2709
2710         debugfs_remove_recursive(hdev->debugfs);
2711
2712         destroy_workqueue(hdev->workqueue);
2713         destroy_workqueue(hdev->req_workqueue);
2714
2715         hci_dev_lock(hdev);
2716         hci_blacklist_clear(hdev);
2717         hci_uuids_clear(hdev);
2718         hci_link_keys_clear(hdev);
2719         hci_smp_ltks_clear(hdev);
2720         hci_remote_oob_data_clear(hdev);
2721         hci_dev_unlock(hdev);
2722
2723         hci_dev_put(hdev);
2724
2725         ida_simple_remove(&hci_index_ida, id);
2726 }
2727 EXPORT_SYMBOL(hci_unregister_dev);
2728
2729 /* Suspend HCI device */
2730 int hci_suspend_dev(struct hci_dev *hdev)
2731 {
2732         hci_notify(hdev, HCI_DEV_SUSPEND);
2733         return 0;
2734 }
2735 EXPORT_SYMBOL(hci_suspend_dev);
2736
2737 /* Resume HCI device */
2738 int hci_resume_dev(struct hci_dev *hdev)
2739 {
2740         hci_notify(hdev, HCI_DEV_RESUME);
2741         return 0;
2742 }
2743 EXPORT_SYMBOL(hci_resume_dev);
2744
2745 /* Receive frame from HCI drivers */
2746 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
2747 {
2748         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
2749                       !test_bit(HCI_INIT, &hdev->flags))) {
2750                 kfree_skb(skb);
2751                 return -ENXIO;
2752         }
2753
2754         /* Incoming skb */
2755         bt_cb(skb)->incoming = 1;
2756
2757         /* Time stamp */
2758         __net_timestamp(skb);
2759
2760         skb_queue_tail(&hdev->rx_q, skb);
2761         queue_work(hdev->workqueue, &hdev->rx_work);
2762
2763         return 0;
2764 }
2765 EXPORT_SYMBOL(hci_recv_frame);
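
/* Illustrative sketch (an assumption, not part of this file): a driver
 * that has assembled a complete packet tags its type and hands it over:
 *
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	hci_recv_frame(hdev, skb);
 */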
2766
2767 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
2768                           int count, __u8 index)
2769 {
2770         int len = 0;
2771         int hlen = 0;
2772         int remain = count;
2773         struct sk_buff *skb;
2774         struct bt_skb_cb *scb;
2775
2776         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
2777             index >= NUM_REASSEMBLY)
2778                 return -EILSEQ;
2779
2780         skb = hdev->reassembly[index];
2781
2782         if (!skb) {
2783                 switch (type) {
2784                 case HCI_ACLDATA_PKT:
2785                         len = HCI_MAX_FRAME_SIZE;
2786                         hlen = HCI_ACL_HDR_SIZE;
2787                         break;
2788                 case HCI_EVENT_PKT:
2789                         len = HCI_MAX_EVENT_SIZE;
2790                         hlen = HCI_EVENT_HDR_SIZE;
2791                         break;
2792                 case HCI_SCODATA_PKT:
2793                         len = HCI_MAX_SCO_SIZE;
2794                         hlen = HCI_SCO_HDR_SIZE;
2795                         break;
2796                 }
2797
2798                 skb = bt_skb_alloc(len, GFP_ATOMIC);
2799                 if (!skb)
2800                         return -ENOMEM;
2801
2802                 scb = (void *) skb->cb;
2803                 scb->expect = hlen;
2804                 scb->pkt_type = type;
2805
2806                 hdev->reassembly[index] = skb;
2807         }
2808
2809         while (count) {
2810                 scb = (void *) skb->cb;
2811                 len = min_t(uint, scb->expect, count);
2812
2813                 memcpy(skb_put(skb, len), data, len);
2814
2815                 count -= len;
2816                 data += len;
2817                 scb->expect -= len;
2818                 remain = count;
2819
2820                 switch (type) {
2821                 case HCI_EVENT_PKT:
2822                         if (skb->len == HCI_EVENT_HDR_SIZE) {
2823                                 struct hci_event_hdr *h = hci_event_hdr(skb);
2824                                 scb->expect = h->plen;
2825
2826                                 if (skb_tailroom(skb) < scb->expect) {
2827                                         kfree_skb(skb);
2828                                         hdev->reassembly[index] = NULL;
2829                                         return -ENOMEM;
2830                                 }
2831                         }
2832                         break;
2833
2834                 case HCI_ACLDATA_PKT:
2835                         if (skb->len  == HCI_ACL_HDR_SIZE) {
2836                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2837                                 scb->expect = __le16_to_cpu(h->dlen);
2838
2839                                 if (skb_tailroom(skb) < scb->expect) {
2840                                         kfree_skb(skb);
2841                                         hdev->reassembly[index] = NULL;
2842                                         return -ENOMEM;
2843                                 }
2844                         }
2845                         break;
2846
2847                 case HCI_SCODATA_PKT:
2848                         if (skb->len == HCI_SCO_HDR_SIZE) {
2849                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2850                                 scb->expect = h->dlen;
2851
2852                                 if (skb_tailroom(skb) < scb->expect) {
2853                                         kfree_skb(skb);
2854                                         hdev->reassembly[index] = NULL;
2855                                         return -ENOMEM;
2856                                 }
2857                         }
2858                         break;
2859                 }
2860
2861                 if (scb->expect == 0) {
2862                         /* Complete frame */
2863
2864                         bt_cb(skb)->pkt_type = type;
2865                         hci_recv_frame(hdev, skb);
2866
2867                         hdev->reassembly[index] = NULL;
2868                         return remain;
2869                 }
2870         }
2871
2872         return remain;
2873 }
2874
2875 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2876 {
2877         int rem = 0;
2878
2879         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2880                 return -EILSEQ;
2881
2882         while (count) {
2883                 rem = hci_reassembly(hdev, type, data, count, type - 1);
2884                 if (rem < 0)
2885                         return rem;
2886
2887                 data += (count - rem);
2888                 count = rem;
2889         }
2890
2891         return rem;
2892 }
2893 EXPORT_SYMBOL(hci_recv_fragment);
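
/* Illustrative sketch (an assumption, not part of this file): drivers that
 * receive data in arbitrary chunks can feed them as they arrive; the
 * reassembly state above persists until a complete frame is delivered:
 *
 *	if (hci_recv_fragment(hdev, HCI_EVENT_PKT, buf, len) < 0)
 *		BT_ERR("Corrupted event packet");
 */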
2894
2895 #define STREAM_REASSEMBLY 0
2896
2897 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2898 {
2899         int type;
2900         int rem = 0;
2901
2902         while (count) {
2903                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2904
2905                 if (!skb) {
2906                         struct { char type; } *pkt;
2907
2908                         /* Start of the frame */
2909                         pkt = data;
2910                         type = pkt->type;
2911
2912                         data++;
2913                         count--;
2914                 } else
2915                         type = bt_cb(skb)->pkt_type;
2916
2917                 rem = hci_reassembly(hdev, type, data, count,
2918                                      STREAM_REASSEMBLY);
2919                 if (rem < 0)
2920                         return rem;
2921
2922                 data += (count - rem);
2923                 count = rem;
2924         }
2925
2926         return rem;
2927 }
2928 EXPORT_SYMBOL(hci_recv_stream_fragment);
2929
2930 /* ---- Interface to upper protocols ---- */
2931
2932 int hci_register_cb(struct hci_cb *cb)
2933 {
2934         BT_DBG("%p name %s", cb, cb->name);
2935
2936         write_lock(&hci_cb_list_lock);
2937         list_add(&cb->list, &hci_cb_list);
2938         write_unlock(&hci_cb_list_lock);
2939
2940         return 0;
2941 }
2942 EXPORT_SYMBOL(hci_register_cb);
2943
2944 int hci_unregister_cb(struct hci_cb *cb)
2945 {
2946         BT_DBG("%p name %s", cb, cb->name);
2947
2948         write_lock(&hci_cb_list_lock);
2949         list_del(&cb->list);
2950         write_unlock(&hci_cb_list_lock);
2951
2952         return 0;
2953 }
2954 EXPORT_SYMBOL(hci_unregister_cb);
2955
2956 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
2957 {
2958         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2959
2960         /* Time stamp */
2961         __net_timestamp(skb);
2962
2963         /* Send copy to monitor */
2964         hci_send_to_monitor(hdev, skb);
2965
2966         if (atomic_read(&hdev->promisc)) {
2967                 /* Send copy to the sockets */
2968                 hci_send_to_sock(hdev, skb);
2969         }
2970
2971         /* Get rid of the skb owner prior to sending to the driver. */
2972         skb_orphan(skb);
2973
2974         if (hdev->send(hdev, skb) < 0)
2975                 BT_ERR("%s sending frame failed", hdev->name);
2976 }
2977
2978 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2979 {
2980         skb_queue_head_init(&req->cmd_q);
2981         req->hdev = hdev;
2982         req->err = 0;
2983 }
2984
2985 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2986 {
2987         struct hci_dev *hdev = req->hdev;
2988         struct sk_buff *skb;
2989         unsigned long flags;
2990
2991         BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2992
2993         /* If an error occurred during request building, remove all HCI
2994          * commands queued on the HCI request queue.
2995          */
2996         if (req->err) {
2997                 skb_queue_purge(&req->cmd_q);
2998                 return req->err;
2999         }
3000
3001         /* Do not allow empty requests */
3002         if (skb_queue_empty(&req->cmd_q))
3003                 return -ENODATA;
3004
3005         skb = skb_peek_tail(&req->cmd_q);
3006         bt_cb(skb)->req.complete = complete;
3007
3008         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3009         skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
3010         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3011
3012         queue_work(hdev->workqueue, &hdev->cmd_work);
3013
3014         return 0;
3015 }
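
/* Illustrative sketch (my_complete is a hypothetical hci_req_complete_t
 * callback, not part of this file): requests are built by queueing one or
 * more commands and then running them as a unit:
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
 *	err = hci_req_run(&req, my_complete);
 */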
3016
3017 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
3018                                        u32 plen, const void *param)
3019 {
3020         int len = HCI_COMMAND_HDR_SIZE + plen;
3021         struct hci_command_hdr *hdr;
3022         struct sk_buff *skb;
3023
3024         skb = bt_skb_alloc(len, GFP_ATOMIC);
3025         if (!skb)
3026                 return NULL;
3027
3028         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
3029         hdr->opcode = cpu_to_le16(opcode);
3030         hdr->plen   = plen;
3031
3032         if (plen)
3033                 memcpy(skb_put(skb, plen), param, plen);
3034
3035         BT_DBG("skb len %d", skb->len);
3036
3037         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
3038
3039         return skb;
3040 }
3041
3042 /* Send HCI command */
3043 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3044                  const void *param)
3045 {
3046         struct sk_buff *skb;
3047
3048         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3049
3050         skb = hci_prepare_cmd(hdev, opcode, plen, param);
3051         if (!skb) {
3052                 BT_ERR("%s no memory for command", hdev->name);
3053                 return -ENOMEM;
3054         }
3055
3056         /* Stand-alone HCI commands must be flagged as
3057          * single-command requests.
3058          */
3059         bt_cb(skb)->req.start = true;
3060
3061         skb_queue_tail(&hdev->cmd_q, skb);
3062         queue_work(hdev->workqueue, &hdev->cmd_work);
3063
3064         return 0;
3065 }
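
/* Illustrative sketch (not part of this file): a stand-alone command with
 * no parameters, such as HCI_Reset, is sent as:
 *
 *	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
 */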
3066
3067 /* Queue a command to an asynchronous HCI request */
3068 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
3069                     const void *param, u8 event)
3070 {
3071         struct hci_dev *hdev = req->hdev;
3072         struct sk_buff *skb;
3073
3074         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3075
3076         /* If an error occurred during request building, there is no point in
3077          * queueing the HCI command. We can simply return.
3078          */
3079         if (req->err)
3080                 return;
3081
3082         skb = hci_prepare_cmd(hdev, opcode, plen, param);
3083         if (!skb) {
3084                 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
3085                        hdev->name, opcode);
3086                 req->err = -ENOMEM;
3087                 return;
3088         }
3089
3090         if (skb_queue_empty(&req->cmd_q))
3091                 bt_cb(skb)->req.start = true;
3092
3093         bt_cb(skb)->req.event = event;
3094
3095         skb_queue_tail(&req->cmd_q, skb);
3096 }
3097
3098 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
3099                  const void *param)
3100 {
3101         hci_req_add_ev(req, opcode, plen, param, 0);
3102 }
3103
3104 /* Get data from the previously sent command */
3105 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3106 {
3107         struct hci_command_hdr *hdr;
3108
3109         if (!hdev->sent_cmd)
3110                 return NULL;
3111
3112         hdr = (void *) hdev->sent_cmd->data;
3113
3114         if (hdr->opcode != cpu_to_le16(opcode))
3115                 return NULL;
3116
3117         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3118
3119         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3120 }
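
/* Illustrative sketch (an assumption, not part of this file): command
 * complete handlers use this helper to recover the parameters they sent,
 * e.g.:
 *
 *	struct hci_cp_le_set_scan_enable *cp;
 *
 *	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
 *	if (!cp)
 *		return;
 */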
3121
3122 /* Send ACL data */
3123 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3124 {
3125         struct hci_acl_hdr *hdr;
3126         int len = skb->len;
3127
3128         skb_push(skb, HCI_ACL_HDR_SIZE);
3129         skb_reset_transport_header(skb);
3130         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3131         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3132         hdr->dlen   = cpu_to_le16(len);
3133 }
3134
3135 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3136                           struct sk_buff *skb, __u16 flags)
3137 {
3138         struct hci_conn *conn = chan->conn;
3139         struct hci_dev *hdev = conn->hdev;
3140         struct sk_buff *list;
3141
3142         skb->len = skb_headlen(skb);
3143         skb->data_len = 0;
3144
3145         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3146
3147         switch (hdev->dev_type) {
3148         case HCI_BREDR:
3149                 hci_add_acl_hdr(skb, conn->handle, flags);
3150                 break;
3151         case HCI_AMP:
3152                 hci_add_acl_hdr(skb, chan->handle, flags);
3153                 break;
3154         default:
3155                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3156                 return;
3157         }
3158
3159         list = skb_shinfo(skb)->frag_list;
3160         if (!list) {
3161                 /* Non-fragmented */
3162                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3163
3164                 skb_queue_tail(queue, skb);
3165         } else {
3166                 /* Fragmented */
3167                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3168
3169                 skb_shinfo(skb)->frag_list = NULL;
3170
3171                 /* Queue all fragments atomically */
3172                 spin_lock(&queue->lock);
3173
3174                 __skb_queue_tail(queue, skb);
3175
3176                 flags &= ~ACL_START;
3177                 flags |= ACL_CONT;
3178                 do {
3179                         skb = list; list = list->next;
3180
3181                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3182                         hci_add_acl_hdr(skb, conn->handle, flags);
3183
3184                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3185
3186                         __skb_queue_tail(queue, skb);
3187                 } while (list);
3188
3189                 spin_unlock(&queue->lock);
3190         }
3191 }
3192
3193 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3194 {
3195         struct hci_dev *hdev = chan->conn->hdev;
3196
3197         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3198
3199         hci_queue_acl(chan, &chan->data_q, skb, flags);
3200
3201         queue_work(hdev->workqueue, &hdev->tx_work);
3202 }
3203
3204 /* Send SCO data */
3205 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3206 {
3207         struct hci_dev *hdev = conn->hdev;
3208         struct hci_sco_hdr hdr;
3209
3210         BT_DBG("%s len %d", hdev->name, skb->len);
3211
3212         hdr.handle = cpu_to_le16(conn->handle);
3213         hdr.dlen   = skb->len;
3214
3215         skb_push(skb, HCI_SCO_HDR_SIZE);
3216         skb_reset_transport_header(skb);
3217         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3218
3219         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
3220
3221         skb_queue_tail(&conn->data_q, skb);
3222         queue_work(hdev->workqueue, &hdev->tx_work);
3223 }
3224
3225 /* ---- HCI TX task (outgoing data) ---- */
3226
3227 /* HCI Connection scheduler */
3228 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3229                                      int *quote)
3230 {
3231         struct hci_conn_hash *h = &hdev->conn_hash;
3232         struct hci_conn *conn = NULL, *c;
3233         unsigned int num = 0, min = ~0;
3234
3235         /* We don't have to lock the device here. Connections are always
3236          * added and removed with TX task disabled. */
3237
3238         rcu_read_lock();
3239
3240         list_for_each_entry_rcu(c, &h->list, list) {
3241                 if (c->type != type || skb_queue_empty(&c->data_q))
3242                         continue;
3243
3244                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3245                         continue;
3246
3247                 num++;
3248
3249                 if (c->sent < min) {
3250                         min  = c->sent;
3251                         conn = c;
3252                 }
3253
3254                 if (hci_conn_num(hdev, type) == num)
3255                         break;
3256         }
3257
3258         rcu_read_unlock();
3259
3260         if (conn) {
3261                 int cnt, q;
3262
3263                 switch (conn->type) {
3264                 case ACL_LINK:
3265                         cnt = hdev->acl_cnt;
3266                         break;
3267                 case SCO_LINK:
3268                 case ESCO_LINK:
3269                         cnt = hdev->sco_cnt;
3270                         break;
3271                 case LE_LINK:
3272                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3273                         break;
3274                 default:
3275                         cnt = 0;
3276                         BT_ERR("Unknown link type");
3277                 }
3278
3279                 q = cnt / num;
3280                 *quote = q ? q : 1;
3281         } else
3282                 *quote = 0;
3283
3284         BT_DBG("conn %p quote %d", conn, *quote);
3285         return conn;
3286 }
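
/* Worked example for the quota computed above: with hdev->acl_cnt = 8 free
 * ACL slots and num = 3 busy ACL connections, the chosen connection gets
 * q = 8 / 3 = 2 packets per pass; q is clamped to at least 1 so a
 * connection is never starved while slots remain.
 */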
3287
3288 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3289 {
3290         struct hci_conn_hash *h = &hdev->conn_hash;
3291         struct hci_conn *c;
3292
3293         BT_ERR("%s link tx timeout", hdev->name);
3294
3295         rcu_read_lock();
3296
3297         /* Kill stalled connections */
3298         list_for_each_entry_rcu(c, &h->list, list) {
3299                 if (c->type == type && c->sent) {
3300                         BT_ERR("%s killing stalled connection %pMR",
3301                                hdev->name, &c->dst);
3302                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3303                 }
3304         }
3305
3306         rcu_read_unlock();
3307 }
3308
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
                                      int *quote)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_chan *chan = NULL;
        unsigned int num = 0, min = ~0, cur_prio = 0;
        struct hci_conn *conn;
        int cnt, q, conn_num = 0;

        BT_DBG("%s", hdev->name);

        rcu_read_lock();

        list_for_each_entry_rcu(conn, &h->list, list) {
                struct hci_chan *tmp;

                if (conn->type != type)
                        continue;

                if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
                        continue;

                conn_num++;

                list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
                        struct sk_buff *skb;

                        if (skb_queue_empty(&tmp->data_q))
                                continue;

                        skb = skb_peek(&tmp->data_q);
                        if (skb->priority < cur_prio)
                                continue;

                        if (skb->priority > cur_prio) {
                                num = 0;
                                min = ~0;
                                cur_prio = skb->priority;
                        }

                        num++;

                        if (conn->sent < min) {
                                min  = conn->sent;
                                chan = tmp;
                        }
                }

                if (hci_conn_num(hdev, type) == conn_num)
                        break;
        }

        rcu_read_unlock();

        if (!chan)
                return NULL;

        switch (chan->conn->type) {
        case ACL_LINK:
                cnt = hdev->acl_cnt;
                break;
        case AMP_LINK:
                cnt = hdev->block_cnt;
                break;
        case SCO_LINK:
        case ESCO_LINK:
                cnt = hdev->sco_cnt;
                break;
        case LE_LINK:
                cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
                break;
        default:
                cnt = 0;
                BT_ERR("Unknown link type");
        }

        q = cnt / num;
        *quote = q ? q : 1;
        BT_DBG("chan %p quote %d", chan, *quote);
        return chan;
}

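/* Anti-starvation pass run after a round that actually sent data:
 * channels that transmitted get their sent counter cleared, while
 * channels left waiting with queued data have their head packet
 * promoted to HCI_PRIO_MAX - 1 so they win the next round.
 */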
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn *conn;
        int num = 0;

        BT_DBG("%s", hdev->name);

        rcu_read_lock();

        list_for_each_entry_rcu(conn, &h->list, list) {
                struct hci_chan *chan;

                if (conn->type != type)
                        continue;

                if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
                        continue;

                num++;

                list_for_each_entry_rcu(chan, &conn->chan_list, list) {
                        struct sk_buff *skb;

                        if (chan->sent) {
                                chan->sent = 0;
                                continue;
                        }

                        if (skb_queue_empty(&chan->data_q))
                                continue;

                        skb = skb_peek(&chan->data_q);
                        if (skb->priority >= HCI_PRIO_MAX - 1)
                                continue;

                        skb->priority = HCI_PRIO_MAX - 1;

                        BT_DBG("chan %p skb %p promoted to %d", chan, skb,
                               skb->priority);
                }

                if (hci_conn_num(hdev, type) == num)
                        break;
        }

        rcu_read_unlock();
}

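/* Number of controller buffer blocks consumed by one ACL packet under
 * block-based flow control; the ACL header is not counted.
 */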
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
        /* Calculate count of blocks used by this packet */
        return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}

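/* Detect a stalled ACL link: no buffer credits left and nothing sent
 * for longer than HCI_ACL_TX_TIMEOUT. Skipped for HCI_RAW devices,
 * where the core does not manage flow control.
 */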
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
        if (!test_bit(HCI_RAW, &hdev->flags)) {
                /* ACL tx timeout must be longer than maximum
                 * link supervision timeout (40.9 seconds) */
                if (!cnt && time_after(jiffies, hdev->acl_last_tx +
                                       HCI_ACL_TX_TIMEOUT))
                        hci_link_tx_to(hdev, ACL_LINK);
        }
}

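/* ACL scheduler for packet-based flow control: drain the channel picked
 * by hci_chan_sent() until its quote runs out, the head-of-queue
 * priority drops, or the controller's acl_cnt budget is exhausted.
 */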
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
        unsigned int cnt = hdev->acl_cnt;
        struct hci_chan *chan;
        struct sk_buff *skb;
        int quote;

        __check_timeout(hdev, cnt);

        while (hdev->acl_cnt &&
               (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
                u32 priority = (skb_peek(&chan->data_q))->priority;
                while (quote-- && (skb = skb_peek(&chan->data_q))) {
                        BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
                               skb->len, skb->priority);

                        /* Stop if priority has changed */
                        if (skb->priority < priority)
                                break;

                        skb = skb_dequeue(&chan->data_q);

                        hci_conn_enter_active_mode(chan->conn,
                                                   bt_cb(skb)->force_active);

                        hci_send_frame(hdev, skb);
                        hdev->acl_last_tx = jiffies;

                        hdev->acl_cnt--;
                        chan->sent++;
                        chan->conn->sent++;
                }
        }

        if (cnt != hdev->acl_cnt)
                hci_prio_recalculate(hdev, ACL_LINK);
}

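/* ACL scheduler for block-based flow control: same loop as the
 * packet-based variant, but all accounting is done in buffer blocks via
 * __get_blocks(). On an AMP controller the data links are AMP_LINK
 * rather than ACL_LINK.
 */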
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
        unsigned int cnt = hdev->block_cnt;
        struct hci_chan *chan;
        struct sk_buff *skb;
        int quote;
        u8 type;

        __check_timeout(hdev, cnt);

        BT_DBG("%s", hdev->name);

        if (hdev->dev_type == HCI_AMP)
                type = AMP_LINK;
        else
                type = ACL_LINK;

        while (hdev->block_cnt > 0 &&
               (chan = hci_chan_sent(hdev, type, &quote))) {
                u32 priority = (skb_peek(&chan->data_q))->priority;
                while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
                        int blocks;

                        BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
                               skb->len, skb->priority);

                        /* Stop if priority has changed */
                        if (skb->priority < priority)
                                break;

                        skb = skb_dequeue(&chan->data_q);

                        blocks = __get_blocks(hdev, skb);
                        if (blocks > hdev->block_cnt)
                                return;

                        hci_conn_enter_active_mode(chan->conn,
                                                   bt_cb(skb)->force_active);

                        hci_send_frame(hdev, skb);
                        hdev->acl_last_tx = jiffies;

                        hdev->block_cnt -= blocks;
                        quote -= blocks;

                        chan->sent += blocks;
                        chan->conn->sent += blocks;
                }
        }

        if (cnt != hdev->block_cnt)
                hci_prio_recalculate(hdev, type);
}

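/* Dispatch ACL scheduling to the packet-based or block-based variant,
 * depending on the flow control mode in effect for this controller.
 */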
static void hci_sched_acl(struct hci_dev *hdev)
{
        BT_DBG("%s", hdev->name);

        /* No ACL links to schedule on a BR/EDR controller */
        if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
                return;

        /* No AMP links to schedule on an AMP controller */
        if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
                return;

        switch (hdev->flow_ctl_mode) {
        case HCI_FLOW_CTL_MODE_PACKET_BASED:
                hci_sched_acl_pkt(hdev);
                break;

        case HCI_FLOW_CTL_MODE_BLOCK_BASED:
                hci_sched_acl_blk(hdev);
                break;
        }
}

/* Schedule SCO */
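/* Round-robin across SCO connections, sending up to each connection's
 * quote per pass. conn->sent only wraps back to zero at ~0; nothing
 * appears to decrement it for SCO, as there is no per-packet completion
 * accounting on these links.
 */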
static void hci_sched_sco(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        if (!hci_conn_num(hdev, SCO_LINK))
                return;

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(hdev, skb);

                        conn->sent++;
                        if (conn->sent == ~0)
                                conn->sent = 0;
                }
        }
}

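/* Identical to hci_sched_sco() but for eSCO links, which share the
 * controller's sco_cnt buffer budget.
 */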
static void hci_sched_esco(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        if (!hci_conn_num(hdev, ESCO_LINK))
                return;

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
                                                     &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(hdev, skb);

                        conn->sent++;
                        if (conn->sent == ~0)
                                conn->sent = 0;
                }
        }
}

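/* LE scheduler: mirrors the packet-based ACL scheduler, with its own
 * stall check, falling back to the shared ACL buffer budget on
 * controllers that report no dedicated LE buffers (le_pkts == 0).
 */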
static void hci_sched_le(struct hci_dev *hdev)
{
        struct hci_chan *chan;
        struct sk_buff *skb;
        int quote, cnt, tmp;

        BT_DBG("%s", hdev->name);

        if (!hci_conn_num(hdev, LE_LINK))
                return;

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                /* LE tx timeout must be longer than maximum
                 * link supervision timeout (40.9 seconds) */
                if (!hdev->le_cnt && hdev->le_pkts &&
                    time_after(jiffies, hdev->le_last_tx + HZ * 45))
                        hci_link_tx_to(hdev, LE_LINK);
        }

        cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
        tmp = cnt;
        while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
                u32 priority = (skb_peek(&chan->data_q))->priority;
                while (quote-- && (skb = skb_peek(&chan->data_q))) {
                        BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
                               skb->len, skb->priority);

                        /* Stop if priority has changed */
                        if (skb->priority < priority)
                                break;

                        skb = skb_dequeue(&chan->data_q);

                        hci_send_frame(hdev, skb);
                        hdev->le_last_tx = jiffies;

                        cnt--;
                        chan->sent++;
                        chan->conn->sent++;
                }
        }

        if (hdev->le_pkts)
                hdev->le_cnt = cnt;
        else
                hdev->acl_cnt = cnt;

        if (cnt != tmp)
                hci_prio_recalculate(hdev, LE_LINK);
}

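/* TX work item: run the per-type schedulers (skipped entirely while a
 * user channel has exclusive access to the device) and then flush any
 * queued raw packets of unknown type straight to the driver.
 */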
static void hci_tx_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
        struct sk_buff *skb;

        BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
               hdev->sco_cnt, hdev->le_cnt);

        if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                /* Schedule queues and send stuff to HCI driver */
                hci_sched_acl(hdev);
                hci_sched_sco(hdev);
                hci_sched_esco(hdev);
                hci_sched_le(hdev);
        }

        /* Send next queued raw (unknown type) packet */
        while ((skb = skb_dequeue(&hdev->raw_q)))
                hci_send_frame(hdev, skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
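/* Strip the ACL header, decode the connection handle and the packet
 * boundary/broadcast flags, and hand the payload to L2CAP. Data for an
 * unknown handle is logged and dropped.
 */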
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_acl_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle, flags;

        skb_pull(skb, HCI_ACL_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);
        flags  = hci_flags(handle);
        handle = hci_handle(handle);

        BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
               handle, flags);

        hdev->stat.acl_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

                /* Send to upper protocol */
                l2cap_recv_acldata(conn, skb, flags);
                return;
        } else {
                BT_ERR("%s ACL packet for unknown connection handle %d",
                       hdev->name, handle);
        }

        kfree_skb(skb);
}

/* SCO data packet */
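/* Strip the SCO header and pass the payload up to the SCO layer; as
 * with ACL, data for an unknown handle is logged and dropped.
 */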
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_sco_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle;

        skb_pull(skb, HCI_SCO_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);

        BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

        hdev->stat.sco_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                /* Send to upper protocol */
                sco_recv_scodata(conn, skb);
                return;
        } else {
                BT_ERR("%s SCO packet for unknown connection handle %d",
                       hdev->name, handle);
        }

        kfree_skb(skb);
}

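/* The current request is complete once the command queue is empty or
 * the next queued command is flagged as the start of a new request.
 */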
static bool hci_req_is_complete(struct hci_dev *hdev)
{
        struct sk_buff *skb;

        skb = skb_peek(&hdev->cmd_q);
        if (!skb)
                return true;

        return bt_cb(skb)->req.start;
}

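/* Re-queue a clone of the most recently sent command at the head of the
 * command queue so it gets retried. A reset is never resent, presumably
 * since that could loop forever on controllers with the spontaneous
 * reset-complete quirk handled by the caller.
 */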
static void hci_resend_last(struct hci_dev *hdev)
{
        struct hci_command_hdr *sent;
        struct sk_buff *skb;
        u16 opcode;

        if (!hdev->sent_cmd)
                return;

        sent = (void *) hdev->sent_cmd->data;
        opcode = __le16_to_cpu(sent->opcode);
        if (opcode == HCI_OP_RESET)
                return;

        skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
        if (!skb)
                return;

        skb_queue_head(&hdev->cmd_q, skb);
        queue_work(hdev->workqueue, &hdev->cmd_work);
}

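/* Called from event processing when a command completes. Decides
 * whether the request the command belonged to has finished, invokes the
 * request's completion callback at most once, and on failure discards
 * the remaining commands of the aborted request.
 */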
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
        hci_req_complete_t req_complete = NULL;
        struct sk_buff *skb;
        unsigned long flags;

        BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

        /* If the completed command doesn't match the last one that was
         * sent we need to do special handling of it.
         */
        if (!hci_sent_cmd_data(hdev, opcode)) {
                /* Some CSR based controllers generate a spontaneous
                 * reset complete event during init and any pending
                 * command will never be completed. In such a case we
                 * need to resend whatever was the last sent
                 * command.
                 */
                if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
                        hci_resend_last(hdev);

                return;
        }

        /* If the command succeeded and there are still more commands in
         * this request, the request is not yet complete.
         */
        if (!status && !hci_req_is_complete(hdev))
                return;

        /* If this was the last command in a request the complete
         * callback would be found in hdev->sent_cmd instead of the
         * command queue (hdev->cmd_q).
         */
        if (hdev->sent_cmd) {
                req_complete = bt_cb(hdev->sent_cmd)->req.complete;

                if (req_complete) {
                        /* We must set the complete callback to NULL to
                         * avoid calling the callback more than once if
                         * this function gets called again.
                         */
                        bt_cb(hdev->sent_cmd)->req.complete = NULL;

                        goto call_complete;
                }
        }

        /* Remove all pending commands belonging to this request */
        spin_lock_irqsave(&hdev->cmd_q.lock, flags);
        while ((skb = __skb_dequeue(&hdev->cmd_q))) {
                if (bt_cb(skb)->req.start) {
                        __skb_queue_head(&hdev->cmd_q, skb);
                        break;
                }

                req_complete = bt_cb(skb)->req.complete;
                kfree_skb(skb);
        }
        spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
        if (req_complete)
                req_complete(hdev, status);
}

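/* RX work item: drain the receive queue, mirroring each packet to the
 * monitor and, in promiscuous mode, to the HCI sockets, then dispatch
 * it by packet type. Frames are not processed further for raw or
 * user-channel devices, and data packets arriving during init are
 * discarded.
 */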
static void hci_rx_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
        struct sk_buff *skb;

        BT_DBG("%s", hdev->name);

        while ((skb = skb_dequeue(&hdev->rx_q))) {
                /* Send copy to monitor */
                hci_send_to_monitor(hdev, skb);

                if (atomic_read(&hdev->promisc)) {
                        /* Send copy to the sockets */
                        hci_send_to_sock(hdev, skb);
                }

                if (test_bit(HCI_RAW, &hdev->flags) ||
                    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                        kfree_skb(skb);
                        continue;
                }

                if (test_bit(HCI_INIT, &hdev->flags)) {
                        /* Don't process data packets in this state. */
                        switch (bt_cb(skb)->pkt_type) {
                        case HCI_ACLDATA_PKT:
                        case HCI_SCODATA_PKT:
                                kfree_skb(skb);
                                continue;
                        }
                }

                /* Process frame */
                switch (bt_cb(skb)->pkt_type) {
                case HCI_EVENT_PKT:
                        BT_DBG("%s Event packet", hdev->name);
                        hci_event_packet(hdev, skb);
                        break;

                case HCI_ACLDATA_PKT:
                        BT_DBG("%s ACL data packet", hdev->name);
                        hci_acldata_packet(hdev, skb);
                        break;

                case HCI_SCODATA_PKT:
                        BT_DBG("%s SCO data packet", hdev->name);
                        hci_scodata_packet(hdev, skb);
                        break;

                default:
                        kfree_skb(skb);
                        break;
                }
        }
}

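/* CMD work item: if the controller advertises a free command slot
 * (cmd_cnt), dequeue the next command, keep a clone in hdev->sent_cmd
 * for request tracking, send it, and arm the command timeout (unless a
 * reset is in flight). If cloning fails, the command is put back and
 * the work is rescheduled.
 */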
static void hci_cmd_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
        struct sk_buff *skb;

        BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
               atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

        /* Send queued commands */
        if (atomic_read(&hdev->cmd_cnt)) {
                skb = skb_dequeue(&hdev->cmd_q);
                if (!skb)
                        return;

                kfree_skb(hdev->sent_cmd);

                hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
                if (hdev->sent_cmd) {
                        atomic_dec(&hdev->cmd_cnt);
                        hci_send_frame(hdev, skb);
                        if (test_bit(HCI_RESET, &hdev->flags))
                                del_timer(&hdev->cmd_timer);
                        else
                                mod_timer(&hdev->cmd_timer,
                                          jiffies + HCI_CMD_TIMEOUT);
                } else {
                        skb_queue_head(&hdev->cmd_q, skb);
                        queue_work(hdev->workqueue, &hdev->cmd_work);
                }
        }
3947 }