Bluetooth: Rename hci_request to hci_req_sync
net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
        BT_DBG("%s command 0x%4.4x result 0x%2.2x", hdev->name, cmd, result);

        /* If this is the init phase, check if the completed command matches
         * the last init command, and if not just return.
         */
        if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
                struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
                u16 opcode = __le16_to_cpu(sent->opcode);
                struct sk_buff *skb;

                /* Some CSR based controllers generate a spontaneous
                 * reset complete event during init and any pending
                 * command will never be completed. In such a case we
                 * need to resend whatever was the last sent
                 * command.
                 */

                if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
                        return;

                skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
                if (skb) {
                        skb_queue_head(&hdev->cmd_q, skb);
                        queue_work(hdev->workqueue, &hdev->cmd_work);
                }

                return;
        }

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*req)(struct hci_dev *hdev, unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        req(hdev, opt);
        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_dev *hdev, unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}
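
/* Usage sketch (illustrative): callers hand hci_req_sync() a callback that
 * queues one or more HCI commands; hci_req_complete() later wakes the
 * waiter with the result. For example, the ioctl helpers below do
 *
 *      err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt, HCI_INIT_TIMEOUT);
 *
 * which sends Write Auth Enable and sleeps until the controller answers
 * or the timeout expires.
 */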

static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &hdev->flags);
        hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_dev *hdev)
{
        hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
}

static void amp_init(struct hci_dev *hdev)
{
        hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local AMP Info */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_send_cmd(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}

static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
        struct sk_buff *skb;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Driver initialization */

        /* Special commands */
        while ((skb = skb_dequeue(&hdev->driver_init))) {
                bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
                skb->dev = (void *) hdev;

                skb_queue_tail(&hdev->cmd_q, skb);
                queue_work(hdev->workqueue, &hdev->cmd_work);
        }
        skb_queue_purge(&hdev->driver_init);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(hdev, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(hdev);
                break;

        case HCI_AMP:
                amp_init(hdev);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}
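
/* Note: any command skbs a driver pre-queued on hdev->driver_init are
 * moved to the command queue above before the core reset/init commands,
 * so driver-specific setup runs first.
 */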

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", hdev->name, scan);

        /* Inquiry and Page scans */
        hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", hdev->name, auth);

        /* Authentication */
        hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", hdev->name, encrypt);

        /* Encryption */
        hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", hdev->name, policy);

        /* Default link policy */
        hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

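/* Update the discovery state machine and notify userspace via mgmt when
 * discovery effectively starts (FINDING) or stops (back to STOPPED).
 */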
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (hdev->discovery.state == state)
                return;

        switch (state) {
        case DISCOVERY_STOPPED:
                if (hdev->discovery.state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }

        hdev->discovery.state = state;
}

static void inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

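/* Add or refresh an inquiry cache entry. Returns true if the remote name
 * is already known (no name resolution needed), false if the name is
 * still unknown or the entry could not be allocated.
 */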
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                              bool name_known, bool *ssp)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr);

        if (ssp)
                *ssp = data->ssp_mode;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (ie->data.ssp_mode && ssp)
                        *ssp = true;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
        if (!ie)
                return false;

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                return false;

        return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

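/* HCI request callback for hci_inquiry(): starts an inquiry using the
 * LAP, length and response limit from the user-supplied request.
 */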
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo);
                if (err < 0)
                        goto done;
        }

        /* for unlimited number of responses we will use buffer with
         * 255 entries
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep. Therefore we allocate temp buffer and then
         * copy it to the user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}

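/* Build the LE advertising data (flags, TX power and local name) into
 * ptr, which is assumed to hold HCI_MAX_AD_LENGTH bytes. Returns the
 * number of bytes used.
 */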
static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
        u8 ad_len = 0, flags = 0;
        size_t name_len;

        if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
                flags |= LE_AD_GENERAL;

        if (!lmp_bredr_capable(hdev))
                flags |= LE_AD_NO_BREDR;

        if (lmp_le_br_capable(hdev))
                flags |= LE_AD_SIM_LE_BREDR_CTRL;

        if (lmp_host_le_br_capable(hdev))
                flags |= LE_AD_SIM_LE_BREDR_HOST;

        if (flags) {
                BT_DBG("adv flags 0x%02x", flags);

                ptr[0] = 2;
                ptr[1] = EIR_FLAGS;
                ptr[2] = flags;

                ad_len += 3;
                ptr += 3;
        }

        if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
                ptr[0] = 2;
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8) hdev->adv_tx_power;

                ad_len += 3;
                ptr += 3;
        }

        name_len = strlen(hdev->dev_name);
        if (name_len > 0) {
                size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

                if (name_len > max_len) {
                        name_len = max_len;
                        ptr[1] = EIR_NAME_SHORT;
                } else
                        ptr[1] = EIR_NAME_COMPLETE;

                ptr[0] = name_len + 1;

                memcpy(ptr + 2, hdev->dev_name, name_len);

                ad_len += (name_len + 2);
                ptr += (name_len + 2);
        }

        return ad_len;
}

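/* Regenerate the advertising data and push it to the controller with
 * LE Set Advertising Data, skipping the command if nothing changed.
 */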
int hci_update_ad(struct hci_dev *hdev)
{
        struct hci_cp_le_set_adv_data cp;
        u8 len;
        int err;

        hci_dev_lock(hdev);

        if (!lmp_le_capable(hdev)) {
                err = -EINVAL;
                goto unlock;
        }

        memset(&cp, 0, sizeof(cp));

        len = create_ad(hdev, cp.data);

        if (hdev->adv_data_len == len &&
            memcmp(cp.data, hdev->adv_data, len) == 0) {
                err = 0;
                goto unlock;
        }

        memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
        hdev->adv_data_len = len;

        cp.length = len;
        err = hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);

unlock:
        hci_dev_unlock(hdev);

        return err;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
                ret = -ENODEV;
                goto done;
        }

        if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
                ret = -ERFKILL;
                goto done;
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                set_bit(HCI_RAW, &hdev->flags);

        /* Treat all non BR/EDR controllers as raw devices if
         * enable_hs is not set
         */
        if (hdev->dev_type != HCI_BREDR && !enable_hs)
                set_bit(HCI_RAW, &hdev->flags);

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                atomic_set(&hdev->cmd_cnt, 1);
                set_bit(HCI_INIT, &hdev->flags);
                hdev->init_last_cmd = 0;

                ret = __hci_req_sync(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT);

                clear_bit(HCI_INIT, &hdev->flags);
        }

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                hci_update_ad(hdev);
                if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
                    mgmt_valid_hdev(hdev)) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 1);
                        hci_dev_unlock(hdev);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

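/* Tear down an open device: cancel pending work and requests, flush all
 * queues, optionally send HCI Reset (HCI_QUIRK_RESET_ON_CLOSE), and
 * notify mgmt that the device is powered off.
 */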
static int hci_dev_do_close(struct hci_dev *hdev)
{
        BT_DBG("%s %p", hdev->name, hdev);

        cancel_work_sync(&hdev->le_scan);

        cancel_delayed_work(&hdev->power_off);

        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);

        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
                del_timer_sync(&hdev->cmd_timer);
                hci_req_unlock(hdev);
                return 0;
        }

        /* Flush RX and TX works */
        flush_work(&hdev->tx_work);
        flush_work(&hdev->rx_work);

        if (hdev->discov_timeout > 0) {
                cancel_delayed_work(&hdev->discov_off);
                hdev->discov_timeout = 0;
                clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
        }

        if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
                cancel_delayed_work(&hdev->service_cache);

        cancel_delayed_work_sync(&hdev->le_scan_disable);

        hci_dev_lock(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        hci_notify(hdev, HCI_DEV_DOWN);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        if (!test_bit(HCI_RAW, &hdev->flags) &&
            test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
                clear_bit(HCI_INIT, &hdev->flags);
        }

        /* flush cmd work */
        flush_work(&hdev->cmd_work);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->raw_q);

        /* Drop last sent command */
        if (hdev->sent_cmd) {
                del_timer_sync(&hdev->cmd_timer);
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }

        /* After this point our queues are empty
         * and no tasks are scheduled. */
        hdev->close(hdev);

        if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
            mgmt_valid_hdev(hdev)) {
                hci_dev_lock(hdev);
                mgmt_powered(hdev, 0);
                hci_dev_unlock(hdev);
        }

        /* Clear flags */
        hdev->flags = 0;

        /* Controller radio is available but is currently powered down */
        hdev->amp_status = 0;

        memset(hdev->eir, 0, sizeof(hdev->eir));
        memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

        hci_req_unlock(hdev);

        hci_dev_put(hdev);
        return 0;
}

int hci_dev_close(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work(&hdev->power_off);

        err = hci_dev_do_close(hdev);

        hci_dev_put(hdev);
        return err;
}

int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        hci_req_lock(hdev);

        if (!test_bit(HCI_UP, &hdev->flags))
                goto done;

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        hci_dev_lock(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

        if (!test_bit(HCI_RAW, &hdev->flags))
                ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        hci_dev_put(hdev);

        return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_req dr;
        int err = 0;

        if (copy_from_user(&dr, arg, sizeof(dr)))
                return -EFAULT;

        hdev = hci_dev_get(dr.dev_id);
        if (!hdev)
                return -ENODEV;

        switch (cmd) {
        case HCISETAUTH:
                err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETENCRYPT:
                if (!lmp_encrypt_capable(hdev)) {
                        err = -EOPNOTSUPP;
                        break;
                }

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
                                           HCI_INIT_TIMEOUT);
                        if (err)
                                break;
                }

                err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETSCAN:
                err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETLINKPOL:
                err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETLINKMODE:
                hdev->link_mode = ((__u16) dr.dev_opt) &
                                        (HCI_LM_MASTER | HCI_LM_ACCEPT);
                break;

        case HCISETPTYPE:
                hdev->pkt_type = (__u16) dr.dev_opt;
                break;

        case HCISETACLMTU:
                hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        case HCISETSCOMTU:
                hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        default:
                err = -EINVAL;
                break;
        }

        hci_dev_put(hdev);
        return err;
}
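
/* Note: for HCISETACLMTU and HCISETSCOMTU above, the 32-bit dev_opt packs
 * two host-endian 16-bit values: the packet count in the first half (in
 * memory order) and the MTU in the second, hence the __u16 pointer
 * arithmetic.
 */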

int hci_get_dev_list(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
        int n = 0, size, err;
        __u16 dev_num;

        if (get_user(dev_num, (__u16 __user *) arg))
                return -EFAULT;

        if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
                return -EINVAL;

        size = sizeof(*dl) + dev_num * sizeof(*dr);

        dl = kzalloc(size, GFP_KERNEL);
        if (!dl)
                return -ENOMEM;

        dr = dl->dev_req;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(hdev, &hci_dev_list, list) {
                if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                        cancel_delayed_work(&hdev->power_off);

                if (!test_bit(HCI_MGMT, &hdev->dev_flags))
                        set_bit(HCI_PAIRABLE, &hdev->dev_flags);

                (dr + n)->dev_id  = hdev->id;
                (dr + n)->dev_opt = hdev->flags;

                if (++n >= dev_num)
                        break;
        }
        read_unlock(&hci_dev_list_lock);

        dl->dev_num = n;
        size = sizeof(*dl) + n * sizeof(*dr);

        err = copy_to_user(arg, dl, size);
        kfree(dl);

        return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_info di;
        int err = 0;

        if (copy_from_user(&di, arg, sizeof(di)))
                return -EFAULT;

        hdev = hci_dev_get(di.dev_id);
        if (!hdev)
                return -ENODEV;

        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work_sync(&hdev->power_off);

        if (!test_bit(HCI_MGMT, &hdev->dev_flags))
                set_bit(HCI_PAIRABLE, &hdev->dev_flags);

        strcpy(di.name, hdev->name);
        di.bdaddr   = hdev->bdaddr;
        di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
        di.flags    = hdev->flags;
        di.pkt_type = hdev->pkt_type;
        if (lmp_bredr_capable(hdev)) {
                di.acl_mtu  = hdev->acl_mtu;
                di.acl_pkts = hdev->acl_pkts;
                di.sco_mtu  = hdev->sco_mtu;
                di.sco_pkts = hdev->sco_pkts;
        } else {
                di.acl_mtu  = hdev->le_mtu;
                di.acl_pkts = hdev->le_pkts;
                di.sco_mtu  = 0;
                di.sco_pkts = 0;
        }
        di.link_policy = hdev->link_policy;
        di.link_mode   = hdev->link_mode;

        memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
        memcpy(&di.features, &hdev->features, sizeof(di.features));

        if (copy_to_user(arg, &di, sizeof(di)))
                err = -EFAULT;

        hci_dev_put(hdev);

        return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
        struct hci_dev *hdev = data;

        BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

        if (!blocked)
                return 0;

        hci_dev_do_close(hdev);

        return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
        .set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

        BT_DBG("%s", hdev->name);

        if (hci_dev_open(hdev->id) < 0)
                return;

        if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
                                   HCI_AUTO_OFF_TIMEOUT);

        if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
                mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            power_off.work);

        BT_DBG("%s", hdev->name);

        hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
        struct hci_dev *hdev;
        u8 scan = SCAN_PAGE;

        hdev = container_of(work, struct hci_dev, discov_off.work);

        BT_DBG("%s", hdev->name);

        hci_dev_lock(hdev);

        hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

        hdev->discov_timeout = 0;

        hci_dev_unlock(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
        struct bt_uuid *uuid, *tmp;

        list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
                list_del(&uuid->list);
                kfree(uuid);
        }

        return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
        struct list_head *p, *n;

        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key;

                key = list_entry(p, struct link_key, list);

                list_del(p);
                kfree(key);
        }

        return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
        struct smp_ltk *k, *tmp;

        list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
                list_del(&k->list);
                kfree(k);
        }

        return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct link_key *k;

        list_for_each_entry(k, &hdev->link_keys, list)
                if (bacmp(bdaddr, &k->bdaddr) == 0)
                        return k;

        return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
                               u8 key_type, u8 old_key_type)
{
        /* Legacy key */
        if (key_type < 0x03)
                return true;

        /* Debug keys are insecure so don't store them persistently */
        if (key_type == HCI_LK_DEBUG_COMBINATION)
                return false;

        /* Changed combination key and there's no previous one */
        if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
                return false;

        /* Security mode 3 case */
        if (!conn)
                return true;

        /* Neither local nor remote side had no-bonding as requirement */
        if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
                return true;

        /* Local side had dedicated bonding as requirement */
        if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
                return true;

        /* Remote side had dedicated bonding as requirement */
        if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
                return true;

        /* If none of the above criteria match, then don't store the key
         * persistently */
        return false;
}

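/* Look up a stored SMP Long Term Key by its EDIV/Rand pair. */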
struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
        struct smp_ltk *k;

        list_for_each_entry(k, &hdev->long_term_keys, list) {
                if (k->ediv != ediv ||
                    memcmp(rand, k->rand, sizeof(k->rand)))
                        continue;

                return k;
        }

        return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
                                     u8 addr_type)
{
        struct smp_ltk *k;

        list_for_each_entry(k, &hdev->long_term_keys, list)
                if (addr_type == k->bdaddr_type &&
                    bacmp(bdaddr, &k->bdaddr) == 0)
                        return k;

        return NULL;
}

int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
                     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
        struct link_key *key, *old_key;
        u8 old_key_type;
        bool persistent;

        old_key = hci_find_link_key(hdev, bdaddr);
        if (old_key) {
                old_key_type = old_key->type;
                key = old_key;
        } else {
                old_key_type = conn ? conn->key_type : 0xff;
                key = kzalloc(sizeof(*key), GFP_ATOMIC);
                if (!key)
                        return -ENOMEM;
                list_add(&key->list, &hdev->link_keys);
        }

        BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

        /* Some buggy controller combinations generate a changed
         * combination key for legacy pairing even when there's no
         * previous key */
        if (type == HCI_LK_CHANGED_COMBINATION &&
            (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
                type = HCI_LK_COMBINATION;
                if (conn)
                        conn->key_type = type;
        }

        bacpy(&key->bdaddr, bdaddr);
        memcpy(key->val, val, HCI_LINK_KEY_SIZE);
        key->pin_len = pin_len;

        if (type == HCI_LK_CHANGED_COMBINATION)
                key->type = old_key_type;
        else
                key->type = type;

        if (!new_key)
                return 0;

        persistent = hci_persistent_key(hdev, conn, type, old_key_type);

        mgmt_new_link_key(hdev, key, persistent);

        if (conn)
                conn->flush_key = !persistent;

        return 0;
}

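/* Store (or update) an SMP Long Term Key. Only STKs and LTKs are kept;
 * genuinely new LTKs are reported to userspace via mgmt_new_ltk().
 */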
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
                int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
                ediv, u8 rand[8])
{
        struct smp_ltk *key, *old_key;

        if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
                return 0;

        old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
        if (old_key)
                key = old_key;
        else {
                key = kzalloc(sizeof(*key), GFP_ATOMIC);
                if (!key)
                        return -ENOMEM;
                list_add(&key->list, &hdev->long_term_keys);
        }

        bacpy(&key->bdaddr, bdaddr);
        key->bdaddr_type = addr_type;
        memcpy(key->val, tk, sizeof(key->val));
        key->authenticated = authenticated;
        key->ediv = ediv;
        key->enc_size = enc_size;
        key->type = type;
        memcpy(key->rand, rand, sizeof(key->rand));

        if (!new_key)
                return 0;

        if (type & HCI_SMP_LTK)
                mgmt_new_ltk(hdev, key, 1);

        return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct link_key *key;

        key = hci_find_link_key(hdev, bdaddr);
        if (!key)
                return -ENOENT;

        BT_DBG("%s removing %pMR", hdev->name, bdaddr);

        list_del(&key->list);
        kfree(key);

        return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct smp_ltk *k, *tmp;

        list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
                if (bacmp(bdaddr, &k->bdaddr))
                        continue;

                BT_DBG("%s removing %pMR", hdev->name, bdaddr);

                list_del(&k->list);
                kfree(k);
        }

        return 0;
}

/* HCI command timer function */
static void hci_cmd_timeout(unsigned long arg)
{
        struct hci_dev *hdev = (void *) arg;

        if (hdev->sent_cmd) {
                struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
                u16 opcode = __le16_to_cpu(sent->opcode);

                BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
        } else {
                BT_ERR("%s command tx timeout", hdev->name);
        }

        atomic_set(&hdev->cmd_cnt, 1);
        queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
                                          bdaddr_t *bdaddr)
{
        struct oob_data *data;

        list_for_each_entry(data, &hdev->remote_oob_data, list)
                if (bacmp(bdaddr, &data->bdaddr) == 0)
                        return data;

        return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct oob_data *data;

        data = hci_find_remote_oob_data(hdev, bdaddr);
        if (!data)
                return -ENOENT;

        BT_DBG("%s removing %pMR", hdev->name, bdaddr);

        list_del(&data->list);
        kfree(data);

        return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
        struct oob_data *data, *n;

        list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
                list_del(&data->list);
                kfree(data);
        }

        return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
                            u8 *randomizer)
{
        struct oob_data *data;

        data = hci_find_remote_oob_data(hdev, bdaddr);

        if (!data) {
                data = kmalloc(sizeof(*data), GFP_ATOMIC);
                if (!data)
                        return -ENOMEM;

                bacpy(&data->bdaddr, bdaddr);
                list_add(&data->list, &hdev->remote_oob_data);
        }

        memcpy(data->hash, hash, sizeof(data->hash));
        memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

        BT_DBG("%s for %pMR", hdev->name, bdaddr);

        return 0;
}

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct bdaddr_list *b;

        list_for_each_entry(b, &hdev->blacklist, list)
                if (bacmp(bdaddr, &b->bdaddr) == 0)
                        return b;

        return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
        struct list_head *p, *n;

        list_for_each_safe(p, n, &hdev->blacklist) {
                struct bdaddr_list *b;

                b = list_entry(p, struct bdaddr_list, list);

                list_del(p);
                kfree(b);
        }

        return 0;
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
        struct bdaddr_list *entry;

        if (bacmp(bdaddr, BDADDR_ANY) == 0)
                return -EBADF;

        if (hci_blacklist_lookup(hdev, bdaddr))
                return -EEXIST;

        entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;

        bacpy(&entry->bdaddr, bdaddr);

        list_add(&entry->list, &hdev->blacklist);

        return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
        struct bdaddr_list *entry;

        if (bacmp(bdaddr, BDADDR_ANY) == 0)
                return hci_blacklist_clear(hdev);

        entry = hci_blacklist_lookup(hdev, bdaddr);
        if (!entry)
                return -ENOENT;

        list_del(&entry->list);
        kfree(entry);

        return mgmt_device_unblocked(hdev, bdaddr, type);
}

static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
{
        struct le_scan_params *param = (struct le_scan_params *) opt;
        struct hci_cp_le_set_scan_param cp;

        memset(&cp, 0, sizeof(cp));
        cp.type = param->type;
        cp.interval = cpu_to_le16(param->interval);
        cp.window = cpu_to_le16(param->window);

        hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
}

static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
{
        struct hci_cp_le_set_scan_enable cp;

        memset(&cp, 0, sizeof(cp));
        cp.enable = 1;
        cp.filter_dup = 1;

        hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

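/* Run an LE scan synchronously: set the scan parameters, enable scanning
 * and schedule le_scan_disable to turn it off again after timeout ms.
 */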
static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
                          u16 window, int timeout)
{
        long timeo = msecs_to_jiffies(3000);
        struct le_scan_params param;
        int err;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
                return -EINPROGRESS;

        param.type = type;
        param.interval = interval;
        param.window = window;

        hci_req_lock(hdev);

        err = __hci_req_sync(hdev, le_scan_param_req, (unsigned long) &param,
                             timeo);
        if (!err)
                err = __hci_req_sync(hdev, le_scan_enable_req, 0, timeo);

        hci_req_unlock(hdev);

        if (err < 0)
                return err;

        queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
                           msecs_to_jiffies(timeout));

        return 0;
}

int hci_cancel_le_scan(struct hci_dev *hdev)
{
        BT_DBG("%s", hdev->name);

        if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
                return -EALREADY;

        if (cancel_delayed_work(&hdev->le_scan_disable)) {
                struct hci_cp_le_set_scan_enable cp;

                /* Send HCI command to disable LE Scan */
                memset(&cp, 0, sizeof(cp));
                hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
        }

        return 0;
}

static void le_scan_disable_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            le_scan_disable.work);
        struct hci_cp_le_set_scan_enable cp;

        BT_DBG("%s", hdev->name);

        memset(&cp, 0, sizeof(cp));

        hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static void le_scan_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
        struct le_scan_params *param = &hdev->le_scan_params;

        BT_DBG("%s", hdev->name);

        hci_do_le_scan(hdev, param->type, param->interval, param->window,
                       param->timeout);
}

int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
                int timeout)
{
        struct le_scan_params *param = &hdev->le_scan_params;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
                return -ENOTSUPP;

        if (work_busy(&hdev->le_scan))
                return -EINPROGRESS;

        param->type = type;
        param->interval = interval;
        param->window = window;
        param->timeout = timeout;

        queue_work(system_long_wq, &hdev->le_scan);

        return 0;
}

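/* Typical driver life cycle (illustrative sketch; the my_* callbacks are
 * hypothetical, not part of this file):
 *
 *      hdev = hci_alloc_dev();
 *      hdev->bus   = HCI_USB;
 *      hdev->open  = my_open;
 *      hdev->close = my_close;
 *      hdev->send  = my_send;
 *      if (hci_register_dev(hdev) < 0)
 *              hci_free_dev(hdev);
 *
 * hci_register_dev() below rejects devices without open/close callbacks.
 */
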
1693 /* Alloc HCI device */
1694 struct hci_dev *hci_alloc_dev(void)
1695 {
1696         struct hci_dev *hdev;
1697
1698         hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1699         if (!hdev)
1700                 return NULL;
1701
1702         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1703         hdev->esco_type = (ESCO_HV1);
1704         hdev->link_mode = (HCI_LM_ACCEPT);
1705         hdev->io_capability = 0x03; /* No Input No Output */
1706         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
1707         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
1708
1709         hdev->sniff_max_interval = 800;
1710         hdev->sniff_min_interval = 80;
1711
1712         mutex_init(&hdev->lock);
1713         mutex_init(&hdev->req_lock);
1714
1715         INIT_LIST_HEAD(&hdev->mgmt_pending);
1716         INIT_LIST_HEAD(&hdev->blacklist);
1717         INIT_LIST_HEAD(&hdev->uuids);
1718         INIT_LIST_HEAD(&hdev->link_keys);
1719         INIT_LIST_HEAD(&hdev->long_term_keys);
1720         INIT_LIST_HEAD(&hdev->remote_oob_data);
1721         INIT_LIST_HEAD(&hdev->conn_hash.list);
1722
1723         INIT_WORK(&hdev->rx_work, hci_rx_work);
1724         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
1725         INIT_WORK(&hdev->tx_work, hci_tx_work);
1726         INIT_WORK(&hdev->power_on, hci_power_on);
1727         INIT_WORK(&hdev->le_scan, le_scan_work);
1728
1729         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
1730         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
1731         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
1732
1733         skb_queue_head_init(&hdev->driver_init);
1734         skb_queue_head_init(&hdev->rx_q);
1735         skb_queue_head_init(&hdev->cmd_q);
1736         skb_queue_head_init(&hdev->raw_q);
1737
1738         init_waitqueue_head(&hdev->req_wait_q);
1739
1740         setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
1741
1742         hci_init_sysfs(hdev);
1743         discovery_init(hdev);
1744
1745         return hdev;
1746 }
1747 EXPORT_SYMBOL(hci_alloc_dev);
1748
1749 /* Free HCI device */
1750 void hci_free_dev(struct hci_dev *hdev)
1751 {
1752         skb_queue_purge(&hdev->driver_init);
1753
1754         /* will free via device release */
1755         put_device(&hdev->dev);
1756 }
1757 EXPORT_SYMBOL(hci_free_dev);
1758
1759 /* Register HCI device */
1760 int hci_register_dev(struct hci_dev *hdev)
1761 {
1762         int id, error;
1763
1764         if (!hdev->open || !hdev->close)
1765                 return -EINVAL;
1766
1767         /* Do not allow HCI_AMP devices to register at index 0,
1768          * so the index can be used as the AMP controller ID.
1769          */
1770         switch (hdev->dev_type) {
1771         case HCI_BREDR:
1772                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
1773                 break;
1774         case HCI_AMP:
1775                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
1776                 break;
1777         default:
1778                 return -EINVAL;
1779         }
1780
1781         if (id < 0)
1782                 return id;
1783
1784         sprintf(hdev->name, "hci%d", id);
1785         hdev->id = id;
1786
1787         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1788
1789         write_lock(&hci_dev_list_lock);
1790         list_add(&hdev->list, &hci_dev_list);
1791         write_unlock(&hci_dev_list_lock);
1792
1793         hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
1794                                           WQ_MEM_RECLAIM, 1);
1795         if (!hdev->workqueue) {
1796                 error = -ENOMEM;
1797                 goto err;
1798         }
1799
1800         hdev->req_workqueue = alloc_workqueue(hdev->name,
1801                                               WQ_HIGHPRI | WQ_UNBOUND |
1802                                               WQ_MEM_RECLAIM, 1);
1803         if (!hdev->req_workqueue) {
1804                 destroy_workqueue(hdev->workqueue);
1805                 error = -ENOMEM;
1806                 goto err;
1807         }
1808
1809         error = hci_add_sysfs(hdev);
1810         if (error < 0)
1811                 goto err_wqueue;
1812
1813         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1814                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
1815                                     hdev);
1816         if (hdev->rfkill) {
1817                 if (rfkill_register(hdev->rfkill) < 0) {
1818                         rfkill_destroy(hdev->rfkill);
1819                         hdev->rfkill = NULL;
1820                 }
1821         }
1822
1823         set_bit(HCI_SETUP, &hdev->dev_flags);
1824
1825         if (hdev->dev_type != HCI_AMP)
1826                 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1827
1828         hci_notify(hdev, HCI_DEV_REG);
1829         hci_dev_hold(hdev);
1830
1831         queue_work(hdev->req_workqueue, &hdev->power_on);
1832
1833         return id;
1834
1835 err_wqueue:
1836         destroy_workqueue(hdev->workqueue);
1837         destroy_workqueue(hdev->req_workqueue);
1838 err:
1839         ida_simple_remove(&hci_index_ida, hdev->id);
1840         write_lock(&hci_dev_list_lock);
1841         list_del(&hdev->list);
1842         write_unlock(&hci_dev_list_lock);
1843
1844         return error;
1845 }
1846 EXPORT_SYMBOL(hci_register_dev);
1847
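     /* On registration failure the caller still owns the device and is
      * expected to release it with hci_free_dev(); on success, teardown
      * is hci_unregister_dev() followed by hci_free_dev(), mirroring the
      * sketch above.
      */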
1848 /* Unregister HCI device */
1849 void hci_unregister_dev(struct hci_dev *hdev)
1850 {
1851         int i, id;
1852
1853         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1854
1855         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
1856
1857         id = hdev->id;
1858
1859         write_lock(&hci_dev_list_lock);
1860         list_del(&hdev->list);
1861         write_unlock(&hci_dev_list_lock);
1862
1863         hci_dev_do_close(hdev);
1864
1865         for (i = 0; i < NUM_REASSEMBLY; i++)
1866                 kfree_skb(hdev->reassembly[i]);
1867
1868         cancel_work_sync(&hdev->power_on);
1869
1870         if (!test_bit(HCI_INIT, &hdev->flags) &&
1871             !test_bit(HCI_SETUP, &hdev->dev_flags)) {
1872                 hci_dev_lock(hdev);
1873                 mgmt_index_removed(hdev);
1874                 hci_dev_unlock(hdev);
1875         }
1876
1877         /* mgmt_index_removed should take care of emptying the
1878          * pending list */
1879         BUG_ON(!list_empty(&hdev->mgmt_pending));
1880
1881         hci_notify(hdev, HCI_DEV_UNREG);
1882
1883         if (hdev->rfkill) {
1884                 rfkill_unregister(hdev->rfkill);
1885                 rfkill_destroy(hdev->rfkill);
1886         }
1887
1888         hci_del_sysfs(hdev);
1889
1890         destroy_workqueue(hdev->workqueue);
1891         destroy_workqueue(hdev->req_workqueue);
1892
1893         hci_dev_lock(hdev);
1894         hci_blacklist_clear(hdev);
1895         hci_uuids_clear(hdev);
1896         hci_link_keys_clear(hdev);
1897         hci_smp_ltks_clear(hdev);
1898         hci_remote_oob_data_clear(hdev);
1899         hci_dev_unlock(hdev);
1900
1901         hci_dev_put(hdev);
1902
1903         ida_simple_remove(&hci_index_ida, id);
1904 }
1905 EXPORT_SYMBOL(hci_unregister_dev);
1906
1907 /* Suspend HCI device */
1908 int hci_suspend_dev(struct hci_dev *hdev)
1909 {
1910         hci_notify(hdev, HCI_DEV_SUSPEND);
1911         return 0;
1912 }
1913 EXPORT_SYMBOL(hci_suspend_dev);
1914
1915 /* Resume HCI device */
1916 int hci_resume_dev(struct hci_dev *hdev)
1917 {
1918         hci_notify(hdev, HCI_DEV_RESUME);
1919         return 0;
1920 }
1921 EXPORT_SYMBOL(hci_resume_dev);
1922
1923 /* Receive frame from HCI drivers */
1924 int hci_recv_frame(struct sk_buff *skb)
1925 {
1926         struct hci_dev *hdev = (struct hci_dev *) skb->dev;

1927         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
1928                       !test_bit(HCI_INIT, &hdev->flags))) {
1929                 kfree_skb(skb);
1930                 return -ENXIO;
1931         }
1932
1933         /* Incoming skb */
1934         bt_cb(skb)->incoming = 1;
1935
1936         /* Time stamp */
1937         __net_timestamp(skb);
1938
1939         skb_queue_tail(&hdev->rx_q, skb);
1940         queue_work(hdev->workqueue, &hdev->rx_work);
1941
1942         return 0;
1943 }
1944 EXPORT_SYMBOL(hci_recv_frame);
1945
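     /* Drivers are expected to tag the skb before handing it in, roughly
      * as follows (my_hdev is a hypothetical driver-owned device
      * pointer):
      *
      *	skb->dev = (void *) my_hdev;
      *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
      *	hci_recv_frame(skb);
      */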
1946 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1947                           int count, __u8 index)
1948 {
1949         int len = 0;
1950         int hlen = 0;
1951         int remain = count;
1952         struct sk_buff *skb;
1953         struct bt_skb_cb *scb;
1954
1955         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1956             index >= NUM_REASSEMBLY)
1957                 return -EILSEQ;
1958
1959         skb = hdev->reassembly[index];
1960
1961         if (!skb) {
1962                 switch (type) {
1963                 case HCI_ACLDATA_PKT:
1964                         len = HCI_MAX_FRAME_SIZE;
1965                         hlen = HCI_ACL_HDR_SIZE;
1966                         break;
1967                 case HCI_EVENT_PKT:
1968                         len = HCI_MAX_EVENT_SIZE;
1969                         hlen = HCI_EVENT_HDR_SIZE;
1970                         break;
1971                 case HCI_SCODATA_PKT:
1972                         len = HCI_MAX_SCO_SIZE;
1973                         hlen = HCI_SCO_HDR_SIZE;
1974                         break;
1975                 }
1976
1977                 skb = bt_skb_alloc(len, GFP_ATOMIC);
1978                 if (!skb)
1979                         return -ENOMEM;
1980
1981                 scb = (void *) skb->cb;
1982                 scb->expect = hlen;
1983                 scb->pkt_type = type;
1984
1985                 skb->dev = (void *) hdev;
1986                 hdev->reassembly[index] = skb;
1987         }
1988
1989         while (count) {
1990                 scb = (void *) skb->cb;
1991                 len = min_t(uint, scb->expect, count);
1992
1993                 memcpy(skb_put(skb, len), data, len);
1994
1995                 count -= len;
1996                 data += len;
1997                 scb->expect -= len;
1998                 remain = count;
1999
2000                 switch (type) {
2001                 case HCI_EVENT_PKT:
2002                         if (skb->len == HCI_EVENT_HDR_SIZE) {
2003                                 struct hci_event_hdr *h = hci_event_hdr(skb);
2004                                 scb->expect = h->plen;
2005
2006                                 if (skb_tailroom(skb) < scb->expect) {
2007                                         kfree_skb(skb);
2008                                         hdev->reassembly[index] = NULL;
2009                                         return -ENOMEM;
2010                                 }
2011                         }
2012                         break;
2013
2014                 case HCI_ACLDATA_PKT:
2015                 if (skb->len == HCI_ACL_HDR_SIZE) {
2016                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2017                                 scb->expect = __le16_to_cpu(h->dlen);
2018
2019                                 if (skb_tailroom(skb) < scb->expect) {
2020                                         kfree_skb(skb);
2021                                         hdev->reassembly[index] = NULL;
2022                                         return -ENOMEM;
2023                                 }
2024                         }
2025                         break;
2026
2027                 case HCI_SCODATA_PKT:
2028                         if (skb->len == HCI_SCO_HDR_SIZE) {
2029                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2030                                 scb->expect = h->dlen;
2031
2032                                 if (skb_tailroom(skb) < scb->expect) {
2033                                         kfree_skb(skb);
2034                                         hdev->reassembly[index] = NULL;
2035                                         return -ENOMEM;
2036                                 }
2037                         }
2038                         break;
2039                 }
2040
2041                 if (scb->expect == 0) {
2042                         /* Complete frame */
2043
2044                         bt_cb(skb)->pkt_type = type;
2045                         hci_recv_frame(skb);
2046
2047                         hdev->reassembly[index] = NULL;
2048                         return remain;
2049                 }
2050         }
2051
2052         return remain;
2053 }
2054
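     /* hci_reassembly() returns the number of input bytes it did not
      * consume: 0 when everything was absorbed into a still-incomplete
      * frame, a positive remainder when a complete frame was delivered
      * mid-buffer (e.g. 10 bytes in, frame completes after 7, returns 3),
      * or a negative errno on failure.
      */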
2055 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2056 {
2057         int rem = 0;
2058
2059         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2060                 return -EILSEQ;
2061
2062         while (count) {
2063                 rem = hci_reassembly(hdev, type, data, count, type - 1);
2064                 if (rem < 0)
2065                         return rem;
2066
2067                 data += (count - rem);
2068                 count = rem;
2069         }
2070
2071         return rem;
2072 }
2073 EXPORT_SYMBOL(hci_recv_fragment);
2074
2075 #define STREAM_REASSEMBLY 0
2076
2077 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2078 {
2079         int type;
2080         int rem = 0;
2081
2082         while (count) {
2083                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2084
2085                 if (!skb) {
2086                         struct { char type; } *pkt;
2087
2088                         /* Start of the frame */
2089                         pkt = data;
2090                         type = pkt->type;
2091
2092                         data++;
2093                         count--;
2094                 } else
2095                         type = bt_cb(skb)->pkt_type;
2096
2097                 rem = hci_reassembly(hdev, type, data, count,
2098                                      STREAM_REASSEMBLY);
2099                 if (rem < 0)
2100                         return rem;
2101
2102                 data += (count - rem);
2103                 count = rem;
2104         }
2105
2106         return rem;
2107 }
2108 EXPORT_SYMBOL(hci_recv_stream_fragment);
2109
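     /* The stream reassembler assumes UART (H4) style framing: each frame
      * is prefixed with a single packet-type indicator byte (0x01 command,
      * 0x02 ACL data, 0x03 SCO data, 0x04 event), which is stripped here
      * and carried in bt_cb(skb)->pkt_type across calls until the frame
      * completes.
      */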
2110 /* ---- Interface to upper protocols ---- */
2111
2112 int hci_register_cb(struct hci_cb *cb)
2113 {
2114         BT_DBG("%p name %s", cb, cb->name);
2115
2116         write_lock(&hci_cb_list_lock);
2117         list_add(&cb->list, &hci_cb_list);
2118         write_unlock(&hci_cb_list_lock);
2119
2120         return 0;
2121 }
2122 EXPORT_SYMBOL(hci_register_cb);
2123
2124 int hci_unregister_cb(struct hci_cb *cb)
2125 {
2126         BT_DBG("%p name %s", cb, cb->name);
2127
2128         write_lock(&hci_cb_list_lock);
2129         list_del(&cb->list);
2130         write_unlock(&hci_cb_list_lock);
2131
2132         return 0;
2133 }
2134 EXPORT_SYMBOL(hci_unregister_cb);
2135
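     /* Upper protocols hook in with a callback table, along these lines
      * (my_security_cfm is a hypothetical handler; only the fields a
      * protocol cares about need to be set):
      *
      *	static struct hci_cb my_cb = {
      *		.name         = "my_proto",
      *		.security_cfm = my_security_cfm,
      *	};
      *
      *	hci_register_cb(&my_cb);
      */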
2136 static int hci_send_frame(struct sk_buff *skb)
2137 {
2138         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2139
2140         if (!hdev) {
2141                 kfree_skb(skb);
2142                 return -ENODEV;
2143         }
2144
2145         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2146
2147         /* Time stamp */
2148         __net_timestamp(skb);
2149
2150         /* Send copy to monitor */
2151         hci_send_to_monitor(hdev, skb);
2152
2153         if (atomic_read(&hdev->promisc)) {
2154                 /* Send copy to the sockets */
2155                 hci_send_to_sock(hdev, skb);
2156         }
2157
2158         /* Get rid of skb owner, prior to sending to the driver. */
2159         skb_orphan(skb);
2160
2161         return hdev->send(skb);
2162 }
2163
2164 /* Send HCI command */
2165 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
2166 {
2167         int len = HCI_COMMAND_HDR_SIZE + plen;
2168         struct hci_command_hdr *hdr;
2169         struct sk_buff *skb;
2170
2171         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2172
2173         skb = bt_skb_alloc(len, GFP_ATOMIC);
2174         if (!skb) {
2175                 BT_ERR("%s no memory for command", hdev->name);
2176                 return -ENOMEM;
2177         }
2178
2179         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2180         hdr->opcode = cpu_to_le16(opcode);
2181         hdr->plen   = plen;
2182
2183         if (plen)
2184                 memcpy(skb_put(skb, plen), param, plen);
2185
2186         BT_DBG("skb len %d", skb->len);
2187
2188         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2189         skb->dev = (void *) hdev;
2190
2191         if (test_bit(HCI_INIT, &hdev->flags))
2192                 hdev->init_last_cmd = opcode;
2193
2194         skb_queue_tail(&hdev->cmd_q, skb);
2195         queue_work(hdev->workqueue, &hdev->cmd_work);
2196
2197         return 0;
2198 }
2199
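     /* Example: enabling page and inquiry scan takes a one-byte parameter
      * block (constants from hci.h):
      *
      *	__u8 scan = SCAN_PAGE | SCAN_INQUIRY;
      *
      *	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
      */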
2200 /* Get data from the previously sent command */
2201 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2202 {
2203         struct hci_command_hdr *hdr;
2204
2205         if (!hdev->sent_cmd)
2206                 return NULL;
2207
2208         hdr = (void *) hdev->sent_cmd->data;
2209
2210         if (hdr->opcode != cpu_to_le16(opcode))
2211                 return NULL;
2212
2213         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2214
2215         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2216 }
2217
2218 /* Send ACL data */
2219 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2220 {
2221         struct hci_acl_hdr *hdr;
2222         int len = skb->len;
2223
2224         skb_push(skb, HCI_ACL_HDR_SIZE);
2225         skb_reset_transport_header(skb);
2226         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2227         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2228         hdr->dlen   = cpu_to_le16(len);
2229 }
2230
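     /* The 16-bit ACL handle field packs the connection handle into bits
      * 0-11 and the packet boundary/broadcast flags into bits 12-15;
      * hci_handle_pack() builds the field, and hci_handle()/hci_flags()
      * (used on the receive path below) split it apart again.
      */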
2231 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
2232                           struct sk_buff *skb, __u16 flags)
2233 {
2234         struct hci_conn *conn = chan->conn;
2235         struct hci_dev *hdev = conn->hdev;
2236         struct sk_buff *list;
2237
2238         skb->len = skb_headlen(skb);
2239         skb->data_len = 0;
2240
2241         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2242
2243         switch (hdev->dev_type) {
2244         case HCI_BREDR:
2245                 hci_add_acl_hdr(skb, conn->handle, flags);
2246                 break;
2247         case HCI_AMP:
2248                 hci_add_acl_hdr(skb, chan->handle, flags);
2249                 break;
2250         default:
2251                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2252                 return;
2253         }
2254
2255         list = skb_shinfo(skb)->frag_list;
2256         if (!list) {
2257                 /* Non-fragmented */
2258                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2259
2260                 skb_queue_tail(queue, skb);
2261         } else {
2262                 /* Fragmented */
2263                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2264
2265                 skb_shinfo(skb)->frag_list = NULL;
2266
2267                 /* Queue all fragments atomically */
2268                 spin_lock(&queue->lock);
2269
2270                 __skb_queue_tail(queue, skb);
2271
2272                 flags &= ~ACL_START;
2273                 flags |= ACL_CONT;
2274                 do {
2275                         skb = list; list = list->next;
2276
2277                         skb->dev = (void *) hdev;
2278                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2279                         hci_add_acl_hdr(skb, conn->handle, flags);
2280
2281                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2282
2283                         __skb_queue_tail(queue, skb);
2284                 } while (list);
2285
2286                 spin_unlock(&queue->lock);
2287         }
2288 }
2289
2290 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2291 {
2292         struct hci_dev *hdev = chan->conn->hdev;
2293
2294         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
2295
2296         skb->dev = (void *) hdev;
2297
2298         hci_queue_acl(chan, &chan->data_q, skb, flags);
2299
2300         queue_work(hdev->workqueue, &hdev->tx_work);
2301 }
2302
2303 /* Send SCO data */
2304 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2305 {
2306         struct hci_dev *hdev = conn->hdev;
2307         struct hci_sco_hdr hdr;
2308
2309         BT_DBG("%s len %d", hdev->name, skb->len);
2310
2311         hdr.handle = cpu_to_le16(conn->handle);
2312         hdr.dlen   = skb->len;
2313
2314         skb_push(skb, HCI_SCO_HDR_SIZE);
2315         skb_reset_transport_header(skb);
2316         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2317
2318         skb->dev = (void *) hdev;
2319         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2320
2321         skb_queue_tail(&conn->data_q, skb);
2322         queue_work(hdev->workqueue, &hdev->tx_work);
2323 }
2324
2325 /* ---- HCI TX task (outgoing data) ---- */
2326
2327 /* HCI Connection scheduler */
2328 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2329                                      int *quote)
2330 {
2331         struct hci_conn_hash *h = &hdev->conn_hash;
2332         struct hci_conn *conn = NULL, *c;
2333         unsigned int num = 0, min = ~0;
2334
2335         /* We don't have to lock device here. Connections are always
2336          * added and removed with TX task disabled. */
2337
2338         rcu_read_lock();
2339
2340         list_for_each_entry_rcu(c, &h->list, list) {
2341                 if (c->type != type || skb_queue_empty(&c->data_q))
2342                         continue;
2343
2344                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2345                         continue;
2346
2347                 num++;
2348
2349                 if (c->sent < min) {
2350                         min  = c->sent;
2351                         conn = c;
2352                 }
2353
2354                 if (hci_conn_num(hdev, type) == num)
2355                         break;
2356         }
2357
2358         rcu_read_unlock();
2359
2360         if (conn) {
2361                 int cnt, q;
2362
2363                 switch (conn->type) {
2364                 case ACL_LINK:
2365                         cnt = hdev->acl_cnt;
2366                         break;
2367                 case SCO_LINK:
2368                 case ESCO_LINK:
2369                         cnt = hdev->sco_cnt;
2370                         break;
2371                 case LE_LINK:
2372                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2373                         break;
2374                 default:
2375                         cnt = 0;
2376                         BT_ERR("Unknown link type");
2377                 }
2378
2379                 q = cnt / num;
2380                 *quote = q ? q : 1;
2381         } else
2382                 *quote = 0;
2383
2384         BT_DBG("conn %p quote %d", conn, *quote);
2385         return conn;
2386 }
2387
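     /* The quota computed above is a simple fair share: free controller
      * buffer credits divided by the number of active connections of the
      * given type, never less than one. With, say, 9 free ACL slots and
      * 4 busy ACL links, each link may send 2 frames per scheduling
      * round.
      */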
2388 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2389 {
2390         struct hci_conn_hash *h = &hdev->conn_hash;
2391         struct hci_conn *c;
2392
2393         BT_ERR("%s link tx timeout", hdev->name);
2394
2395         rcu_read_lock();
2396
2397         /* Kill stalled connections */
2398         list_for_each_entry_rcu(c, &h->list, list) {
2399                 if (c->type == type && c->sent) {
2400                         BT_ERR("%s killing stalled connection %pMR",
2401                                hdev->name, &c->dst);
2402                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
2403                 }
2404         }
2405
2406         rcu_read_unlock();
2407 }
2408
2409 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2410                                       int *quote)
2411 {
2412         struct hci_conn_hash *h = &hdev->conn_hash;
2413         struct hci_chan *chan = NULL;
2414         unsigned int num = 0, min = ~0, cur_prio = 0;
2415         struct hci_conn *conn;
2416         int cnt, q, conn_num = 0;
2417
2418         BT_DBG("%s", hdev->name);
2419
2420         rcu_read_lock();
2421
2422         list_for_each_entry_rcu(conn, &h->list, list) {
2423                 struct hci_chan *tmp;
2424
2425                 if (conn->type != type)
2426                         continue;
2427
2428                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2429                         continue;
2430
2431                 conn_num++;
2432
2433                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2434                         struct sk_buff *skb;
2435
2436                         if (skb_queue_empty(&tmp->data_q))
2437                                 continue;
2438
2439                         skb = skb_peek(&tmp->data_q);
2440                         if (skb->priority < cur_prio)
2441                                 continue;
2442
2443                         if (skb->priority > cur_prio) {
2444                                 num = 0;
2445                                 min = ~0;
2446                                 cur_prio = skb->priority;
2447                         }
2448
2449                         num++;
2450
2451                         if (conn->sent < min) {
2452                                 min  = conn->sent;
2453                                 chan = tmp;
2454                         }
2455                 }
2456
2457                 if (hci_conn_num(hdev, type) == conn_num)
2458                         break;
2459         }
2460
2461         rcu_read_unlock();
2462
2463         if (!chan)
2464                 return NULL;
2465
2466         switch (chan->conn->type) {
2467         case ACL_LINK:
2468                 cnt = hdev->acl_cnt;
2469                 break;
2470         case AMP_LINK:
2471                 cnt = hdev->block_cnt;
2472                 break;
2473         case SCO_LINK:
2474         case ESCO_LINK:
2475                 cnt = hdev->sco_cnt;
2476                 break;
2477         case LE_LINK:
2478                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2479                 break;
2480         default:
2481                 cnt = 0;
2482                 BT_ERR("Unknown link type");
2483         }
2484
2485         q = cnt / num;
2486         *quote = q ? q : 1;
2487         BT_DBG("chan %p quote %d", chan, *quote);
2488         return chan;
2489 }
2490
2491 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2492 {
2493         struct hci_conn_hash *h = &hdev->conn_hash;
2494         struct hci_conn *conn;
2495         int num = 0;
2496
2497         BT_DBG("%s", hdev->name);
2498
2499         rcu_read_lock();
2500
2501         list_for_each_entry_rcu(conn, &h->list, list) {
2502                 struct hci_chan *chan;
2503
2504                 if (conn->type != type)
2505                         continue;
2506
2507                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2508                         continue;
2509
2510                 num++;
2511
2512                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
2513                         struct sk_buff *skb;
2514
2515                         if (chan->sent) {
2516                                 chan->sent = 0;
2517                                 continue;
2518                         }
2519
2520                         if (skb_queue_empty(&chan->data_q))
2521                                 continue;
2522
2523                         skb = skb_peek(&chan->data_q);
2524                         if (skb->priority >= HCI_PRIO_MAX - 1)
2525                                 continue;
2526
2527                         skb->priority = HCI_PRIO_MAX - 1;
2528
2529                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2530                                skb->priority);
2531                 }
2532
2533                 if (hci_conn_num(hdev, type) == num)
2534                         break;
2535         }
2536
2537         rcu_read_unlock();
2539 }
2540
2541 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2542 {
2543         /* Calculate count of blocks used by this packet */
2544         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2545 }
2546
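     /* Example of the block arithmetic: with a (purely illustrative)
      * block_len of 339 octets, a 682-byte skb carries 678 bytes of ACL
      * payload after the 4-byte header, i.e. DIV_ROUND_UP(678, 339) == 2
      * data blocks.
      */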
2547 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
2548 {
2549         if (!test_bit(HCI_RAW, &hdev->flags)) {
2550                 /* ACL tx timeout must be longer than maximum
2551                  * link supervision timeout (40.9 seconds) */
2552                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
2553                                        HCI_ACL_TX_TIMEOUT))
2554                         hci_link_tx_to(hdev, ACL_LINK);
2555         }
2556 }
2557
2558 static void hci_sched_acl_pkt(struct hci_dev *hdev)
2559 {
2560         unsigned int cnt = hdev->acl_cnt;
2561         struct hci_chan *chan;
2562         struct sk_buff *skb;
2563         int quote;
2564
2565         __check_timeout(hdev, cnt);
2566
2567         while (hdev->acl_cnt &&
2568                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2569                 u32 priority = (skb_peek(&chan->data_q))->priority;
2570                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2571                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2572                                skb->len, skb->priority);
2573
2574                         /* Stop if priority has changed */
2575                         if (skb->priority < priority)
2576                                 break;
2577
2578                         skb = skb_dequeue(&chan->data_q);
2579
2580                         hci_conn_enter_active_mode(chan->conn,
2581                                                    bt_cb(skb)->force_active);
2582
2583                         hci_send_frame(skb);
2584                         hdev->acl_last_tx = jiffies;
2585
2586                         hdev->acl_cnt--;
2587                         chan->sent++;
2588                         chan->conn->sent++;
2589                 }
2590         }
2591
2592         if (cnt != hdev->acl_cnt)
2593                 hci_prio_recalculate(hdev, ACL_LINK);
2594 }
2595
2596 static void hci_sched_acl_blk(struct hci_dev *hdev)
2597 {
2598         unsigned int cnt = hdev->block_cnt;
2599         struct hci_chan *chan;
2600         struct sk_buff *skb;
2601         int quote;
2602         u8 type;
2603
2604         __check_timeout(hdev, cnt);
2605
2606         BT_DBG("%s", hdev->name);
2607
2608         if (hdev->dev_type == HCI_AMP)
2609                 type = AMP_LINK;
2610         else
2611                 type = ACL_LINK;
2612
2613         while (hdev->block_cnt > 0 &&
2614                (chan = hci_chan_sent(hdev, type, &quote))) {
2615                 u32 priority = (skb_peek(&chan->data_q))->priority;
2616                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2617                         int blocks;
2618
2619                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2620                                skb->len, skb->priority);
2621
2622                         /* Stop if priority has changed */
2623                         if (skb->priority < priority)
2624                                 break;
2625
2626                         skb = skb_dequeue(&chan->data_q);
2627
2628                         blocks = __get_blocks(hdev, skb);
2629                         if (blocks > hdev->block_cnt) {
                                     /* Not enough buffer blocks for this
                                      * frame; requeue it rather than leak
                                      * it, and retry once the controller
                                      * returns credits.
                                      */
                                     skb_queue_head(&chan->data_q, skb);
2630                                 return;
                             }
2631
2632                         hci_conn_enter_active_mode(chan->conn,
2633                                                    bt_cb(skb)->force_active);
2634
2635                         hci_send_frame(skb);
2636                         hdev->acl_last_tx = jiffies;
2637
2638                         hdev->block_cnt -= blocks;
2639                         quote -= blocks;
2640
2641                         chan->sent += blocks;
2642                         chan->conn->sent += blocks;
2643                 }
2644         }
2645
2646         if (cnt != hdev->block_cnt)
2647                 hci_prio_recalculate(hdev, type);
2648 }
2649
2650 static void hci_sched_acl(struct hci_dev *hdev)
2651 {
2652         BT_DBG("%s", hdev->name);
2653
2654         /* No ACL connections to schedule on a BR/EDR controller */
2655         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
2656                 return;
2657
2658         /* No AMP connections to schedule on an AMP controller */
2659         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
2660                 return;
2661
2662         switch (hdev->flow_ctl_mode) {
2663         case HCI_FLOW_CTL_MODE_PACKET_BASED:
2664                 hci_sched_acl_pkt(hdev);
2665                 break;
2666
2667         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2668                 hci_sched_acl_blk(hdev);
2669                 break;
2670         }
2671 }
2672
2673 /* Schedule SCO */
2674 static void hci_sched_sco(struct hci_dev *hdev)
2675 {
2676         struct hci_conn *conn;
2677         struct sk_buff *skb;
2678         int quote;
2679
2680         BT_DBG("%s", hdev->name);
2681
2682         if (!hci_conn_num(hdev, SCO_LINK))
2683                 return;
2684
2685         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2686                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2687                         BT_DBG("skb %p len %d", skb, skb->len);
2688                         hci_send_frame(skb);
2689
2690                         conn->sent++;
2691                         if (conn->sent == ~0)
2692                                 conn->sent = 0;
2693                 }
2694         }
2695 }
2696
2697 static void hci_sched_esco(struct hci_dev *hdev)
2698 {
2699         struct hci_conn *conn;
2700         struct sk_buff *skb;
2701         int quote;
2702
2703         BT_DBG("%s", hdev->name);
2704
2705         if (!hci_conn_num(hdev, ESCO_LINK))
2706                 return;
2707
2708         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
2709                                                      &quote))) {
2710                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2711                         BT_DBG("skb %p len %d", skb, skb->len);
2712                         hci_send_frame(skb);
2713
2714                         conn->sent++;
2715                         if (conn->sent == ~0)
2716                                 conn->sent = 0;
2717                 }
2718         }
2719 }
2720
2721 static void hci_sched_le(struct hci_dev *hdev)
2722 {
2723         struct hci_chan *chan;
2724         struct sk_buff *skb;
2725         int quote, cnt, tmp;
2726
2727         BT_DBG("%s", hdev->name);
2728
2729         if (!hci_conn_num(hdev, LE_LINK))
2730                 return;
2731
2732         if (!test_bit(HCI_RAW, &hdev->flags)) {
2733                 /* LE tx timeout must be longer than maximum
2734                  * link supervision timeout (40.9 seconds) */
2735                 if (!hdev->le_cnt && hdev->le_pkts &&
2736                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
2737                         hci_link_tx_to(hdev, LE_LINK);
2738         }
2739
2740         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
2741         tmp = cnt;
2742         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
2743                 u32 priority = (skb_peek(&chan->data_q))->priority;
2744                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2745                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2746                                skb->len, skb->priority);
2747
2748                         /* Stop if priority has changed */
2749                         if (skb->priority < priority)
2750                                 break;
2751
2752                         skb = skb_dequeue(&chan->data_q);
2753
2754                         hci_send_frame(skb);
2755                         hdev->le_last_tx = jiffies;
2756
2757                         cnt--;
2758                         chan->sent++;
2759                         chan->conn->sent++;
2760                 }
2761         }
2762
2763         if (hdev->le_pkts)
2764                 hdev->le_cnt = cnt;
2765         else
2766                 hdev->acl_cnt = cnt;
2767
2768         if (cnt != tmp)
2769                 hci_prio_recalculate(hdev, LE_LINK);
2770 }
2771
2772 static void hci_tx_work(struct work_struct *work)
2773 {
2774         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
2775         struct sk_buff *skb;
2776
2777         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2778                hdev->sco_cnt, hdev->le_cnt);
2779
2780         /* Schedule queues and send pending frames to the HCI driver */
2781
2782         hci_sched_acl(hdev);
2783
2784         hci_sched_sco(hdev);
2785
2786         hci_sched_esco(hdev);
2787
2788         hci_sched_le(hdev);
2789
2790         /* Send next queued raw (unknown type) packet */
2791         while ((skb = skb_dequeue(&hdev->raw_q)))
2792                 hci_send_frame(skb);
2793 }
2794
2795 /* ----- HCI RX task (incoming data processing) ----- */
2796
2797 /* ACL data packet */
2798 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2799 {
2800         struct hci_acl_hdr *hdr = (void *) skb->data;
2801         struct hci_conn *conn;
2802         __u16 handle, flags;
2803
2804         skb_pull(skb, HCI_ACL_HDR_SIZE);
2805
2806         handle = __le16_to_cpu(hdr->handle);
2807         flags  = hci_flags(handle);
2808         handle = hci_handle(handle);
2809
2810         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
2811                handle, flags);
2812
2813         hdev->stat.acl_rx++;
2814
2815         hci_dev_lock(hdev);
2816         conn = hci_conn_hash_lookup_handle(hdev, handle);
2817         hci_dev_unlock(hdev);
2818
2819         if (conn) {
2820                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
2821
2822                 /* Send to upper protocol */
2823                 l2cap_recv_acldata(conn, skb, flags);
2824                 return;
2825         } else {
2826                 BT_ERR("%s ACL packet for unknown connection handle %d",
2827                        hdev->name, handle);
2828         }
2829
2830         kfree_skb(skb);
2831 }
2832
2833 /* SCO data packet */
2834 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2835 {
2836         struct hci_sco_hdr *hdr = (void *) skb->data;
2837         struct hci_conn *conn;
2838         __u16 handle;
2839
2840         skb_pull(skb, HCI_SCO_HDR_SIZE);
2841
2842         handle = __le16_to_cpu(hdr->handle);
2843
2844         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
2845
2846         hdev->stat.sco_rx++;
2847
2848         hci_dev_lock(hdev);
2849         conn = hci_conn_hash_lookup_handle(hdev, handle);
2850         hci_dev_unlock(hdev);
2851
2852         if (conn) {
2853                 /* Send to upper protocol */
2854                 sco_recv_scodata(conn, skb);
2855                 return;
2856         } else {
2857                 BT_ERR("%s SCO packet for unknown connection handle %d",
2858                        hdev->name, handle);
2859         }
2860
2861         kfree_skb(skb);
2862 }
2863
2864 static void hci_rx_work(struct work_struct *work)
2865 {
2866         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
2867         struct sk_buff *skb;
2868
2869         BT_DBG("%s", hdev->name);
2870
2871         while ((skb = skb_dequeue(&hdev->rx_q))) {
2872                 /* Send copy to monitor */
2873                 hci_send_to_monitor(hdev, skb);
2874
2875                 if (atomic_read(&hdev->promisc)) {
2876                         /* Send copy to the sockets */
2877                         hci_send_to_sock(hdev, skb);
2878                 }
2879
2880                 if (test_bit(HCI_RAW, &hdev->flags)) {
2881                         kfree_skb(skb);
2882                         continue;
2883                 }
2884
2885                 if (test_bit(HCI_INIT, &hdev->flags)) {
2886                         /* Don't process data packets in this state. */
2887                         switch (bt_cb(skb)->pkt_type) {
2888                         case HCI_ACLDATA_PKT:
2889                         case HCI_SCODATA_PKT:
2890                                 kfree_skb(skb);
2891                                 continue;
2892                         }
2893                 }
2894
2895                 /* Process frame */
2896                 switch (bt_cb(skb)->pkt_type) {
2897                 case HCI_EVENT_PKT:
2898                         BT_DBG("%s Event packet", hdev->name);
2899                         hci_event_packet(hdev, skb);
2900                         break;
2901
2902                 case HCI_ACLDATA_PKT:
2903                         BT_DBG("%s ACL data packet", hdev->name);
2904                         hci_acldata_packet(hdev, skb);
2905                         break;
2906
2907                 case HCI_SCODATA_PKT:
2908                         BT_DBG("%s SCO data packet", hdev->name);
2909                         hci_scodata_packet(hdev, skb);
2910                         break;
2911
2912                 default:
2913                         kfree_skb(skb);
2914                         break;
2915                 }
2916         }
2917 }
2918
2919 static void hci_cmd_work(struct work_struct *work)
2920 {
2921         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
2922         struct sk_buff *skb;
2923
2924         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
2925                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
2926
2927         /* Send queued commands */
2928         if (atomic_read(&hdev->cmd_cnt)) {
2929                 skb = skb_dequeue(&hdev->cmd_q);
2930                 if (!skb)
2931                         return;
2932
2933                 kfree_skb(hdev->sent_cmd);
2934
2935                 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2936                 if (hdev->sent_cmd) {
2937                         atomic_dec(&hdev->cmd_cnt);
2938                         hci_send_frame(skb);
2939                         if (test_bit(HCI_RESET, &hdev->flags))
2940                                 del_timer(&hdev->cmd_timer);
2941                         else
2942                                 mod_timer(&hdev->cmd_timer,
2943                                           jiffies + HCI_CMD_TIMEOUT);
2944                 } else {
2945                         skb_queue_head(&hdev->cmd_q, skb);
2946                         queue_work(hdev->workqueue, &hdev->cmd_work);
2947                 }
2948         }
2949 }
2950
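     /* cmd_cnt mirrors the Num_HCI_Command_Packets credit that the
      * controller reports in Command Complete/Command Status events; it
      * is usually 1, so commands are effectively serialized, and the
      * cmd_timer armed above catches controllers that stop responding.
      */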
2951 int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2952 {
2953         /* General inquiry access code (GIAC) */
2954         u8 lap[3] = { 0x33, 0x8b, 0x9e };
2955         struct hci_cp_inquiry cp;
2956
2957         BT_DBG("%s", hdev->name);
2958
2959         if (test_bit(HCI_INQUIRY, &hdev->flags))
2960                 return -EINPROGRESS;
2961
2962         inquiry_cache_flush(hdev);
2963
2964         memset(&cp, 0, sizeof(cp));
2965         memcpy(&cp.lap, lap, sizeof(cp.lap));
2966         cp.length  = length;
2967
2968         return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2969 }
2970
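     /* The LAP above is the General Inquiry Access Code 0x9E8B33 in
      * little-endian byte order, and the length parameter is expressed in
      * units of 1.28 s, so e.g. length 0x08 scans for about 10.24 s.
      */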
2971 int hci_cancel_inquiry(struct hci_dev *hdev)
2972 {
2973         BT_DBG("%s", hdev->name);
2974
2975         if (!test_bit(HCI_INQUIRY, &hdev->flags))
2976                 return -EALREADY;
2977
2978         return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2979 }
2980
2981 u8 bdaddr_to_le(u8 bdaddr_type)
2982 {
2983         switch (bdaddr_type) {
2984         case BDADDR_LE_PUBLIC:
2985                 return ADDR_LE_DEV_PUBLIC;
2986
2987         default:
2988                 /* Fallback to LE Random address type */
2989                 return ADDR_LE_DEV_RANDOM;
2990         }
2991 }