Bluetooth: Fix hci_inquiry ioctl usage
net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
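
/* Typical usage, as in the ioctl handlers below: a request builder such
 * as hci_scan_req() is passed in together with its option word, e.g.
 *
 *	err = hci_req_sync(hdev, hci_scan_req, SCAN_INQUIRY | SCAN_PAGE,
 *			   HCI_INIT_TIMEOUT);
 *
 * The builder only queues commands; hci_req_sync() runs the queue and
 * sleeps until hci_req_sync_complete() reports the result.
 */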

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_request init_req;
	struct sk_buff *skb;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	hci_req_init(&init_req, hdev);

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		if (skb_queue_empty(&init_req.cmd_q))
			bt_cb(skb)->req.start = true;

		skb_queue_tail(&init_req.cmd_q, skb);
	}
	skb_queue_purge(&hdev->driver_init);

	hci_req_run(&init_req, NULL);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_cp_delete_stored_link_key cp;
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 0x01;
	hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);

	/* Read page scan parameters */
	if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}
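
/* Inquiry mode 0x00 is standard, 0x01 adds RSSI to inquiry results and
 * 0x02 enables Extended Inquiry Result events. The manufacturer and
 * revision checks above are quirks for controllers that deliver RSSI
 * results without advertising the corresponding LMP feature bit.
 */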

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
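
	/* Bit b of events[n] corresponds to bit 8 * n + b of the Set Event
	 * Mask parameter, e.g. events[4] |= 0x02 below sets bit 33, the
	 * Inquiry Result with RSSI event.
	 */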

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre-1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01;	/* IO Capability Request */
		events[6] |= 0x02;	/* IO Capability Response */
		events[6] |= 0x04;	/* User Confirmation Request */
		events[6] |= 0x08;	/* User Passkey Request */
		events[6] |= 0x10;	/* Remote OOB Data Request */
		events[6] |= 0x20;	/* Simple Pairing Complete */
		events[7] |= 0x04;	/* User Passkey Notification */
		events[7] |= 0x08;	/* Keypress Notification */
		events[7] |= 0x10;	/* Remote Host Supported
					 * Features Notification
					 */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20;	/* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	if (hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		hci_set_le_support(req);
		hci_update_ad(req);
	}
}

static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The HCI_BREDR device type covers single-mode LE, single-mode
	 * BR/EDR and dual-mode BR/EDR/LE controllers. AMP controllers
	 * only need the first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
}
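
/* Controller bring-up is thus split into three request stages: init1
 * resets the controller and reads its basic capabilities, init2
 * configures the BR/EDR and LE transports plus the event mask, and
 * init3 applies settings (link policy, LE support) that depend on the
 * replies gathered in the earlier stages.
 */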

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

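/* Re-insert @ie into the resolve list so the list stays ordered by
 * signal strength: RSSI is in negative dBm, so a smaller absolute value
 * means a stronger signal, and name resolution is attempted for the
 * closest devices first. Entries already being resolved (NAME_PENDING)
 * are never displaced.
 */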
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR
		 * via the done label so the device reference is dropped.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
				TASK_INTERRUPTIBLE)) {
			err = -EINTR;
			goto done;
		}
	}

	/* For an unlimited number of responses, use a buffer big enough
	 * for 255 entries.
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* inquiry_cache_dump() can't sleep, so allocate a temporary buffer
	 * and copy it to user space afterwards.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
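
/* A minimal userspace sketch of driving this ioctl (illustrative only;
 * hci_fd is assumed to be an open HCI socket for the device):
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info    info[8];
 *	} buf = { 0 };
 *
 *	buf.ir.dev_id  = 0;			  // hci0
 *	buf.ir.length  = 8;			  // 8 * 1.28s inquiry window
 *	buf.ir.num_rsp = 8;			  // room for 8 responses
 *	buf.ir.flags   = IREQ_CACHE_FLUSH;
 *	memcpy(buf.ir.lap, "\x33\x8b\x9e", 3);	  // GIAC
 *
 *	if (ioctl(hci_fd, HCIINQUIRY, &buf) == 0)
 *		;  // buf.ir.num_rsp entries of buf.info are now valid
 */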
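
/* Build the LE advertising data at @ptr as a sequence of AD structures,
 * each encoded as { length, type, payload } with length counting the
 * type byte plus the payload. Returns the number of bytes written.
 */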
static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;
	size_t name_len;

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
		flags |= LE_AD_GENERAL;

	if (!lmp_bredr_capable(hdev))
		flags |= LE_AD_NO_BREDR;

	if (lmp_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_CTRL;

	if (lmp_host_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_HOST;

	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}

void hci_update_ad(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!lmp_le_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_ad(hdev, cp.data);

	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non-BR/EDR controllers as raw devices if
	 * enable_hs is not set.
	 */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		ret = __hci_init(hdev);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    mgmt_valid_hdev(hdev)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
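
/* hci_dev_open() backs the HCIDEVUP ioctl on the HCI control socket,
 * so e.g. "hciconfig hci0 up" ends up here via ioctl(fd, HCIDEVUP, 0);
 * hci_dev_close() below is the HCIDEVDOWN counterpart.
 */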

static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_work_sync(&hdev->le_scan);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    mgmt_valid_hdev(hdev)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = 0;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
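
/* The HCISET* ioctls above all take a struct hci_dev_req; for example,
 * enabling page and inquiry scan from userspace (illustrative sketch):
 *
 *	struct hci_dev_req dr = {
 *		.dev_id  = 0,
 *		.dev_opt = SCAN_PAGE | SCAN_INQUIRY,
 *	};
 *
 *	ioctl(hci_fd, HCISETSCAN, &dr);
 */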

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

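/* Decide whether a new link key should be stored persistently. The
 * auth_type/remote_auth values checked below are the Authentication
 * Requirements exchanged during IO capability negotiation: 0x00/0x01
 * mean no bonding, 0x02/0x03 dedicated bonding and 0x04/0x05 general
 * bonding, with odd values requesting MITM protection.
 */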
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}

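/* Look up an LE long term key by EDIV and Rand, the two values a remote
 * device includes in its LE Long Term Key Request event to identify
 * which stored key should be used to encrypt the link.
 */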
struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
		    memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		return k;
	}

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}

int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size,
		__le16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr))
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

/* HCI command timer function */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list)
		if (bacmp(bdaddr, &b->bdaddr) == 0)
			return b;

	return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b;

		b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
1850         if (!entry)
1851                 return -ENOENT;
1852
1853         list_del(&entry->list);
1854         kfree(entry);
1855
1856         return mgmt_device_unblocked(hdev, bdaddr, type);
1857 }
1858
1859 static void le_scan_param_req(struct hci_request *req, unsigned long opt)
1860 {
1861         struct le_scan_params *param = (struct le_scan_params *) opt;
1862         struct hci_cp_le_set_scan_param cp;
1863
1864         memset(&cp, 0, sizeof(cp));
1865         cp.type = param->type;
1866         cp.interval = cpu_to_le16(param->interval);
1867         cp.window = cpu_to_le16(param->window);
1868
1869         hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1870 }
1871
1872 static void le_scan_enable_req(struct hci_request *req, unsigned long opt)
1873 {
1874         struct hci_cp_le_set_scan_enable cp;
1875
1876         memset(&cp, 0, sizeof(cp));
1877         cp.enable = 1;
1878         cp.filter_dup = 1;
1879
1880         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1881 }
1882
1883 static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
1884                           u16 window, int timeout)
1885 {
1886         long timeo = msecs_to_jiffies(3000);
1887         struct le_scan_params param;
1888         int err;
1889
1890         BT_DBG("%s", hdev->name);
1891
1892         if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1893                 return -EINPROGRESS;
1894
1895         param.type = type;
1896         param.interval = interval;
1897         param.window = window;
1898
1899         hci_req_lock(hdev);
1900
1901         err = __hci_req_sync(hdev, le_scan_param_req, (unsigned long) &param,
1902                              timeo);
1903         if (!err)
1904                 err = __hci_req_sync(hdev, le_scan_enable_req, 0, timeo);
1905
1906         hci_req_unlock(hdev);
1907
1908         if (err < 0)
1909                 return err;
1910
1911         queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
1912                            msecs_to_jiffies(timeout));
1913
1914         return 0;
1915 }
1916
1917 int hci_cancel_le_scan(struct hci_dev *hdev)
1918 {
1919         BT_DBG("%s", hdev->name);
1920
1921         if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1922                 return -EALREADY;
1923
1924         if (cancel_delayed_work(&hdev->le_scan_disable)) {
1925                 struct hci_cp_le_set_scan_enable cp;
1926
1927                 /* Send HCI command to disable LE Scan */
1928                 memset(&cp, 0, sizeof(cp));
1929                 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1930         }
1931
1932         return 0;
1933 }
1934
1935 static void le_scan_disable_work(struct work_struct *work)
1936 {
1937         struct hci_dev *hdev = container_of(work, struct hci_dev,
1938                                             le_scan_disable.work);
1939         struct hci_cp_le_set_scan_enable cp;
1940
1941         BT_DBG("%s", hdev->name);
1942
1943         memset(&cp, 0, sizeof(cp));
1944
1945         hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1946 }
1947
1948 static void le_scan_work(struct work_struct *work)
1949 {
1950         struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1951         struct le_scan_params *param = &hdev->le_scan_params;
1952
1953         BT_DBG("%s", hdev->name);
1954
1955         hci_do_le_scan(hdev, param->type, param->interval, param->window,
1956                        param->timeout);
1957 }
1958
1959 int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
1960                 int timeout)
1961 {
1962         struct le_scan_params *param = &hdev->le_scan_params;
1963
1964         BT_DBG("%s", hdev->name);
1965
1966         if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
1967                 return -ENOTSUPP;
1968
1969         if (work_busy(&hdev->le_scan))
1970                 return -EINPROGRESS;
1971
1972         param->type = type;
1973         param->interval = interval;
1974         param->window = window;
1975         param->timeout = timeout;
1976
1977         queue_work(system_long_wq, &hdev->le_scan);
1978
1979         return 0;
1980 }
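
/* Usage sketch (illustrative values, not defaults): interval and window
 * are in HCI units of 0.625 ms, so 0x0010 means 16 * 0.625 = 10 ms; the
 * timeout is in plain milliseconds. A 10.24 second active (type 0x01)
 * scan would thus be:
 *
 *	err = hci_le_scan(hdev, 0x01, 0x0010, 0x0010, 10240);
 */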
1981
1982 /* Alloc HCI device */
1983 struct hci_dev *hci_alloc_dev(void)
1984 {
1985         struct hci_dev *hdev;
1986
1987         hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1988         if (!hdev)
1989                 return NULL;
1990
1991         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1992         hdev->esco_type = (ESCO_HV1);
1993         hdev->link_mode = (HCI_LM_ACCEPT);
1994         hdev->io_capability = 0x03; /* No Input No Output */
1995         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
1996         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
1997
1998         hdev->sniff_max_interval = 800;
1999         hdev->sniff_min_interval = 80;
2000
2001         mutex_init(&hdev->lock);
2002         mutex_init(&hdev->req_lock);
2003
2004         INIT_LIST_HEAD(&hdev->mgmt_pending);
2005         INIT_LIST_HEAD(&hdev->blacklist);
2006         INIT_LIST_HEAD(&hdev->uuids);
2007         INIT_LIST_HEAD(&hdev->link_keys);
2008         INIT_LIST_HEAD(&hdev->long_term_keys);
2009         INIT_LIST_HEAD(&hdev->remote_oob_data);
2010         INIT_LIST_HEAD(&hdev->conn_hash.list);
2011
2012         INIT_WORK(&hdev->rx_work, hci_rx_work);
2013         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2014         INIT_WORK(&hdev->tx_work, hci_tx_work);
2015         INIT_WORK(&hdev->power_on, hci_power_on);
2016         INIT_WORK(&hdev->le_scan, le_scan_work);
2017
2018         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2019         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2020         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2021
2022         skb_queue_head_init(&hdev->driver_init);
2023         skb_queue_head_init(&hdev->rx_q);
2024         skb_queue_head_init(&hdev->cmd_q);
2025         skb_queue_head_init(&hdev->raw_q);
2026
2027         init_waitqueue_head(&hdev->req_wait_q);
2028
2029         setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
2030
2031         hci_init_sysfs(hdev);
2032         discovery_init(hdev);
2033
2034         return hdev;
2035 }
2036 EXPORT_SYMBOL(hci_alloc_dev);
2037
2038 /* Free HCI device */
2039 void hci_free_dev(struct hci_dev *hdev)
2040 {
2041         skb_queue_purge(&hdev->driver_init);
2042
2043         /* will free via device release */
2044         put_device(&hdev->dev);
2045 }
2046 EXPORT_SYMBOL(hci_free_dev);
2047
2048 /* Register HCI device */
2049 int hci_register_dev(struct hci_dev *hdev)
2050 {
2051         int id, error;
2052
2053         if (!hdev->open || !hdev->close)
2054                 return -EINVAL;
2055
2056         /* Do not allow HCI_AMP devices to register at index 0,
2057          * so the index can be used as the AMP controller ID.
2058          */
2059         switch (hdev->dev_type) {
2060         case HCI_BREDR:
2061                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2062                 break;
2063         case HCI_AMP:
2064                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2065                 break;
2066         default:
2067                 return -EINVAL;
2068         }
2069
2070         if (id < 0)
2071                 return id;
2072
2073         sprintf(hdev->name, "hci%d", id);
2074         hdev->id = id;
2075
2076         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2077
2078         write_lock(&hci_dev_list_lock);
2079         list_add(&hdev->list, &hci_dev_list);
2080         write_unlock(&hci_dev_list_lock);
2081
2082         hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
2083                                           WQ_MEM_RECLAIM, 1);
2084         if (!hdev->workqueue) {
2085                 error = -ENOMEM;
2086                 goto err;
2087         }
2088
2089         hdev->req_workqueue = alloc_workqueue(hdev->name,
2090                                               WQ_HIGHPRI | WQ_UNBOUND |
2091                                               WQ_MEM_RECLAIM, 1);
2092         if (!hdev->req_workqueue) {
2093                 destroy_workqueue(hdev->workqueue);
2094                 error = -ENOMEM;
2095                 goto err;
2096         }
2097
2098         error = hci_add_sysfs(hdev);
2099         if (error < 0)
2100                 goto err_wqueue;
2101
2102         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2103                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2104                                     hdev);
2105         if (hdev->rfkill) {
2106                 if (rfkill_register(hdev->rfkill) < 0) {
2107                         rfkill_destroy(hdev->rfkill);
2108                         hdev->rfkill = NULL;
2109                 }
2110         }
2111
2112         set_bit(HCI_SETUP, &hdev->dev_flags);
2113
2114         if (hdev->dev_type != HCI_AMP)
2115                 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2116
2117         hci_notify(hdev, HCI_DEV_REG);
2118         hci_dev_hold(hdev);
2119
2120         queue_work(hdev->req_workqueue, &hdev->power_on);
2121
2122         return id;
2123
2124 err_wqueue:
2125         destroy_workqueue(hdev->workqueue);
2126         destroy_workqueue(hdev->req_workqueue);
2127 err:
2128         ida_simple_remove(&hci_index_ida, hdev->id);
2129         write_lock(&hci_dev_list_lock);
2130         list_del(&hdev->list);
2131         write_unlock(&hci_dev_list_lock);
2132
2133         return error;
2134 }
2135 EXPORT_SYMBOL(hci_register_dev);
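
/* Registration sketch for a hypothetical transport driver (the foo_*
 * callbacks and foo private data are placeholders): allocate the hdev,
 * wire up the mandatory open/close callbacks plus send, then register;
 * on failure the hdev must be freed again:
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = foo_open;
 *	hdev->close = foo_close;
 *	hdev->send  = foo_send;
 *	hci_set_drvdata(hdev, foo);
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */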
2136
2137 /* Unregister HCI device */
2138 void hci_unregister_dev(struct hci_dev *hdev)
2139 {
2140         int i, id;
2141
2142         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2143
2144         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2145
2146         id = hdev->id;
2147
2148         write_lock(&hci_dev_list_lock);
2149         list_del(&hdev->list);
2150         write_unlock(&hci_dev_list_lock);
2151
2152         hci_dev_do_close(hdev);
2153
2154         for (i = 0; i < NUM_REASSEMBLY; i++)
2155                 kfree_skb(hdev->reassembly[i]);
2156
2157         cancel_work_sync(&hdev->power_on);
2158
2159         if (!test_bit(HCI_INIT, &hdev->flags) &&
2160             !test_bit(HCI_SETUP, &hdev->dev_flags)) {
2161                 hci_dev_lock(hdev);
2162                 mgmt_index_removed(hdev);
2163                 hci_dev_unlock(hdev);
2164         }
2165
2166         /* mgmt_index_removed should take care of emptying the
2167          * pending list */
2168         BUG_ON(!list_empty(&hdev->mgmt_pending));
2169
2170         hci_notify(hdev, HCI_DEV_UNREG);
2171
2172         if (hdev->rfkill) {
2173                 rfkill_unregister(hdev->rfkill);
2174                 rfkill_destroy(hdev->rfkill);
2175         }
2176
2177         hci_del_sysfs(hdev);
2178
2179         destroy_workqueue(hdev->workqueue);
2180         destroy_workqueue(hdev->req_workqueue);
2181
2182         hci_dev_lock(hdev);
2183         hci_blacklist_clear(hdev);
2184         hci_uuids_clear(hdev);
2185         hci_link_keys_clear(hdev);
2186         hci_smp_ltks_clear(hdev);
2187         hci_remote_oob_data_clear(hdev);
2188         hci_dev_unlock(hdev);
2189
2190         hci_dev_put(hdev);
2191
2192         ida_simple_remove(&hci_index_ida, id);
2193 }
2194 EXPORT_SYMBOL(hci_unregister_dev);
2195
2196 /* Suspend HCI device */
2197 int hci_suspend_dev(struct hci_dev *hdev)
2198 {
2199         hci_notify(hdev, HCI_DEV_SUSPEND);
2200         return 0;
2201 }
2202 EXPORT_SYMBOL(hci_suspend_dev);
2203
2204 /* Resume HCI device */
2205 int hci_resume_dev(struct hci_dev *hdev)
2206 {
2207         hci_notify(hdev, HCI_DEV_RESUME);
2208         return 0;
2209 }
2210 EXPORT_SYMBOL(hci_resume_dev);
2211
2212 /* Receive frame from HCI drivers */
2213 int hci_recv_frame(struct sk_buff *skb)
2214 {
2215         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2216         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
2217                       !test_bit(HCI_INIT, &hdev->flags))) {
2218                 kfree_skb(skb);
2219                 return -ENXIO;
2220         }
2221
2222         /* Incoming skb */
2223         bt_cb(skb)->incoming = 1;
2224
2225         /* Time stamp */
2226         __net_timestamp(skb);
2227
2228         skb_queue_tail(&hdev->rx_q, skb);
2229         queue_work(hdev->workqueue, &hdev->rx_work);
2230
2231         return 0;
2232 }
2233 EXPORT_SYMBOL(hci_recv_frame);
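
/* RX sketch (illustrative): a driver that has read a complete event
 * packet from its hardware hands it to the core like this; skb->dev
 * must carry the hdev and the packet type must be set:
 *
 *	skb = bt_skb_alloc(count, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *
 *	memcpy(skb_put(skb, count), buf, count);
 *	skb->dev = (void *) hdev;
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	hci_recv_frame(skb);
 */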
2234
2235 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
2236                           int count, __u8 index)
2237 {
2238         int len = 0;
2239         int hlen = 0;
2240         int remain = count;
2241         struct sk_buff *skb;
2242         struct bt_skb_cb *scb;
2243
2244         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
2245             index >= NUM_REASSEMBLY)
2246                 return -EILSEQ;
2247
2248         skb = hdev->reassembly[index];
2249
2250         if (!skb) {
2251                 switch (type) {
2252                 case HCI_ACLDATA_PKT:
2253                         len = HCI_MAX_FRAME_SIZE;
2254                         hlen = HCI_ACL_HDR_SIZE;
2255                         break;
2256                 case HCI_EVENT_PKT:
2257                         len = HCI_MAX_EVENT_SIZE;
2258                         hlen = HCI_EVENT_HDR_SIZE;
2259                         break;
2260                 case HCI_SCODATA_PKT:
2261                         len = HCI_MAX_SCO_SIZE;
2262                         hlen = HCI_SCO_HDR_SIZE;
2263                         break;
2264                 }
2265
2266                 skb = bt_skb_alloc(len, GFP_ATOMIC);
2267                 if (!skb)
2268                         return -ENOMEM;
2269
2270                 scb = (void *) skb->cb;
2271                 scb->expect = hlen;
2272                 scb->pkt_type = type;
2273
2274                 skb->dev = (void *) hdev;
2275                 hdev->reassembly[index] = skb;
2276         }
2277
2278         while (count) {
2279                 scb = (void *) skb->cb;
2280                 len = min_t(uint, scb->expect, count);
2281
2282                 memcpy(skb_put(skb, len), data, len);
2283
2284                 count -= len;
2285                 data += len;
2286                 scb->expect -= len;
2287                 remain = count;
2288
2289                 switch (type) {
2290                 case HCI_EVENT_PKT:
2291                         if (skb->len == HCI_EVENT_HDR_SIZE) {
2292                                 struct hci_event_hdr *h = hci_event_hdr(skb);
2293                                 scb->expect = h->plen;
2294
2295                                 if (skb_tailroom(skb) < scb->expect) {
2296                                         kfree_skb(skb);
2297                                         hdev->reassembly[index] = NULL;
2298                                         return -ENOMEM;
2299                                 }
2300                         }
2301                         break;
2302
2303                 case HCI_ACLDATA_PKT:
2304                         if (skb->len == HCI_ACL_HDR_SIZE) {
2305                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2306                                 scb->expect = __le16_to_cpu(h->dlen);
2307
2308                                 if (skb_tailroom(skb) < scb->expect) {
2309                                         kfree_skb(skb);
2310                                         hdev->reassembly[index] = NULL;
2311                                         return -ENOMEM;
2312                                 }
2313                         }
2314                         break;
2315
2316                 case HCI_SCODATA_PKT:
2317                         if (skb->len == HCI_SCO_HDR_SIZE) {
2318                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2319                                 scb->expect = h->dlen;
2320
2321                                 if (skb_tailroom(skb) < scb->expect) {
2322                                         kfree_skb(skb);
2323                                         hdev->reassembly[index] = NULL;
2324                                         return -ENOMEM;
2325                                 }
2326                         }
2327                         break;
2328                 }
2329
2330                 if (scb->expect == 0) {
2331                         /* Complete frame */
2332
2333                         bt_cb(skb)->pkt_type = type;
2334                         hci_recv_frame(skb);
2335
2336                         hdev->reassembly[index] = NULL;
2337                         return remain;
2338                 }
2339         }
2340
2341         return remain;
2342 }
2343
2344 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2345 {
2346         int rem = 0;
2347
2348         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2349                 return -EILSEQ;
2350
2351         while (count) {
2352                 rem = hci_reassembly(hdev, type, data, count, type - 1);
2353                 if (rem < 0)
2354                         return rem;
2355
2356                 data += (count - rem);
2357                 count = rem;
2358         }
2359
2360         return rem;
2361 }
2362 EXPORT_SYMBOL(hci_recv_fragment);
2363
2364 #define STREAM_REASSEMBLY 0
2365
2366 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2367 {
2368         int type;
2369         int rem = 0;
2370
2371         while (count) {
2372                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2373
2374                 if (!skb) {
2375                         struct { char type; } *pkt;
2376
2377                         /* Start of the frame */
2378                         pkt = data;
2379                         type = pkt->type;
2380
2381                         data++;
2382                         count--;
2383                 } else
2384                         type = bt_cb(skb)->pkt_type;
2385
2386                 rem = hci_reassembly(hdev, type, data, count,
2387                                      STREAM_REASSEMBLY);
2388                 if (rem < 0)
2389                         return rem;
2390
2391                 data += (count - rem);
2392                 count = rem;
2393         }
2394
2395         return rem;
2396 }
2397 EXPORT_SYMBOL(hci_recv_stream_fragment);
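
/* Stream RX sketch (illustrative): a UART line discipline receiving raw
 * H:4 bytes (a packet type octet followed by the packet itself) can pass
 * its buffer straight in; the core consumes the type octet and
 * reassembles across calls:
 *
 *	hci_recv_stream_fragment(hdev, data, count);
 */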
2398
2399 /* ---- Interface to upper protocols ---- */
2400
2401 int hci_register_cb(struct hci_cb *cb)
2402 {
2403         BT_DBG("%p name %s", cb, cb->name);
2404
2405         write_lock(&hci_cb_list_lock);
2406         list_add(&cb->list, &hci_cb_list);
2407         write_unlock(&hci_cb_list_lock);
2408
2409         return 0;
2410 }
2411 EXPORT_SYMBOL(hci_register_cb);
2412
2413 int hci_unregister_cb(struct hci_cb *cb)
2414 {
2415         BT_DBG("%p name %s", cb, cb->name);
2416
2417         write_lock(&hci_cb_list_lock);
2418         list_del(&cb->list);
2419         write_unlock(&hci_cb_list_lock);
2420
2421         return 0;
2422 }
2423 EXPORT_SYMBOL(hci_unregister_cb);
2424
2425 static int hci_send_frame(struct sk_buff *skb)
2426 {
2427         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2428
2429         if (!hdev) {
2430                 kfree_skb(skb);
2431                 return -ENODEV;
2432         }
2433
2434         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2435
2436         /* Time stamp */
2437         __net_timestamp(skb);
2438
2439         /* Send copy to monitor */
2440         hci_send_to_monitor(hdev, skb);
2441
2442         if (atomic_read(&hdev->promisc)) {
2443                 /* Send copy to the sockets */
2444                 hci_send_to_sock(hdev, skb);
2445         }
2446
2447         /* Get rid of skb owner, prior to sending to the driver. */
2448         skb_orphan(skb);
2449
2450         return hdev->send(skb);
2451 }
2452
2453 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2454 {
2455         skb_queue_head_init(&req->cmd_q);
2456         req->hdev = hdev;
2457         req->err = 0;
2458 }
2459
2460 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2461 {
2462         struct hci_dev *hdev = req->hdev;
2463         struct sk_buff *skb;
2464         unsigned long flags;
2465
2466         BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2467
2468         /* If an error occurred during request building, remove all HCI
2469          * commands queued on the HCI request queue.
2470          */
2471         if (req->err) {
2472                 skb_queue_purge(&req->cmd_q);
2473                 return req->err;
2474         }
2475
2476         /* Do not allow empty requests */
2477         if (skb_queue_empty(&req->cmd_q))
2478                 return -ENODATA;
2479
2480         skb = skb_peek_tail(&req->cmd_q);
2481         bt_cb(skb)->req.complete = complete;
2482
2483         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2484         skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2485         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2486
2487         queue_work(hdev->workqueue, &hdev->cmd_work);
2488
2489         return 0;
2490 }
2491
2492 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
2493                                        u32 plen, void *param)
2494 {
2495         int len = HCI_COMMAND_HDR_SIZE + plen;
2496         struct hci_command_hdr *hdr;
2497         struct sk_buff *skb;
2498
2499         skb = bt_skb_alloc(len, GFP_ATOMIC);
2500         if (!skb)
2501                 return NULL;
2502
2503         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2504         hdr->opcode = cpu_to_le16(opcode);
2505         hdr->plen   = plen;
2506
2507         if (plen)
2508                 memcpy(skb_put(skb, plen), param, plen);
2509
2510         BT_DBG("skb len %d", skb->len);
2511
2512         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2513         skb->dev = (void *) hdev;
2514
2515         return skb;
2516 }
2517
2518 /* Send HCI command */
2519 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
2520 {
2521         struct sk_buff *skb;
2522
2523         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2524
2525         skb = hci_prepare_cmd(hdev, opcode, plen, param);
2526         if (!skb) {
2527                 BT_ERR("%s no memory for command", hdev->name);
2528                 return -ENOMEM;
2529         }
2530
2531         /* Stand-alone HCI commands must be flagged as
2532          * single-command requests.
2533          */
2534         bt_cb(skb)->req.start = true;
2535
2536         skb_queue_tail(&hdev->cmd_q, skb);
2537         queue_work(hdev->workqueue, &hdev->cmd_work);
2538
2539         return 0;
2540 }
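
/* Usage sketch (illustrative): sending a single stand-alone command,
 * here Write Scan Enable with both page and inquiry scan enabled:
 *
 *	__u8 scan = SCAN_PAGE | SCAN_INQUIRY;
 *
 *	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 */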
2541
2542 /* Queue a command to an asynchronous HCI request */
2543 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen, void *param)
2544 {
2545         struct hci_dev *hdev = req->hdev;
2546         struct sk_buff *skb;
2547
2548         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2549
2550         /* If an error occurred during request building, there is no point in
2551          * queueing the HCI command. We can simply return.
2552          */
2553         if (req->err)
2554                 return;
2555
2556         skb = hci_prepare_cmd(hdev, opcode, plen, param);
2557         if (!skb) {
2558                 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2559                        hdev->name, opcode);
2560                 req->err = -ENOMEM;
2561                 return;
2562         }
2563
2564         if (skb_queue_empty(&req->cmd_q))
2565                 bt_cb(skb)->req.start = true;
2566
2567         skb_queue_tail(&req->cmd_q, skb);
2568 }
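
/* Request sketch (illustrative; example_complete is a placeholder): two
 * commands batched into one request, with a single completion callback
 * run once the last command in the batch completes:
 *
 *	static void example_complete(struct hci_dev *hdev, u8 status)
 *	{
 *		BT_DBG("%s status 0x%2.2x", hdev->name, status);
 *	}
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
 *	hci_req_add(&req, HCI_OP_READ_BD_ADDR, 0, NULL);
 *	err = hci_req_run(&req, example_complete);
 */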
2569
2570 /* Get data from the previously sent command */
2571 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2572 {
2573         struct hci_command_hdr *hdr;
2574
2575         if (!hdev->sent_cmd)
2576                 return NULL;
2577
2578         hdr = (void *) hdev->sent_cmd->data;
2579
2580         if (hdr->opcode != cpu_to_le16(opcode))
2581                 return NULL;
2582
2583         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2584
2585         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2586 }
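
/* Usage sketch (illustrative): an event handler matching a Command
 * Complete event back to the parameters the command was sent with:
 *
 *	__u8 *sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
 *	if (!sent)
 *		return;
 *	... at this point *sent is the original scan enable parameter ...
 */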
2587
2588 /* Send ACL data */
2589 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2590 {
2591         struct hci_acl_hdr *hdr;
2592         int len = skb->len;
2593
2594         skb_push(skb, HCI_ACL_HDR_SIZE);
2595         skb_reset_transport_header(skb);
2596         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2597         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2598         hdr->dlen   = cpu_to_le16(len);
2599 }
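
/* The 16-bit handle field packs the 12-bit connection handle with the
 * 2-bit packet boundary and 2-bit broadcast flags in the top nibble:
 * hci_handle_pack(h, f) == (h & 0x0fff) | (f << 12). For example, handle
 * 0x0042 sent with ACL_START (0x02) is encoded as 0x2042.
 */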
2600
2601 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
2602                           struct sk_buff *skb, __u16 flags)
2603 {
2604         struct hci_conn *conn = chan->conn;
2605         struct hci_dev *hdev = conn->hdev;
2606         struct sk_buff *list;
2607
2608         skb->len = skb_headlen(skb);
2609         skb->data_len = 0;
2610
2611         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2612
2613         switch (hdev->dev_type) {
2614         case HCI_BREDR:
2615                 hci_add_acl_hdr(skb, conn->handle, flags);
2616                 break;
2617         case HCI_AMP:
2618                 hci_add_acl_hdr(skb, chan->handle, flags);
2619                 break;
2620         default:
2621                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2622                 return;
2623         }
2624
2625         list = skb_shinfo(skb)->frag_list;
2626         if (!list) {
2627                 /* Non fragmented */
2628                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2629
2630                 skb_queue_tail(queue, skb);
2631         } else {
2632                 /* Fragmented */
2633                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2634
2635                 skb_shinfo(skb)->frag_list = NULL;
2636
2637                 /* Queue all fragments atomically */
2638                 spin_lock(&queue->lock);
2639
2640                 __skb_queue_tail(queue, skb);
2641
2642                 flags &= ~ACL_START;
2643                 flags |= ACL_CONT;
2644                 do {
2645                         skb = list; list = list->next;
2646
2647                         skb->dev = (void *) hdev;
2648                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2649                         hci_add_acl_hdr(skb, conn->handle, flags);
2650
2651                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2652
2653                         __skb_queue_tail(queue, skb);
2654                 } while (list);
2655
2656                 spin_unlock(&queue->lock);
2657         }
2658 }
2659
2660 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2661 {
2662         struct hci_dev *hdev = chan->conn->hdev;
2663
2664         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
2665
2666         skb->dev = (void *) hdev;
2667
2668         hci_queue_acl(chan, &chan->data_q, skb, flags);
2669
2670         queue_work(hdev->workqueue, &hdev->tx_work);
2671 }
2672
2673 /* Send SCO data */
2674 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2675 {
2676         struct hci_dev *hdev = conn->hdev;
2677         struct hci_sco_hdr hdr;
2678
2679         BT_DBG("%s len %d", hdev->name, skb->len);
2680
2681         hdr.handle = cpu_to_le16(conn->handle);
2682         hdr.dlen   = skb->len;
2683
2684         skb_push(skb, HCI_SCO_HDR_SIZE);
2685         skb_reset_transport_header(skb);
2686         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2687
2688         skb->dev = (void *) hdev;
2689         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2690
2691         skb_queue_tail(&conn->data_q, skb);
2692         queue_work(hdev->workqueue, &hdev->tx_work);
2693 }
2694
2695 /* ---- HCI TX task (outgoing data) ---- */
2696
2697 /* HCI Connection scheduler */
2698 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2699                                      int *quote)
2700 {
2701         struct hci_conn_hash *h = &hdev->conn_hash;
2702         struct hci_conn *conn = NULL, *c;
2703         unsigned int num = 0, min = ~0;
2704
2705         /* We don't have to lock the device here. Connections are always
2706          * added and removed with the TX task disabled. */
2707
2708         rcu_read_lock();
2709
2710         list_for_each_entry_rcu(c, &h->list, list) {
2711                 if (c->type != type || skb_queue_empty(&c->data_q))
2712                         continue;
2713
2714                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2715                         continue;
2716
2717                 num++;
2718
2719                 if (c->sent < min) {
2720                         min  = c->sent;
2721                         conn = c;
2722                 }
2723
2724                 if (hci_conn_num(hdev, type) == num)
2725                         break;
2726         }
2727
2728         rcu_read_unlock();
2729
2730         if (conn) {
2731                 int cnt, q;
2732
2733                 switch (conn->type) {
2734                 case ACL_LINK:
2735                         cnt = hdev->acl_cnt;
2736                         break;
2737                 case SCO_LINK:
2738                 case ESCO_LINK:
2739                         cnt = hdev->sco_cnt;
2740                         break;
2741                 case LE_LINK:
2742                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2743                         break;
2744                 default:
2745                         cnt = 0;
2746                         BT_ERR("Unknown link type");
2747                 }
2748
2749                 q = cnt / num;
2750                 *quote = q ? q : 1;
2751         } else
2752                 *quote = 0;
2753
2754         BT_DBG("conn %p quote %d", conn, *quote);
2755         return conn;
2756 }
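
/* Quota sketch (illustrative numbers): with sco_cnt == 8 free buffers
 * and 3 SCO connections holding queued data, the least recently served
 * connection is picked and given a quote of 8 / 3 == 2 packets for this
 * scheduling round (the quote never drops below 1).
 */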
2757
2758 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2759 {
2760         struct hci_conn_hash *h = &hdev->conn_hash;
2761         struct hci_conn *c;
2762
2763         BT_ERR("%s link tx timeout", hdev->name);
2764
2765         rcu_read_lock();
2766
2767         /* Kill stalled connections */
2768         list_for_each_entry_rcu(c, &h->list, list) {
2769                 if (c->type == type && c->sent) {
2770                         BT_ERR("%s killing stalled connection %pMR",
2771                                hdev->name, &c->dst);
2772                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
2773                 }
2774         }
2775
2776         rcu_read_unlock();
2777 }
2778
2779 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2780                                       int *quote)
2781 {
2782         struct hci_conn_hash *h = &hdev->conn_hash;
2783         struct hci_chan *chan = NULL;
2784         unsigned int num = 0, min = ~0, cur_prio = 0;
2785         struct hci_conn *conn;
2786         int cnt, q, conn_num = 0;
2787
2788         BT_DBG("%s", hdev->name);
2789
2790         rcu_read_lock();
2791
2792         list_for_each_entry_rcu(conn, &h->list, list) {
2793                 struct hci_chan *tmp;
2794
2795                 if (conn->type != type)
2796                         continue;
2797
2798                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2799                         continue;
2800
2801                 conn_num++;
2802
2803                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2804                         struct sk_buff *skb;
2805
2806                         if (skb_queue_empty(&tmp->data_q))
2807                                 continue;
2808
2809                         skb = skb_peek(&tmp->data_q);
2810                         if (skb->priority < cur_prio)
2811                                 continue;
2812
2813                         if (skb->priority > cur_prio) {
2814                                 num = 0;
2815                                 min = ~0;
2816                                 cur_prio = skb->priority;
2817                         }
2818
2819                         num++;
2820
2821                         if (conn->sent < min) {
2822                                 min  = conn->sent;
2823                                 chan = tmp;
2824                         }
2825                 }
2826
2827                 if (hci_conn_num(hdev, type) == conn_num)
2828                         break;
2829         }
2830
2831         rcu_read_unlock();
2832
2833         if (!chan)
2834                 return NULL;
2835
2836         switch (chan->conn->type) {
2837         case ACL_LINK:
2838                 cnt = hdev->acl_cnt;
2839                 break;
2840         case AMP_LINK:
2841                 cnt = hdev->block_cnt;
2842                 break;
2843         case SCO_LINK:
2844         case ESCO_LINK:
2845                 cnt = hdev->sco_cnt;
2846                 break;
2847         case LE_LINK:
2848                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2849                 break;
2850         default:
2851                 cnt = 0;
2852                 BT_ERR("Unknown link type");
2853         }
2854
2855         q = cnt / num;
2856         *quote = q ? q : 1;
2857         BT_DBG("chan %p quote %d", chan, *quote);
2858         return chan;
2859 }
2860
2861 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2862 {
2863         struct hci_conn_hash *h = &hdev->conn_hash;
2864         struct hci_conn *conn;
2865         int num = 0;
2866
2867         BT_DBG("%s", hdev->name);
2868
2869         rcu_read_lock();
2870
2871         list_for_each_entry_rcu(conn, &h->list, list) {
2872                 struct hci_chan *chan;
2873
2874                 if (conn->type != type)
2875                         continue;
2876
2877                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2878                         continue;
2879
2880                 num++;
2881
2882                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
2883                         struct sk_buff *skb;
2884
2885                         if (chan->sent) {
2886                                 chan->sent = 0;
2887                                 continue;
2888                         }
2889
2890                         if (skb_queue_empty(&chan->data_q))
2891                                 continue;
2892
2893                         skb = skb_peek(&chan->data_q);
2894                         if (skb->priority >= HCI_PRIO_MAX - 1)
2895                                 continue;
2896
2897                         skb->priority = HCI_PRIO_MAX - 1;
2898
2899                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2900                                skb->priority);
2901                 }
2902
2903                 if (hci_conn_num(hdev, type) == num)
2904                         break;
2905         }
2906
2907         rcu_read_unlock();
2909 }
2910
2911 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2912 {
2913         /* Calculate count of blocks used by this packet */
2914         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2915 }
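
/* Worked example (hypothetical controller): with a reported data block
 * length of 256 bytes, an ACL packet of 4 header + 600 payload bytes
 * consumes DIV_ROUND_UP(600, 256) == 3 controller blocks.
 */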
2916
2917 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
2918 {
2919         if (!test_bit(HCI_RAW, &hdev->flags)) {
2920                 /* ACL tx timeout must be longer than maximum
2921                  * link supervision timeout (40.9 seconds) */
2922                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
2923                                        HCI_ACL_TX_TIMEOUT))
2924                         hci_link_tx_to(hdev, ACL_LINK);
2925         }
2926 }
2927
2928 static void hci_sched_acl_pkt(struct hci_dev *hdev)
2929 {
2930         unsigned int cnt = hdev->acl_cnt;
2931         struct hci_chan *chan;
2932         struct sk_buff *skb;
2933         int quote;
2934
2935         __check_timeout(hdev, cnt);
2936
2937         while (hdev->acl_cnt &&
2938                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2939                 u32 priority = (skb_peek(&chan->data_q))->priority;
2940                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2941                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2942                                skb->len, skb->priority);
2943
2944                         /* Stop if priority has changed */
2945                         if (skb->priority < priority)
2946                                 break;
2947
2948                         skb = skb_dequeue(&chan->data_q);
2949
2950                         hci_conn_enter_active_mode(chan->conn,
2951                                                    bt_cb(skb)->force_active);
2952
2953                         hci_send_frame(skb);
2954                         hdev->acl_last_tx = jiffies;
2955
2956                         hdev->acl_cnt--;
2957                         chan->sent++;
2958                         chan->conn->sent++;
2959                 }
2960         }
2961
2962         if (cnt != hdev->acl_cnt)
2963                 hci_prio_recalculate(hdev, ACL_LINK);
2964 }
2965
2966 static void hci_sched_acl_blk(struct hci_dev *hdev)
2967 {
2968         unsigned int cnt = hdev->block_cnt;
2969         struct hci_chan *chan;
2970         struct sk_buff *skb;
2971         int quote;
2972         u8 type;
2973
2974         __check_timeout(hdev, cnt);
2975
2976         BT_DBG("%s", hdev->name);
2977
2978         if (hdev->dev_type == HCI_AMP)
2979                 type = AMP_LINK;
2980         else
2981                 type = ACL_LINK;
2982
2983         while (hdev->block_cnt > 0 &&
2984                (chan = hci_chan_sent(hdev, type, &quote))) {
2985                 u32 priority = (skb_peek(&chan->data_q))->priority;
2986                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2987                         int blocks;
2988
2989                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2990                                skb->len, skb->priority);
2991
2992                         /* Stop if priority has changed */
2993                         if (skb->priority < priority)
2994                                 break;
2995
2996                         skb = skb_dequeue(&chan->data_q);
2997
2998                         blocks = __get_blocks(hdev, skb);
2999                         if (blocks > hdev->block_cnt)
3000                                 return;
3001
3002                         hci_conn_enter_active_mode(chan->conn,
3003                                                    bt_cb(skb)->force_active);
3004
3005                         hci_send_frame(skb);
3006                         hdev->acl_last_tx = jiffies;
3007
3008                         hdev->block_cnt -= blocks;
3009                         quote -= blocks;
3010
3011                         chan->sent += blocks;
3012                         chan->conn->sent += blocks;
3013                 }
3014         }
3015
3016         if (cnt != hdev->block_cnt)
3017                 hci_prio_recalculate(hdev, type);
3018 }
3019
3020 static void hci_sched_acl(struct hci_dev *hdev)
3021 {
3022         BT_DBG("%s", hdev->name);
3023
3024         /* No ACL link over BR/EDR controller */
3025         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3026                 return;
3027
3028         /* No AMP link over AMP controller */
3029         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3030                 return;
3031
3032         switch (hdev->flow_ctl_mode) {
3033         case HCI_FLOW_CTL_MODE_PACKET_BASED:
3034                 hci_sched_acl_pkt(hdev);
3035                 break;
3036
3037         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3038                 hci_sched_acl_blk(hdev);
3039                 break;
3040         }
3041 }
3042
3043 /* Schedule SCO */
3044 static void hci_sched_sco(struct hci_dev *hdev)
3045 {
3046         struct hci_conn *conn;
3047         struct sk_buff *skb;
3048         int quote;
3049
3050         BT_DBG("%s", hdev->name);
3051
3052         if (!hci_conn_num(hdev, SCO_LINK))
3053                 return;
3054
3055         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3056                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3057                         BT_DBG("skb %p len %d", skb, skb->len);
3058                         hci_send_frame(skb);
3059
3060                         conn->sent++;
3061                         if (conn->sent == ~0)
3062                                 conn->sent = 0;
3063                 }
3064         }
3065 }
3066
3067 static void hci_sched_esco(struct hci_dev *hdev)
3068 {
3069         struct hci_conn *conn;
3070         struct sk_buff *skb;
3071         int quote;
3072
3073         BT_DBG("%s", hdev->name);
3074
3075         if (!hci_conn_num(hdev, ESCO_LINK))
3076                 return;
3077
3078         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3079                                                      &quote))) {
3080                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3081                         BT_DBG("skb %p len %d", skb, skb->len);
3082                         hci_send_frame(skb);
3083
3084                         conn->sent++;
3085                         if (conn->sent == ~0)
3086                                 conn->sent = 0;
3087                 }
3088         }
3089 }
3090
3091 static void hci_sched_le(struct hci_dev *hdev)
3092 {
3093         struct hci_chan *chan;
3094         struct sk_buff *skb;
3095         int quote, cnt, tmp;
3096
3097         BT_DBG("%s", hdev->name);
3098
3099         if (!hci_conn_num(hdev, LE_LINK))
3100                 return;
3101
3102         if (!test_bit(HCI_RAW, &hdev->flags)) {
3103                 /* LE tx timeout must be longer than maximum
3104                  * link supervision timeout (40.9 seconds) */
3105                 if (!hdev->le_cnt && hdev->le_pkts &&
3106                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
3107                         hci_link_tx_to(hdev, LE_LINK);
3108         }
3109
3110         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3111         tmp = cnt;
3112         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3113                 u32 priority = (skb_peek(&chan->data_q))->priority;
3114                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3115                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3116                                skb->len, skb->priority);
3117
3118                         /* Stop if priority has changed */
3119                         if (skb->priority < priority)
3120                                 break;
3121
3122                         skb = skb_dequeue(&chan->data_q);
3123
3124                         hci_send_frame(skb);
3125                         hdev->le_last_tx = jiffies;
3126
3127                         cnt--;
3128                         chan->sent++;
3129                         chan->conn->sent++;
3130                 }
3131         }
3132
3133         if (hdev->le_pkts)
3134                 hdev->le_cnt = cnt;
3135         else
3136                 hdev->acl_cnt = cnt;
3137
3138         if (cnt != tmp)
3139                 hci_prio_recalculate(hdev, LE_LINK);
3140 }
3141
3142 static void hci_tx_work(struct work_struct *work)
3143 {
3144         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3145         struct sk_buff *skb;
3146
3147         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
3148                hdev->sco_cnt, hdev->le_cnt);
3149
3150         /* Schedule queues and send stuff to HCI driver */
3151
3152         hci_sched_acl(hdev);
3153
3154         hci_sched_sco(hdev);
3155
3156         hci_sched_esco(hdev);
3157
3158         hci_sched_le(hdev);
3159
3160         /* Send next queued raw (unknown type) packet */
3161         while ((skb = skb_dequeue(&hdev->raw_q)))
3162                 hci_send_frame(skb);
3163 }
3164
3165 /* ----- HCI RX task (incoming data processing) ----- */
3166
3167 /* ACL data packet */
3168 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3169 {
3170         struct hci_acl_hdr *hdr = (void *) skb->data;
3171         struct hci_conn *conn;
3172         __u16 handle, flags;
3173
3174         skb_pull(skb, HCI_ACL_HDR_SIZE);
3175
3176         handle = __le16_to_cpu(hdr->handle);
3177         flags  = hci_flags(handle);
3178         handle = hci_handle(handle);
3179
3180         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3181                handle, flags);
3182
3183         hdev->stat.acl_rx++;
3184
3185         hci_dev_lock(hdev);
3186         conn = hci_conn_hash_lookup_handle(hdev, handle);
3187         hci_dev_unlock(hdev);
3188
3189         if (conn) {
3190                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3191
3192                 /* Send to upper protocol */
3193                 l2cap_recv_acldata(conn, skb, flags);
3194                 return;
3195         }
3196
3197         BT_ERR("%s ACL packet for unknown connection handle %d",
3198                hdev->name, handle);
3199
3200         kfree_skb(skb);
3201 }
3202
3203 /* SCO data packet */
3204 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3205 {
3206         struct hci_sco_hdr *hdr = (void *) skb->data;
3207         struct hci_conn *conn;
3208         __u16 handle;
3209
3210         skb_pull(skb, HCI_SCO_HDR_SIZE);
3211
3212         handle = __le16_to_cpu(hdr->handle);
3213
3214         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
3215
3216         hdev->stat.sco_rx++;
3217
3218         hci_dev_lock(hdev);
3219         conn = hci_conn_hash_lookup_handle(hdev, handle);
3220         hci_dev_unlock(hdev);
3221
3222         if (conn) {
3223                 /* Send to upper protocol */
3224                 sco_recv_scodata(conn, skb);
3225                 return;
3226         }
3227
3228         BT_ERR("%s SCO packet for unknown connection handle %d",
3229                hdev->name, handle);
3230
3231         kfree_skb(skb);
3232 }
3233
3234 static bool hci_req_is_complete(struct hci_dev *hdev)
3235 {
3236         struct sk_buff *skb;
3237
3238         skb = skb_peek(&hdev->cmd_q);
3239         if (!skb)
3240                 return true;
3241
3242         return bt_cb(skb)->req.start;
3243 }
3244
3245 static void hci_resend_last(struct hci_dev *hdev)
3246 {
3247         struct hci_command_hdr *sent;
3248         struct sk_buff *skb;
3249         u16 opcode;
3250
3251         if (!hdev->sent_cmd)
3252                 return;
3253
3254         sent = (void *) hdev->sent_cmd->data;
3255         opcode = __le16_to_cpu(sent->opcode);
3256         if (opcode == HCI_OP_RESET)
3257                 return;
3258
3259         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3260         if (!skb)
3261                 return;
3262
3263         skb_queue_head(&hdev->cmd_q, skb);
3264         queue_work(hdev->workqueue, &hdev->cmd_work);
3265 }
3266
3267 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
3268 {
3269         hci_req_complete_t req_complete = NULL;
3270         struct sk_buff *skb;
3271         unsigned long flags;
3272
3273         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3274
3275         /* If the completed command doesn't match the last one that was
3276          * sent we need to do special handling of it.
3277          */
3278         if (!hci_sent_cmd_data(hdev, opcode)) {
3279                 /* Some CSR based controllers generate a spontaneous
3280                  * reset complete event during init and any pending
3281                  * command will never be completed. In such a case we
3282                  * need to resend whatever was the last sent
3283                  * command.
3284                  */
3285                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3286                         hci_resend_last(hdev);
3287
3288                 return;
3289         }
3290
3291         /* If the command succeeded and there's still more commands in
3292          * this request the request is not yet complete.
3293          */
3294         if (!status && !hci_req_is_complete(hdev))
3295                 return;
3296
3297         /* If this was the last command in a request the complete
3298          * callback would be found in hdev->sent_cmd instead of the
3299          * command queue (hdev->cmd_q).
3300          */
3301         if (hdev->sent_cmd) {
3302                 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
3303                 if (req_complete)
3304                         goto call_complete;
3305         }
3306
3307         /* Remove all pending commands belonging to this request */
3308         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3309         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3310                 if (bt_cb(skb)->req.start) {
3311                         __skb_queue_head(&hdev->cmd_q, skb);
3312                         break;
3313                 }
3314
3315                 req_complete = bt_cb(skb)->req.complete;
3316                 kfree_skb(skb);
3317         }
3318         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3319
3320 call_complete:
3321         if (req_complete)
3322                 req_complete(hdev, status);
3323 }
3324
3325 void hci_req_cmd_status(struct hci_dev *hdev, u16 opcode, u8 status)
3326 {
3327         hci_req_complete_t req_complete = NULL;
3328
3329         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3330
3331         if (status) {
3332                 hci_req_cmd_complete(hdev, opcode, status);
3333                 return;
3334         }
3335
3336         /* No need to handle success status if there are more commands */
3337         if (!hci_req_is_complete(hdev))
3338                 return;
3339
3340         if (hdev->sent_cmd)
3341                 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
3342
3343         /* If the request doesn't have a complete callback or there
3344          * are other commands/requests in the hdev queue we consider
3345          * this request as completed.
3346          */
3347         if (!req_complete || !skb_queue_empty(&hdev->cmd_q))
3348                 hci_req_cmd_complete(hdev, opcode, status);
3349 }
3350
3351 static void hci_rx_work(struct work_struct *work)
3352 {
3353         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
3354         struct sk_buff *skb;
3355
3356         BT_DBG("%s", hdev->name);
3357
3358         while ((skb = skb_dequeue(&hdev->rx_q))) {
3359                 /* Send copy to monitor */
3360                 hci_send_to_monitor(hdev, skb);
3361
3362                 if (atomic_read(&hdev->promisc)) {
3363                         /* Send copy to the sockets */
3364                         hci_send_to_sock(hdev, skb);
3365                 }
3366
3367                 if (test_bit(HCI_RAW, &hdev->flags)) {
3368                         kfree_skb(skb);
3369                         continue;
3370                 }
3371
3372                 if (test_bit(HCI_INIT, &hdev->flags)) {
3373                         /* Don't process data packets in this state. */
3374                         switch (bt_cb(skb)->pkt_type) {
3375                         case HCI_ACLDATA_PKT:
3376                         case HCI_SCODATA_PKT:
3377                                 kfree_skb(skb);
3378                                 continue;
3379                         }
3380                 }
3381
3382                 /* Process frame */
3383                 switch (bt_cb(skb)->pkt_type) {
3384                 case HCI_EVENT_PKT:
3385                         BT_DBG("%s Event packet", hdev->name);
3386                         hci_event_packet(hdev, skb);
3387                         break;
3388
3389                 case HCI_ACLDATA_PKT:
3390                         BT_DBG("%s ACL data packet", hdev->name);
3391                         hci_acldata_packet(hdev, skb);
3392                         break;
3393
3394                 case HCI_SCODATA_PKT:
3395                         BT_DBG("%s SCO data packet", hdev->name);
3396                         hci_scodata_packet(hdev, skb);
3397                         break;
3398
3399                 default:
3400                         kfree_skb(skb);
3401                         break;
3402                 }
3403         }
3404 }
3405
3406 static void hci_cmd_work(struct work_struct *work)
3407 {
3408         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
3409         struct sk_buff *skb;
3410
3411         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3412                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
3413
3414         /* Send queued commands */
3415         if (atomic_read(&hdev->cmd_cnt)) {
3416                 skb = skb_dequeue(&hdev->cmd_q);
3417                 if (!skb)
3418                         return;
3419
3420                 kfree_skb(hdev->sent_cmd);
3421
3422                 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
3423                 if (hdev->sent_cmd) {
3424                         atomic_dec(&hdev->cmd_cnt);
3425                         hci_send_frame(skb);
3426                         if (test_bit(HCI_RESET, &hdev->flags))
3427                                 del_timer(&hdev->cmd_timer);
3428                         else
3429                                 mod_timer(&hdev->cmd_timer,
3430                                           jiffies + HCI_CMD_TIMEOUT);
3431                 } else {
3432                         skb_queue_head(&hdev->cmd_q, skb);
3433                         queue_work(hdev->workqueue, &hdev->cmd_work);
3434                 }
3435         }
3436 }
3437
3438 int hci_do_inquiry(struct hci_dev *hdev, u8 length)
3439 {
3440         /* General inquiry access code (GIAC) */
3441         u8 lap[3] = { 0x33, 0x8b, 0x9e };
3442         struct hci_cp_inquiry cp;
3443
3444         BT_DBG("%s", hdev->name);
3445
3446         if (test_bit(HCI_INQUIRY, &hdev->flags))
3447                 return -EINPROGRESS;
3448
3449         inquiry_cache_flush(hdev);
3450
3451         memset(&cp, 0, sizeof(cp));
3452         memcpy(&cp.lap, lap, sizeof(cp.lap));
3453         cp.length  = length;
3454
3455         return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
3456 }
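
/* Unit note (per the HCI Inquiry command definition): length is in
 * units of 1.28 seconds, so e.g. length == 0x08 keeps the inquiry
 * running for 10.24 seconds.
 */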
3457
3458 int hci_cancel_inquiry(struct hci_dev *hdev)
3459 {
3460         BT_DBG("%s", hdev->name);
3461
3462         if (!test_bit(HCI_INQUIRY, &hdev->flags))
3463                 return -EALREADY;
3464
3465         return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3466 }
3467
3468 u8 bdaddr_to_le(u8 bdaddr_type)
3469 {
3470         switch (bdaddr_type) {
3471         case BDADDR_LE_PUBLIC:
3472                 return ADDR_LE_DEV_PUBLIC;
3473
3474         default:
3475                 /* Fallback to LE Random address type */
3476                 return ADDR_LE_DEV_RANDOM;
3477         }
3478 }