2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
28 #include <net/bluetooth/hci.h>
30 /* HCI upper protocols */
31 #define HCI_PROTO_L2CAP 0
32 #define HCI_PROTO_SCO 1
34 /* HCI Core structures */
/* NOTE(review): this extract is incomplete -- struct braces and many
 * members are not visible, so the fields below are partial views. */
38 __u8 pscan_period_mode;
45 struct inquiry_entry {
/* One remembered inquiry result; entries form a singly linked list. */
46 struct inquiry_entry *next;
48 struct inquiry_data data;
51 struct inquiry_cache {
/* Head of the list of cached inquiry_entry items (lock/timestamp
 * members are referenced below but not visible in this extract). */
54 struct inquiry_entry *list;
57 struct hci_conn_hash {
/* List of all hci_conn objects belonging to one hci_dev (the lock and
 * per-type counters are on lines not visible in this extract). */
58 struct list_head list;
/* struct hci_dev members (partial view -- many fields missing here). */
65 struct list_head list;
/* Sniff-mode interval bounds used when entering sniff mode. */
89 __u16 sniff_min_interval;
90 __u16 sniff_max_interval;
/* Flow control: packet credits currently available per link type. */
100 unsigned int acl_pkts;
101 unsigned int sco_pkts;
/* Timestamps (jiffies) of the last transmission per queue. */
103 unsigned long cmd_last_tx;
104 unsigned long acl_last_tx;
105 unsigned long sco_last_tx;
/* Deferred-work contexts driving command, RX and TX processing. */
107 struct tasklet_struct cmd_task;
108 struct tasklet_struct rx_task;
109 struct tasklet_struct tx_task;
/* Frame queues: received frames, raw (driver-bound) frames, and
 * pending HCI commands. */
111 struct sk_buff_head rx_q;
112 struct sk_buff_head raw_q;
113 struct sk_buff_head cmd_q;
/* Command currently awaiting completion, plus reassembly buffers
 * (presumably one per incoming packet type -- confirm in
 * hci_recv_fragment()). */
115 struct sk_buff *sent_cmd;
116 struct sk_buff *reassembly[3];
/* Serialises synchronous HCI requests (see hci_req_lock below);
 * requesters sleep on req_wait_q until hci_req_complete(). */
118 struct semaphore req_lock;
119 wait_queue_head_t req_wait_q;
123 struct inquiry_cache inq_cache;
124 struct hci_conn_hash conn_hash;
126 struct hci_dev_stats stat;
/* Frames a driver queues to be sent during device initialisation. */
128 struct sk_buff_head driver_init;
/* Transport parent device for sysfs (set via SET_HCIDEV_DEV). */
135 struct device *parent;
/* Driver module pinned by hci_dev_hold()/released by hci_dev_put(). */
138 struct module *owner;
/* Driver-provided callbacks. */
140 int (*open)(struct hci_dev *hdev);
141 int (*close)(struct hci_dev *hdev);
142 int (*flush)(struct hci_dev *hdev);
143 int (*send)(struct sk_buff *skb);
144 void (*destruct)(struct hci_dev *hdev);
145 void (*notify)(struct hci_dev *hdev, unsigned int evt);
146 int (*ioctl)(struct hci_dev *hdev, unsigned int cmd, unsigned long arg);
/* struct hci_conn members (partial view in this extract). */
150 struct list_head list;
/* Outbound data queued for this connection. */
173 struct sk_buff_head data_q;
/* disc_timer arms delayed disconnect when the refcount drops to zero;
 * idle_timer is cancelled on every put for ACL links -- see
 * hci_conn_hold()/hci_conn_put() below. */
175 struct timer_list disc_timer;
176 struct timer_list idle_timer;
178 struct work_struct work;
/* Owning device. */
182 struct hci_dev *hdev;
/* Presumably the paired SCO<->ACL companion link -- confirm against
 * hci_conn users. */
187 struct hci_conn *link;
/* Globals shared across the HCI core (definitions live elsewhere).
 * hci_proto[] is indexed by HCI_PROTO_L2CAP / HCI_PROTO_SCO. */
190 extern struct hci_proto *hci_proto[];
191 extern struct list_head hci_dev_list;
192 extern struct list_head hci_cb_list;
/* rwlocks guarding the two lists above. */
193 extern rwlock_t hci_dev_list_lock;
194 extern rwlock_t hci_cb_list_lock;
196 /* ----- Inquiry cache ----- */
/* Maximum ages, in jiffies, before the cache / an entry is stale. */
197 #define INQUIRY_CACHE_AGE_MAX (HZ*30) /* 30 seconds */
198 #define INQUIRY_ENTRY_AGE_MAX (HZ*60) /* 60 seconds */
/*
 * Inquiry-cache locking helpers.
 *
 * Fix: macro arguments are fully parenthesized so an invocation with a
 * non-trivial expression (e.g. inquiry_cache_lock(&hdev->inq_cache))
 * expands correctly instead of mis-associating with the unary '&'.
 */
#define inquiry_cache_lock(c)		spin_lock(&(c)->lock)
#define inquiry_cache_unlock(c)		spin_unlock(&(c)->lock)
#define inquiry_cache_lock_bh(c)	spin_lock_bh(&(c)->lock)
#define inquiry_cache_unlock_bh(c)	spin_unlock_bh(&(c)->lock)
/* Initialise the per-device inquiry cache (body braces and any
 * list/timestamp reset lines are not visible in this extract). */
205 static inline void inquiry_cache_init(struct hci_dev *hdev)
207 struct inquiry_cache *c = &hdev->inq_cache;
208 spin_lock_init(&c->lock);
/* Return nonzero when no inquiry results are cached. */
212 static inline int inquiry_cache_empty(struct hci_dev *hdev)
214 struct inquiry_cache *c = &hdev->inq_cache;
215 return (c->list == NULL);
/* Jiffies elapsed since the whole cache was last (re)stamped. */
218 static inline long inquiry_cache_age(struct hci_dev *hdev)
220 struct inquiry_cache *c = &hdev->inq_cache;
221 return jiffies - c->timestamp;
/* Jiffies elapsed since this particular entry was recorded. */
224 static inline long inquiry_entry_age(struct inquiry_entry *e)
226 return jiffies - e->timestamp;
/* Look up a cached entry by Bluetooth address. */
229 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr);
/* Insert or refresh an entry from freshly received inquiry data. */
230 void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data);
232 /* ----- HCI Connections ----- */
/* Pending-operation flag bits for a connection (the enum head and any
 * earlier enumerators are not visible in this extract). */
235 HCI_CONN_ENCRYPT_PEND,
236 HCI_CONN_RSWITCH_PEND,
237 HCI_CONN_MODE_CHANGE_PEND,
/* Initialise the per-device connection list and its spinlock. */
240 static inline void hci_conn_hash_init(struct hci_dev *hdev)
242 struct hci_conn_hash *h = &hdev->conn_hash;
243 INIT_LIST_HEAD(&h->list);
244 spin_lock_init(&h->lock);
/* Add a connection to the device's connection list (the per-type
 * ACL/SCO counter updates are on lines not visible in this extract). */
249 static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
251 struct hci_conn_hash *h = &hdev->conn_hash;
252 list_add(&c->list, &h->list);
253 if (c->type == ACL_LINK)
/* Remove a connection; mirrors the per-type accounting in _add(). */
259 static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c)
261 struct hci_conn_hash *h = &hdev->conn_hash;
263 if (c->type == ACL_LINK)
/* Find a connection by HCI handle via a linear scan (the local
 * declarations and return statements are on lines not visible in this
 * extract; despite the "hash" name this walks a plain list). */
269 static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev,
272 struct hci_conn_hash *h = &hdev->conn_hash;
276 list_for_each(p, &h->list) {
277 c = list_entry(p, struct hci_conn, list);
278 if (c->handle == handle)
/* Find a connection by link type and peer Bluetooth address. */
284 static inline struct hci_conn *hci_conn_hash_lookup_ba(struct hci_dev *hdev,
285 __u8 type, bdaddr_t *ba)
287 struct hci_conn_hash *h = &hdev->conn_hash;
291 list_for_each(p, &h->list) {
292 c = list_entry(p, struct hci_conn, list);
293 if (c->type == type && !bacmp(&c->dst, ba))
/* Find a connection by link type and connection state. */
299 static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
300 __u8 type, __u16 state)
302 struct hci_conn_hash *h = &hdev->conn_hash;
306 list_for_each(p, &h->list) {
307 c = list_entry(p, struct hci_conn, list);
308 if (c->type == type && c->state == state)
/* Connection setup / teardown primitives (implemented elsewhere). */
314 void hci_acl_connect(struct hci_conn *conn);
315 void hci_acl_disconn(struct hci_conn *conn, __u8 reason);
316 void hci_add_sco(struct hci_conn *conn, __u16 handle);
317 void hci_setup_sync(struct hci_conn *conn, __u16 handle);
319 struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst);
320 int hci_conn_del(struct hci_conn *conn);
321 void hci_conn_hash_flush(struct hci_dev *hdev);
322 void hci_conn_check_pending(struct hci_dev *hdev);
/* NOTE(review): the parameter is named 'src' but sibling APIs use
 * 'dst' for the peer address -- confirm against the definition. */
324 struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *src);
325 int hci_conn_auth(struct hci_conn *conn);
326 int hci_conn_encrypt(struct hci_conn *conn);
327 int hci_conn_change_link_key(struct hci_conn *conn);
328 int hci_conn_switch_role(struct hci_conn *conn, uint8_t role);
/* Power-mode transitions for an established connection. */
330 void hci_conn_enter_active_mode(struct hci_conn *conn);
331 void hci_conn_enter_sniff_mode(struct hci_conn *conn);
/* Take a reference on a connection and cancel any pending delayed
 * disconnect armed by hci_conn_put(). */
333 static inline void hci_conn_hold(struct hci_conn *conn)
335 atomic_inc(&conn->refcnt);
336 del_timer(&conn->disc_timer);
/* Drop a reference; on the last put, arm disc_timer to tear the link
 * down after a grace period: HCI_DISCONN_TIMEOUT ms for a connected
 * ACL link, 10 ms otherwise. Several lines (the 'timeo' declaration,
 * else branches and closing braces) are not visible in this extract. */
339 static inline void hci_conn_put(struct hci_conn *conn)
341 if (atomic_dec_and_test(&conn->refcnt)) {
343 if (conn->type == ACL_LINK) {
344 del_timer(&conn->idle_timer);
345 if (conn->state == BT_CONNECTED) {
346 timeo = msecs_to_jiffies(HCI_DISCONN_TIMEOUT);
350 timeo = msecs_to_jiffies(10);
352 timeo = msecs_to_jiffies(10);
353 mod_timer(&conn->disc_timer, jiffies + timeo);
357 /* ----- HCI tasks ----- */
/* Kick the command-queue tasklet (runs in soft-IRQ context). */
358 static inline void hci_sched_cmd(struct hci_dev *hdev)
360 tasklet_schedule(&hdev->cmd_task);
/* Kick the receive-path tasklet. */
363 static inline void hci_sched_rx(struct hci_dev *hdev)
365 tasklet_schedule(&hdev->rx_task);
/* Kick the transmit-path tasklet. */
368 static inline void hci_sched_tx(struct hci_dev *hdev)
370 tasklet_schedule(&hdev->tx_task);
373 /* ----- HCI Devices ----- */
/* Drop a bare refcount (no module put); the action taken when it
 * reaches zero is on a line not visible in this extract. */
374 static inline void __hci_dev_put(struct hci_dev *d)
376 if (atomic_dec_and_test(&d->refcnt))
/* Public put: also releases the driver-module reference taken by
 * hci_dev_hold(). */
380 static inline void hci_dev_put(struct hci_dev *d)
383 module_put(d->owner);
/* Bare refcount get; returns the device for call chaining. */
386 static inline struct hci_dev *__hci_dev_hold(struct hci_dev *d)
388 atomic_inc(&d->refcnt);
/* Public hold: pins the driver module before taking a reference; the
 * failure path (presumably returning NULL) is not visible here. */
392 static inline struct hci_dev *hci_dev_hold(struct hci_dev *d)
394 if (try_module_get(d->owner))
395 return __hci_dev_hold(d);
/*
 * Device locking helpers.
 *
 * Fix: macro arguments are fully parenthesized so invocations with
 * non-trivial expressions expand correctly.
 */
#define hci_dev_lock(d)		spin_lock(&(d)->lock)
#define hci_dev_unlock(d)	spin_unlock(&(d)->lock)
#define hci_dev_lock_bh(d)	spin_lock_bh(&(d)->lock)
#define hci_dev_unlock_bh(d)	spin_unlock_bh(&(d)->lock)
/* Device lookup and lifecycle (implemented elsewhere in the core). */
404 struct hci_dev *hci_dev_get(int index);
405 struct hci_dev *hci_get_route(bdaddr_t *src, bdaddr_t *dst);
407 struct hci_dev *hci_alloc_dev(void);
408 void hci_free_dev(struct hci_dev *hdev);
409 int hci_register_dev(struct hci_dev *hdev);
410 int hci_unregister_dev(struct hci_dev *hdev);
411 int hci_suspend_dev(struct hci_dev *hdev);
412 int hci_resume_dev(struct hci_dev *hdev);
/* ioctl-style entry points keyed by device index. */
413 int hci_dev_open(__u16 dev);
414 int hci_dev_close(__u16 dev);
415 int hci_dev_reset(__u16 dev);
416 int hci_dev_reset_stat(__u16 dev);
/* User-space interfaces: 'arg' points into the caller's address
 * space (__user). */
417 int hci_dev_cmd(unsigned int cmd, void __user *arg);
418 int hci_get_dev_list(void __user *arg);
419 int hci_get_dev_info(void __user *arg);
420 int hci_get_conn_list(void __user *arg);
421 int hci_get_conn_info(struct hci_dev *hdev, void __user *arg);
422 int hci_inquiry(void __user *arg);
/* Dispatch a received HCI event packet. */
424 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);
426 /* Receive frame from HCI drivers */
/* Accept a frame only when the device exists and is UP or mid-INIT;
 * the reject path (presumably freeing the skb and returning an error)
 * is on lines not visible in this extract. */
427 static inline int hci_recv_frame(struct sk_buff *skb)
429 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
430 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
431 && !test_bit(HCI_INIT, &hdev->flags))) {
/* Mark direction and timestamp before queueing for the RX tasklet. */
437 bt_cb(skb)->incoming = 1;
440 __net_timestamp(skb);
442 /* Queue frame for rx task */
443 skb_queue_tail(&hdev->rx_q, skb);
/* Reassemble a fragmented frame of 'type' from a driver buffer. */
448 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count);
/* sysfs registration for devices and connections. */
450 int hci_register_sysfs(struct hci_dev *hdev);
451 void hci_unregister_sysfs(struct hci_dev *hdev);
452 void hci_conn_add_sysfs(struct hci_conn *conn);
453 void hci_conn_del_sysfs(struct hci_conn *conn);
/* Attach the transport device as the hci_dev's sysfs parent. */
455 #define SET_HCIDEV_DEV(hdev, pdev) ((hdev)->parent = (pdev))
457 /* ----- LMP capabilities ----- */
/* Test LMP feature bits advertised by the local controller. */
458 #define lmp_rswitch_capable(dev) ((dev)->features[0] & LMP_RSWITCH)
459 #define lmp_encrypt_capable(dev) ((dev)->features[0] & LMP_ENCRYPT)
460 #define lmp_sniff_capable(dev) ((dev)->features[0] & LMP_SNIFF)
461 #define lmp_sniffsubr_capable(dev) ((dev)->features[5] & LMP_SNIFF_SUBR)
462 #define lmp_esco_capable(dev) ((dev)->features[3] & LMP_ESCO)
464 /* ----- HCI protocols ----- */
/* struct hci_proto callbacks (the struct head is not visible in this
 * extract): events the core pushes up to L2CAP and SCO. */
472 int (*connect_ind) (struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type);
473 int (*connect_cfm) (struct hci_conn *conn, __u8 status);
474 int (*disconn_ind) (struct hci_conn *conn, __u8 reason);
475 int (*recv_acldata) (struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
476 int (*recv_scodata) (struct hci_conn *conn, struct sk_buff *skb);
477 int (*auth_cfm) (struct hci_conn *conn, __u8 status);
478 int (*encrypt_cfm) (struct hci_conn *conn, __u8 status, __u8 encrypt);
/* Ask both upper protocols whether to accept an incoming connection;
 * their answers are OR-ed into 'mask' (declared and returned on lines
 * not visible in this extract). */
481 static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type)
483 register struct hci_proto *hp;
486 hp = hci_proto[HCI_PROTO_L2CAP];
487 if (hp && hp->connect_ind)
488 mask |= hp->connect_ind(hdev, bdaddr, type);
490 hp = hci_proto[HCI_PROTO_SCO];
491 if (hp && hp->connect_ind)
492 mask |= hp->connect_ind(hdev, bdaddr, type);
/* Deliver a connect confirmation to both upper protocols, if they are
 * registered and implement the hook. */
497 static inline void hci_proto_connect_cfm(struct hci_conn *conn, __u8 status)
499 register struct hci_proto *hp;
501 hp = hci_proto[HCI_PROTO_L2CAP];
502 if (hp && hp->connect_cfm)
503 hp->connect_cfm(conn, status);
505 hp = hci_proto[HCI_PROTO_SCO];
506 if (hp && hp->connect_cfm)
507 hp->connect_cfm(conn, status);
/* Deliver a disconnect indication to both upper protocols. */
510 static inline void hci_proto_disconn_ind(struct hci_conn *conn, __u8 reason)
512 register struct hci_proto *hp;
514 hp = hci_proto[HCI_PROTO_L2CAP];
515 if (hp && hp->disconn_ind)
516 hp->disconn_ind(conn, reason);
518 hp = hci_proto[HCI_PROTO_SCO];
519 if (hp && hp->disconn_ind)
520 hp->disconn_ind(conn, reason);
/* Deliver an authentication result to both upper protocols. */
523 static inline void hci_proto_auth_cfm(struct hci_conn *conn, __u8 status)
525 register struct hci_proto *hp;
527 hp = hci_proto[HCI_PROTO_L2CAP];
528 if (hp && hp->auth_cfm)
529 hp->auth_cfm(conn, status);
531 hp = hci_proto[HCI_PROTO_SCO];
532 if (hp && hp->auth_cfm)
533 hp->auth_cfm(conn, status);
/* Deliver an encryption-change result to both upper protocols. */
536 static inline void hci_proto_encrypt_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt)
538 register struct hci_proto *hp;
540 hp = hci_proto[HCI_PROTO_L2CAP];
541 if (hp && hp->encrypt_cfm)
542 hp->encrypt_cfm(conn, status, encrypt);
544 hp = hci_proto[HCI_PROTO_SCO];
545 if (hp && hp->encrypt_cfm)
546 hp->encrypt_cfm(conn, status, encrypt);
/* Register / unregister an upper-protocol handler in hci_proto[]. */
549 int hci_register_proto(struct hci_proto *hproto);
550 int hci_unregister_proto(struct hci_proto *hproto);
552 /* ----- HCI callbacks ----- */
/* struct hci_cb (the struct head is not visible in this extract):
 * security-event callbacks chained on hci_cb_list and fanned out by
 * the hci_*_cfm helpers below. */
554 struct list_head list;
558 void (*auth_cfm) (struct hci_conn *conn, __u8 status);
559 void (*encrypt_cfm) (struct hci_conn *conn, __u8 status, __u8 encrypt);
560 void (*key_change_cfm) (struct hci_conn *conn, __u8 status);
561 void (*role_switch_cfm) (struct hci_conn *conn, __u8 status, __u8 role);
/* Notify the upper protocols, then every registered hci_cb, of an
 * authentication result (local declarations, the per-callback NULL
 * check and closing braces are not visible in this extract). */
564 static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status)
568 hci_proto_auth_cfm(conn, status);
570 read_lock_bh(&hci_cb_list_lock);
571 list_for_each(p, &hci_cb_list) {
572 struct hci_cb *cb = list_entry(p, struct hci_cb, list);
574 cb->auth_cfm(conn, status);
576 read_unlock_bh(&hci_cb_list_lock);
/* Same fan-out for an encryption change. */
579 static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt)
583 hci_proto_encrypt_cfm(conn, status, encrypt);
585 read_lock_bh(&hci_cb_list_lock);
586 list_for_each(p, &hci_cb_list) {
587 struct hci_cb *cb = list_entry(p, struct hci_cb, list);
589 cb->encrypt_cfm(conn, status, encrypt);
591 read_unlock_bh(&hci_cb_list_lock);
/* Fan a link-key change out to registered callbacks; note there is no
 * corresponding hci_proto hook for this event. */
594 static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status)
598 read_lock_bh(&hci_cb_list_lock);
599 list_for_each(p, &hci_cb_list) {
600 struct hci_cb *cb = list_entry(p, struct hci_cb, list);
601 if (cb->key_change_cfm)
602 cb->key_change_cfm(conn, status);
604 read_unlock_bh(&hci_cb_list_lock);
/* Fan a role-switch result out to registered callbacks. */
607 static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status, __u8 role)
611 read_lock_bh(&hci_cb_list_lock);
612 list_for_each(p, &hci_cb_list) {
613 struct hci_cb *cb = list_entry(p, struct hci_cb, list);
614 if (cb->role_switch_cfm)
615 cb->role_switch_cfm(conn, status, role);
617 read_unlock_bh(&hci_cb_list_lock);
/* Register / unregister an hci_cb on hci_cb_list. */
620 int hci_register_cb(struct hci_cb *hcb);
621 int hci_unregister_cb(struct hci_cb *hcb);
623 int hci_register_notifier(struct notifier_block *nb);
624 int hci_unregister_notifier(struct notifier_block *nb);
/* Send paths: raw HCI command, ACL data, SCO data. */
626 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param);
627 int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
628 int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb);
/* Presumably returns the parameter payload of hdev->sent_cmd when its
 * opcode matches -- confirm against the definition. */
630 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode);
632 void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data);
634 /* ----- HCI Sockets ----- */
635 void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb);
637 /* HCI info for socket */
638 #define hci_pi(sk) ((struct hci_pinfo *) sk)
/* struct hci_pinfo members (struct head not visible in this extract). */
642 struct hci_dev *hdev;
643 struct hci_filter filter;
647 /* HCI security filter */
648 #define HCI_SFLT_MAX_OGF 5
650 struct hci_sec_filter {
/* Per-OGF bitmap (4 x 32 bits) of OCFs permitted to unprivileged
 * sockets. */
653 __u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
656 /* ----- HCI requests ----- */
/* Completion states for a synchronous HCI request. */
657 #define HCI_REQ_DONE 0
658 #define HCI_REQ_PEND 1
659 #define HCI_REQ_CANCELED 2
/*
 * Serialise synchronous HCI requests on the device's req_lock
 * semaphore. Fix: the macro argument is parenthesized so expressions
 * such as hci_req_lock(get_dev()) expand correctly.
 */
#define hci_req_lock(d)		down(&(d)->req_lock)
#define hci_req_unlock(d)	up(&(d)->req_lock)
/* Complete the in-flight synchronous request with 'result'
 * (presumably wakes waiters on req_wait_q -- confirm in the core). */
664 void hci_req_complete(struct hci_dev *hdev, int result);
666 #endif /* __HCI_CORE_H */