/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
28 #include <net/bluetooth/hci.h>
30 /* HCI upper protocols */
31 #define HCI_PROTO_L2CAP 0
32 #define HCI_PROTO_SCO 1
34 /* HCI Core structures */
/* NOTE(review): this copy of the file is corrupted - original line numbers
 * are fused into the text and many lines (braces, fields) were dropped.
 * The struct declarations below are fragments; restore from upstream. */
/* Fragment of struct inquiry_data: fields decoded from an inquiry result. */
38 __u8 pscan_period_mode;
/* One cached discovered device: singly-linked node holding its inquiry data
 * (plus, per inquiry_entry_age() below, a jiffies timestamp). */
45 struct inquiry_entry {
46 struct inquiry_entry *next;
48 struct inquiry_data data;
/* Per-device cache of recent inquiry results; has a spinlock and a jiffies
 * timestamp (used by inquiry_cache_init()/inquiry_cache_age() below). */
51 struct inquiry_cache {
54 struct inquiry_entry *list;
/* Set of active connections on a device; list is guarded by the hash's
 * spinlock (see hci_conn_hash_init() below). Fragment - fields missing. */
57 struct hci_conn_hash {
58 struct list_head list;
/* NOTE(review): the line below belongs to a different, truncated struct -
 * presumably struct hci_dev's node on the global hci_dev_list. Confirm. */
65 struct list_head list;
/* Fragment of struct hci_dev (declaration line lost in this copy). */
/* Sniff-mode interval bounds (used with hci_conn_enter_sniff_mode()). */
82 __u16 sniff_min_interval;
83 __u16 sniff_max_interval;
/* Controller buffer accounting for outstanding ACL/SCO packets. */
93 unsigned int acl_pkts;
94 unsigned int sco_pkts;
/* jiffies of last transmission per traffic class. */
96 unsigned long cmd_last_tx;
97 unsigned long acl_last_tx;
98 unsigned long sco_last_tx;
/* Tasklets kicked by hci_sched_cmd/rx/tx() below. */
100 struct tasklet_struct cmd_task;
101 struct tasklet_struct rx_task;
102 struct tasklet_struct tx_task;
/* rx_q is filled by hci_recv_frame(); raw_q/cmd_q queue outbound frames. */
104 struct sk_buff_head rx_q;
105 struct sk_buff_head raw_q;
106 struct sk_buff_head cmd_q;
/* Command currently in flight (looked up by hci_sent_cmd_data()). */
108 struct sk_buff *sent_cmd;
/* Serialises HCI requests (hci_req_lock/unlock macros); waiters block on
 * req_wait_q until hci_req_complete() fires. */
110 struct semaphore req_lock;
111 wait_queue_head_t req_wait_q;
115 struct inquiry_cache inq_cache;
116 struct hci_conn_hash conn_hash;
118 struct hci_dev_stats stat;
/* Driver-supplied init frames, sent when the device is brought up -
 * presumably consumed during hci_dev_open(); confirm in hci_core.c. */
120 struct sk_buff_head driver_init;
/* sysfs representation; device parent set via SET_HCIDEV_DEV(). */
127 struct class_device class_dev;
/* Driver module; pinned by hci_dev_hold() via try_module_get(). */
129 struct module *owner;
/* Driver callbacks (open/close/flush/send mandatory in practice;
 * destruct/notify/ioctl optional - confirm against hci_core.c checks). */
131 int (*open)(struct hci_dev *hdev);
132 int (*close)(struct hci_dev *hdev);
133 int (*flush)(struct hci_dev *hdev);
134 int (*send)(struct sk_buff *skb);
135 void (*destruct)(struct hci_dev *hdev);
136 void (*notify)(struct hci_dev *hdev, unsigned int evt);
137 int (*ioctl)(struct hci_dev *hdev, unsigned int cmd, unsigned long arg);
/* Fragment of struct hci_conn (declaration line lost in this copy). */
/* Node on the owning device's conn_hash list. */
141 struct list_head list;
/* Outbound data queued for this connection. */
162 struct sk_buff_head data_q;
/* disc_timer schedules deferred disconnect (armed in hci_conn_put(),
 * cancelled in hci_conn_hold()); idle_timer is cancelled for ACL links
 * on final put. */
164 struct timer_list disc_timer;
165 struct timer_list idle_timer;
/* Owning device. */
167 struct hci_dev *hdev;
/* Companion connection - presumably the ACL link backing a SCO link;
 * confirm against hci_conn.c. */
172 struct hci_conn *link;
/* Global registries (defined in hci_core.c): registered upper protocols,
 * all HCI devices, and registered callback modules, with their rwlocks. */
175 extern struct hci_proto *hci_proto[];
176 extern struct list_head hci_dev_list;
177 extern struct list_head hci_cb_list;
178 extern rwlock_t hci_dev_list_lock;
179 extern rwlock_t hci_cb_list_lock;
181 /* ----- Inquiry cache ----- */
182 #define INQUIRY_CACHE_AGE_MAX (HZ*30) // 30 seconds
183 #define INQUIRY_ENTRY_AGE_MAX (HZ*60) // 60 seconds
/* Inquiry-cache locking; the _bh variants also disable bottom halves.
 * The macro argument is parenthesised so any pointer-valued expression
 * (not just a simple identifier) is expanded safely. */
#define inquiry_cache_lock(c)		spin_lock(&(c)->lock)
#define inquiry_cache_unlock(c)		spin_unlock(&(c)->lock)
#define inquiry_cache_lock_bh(c)	spin_lock_bh(&(c)->lock)
#define inquiry_cache_unlock_bh(c)	spin_unlock_bh(&(c)->lock)
/* Initialise hdev's inquiry cache (lock, and presumably the empty list -
 * the opening brace and remaining body lines are missing in this copy). */
190 static inline void inquiry_cache_init(struct hci_dev *hdev)
192 struct inquiry_cache *c = &hdev->inq_cache;
193 spin_lock_init(&c->lock);
/* Return non-zero when the inquiry cache holds no entries.
 * (Braces lost in this copy of the file.) */
197 static inline int inquiry_cache_empty(struct hci_dev *hdev)
199 struct inquiry_cache *c = &hdev->inq_cache;
200 return (c->list == NULL);
/* Jiffies elapsed since the cache was last (re)stamped; compared against
 * INQUIRY_CACHE_AGE_MAX by callers. (Braces lost in this copy.) */
203 static inline long inquiry_cache_age(struct hci_dev *hdev)
205 struct inquiry_cache *c = &hdev->inq_cache;
206 return jiffies - c->timestamp;
/* Jiffies elapsed since this entry was cached; compared against
 * INQUIRY_ENTRY_AGE_MAX by callers. (Braces lost in this copy.) */
209 static inline long inquiry_entry_age(struct inquiry_entry *e)
211 return jiffies - e->timestamp;
214 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr);
215 void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data);
217 /* ----- HCI Connections ----- */
/* Pending-operation flag bits for a hci_conn: encryption, role switch and
 * mode change awaiting controller completion. NOTE(review): the enum
 * opener (and likely HCI_CONN_AUTH_PEND) is missing in this copy. */
220 HCI_CONN_ENCRYPT_PEND,
221 HCI_CONN_RSWITCH_PEND,
222 HCI_CONN_MODE_CHANGE_PEND,
/* Initialise the per-device connection list and its spinlock.
 * (Braces, and presumably the connection counters' init, lost in copy.) */
225 static inline void hci_conn_hash_init(struct hci_dev *hdev)
227 struct hci_conn_hash *h = &hdev->conn_hash;
228 INIT_LIST_HEAD(&h->list);
229 spin_lock_init(&h->lock);
/* Link a new connection into the device's hash. The ACL_LINK branch body
 * is missing here - presumably it bumps an acl counter (SCO counterpart
 * likewise); caller must hold the hash lock. */
234 static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
236 struct hci_conn_hash *h = &hdev->conn_hash;
237 list_add(&c->list, &h->list);
238 if (c->type == ACL_LINK)
/* Unlink a connection from the device's hash (the list_del and the
 * counter-decrement branch bodies are missing in this copy). */
244 static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c)
246 struct hci_conn_hash *h = &hdev->conn_hash;
248 if (c->type == ACL_LINK)
/* Find the connection with the given HCI handle, or NULL.
 * (Parameter line for 'handle', local declarations and the return
 * statements are missing in this copy.) Caller must hold the hash lock. */
254 static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev,
257 struct hci_conn_hash *h = &hdev->conn_hash;
261 list_for_each(p, &h->list) {
262 c = list_entry(p, struct hci_conn, list);
263 if (c->handle == handle)
/* Find the connection of the given link type to the given Bluetooth
 * address, or NULL. (Local declarations and return statements missing in
 * this copy.) Caller must hold the hash lock. */
269 static inline struct hci_conn *hci_conn_hash_lookup_ba(struct hci_dev *hdev,
270 __u8 type, bdaddr_t *ba)
272 struct hci_conn_hash *h = &hdev->conn_hash;
276 list_for_each(p, &h->list) {
277 c = list_entry(p, struct hci_conn, list);
278 if (c->type == type && !bacmp(&c->dst, ba))
284 void hci_acl_disconn(struct hci_conn *conn, __u8 reason);
285 void hci_add_sco(struct hci_conn *conn, __u16 handle);
287 struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst);
288 int hci_conn_del(struct hci_conn *conn);
289 void hci_conn_hash_flush(struct hci_dev *hdev);
291 struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *src);
292 int hci_conn_auth(struct hci_conn *conn);
293 int hci_conn_encrypt(struct hci_conn *conn);
294 int hci_conn_change_link_key(struct hci_conn *conn);
295 int hci_conn_switch_role(struct hci_conn *conn, uint8_t role);
297 void hci_conn_enter_active_mode(struct hci_conn *conn);
298 void hci_conn_enter_sniff_mode(struct hci_conn *conn);
/* Take a reference on the connection and cancel any pending deferred
 * disconnect armed by hci_conn_put(). (Braces lost in this copy.) */
300 static inline void hci_conn_hold(struct hci_conn *conn)
302 atomic_inc(&conn->refcnt);
303 del_timer(&conn->disc_timer);
/* Drop a reference; on the last one, schedule a deferred disconnect:
 * ACL links wait HCI_DISCONN_TIMEOUT (their idle timer is cancelled),
 * other links (SCO) only 10ms. NOTE(review): the declaration of 'timeo'
 * and an else-branch appear to be missing in this copy - the visible
 * ACL branch presumably also shortens timeo when refcnt went negative. */
306 static inline void hci_conn_put(struct hci_conn *conn)
308 if (atomic_dec_and_test(&conn->refcnt)) {
310 if (conn->type == ACL_LINK) {
311 timeo = msecs_to_jiffies(HCI_DISCONN_TIMEOUT);
314 del_timer(&conn->idle_timer);
316 timeo = msecs_to_jiffies(10);
317 mod_timer(&conn->disc_timer, jiffies + timeo);
321 /* ----- HCI tasks ----- */
/* Kick the command-queue tasklet. (Braces lost in this copy.) */
322 static inline void hci_sched_cmd(struct hci_dev *hdev)
324 tasklet_schedule(&hdev->cmd_task);
/* Kick the receive tasklet that drains hdev->rx_q. */
327 static inline void hci_sched_rx(struct hci_dev *hdev)
329 tasklet_schedule(&hdev->rx_task);
/* Kick the transmit tasklet. */
332 static inline void hci_sched_tx(struct hci_dev *hdev)
334 tasklet_schedule(&hdev->tx_task);
337 /* ----- HCI Devices ----- */
/* Drop a device reference without touching the driver module; the body of
 * the last-reference branch (presumably hci_dev_put/destruct handling) is
 * missing in this copy. */
338 static inline void __hci_dev_put(struct hci_dev *d)
340 if (atomic_dec_and_test(&d->refcnt))
/* Drop a device reference and release the driver-module pin taken by
 * hci_dev_hold(). (The __hci_dev_put() call line is missing in copy.) */
344 static inline void hci_dev_put(struct hci_dev *d)
347 module_put(d->owner);
/* Take a device reference; returns d (return line missing in copy). */
350 static inline struct hci_dev *__hci_dev_hold(struct hci_dev *d)
352 atomic_inc(&d->refcnt);
/* Take a device reference while pinning the driver module; returns NULL
 * (line missing in copy) if the module is going away. */
356 static inline struct hci_dev *hci_dev_hold(struct hci_dev *d)
358 if (try_module_get(d->owner))
359 return __hci_dev_hold(d);
/* Device state locking; the _bh variants also disable bottom halves.
 * The macro argument is parenthesised so any pointer-valued expression
 * (not just a simple identifier) is expanded safely. */
#define hci_dev_lock(d)		spin_lock(&(d)->lock)
#define hci_dev_unlock(d)	spin_unlock(&(d)->lock)
#define hci_dev_lock_bh(d)	spin_lock_bh(&(d)->lock)
#define hci_dev_unlock_bh(d)	spin_unlock_bh(&(d)->lock)
368 struct hci_dev *hci_dev_get(int index);
369 struct hci_dev *hci_get_route(bdaddr_t *src, bdaddr_t *dst);
371 struct hci_dev *hci_alloc_dev(void);
372 void hci_free_dev(struct hci_dev *hdev);
373 int hci_register_dev(struct hci_dev *hdev);
374 int hci_unregister_dev(struct hci_dev *hdev);
375 int hci_suspend_dev(struct hci_dev *hdev);
376 int hci_resume_dev(struct hci_dev *hdev);
377 int hci_dev_open(__u16 dev);
378 int hci_dev_close(__u16 dev);
379 int hci_dev_reset(__u16 dev);
380 int hci_dev_reset_stat(__u16 dev);
381 int hci_dev_cmd(unsigned int cmd, void __user *arg);
382 int hci_get_dev_list(void __user *arg);
383 int hci_get_dev_info(void __user *arg);
384 int hci_get_conn_list(void __user *arg);
385 int hci_get_conn_info(struct hci_dev *hdev, void __user *arg);
386 int hci_inquiry(void __user *arg);
388 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);
390 /* Receive frame from HCI drivers */
/* Entry point for drivers handing a frame up the stack: rejects frames
 * unless the device is up or initialising, marks the skb inbound,
 * timestamps it and queues it on rx_q. NOTE(review): the error path
 * (presumably kfree_skb + return -ENXIO), the closing braces and the
 * final hci_sched_rx()/return are missing in this copy. */
391 static inline int hci_recv_frame(struct sk_buff *skb)
393 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
394 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
395 && !test_bit(HCI_INIT, &hdev->flags))) {
401 bt_cb(skb)->incoming = 1;
404 __net_timestamp(skb);
406 /* Queue frame for rx task */
407 skb_queue_tail(&hdev->rx_q, skb);
412 int hci_register_sysfs(struct hci_dev *hdev);
413 void hci_unregister_sysfs(struct hci_dev *hdev);
415 #define SET_HCIDEV_DEV(hdev, pdev) ((hdev)->class_dev.dev = (pdev))
417 /* ----- LMP capabilities ----- */
418 #define lmp_rswitch_capable(dev) ((dev)->features[0] & LMP_RSWITCH)
419 #define lmp_encrypt_capable(dev) ((dev)->features[0] & LMP_ENCRYPT)
420 #define lmp_sniff_capable(dev) ((dev)->features[0] & LMP_SNIFF)
421 #define lmp_sniffsubr_capable(dev) ((dev)->features[5] & LMP_SNIFF_SUBR)
423 /* ----- HCI protocols ----- */
/* Fragment of struct hci_proto: the callback table an upper protocol
 * (L2CAP or SCO) installs via hci_register_proto(); invoked by the
 * hci_proto_* dispatch helpers below. (Declaration/name/id fields are
 * missing in this copy.) */
431 int (*connect_ind) (struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type);
432 int (*connect_cfm) (struct hci_conn *conn, __u8 status);
433 int (*disconn_ind) (struct hci_conn *conn, __u8 reason);
434 int (*recv_acldata) (struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
435 int (*recv_scodata) (struct hci_conn *conn, struct sk_buff *skb);
436 int (*auth_cfm) (struct hci_conn *conn, __u8 status);
437 int (*encrypt_cfm) (struct hci_conn *conn, __u8 status);
/* Ask both registered protocols whether to accept an incoming connection,
 * OR-ing their responses into a mask. (The declaration/initialisation of
 * 'mask' and the final return are missing in this copy.) */
440 static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type)
442 register struct hci_proto *hp;
445 hp = hci_proto[HCI_PROTO_L2CAP];
446 if (hp && hp->connect_ind)
447 mask |= hp->connect_ind(hdev, bdaddr, type);
449 hp = hci_proto[HCI_PROTO_SCO];
450 if (hp && hp->connect_ind)
451 mask |= hp->connect_ind(hdev, bdaddr, type);
/* Notify both registered protocols that a connect attempt completed with
 * the given HCI status. (Braces lost in this copy.) */
456 static inline void hci_proto_connect_cfm(struct hci_conn *conn, __u8 status)
458 register struct hci_proto *hp;
460 hp = hci_proto[HCI_PROTO_L2CAP];
461 if (hp && hp->connect_cfm)
462 hp->connect_cfm(conn, status);
464 hp = hci_proto[HCI_PROTO_SCO];
465 if (hp && hp->connect_cfm)
466 hp->connect_cfm(conn, status);
/* Notify both registered protocols that the connection was disconnected
 * for the given reason. (Braces lost in this copy.) */
469 static inline void hci_proto_disconn_ind(struct hci_conn *conn, __u8 reason)
471 register struct hci_proto *hp;
473 hp = hci_proto[HCI_PROTO_L2CAP];
474 if (hp && hp->disconn_ind)
475 hp->disconn_ind(conn, reason);
477 hp = hci_proto[HCI_PROTO_SCO];
478 if (hp && hp->disconn_ind)
479 hp->disconn_ind(conn, reason);
/* Notify both registered protocols that authentication completed with the
 * given HCI status. (Braces lost in this copy.) */
482 static inline void hci_proto_auth_cfm(struct hci_conn *conn, __u8 status)
484 register struct hci_proto *hp;
486 hp = hci_proto[HCI_PROTO_L2CAP];
487 if (hp && hp->auth_cfm)
488 hp->auth_cfm(conn, status);
490 hp = hci_proto[HCI_PROTO_SCO];
491 if (hp && hp->auth_cfm)
492 hp->auth_cfm(conn, status);
/* Notify both registered protocols that an encryption change completed
 * with the given HCI status. (Braces lost in this copy.) */
495 static inline void hci_proto_encrypt_cfm(struct hci_conn *conn, __u8 status)
497 register struct hci_proto *hp;
499 hp = hci_proto[HCI_PROTO_L2CAP];
500 if (hp && hp->encrypt_cfm)
501 hp->encrypt_cfm(conn, status);
503 hp = hci_proto[HCI_PROTO_SCO];
504 if (hp && hp->encrypt_cfm)
505 hp->encrypt_cfm(conn, status);
508 int hci_register_proto(struct hci_proto *hproto);
509 int hci_unregister_proto(struct hci_proto *hproto);
511 /* ----- HCI callbacks ----- */
/* Fragment of struct hci_cb: a module's event-callback table, registered
 * via hci_register_cb() onto the global hci_cb_list and walked by the
 * hci_*_cfm() helpers below. (Declaration/name fields missing in copy.) */
513 struct list_head list;
517 void (*auth_cfm) (struct hci_conn *conn, __u8 status);
518 void (*encrypt_cfm) (struct hci_conn *conn, __u8 status, __u8 encrypt);
519 void (*key_change_cfm) (struct hci_conn *conn, __u8 status);
520 void (*role_switch_cfm) (struct hci_conn *conn, __u8 status, __u8 role);
/* Fan out an authentication-complete event: first to the protocols, then
 * to every registered callback module under the cb-list read lock.
 * NOTE(review): the 'if (cb->auth_cfm)' guard present in the sibling
 * helpers appears to have been dropped in this truncated copy - confirm
 * against upstream before assuming the callback is mandatory. */
523 static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status)
527 hci_proto_auth_cfm(conn, status);
529 read_lock_bh(&hci_cb_list_lock);
530 list_for_each(p, &hci_cb_list) {
531 struct hci_cb *cb = list_entry(p, struct hci_cb, list);
533 cb->auth_cfm(conn, status);
535 read_unlock_bh(&hci_cb_list_lock);
/* Fan out an encryption-change event: first to the protocols, then to
 * every registered callback module under the cb-list read lock.
 * NOTE(review): the per-callback NULL guard seems lost in this copy,
 * as are the list_head declaration and closing braces. */
538 static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt)
542 hci_proto_encrypt_cfm(conn, status);
544 read_lock_bh(&hci_cb_list_lock);
545 list_for_each(p, &hci_cb_list) {
546 struct hci_cb *cb = list_entry(p, struct hci_cb, list);
548 cb->encrypt_cfm(conn, status, encrypt);
550 read_unlock_bh(&hci_cb_list_lock);
/* Fan out a link-key-change event to every registered callback module
 * (NULL-checked) under the cb-list read lock. (Braces and the list_head
 * declaration lost in this copy.) */
553 static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status)
557 read_lock_bh(&hci_cb_list_lock);
558 list_for_each(p, &hci_cb_list) {
559 struct hci_cb *cb = list_entry(p, struct hci_cb, list);
560 if (cb->key_change_cfm)
561 cb->key_change_cfm(conn, status);
563 read_unlock_bh(&hci_cb_list_lock);
/* Fan out a role-switch-complete event (new role in 'role') to every
 * registered callback module (NULL-checked) under the cb-list read lock.
 * (Braces and the list_head declaration lost in this copy.) */
566 static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status, __u8 role)
570 read_lock_bh(&hci_cb_list_lock);
571 list_for_each(p, &hci_cb_list) {
572 struct hci_cb *cb = list_entry(p, struct hci_cb, list);
573 if (cb->role_switch_cfm)
574 cb->role_switch_cfm(conn, status, role);
576 read_unlock_bh(&hci_cb_list_lock);
579 int hci_register_cb(struct hci_cb *hcb);
580 int hci_unregister_cb(struct hci_cb *hcb);
582 int hci_register_notifier(struct notifier_block *nb);
583 int hci_unregister_notifier(struct notifier_block *nb);
585 int hci_send_cmd(struct hci_dev *hdev, __u16 ogf, __u16 ocf, __u32 plen, void *param);
586 int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
587 int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb);
589 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 ogf, __u16 ocf);
591 void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data);
593 /* ----- HCI Sockets ----- */
594 void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb);
596 /* HCI info for socket */
597 #define hci_pi(sk) ((struct hci_pinfo *) sk)
/* Fragment of struct hci_pinfo: per-HCI-socket state - bound device and
 * the socket's event/packet filter. (Declaration line missing in copy.) */
601 struct hci_dev *hdev;
602 struct hci_filter filter;
606 /* HCI security filter */
/* Highest OGF covered by the unprivileged-socket command filter. */
607 #define HCI_SFLT_MAX_OGF 5
/* Per-OGF bitmask of OCFs permitted to unprivileged sockets; four __u32
 * words give 128 OCF bits per OGF. (Other fields missing in this copy.) */
609 struct hci_sec_filter {
612 __u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
615 /* ----- HCI requests ----- */
616 #define HCI_REQ_DONE 0
617 #define HCI_REQ_PEND 1
618 #define HCI_REQ_CANCELED 2
/* Serialise synchronous HCI request processing on a device via its
 * req_lock semaphore. The macro argument is parenthesised so any
 * pointer-valued expression is expanded safely. */
#define hci_req_lock(d)		down(&(d)->req_lock)
#define hci_req_unlock(d)	up(&(d)->req_lock)
623 void hci_req_complete(struct hci_dev *hdev, int result);
625 #endif /* __HCI_CORE_H */