/*
 * hci_core.c — Bluetooth HCI core.
 * (Code-viewer page banner and "font size" label removed; they were
 *  web-page chrome, not part of the source file.)
 */
{
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	struct list_head *p;
	int n = 0, size, err;
	__u16 dev_num;

	/* Userspace passes the capacity of its buffer (in device entries)
	 * in the leading __u16 of the request structure. */
	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Reject zero and anything larger than two pages' worth of entries,
	 * bounding the kmalloc below. */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	if (!(dl = kmalloc(size, GFP_KERNEL)))
		return -ENOMEM;

	dr = dl->dev_req;

	/* Snapshot id/flags of up to dev_num registered devices. */
	read_lock_bh(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *hdev;
		hdev = list_entry(p, struct hci_dev, list);
		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;
		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	dl->dev_num = n;
	/* Copy back only the entries actually filled in. */
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

/*
 * hci_get_dev_info - fill in a hci_dev_info structure for one device.
 * @arg: userspace pointer to a struct hci_dev_info; dev_id selects the
 *       device on input, the rest is output.
 *
 * Returns 0 on success, -EFAULT on bad user pointer, -ENODEV if the
 * requested device id is not registered.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	/* hci_dev_get() takes a reference; dropped via hci_dev_put() below. */
	if (!(hdev = hci_dev_get(di.dev_id)))
		return -ENODEV;

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	di.type = hdev->type;
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

/* Alloc HCI device.
 * Returns a zeroed struct hci_dev, or NULL on allocation failure.
 * The caller is expected to fill in the callbacks and register it. */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kmalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	memset(hdev, 0, sizeof(struct hci_dev));

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device.
 * The memory is not freed directly: dropping the class device reference
 * lets the class release callback do the actual free. */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via class release */
	class_device_put(&hdev->class_dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device.
 * Assigns the first free "hciN" id, initialises locks, tasklets and
 * queues, adds the device to hci_dev_list, then registers sysfs entries
 * and notifies listeners. Returns the assigned id, or -EINVAL if the
 * driver did not provide the mandatory open/close/destruct callbacks. */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int id = 0;

	BT_DBG("%p name %s type %d owner %p", hdev, hdev->name, hdev->type, hdev->owner);

	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id.
	 * The list is kept sorted by id; the first gap (or the end of the
	 * list) yields the id, and 'head' ends up at the insertion point. */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);
	init_MUTEX(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hci_register_sysfs(hdev);

	hci_notify(hdev, HCI_DEV_REG);

	return id;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device.
 * Reverse of hci_register_dev(): remove sysfs entries, unlink from the
 * device list, shut the device down, notify listeners and drop the
 * registration reference. Always returns 0. */
int hci_unregister_dev(struct hci_dev *hdev)
{
	BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type);

	hci_unregister_sysfs(hdev);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	hci_notify(hdev, HCI_DEV_UNREG);

	__hci_dev_put(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device: only notifies listeners. */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device: only notifies listeners. */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (!hci_proto[hp->id])
		hci_proto[hp->id] = hp;
	else
		err = -EEXIST;	/* slot already taken */

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_register_proto);

/* Remove a protocol from its hci_proto[] slot; -ENOENT if it was
 * not registered. */
int hci_unregister_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (hci_proto[hp->id])
		hci_proto[hp->id] = NULL;
	else
		err = -ENOENT;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_unregister_proto);

/* Hand one frame to the driver's send callback.
 * skb->dev carries the hci_dev pointer (cast through void *, set by the
 * queueing paths below). In promiscuous mode a timestamped copy is also
 * delivered to HCI sockets before transmission. Consumes the skb. */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, skb->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		do_gettimeofday(&skb->stamp);

		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}

/* Send HCI command.
 * Builds an HCI command packet (header: opcode packed from ogf/ocf plus
 * parameter length, followed by plen bytes of param), queues it on
 * cmd_q and kicks the command tasklet. Returns 0 or -ENOMEM. */
int hci_send_cmd(struct hci_dev *hdev, __u16 ogf, __u16 ocf, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s ogf 0x%x ocf 0x%x plen %d", hdev->name, ogf, ocf, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s Can't allocate memory for HCI command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = __cpu_to_le16(hci_opcode_pack(ogf, ocf));
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	skb->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;
	skb_queue_tail(&hdev->cmd_q, skb);
	hci_sched_cmd(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_send_cmd);

/* Get data from the previously sent command.
 * Returns a pointer to the parameter area of hdev->sent_cmd if that
 * command matches the given ogf/ocf, otherwise NULL. */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 ogf, __u16 ocf)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != __cpu_to_le16(hci_opcode_pack(ogf, ocf)))
		return NULL;

	BT_DBG("%s ogf 0x%x ocf 0x%x", hdev->name, ogf, ocf);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */

/* Prepend an ACL header (handle packed with the packet-boundary/broadcast
 * flags, plus the payload length) and remember its position in h.raw. */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	hdr = (struct hci_acl_hdr *) skb_push(skb, HCI_ACL_HDR_SIZE);
	hdr->handle = __cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = __cpu_to_le16(len);

	skb->h.raw = (void *) hdr;
}

/* Queue an ACL frame for a connection and kick the TX tasklet.
 * The head skb gets ACL_START; if it carries a frag_list the fragments
 * get ACL_CONT headers and the whole chain is queued atomically under
 * the data_q lock so fragments of different frames cannot interleave. */
int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

	skb->dev = (void *) hdev;
	skb->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);

	if (!(list = skb_shinfo(skb)->frag_list)) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			skb->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	hci_sched_tx(hdev);
	return 0;
}
EXPORT_SYMBOL(hci_send_acl);

/* Send SCO data.
 * Rejects frames larger than the device SCO MTU, prepends the SCO
 * header, queues on the connection and kicks the TX tasklet. */
int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	if (skb->len > hdev->sco_mtu) {
		kfree_skb(skb);
		return -EINVAL;
	}

	hdr.handle = __cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb->h.raw = skb_push(skb, HCI_SCO_HDR_SIZE);
	memcpy(skb->h.raw, &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	skb->pkt_type = HCI_SCODATA_PKT;
	skb_queue_tail(&conn->data_q, skb);
	hci_sched_tx(hdev);
	return 0;
}
EXPORT_SYMBOL(hci_send_sco);

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler.
 * Picks the connected link of the given type with the fewest in-flight
 * packets (fairness) that has queued data; *quote receives that link's
 * share of the available controller buffers (at least 1), or 0 when
 * nothing is eligible. */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL;
	int num = 0, min = ~0;
	struct list_head *p;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each(p, &h->list) {
		struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		if (c->type != type || c->state != BT_CONNECTED
				|| skb_queue_empty(&c->data_q))
			continue;
		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}
	}

	if (conn) {
		int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
		int q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}

/* ACL TX timeout handler: disconnect every ACL link that still has
 * unacknowledged packets outstanding.
 * NOTE(review): 0x13 is the HCI disconnect reason passed to
 * hci_acl_disconn — presumably "remote user terminated connection";
 * confirm against the Bluetooth HCI error code table. */
static inline void hci_acl_tx_to(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn *c;

	BT_ERR("%s ACL tx timeout", hdev->name);

	/* Kill stalled connections */
	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == ACL_LINK && c->sent) {
			BT_ERR("%s killing stalled ACL connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}
}

/* Schedule ACL: drain per-connection queues while controller ACL
 * buffer credits (acl_cnt) remain, distributing credits via
 * hci_low_sent(). */
static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	/* ACL tx timeout must be longer than maximum
	 * link supervision timeout (40.9 seconds) */
	if (!hdev->acl_cnt && (jiffies - hdev->acl_last_tx) > (HZ * 45))
		hci_acl_tx_to(hdev);

	while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			conn->sent++;
		}
	}
}

/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)	/* wrap the fairness counter */
				conn->sent = 0;
		}
	}
}

/* TX tasklet: run the ACL and SCO schedulers, then flush any queued
 * raw (unknown type) packets straight to the driver. Runs under the
 * hci_task_lock read lock so proto registration can exclude it. */
static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet: strip the ACL header, look up the connection by
 * handle and hand the payload to L2CAP; unknown handles are dropped. */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);	/* packet boundary / broadcast bits */
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) {
			hp->recv_acldata(conn, skb, flags);
			return;	/* skb ownership passed to the protocol */
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet: same pattern as ACL, delivered to the SCO protocol. */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) {
			hp->recv_scodata(conn, skb);
			return;	/* skb ownership passed to the protocol */
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

/* RX tasklet: drain rx_q, mirroring to sockets in promiscuous mode,
 * dropping everything in raw mode and data packets during init, then
 * dispatching by packet type. */
void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state:
			 * only events matter while the device initialises. */
			switch (skb->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			};
		}

		/* Process frame */
		switch (skb->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}

/* Command tasklet: send the next queued command when the controller
 * has a command credit (cmd_cnt); a clone is kept in sent_cmd so
 * hci_sent_cmd_data() can match completions. If no credit arrives
 * within HZ jiffies of the last send, force one (timeout recovery). */
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	if (!atomic_read(&hdev->cmd_cnt) && (jiffies - hdev->cmd_last_tx) > HZ) {
		BT_ERR("%s command tx timeout", hdev->name);
		atomic_set(&hdev->cmd_cnt, 1);
	}

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
		if (hdev->sent_cmd)
			kfree_skb(hdev->sent_cmd);

		if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			hdev->cmd_last_tx = jiffies;
		} else {
			/* Clone failed: requeue and retry later. */
			skb_queue_head(&hdev->cmd_q, skb);
			hci_sched_cmd(hdev);
		}
	}
}
/*
 * (Code-viewer UI chrome removed here: a keyboard-shortcut legend —
 *  copy code Ctrl+C, search Ctrl+F, fullscreen F11, toggle theme
 *  Ctrl+Shift+D, show shortcuts ?, font size Ctrl+= / Ctrl+-.
 *  It was part of the web page, not of hci_core.c.)
 */