📄 hci_core.c
	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (hci_proto[hp->id])
		hci_proto[hp->id] = NULL;
	else
		err = -ENOENT;

	write_unlock_bh(&hci_task_lock);

	return err;
}

static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, skb->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		do_gettimeofday(&skb->stamp);

		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 ogf, __u16 ocf, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	hci_command_hdr *hc;
	struct sk_buff *skb;

	BT_DBG("%s ogf 0x%x ocf 0x%x plen %d", hdev->name, ogf, ocf, plen);

	if (!(skb = bluez_skb_alloc(len, GFP_ATOMIC))) {
		BT_ERR("%s Can't allocate memory for HCI command", hdev->name);
		return -ENOMEM;
	}

	hc = (hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hc->opcode = __cpu_to_le16(cmd_opcode_pack(ogf, ocf));
	hc->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	skb->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;
	skb_queue_tail(&hdev->cmd_q, skb);
	hci_sched_cmd(hdev);

	return 0;
}
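/*
 * Example: how callers elsewhere in the stack typically drive
 * hci_send_cmd(). A minimal sketch, assuming the OGF_HOST_CTL,
 * OCF_RESET, OCF_WRITE_SCAN_ENABLE, SCAN_PAGE and SCAN_INQUIRY
 * constants from <net/bluetooth/hci.h>:
 *
 *	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_RESET, 0, NULL);
 *
 *	__u8 scan = SCAN_PAGE | SCAN_INQUIRY;
 *	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_SCAN_ENABLE, 1, &scan);
 *
 * Note that the command is only queued here; hci_cmd_task() further
 * below performs the actual transmission once the controller is ready
 * to accept another command.
 */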
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 ogf, __u16 ocf)
{
	hci_command_hdr *hc;

	if (!hdev->sent_cmd)
		return NULL;

	hc = (void *) hdev->sent_cmd->data;

	if (hc->opcode != __cpu_to_le16(cmd_opcode_pack(ogf, ocf)))
		return NULL;

	BT_DBG("%s ogf 0x%x ocf 0x%x", hdev->name, ogf, ocf);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	int len = skb->len;
	hci_acl_hdr *ah;

	ah = (hci_acl_hdr *) skb_push(skb, HCI_ACL_HDR_SIZE);
	ah->handle = __cpu_to_le16(acl_handle_pack(handle, flags));
	ah->dlen   = __cpu_to_le16(len);

	skb->h.raw = (void *) ah;
}

int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

	skb->dev = (void *) hdev;
	skb->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);

	if (!(list = skb_shinfo(skb)->frag_list)) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			skb->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	hci_sched_tx(hdev);
	return 0;
}

/* Send SCO data */
int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	hci_sco_hdr hs;

	BT_DBG("%s len %d", hdev->name, skb->len);

	if (skb->len > hdev->sco_mtu) {
		kfree_skb(skb);
		return -EINVAL;
	}

	hs.handle = __cpu_to_le16(conn->handle);
	hs.dlen   = skb->len;

	skb->h.raw = skb_push(skb, HCI_SCO_HDR_SIZE);
	memcpy(skb->h.raw, &hs, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	skb->pkt_type = HCI_SCODATA_PKT;
	skb_queue_tail(&conn->data_q, skb);
	hci_sched_tx(hdev);
	return 0;
}

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL;
	int num = 0, min = ~0;
	struct list_head *p;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each(p, &h->list) {
		struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		if (c->type != type || c->state != BT_CONNECTED
				|| skb_queue_empty(&c->data_q))
			continue;
		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}
	}

	if (conn) {
		int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
		int q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}

static inline void hci_acl_tx_to(struct hci_dev *hdev)
{
	struct conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn  *c;

	BT_ERR("%s ACL tx timeout", hdev->name);

	/* Kill stalled connections */
	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == ACL_LINK && c->sent) {
			BT_ERR("%s killing stalled ACL connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}
}

static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	/* ACL tx timeout must be longer than maximum
	 * link supervision timeout (40.9 seconds) */
	if (!hdev->acl_cnt && (jiffies - hdev->acl_last_tx) > (HZ * 45))
		hci_acl_tx_to(hdev);

	while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			conn->sent++;
		}
	}
}

/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);

	/* Schedule queues and send stuff to HCI driver */
	hci_sched_acl(hdev);
	hci_sched_sco(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}
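/*
 * A worked example of the scheduling above: per link type,
 * hci_low_sent() picks the ready connection with the fewest in-flight
 * packets (the lowest c->sent) and hands back a per-pass quota of
 * cnt / num. With, say, acl_cnt = 8 free controller buffers and
 * num = 3 ACL connections with queued data, the selected connection
 * may send 8 / 3 = 2 packets on that pass (never less than 1), so a
 * busy link cannot starve the quieter ones.
 */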
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	hci_acl_hdr *ah = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(ah->handle);
	flags  = acl_flags(handle);
	handle = acl_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) {
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	hci_sco_hdr *sh = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(sh->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) {
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (skb->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (skb->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}

static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	if (!atomic_read(&hdev->cmd_cnt) && (jiffies - hdev->cmd_last_tx) > HZ) {
		BT_ERR("%s command tx timeout", hdev->name);
		atomic_set(&hdev->cmd_cnt, 1);
	}

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
		if (hdev->sent_cmd)
			kfree_skb(hdev->sent_cmd);

		if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			hdev->cmd_last_tx = jiffies;
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			hci_sched_cmd(hdev);
		}
	}
}

/* ---- Initialization ---- */
int hci_core_init(void)
{
	return 0;
}

int hci_core_cleanup(void)
{
	return 0;
}
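/*
 * Note on command flow control in hci_cmd_task(): hdev->cmd_cnt acts
 * as a credit counter for the controller's command slot. Sending a
 * command consumes the credit, and a clone of the frame is kept in
 * hdev->sent_cmd so that hci_sent_cmd_data() can match the
 * controller's reply against the original parameters. The credit is
 * given back by the event handler (hci_event_packet(), in hci_event.c)
 * when the corresponding Command Complete/Command Status event
 * arrives; if no event arrives within a second, the watchdog at the
 * top of hci_cmd_task() forces the credit back to 1 so the command
 * queue cannot stall forever.
 */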