📄 hci_core.c
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0, msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	err = hci_dev_do_close(hdev);

	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	hci_req_lock(hdev);
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0;
	hdev->sco_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);
	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(dr.dev_id)))
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETLINKPOL:
		hdev->link_policy = (__u16) dr.dev_opt;
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
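Note how the `HCISETACLMTU`/`HCISETSCOMTU` cases unpack `dr.dev_opt` as two raw `__u16` halves: the packet count sits in the low half, the MTU in the high half. A minimal user-space sketch of driving this ioctl follows, assuming the BlueZ `<bluetooth/hci.h>` header, a little-endian host (the kernel reads the halves straight out of memory), and placeholder device id 0:

/* Sketch only: exercising the hci_dev_cmd() ioctl path from user space.
 * Assumptions: BlueZ <bluetooth/bluetooth.h> and <bluetooth/hci.h>
 * headers, little-endian host, device id 0 as a placeholder. */
#include <stdint.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

static int set_acl_mtu(uint16_t mtu, uint16_t pkts)
{
	struct hci_dev_req dr;
	int err, dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);

	if (dd < 0)
		return -1;

	dr.dev_id = 0;	/* hci0; placeholder */
	/* Match the kernel's unpacking on little-endian: pkts in the
	 * low half of dev_opt, MTU in the high half. */
	dr.dev_opt = ((uint32_t) mtu << 16) | pkts;

	err = ioctl(dd, HCISETACLMTU, (unsigned long) &dr);
	close(dd);
	return err;
}

The same raw-socket-plus-ioctl pattern covers the other HCISET* cases above, with `dev_opt` interpreted per case.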
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	struct list_head *p;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	if (!(dl = kmalloc(size, GFP_KERNEL)))
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *hdev;
		hdev = list_entry(p, struct hci_dev, list);
		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;
		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(di.dev_id)))
		return -ENODEV;

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = hdev->type;
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id = 0;

	BT_DBG("%p name %s type %d owner %p", hdev, hdev->name,
						hdev->type, hdev->owner);

	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->flags = 0;
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	for (i = 0; i < 3; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	init_MUTEX(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hci_register_sysfs(hdev);

	hci_notify(hdev, HCI_DEV_REG);

	return id;
}
EXPORT_SYMBOL(hci_register_dev);
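`hci_register_dev()` rejects a device that lacks `open`, `close`, or `destruct` callbacks, then assigns the first free `hci%d` id and returns it. A sketch of the driver side of this interface for this era of the API, where everything prefixed `my_` is hypothetical and only the `hci_*` calls and fields come from the code above:

/* Sketch: a transport driver registering with the HCI core.
 * my_open/my_close/my_send/my_destruct/my_probe are hypothetical
 * placeholders; HCI_VIRTUAL is the bus type this era's vhci driver
 * used and stands in for the real transport type. */
#include <linux/module.h>
#include <linux/errno.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static int my_open(struct hci_dev *hdev)  { return 0; }
static int my_close(struct hci_dev *hdev) { return 0; }
static int my_send(struct sk_buff *skb)   { kfree_skb(skb); return 0; }
static void my_destruct(struct hci_dev *hdev) { }

static int my_probe(void)
{
	struct hci_dev *hdev = hci_alloc_dev();

	if (!hdev)
		return -ENOMEM;

	hdev->type  = HCI_VIRTUAL;
	hdev->owner = THIS_MODULE;

	/* Mandatory callbacks: hci_register_dev() returns -EINVAL
	 * without open, close and destruct. */
	hdev->open     = my_open;
	hdev->close    = my_close;
	hdev->send     = my_send;
	hdev->destruct = my_destruct;

	if (hci_register_dev(hdev) < 0) {
		hci_free_dev(hdev);
		return -EBUSY;
	}
	return 0;
}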
/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type);

	hci_unregister_sysfs(hdev);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < 3; i++)
		kfree_skb(hdev->reassembly[i]);

	hci_notify(hdev, HCI_DEV_UNREG);

	__hci_dev_put(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive packet type fragment */
#define __reassembly(hdev, type)  ((hdev)->reassembly[(type) - 2])

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		struct sk_buff *skb = __reassembly(hdev, type);
		struct { int expect; } *scb;
		int len = 0;

		if (!skb) {
			/* Start of the frame */

			switch (type) {
			case HCI_EVENT_PKT:
				if (count >= HCI_EVENT_HDR_SIZE) {
					struct hci_event_hdr *h = data;
					len = HCI_EVENT_HDR_SIZE + h->plen;
				} else
					return -EILSEQ;
				break;

			case HCI_ACLDATA_PKT:
				if (count >= HCI_ACL_HDR_SIZE) {
					struct hci_acl_hdr *h = data;
					len = HCI_ACL_HDR_SIZE + __le16_to_cpu(h->dlen);
				} else
					return -EILSEQ;
				break;

			case HCI_SCODATA_PKT:
				if (count >= HCI_SCO_HDR_SIZE) {
					struct hci_sco_hdr *h = data;
					len = HCI_SCO_HDR_SIZE + h->dlen;
				} else
					return -EILSEQ;
				break;
			}

			skb = bt_skb_alloc(len, GFP_ATOMIC);
			if (!skb) {
				BT_ERR("%s no memory for packet", hdev->name);
				return -ENOMEM;
			}

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = type;

			__reassembly(hdev, type) = skb;

			scb = (void *) skb->cb;
			scb->expect = len;
		} else {
			/* Continuation */

			scb = (void *) skb->cb;
			len = scb->expect;
		}

		len = min(len, count);

		memcpy(skb_put(skb, len), data, len);

		scb->expect -= len;

		if (scb->expect == 0) {
			/* Complete frame */

			__reassembly(hdev, type) = NULL;

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);
		}

		count -= len; data += len;
	}

	return 0;
}
EXPORT_SYMBOL(hci_recv_fragment);

/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (!hci_proto[hp->id])
		hci_proto[hp->id] = hp;
	else
		err = -EEXIST;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_register_proto);
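`hci_recv_fragment()` keeps one partially assembled skb per packet type (`reassembly[type - 2]`, covering ACL, SCO and event packets) and sizes it from the header carried in the first chunk, which is why it returns -EILSEQ when that first chunk is shorter than the packet header. A hedged sketch of a transport driver's receive path feeding it, with `my_rx_chunk` and its parameters hypothetical:

/* Sketch: pushing raw transport bytes into the core's reassembly.
 * Assumes the driver has already consumed the one-byte packet-type
 * indicator and knows which hci_dev the bytes belong to; chunks may
 * arrive in arbitrary sizes after the first full header. */
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void my_rx_chunk(struct hci_dev *hdev, int pkt_type,
						void *buf, int len)
{
	int err = hci_recv_fragment(hdev, pkt_type, buf, len);

	/* -EILSEQ: unknown packet type, or the first chunk was too
	 * short for the core to size the frame from its header. */
	if (err < 0)
		BT_ERR("%s reassembly failed (%d)", hdev->name, err);
}

Once `scb->expect` drains to zero the core hands the completed skb to `hci_recv_frame()`, so the driver never has to track frame boundaries itself.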