/* ldc.c: Logical Domain Channel link-layer protocol driver.
 *
 * (Extracted from the Linux kernel sources; this is page 1 of 4 of a
 * 2,379-line file.  Web-viewer chrome replaced by this header.)
 */
/*
 * NOTE(review): this chunk resumes in the middle of process_ver_info();
 * the opening of that function lies outside the visible region, so only
 * its tail appears here, unchanged.
 */
		if (!err) {
			lp->ver = ver;
			lp->hs_state = LDC_HS_GOTVERS;
		}
	}
	if (err)
		return ldc_abort(lp);

	return 0;
}

/* Handle a VERS/ACK handshake packet: the peer accepted a version we
 * proposed.  Record (or cross-check) the negotiated version and move
 * the handshake forward by sending RTS.  Returns 0, or the result of
 * ldc_abort() on any inconsistency.
 */
static int process_ver_ack(struct ldc_channel *lp, struct ldc_version *vp)
{
	ldcdbg(HS, "GOT VERSION ACK major[%x] minor[%x]\n",
	       vp->major, vp->minor);

	if (lp->hs_state == LDC_HS_GOTVERS) {
		/* Already committed to a version; the ACK must match it. */
		if (lp->ver.major != vp->major ||
		    lp->ver.minor != vp->minor)
			return ldc_abort(lp);
	} else {
		lp->ver = *vp;
		lp->hs_state = LDC_HS_GOTVERS;
	}
	if (send_rts(lp))
		return ldc_abort(lp);
	return 0;
}

/* Handle a VERS/NACK handshake packet: the peer rejected our proposed
 * version.  If we know the major version the peer suggested, re-propose
 * with our matching table entry; a 0.0 reply or an unknown major means
 * negotiation failed and the channel is reset.
 */
static int process_ver_nack(struct ldc_channel *lp, struct ldc_version *vp)
{
	struct ldc_version *vap;

	if ((vp->major == 0 && vp->minor == 0) ||
	    !(vap = find_by_major(vp->major))) {
		return ldc_abort(lp);
	} else {
		struct ldc_packet *p;
		unsigned long new_tail;

		p = handshake_compose_ctrl(lp, LDC_INFO, LDC_VERS,
					   vap, sizeof(*vap),
					   &new_tail);
		if (p)
			return send_tx_packet(lp, p, new_tail);
		else
			return ldc_abort(lp);
	}
}

/* Dispatch a VERS control packet to the INFO/ACK/NACK handler based on
 * its subtype; anything else resets the channel.
 */
static int process_version(struct ldc_channel *lp, struct ldc_packet *p)
{
	struct ldc_version *vp;

	vp = (struct ldc_version *) p->u.u_data;

	switch (p->stype) {
	case LDC_INFO:
		return process_ver_info(lp, vp);

	case LDC_ACK:
		return process_ver_ack(lp, vp);

	case LDC_NACK:
		return process_ver_nack(lp, vp);

	default:
		return ldc_abort(lp);
	}
}

/* Handle RTS (request-to-send): only legal as an INFO packet while we
 * are in GOTVERS state, with an envelope matching our configured mode.
 * Seed both sequence counters from the peer's seqid and answer RTR.
 */
static int process_rts(struct ldc_channel *lp, struct ldc_packet *p)
{
	ldcdbg(HS, "GOT RTS stype[%x] seqid[%x] env[%x]\n",
	       p->stype, p->seqid, p->env);

	if (p->stype != LDC_INFO ||
	    lp->hs_state != LDC_HS_GOTVERS ||
	    p->env != lp->cfg.mode)
		return ldc_abort(lp);

	lp->snd_nxt = p->seqid;
	lp->rcv_nxt = p->seqid;
	lp->hs_state = LDC_HS_SENTRTR;
	if (send_rtr(lp))
		return ldc_abort(lp);

	return 0;
}

/* Handle RTR (ready-to-receive): the peer answered our RTS.  The
 * handshake is complete on our side; mark the channel connected, send
 * the final RDX, and report LDC_EVENT_UP to the interrupt path.
 */
static int process_rtr(struct ldc_channel *lp, struct ldc_packet *p)
{
	ldcdbg(HS, "GOT RTR stype[%x] seqid[%x] env[%x]\n",
	       p->stype, p->seqid, p->env);

	if (p->stype != LDC_INFO ||
	    p->env != lp->cfg.mode)
		return ldc_abort(lp);

	lp->snd_nxt = p->seqid;
	lp->hs_state = LDC_HS_COMPLETE;
	ldc_set_state(lp, LDC_STATE_CONNECTED);
	send_rdx(lp);

	return LDC_EVENT_UP;
}

/* A packet is in-order when it carries the next expected sequence
 * number after rcv_nxt.
 */
static int rx_seq_ok(struct ldc_channel *lp, u32 seqid)
{
	return lp->rcv_nxt + 1 == seqid;
}

/* Handle RDX (ready-for-data-exchange), the peer's final handshake
 * packet.  Validate subtype and sequence number, then mark the channel
 * fully connected and report LDC_EVENT_UP.
 */
static int process_rdx(struct ldc_channel *lp, struct ldc_packet *p)
{
	ldcdbg(HS, "GOT RDX stype[%x] seqid[%x] env[%x] ackid[%x]\n",
	       p->stype, p->seqid, p->env, p->u.r.ackid);

	if (p->stype != LDC_INFO ||
	    !(rx_seq_ok(lp, p->seqid)))
		return ldc_abort(lp);

	lp->rcv_nxt = p->seqid;

	lp->hs_state = LDC_HS_COMPLETE;
	ldc_set_state(lp, LDC_STATE_CONNECTED);

	return LDC_EVENT_UP;
}

/* Dispatch a control frame by its ctrl opcode; unknown opcodes reset
 * the channel.
 */
static int process_control_frame(struct ldc_channel *lp,
				 struct ldc_packet *p)
{
	switch (p->ctrl) {
	case LDC_VERS:
		return process_version(lp, p);

	case LDC_RTS:
		return process_rts(lp, p);

	case LDC_RTR:
		return process_rtr(lp, p);

	case LDC_RDX:
		return process_rdx(lp, p);

	default:
		return ldc_abort(lp);
	}
}

/* Any error frame from the peer simply resets the channel. */
static int process_error_frame(struct ldc_channel *lp,
			       struct ldc_packet *p)
{
	return ldc_abort(lp);
}

/* Walk the TX ring from the last acked slot looking for the packet the
 * peer just acknowledged, and advance tx_acked past it.  Reaching
 * tx_tail without finding the seqid means the peer acked something we
 * never sent -- reset the channel.
 */
static int process_data_ack(struct ldc_channel *lp,
			    struct ldc_packet *ack)
{
	unsigned long head = lp->tx_acked;
	u32 ackid = ack->u.r.ackid;

	while (1) {
		struct ldc_packet *p = lp->tx_base + (head / LDC_PACKET_SIZE);

		head = tx_advance(lp, head);

		if (p->seqid == ackid) {
			lp->tx_acked = head;
			return 0;
		}
		if (head == lp->tx_tail)
			return ldc_abort(lp);
	}

	return 0;
}

/* Invoke the client's event callback once per event bit set in
 * event_mask.  Called with lp->lock dropped.
 */
static void send_events(struct ldc_channel *lp, unsigned int event_mask)
{
	if (event_mask & LDC_EVENT_RESET)
		lp->cfg.event(lp->event_arg, LDC_EVENT_RESET);
	if (event_mask & LDC_EVENT_UP)
		lp->cfg.event(lp->event_arg, LDC_EVENT_UP);
	if (event_mask & LDC_EVENT_DATA_READY)
		lp->cfg.event(lp->event_arg, LDC_EVENT_DATA_READY);
}

/* RX interrupt handler: snapshot the hypervisor's view of the RX queue,
 * then (pre-handshake) consume control frames in-line, or
 * (post-handshake) just translate queue/channel state changes into
 * event bits for the client callback.
 */
static irqreturn_t ldc_rx(int irq, void *dev_id)
{
	struct ldc_channel *lp = dev_id;
	unsigned long orig_state, hv_err, flags;
	unsigned int event_mask;

	spin_lock_irqsave(&lp->lock, flags);

	orig_state = lp->chan_state;
	/* NOTE(review): hv_err is captured but never checked here -- matches
	 * the visible code; presumably the state inspection below suffices.
	 */
	hv_err = sun4v_ldc_rx_get_state(lp->id,
					&lp->rx_head,
					&lp->rx_tail,
					&lp->chan_state);

	ldcdbg(RX, "RX state[0x%02lx:0x%02lx] head[0x%04lx] tail[0x%04lx]\n",
	       orig_state, lp->chan_state, lp->rx_head, lp->rx_tail);

	event_mask = 0;

	/* RAW mode has no handshake; a channel that is UP is connected. */
	if (lp->cfg.mode ==
LDC_MODE_RAW && lp->chan_state == LDC_CHANNEL_UP) {
		lp->hs_state = LDC_HS_COMPLETE;
		ldc_set_state(lp, LDC_STATE_CONNECTED);

		event_mask |= LDC_EVENT_UP;

		orig_state = lp->chan_state;
	}

	/* If we are in reset state, flush the RX queue and ignore
	 * everything.
	 */
	if (lp->flags & LDC_FLAG_RESET) {
		(void) __set_rx_head(lp, lp->rx_tail);
		goto out;
	}

	/* Once we finish the handshake, we let the ldc_read()
	 * paths do all of the control frame and state management.
	 * Just trigger the callback.
	 */
	if (lp->hs_state == LDC_HS_COMPLETE) {
handshake_complete:
		if (lp->chan_state != orig_state) {
			unsigned int event = LDC_EVENT_RESET;

			if (lp->chan_state == LDC_CHANNEL_UP)
				event = LDC_EVENT_UP;

			event_mask |= event;
		}
		if (lp->rx_head != lp->rx_tail)
			event_mask |= LDC_EVENT_DATA_READY;

		goto out;
	}

	/* Channel state changed underneath us mid-handshake; bail and
	 * let a later interrupt sort it out.
	 */
	if (lp->chan_state != orig_state)
		goto out;

	/* Pre-handshake: consume and dispatch each queued packet. */
	while (lp->rx_head != lp->rx_tail) {
		struct ldc_packet *p;
		unsigned long new;
		int err;

		p = lp->rx_base + (lp->rx_head / LDC_PACKET_SIZE);

		switch (p->type) {
		case LDC_CTRL:
			err = process_control_frame(lp, p);
			if (err > 0)
				/* Positive returns are event bits. */
				event_mask |= err;
			break;

		case LDC_DATA:
			event_mask |= LDC_EVENT_DATA_READY;
			err = 0;
			break;

		case LDC_ERR:
			err = process_error_frame(lp, p);
			break;

		default:
			err = ldc_abort(lp);
			break;
		}

		if (err < 0)
			break;

		/* Advance rx_head by one packet, wrapping at queue end. */
		new = lp->rx_head;
		new += LDC_PACKET_SIZE;
		if (new == (lp->rx_num_entries * LDC_PACKET_SIZE))
			new = 0;
		lp->rx_head = new;

		err = __set_rx_head(lp, new);
		if (err < 0) {
			(void) ldc_abort(lp);
			break;
		}

		if (lp->hs_state == LDC_HS_COMPLETE)
			goto handshake_complete;
	}

out:
	spin_unlock_irqrestore(&lp->lock, flags);

	send_events(lp, event_mask);

	return IRQ_HANDLED;
}

/* TX interrupt handler: refresh the hypervisor's TX queue state.  The
 * only event generated here is the RAW-mode "channel came up"
 * transition; everything else is left to the RX path.
 */
static irqreturn_t ldc_tx(int irq, void *dev_id)
{
	struct ldc_channel *lp = dev_id;
	unsigned long flags, hv_err, orig_state;
	unsigned int event_mask = 0;

	spin_lock_irqsave(&lp->lock, flags);

	orig_state = lp->chan_state;
	/* NOTE(review): hv_err unchecked here as well, like ldc_rx(). */
	hv_err = sun4v_ldc_tx_get_state(lp->id,
					&lp->tx_head,
					&lp->tx_tail,
					&lp->chan_state);

	ldcdbg(TX, " TX state[0x%02lx:0x%02lx] head[0x%04lx] tail[0x%04lx]\n",
	       orig_state, lp->chan_state, lp->tx_head, lp->tx_tail);

	if (lp->cfg.mode == LDC_MODE_RAW &&
	    lp->chan_state == LDC_CHANNEL_UP) {
		lp->hs_state = LDC_HS_COMPLETE;
		ldc_set_state(lp, LDC_STATE_CONNECTED);

		event_mask |= LDC_EVENT_UP;
	}

	spin_unlock_irqrestore(&lp->lock, flags);

	send_events(lp, event_mask);

	return IRQ_HANDLED;
}

/* XXX ldc_alloc() and ldc_free() needs to run under a mutex so
 * XXX that addition and removal from the ldc_channel_list has
 * XXX atomicity, otherwise the __ldc_channel_exists() check is
 * XXX totally pointless as another thread can slip into ldc_alloc()
 * XXX and add a channel with the same ID.  There also needs to be
 * XXX a spinlock for ldc_channel_list.
 */
static HLIST_HEAD(ldc_channel_list);

/* Return non-zero when a channel with this ID is already registered.
 * Subject to the race described in the XXX comment above.
 */
static int __ldc_channel_exists(unsigned long id)
{
	struct ldc_channel *lp;
	struct hlist_node *n;

	hlist_for_each_entry(lp, n, &ldc_channel_list, list) {
		if (lp->id == id)
			return 1;
	}
	return 0;
}

/* Allocate and zero a page-aligned packet queue of num_entries
 * packets; return the kernel virtual base in *base and its physical
 * address (for registration with the hypervisor) in *ra.
 */
static int alloc_queue(const char *name, unsigned long num_entries,
		       struct ldc_packet **base, unsigned long *ra)
{
	unsigned long size, order;
	void *q;

	size = num_entries * LDC_PACKET_SIZE;
	order = get_order(size);

	q = (void *) __get_free_pages(GFP_KERNEL, order);
	if (!q) {
		printk(KERN_ERR PFX "Alloc of %s queue failed with "
		       "size=%lu order=%lu\n", name, size, order);
		return -ENOMEM;
	}

	memset(q, 0, PAGE_SIZE << order);

	*base = q;
	*ra = __pa(q);

	return 0;
}

/* Free a queue allocated by alloc_queue(); a NULL queue is a no-op. */
static void free_queue(unsigned long num_entries, struct ldc_packet *q)
{
	unsigned long size, order;

	if (!q)
		return;

	size = num_entries * LDC_PACKET_SIZE;
	order = get_order(size);

	free_pages((unsigned long)q, order);
}

/* XXX Make this configurable...
XXX */
#define LDC_IOTABLE_SIZE	(8 * 1024)

/* Set up the per-channel IOMMU: an allocation bitmap arena plus a map
 * table of LDC_IOTABLE_SIZE entries registered with the hypervisor.
 * Returns 0, or -ENOMEM/-EINVAL after unwinding partial allocations.
 */
static int ldc_iommu_init(struct ldc_channel *lp)
{
	unsigned long sz, num_tsb_entries, tsbsize, order;
	struct ldc_iommu *iommu = &lp->iommu;
	struct ldc_mtable_entry *table;
	unsigned long hv_err;
	int err;

	num_tsb_entries = LDC_IOTABLE_SIZE;
	tsbsize = num_tsb_entries * sizeof(struct ldc_mtable_entry);

	spin_lock_init(&iommu->lock);

	/* One bit per entry, rounded up to a multiple of 8 bytes. */
	sz = num_tsb_entries / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->arena.map = kzalloc(sz, GFP_KERNEL);
	if (!iommu->arena.map) {
		printk(KERN_ERR PFX "Alloc of arena map failed, sz=%lu\n", sz);
		return -ENOMEM;
	}

	iommu->arena.limit = num_tsb_entries;

	order = get_order(tsbsize);

	table = (struct ldc_mtable_entry *)
		__get_free_pages(GFP_KERNEL, order);
	err = -ENOMEM;
	if (!table) {
		printk(KERN_ERR PFX "Alloc of MTE table failed, "
		       "size=%lu order=%lu\n", tsbsize, order);
		goto out_free_map;
	}

	memset(table, 0, PAGE_SIZE << order);

	iommu->page_table = table;

	hv_err = sun4v_ldc_set_map_table(lp->id, __pa(table),
					 num_tsb_entries);
	err = -EINVAL;
	if (hv_err)
		goto out_free_table;

	return 0;

out_free_table:
	free_pages((unsigned long) table, order);
	iommu->page_table = NULL;

out_free_map:
	kfree(iommu->arena.map);
	iommu->arena.map = NULL;

	return err;
}

/* Undo ldc_iommu_init(): unregister the map table from the hypervisor
 * and free the table pages and arena bitmap.
 */
static void ldc_iommu_release(struct ldc_channel *lp)
{
	struct ldc_iommu *iommu = &lp->iommu;
	unsigned long num_tsb_entries, tsbsize, order;

	(void) sun4v_ldc_set_map_table(lp->id, 0, 0);

	num_tsb_entries = iommu->arena.limit;
	tsbsize = num_tsb_entries * sizeof(struct ldc_mtable_entry);
	order = get_order(tsbsize);

	free_pages((unsigned long) iommu->page_table, order);
	iommu->page_table = NULL;

	kfree(iommu->arena.map);
	iommu->arena.map = NULL;
}

/* Create and register an LDC channel object for channel `id'.
 * Validates the config, sets up the IOMMU, the per-mode ops/MSS, the
 * optional STREAM-mode reassembly buffer, and the TX/RX queues, then
 * links the channel onto ldc_channel_list.
 *
 * NOTE(review): this function is truncated by the chunk boundary --
 * the remaining error labels (freeing lp itself and returning the
 * error pointer) continue beyond the visible region, unchanged.
 */
struct ldc_channel *ldc_alloc(unsigned long id,
			      const struct ldc_channel_config *cfgp,
			      void *event_arg)
{
	struct ldc_channel *lp;
	const struct ldc_mode_ops *mops;
	unsigned long dummy1, dummy2, hv_err;
	u8 mss, *mssbuf;
	int err;

	err = -ENODEV;
	if (!ldom_domaining_enabled)
		goto out_err;

	err = -EINVAL;
	if (!cfgp)
		goto out_err;

	/* Pick the mode ops and the per-packet payload size (MSS). */
	switch (cfgp->mode) {
	case LDC_MODE_RAW:
		mops = &raw_ops;
		mss = LDC_PACKET_SIZE;
		break;

	case LDC_MODE_UNRELIABLE:
		mops = &nonraw_ops;
		mss = LDC_PACKET_SIZE - 8;
		break;

	case LDC_MODE_STREAM:
		mops = &stream_ops;
		mss = LDC_PACKET_SIZE - 8 - 8;
		break;

	default:
		goto out_err;
	}

	if (!cfgp->event || !event_arg || !cfgp->rx_irq || !cfgp->tx_irq)
		goto out_err;

	/* Probe the channel via the hypervisor before committing. */
	hv_err = sun4v_ldc_tx_qinfo(id, &dummy1, &dummy2);
	err = -ENODEV;
	if (hv_err == HV_ECHANNEL)
		goto out_err;

	err = -EEXIST;
	if (__ldc_channel_exists(id))
		goto out_err;

	mssbuf = NULL;

	lp = kzalloc(sizeof(*lp), GFP_KERNEL);
	err = -ENOMEM;
	if (!lp)
		goto out_err;

	spin_lock_init(&lp->lock);

	lp->id = id;

	err = ldc_iommu_init(lp);
	if (err)
		goto out_free_ldc;

	lp->mops = mops;
	lp->mss = mss;

	lp->cfg = *cfgp;
	if (!lp->cfg.mtu)
		lp->cfg.mtu = LDC_DEFAULT_MTU;

	if (lp->cfg.mode == LDC_MODE_STREAM) {
		/* STREAM mode reassembles multi-packet messages here. */
		mssbuf = kzalloc(lp->cfg.mtu, GFP_KERNEL);
		if (!mssbuf) {
			err = -ENOMEM;
			goto out_free_iommu;
		}
		lp->mssbuf = mssbuf;
	}

	lp->event_arg = event_arg;

	/* XXX allow setting via ldc_channel_config to override defaults
	 * XXX or use some formula based upon mtu
	 */
	lp->tx_num_entries = LDC_DEFAULT_NUM_ENTRIES;
	lp->rx_num_entries = LDC_DEFAULT_NUM_ENTRIES;

	err = alloc_queue("TX", lp->tx_num_entries,
			  &lp->tx_base, &lp->tx_ra);
	if (err)
		goto out_free_mssbuf;

	err = alloc_queue("RX", lp->rx_num_entries,
			  &lp->rx_base, &lp->rx_ra);
	if (err)
		goto out_free_txq;

	lp->flags |= LDC_FLAG_ALLOCED_QUEUES;

	lp->hs_state = LDC_HS_CLOSED;
	ldc_set_state(lp, LDC_STATE_INIT);

	INIT_HLIST_NODE(&lp->list);
	hlist_add_head(&lp->list, &ldc_channel_list);

	INIT_HLIST_HEAD(&lp->mh_list);

	return lp;

out_free_txq:
	free_queue(lp->tx_num_entries, lp->tx_base);

out_free_mssbuf:
	if (mssbuf)
		kfree(mssbuf);

out_free_iommu:
	ldc_iommu_release(lp);
/* (End of extracted page 1 of 4 -- ldc_alloc()'s remaining error path
 * continues on the next page.  Web-viewer keyboard-shortcut chrome
 * removed.)
 */