📄 iwch_cm.c
 * Upcall from the adapter indicating data has been transmitted.
 * For us it's just the single MPA request or reply. We can now free
 * the skb holding the mpa message.
 */
static int tx_ack(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
        struct iwch_ep *ep = ctx;
        struct cpl_wr_ack *hdr = cplhdr(skb);
        unsigned int credits = ntohs(hdr->credits);

        PDBG("%s ep %p credits %u\n", __FUNCTION__, ep, credits);

        if (credits == 0)
                return CPL_RET_BUF_DONE;
        BUG_ON(credits != 1);
        BUG_ON(ep->mpa_skb == NULL);
        kfree_skb(ep->mpa_skb);
        ep->mpa_skb = NULL;
        dst_confirm(ep->dst);
        if (state_read(&ep->com) == MPA_REP_SENT) {
                ep->com.rpl_done = 1;
                PDBG("waking up ep %p\n", ep);
                wake_up(&ep->com.waitq);
        }
        return CPL_RET_BUF_DONE;
}

static int abort_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
        struct iwch_ep *ep = ctx;

        PDBG("%s ep %p\n", __FUNCTION__, ep);

        /*
         * We get 2 abort replies from the HW. The first one must
         * be ignored except for scribbling that we need one more.
         */
        if (!(ep->flags & ABORT_REQ_IN_PROGRESS)) {
                ep->flags |= ABORT_REQ_IN_PROGRESS;
                return CPL_RET_BUF_DONE;
        }

        close_complete_upcall(ep);
        state_set(&ep->com, DEAD);
        release_ep_resources(ep);
        return CPL_RET_BUF_DONE;
}

/*
 * Return whether a failed active open has allocated a TID
 */
static inline int act_open_has_tid(int status)
{
        return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST &&
               status != CPL_ERR_ARP_MISS;
}

static int act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
        struct iwch_ep *ep = ctx;
        struct cpl_act_open_rpl *rpl = cplhdr(skb);

        PDBG("%s ep %p status %u errno %d\n", __FUNCTION__, ep, rpl->status,
             status2errno(rpl->status));
        connect_reply_upcall(ep, status2errno(rpl->status));
        state_set(&ep->com, DEAD);
        if (ep->com.tdev->type == T3B && act_open_has_tid(rpl->status))
                release_tid(ep->com.tdev, GET_TID(rpl), NULL);
        cxgb3_free_atid(ep->com.tdev, ep->atid);
        dst_release(ep->dst);
        l2t_release(L2DATA(ep->com.tdev), ep->l2t);
        put_ep(&ep->com);
        return CPL_RET_BUF_DONE;
}

static int listen_start(struct iwch_listen_ep *ep)
{
        struct sk_buff *skb;
        struct cpl_pass_open_req *req;

        PDBG("%s ep %p\n", __FUNCTION__, ep);
        skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
        if (!skb) {
                printk(KERN_ERR MOD "t3c_listen_start failed to alloc skb!\n");
                return -ENOMEM;
        }

        req = (struct cpl_pass_open_req *) skb_put(skb, sizeof(*req));
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, ep->stid));
        req->local_port = ep->com.local_addr.sin_port;
        req->local_ip = ep->com.local_addr.sin_addr.s_addr;
        req->peer_port = 0;
        req->peer_ip = 0;
        req->peer_netmask = 0;
        req->opt0h = htonl(F_DELACK | F_TCAM_BYPASS);
        req->opt0l = htonl(V_RCV_BUFSIZ(rcv_win>>10));
        req->opt1 = htonl(V_CONN_POLICY(CPL_CONN_POLICY_ASK));

        skb->priority = 1;
        cxgb3_ofld_send(ep->com.tdev, skb);
        return 0;
}

static int pass_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
        struct iwch_listen_ep *ep = ctx;
        struct cpl_pass_open_rpl *rpl = cplhdr(skb);

        PDBG("%s ep %p status %d error %d\n", __FUNCTION__, ep,
             rpl->status, status2errno(rpl->status));
        ep->com.rpl_err = status2errno(rpl->status);
        ep->com.rpl_done = 1;
        wake_up(&ep->com.waitq);

        return CPL_RET_BUF_DONE;
}

static int listen_stop(struct iwch_listen_ep *ep)
{
        struct sk_buff *skb;
        struct cpl_close_listserv_req *req;

        PDBG("%s ep %p\n", __FUNCTION__, ep);
        skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
        if (!skb) {
                printk(KERN_ERR MOD "%s - failed to alloc skb\n", __FUNCTION__);
                return -ENOMEM;
        }
        req = (struct cpl_close_listserv_req *) skb_put(skb, sizeof(*req));
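        /*
         * Build the CPL_CLOSE_LISTSRV_REQ in place and hand it to the
         * offload device; the reply is handled by close_listsrv_rpl().
         */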
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        req->cpu_idx = 0;
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, ep->stid));
        skb->priority = 1;
        cxgb3_ofld_send(ep->com.tdev, skb);
        return 0;
}

static int close_listsrv_rpl(struct t3cdev *tdev, struct sk_buff *skb,
                             void *ctx)
{
        struct iwch_listen_ep *ep = ctx;
        struct cpl_close_listserv_rpl *rpl = cplhdr(skb);

        PDBG("%s ep %p\n", __FUNCTION__, ep);
        ep->com.rpl_err = status2errno(rpl->status);
        ep->com.rpl_done = 1;
        wake_up(&ep->com.waitq);
        return CPL_RET_BUF_DONE;
}

static void accept_cr(struct iwch_ep *ep, __be32 peer_ip, struct sk_buff *skb)
{
        struct cpl_pass_accept_rpl *rpl;
        unsigned int mtu_idx;
        u32 opt0h, opt0l, opt2;
        int wscale;

        PDBG("%s ep %p\n", __FUNCTION__, ep);
        BUG_ON(skb_cloned(skb));
        skb_trim(skb, sizeof(*rpl));
        skb_get(skb);
        mtu_idx = find_best_mtu(T3C_DATA(ep->com.tdev), dst_mtu(ep->dst));
        wscale = compute_wscale(rcv_win);
        opt0h = V_NAGLE(0) |
            V_NO_CONG(nocong) |
            V_KEEP_ALIVE(1) |
            F_TCAM_BYPASS |
            V_WND_SCALE(wscale) |
            V_MSS_IDX(mtu_idx) |
            V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
        opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
        opt2 = V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(cong_flavor);

        rpl = cplhdr(skb);
        rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL, ep->hwtid));
        rpl->peer_ip = peer_ip;
        rpl->opt0h = htonl(opt0h);
        rpl->opt0l_status = htonl(opt0l | CPL_PASS_OPEN_ACCEPT);
        rpl->opt2 = htonl(opt2);
        rpl->rsvd = rpl->opt2;  /* workaround for HW bug */
        skb->priority = CPL_PRIORITY_SETUP;
        l2t_send(ep->com.tdev, skb, ep->l2t);

        return;
}

static void reject_cr(struct t3cdev *tdev, u32 hwtid, __be32 peer_ip,
                      struct sk_buff *skb)
{
        PDBG("%s t3cdev %p tid %u peer_ip %x\n", __FUNCTION__, tdev, hwtid,
             peer_ip);
        BUG_ON(skb_cloned(skb));
        skb_trim(skb, sizeof(struct cpl_tid_release));
        skb_get(skb);

        if (tdev->type == T3B)
                release_tid(tdev, hwtid, skb);
        else {
                struct cpl_pass_accept_rpl *rpl;

                rpl = cplhdr(skb);
                skb->priority = CPL_PRIORITY_SETUP;
                rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
                                                      hwtid));
                rpl->peer_ip = peer_ip;
                rpl->opt0h = htonl(F_TCAM_BYPASS);
                rpl->opt0l_status = htonl(CPL_PASS_OPEN_REJECT);
                rpl->opt2 = 0;
                rpl->rsvd = rpl->opt2;
                cxgb3_ofld_send(tdev, skb);
        }
}

static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
        struct iwch_ep *child_ep, *parent_ep = ctx;
        struct cpl_pass_accept_req *req = cplhdr(skb);
        unsigned int hwtid = GET_TID(req);
        struct dst_entry *dst;
        struct l2t_entry *l2t;
        struct rtable *rt;
        struct iff_mac tim;

        PDBG("%s parent ep %p tid %u\n", __FUNCTION__, parent_ep, hwtid);

        if (state_read(&parent_ep->com) != LISTEN) {
                printk(KERN_ERR "%s - listening ep not in LISTEN\n",
                       __FUNCTION__);
                goto reject;
        }

        /*
         * Find the netdev for this connection request.
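         * The GET_IFF_FROM_MAC control op maps the request's destination
         * MAC address (and VLAN tag) back to the ingress net_device.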
         */
        tim.mac_addr = req->dst_mac;
        tim.vlan_tag = ntohs(req->vlan_tag);
        if (tdev->ctl(tdev, GET_IFF_FROM_MAC, &tim) < 0 || !tim.dev) {
                printk(KERN_ERR
                       "%s bad dst mac %02x %02x %02x %02x %02x %02x\n",
                       __FUNCTION__,
                       req->dst_mac[0],
                       req->dst_mac[1],
                       req->dst_mac[2],
                       req->dst_mac[3],
                       req->dst_mac[4],
                       req->dst_mac[5]);
                goto reject;
        }

        /* Find output route */
        rt = find_route(tdev,
                        req->local_ip,
                        req->peer_ip,
                        req->local_port,
                        req->peer_port, G_PASS_OPEN_TOS(ntohl(req->tos_tid)));
        if (!rt) {
                printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
                       __FUNCTION__);
                goto reject;
        }
        dst = &rt->u.dst;
        l2t = t3_l2t_get(tdev, dst->neighbour, dst->neighbour->dev);
        if (!l2t) {
                printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
                       __FUNCTION__);
                dst_release(dst);
                goto reject;
        }
        child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
        if (!child_ep) {
                printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
                       __FUNCTION__);
                l2t_release(L2DATA(tdev), l2t);
                dst_release(dst);
                goto reject;
        }
        state_set(&child_ep->com, CONNECTING);
        child_ep->com.tdev = tdev;
        child_ep->com.cm_id = NULL;
        child_ep->com.local_addr.sin_family = PF_INET;
        child_ep->com.local_addr.sin_port = req->local_port;
        child_ep->com.local_addr.sin_addr.s_addr = req->local_ip;
        child_ep->com.remote_addr.sin_family = PF_INET;
        child_ep->com.remote_addr.sin_port = req->peer_port;
        child_ep->com.remote_addr.sin_addr.s_addr = req->peer_ip;
        get_ep(&parent_ep->com);
        child_ep->parent_ep = parent_ep;
        child_ep->tos = G_PASS_OPEN_TOS(ntohl(req->tos_tid));
        child_ep->l2t = l2t;
        child_ep->dst = dst;
        child_ep->hwtid = hwtid;
        init_timer(&child_ep->timer);
        cxgb3_insert_tid(tdev, &t3c_client, child_ep, hwtid);
        accept_cr(child_ep, req->peer_ip, skb);
        goto out;
reject:
        reject_cr(tdev, hwtid, req->peer_ip, skb);
out:
        return CPL_RET_BUF_DONE;
}

static int pass_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
        struct iwch_ep *ep = ctx;
        struct cpl_pass_establish *req = cplhdr(skb);

        PDBG("%s ep %p\n", __FUNCTION__, ep);
        ep->snd_seq = ntohl(req->snd_isn);
        ep->rcv_seq = ntohl(req->rcv_isn);

        set_emss(ep, ntohs(req->tcp_opt));

        dst_confirm(ep->dst);
        state_set(&ep->com, MPA_REQ_WAIT);
        start_ep_timer(ep);

        return CPL_RET_BUF_DONE;
}

static int peer_close(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
        struct iwch_ep *ep = ctx;
        struct iwch_qp_attributes attrs;
        unsigned long flags;
        int disconnect = 1;
        int release = 0;

        PDBG("%s ep %p\n", __FUNCTION__, ep);
        dst_confirm(ep->dst);

        spin_lock_irqsave(&ep->com.lock, flags);
        switch (ep->com.state) {
        case MPA_REQ_WAIT:
                __state_set(&ep->com, CLOSING);
                break;
        case MPA_REQ_SENT:
                __state_set(&ep->com, CLOSING);
                connect_reply_upcall(ep, -ECONNRESET);
                break;
        case MPA_REQ_RCVD:

                /*
                 * We're gonna mark this puppy DEAD, but keep
                 * the reference on it until the ULP accepts or
                 * rejects the CR.
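                 * The extra get_ep() below holds that reference.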
                 */
                __state_set(&ep->com, CLOSING);
                get_ep(&ep->com);
                break;
        case MPA_REP_SENT:
                __state_set(&ep->com, CLOSING);
                ep->com.rpl_done = 1;
                ep->com.rpl_err = -ECONNRESET;
                PDBG("waking up ep %p\n", ep);
                wake_up(&ep->com.waitq);
                break;
        case FPDU_MODE:
                start_ep_timer(ep);
                __state_set(&ep->com, CLOSING);
                attrs.next_state = IWCH_QP_STATE_CLOSING;
                iwch_modify_qp(ep->com.qp->rhp, ep->com.qp,
                               IWCH_QP_ATTR_NEXT_STATE, &attrs, 1);
                peer_close_upcall(ep);
                break;
        case ABORTING:
                disconnect = 0;
                break;
        case CLOSING:
                __state_set(&ep->com, MORIBUND);
                disconnect = 0;
                break;
        case MORIBUND:
                stop_ep_timer(ep);
                if (ep->com.cm_id && ep->com.qp) {
                        attrs.next_state = IWCH_QP_STATE_IDLE;
                        iwch_modify_qp(ep->com.qp->rhp, ep->com.qp,
                                       IWCH_QP_ATTR_NEXT_STATE, &attrs, 1);
                }
                close_complete_upcall(ep);
                __state_set(&ep->com, DEAD);
                release = 1;
                disconnect = 0;
                break;
        case DEAD:
                disconnect = 0;
                break;
        default:
                BUG_ON(1);
        }
        spin_unlock_irqrestore(&ep->com.lock, flags);
        if (disconnect)
                iwch_ep_disconnect(ep, 0, GFP_KERNEL);
        if (release)
                release_ep_resources(ep);
        return CPL_RET_BUF_DONE;
}

/*
 * Returns whether an ABORT_REQ_RSS message is a negative advice.
 */
static int is_neg_adv_abort(unsigned int status)
{
        return status == CPL_ERR_RTX_NEG_ADVICE ||
               status == CPL_ERR_PERSIST_NEG_ADVICE;
}

static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
        struct cpl_abort_req_rss *req = cplhdr(skb);
        struct iwch_ep *ep = ctx;
        struct cpl_abort_rpl *rpl;
        struct sk_buff *rpl_skb;
        struct iwch_qp_attributes attrs;
        int ret;
        int state;

        if (is_neg_adv_abort(req->status)) {
                PDBG("%s neg_adv_abort ep %p tid %d\n", __FUNCTION__, ep,
                     ep->hwtid);
                t3_l2t_send_event(ep->com.tdev, ep->l2t);
                return CPL_RET_BUF_DONE;
        }

        /*
         * We get 2 peer aborts from the HW. The first one must
         * be ignored except for scribbling that we need one more.
         */
        if (!(ep->flags & PEER_ABORT_IN_PROGRESS)) {
                ep->flags |= PEER_ABORT_IN_PROGRESS;
                return CPL_RET_BUF_DONE;
        }

        state = state_read(&ep->com);
        PDBG("%s ep %p state %u\n", __FUNCTION__, ep, state);
        switch (state) {
        case CONNECTING:
                break;
        case MPA_REQ_WAIT:
                stop_ep_timer(ep);
                break;
        case MPA_REQ_SENT:
                stop_ep_timer(ep);
                connect_reply_upcall(ep, -ECONNRESET);
                break;
        case MPA_REP_SENT:
                ep->com.rpl_done = 1;
                ep->com.rpl_err = -ECONNRESET;
                PDBG("waking up ep %p\n", ep);
                wake_up(&ep->com.waitq);
                break;
        case MPA_REQ_RCVD:

                /*
                 * We're gonna mark this puppy DEAD, but keep
                 * the reference on it until the ULP accepts or
                 * rejects the CR.
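                 * As in peer_close(), get_ep() below takes that extra
                 * reference.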
                 */
                get_ep(&ep->com);
                break;
        case MORIBUND:
        case CLOSING:
                stop_ep_timer(ep);
                /*FALLTHROUGH*/
        case FPDU_MODE:
                if (ep->com.cm_id && ep->com.qp) {
                        attrs.next_state = IWCH_QP_STATE_ERROR;
                        ret = iwch_modify_qp(ep->com.qp->rhp,
                                     ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
                                     &attrs, 1);
                        if (ret)
                                printk(KERN_ERR MOD
                                       "%s - qp <- error failed!\n",
                                       __FUNCTION__);
                }
                peer_abort_upcall(ep);
                break;
        case ABORTING:
                break;
        case DEAD:
                PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __FUNCTION__);
                return CPL_RET_BUF_DONE;
        default:
                BUG_ON(1);
                break;
        }
        dst_confirm(ep->dst);

        rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
        if (!rpl_skb) {
                printk(KERN_ERR MOD "%s - cannot allocate skb!\n",
                       __FUNCTION__);
                dst_release(ep->dst);
                l2t_release(L2DATA(ep->com.tdev), ep->l2t);
                put_ep(&ep->com);
                return CPL_RET_BUF_DONE;
        }
        rpl_skb->priority = CPL_PRIORITY_DATA;
        rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl));
        rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
        rpl->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
        OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
        rpl->cmd = CPL_ABORT_NO_RST;
        cxgb3_ofld_send(ep->com.tdev, rpl_skb);
        if (state != ABORTING) {
                state_set(&ep->com, DEAD);
                release_ep_resources(ep);
        }
        return CPL_RET_BUF_DONE;
}

static int close_con_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
        struct iwch_ep *ep = ctx;
        struct iwch_qp_attributes attrs;
        unsigned long flags;
        int release = 0;

        PDBG("%s ep %p\n", __FUNCTION__, ep);
        BUG_ON(!ep);

        /* The cm_id may be null if we failed to connect */
        spin_lock_irqsave(&ep->com.lock, flags);
        switch (ep->com.state) {
        case CLOSING: