mad.c
			}
		}
	}
out:
	return;
}

static struct ib_mad_agent_private *
find_mad_agent(struct ib_mad_port_private *port_priv,
	       struct ib_mad *mad)
{
	struct ib_mad_agent_private *mad_agent = NULL;
	unsigned long flags;

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	if (response_mad(mad)) {
		u32 hi_tid;
		struct ib_mad_agent_private *entry;

		/*
		 * Routing is based on high 32 bits of transaction ID
		 * of MAD.
		 */
		hi_tid = be64_to_cpu(mad->mad_hdr.tid) >> 32;
		list_for_each_entry(entry, &port_priv->agent_list,
				    agent_list) {
			if (entry->agent.hi_tid == hi_tid) {
				mad_agent = entry;
				break;
			}
		}
	} else {
		struct ib_mad_mgmt_class_table *class;
		struct ib_mad_mgmt_method_table *method;
		struct ib_mad_mgmt_vendor_class_table *vendor;
		struct ib_mad_mgmt_vendor_class *vendor_class;
		struct ib_vendor_mad *vendor_mad;
		int index;

		/*
		 * Routing is based on version, class, and method
		 * For "newer" vendor MADs, also based on OUI
		 */
		if (mad->mad_hdr.class_version >= MAX_MGMT_VERSION)
			goto out;
		if (!is_vendor_class(mad->mad_hdr.mgmt_class)) {
			class = port_priv->version[
					mad->mad_hdr.class_version].class;
			if (!class)
				goto out;
			method = class->method_table[convert_mgmt_class(
							mad->mad_hdr.mgmt_class)];
			if (method)
				mad_agent = method->agent[mad->mad_hdr.method &
							  ~IB_MGMT_METHOD_RESP];
		} else {
			vendor = port_priv->version[
					mad->mad_hdr.class_version].vendor;
			if (!vendor)
				goto out;
			vendor_class = vendor->vendor_class[vendor_class_index(
						mad->mad_hdr.mgmt_class)];
			if (!vendor_class)
				goto out;
			/* Find matching OUI */
			vendor_mad = (struct ib_vendor_mad *)mad;
			index = find_vendor_oui(vendor_class, vendor_mad->oui);
			if (index == -1)
				goto out;
			method = vendor_class->method_table[index];
			if (method) {
				mad_agent = method->agent[mad->mad_hdr.method &
							  ~IB_MGMT_METHOD_RESP];
			}
		}
	}

	if (mad_agent) {
		if (mad_agent->agent.recv_handler)
			atomic_inc(&mad_agent->refcount);
		else {
			printk(KERN_NOTICE PFX "No receive handler for client "
			       "%p on port %d\n",
			       &mad_agent->agent, port_priv->port_num);
			mad_agent = NULL;
		}
	}
out:
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	return mad_agent;
}

static int validate_mad(struct ib_mad *mad, u32 qp_num)
{
	int valid = 0;

	/* Make sure MAD base version is understood */
	if (mad->mad_hdr.base_version != IB_MGMT_BASE_VERSION) {
		printk(KERN_ERR PFX "MAD received with unsupported base "
		       "version %d\n", mad->mad_hdr.base_version);
		goto out;
	}

	/* Filter SMI packets sent to other than QP0 */
	if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
	    (mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
		if (qp_num == 0)
			valid = 1;
	} else {
		/* Filter GSI packets sent to QP0 */
		if (qp_num != 0)
			valid = 1;
	}

out:
	return valid;
}

static int is_data_mad(struct ib_mad_agent_private *mad_agent_priv,
		       struct ib_mad_hdr *mad_hdr)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
	return !mad_agent_priv->agent.rmpp_version ||
		!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
				    IB_MGMT_RMPP_FLAG_ACTIVE) ||
		(rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
}

struct ib_mad_send_wr_private*
ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, __be64 tid)
{
	struct ib_mad_send_wr_private *mad_send_wr;

	list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
			    agent_list) {
		if (mad_send_wr->tid == tid)
			return mad_send_wr;
	}

	/*
	 * It's possible to receive the response before we've
	 * been notified that the send has completed
	 */
	list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
			    agent_list) {
		if (is_data_mad(mad_agent_priv, mad_send_wr->send_buf.mad) &&
		    mad_send_wr->tid == tid && mad_send_wr->timeout) {
			/* Verify request has not been canceled */
			return (mad_send_wr->status == IB_WC_SUCCESS) ?
				mad_send_wr : NULL;
		}
	}
	return NULL;
}
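/*
 * Illustrative sketch of the TID convention the routing above relies on
 * (client-side view; "user_tid" is a hypothetical, caller-chosen lower
 * 32 bits): the agent's hi_tid occupies the upper 32 bits of the
 * transaction ID, so a request built as
 *
 *	mad->mad_hdr.tid = cpu_to_be64(((u64) agent->hi_tid << 32) | user_tid);
 *
 * lets find_mad_agent() recover the owning agent via
 * be64_to_cpu(mad->mad_hdr.tid) >> 32, while ib_find_send_mad() matches
 * the full 64-bit TID against outstanding requests.
 */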
void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
{
	mad_send_wr->timeout = 0;
	if (mad_send_wr->refcount == 1) {
		list_del(&mad_send_wr->agent_list);
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_send_wr->mad_agent_priv->done_list);
	}
}

static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
				 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags;
	__be64 tid;

	INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
	list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
	if (mad_agent_priv->agent.rmpp_version) {
		mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
						      mad_recv_wc);
		if (!mad_recv_wc) {
			if (atomic_dec_and_test(&mad_agent_priv->refcount))
				wake_up(&mad_agent_priv->wait);
			return;
		}
	}

	/* Complete corresponding request */
	if (response_mad(mad_recv_wc->recv_buf.mad)) {
		tid = mad_recv_wc->recv_buf.mad->mad_hdr.tid;
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		mad_send_wr = ib_find_send_mad(mad_agent_priv, tid);
		if (!mad_send_wr) {
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
			ib_free_recv_mad(mad_recv_wc);
			if (atomic_dec_and_test(&mad_agent_priv->refcount))
				wake_up(&mad_agent_priv->wait);
			return;
		}
		ib_mark_mad_done(mad_send_wr);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		/* Defined behavior is to complete response before request */
		mad_recv_wc->wc->wr_id = (unsigned long) &mad_send_wr->send_buf;
		mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
						   mad_recv_wc);
		atomic_dec(&mad_agent_priv->refcount);

		mad_send_wc.status = IB_WC_SUCCESS;
		mad_send_wc.vendor_err = 0;
		mad_send_wc.send_buf = &mad_send_wr->send_buf;
		ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
	} else {
		mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
						   mad_recv_wc);
		if (atomic_dec_and_test(&mad_agent_priv->refcount))
			wake_up(&mad_agent_priv->wait);
	}
}

static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
				     struct ib_wc *wc)
{
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *recv, *response;
	struct ib_mad_list_head *mad_list;
	struct ib_mad_agent_private *mad_agent;

	response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
	if (!response)
		printk(KERN_ERR PFX "ib_mad_recv_done_handler no memory "
		       "for response buffer\n");

	mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
	qp_info = mad_list->mad_queue->qp_info;
	dequeue_mad(mad_list);

	mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
				    mad_list);
	recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
	dma_unmap_single(port_priv->device->dma_device,
			 pci_unmap_addr(&recv->header, mapping),
			 sizeof(struct ib_mad_private) -
			 sizeof(struct ib_mad_private_header),
			 DMA_FROM_DEVICE);

	/* Setup MAD receive work completion from "normal" work completion */
	recv->header.wc = *wc;
	recv->header.recv_wc.wc = &recv->header.wc;
	recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
	recv->header.recv_wc.recv_buf.mad = &recv->mad.mad;
	recv->header.recv_wc.recv_buf.grh = &recv->grh;

	if (atomic_read(&qp_info->snoop_count))
		snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);

	/* Validate MAD */
	if (!validate_mad(&recv->mad.mad, qp_info->qp->qp_num))
		goto out;

	if (recv->mad.mad.mad_hdr.mgmt_class ==
	    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		if (!smi_handle_dr_smp_recv(&recv->mad.smp,
					    port_priv->device->node_type,
					    port_priv->port_num,
					    port_priv->device->phys_port_cnt))
			goto out;
		if (!smi_check_forward_dr_smp(&recv->mad.smp))
			goto local;
		if (!smi_handle_dr_smp_send(&recv->mad.smp,
					    port_priv->device->node_type,
					    port_priv->port_num))
			goto out;
		if (!smi_check_local_dr_smp(&recv->mad.smp,
					    port_priv->device,
					    port_priv->port_num))
			goto out;
	}

local:
	/* Give driver "right of first refusal" on incoming MAD */
	if (port_priv->device->process_mad) {
		int ret;

		if (!response) {
			printk(KERN_ERR PFX "No memory for response MAD\n");
			/*
			 * Is it better to assume that
			 * it wouldn't be processed ?
			 */
			goto out;
		}

		ret = port_priv->device->process_mad(port_priv->device, 0,
						     port_priv->port_num,
						     wc, &recv->grh,
						     &recv->mad.mad,
						     &response->mad.mad);
		if (ret & IB_MAD_RESULT_SUCCESS) {
			if (ret & IB_MAD_RESULT_CONSUMED)
				goto out;
			if (ret & IB_MAD_RESULT_REPLY) {
				agent_send_response(&response->mad.mad,
						    &recv->grh, wc,
						    port_priv->device,
						    port_priv->port_num,
						    qp_info->qp->qp_num);
				goto out;
			}
		}
	}

	mad_agent = find_mad_agent(port_priv, &recv->mad.mad);
	if (mad_agent) {
		ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
		/*
		 * recv is freed up in error cases in ib_mad_complete_recv
		 * or via recv_handler in ib_mad_complete_recv()
		 */
		recv = NULL;
	}

out:
	/* Post another receive request for this QP */
	if (response) {
		ib_mad_post_receive_mads(qp_info, response);
		if (recv)
			kmem_cache_free(ib_mad_cache, recv);
	} else
		ib_mad_post_receive_mads(qp_info, recv);
}
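/*
 * Illustrative sketch of how a driver's process_mad() result drives the
 * dispatch above: a driver that answers a request itself would return
 *
 *	IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY
 *
 * causing agent_send_response() to be called and client dispatch skipped;
 * IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED drops the MAD with no
 * reply; any other result falls through to find_mad_agent().
 */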
static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long delay;

	if (list_empty(&mad_agent_priv->wait_list)) {
		cancel_delayed_work(&mad_agent_priv->timed_work);
	} else {
		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
					 struct ib_mad_send_wr_private,
					 agent_list);

		if (time_after(mad_agent_priv->timeout,
			       mad_send_wr->timeout)) {
			mad_agent_priv->timeout = mad_send_wr->timeout;
			cancel_delayed_work(&mad_agent_priv->timed_work);
			delay = mad_send_wr->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			queue_delayed_work(mad_agent_priv->qp_info->
					   port_priv->wq,
					   &mad_agent_priv->timed_work, delay);
		}
	}
}

static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *temp_mad_send_wr;
	struct list_head *list_item;
	unsigned long delay;

	mad_agent_priv = mad_send_wr->mad_agent_priv;
	list_del(&mad_send_wr->agent_list);

	delay = mad_send_wr->timeout;
	mad_send_wr->timeout += jiffies;

	if (delay) {
		list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
			temp_mad_send_wr = list_entry(list_item,
						struct ib_mad_send_wr_private,
						agent_list);
			if (time_after(mad_send_wr->timeout,
				       temp_mad_send_wr->timeout))
				break;
		}
	} else
		list_item = &mad_agent_priv->wait_list;
	list_add(&mad_send_wr->agent_list, list_item);

	/* Reschedule a work item if we have a shorter timeout */
	if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) {
		cancel_delayed_work(&mad_agent_priv->timed_work);
		queue_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
				   &mad_agent_priv->timed_work, delay);
	}
}

void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
			  int timeout_ms)
{
	mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
	wait_for_response(mad_send_wr);
}
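/*
 * Worked example of the timeout bookkeeping in wait_for_response() above
 * (illustrative; assumes HZ == 1000): a 500 ms timeout arrives as
 * delay = msecs_to_jiffies(500) = 500 jiffies and is converted to the
 * absolute expiry jiffies + 500.  The list_for_each_prev() walk inserts
 * the request so that wait_list stays sorted by expiry, which is why one
 * delayed work item per agent, armed for wait_list.next, is sufficient.
 */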
/*
 * Process a send work completion
 */
void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
			     struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_mad_agent_private *mad_agent_priv;
	unsigned long flags;
	int ret;

	mad_agent_priv = mad_send_wr->mad_agent_priv;
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	if (mad_agent_priv->agent.rmpp_version) {
		ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
		if (ret == IB_RMPP_RESULT_CONSUMED)
			goto done;
	} else
		ret = IB_RMPP_RESULT_UNHANDLED;

	if (mad_send_wc->status != IB_WC_SUCCESS &&
	    mad_send_wr->status == IB_WC_SUCCESS) {
		mad_send_wr->status = mad_send_wc->status;
		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
	}

	if (--mad_send_wr->refcount > 0) {
		if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
		    mad_send_wr->status == IB_WC_SUCCESS) {
			wait_for_response(mad_send_wr);
		}
		goto done;
	}

	/* Remove send from MAD agent and notify client of completion */
	list_del(&mad_send_wr->agent_list);
	adjust_timeout(mad_agent_priv);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

	if (mad_send_wr->status != IB_WC_SUCCESS)
		mad_send_wc->status = mad_send_wr->status;
	if (ret == IB_RMPP_RESULT_INTERNAL)
		ib_rmpp_send_handler(mad_send_wc);
	else
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   mad_send_wc);

	/* Release reference on agent taken when sending */
	if (atomic_dec_and_test(&mad_agent_priv->refcount))
		wake_up(&mad_agent_priv->wait);
	return;

done:
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}

static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
				     struct ib_wc *wc)
{
	struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr;