📄 mad_rmpp.c
/*
 * Copyright (c) 2005 Intel Inc. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mad_rmpp.c 1921 2005-03-02 22:58:44Z sean.hefty $
 */
#include <linux/dma-mapping.h>

#include "mad_priv.h"
#include "mad_rmpp.h"

enum rmpp_state {
        RMPP_STATE_ACTIVE,
        RMPP_STATE_TIMEOUT,
        RMPP_STATE_COMPLETE
};

/* Per-transfer state for one in-progress RMPP receive (reassembly). */
struct mad_rmpp_recv {
        struct ib_mad_agent_private *agent;
        struct list_head list;
        struct work_struct timeout_work;
        struct work_struct cleanup_work;
        wait_queue_head_t wait;
        enum rmpp_state state;
        spinlock_t lock;
        atomic_t refcount;

        struct ib_ah *ah;
        struct ib_mad_recv_wc *rmpp_wc;
        struct ib_mad_recv_buf *cur_seg_buf;
        int last_ack;
        int seg_num;
        int newwin;

        /* Fields identifying the transfer: TID, source QP/LID, class, method. */
        __be64 tid;
        u32 src_qp;
        u16 slid;
        u8 mgmt_class;
        u8 class_version;
        u8 method;
};

static void destroy_rmpp_recv(struct mad_rmpp_recv *rmpp_recv)
{
        atomic_dec(&rmpp_recv->refcount);
        wait_event(rmpp_recv->wait, !atomic_read(&rmpp_recv->refcount));
        ib_destroy_ah(rmpp_recv->ah);
        kfree(rmpp_recv);
}

void ib_cancel_rmpp_recvs(struct ib_mad_agent_private *agent)
{
        struct mad_rmpp_recv *rmpp_recv, *temp_rmpp_recv;
        unsigned long flags;

        spin_lock_irqsave(&agent->lock, flags);
        list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
                cancel_delayed_work(&rmpp_recv->timeout_work);
                cancel_delayed_work(&rmpp_recv->cleanup_work);
        }
        spin_unlock_irqrestore(&agent->lock, flags);

        flush_workqueue(agent->qp_info->port_priv->wq);

        list_for_each_entry_safe(rmpp_recv, temp_rmpp_recv,
                                 &agent->rmpp_list, list) {
                list_del(&rmpp_recv->list);
                if (rmpp_recv->state != RMPP_STATE_COMPLETE)
                        ib_free_recv_mad(rmpp_recv->rmpp_wc);
                destroy_rmpp_recv(rmpp_recv);
        }
}

/* Offset of the class payload within the MAD, by management class. */
static int data_offset(u8 mgmt_class)
{
        if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
                return IB_MGMT_SA_HDR;
        else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
                 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
                return IB_MGMT_VENDOR_HDR;
        else
                return IB_MGMT_RMPP_HDR;
}

/* Build an RMPP ACK from the headers of the received segment. */
static void format_ack(struct ib_rmpp_mad *ack,
                       struct ib_rmpp_mad *data,
                       struct mad_rmpp_recv *rmpp_recv)
{
        unsigned long flags;

        memcpy(&ack->mad_hdr, &data->mad_hdr,
               data_offset(data->mad_hdr.mgmt_class));

        ack->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
        ack->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ACK;
        ib_set_rmpp_flags(&ack->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);

        spin_lock_irqsave(&rmpp_recv->lock, flags);
        rmpp_recv->last_ack = rmpp_recv->seg_num;
        ack->rmpp_hdr.seg_num = cpu_to_be32(rmpp_recv->seg_num);
        ack->rmpp_hdr.paylen_newwin = cpu_to_be32(rmpp_recv->newwin);
        spin_unlock_irqrestore(&rmpp_recv->lock, flags);
}

static void ack_recv(struct mad_rmpp_recv *rmpp_recv,
                     struct ib_mad_recv_wc *recv_wc)
{
        struct ib_mad_send_buf *msg;
        int ret;

        msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp,
                                 recv_wc->wc->pkey_index, 1, IB_MGMT_RMPP_HDR,
                                 IB_MGMT_RMPP_DATA, GFP_KERNEL);
        if (!msg)
                return;

        format_ack(msg->mad, (struct ib_rmpp_mad *) recv_wc->recv_buf.mad,
                   rmpp_recv);
        msg->ah = rmpp_recv->ah;
        ret = ib_post_send_mad(msg, NULL);
        if (ret)
                ib_free_send_mad(msg);
}

static struct ib_mad_send_buf *alloc_response_msg(struct ib_mad_agent *agent,
                                                  struct ib_mad_recv_wc *recv_wc)
{
        struct ib_mad_send_buf *msg;
        struct ib_ah *ah;

        ah = ib_create_ah_from_wc(agent->qp->pd, recv_wc->wc,
                                  recv_wc->recv_buf.grh, agent->port_num);
        if (IS_ERR(ah))
                return (void *) ah;

        msg = ib_create_send_mad(agent, recv_wc->wc->src_qp,
                                 recv_wc->wc->pkey_index, 1,
                                 IB_MGMT_RMPP_HDR, IB_MGMT_RMPP_DATA,
                                 GFP_KERNEL);
        if (IS_ERR(msg))
                ib_destroy_ah(ah);
        else
                msg->ah = ah;
        return msg;
}

void ib_rmpp_send_handler(struct ib_mad_send_wc *mad_send_wc)
{
        struct ib_rmpp_mad *rmpp_mad = mad_send_wc->send_buf->mad;

        if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_ACK)
                ib_destroy_ah(mad_send_wc->send_buf->ah);
        ib_free_send_mad(mad_send_wc->send_buf);
}

/* Reject the transfer by returning an RMPP ABORT with the given status. */
static void nack_recv(struct ib_mad_agent_private *agent,
                      struct ib_mad_recv_wc *recv_wc, u8 rmpp_status)
{
        struct ib_mad_send_buf *msg;
        struct ib_rmpp_mad *rmpp_mad;
        int ret;

        msg = alloc_response_msg(&agent->agent, recv_wc);
        if (IS_ERR(msg))
                return;

        rmpp_mad = msg->mad;
        memcpy(rmpp_mad, recv_wc->recv_buf.mad,
               data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class));

        rmpp_mad->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
        rmpp_mad->rmpp_hdr.rmpp_version = IB_MGMT_RMPP_VERSION;
        rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ABORT;
        ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
        rmpp_mad->rmpp_hdr.rmpp_status = rmpp_status;
        rmpp_mad->rmpp_hdr.seg_num = 0;
        rmpp_mad->rmpp_hdr.paylen_newwin = 0;

        ret = ib_post_send_mad(msg, NULL);
        if (ret) {
                ib_destroy_ah(msg->ah);
                ib_free_send_mad(msg);
        }
}

static void recv_timeout_handler(void *data)
{
        struct mad_rmpp_recv *rmpp_recv = data;
        struct ib_mad_recv_wc *rmpp_wc;
        unsigned long flags;

        spin_lock_irqsave(&rmpp_recv->agent->lock, flags);
        if (rmpp_recv->state != RMPP_STATE_ACTIVE) {
                spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);
                return;
        }
        rmpp_recv->state = RMPP_STATE_TIMEOUT;
        list_del(&rmpp_recv->list);
        spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);

        rmpp_wc = rmpp_recv->rmpp_wc;
        nack_recv(rmpp_recv->agent, rmpp_wc, IB_MGMT_RMPP_STATUS_T2L);
        destroy_rmpp_recv(rmpp_recv);
        ib_free_recv_mad(rmpp_wc);
}

static void recv_cleanup_handler(void *data)
{
        struct mad_rmpp_recv *rmpp_recv = data;
        unsigned long flags;

        spin_lock_irqsave(&rmpp_recv->agent->lock, flags);
        list_del(&rmpp_recv->list);
        spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);
        destroy_rmpp_recv(rmpp_recv);
}

static struct mad_rmpp_recv *
create_rmpp_recv(struct ib_mad_agent_private *agent,
                 struct ib_mad_recv_wc *mad_recv_wc)
{
        struct mad_rmpp_recv *rmpp_recv;
        struct ib_mad_hdr *mad_hdr;

        rmpp_recv = kmalloc(sizeof *rmpp_recv, GFP_KERNEL);
        if (!rmpp_recv)
                return NULL;

        rmpp_recv->ah = ib_create_ah_from_wc(agent->agent.qp->pd,
                                             mad_recv_wc->wc,
                                             mad_recv_wc->recv_buf.grh,
                                             agent->agent.port_num);
        if (IS_ERR(rmpp_recv->ah))
                goto error;

        rmpp_recv->agent = agent;
        init_waitqueue_head(&rmpp_recv->wait);
        INIT_WORK(&rmpp_recv->timeout_work, recv_timeout_handler, rmpp_recv);
        INIT_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler, rmpp_recv);
        spin_lock_init(&rmpp_recv->lock);
        rmpp_recv->state = RMPP_STATE_ACTIVE;
        atomic_set(&rmpp_recv->refcount, 1);
        rmpp_recv->rmpp_wc = mad_recv_wc;
        rmpp_recv->cur_seg_buf = &mad_recv_wc->recv_buf;
        rmpp_recv->newwin = 1;
        rmpp_recv->seg_num = 1;
        rmpp_recv->last_ack = 0;

        mad_hdr = &mad_recv_wc->recv_buf.mad->mad_hdr;
        rmpp_recv->tid = mad_hdr->tid;
        rmpp_recv->src_qp = mad_recv_wc->wc->src_qp;
        rmpp_recv->slid = mad_recv_wc->wc->slid;
        rmpp_recv->mgmt_class = mad_hdr->mgmt_class;
        rmpp_recv->class_version = mad_hdr->class_version;
        rmpp_recv->method = mad_hdr->method;
        return rmpp_recv;

error:  kfree(rmpp_recv);
        return NULL;
}

static inline void deref_rmpp_recv(struct mad_rmpp_recv *rmpp_recv)
{
        if (atomic_dec_and_test(&rmpp_recv->refcount))
                wake_up(&rmpp_recv->wait);
}

static struct mad_rmpp_recv *
find_rmpp_recv(struct ib_mad_agent_private *agent,
               struct ib_mad_recv_wc *mad_recv_wc)
{
        struct mad_rmpp_recv *rmpp_recv;
        struct ib_mad_hdr *mad_hdr = &mad_recv_wc->recv_buf.mad->mad_hdr;

        list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
                if (rmpp_recv->tid == mad_hdr->tid &&
                    rmpp_recv->src_qp == mad_recv_wc->wc->src_qp &&
                    rmpp_recv->slid == mad_recv_wc->wc->slid &&
                    rmpp_recv->mgmt_class == mad_hdr->mgmt_class &&
                    rmpp_recv->class_version == mad_hdr->class_version &&
                    rmpp_recv->method == mad_hdr->method)
                        return rmpp_recv;
        }
        return NULL;
}

static struct mad_rmpp_recv *
acquire_rmpp_recv(struct ib_mad_agent_private *agent,
                  struct ib_mad_recv_wc *mad_recv_wc)
{
        struct mad_rmpp_recv *rmpp_recv;
        unsigned long flags;

        spin_lock_irqsave(&agent->lock, flags);
        rmpp_recv = find_rmpp_recv(agent, mad_recv_wc);
        if (rmpp_recv)
                atomic_inc(&rmpp_recv->refcount);
        spin_unlock_irqrestore(&agent->lock, flags);
        return rmpp_recv;
}

static struct mad_rmpp_recv *
insert_rmpp_recv(struct ib_mad_agent_private *agent,
                 struct mad_rmpp_recv *rmpp_recv)
{
        struct mad_rmpp_recv *cur_rmpp_recv;

        cur_rmpp_recv = find_rmpp_recv(agent, rmpp_recv->rmpp_wc);
        if (!cur_rmpp_recv)
                list_add_tail(&rmpp_recv->list, &agent->rmpp_list);

        return cur_rmpp_recv;
}

static inline int get_last_flag(struct ib_mad_recv_buf *seg)
{
        struct ib_rmpp_mad *rmpp_mad;

        rmpp_mad = (struct ib_rmpp_mad *) seg->mad;
        return ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_LAST;
}

static inline int get_seg_num(struct ib_mad_recv_buf *seg)
{
        struct ib_rmpp_mad *rmpp_mad;

        rmpp_mad = (struct ib_rmpp_mad *) seg->mad;
        return be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num);
}

static inline struct ib_mad_recv_buf * get_next_seg(struct list_head *rmpp_list,
                                                    struct ib_mad_recv_buf *seg)
{
        if (seg->list.next == rmpp_list)
                return NULL;

        return container_of(seg->list.next, struct ib_mad_recv_buf, list);
}

/* Receive window: 1/8 of the receive queue depth, but at least 1. */
static inline int window_size(struct ib_mad_agent_private *agent)
{
        return max(agent->qp_info->recv_queue.max_active >> 3, 1);
}

static struct ib_mad_recv_buf * find_seg_location(struct list_head *rmpp_list,
                                                  int seg_num)
{
        struct ib_mad_recv_buf *seg_buf;
        int cur_seg_num;

        list_for_each_entry_reverse(seg_buf, rmpp_list, list) {
                cur_seg_num = get_seg_num(seg_buf);
                if (seg_num > cur_seg_num)
                        return seg_buf;
                if (seg_num == cur_seg_num)
                        break;
        }
        return NULL;
}

static void update_seg_num(struct mad_rmpp_recv *rmpp_recv,
                           struct ib_mad_recv_buf *new_buf)
{
        struct list_head *rmpp_list = &rmpp_recv->rmpp_wc->rmpp_list;

        while (new_buf && (get_seg_num(new_buf) == rmpp_recv->seg_num + 1)) {
                rmpp_recv->cur_seg_buf = new_buf;
                rmpp_recv->seg_num++;
                new_buf = get_next_seg(rmpp_list, new_buf);
        }
}

/*
 * Total length of the coalesced MAD: one set of headers plus the data
 * portion of every segment, minus the padding given by PayloadLength.
 */
static inline int get_mad_len(struct mad_rmpp_recv *rmpp_recv)
{
        struct ib_rmpp_mad *rmpp_mad;
        int hdr_size, data_size, pad;

        rmpp_mad = (struct ib_rmpp_mad *)rmpp_recv->cur_seg_buf->mad;

        hdr_size = data_offset(rmpp_mad->mad_hdr.mgmt_class);
        data_size = sizeof(struct ib_rmpp_mad) - hdr_size;
        pad = IB_MGMT_RMPP_DATA - be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
        if (pad > IB_MGMT_RMPP_DATA || pad < 0)
                pad = 0;

        return hdr_size + rmpp_recv->seg_num * data_size - pad;
}

/* Send the final ACK, report the total length, and schedule cleanup. */
static struct ib_mad_recv_wc * complete_rmpp(struct mad_rmpp_recv *rmpp_recv)
{
        struct ib_mad_recv_wc *rmpp_wc;

        ack_recv(rmpp_recv, rmpp_recv->rmpp_wc);
        if (rmpp_recv->seg_num > 1)
                cancel_delayed_work(&rmpp_recv->timeout_work);

        rmpp_wc = rmpp_recv->rmpp_wc;
        rmpp_wc->mad_len = get_mad_len(rmpp_recv);
        /* 10 seconds until we can find the packet lifetime */
        queue_delayed_work(rmpp_recv->agent->qp_info->port_priv->wq,
                           &rmpp_recv->cleanup_work, msecs_to_jiffies(10000));
        return rmpp_wc;
}

void ib_coalesce_recv_mad(struct ib_mad_recv_wc *mad_recv_wc, void *buf)
{
        struct ib_mad_recv_buf *seg_buf;
        struct ib_rmpp_mad *rmpp_mad;
        void *data;
        int size, len, offset;
        u8 flags;

        len = mad_recv_wc->mad_len;
        if (len <= sizeof(struct ib_mad)) {
                memcpy(buf, mad_recv_wc->recv_buf.mad, len);
                return;
        }

        offset = data_offset(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class);

        list_for_each_entry(seg_buf, &mad_recv_wc->rmpp_list, list) {
                rmpp_mad = (struct ib_rmpp_mad *)seg_buf->mad;
                flags = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr);

                if (flags & IB_MGMT_RMPP_FLAG_FIRST) {
                        data = rmpp_mad;
                        size = sizeof(*rmpp_mad);
                } else {
                        data = (void *) rmpp_mad + offset;
                        if (flags & IB_MGMT_RMPP_FLAG_LAST)
                                size = len;
                        else
                                size = sizeof(*rmpp_mad) - offset;
                }

                memcpy(buf, data, size);
                len -= size;
                buf += size;
        }
}
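The excerpt above ends with ib_coalesce_recv_mad(), which flattens the per-segment buffers hung off an ib_mad_recv_wc into one contiguous MAD of mad_len bytes. The following sketch shows how a MAD consumer might call it from its receive handler; it is illustrative only and not part of mad_rmpp.c: the handler name is hypothetical, the header path varies across kernel versions of this era, and error handling is kept minimal.

/* Illustrative sketch only -- not part of mad_rmpp.c. */
#include <linux/slab.h>
#include <rdma/ib_mad.h>        /* header location differs on older trees */

/* Hypothetical receive handler registered via ib_register_mad_agent(). */
static void example_recv_handler(struct ib_mad_agent *mad_agent,
                                 struct ib_mad_recv_wc *mad_recv_wc)
{
        /* mad_len covers the coalesced MAD, as computed by get_mad_len(). */
        void *buf = kmalloc(mad_recv_wc->mad_len, GFP_KERNEL);

        if (buf) {
                /* Copy the first segment's headers plus each segment's data. */
                ib_coalesce_recv_mad(mad_recv_wc, buf);
                /* ... consume the contiguous MAD here ... */
                kfree(buf);
        }
        /* Return all RMPP segment buffers to the MAD layer. */
        ib_free_recv_mad(mad_recv_wc);
}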