/*
 * iwcm.c - iWARP connection manager (from the Linux kernel source,
 * drivers/infiniband/core). Extracted from a code-viewer page:
 * page 1 of 2, ~1,023 lines total in the original file.
 */
		BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_RECV);		cm_id_priv->state = IW_CM_STATE_IDLE;		spin_lock_irqsave(&cm_id_priv->lock, flags);		if (cm_id_priv->qp) {			cm_id->device->iwcm->rem_ref(qp);			cm_id_priv->qp = NULL;		}		spin_unlock_irqrestore(&cm_id_priv->lock, flags);		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);		wake_up_all(&cm_id_priv->connect_wait);	}	return ret;}EXPORT_SYMBOL(iw_cm_accept);/* * Active Side: CM_ID <-- CONN_SENT * * If successful, results in the generation of a CONNECT_REPLY * event. iw_cm_disconnect and iw_cm_destroy will block until the * CONNECT_REPLY event is received from the provider. */int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param){	struct iwcm_id_private *cm_id_priv;	int ret;	unsigned long flags;	struct ib_qp *qp;	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);	ret = alloc_work_entries(cm_id_priv, 4);	if (ret)		return ret;	set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);	spin_lock_irqsave(&cm_id_priv->lock, flags);	if (cm_id_priv->state != IW_CM_STATE_IDLE) {		spin_unlock_irqrestore(&cm_id_priv->lock, flags);		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);		wake_up_all(&cm_id_priv->connect_wait);		return -EINVAL;	}	/* Get the ib_qp given the QPN */	qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn);	if (!qp) {		spin_unlock_irqrestore(&cm_id_priv->lock, flags);		return -EINVAL;	}	cm_id->device->iwcm->add_ref(qp);	cm_id_priv->qp = qp;	cm_id_priv->state = IW_CM_STATE_CONN_SENT;	spin_unlock_irqrestore(&cm_id_priv->lock, flags);	ret = cm_id->device->iwcm->connect(cm_id, iw_param);	if (ret) {		spin_lock_irqsave(&cm_id_priv->lock, flags);		if (cm_id_priv->qp) {			cm_id->device->iwcm->rem_ref(qp);			cm_id_priv->qp = NULL;		}		spin_unlock_irqrestore(&cm_id_priv->lock, flags);		BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_SENT);		cm_id_priv->state = IW_CM_STATE_IDLE;		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);		wake_up_all(&cm_id_priv->connect_wait);	}	return 
ret;}EXPORT_SYMBOL(iw_cm_connect);/* * Passive Side: new CM_ID <-- CONN_RECV * * Handles an inbound connect request. The function creates a new * iw_cm_id to represent the new connection and inherits the client * callback function and other attributes from the listening parent. * * The work item contains a pointer to the listen_cm_id and the event. The * listen_cm_id contains the client cm_handler, context and * device. These are copied when the device is cloned. The event * contains the new four tuple. * * An error on the child should not affect the parent, so this * function does not return a value. */static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,				struct iw_cm_event *iw_event){	unsigned long flags;	struct iw_cm_id *cm_id;	struct iwcm_id_private *cm_id_priv;	int ret;	/*	 * The provider should never generate a connection request	 * event with a bad status.	 */	BUG_ON(iw_event->status);	/*	 * We could be destroying the listening id. If so, ignore this	 * upcall.	 
*/	spin_lock_irqsave(&listen_id_priv->lock, flags);	if (listen_id_priv->state != IW_CM_STATE_LISTEN) {		spin_unlock_irqrestore(&listen_id_priv->lock, flags);		goto out;	}	spin_unlock_irqrestore(&listen_id_priv->lock, flags);	cm_id = iw_create_cm_id(listen_id_priv->id.device,				listen_id_priv->id.cm_handler,				listen_id_priv->id.context);	/* If the cm_id could not be created, ignore the request */	if (IS_ERR(cm_id))		goto out;	cm_id->provider_data = iw_event->provider_data;	cm_id->local_addr = iw_event->local_addr;	cm_id->remote_addr = iw_event->remote_addr;	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);	cm_id_priv->state = IW_CM_STATE_CONN_RECV;	ret = alloc_work_entries(cm_id_priv, 3);	if (ret) {		iw_cm_reject(cm_id, NULL, 0);		iw_destroy_cm_id(cm_id);		goto out;	}	/* Call the client CM handler */	ret = cm_id->cm_handler(cm_id, iw_event);	if (ret) {		iw_cm_reject(cm_id, NULL, 0);		set_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);		destroy_cm_id(cm_id);		if (atomic_read(&cm_id_priv->refcount)==0)			free_cm_id(cm_id_priv);	}out:	if (iw_event->private_data_len)		kfree(iw_event->private_data);}/* * Passive Side: CM_ID <-- ESTABLISHED * * The provider generated an ESTABLISHED event which means that * the MPA negotion has completed successfully and we are now in MPA * FPDU mode. * * This event can only be received in the CONN_RECV state. If the * remote peer closed, the ESTABLISHED event would be received followed * by the CLOSE event. If the app closes, it will block until we wake * it up after processing this event. */static int cm_conn_est_handler(struct iwcm_id_private *cm_id_priv,			       struct iw_cm_event *iw_event){	unsigned long flags;	int ret;	spin_lock_irqsave(&cm_id_priv->lock, flags);	/*	 * We clear the CONNECT_WAIT bit here to allow the callback	 * function to call iw_cm_disconnect. Calling iw_destroy_cm_id	 * from a callback handler is not allowed.	 
*/	clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);	BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_RECV);	cm_id_priv->state = IW_CM_STATE_ESTABLISHED;	spin_unlock_irqrestore(&cm_id_priv->lock, flags);	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);	wake_up_all(&cm_id_priv->connect_wait);	return ret;}/* * Active Side: CM_ID <-- ESTABLISHED * * The app has called connect and is waiting for the established event to * post it's requests to the server. This event will wake up anyone * blocked in iw_cm_disconnect or iw_destroy_id. */static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv,			       struct iw_cm_event *iw_event){	unsigned long flags;	int ret;	spin_lock_irqsave(&cm_id_priv->lock, flags);	/*	 * Clear the connect wait bit so a callback function calling	 * iw_cm_disconnect will not wait and deadlock this thread	 */	clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);	BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_SENT);	if (iw_event->status == IW_CM_EVENT_STATUS_ACCEPTED) {		cm_id_priv->id.local_addr = iw_event->local_addr;		cm_id_priv->id.remote_addr = iw_event->remote_addr;		cm_id_priv->state = IW_CM_STATE_ESTABLISHED;	} else {		/* REJECTED or RESET */		cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);		cm_id_priv->qp = NULL;		cm_id_priv->state = IW_CM_STATE_IDLE;	}	spin_unlock_irqrestore(&cm_id_priv->lock, flags);	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);	if (iw_event->private_data_len)		kfree(iw_event->private_data);	/* Wake up waiters on connect complete */	wake_up_all(&cm_id_priv->connect_wait);	return ret;}/* * CM_ID <-- CLOSING * * If in the ESTABLISHED state, move to CLOSING. 
*/static void cm_disconnect_handler(struct iwcm_id_private *cm_id_priv,				  struct iw_cm_event *iw_event){	unsigned long flags;	spin_lock_irqsave(&cm_id_priv->lock, flags);	if (cm_id_priv->state == IW_CM_STATE_ESTABLISHED)		cm_id_priv->state = IW_CM_STATE_CLOSING;	spin_unlock_irqrestore(&cm_id_priv->lock, flags);}/* * CM_ID <-- IDLE * * If in the ESTBLISHED or CLOSING states, the QP will have have been * moved by the provider to the ERR state. Disassociate the CM_ID from * the QP,  move to IDLE, and remove the 'connected' reference. * * If in some other state, the cm_id was destroyed asynchronously. * This is the last reference that will result in waking up * the app thread blocked in iw_destroy_cm_id. */static int cm_close_handler(struct iwcm_id_private *cm_id_priv,				  struct iw_cm_event *iw_event){	unsigned long flags;	int ret = 0;	spin_lock_irqsave(&cm_id_priv->lock, flags);	if (cm_id_priv->qp) {		cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);		cm_id_priv->qp = NULL;	}	switch (cm_id_priv->state) {	case IW_CM_STATE_ESTABLISHED:	case IW_CM_STATE_CLOSING:		cm_id_priv->state = IW_CM_STATE_IDLE;		spin_unlock_irqrestore(&cm_id_priv->lock, flags);		ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);		spin_lock_irqsave(&cm_id_priv->lock, flags);		break;	case IW_CM_STATE_DESTROYING:		break;	default:		BUG();	}	spin_unlock_irqrestore(&cm_id_priv->lock, flags);	return ret;}static int process_event(struct iwcm_id_private *cm_id_priv,			 struct iw_cm_event *iw_event){	int ret = 0;	switch (iw_event->event) {	case IW_CM_EVENT_CONNECT_REQUEST:		cm_conn_req_handler(cm_id_priv, iw_event);		break;	case IW_CM_EVENT_CONNECT_REPLY:		ret = cm_conn_rep_handler(cm_id_priv, iw_event);		break;	case IW_CM_EVENT_ESTABLISHED:		ret = cm_conn_est_handler(cm_id_priv, iw_event);		break;	case IW_CM_EVENT_DISCONNECT:		cm_disconnect_handler(cm_id_priv, iw_event);		break;	case IW_CM_EVENT_CLOSE:		ret = cm_close_handler(cm_id_priv, iw_event);		break;	default:		BUG();	}	return ret;}/* 
* Process events on the work_list for the cm_id. If the callback * function requests that the cm_id be deleted, a flag is set in the * cm_id flags to indicate that when the last reference is * removed, the cm_id is to be destroyed. This is necessary to * distinguish between an object that will be destroyed by the app * thread asleep on the destroy_comp list vs. an object destroyed * here synchronously when the last reference is removed. */static void cm_work_handler(struct work_struct *_work){	struct iwcm_work *work = container_of(_work, struct iwcm_work, work);	struct iw_cm_event levent;	struct iwcm_id_private *cm_id_priv = work->cm_id;	unsigned long flags;	int empty;	int ret = 0;	spin_lock_irqsave(&cm_id_priv->lock, flags);	empty = list_empty(&cm_id_priv->work_list);	while (!empty) {		work = list_entry(cm_id_priv->work_list.next,				  struct iwcm_work, list);		list_del_init(&work->list);		empty = list_empty(&cm_id_priv->work_list);		levent = work->event;		put_work(work);		spin_unlock_irqrestore(&cm_id_priv->lock, flags);		ret = process_event(cm_id_priv, &levent);		if (ret) {			set_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);			destroy_cm_id(&cm_id_priv->id);		}		BUG_ON(atomic_read(&cm_id_priv->refcount)==0);		if (iwcm_deref_id(cm_id_priv)) {			if (test_bit(IWCM_F_CALLBACK_DESTROY,				     &cm_id_priv->flags)) {				BUG_ON(!list_empty(&cm_id_priv->work_list));				free_cm_id(cm_id_priv);			}			return;		}		spin_lock_irqsave(&cm_id_priv->lock, flags);	}	spin_unlock_irqrestore(&cm_id_priv->lock, flags);}/* * This function is called on interrupt context. Schedule events on * the iwcm_wq thread to allow callback functions to downcall into * the CM and/or block.  Events are queued to a per-CM_ID * work_list. If this is the first event on the work_list, the work * element is also queued on the iwcm_wq thread. * * Each event holds a reference on the cm_id. Until the last posted * event has been delivered and processed, the cm_id cannot be * deleted. 
* * Returns: * 	      0	- the event was handled. *	-ENOMEM	- the event was not handled due to lack of resources. */static int cm_event_handler(struct iw_cm_id *cm_id,			     struct iw_cm_event *iw_event){	struct iwcm_work *work;	struct iwcm_id_private *cm_id_priv;	unsigned long flags;	int ret = 0;	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);	spin_lock_irqsave(&cm_id_priv->lock, flags);	work = get_work(cm_id_priv);	if (!work) {		ret = -ENOMEM;		goto out;	}	INIT_WORK(&work->work, cm_work_handler);	work->cm_id = cm_id_priv;	work->event = *iw_event;	if ((work->event.event == IW_CM_EVENT_CONNECT_REQUEST ||	     work->event.event == IW_CM_EVENT_CONNECT_REPLY) &&	    work->event.private_data_len) {		ret = copy_private_data(&work->event);		if (ret) {			put_work(work);			goto out;		}	}	atomic_inc(&cm_id_priv->refcount);	if (list_empty(&cm_id_priv->work_list)) {		list_add_tail(&work->list, &cm_id_priv->work_list);		queue_work(iwcm_wq, &work->work);	} else		list_add_tail(&work->list, &cm_id_priv->work_list);out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);	return ret;}static int iwcm_init_qp_init_attr(struct iwcm_id_private *cm_id_priv,				  struct ib_qp_attr *qp_attr,				  int *qp_attr_mask){	unsigned long flags;	int ret;	spin_lock_irqsave(&cm_id_priv->lock, flags);	switch (cm_id_priv->state) {	case IW_CM_STATE_IDLE:	case IW_CM_STATE_CONN_SENT:	case IW_CM_STATE_CONN_RECV:	case IW_CM_STATE_ESTABLISHED:		*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;		qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE |					   IB_ACCESS_REMOTE_WRITE|					   IB_ACCESS_REMOTE_READ;		ret = 0;		break;	default:		ret = -EINVAL;		break;	}	spin_unlock_irqrestore(&cm_id_priv->lock, flags);	return ret;}static int iwcm_init_qp_rts_attr(struct iwcm_id_private *cm_id_priv,				  struct ib_qp_attr *qp_attr,				  int *qp_attr_mask){	unsigned long flags;	int ret;	spin_lock_irqsave(&cm_id_priv->lock, flags);	switch (cm_id_priv->state) {	case IW_CM_STATE_IDLE:	case IW_CM_STATE_CONN_SENT:	
case IW_CM_STATE_CONN_RECV:	case IW_CM_STATE_ESTABLISHED:		*qp_attr_mask = 0;		ret = 0;		break;	default:		ret = -EINVAL;		break;	}	spin_unlock_irqrestore(&cm_id_priv->lock, flags);	return ret;}int iw_cm_init_qp_attr(struct iw_cm_id *cm_id,		       struct ib_qp_attr *qp_attr,		       int *qp_attr_mask){	struct iwcm_id_private *cm_id_priv;	int ret;	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);	switch (qp_attr->qp_state) {	case IB_QPS_INIT:	case IB_QPS_RTR:		ret = iwcm_init_qp_init_attr(cm_id_priv,					     qp_attr, qp_attr_mask);		break;	case IB_QPS_RTS:		ret = iwcm_init_qp_rts_attr(cm_id_priv,					    qp_attr, qp_attr_mask);		break;	default:		ret = -EINVAL;		break;	}	return ret;}EXPORT_SYMBOL(iw_cm_init_qp_attr);static int __init iw_cm_init(void){	iwcm_wq = create_singlethread_workqueue("iw_cm_wq");	if (!iwcm_wq)		return -ENOMEM;	return 0;}static void __exit iw_cm_cleanup(void){	destroy_workqueue(iwcm_wq);}module_init(iw_cm_init);module_exit(iw_cm_cleanup);

/* (code-viewer UI help text removed: keyboard-shortcut hints, not part of the source) */