⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 qswlnd.c

📁 非常经典的一个分布式系统
💻 C
📖 第 1 页 / 共 2 页
字号:
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 * Copyright (C) 2002-2004 Cluster File Systems, Inc.
 *   Author: Eric Barton <eric@bartonsoftware.com>
 *
 * This file is part of Portals, http://www.lustre.org
 *
 * Portals is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Portals is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Portals; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

#include "qswlnd.h"

/* LND operations table for the Quadrics Elan (QSW) network driver:
 * registers this driver's startup/shutdown/ctl/send/recv entry points
 * under the QSWLND network type. */
lnd_t the_kqswlnd =
{
	.lnd_type       = QSWLND,
	.lnd_startup    = kqswnal_startup,
	.lnd_shutdown   = kqswnal_shutdown,
	.lnd_ctl        = kqswnal_ctl,
	.lnd_send       = kqswnal_send,
	.lnd_recv       = kqswnal_recv,
};

/* Single global driver state; kqswnal_startup() enforces one instance. */
kqswnal_data_t		kqswnal_data;

/* Report the data->ioc_count'th active transmit descriptor back through
 * *data (payload length, destination NID, message type, launcher, and a
 * flags word encoding scheduled-state and ktx_state).  Returns 0 on
 * success or -ENOENT if fewer than ioc_count+1 txs are active.
 * NOTE(review): kqn_idletxd_lock is held while walking kqn_activetxds —
 * presumably that lock also guards the active list; confirm against the
 * rest of the driver. */
int
kqswnal_get_tx_desc (struct libcfs_ioctl_data *data)
{
	unsigned long      flags;
	struct list_head  *tmp;
	kqswnal_tx_t      *ktx;
	lnet_hdr_t        *hdr;
	int                index = data->ioc_count;
	int                rc = -ENOENT;

	spin_lock_irqsave (&kqswnal_data.kqn_idletxd_lock, flags);

	list_for_each (tmp, &kqswnal_data.kqn_activetxds) {
		/* skip until we reach the requested index */
		if (index-- != 0)
			continue;

		ktx = list_entry (tmp, kqswnal_tx_t, ktx_list);
		hdr = (lnet_hdr_t *)ktx->ktx_buffer;

		/* header fields are wire (little-endian) order */
		data->ioc_count  = le32_to_cpu(hdr->payload_length);
		data->ioc_nid    = le64_to_cpu(hdr->dest_nid);
		data->ioc_u64[0] = ktx->ktx_nid;
		data->ioc_u32[0] = le32_to_cpu(hdr->type);
		data->ioc_u32[1] = ktx->ktx_launcher;
		/* bit 0: tx is on a scheduling list; bits 2+: ktx_state */
		data->ioc_flags  = (list_empty (&ktx->ktx_schedlist) ? 0 : 1) |
				   (ktx->ktx_state << 2);
		rc = 0;
		break;
	}
	
	spin_unlock_irqrestore (&kqswnal_data.kqn_idletxd_lock, flags);
	return (rc);
}

/* ioctl dispatcher for this NI.  Supports GET_TXDESC (active tx
 * inspection) and the obsolete REGISTER_MYNID (accepted only for the
 * NID already configured; logged as an error otherwise via LASSERT on
 * the network part).  Returns 0 or a negative errno. */
int
kqswnal_ctl (lnet_ni_t *ni, unsigned int cmd, void *arg)
{
	struct libcfs_ioctl_data *data = arg;

	LASSERT (ni == kqswnal_data.kqn_ni);

	switch (cmd) {
	case IOC_LIBCFS_GET_TXDESC:
		return (kqswnal_get_tx_desc (data));

	case IOC_LIBCFS_REGISTER_MYNID:
		if (data->ioc_nid == ni->ni_nid)
			return 0;
		
		LASSERT (LNET_NIDNET(data->ioc_nid) == LNET_NIDNET(ni->ni_nid));

		CERROR("obsolete IOC_LIBCFS_REGISTER_MYNID for %s(%s)\n",
		       libcfs_nid2str(data->ioc_nid),
		       libcfs_nid2str(ni->ni_nid));
		return 0;
		
	default:
		return (-EINVAL);
	}
}

/* Tear the driver down: flag shutdown, drain pending sends, close the
 * Elan receivers/transmitter, stop scheduler threads, then unmap and
 * free every tx/rx descriptor and buffer.  Ordering is significant —
 * receivers are freed before the transmitter because rx callbacks may
 * still send; see the inline comments. */
void
kqswnal_shutdown(lnet_ni_t *ni)
{
	unsigned long flags;
	kqswnal_tx_t *ktx;
	kqswnal_rx_t *krx;
	
	CDEBUG (D_NET, "shutdown\n");
	LASSERT (ni->ni_data == &kqswnal_data);
	LASSERT (ni == kqswnal_data.kqn_ni);

	/* Only legal once init reached DATA or ALL */
	switch (kqswnal_data.kqn_init)
	{
	default:
		LASSERT (0);

	case KQN_INIT_ALL:
	case KQN_INIT_DATA:
		break;
	}

	/**********************************************************************/
	/* Signal the start of shutdown... */
	spin_lock_irqsave(&kqswnal_data.kqn_idletxd_lock, flags);
	kqswnal_data.kqn_shuttingdown = 1;
	spin_unlock_irqrestore(&kqswnal_data.kqn_idletxd_lock, flags);

	/**********************************************************************/
	/* wait for sends that have allocated a tx desc to launch or give up */
	while (atomic_read (&kqswnal_data.kqn_pending_txs) != 0) {
		CDEBUG(D_NET, "waiting for %d pending sends\n",
		       atomic_read (&kqswnal_data.kqn_pending_txs));
		cfs_pause(cfs_time_seconds(1));
	}

	/**********************************************************************/
	/* close elan comms */
	/* Shut down receivers first; rx callbacks might try sending... */
	if (kqswnal_data.kqn_eprx_small != NULL)
		ep_free_rcvr (kqswnal_data.kqn_eprx_small);

	if (kqswnal_data.kqn_eprx_large != NULL)
		ep_free_rcvr (kqswnal_data.kqn_eprx_large);

	/* NB ep_free_rcvr() returns only after we've freed off all receive
	 * buffers (see shutdown handling in kqswnal_requeue_rx()).  This
	 * means we must have completed any messages we passed to
	 * lnet_parse() */

	if (kqswnal_data.kqn_eptx != NULL)
		ep_free_xmtr (kqswnal_data.kqn_eptx);

	/* NB ep_free_xmtr() returns only after all outstanding transmits
	 * have called their callback... */
	LASSERT(list_empty(&kqswnal_data.kqn_activetxds));

	/**********************************************************************/
	/* flag threads to terminate, wake them and wait for them to die */
	kqswnal_data.kqn_shuttingdown = 2;
	wake_up_all (&kqswnal_data.kqn_sched_waitq);

	while (atomic_read (&kqswnal_data.kqn_nthreads) != 0) {
		CDEBUG(D_NET, "waiting for %d threads to terminate\n",
		       atomic_read (&kqswnal_data.kqn_nthreads));
		cfs_pause(cfs_time_seconds(1));
	}

	/**********************************************************************/
	/* No more threads.  No more portals, router or comms callbacks!
	 * I control the horizontals and the verticals...
	 */
	LASSERT (list_empty (&kqswnal_data.kqn_readyrxds));
	LASSERT (list_empty (&kqswnal_data.kqn_donetxds));
	LASSERT (list_empty (&kqswnal_data.kqn_delayedtxds));

	/**********************************************************************/
	/* Unmap message buffers and free all descriptors and buffers
	 */
	/* FTTB, we need to unmap any remaining mapped memory.  When
	 * ep_dvma_release() get fixed (and releases any mappings in the
	 * region), we can delete all the code from here -------->  */

	for (ktx = kqswnal_data.kqn_txds; ktx != NULL; ktx = ktx->ktx_alloclist) {
		/* If ktx has a buffer, it got mapped; unmap now.  NB only
		 * the pre-mapped stuff is still mapped since all tx descs
		 * must be idle */
		if (ktx->ktx_buffer != NULL)
			ep_dvma_unload(kqswnal_data.kqn_ep,
				       kqswnal_data.kqn_ep_tx_nmh,
				       &ktx->ktx_ebuffer);
	}

	for (krx = kqswnal_data.kqn_rxds; krx != NULL; krx = krx->krx_alloclist) {
		/* If krx_kiov[0].kiov_page got allocated, it got mapped.
		 * NB subsequent pages get merged */
		if (krx->krx_kiov[0].kiov_page != NULL)
			ep_dvma_unload(kqswnal_data.kqn_ep,
				       kqswnal_data.kqn_ep_rx_nmh,
				       &krx->krx_elanbuffer);
	}
	/* <----------- to here */

	if (kqswnal_data.kqn_ep_rx_nmh != NULL)
		ep_dvma_release(kqswnal_data.kqn_ep, kqswnal_data.kqn_ep_rx_nmh);

	if (kqswnal_data.kqn_ep_tx_nmh != NULL)
		ep_dvma_release(kqswnal_data.kqn_ep, kqswnal_data.kqn_ep_tx_nmh);

	/* walk the tx allocation chain, freeing buffers then descriptors */
	while (kqswnal_data.kqn_txds != NULL) {
		ktx = kqswnal_data.kqn_txds;

		if (ktx->ktx_buffer != NULL)
			LIBCFS_FREE(ktx->ktx_buffer, KQSW_TX_BUFFER_SIZE);

		kqswnal_data.kqn_txds = ktx->ktx_alloclist;
		LIBCFS_FREE(ktx, sizeof(*ktx));
	}

	/* walk the rx allocation chain, freeing pages then descriptors */
	while (kqswnal_data.kqn_rxds != NULL) {
		int           i;

		krx = kqswnal_data.kqn_rxds;
		for (i = 0; i < krx->krx_npages; i++)
			if (krx->krx_kiov[i].kiov_page != NULL)
				__free_page (krx->krx_kiov[i].kiov_page);

		kqswnal_data.kqn_rxds = krx->krx_alloclist;
		LIBCFS_FREE(krx, sizeof (*krx));
	}

	/* resets flags, pointers to NULL etc */
	memset(&kqswnal_data, 0, sizeof (kqswnal_data));

	CDEBUG (D_MALLOC, "done kmem %d\n", atomic_read(&libcfs_kmemory));

	PORTAL_MODULE_UNUSE;
}

/* Bring the driver up for NI: validate configuration (single instance,
 * no explicit interfaces, credits < ntxmsgs), then allocate and map
 * resources.  NOTE(review): this listing is page 1 of 2 — the function
 * body is truncated here; the remainder (resource allocation, error
 * unwinding, return) continues on the next page. */
int
kqswnal_startup (lnet_ni_t *ni)
{
	EP_RAILMASK       all_rails = EP_RAILMASK_ALL;
	int               rc;
	int               i;
	kqswnal_rx_t     *krx;
	kqswnal_tx_t     *ktx;
	int               elan_page_idx;

	LASSERT (ni->ni_lnd == &the_kqswlnd);

#if KQSW_CKSUM
	if (the_lnet.ln_ptlcompat != 0) {
		CERROR("Checksumming version not portals compatible\n");
		return -ENODEV;
	}
#endif
	/* Only 1 instance supported */
	if (kqswnal_data.kqn_init != KQN_INIT_NOTHING) {
		CERROR ("Only 1 instance supported\n");
		return -EPERM;
	}

	if (ni->ni_interfaces[0] != NULL) {
		CERROR("Explicit interface config not supported\n");
		return -EPERM;
	}

	/* NOTE(review): misconfiguration is reported but execution falls
	 * through without returning an error — confirm intended. */
	if (*kqswnal_tunables.kqn_credits >=
	    *kqswnal_tunables.kqn_ntxmsgs) {
		LCONSOLE_ERROR_MSG(0x12e, "Configuration error: please set "
			           "ntxmsgs(%d) > credits(%d)\n",
			       	   *kqswnal_tunables.kqn_ntxmsgs,
				   *kqswnal_tunables.kqn_credits);
	}

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -