
📄 svc.c

📁 Linux kernel source code
💻 C
📖 Page 1 of 2
/*
 * linux/net/sunrpc/svc.c
 *
 * High-level RPC service routines
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 *
 * Multiple threads pools and NUMAisation
 * Copyright (c) 2006 Silicon Graphics, Inc.
 * by Greg Banks <gnb@melbourne.sgi.com>
 */

#include <linux/linkage.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/module.h>

#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/clnt.h>

#define RPCDBG_FACILITY		RPCDBG_SVCDSP

#define svc_serv_is_pooled(serv)    ((serv)->sv_function)

/*
 * Mode for mapping cpus to pools.
 */
enum {
        SVC_POOL_AUTO = -1,     /* choose one of the others */
        SVC_POOL_GLOBAL,        /* no mapping, just a single global pool
                                 * (legacy & UP mode) */
        SVC_POOL_PERCPU,        /* one pool per cpu */
        SVC_POOL_PERNODE        /* one pool per numa node */
};
#define SVC_POOL_DEFAULT        SVC_POOL_GLOBAL

/*
 * Structure for mapping cpus to pools and vice versa.
 * Setup once during sunrpc initialisation.
 */
static struct svc_pool_map {
        int count;                      /* How many svc_servs use us */
        int mode;                       /* Note: int not enum to avoid
                                         * warnings about "enumeration value
                                         * not handled in switch" */
        unsigned int npools;
        unsigned int *pool_to;          /* maps pool id to cpu or node */
        unsigned int *to_pool;          /* maps cpu or node to pool id */
} svc_pool_map = {
        .count = 0,
        .mode = SVC_POOL_DEFAULT
};
static DEFINE_MUTEX(svc_pool_map_mutex);        /* protects svc_pool_map.count only */

static int
param_set_pool_mode(const char *val, struct kernel_param *kp)
{
        int *ip = (int *)kp->arg;
        struct svc_pool_map *m = &svc_pool_map;
        int err;

        mutex_lock(&svc_pool_map_mutex);

        err = -EBUSY;
        if (m->count)
                goto out;

        err = 0;
        if (!strncmp(val, "auto", 4))
                *ip = SVC_POOL_AUTO;
        else if (!strncmp(val, "global", 6))
                *ip = SVC_POOL_GLOBAL;
        else if (!strncmp(val, "percpu", 6))
                *ip = SVC_POOL_PERCPU;
        else if (!strncmp(val, "pernode", 7))
                *ip = SVC_POOL_PERNODE;
        else
                err = -EINVAL;

out:
        mutex_unlock(&svc_pool_map_mutex);
        return err;
}

static int
param_get_pool_mode(char *buf, struct kernel_param *kp)
{
        int *ip = (int *)kp->arg;

        switch (*ip)
        {
        case SVC_POOL_AUTO:
                return strlcpy(buf, "auto", 20);
        case SVC_POOL_GLOBAL:
                return strlcpy(buf, "global", 20);
        case SVC_POOL_PERCPU:
                return strlcpy(buf, "percpu", 20);
        case SVC_POOL_PERNODE:
                return strlcpy(buf, "pernode", 20);
        default:
                return sprintf(buf, "%d", *ip);
        }
}

module_param_call(pool_mode, param_set_pool_mode, param_get_pool_mode,
                 &svc_pool_map.mode, 0644);

/*
 * Detect best pool mapping mode heuristically,
 * according to the machine's topology.
 */
static int
svc_pool_map_choose_mode(void)
{
        unsigned int node;

        if (num_online_nodes() > 1) {
                /*
                 * Actually have multiple NUMA nodes,
                 * so split pools on NUMA node boundaries
                 */
                return SVC_POOL_PERNODE;
        }

        node = any_online_node(node_online_map);
        if (nr_cpus_node(node) > 2) {
                /*
                 * Non-trivial SMP, or CONFIG_NUMA on
                 * non-NUMA hardware, e.g. with a generic
                 * x86_64 kernel on Xeons.  In this case we
                 * want to divide the pools on cpu boundaries.
                 */
                return SVC_POOL_PERCPU;
        }

        /* default: one global pool */
        return SVC_POOL_GLOBAL;
}
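/*
 * Usage sketch (illustrative, not part of the kernel file): pool_mode is
 * registered above via module_param_call() with permissions 0644, so an
 * administrator can pick a mapping mode before the first pooled service
 * starts, e.g. from a hypothetical shell session:
 *
 *	echo pernode > /sys/module/sunrpc/parameters/pool_mode
 *
 * Once any svc_serv holds a reference to the map (m->count != 0),
 * param_set_pool_mode() rejects further writes with -EBUSY.
 */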
/*
 * Allocate the to_pool[] and pool_to[] arrays.
 * Returns 0 on success or an errno.
 */
static int
svc_pool_map_alloc_arrays(struct svc_pool_map *m, unsigned int maxpools)
{
        m->to_pool = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
        if (!m->to_pool)
                goto fail;
        m->pool_to = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
        if (!m->pool_to)
                goto fail_free;

        return 0;

fail_free:
        kfree(m->to_pool);
fail:
        return -ENOMEM;
}

/*
 * Initialise the pool map for SVC_POOL_PERCPU mode.
 * Returns number of pools or <0 on error.
 */
static int
svc_pool_map_init_percpu(struct svc_pool_map *m)
{
        unsigned int maxpools = nr_cpu_ids;
        unsigned int pidx = 0;
        unsigned int cpu;
        int err;

        err = svc_pool_map_alloc_arrays(m, maxpools);
        if (err)
                return err;

        for_each_online_cpu(cpu) {
                BUG_ON(pidx > maxpools);
                m->to_pool[cpu] = pidx;
                m->pool_to[pidx] = cpu;
                pidx++;
        }
        /* cpus brought online later all get mapped to pool0, sorry */

        return pidx;
}

/*
 * Initialise the pool map for SVC_POOL_PERNODE mode.
 * Returns number of pools or <0 on error.
 */
static int
svc_pool_map_init_pernode(struct svc_pool_map *m)
{
        unsigned int maxpools = nr_node_ids;
        unsigned int pidx = 0;
        unsigned int node;
        int err;

        err = svc_pool_map_alloc_arrays(m, maxpools);
        if (err)
                return err;

        for_each_node_with_cpus(node) {
                /* some architectures (e.g. SN2) have cpuless nodes */
                BUG_ON(pidx > maxpools);
                m->to_pool[node] = pidx;
                m->pool_to[pidx] = node;
                pidx++;
        }
        /* nodes brought online later all get mapped to pool0, sorry */

        return pidx;
}

/*
 * Add a reference to the global map of cpus to pools (and
 * vice versa).  Initialise the map if we're the first user.
 * Returns the number of pools.
 */
static unsigned int
svc_pool_map_get(void)
{
        struct svc_pool_map *m = &svc_pool_map;
        int npools = -1;

        mutex_lock(&svc_pool_map_mutex);

        if (m->count++) {
                mutex_unlock(&svc_pool_map_mutex);
                return m->npools;
        }

        if (m->mode == SVC_POOL_AUTO)
                m->mode = svc_pool_map_choose_mode();

        switch (m->mode) {
        case SVC_POOL_PERCPU:
                npools = svc_pool_map_init_percpu(m);
                break;
        case SVC_POOL_PERNODE:
                npools = svc_pool_map_init_pernode(m);
                break;
        }

        if (npools < 0) {
                /* default, or memory allocation failure */
                npools = 1;
                m->mode = SVC_POOL_GLOBAL;
        }
        m->npools = npools;

        mutex_unlock(&svc_pool_map_mutex);
        return m->npools;
}

/*
 * Drop a reference to the global map of cpus to pools.
 * When the last reference is dropped, the map data is
 * freed; this allows the sysadmin to change the pool
 * mode using the pool_mode module option without
 * rebooting or re-loading sunrpc.ko.
 */
static void
svc_pool_map_put(void)
{
        struct svc_pool_map *m = &svc_pool_map;

        mutex_lock(&svc_pool_map_mutex);

        if (!--m->count) {
                m->mode = SVC_POOL_DEFAULT;
                kfree(m->to_pool);
                kfree(m->pool_to);
                m->npools = 0;
        }

        mutex_unlock(&svc_pool_map_mutex);
}

/*
 * Set the current thread's cpus_allowed mask so that it
 * will only run on cpus in the given pool.
 *
 * Returns 1 and fills in oldmask iff a cpumask was applied.
 */
static inline int
svc_pool_map_set_cpumask(unsigned int pidx, cpumask_t *oldmask)
{
        struct svc_pool_map *m = &svc_pool_map;
        unsigned int node; /* or cpu */

        /*
         * The caller checks for sv_nrpools > 1, which
         * implies that we've been initialized.
         */
        BUG_ON(m->count == 0);

        switch (m->mode)
        {
        default:
                return 0;
        case SVC_POOL_PERCPU:
                node = m->pool_to[pidx];
                *oldmask = current->cpus_allowed;
                set_cpus_allowed(current, cpumask_of_cpu(node));
                return 1;
        case SVC_POOL_PERNODE:
                node = m->pool_to[pidx];
                *oldmask = current->cpus_allowed;
                set_cpus_allowed(current, node_to_cpumask(node));
                return 1;
        }
}
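/*
 * Worked example (illustrative topology, not from the kernel file): on a
 * two-node NUMA machine with cpus 0-1 on node 0 and cpus 2-3 on node 1,
 * SVC_POOL_PERNODE initialisation yields
 *
 *	npools    == 2
 *	to_pool[] == { 0, 1 }	(node id -> pool id)
 *	pool_to[] == { 0, 1 }	(pool id -> node id)
 *
 * while SVC_POOL_PERCPU on the same machine yields npools == 4 and an
 * identity mapping over the four online cpu ids.
 */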
/*
 * Use the mapping mode to choose a pool for a given CPU.
 * Used when enqueueing an incoming RPC.  Always returns
 * a non-NULL pool pointer.
 */
struct svc_pool *
svc_pool_for_cpu(struct svc_serv *serv, int cpu)
{
        struct svc_pool_map *m = &svc_pool_map;
        unsigned int pidx = 0;

        /*
         * An uninitialised map happens in a pure client when
         * lockd is brought up, so silently treat it the
         * same as SVC_POOL_GLOBAL.
         */
        if (svc_serv_is_pooled(serv)) {
                switch (m->mode) {
                case SVC_POOL_PERCPU:
                        pidx = m->to_pool[cpu];
                        break;
                case SVC_POOL_PERNODE:
                        pidx = m->to_pool[cpu_to_node(cpu)];
                        break;
                }
        }
        return &serv->sv_pools[pidx % serv->sv_nrpools];
}

/*
 * Create an RPC service
 */
static struct svc_serv *
__svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
             void (*shutdown)(struct svc_serv *serv))
{
        struct svc_serv *serv;
        int vers;
        unsigned int xdrsize;
        unsigned int i;

        if (!(serv = kzalloc(sizeof(*serv), GFP_KERNEL)))
                return NULL;
        serv->sv_name      = prog->pg_name;
        serv->sv_program   = prog;
        serv->sv_nrthreads = 1;
        serv->sv_stats     = prog->pg_stats;
        if (bufsize > RPCSVC_MAXPAYLOAD)
                bufsize = RPCSVC_MAXPAYLOAD;
        serv->sv_max_payload = bufsize ? bufsize : 4096;
        serv->sv_max_mesg  = roundup(serv->sv_max_payload + PAGE_SIZE, PAGE_SIZE);
        serv->sv_shutdown  = shutdown;
        xdrsize = 0;
        while (prog) {
                prog->pg_lovers = prog->pg_nvers - 1;
                for (vers = 0; vers < prog->pg_nvers; vers++)
                        if (prog->pg_vers[vers]) {
                                prog->pg_hivers = vers;
                                if (prog->pg_lovers > vers)
                                        prog->pg_lovers = vers;
                                if (prog->pg_vers[vers]->vs_xdrsize > xdrsize)
                                        xdrsize = prog->pg_vers[vers]->vs_xdrsize;
                        }
                prog = prog->pg_next;
        }
        serv->sv_xdrsize   = xdrsize;
        INIT_LIST_HEAD(&serv->sv_tempsocks);
        INIT_LIST_HEAD(&serv->sv_permsocks);
        init_timer(&serv->sv_temptimer);
        spin_lock_init(&serv->sv_lock);

        serv->sv_nrpools = npools;
        serv->sv_pools =
                kcalloc(serv->sv_nrpools, sizeof(struct svc_pool),
                        GFP_KERNEL);
        if (!serv->sv_pools) {
                kfree(serv);
                return NULL;
        }

        for (i = 0; i < serv->sv_nrpools; i++) {
                struct svc_pool *pool = &serv->sv_pools[i];

                dprintk("svc: initialising pool %u for %s\n",
                                i, serv->sv_name);

                pool->sp_id = i;
                INIT_LIST_HEAD(&pool->sp_threads);
                INIT_LIST_HEAD(&pool->sp_sockets);
                INIT_LIST_HEAD(&pool->sp_all_threads);
                spin_lock_init(&pool->sp_lock);
        }

        /* Remove any stale portmap registrations */
        svc_register(serv, 0, 0);

        return serv;
}

struct svc_serv *
svc_create(struct svc_program *prog, unsigned int bufsize,
           void (*shutdown)(struct svc_serv *serv))
{
        return __svc_create(prog, bufsize, /*npools*/1, shutdown);
}

struct svc_serv *
svc_create_pooled(struct svc_program *prog, unsigned int bufsize,
                  void (*shutdown)(struct svc_serv *serv),
                  svc_thread_fn func, int sig, struct module *mod)
{
        struct svc_serv *serv;
        unsigned int npools = svc_pool_map_get();

        serv = __svc_create(prog, bufsize, npools, shutdown);

        if (serv != NULL) {
                serv->sv_function = func;
                serv->sv_kill_signal = sig;
                serv->sv_module = mod;
        }

        return serv;
}
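/*
 * Caller sketch (hypothetical, modelled on how nfsd of this era used the
 * API; not part of this file): a pooled service is created roughly as
 *
 *	serv = svc_create_pooled(&nfsd_program, bufsize,
 *				 nfsd_last_thread, nfsd,
 *				 SIGKILL, THIS_MODULE);
 *
 * which takes a reference on the global pool map via svc_pool_map_get()
 * and records the thread function, making svc_serv_is_pooled() true for
 * this serv.
 */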
/*
 * Destroy an RPC service.  Should be called with the BKL held
 */
void
svc_destroy(struct svc_serv *serv)
{
        struct svc_sock *svsk;
        struct svc_sock *tmp;

        dprintk("svc: svc_destroy(%s, %d)\n",
                                serv->sv_program->pg_name,
                                serv->sv_nrthreads);

        if (serv->sv_nrthreads) {
                if (--(serv->sv_nrthreads) != 0) {
                        svc_sock_update_bufs(serv);
                        return;
                }
        } else
                printk("svc_destroy: no threads for serv=%p!\n", serv);

        del_timer_sync(&serv->sv_temptimer);

        list_for_each_entry_safe(svsk, tmp, &serv->sv_tempsocks, sk_list)
                svc_force_close_socket(svsk);

        if (serv->sv_shutdown)
                serv->sv_shutdown(serv);

        list_for_each_entry_safe(svsk, tmp, &serv->sv_permsocks, sk_list)
                svc_force_close_socket(svsk);

        BUG_ON(!list_empty(&serv->sv_permsocks));
        BUG_ON(!list_empty(&serv->sv_tempsocks));

        cache_clean_deferred(serv);

        if (svc_serv_is_pooled(serv))
                svc_pool_map_put();

        /* Unregister service with the portmapper */
        svc_register(serv, 0, 0);
        kfree(serv->sv_pools);
        kfree(serv);
}

/*
 * Allocate an RPC server's buffer space.
 * We allocate pages and place them in rq_argpages.
 */
static int
svc_init_buffer(struct svc_rqst *rqstp, unsigned int size)
{
        int pages;
        int arghi;

        pages = size / PAGE_SIZE + 1; /* extra page as we hold both request and reply.
                                       * We assume one is at most one page
                                       */
        arghi = 0;
        BUG_ON(pages > RPCSVC_MAXPAGES);
        while (pages) {
                struct page *p = alloc_page(GFP_KERNEL);
                if (!p)
                        break;
                rqstp->rq_pages[arghi++] = p;
                pages--;
        }
        return !pages;
}

/*
 * Release an RPC server buffer
 */
static void
svc_release_buffer(struct svc_rqst *rqstp)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(rqstp->rq_pages); i++)
