
📄 pmiserv.c
📁 Package: fortran并行计算包 (Fortran parallel computing package)
💻 Language: C
📖 Page 1 of 3
/* -*- Mode: C; c-basic-offset:4 ; -*- */
/*
 *  (C) 2003 by Argonne National Laboratory.
 *      See COPYRIGHT in top-level directory.
 */
/*
 * This is a simple PMI server implementation.  This file implements
 * the PMI calls, including the PMI key value spaces.  This implements the
 * "server" end of the interface defined in mpich2/src/pmi/simple .
 */

#include "pmutilconf.h"
#include <stdio.h>
#ifdef HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#ifdef HAVE_STDLIB_H
#include <stdlib.h>
#endif
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <string.h>
#include <ctype.h>
#include <errno.h>
/* Use the memory definitions from mpich2/src/include */
#include "mpimem.h"
#include "pmutil.h"
#include "process.h"
#include "cmnargs.h"
#include "ioloop.h"
#include "pmiserv.h"
#include "env.h"        /* For MPIE_Putenv */
/* We need socket to create a socket pair */
#include <sys/socket.h>
/* ??? */
#include "simple_pmiutil.h"

#ifdef HAVE_SNPRINTF
#define MPIU_Snprintf snprintf
#ifdef NEEDS_SNPRINTF_DECL
int snprintf(char *, size_t, const char *, ...);
#endif
#endif

/* isascii is an extension, so define it if it isn't defined */
#ifndef isascii
#define isascii(c) (((c)&~0x7f)==0)
#endif

/* These need to be imported from the pmiclient */
#define MAXPMICMD   256         /* max length of a PMI command */

/* Sizes of info keys and values (should match MPI versions in mpi.h) */
#define PMI_MAX_INFO_KEY       256
#define PMI_MAX_INFO_VAL      1025

/* There is only a single PMI master, so we allocate it here */
static PMIMaster pmimaster = { 0, 0, 0 };

/* Allow the user to register a routine to be used for the PMI spawn
   command */
static int (*userSpawner)( ProcessWorld *, void * ) = 0;
static void *userSpawnerData = 0;

static int pmidebug = 0;

/* Functions that handle PMI requests */
static int fPMI_Handle_finalize( PMIProcess * );
static int fPMI_Handle_abort( PMIProcess * );
static int fPMI_Handle_barrier( PMIProcess * );
static int fPMI_Handle_create_kvs( PMIProcess * );
static int fPMI_Handle_destroy_kvs( PMIProcess * );
static int fPMI_Handle_put( PMIProcess * );
static int fPMI_Handle_get( PMIProcess * );
static int fPMI_Handle_get_my_kvsname( PMIProcess * );
static int fPMI_Handle_init( PMIProcess * );
static int fPMI_Handle_get_maxes( PMIProcess * );
static int fPMI_Handle_getbyidx( PMIProcess * );
static int fPMI_Handle_init_port( PMIProcess * );
static int fPMI_Handle_spawn( PMIProcess * );
static int fPMI_Handle_get_universe_size( PMIProcess * );
static int fPMI_Handle_get_appnum( PMIProcess * );

static PMIKVSpace *fPMIKVSAllocate( void );
static int fPMIInfoKey( ProcessApp *, const char [], const char [] );

int PMIServHandleInput( int, int, void * );

static int PMIUBufferedReadLine( PMIProcess *, char *, int );

/*
 * All PMI commands are handled by calling a routine that is associated with
 * the command.  New PMI commands can be added by updating this table.
 */
typedef struct {
    char *cmdName;
    int (*handler)( PMIProcess * );
} PMICmdMap;

static PMICmdMap pmiCommands[] = {
    { "barrier_in",        fPMI_Handle_barrier },
    { "finalize",          fPMI_Handle_finalize },
    { "abort",             fPMI_Handle_abort },
    { "create_kvs",        fPMI_Handle_create_kvs },
    { "destroy_kvs",       fPMI_Handle_destroy_kvs },
    { "put",               fPMI_Handle_put },
    { "get",               fPMI_Handle_get },
    { "get_my_kvsname",    fPMI_Handle_get_my_kvsname },
    { "init",              fPMI_Handle_init },
    { "get_maxes",         fPMI_Handle_get_maxes },
    { "getbyidx",          fPMI_Handle_getbyidx },
    { "initack",           fPMI_Handle_init_port },
    { "spawn",             fPMI_Handle_spawn },
    { "get_universe_size", fPMI_Handle_get_universe_size },
    { "get_appnum",        fPMI_Handle_get_appnum },
    { "\0",                0 },              /* Sentinel for end of list */
};
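As the comment above notes, this table is the single dispatch point for PMI commands. Purely as an illustration (not part of pmiserv.c), a hypothetical "ping" command could be wired in as shown below; the name fPMI_Handle_ping and the "pong" reply are invented for the example, while PMIWriteLine, MPIU_Snprintf and PMIU_MAXLINE are the same facilities the real handlers use.

/* Illustrative sketch only -- not part of pmiserv.c.  A hypothetical
   "ping" command: the handler matches the signature used by the other
   entries and answers on the per-process PMI socket. */
static int fPMI_Handle_ping( PMIProcess *pentry )
{
    char outbuf[PMIU_MAXLINE];

    MPIU_Snprintf( outbuf, PMIU_MAXLINE, "cmd=pong\n" );
    PMIWriteLine( pentry->fd, outbuf );
    return 0;
}
/* ...plus one extra entry in pmiCommands[], before the "\0" sentinel:
       { "ping",              fPMI_Handle_ping },
*/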
/* ------------------------------------------------------------------------- */
/*
 * Create a socket fd and setup the handler on that fd.
 *
 * You must also call
 *    PMISetupInClient (in the child process)
 * and
 *    PMISetupFinishInServer (in the originating process, also called the
 *                            parent)
 * You must also pass those routines the same value for usePort.
 * If you use a port, call PMIServSetupPort to get the port and set the
 * portName field in PMISetup.
 */
int PMISetupSockets( int usePort, PMISetup *pmiinfo )
{
    if (usePort == 0) {
        socketpair( AF_UNIX, SOCK_STREAM, 0, pmiinfo->fdpair );
    }
    else {
        /* If we are using a port, the connection is set up only
           after the process is created */
        pmiinfo->fdpair[0] = -1;
        pmiinfo->fdpair[1] = -1;
        /* Check for a non-null portName */
        if (!pmiinfo->portName || !pmiinfo->portName[0]) return 1;
    }
    return 0;
}
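The comment before PMISetupSockets prescribes a three-call sequence split between the parent (mpiexec) and the forked child. The following is a minimal sketch of that sequence for the usePort == 0 case; launch_one_client is a hypothetical helper name, and it assumes the caller has already filled in pmiinfo->pWorld and owns a ProcessState for the child.

/* Illustrative sketch only -- not part of pmiserv.c.  The assumed
   caller owns a PMISetup (with pWorld set) and a ProcessState for the
   child, and uses the socketpair path (usePort == 0). */
static int launch_one_client( PMISetup *pmiinfo, ProcessState *pState )
{
    pid_t pid;

    if (PMISetupSockets( 0, pmiinfo ) != 0) return 1;   /* socketpair */
    pid = fork();
    if (pid < 0) return 1;
    if (pid == 0) {
        /* child: export PMI_FD into the child's environment,
           then exec the MPI application */
        PMISetupInClient( 0, pmiinfo );
        /* ... exec ... */
    }
    else {
        /* parent (mpiexec): keep fdpair[0], create the PMIProcess and
           register PMIServHandleInput on it */
        PMISetupFinishInServer( 0, pmiinfo, pState );
    }
    return 0;
}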
/*
 * This is the client side of the PMIserver setup.  It communicates to the
 * client the information needed to connect to the server (currently the
 * FD of a pre-existing socket).
 *
 * The env_pmi_fd and port must be static because putenv doesn't make a copy
 * of them.  It is ok to use static variable since this is called only within
 * the client; this routine will be called only once (in the forked process,
 * before the exec).
 *
 * Another wrinkle is that in order to support -(g)envnone (no environment
 * variables in context of created process), we need to add the environment
 * variables to the ones set *after* environment variables are removed, rather
 * than using putenv.
 */
int PMISetupInClient( int usePort, PMISetup *pmiinfo )
{
    static char env_pmi_fd[100];
    static char env_pmi_port[1024];

    if (usePort == 0) {
        close( pmiinfo->fdpair[0] );
        MPIU_Snprintf( env_pmi_fd, sizeof(env_pmi_fd), "PMI_FD=%d",
                       pmiinfo->fdpair[1] );
        if (MPIE_Putenv( pmiinfo->pWorld, env_pmi_fd )) {
            MPIU_Internal_error_printf( "Could not set environment PMI_FD" );
            return 1;
        }
    }
    else {
        /* We must communicate the port name to the process */
        if (pmiinfo->portName) {
            MPIU_Snprintf( env_pmi_port, sizeof(env_pmi_port), "PMI_PORT=%s",
                           pmiinfo->portName );
            if (MPIE_Putenv( pmiinfo->pWorld, env_pmi_port )) {
                MPIU_Internal_error_printf( "Could not set environment PMI_PORT" );
                perror( "Reason: " );
                return 1;
            }
        }
        else {
            MPIU_Internal_error_printf( "Required portname was not defined\n" );
            return 1;
        }
    }
    /* Indicate that this is a spawned process */
    /* MPIE_Putenv( pmiinfo->pWorld, "PMI_SPAWNED=1" ); */
    return 0;
}

/* Finish setting up the server end of the PMI interface */
int PMISetupFinishInServer( int usePort,
                            PMISetup *pmiinfo, ProcessState *pState )
{
    if (usePort == 0) {
        PMIProcess *pmiprocess;

        /* Close the other end of the socket pair */
        close( pmiinfo->fdpair[1] );

        /* We must initialize this process in the list of PMI processes. We
           pass the PMIProcess information to the handler */
        pmiprocess = PMISetupNewProcess( pmiinfo->fdpair[0], pState );
        MPIE_IORegister( pmiinfo->fdpair[0], IO_READ, PMIServHandleInput,
                         pmiprocess );
    }
    else {
        /* We defer the step of setting up the process until the client
           contacts the server.  See PMIServAcceptFromPort for the
           creation of the pmiprocess structure and the initialization of
           the IO handler for the PMI communication */
        /* FIXME: We may need to record some information, such as the
           curPMIGroup, in the pState or pmiprocess entry */
        ;
    }
    return 0;
}
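For orientation only: the matching client end lives in mpich2/src/pmi/simple and is expected to pick these variables back up from its environment. The sketch below is not code from that client; the function name is invented and the fallback behaviour is an assumption.

/* Illustrative sketch only -- not part of pmiserv.c or of the simple
   PMI client.  It shows the expected pickup of PMI_FD / PMI_PORT. */
static int example_pmi_client_fd( void )
{
    const char *p;

    if ((p = getenv( "PMI_FD" )) != 0) {
        return atoi( p );      /* pre-connected socketpair descriptor */
    }
    if ((p = getenv( "PMI_PORT" )) != 0) {
        /* "host:port" string: the client would connect a new socket
           to the server here instead of inheriting one */
        return -1;
    }
    return -1;                 /* no PMI server information available */
}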
static PMIGroup *curPMIGroup = 0;
static int       curNprocess = 0;

/*
  Create a new PMIProcess and initialize it.

  If there is an allocation failure, return a null pointer.
*/
PMIProcess *PMISetupNewProcess( int fd, ProcessState *pState )
{
    PMIProcess *pmiprocess;

    pmiprocess = (PMIProcess *)MPIU_Malloc( sizeof(PMIProcess) );
    if (!pmiprocess) return 0;
    pmiprocess->fd           = fd;
    pmiprocess->nextChar     = pmiprocess->readBuf;
    pmiprocess->endChar      = pmiprocess->readBuf;
    pmiprocess->group        = curPMIGroup;
    pmiprocess->pState       = pState;
    pmiprocess->spawnApp     = 0;
    pmiprocess->spawnAppTail = 0;
    pmiprocess->spawnKVS     = 0;
    pmiprocess->spawnWorld   = 0;

    /* Add this process to the curPMIGroup */
    curPMIGroup->pmiProcess[curNprocess++] = pmiprocess;

    return pmiprocess;
}

/*
  Initialize a new PMI group that will be the parent of all
  PMIProcesses until the next group is created.

  Each group also starts with a KV Space.

  If there is an allocation failure, return non zero.
*/
int PMISetupNewGroup( int nProcess, PMIKVSpace *kvs )
{
    PMIGroup *g;

    curPMIGroup = (PMIGroup *)MPIU_Malloc( sizeof(PMIGroup) );
    if (!curPMIGroup) return 1;

    curPMIGroup->nProcess   = nProcess;
    curPMIGroup->groupID    = pmimaster.nGroups++;
    curPMIGroup->nInBarrier = 0;
    curPMIGroup->pmiProcess = (PMIProcess **)MPIU_Malloc(
                                         sizeof(PMIProcess*) * nProcess );
    if (!curPMIGroup->pmiProcess) return 1;
    curPMIGroup->nextGroup  = 0;
    curNprocess             = 0;

    /* Add to PMIMaster */
    g = pmimaster.groups;
    if (!g) {
        pmimaster.groups = curPMIGroup;
    }
    else {
        while (g) {
            if (!g->nextGroup) {
                g->nextGroup = curPMIGroup;
                break;
            }
            g = g->nextGroup;
        }
    }

    if (kvs) {
        curPMIGroup->kvs = kvs;
    }
    else {
        curPMIGroup->kvs = fPMIKVSAllocate();
    }

    return 0;
}
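Taken together, the comments above imply an ordering when bringing up an n-process world: create the group (and, implicitly, its KVS) first, then attach one PMIProcess per launched client. Below is a sketch under the usePort == 0 assumption, reusing the hypothetical launch_one_client helper from the earlier sketch; the function name and the per-process arrays are invented for the example.

/* Illustrative sketch only -- not part of pmiserv.c.  Assumed inputs:
   per-process PMISetup and ProcessState arrays of length n, plus the
   hypothetical launch_one_client() sketched above. */
static int example_start_world( int n, PMISetup *pmiinfo,
                                ProcessState *pState )
{
    int i;

    if (PMISetupNewGroup( n, 0 )) return 1;   /* 0 => allocate a new KVS */
    for (i = 0; i < n; i++) {
        /* socketpair + fork + PMISetupInClient / PMISetupFinishInServer;
           the server side calls PMISetupNewProcess() to fill the
           group's slot for this client */
        if (launch_one_client( &pmiinfo[i], &pState[i] )) return 1;
    }
    return 0;
}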
/*
 * Process input from the socket connecting the mpiexec process to the
 * child process.
 *
 * The return status is interpreted by the IOLoop code in ioloop.c ;
 * a zero is a normal exit.
 */
int PMIServHandleInput( int fd, int rdwr, void *extra )
{
    PMIProcess *pentry = (PMIProcess *)extra;
    int        rc;
    int        returnCode = 0;
    char       inbuf[PMIU_MAXLINE], cmd[MAXPMICMD];
    PMICmdMap *p;
    int        cmdtype;

    DBG_PRINTFCOND(pmidebug,("Handling PMI input\n") );

    if ( ( rc = PMIUBufferedReadLine( pentry, inbuf, PMIU_MAXLINE ) ) > 0 ) {
        DBG_PRINTFCOND(pmidebug,
                       ("Entering PMIServHandleInputFd %s\n", inbuf) );

        PMIU_parse_keyvals( inbuf );
        cmdtype = PMIGetCommand( cmd, sizeof(cmd) );
        DBG_PRINTFCOND(pmidebug,( "cmd = %s\n", cmd ));

        /* Look for the command and execute the related function */
        p = pmiCommands;
        while (p->handler) {
            if (strncmp( cmd, p->cmdName, MAXPMICMD ) == 0) {
                rc = (p->handler)( pentry );
                break;
            }
            p++;
        }
        if (!p->handler) {
            PMIU_printf( 1, "unknown cmd %s\n", cmd );
        }
    }
    else {                        /* lost contact with client */
        DBG_PRINTFCOND(pmidebug,("EOF on PMI input\n"));
        /* returning a 1 causes the IO loop code to close the socket */
        returnCode = 1;
    }
    return returnCode;
}

/* ------------------------------------------------------------------------- */
/*
 * Perform any initialization.
 * Input
 *   spawner - A routine to spawn processes
 *   spawnerData - data passed to spawner
 */
int PMIServInit( int (*spawner)(ProcessWorld *, void*), void * spawnerData )
{
    userSpawner     = spawner;
    userSpawnerData = spawnerData;
    return 0;
}

/*
 * Set the debug flag for the pmiserver routines.  Returns the old
 * value of the flag.  0 turns off debugging, non-zero turns it on.
 */
int PMISetDebug( int flag )
{
    int oldflag = pmidebug;
    pmidebug = flag;
    return oldflag;
}

/* ------------------------------------------------------------------------ */
/* Additional service routines                                              */
/* ------------------------------------------------------------------------ */
/*
 * Get either a cmd=val or mcmd=val.  return 0 if cmd, 1 if mcmd, and -1
 * if neither (an error, since all PMI messages should contain one of
 * these).
 */
int PMIGetCommand( char *cmd, int cmdlen )
{
    char *valptr;
    int  cmdtype = 0;

    valptr = PMIU_getval( "cmd", cmd, cmdlen );
    if (!valptr) {
        valptr = PMIU_getval( "mcmd", cmd, cmdlen );
        if (valptr) cmdtype = 1;
        else        cmdtype = -1;
    }
    return cmdtype;
}

/* ------------------------------------------------------------------------- */
/* The rest of these routines are internal                                   */
/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------- */
static int fPMI_Handle_finalize( PMIProcess *pentry )
{
    char outbuf[PMIU_MAXLINE];

    pentry->pState->status = PROCESS_FINALIZED;

    /* send back an acknowledgement to release the process */
    MPIU_Snprintf(outbuf, PMIU_MAXLINE, "cmd=finalize_ack\n");
    PMIWriteLine(pentry->fd, outbuf);

    return 0;
}

static int fPMI_Handle_abort( PMIProcess *pentry )
{
    return 1;
}

/*
 * Handle an incoming "barrier" command
 *
 * Need a structure that has the fds for all members of a pmi group
 */
static int fPMI_Handle_barrier( PMIProcess *pentry )
{
    int i;
    PMIGroup *group = pentry->group;

    DBG_PRINTFCOND(pmidebug,("Entering PMI_Handle_barrier for group %d\n",
                    group->groupID) );

    group->nInBarrier++;
    if (group->nInBarrier == group->nProcess) {
        for ( i=0; i<group->nProcess; i++) {
            PMIWriteLine(group->pmiProcess[i]->fd, "cmd=barrier_out\n" );
        }
        group->nInBarrier = 0;
    }
    return 0;
}

/* Create a kvs and return a pointer to it */
static PMIKVSpace *fPMIKVSAllocate( void )
{
    PMIKVSpace *kvs, **kPrev, *k;
    int        rc;
    static int kvsnum = 0;    /* Used to generate names */

    /* Create the space */
    kvs = (PMIKVSpace *)MPIU_Malloc( sizeof(PMIKVSpace) );
    if (!kvs) {
        MPIU_Internal_error_printf( "too many kvs's\n" );
        return 0;
    }
    /* We include the pid of the PMI server as a way to allow multiple
       PMI servers to coexist.  This is needed to support connect/accept
       operations when multiple mpiexec's are used, and the KVS space
       is served directly by mpiexec (it should really have the
       hostname as well, just to avoid getting the same pid on two
       different hosts, but this is probably good enough for most
       uses) */
    MPIU_Snprintf( (char *)(kvs->kvsname), MAXNAMELEN, "kvs_%d_%d",
                   (int)getpid(), kvsnum++ );
    kvs->pairs     = 0;
    kvs->lastByIdx = 0;
    kvs->lastIdx   = -1;

    /* Insert into the list of KV spaces */
    kPrev = &pmimaster.kvSpaces;
    k     = pmimaster.kvSpaces;
    while (k) {
        rc = strcmp( k->kvsname, kvs->kvsname );
        if (rc > 0) break;
        kPrev = &k->nextKVS;
        k     = k->nextKVS;
    }
    kvs->nextKVS = k;
    *kPrev = kvs;

    return kvs;
