/* ch3_progress_connect.c */
        *flag = FALSE;
        /* If there is no shm host key, assume that we can't use shared memory */
        /* mpi_errno = MPIR_Err_create_code(MPI_SUCCESS, MPIR_ERR_FATAL, FCNAME, __LINE__, MPI_ERR_OTHER, "**argstr_shmhost", 0); */
        mpi_errno = 0;
        return mpi_errno;
    }

    mpi_errno = MPIU_Str_get_string_arg(business_card, MPIDI_CH3I_SHM_QUEUE_KEY, queue_name, 100);
    if (mpi_errno != MPIU_STR_SUCCESS)
    {
        *flag = FALSE;
        mpi_errno = MPIR_Err_create_code(MPI_SUCCESS, MPIR_ERR_FATAL, FCNAME, __LINE__, MPI_ERR_OTHER, "**argstr_shmq", 0);
        return mpi_errno;
    }
#ifdef HAVE_SHARED_PROCESS_READ
    mpi_errno = MPIU_Str_get_string_arg(business_card, MPIDI_CH3I_SHM_PID_KEY, pid_str, 20);
    if (mpi_errno != MPIU_STR_SUCCESS)
    {
        *flag = FALSE;
        mpi_errno = MPIR_Err_create_code(MPI_SUCCESS, MPIR_ERR_FATAL, FCNAME, __LINE__, MPI_ERR_OTHER, "**argstr_shmpid", 0);
        return mpi_errno;
    }
#endif

    /* compare this host's name with the business card host name */
    if (strcmp(MPIDI_Process.my_pg->ch.shm_hostname, hostname) != 0)
    {
        *flag = FALSE;
        /*MPIU_DBG_PRINTF(("%s != %s\n", MPIDI_Process.my_pg->ch.shm_hostname, hostname));*/
        return MPI_SUCCESS;
    }
    *flag = TRUE;
    /*MPIU_DBG_PRINTF(("%s == %s\n", MPIDI_Process.my_pg->ch.shm_hostname, hostname));*/

    MPIU_DBG_PRINTF(("attaching to queue: %s\n", queue_name));
    mpi_errno = MPIDI_CH3I_BootstrapQ_attach(queue_name, &queue);
    if (mpi_errno != MPI_SUCCESS)
    {
        *flag = FALSE;
        return MPI_SUCCESS;
    }

    /* create the write queue */
    mpi_errno = MPIDI_CH3I_SHM_Get_mem(sizeof(MPIDI_CH3I_SHM_Queue_t), &vc->ch.shm_write_queue_info);
    if (mpi_errno != MPI_SUCCESS)
    {
        *flag = FALSE;
        mpi_errno = MPIR_Err_create_code(mpi_errno, MPIR_ERR_FATAL, FCNAME, __LINE__, MPI_ERR_OTHER, "**shmconnect_getmem", 0);
        return mpi_errno;
    }

    /*printf("rank %d sending queue(%s)\n", MPIR_Process.comm_world->rank, vc->ch.shm_write_queue_info.name);*/
    vc->ch.write_shmq = vc->ch.shm_write_queue_info.addr;
    vc->ch.write_shmq->head_index = 0;
    vc->ch.write_shmq->tail_index = 0;
    MPIDI_DBG_PRINTF((60, FCNAME, "write_shmq head = 0"));
    MPIDI_DBG_PRINTF((60, FCNAME, "write_shmq tail = 0"));
    for (i = 0; i < MPIDI_CH3I_NUM_PACKETS; i++)
    {
        vc->ch.write_shmq->packet[i].offset = 0;
        vc->ch.write_shmq->packet[i].avail = MPIDI_CH3I_PKT_EMPTY;
    }

    /* send the queue connection information */
    /*MPIU_DBG_PRINTF(("write_shmq: %p, name - %s\n", vc->ch.write_shmq, vc->ch.shm_write_queue_info.key));*/
    shm_info.info = vc->ch.shm_write_queue_info;
    /* printf("comm_world rank %d\nvc->pg_rank %d\nmy_pg_rank %d\nkvs_name:\n<%s>\npg_id:\n<%s>\n",
       MPIR_Process.comm_world->rank, vc->pg_rank, MPIDI_Process.my_pg_rank, vc->pg->ch.kvs_name, vc->pg->id);
       fflush(stdout); */
    MPIU_Strncpy(shm_info.pg_id, MPIDI_Process.my_pg->id, 100);
    shm_info.pg_rank = MPIDI_Process.my_pg_rank;
    shm_info.pid = getpid();
    MPIU_DBG_PRINTF(("MPIDI_CH3I_Shm_connect: sending bootstrap queue info from rank %d to msg queue %s\n",
                     MPIR_Process.comm_world->rank, queue_name));
    mpi_errno = MPIDI_CH3I_BootstrapQ_send_msg(queue, &shm_info, sizeof(shm_info));
    if (mpi_errno != MPI_SUCCESS)
    {
        MPIDI_CH3I_SHM_Unlink_mem(&vc->ch.shm_write_queue_info);
        MPIDI_CH3I_SHM_Release_mem(&vc->ch.shm_write_queue_info);
        *flag = FALSE;
        mpi_errno = MPIR_Err_create_code(mpi_errno, MPIR_ERR_FATAL, FCNAME, __LINE__, MPI_ERR_OTHER, "**boot_send", 0);
        return mpi_errno;
    }

    /* MPIU_Free the queue resource */
    /*MPIU_DBG_PRINTF(("detaching from queue: %s\n", queue_name));*/
    mpi_errno = MPIDI_CH3I_BootstrapQ_detach(queue);
    if (mpi_errno != MPI_SUCCESS)
    {
        MPIDI_CH3I_SHM_Unlink_mem(&vc->ch.shm_write_queue_info);
        MPIDI_CH3I_SHM_Release_mem(&vc->ch.shm_write_queue_info);
        *flag = FALSE;
        mpi_errno = MPIR_Err_create_code(mpi_errno, MPIR_ERR_FATAL, FCNAME, __LINE__, MPI_ERR_OTHER, "**boot_detach", 0);
        return mpi_errno;
    }

#ifdef HAVE_SHARED_PROCESS_READ
#ifdef HAVE_WINDOWS_H
    /*MPIU_DBG_PRINTF(("Opening process[%d]: %d\n", i, pSharedProcess[i].nPid));*/
    vc->ch.hSharedProcessHandle =
        OpenProcess(STANDARD_RIGHTS_REQUIRED | PROCESS_VM_READ | PROCESS_VM_WRITE | PROCESS_VM_OPERATION,
                    FALSE, atoi(pid_str));
    if (vc->ch.hSharedProcessHandle == NULL)
    {
        int err = GetLastError();
        MPIDI_CH3I_SHM_Unlink_mem(&vc->ch.shm_write_queue_info);
        MPIDI_CH3I_SHM_Release_mem(&vc->ch.shm_write_queue_info);
        *flag = FALSE;
        mpi_errno = MPIR_Err_create_code(MPI_SUCCESS, MPIR_ERR_FATAL, FCNAME, __LINE__, MPI_ERR_OTHER, "**OpenProcess", "**OpenProcess %d %d", i, err);
        return mpi_errno;
    }
#else
    MPIU_Snprintf(filename, 256, "/proc/%s/mem", pid_str);
    vc->ch.nSharedProcessID = atoi(pid_str);
    vc->ch.nSharedProcessFileDescriptor = open(filename, O_RDWR/*O_RDONLY*/);
    if (vc->ch.nSharedProcessFileDescriptor == -1)
    {
        MPIDI_CH3I_SHM_Unlink_mem(&vc->ch.shm_write_queue_info);
        MPIDI_CH3I_SHM_Release_mem(&vc->ch.shm_write_queue_info);
        *flag = FALSE;
        mpi_errno = MPIR_Err_create_code(MPI_SUCCESS, MPIR_ERR_FATAL, FCNAME, __LINE__, MPI_ERR_OTHER, "**open", "**open %s %d %d", filename, atoi(pid_str), errno);
        return mpi_errno;
    }
#endif
#endif

    return MPI_SUCCESS;
}
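/*
 * The shared-memory connect path above works by pulling a handful of
 * key/value entries (host name, bootstrap queue name and, when
 * HAVE_SHARED_PROCESS_READ is defined, the peer pid) out of the peer's
 * business card with MPIU_Str_get_string_arg, then posting a connection
 * request to the peer's bootstrap message queue.
 *
 * The sketch below is NOT part of the channel.  It is a minimal,
 * self-contained illustration of the kind of "key=value key=value"
 * lookup the code above relies on; the function name
 * (example_get_string_arg), the made-up keys in the usage note, and the
 * flat "key=value" layout are assumptions for illustration only -- the
 * real MPIU_Str_* parser and business-card format are more general.
 * Assumes <string.h>, which this file already uses (strcmp).
 *
 * Usage (hypothetical keys and card):
 *   char name[100];
 *   example_get_string_arg("shm_host=nodeA shm_queue=mpich_q_42",
 *                          "shm_queue", name, sizeof(name));
 */
static int example_get_string_arg(const char *card, const char *key,
                                  char *val, int maxlen)
{
    const char *p = card;
    size_t klen = strlen(key);

    while ((p = strstr(p, key)) != NULL)
    {
        /* accept a match only at the start of a token and followed by '=' */
        if ((p == card || p[-1] == ' ') && p[klen] == '=')
        {
            const char *v = p + klen + 1;
            int i = 0;
            while (v[i] != '\0' && v[i] != ' ' && i < maxlen - 1)
            {
                val[i] = v[i];
                i++;
            }
            val[i] = '\0';
            return 0;    /* found */
        }
        p += klen;
    }
    return -1;           /* key not present in the card */
}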
#undef FUNCNAME
#define FUNCNAME MPIDI_CH3I_VC_post_connect
#undef FCNAME
#define FCNAME MPIDI_QUOTE(FUNCNAME)
int MPIDI_CH3I_VC_post_connect(MPIDI_VC_t * vc)
{
    int mpi_errno = MPI_SUCCESS;
    char key[MPIDI_MAX_KVS_KEY_LEN];
    char val[MPIDI_MAX_KVS_VALUE_LEN];
    char host_description[MAX_HOST_DESCRIPTION_LEN];
    int port;
    unsigned char ifaddr[4];
    int hasIfaddr = 0;
    int rc;
    MPIDI_CH3I_Connection_t * conn;
    int connected;
    MPIDI_STATE_DECL(MPID_STATE_MPIDI_CH3I_VC_POST_CONNECT);

    MPIDI_FUNC_ENTER(MPID_STATE_MPIDI_CH3I_VC_POST_CONNECT);
    MPIDI_DBG_PRINTF((60, FCNAME, "entering"));

    if (vc->ch.state != MPIDI_CH3I_VC_STATE_UNCONNECTED)
    {
        mpi_errno = MPIR_Err_create_code(MPI_SUCCESS, MPIR_ERR_FATAL, FCNAME, __LINE__, MPI_ERR_OTHER, "**vc_state", "**vc_state %d", vc->ch.state);
        goto fn_fail;
    }
    vc->ch.state = MPIDI_CH3I_VC_STATE_CONNECTING;

    /* get the business card */
    rc = MPIU_Snprintf(key, MPIDI_MAX_KVS_KEY_LEN, "P%d-businesscard", vc->pg_rank);
    if (rc < 0 || rc > MPIDI_MAX_KVS_KEY_LEN)
    {
        mpi_errno = MPIR_Err_create_code(MPI_SUCCESS, MPIR_ERR_FATAL, FCNAME, __LINE__, MPI_ERR_OTHER, "**snprintf", "**snprintf %d", rc);
        goto fn_fail;
    }
    mpi_errno = MPIDI_KVS_Get(vc->pg->ch.kvs_name, key, val);
    if (mpi_errno != MPI_SUCCESS)
    {
        mpi_errno = MPIR_Err_create_code(mpi_errno, MPIR_ERR_FATAL, FCNAME, __LINE__, MPI_ERR_OTHER, "**pmi_kvs_get", "**pmi_kvs_get %d", rc);
        goto fn_fail;
    }
    /* MPIU_DBG_PRINTF(("%s: %s\n", key, val)); */

    /* attempt to connect through shared memory */
    /* printf( "trying to connect through shared memory...\n"); fflush(stdout); */
    connected = FALSE;
    /* MPIU_DBG_PRINTF(("business card: <%s> = <%s>\n", key, val)); */
    mpi_errno = MPIDI_CH3I_Shm_connect(vc, val, &connected);
    /* printf( "After attempt to connect, flag = %d and rc = %d\n", connected, mpi_errno ); fflush(stdout); */
    if (mpi_errno != MPI_SUCCESS)
    {
        mpi_errno = MPIR_Err_create_code(mpi_errno, MPIR_ERR_FATAL, FCNAME, __LINE__, MPI_ERR_OTHER, "**post_connect", "**post_connect %s", "MPIDI_CH3I_Shm_connect");
        goto fn_fail;
    }
    if (connected)
    {
        MPIDI_VC_t *iter;
        int count = 0;

        /*MPIU_DBG_PRINTF(("shmem connected\n"));*/
        MPIDI_CH3I_SHM_Add_to_writer_list(vc);
        /* If there are more shm connections than cpus, reduce the spin count
           to one. */
        /* This does not take into account connections between other processes
           on the same machine. */
        iter = MPIDI_CH3I_Process.shm_writing_list;
        while (iter)
        {
            count++;
            iter = iter->ch.shm_next_writer;
        }
        if (count >= MPIDI_CH3I_Process.num_cpus)
        {
            MPIDI_Process.my_pg->ch.nShmWaitSpinCount = 1;
        }
        vc->ch.state = MPIDI_CH3I_VC_STATE_CONNECTED;
        vc->ch.bShm = TRUE;
        vc->ch.shm_reading_pkt = TRUE;
        vc->ch.send_active = MPIDI_CH3I_SendQ_head(vc); /* MT */
        goto fn_exit;
    }

    /* printf( "Attempting to connect through socket\n" );fflush(stdout); */
    MPIU_DBG_MSG_S(CH3_CONNECT,TYPICAL,
                   "Attempting to connect with business card %s", val );
    /* attempt to connect through sockets */
    mpi_errno = MPIDU_Sock_get_conninfo_from_bc( val, host_description, sizeof(host_description), &port, ifaddr, &hasIfaddr );
    if (mpi_errno)
    {
        MPIU_ERR_POP(mpi_errno);
    }
    /* printf ("Allocating connection\n" );fflush(stdout);*/
    mpi_errno = MPIDI_CH3I_Connection_alloc(&conn);
    if (mpi_errno == MPI_SUCCESS)
    {
        /* printf( "Posting socket connection\n" );fflush(stdout);*/
        /* FIXME: This is a hack to allow Windows to continue to use the
           host description string instead of the interface address bytes
           when posting a socket connection.  This should be fixed by
           changing the Sock_post_connect to only accept interface
           addresses.  See also channels/sock/ch3_progress.c */
#ifndef HAVE_WINDOWS_H
        if (hasIfaddr)
        {
            mpi_errno = MPIDU_Sock_post_connect_ifaddr(MPIDI_CH3I_sock_set, conn, ifaddr, port, &conn->sock);
        }
        else
#endif
        {
            mpi_errno = MPIDU_Sock_post_connect(MPIDI_CH3I_sock_set, conn, host_description, port, &conn->sock);
        }
        if (mpi_errno == MPI_SUCCESS)
        {
            vc->ch.sock = conn->sock;
            vc->ch.conn = conn;
            conn->vc = vc;
            conn->state = CONN_STATE_CONNECTING;
            conn->send_active = NULL;
            conn->recv_active = NULL;
        }
        else
        {
            mpi_errno = MPIR_Err_create_code(mpi_errno, MPIR_ERR_FATAL, FCNAME, __LINE__, MPI_ERR_OTHER, "**ch3|sock|postconnect", "**ch3|sock|postconnect %d %d %s", MPIR_Process.comm_world->rank, vc->pg_rank, val);
            vc->ch.state = MPIDI_CH3I_VC_STATE_FAILED;
            MPIDI_CH3I_Connection_free(conn);
        }
    }
    else
    {
        MPIU_ERR_SETANDJUMP(mpi_errno, MPI_ERR_OTHER, "**ch3|sock|connalloc");
    }

 fn_exit:
    /* printf("Exiting with %d\n", mpi_errno );fflush(stdout);*/
    MPIDI_DBG_PRINTF((60, FCNAME, "exiting"));
    MPIDI_FUNC_EXIT(MPID_STATE_MPIDI_CH3I_VC_POST_CONNECT);
    return mpi_errno;
 fn_fail:
    goto fn_exit;
}

#undef FUNCNAME
#define FUNCNAME MPIDI_CH3I_Connection_free
#undef FCNAME
#define FCNAME MPIDI_QUOTE(FUNCNAME)
void MPIDI_CH3I_Connection_free(MPIDI_CH3I_Connection_t * conn)
{
    MPIDI_STATE_DECL(MPID_STATE_MPIDI_CH3I_CONNECTION_FREE);

    MPIDI_FUNC_ENTER(MPID_STATE_MPIDI_CH3I_CONNECTION_FREE);
    MPIU_Free(conn->pg_id);
    MPIU_Free(conn);
    MPIDI_FUNC_EXIT(MPID_STATE_MPIDI_CH3I_CONNECTION_FREE);
}
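/*
 * For reference, the write queue that MPIDI_CH3I_Shm_connect initializes
 * above (head_index, tail_index, and a fixed array of packets whose
 * "avail" flag starts out MPIDI_CH3I_PKT_EMPTY) behaves like a bounded
 * single-writer / single-reader ring.  The model below is NOT the
 * channel's MPIDI_CH3I_SHM_Queue_t; it is a simplified, self-contained
 * sketch of that pattern with hypothetical names (example_ring_t,
 * EXAMPLE_NUM_SLOTS) and without the volatile/memory-barrier handling a
 * real shared-memory queue needs.  Assumes <string.h> for memcpy.
 */
#define EXAMPLE_NUM_SLOTS  8
#define EXAMPLE_SLOT_EMPTY 0
#define EXAMPLE_SLOT_FULL  1

typedef struct example_slot
{
    int  avail;              /* EXAMPLE_SLOT_EMPTY or EXAMPLE_SLOT_FULL */
    int  num_bytes;
    char data[128];
} example_slot_t;

typedef struct example_ring
{
    int head_index;          /* next slot the writer will fill */
    int tail_index;          /* next slot the reader will drain */
    example_slot_t slot[EXAMPLE_NUM_SLOTS];
} example_ring_t;

/* Writer side: claim the head slot if it is empty, fill it, publish it. */
static int example_ring_write(example_ring_t *q, const char *buf, int len)
{
    example_slot_t *s = &q->slot[q->head_index];

    if (s->avail != EXAMPLE_SLOT_EMPTY || len > (int)sizeof(s->data))
        return -1;                       /* ring full, or message too big */
    memcpy(s->data, buf, (size_t)len);
    s->num_bytes = len;
    s->avail = EXAMPLE_SLOT_FULL;        /* publish after the payload */
    q->head_index = (q->head_index + 1) % EXAMPLE_NUM_SLOTS;
    return 0;
}

/* Reader side: drain the tail slot once it has been published. */
static int example_ring_read(example_ring_t *q, char *buf, int maxlen)
{
    example_slot_t *s = &q->slot[q->tail_index];
    int len;

    if (s->avail != EXAMPLE_SLOT_FULL)
        return -1;                       /* nothing to read yet */
    len = (s->num_bytes < maxlen) ? s->num_bytes : maxlen;
    memcpy(buf, s->data, (size_t)len);
    s->avail = EXAMPLE_SLOT_EMPTY;       /* hand the slot back to the writer */
    q->tail_index = (q->tail_index + 1) % EXAMPLE_NUM_SLOTS;
    return len;
}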