📄 pm_separate_rect.c
字号:
/* NOTE(review): the next four statements are the tail of the preceding
   function, whose opening lies outside this chunk.  They free the idle-slave
   list and, when X output is proxied through the master (no_remote_X), the
   master-side point buffer. */
free( idleList );
if (flags->no_remote_X) {
    free( pointData );
}
}

/*
 * SeparateRect_Slave - main loop of a slave process in the
 * rectangle-subdivision fractal renderer.
 *
 * Protocol (all with MASTER_PROC over MPI_COMM_WORLD):
 *   1. announce availability with an empty READY_TO_START message;
 *   2. repeatedly receive a rect `r` tagged either ASSIGNMENT or ALL_DONE;
 *   3. for ASSIGNMENT: if the rectangle is smaller than the breakout size,
 *      compute every point (ComputeChunk); otherwise compute only its border
 *      (ComputeBorder).  A border of a single color means the whole
 *      rectangle is that color and is drawn as a filled block; otherwise the
 *      rectangle is split in two (SplitRect) and the halves are returned to
 *      the master's queue.  After computing, an empty READY_FOR_MORE message
 *      asks for the next assignment;
 *   4. for ALL_DONE: leave the loop and free the work buffers.
 *
 * Results are either drawn directly (MPE_Draw_points / DrawBlock on `graph`,
 * plus the per-process `tracking_win` when with_tracking_win is set) or,
 * when flags->no_remote_X is set, shipped to rank 0 as BLOCK_TYPE /
 * POINT_COUNT / POINT_DATA / RECT_SPEC / RECT_COLOR / TRACKING_COLOR
 * messages for the master to display.
 *
 * Parameters (K&R style):
 *   graph    - MPE X graphics handle for this process's window
 *   winspecs - window geometry and color table
 *   flags    - fractal type, region bounds, iteration limits, mode flags
 */
void SeparateRect_Slave( graph, winspecs, flags )
MPE_XGraph graph;
Winspecs *winspecs;
Flags *flags;
{
    /* NOTE(review): borderData and datPtr are declared (and described below)
       but never used in this function; likewise rectBuf is unused here --
       SplitRect declares its own copy. */
    int x, y, working, isContinuous, *borderData, *datPtr, mesgTag, myid,
        dataSize, *iterData, npoints;
    int tracking_color;   /* this process's color in the tracking window;
                             only set/used when flags->with_tracking_win */
    /* x, y - integer counters for the point being calculated */
    /* working - whether this process is still working or has been retired */
    /* isContinuous - whether the border just computed is one continuous color */
    /* borderData - storage for the values calculated */
    /* datPtr - pointer into borderData */
    /* mesgTag - what type of message was just received */
    NUM rstep, istep;     /* per-pixel step in the real/imaginary direction */
    MPE_Point *pointData; /* computed pixels: (x, y, color) per point */
    int block_type;       /* POINTS or RECTANGLE, sent ahead of result data */
    rect r, rectBuf[2];   /* r - the rectangle being calculated */
    MPI_Status mesgStatus;

    MPI_Comm_rank( MPI_COMM_WORLD, &myid );
    if (flags->with_tracking_win) {
        tracking_color = winspecs->colorArray[winspecs->my_tracking_color];
    }
    /* empty message: tell the master this slave is ready for assignments */
    MPI_Send( 0, 0, MPI_INT, MASTER_PROC, READY_TO_START, MPI_COMM_WORLD );
    MPE_LOG_SEND( MASTER_PROC, READY_TO_START, 0 );
#if DEBUG
    fprintf( debug_file, "[%d]ready for duty\n", myid );
    fflush( debug_file );
#endif
    working = 1;
    /* step sizes mapping pixel coordinates onto the complex region; istep is
       built from imin-imax because pixel y grows downward while the
       imaginary axis grows upward */
    NUM_ASSIGN( rstep, NUM_DIV( NUM_SUB( flags->rmax, flags->rmin ),
                                INT2NUM( winspecs->width-1 ) ) );
    NUM_ASSIGN( istep, NUM_DIV( NUM_SUB( flags->imin, flags->imax ),
                                INT2NUM( winspecs->height-1 ) ) );
    /* figure out how much data might be stored and allocate space for it:
       either a full breakout-sized chunk or the border of the whole window,
       whichever is larger */
    x = flags->breakout * flags->breakout;
    y = 2 * (winspecs->height + winspecs->width);
    dataSize = ((y>x) ? y : x);
    /* NOTE(review): malloc results are not checked before use */
    iterData = (int *) malloc( dataSize * sizeof( int ) );
    pointData = (MPE_Point *) malloc( dataSize * sizeof( MPE_Point ) );
    Fract_SetRegion( flags->rmin, flags->rmax, flags->imin, flags->imax,
                     0, winspecs->width-1, 0, winspecs->height-1 );
    /* fprintf( stderr, "fractal = %d, MBROT = %d\n", flags->fractal, MBROT );*/
    switch (flags->fractal) {
    case MBROT:
        Mbrot_Settings( flags->boundary_sq, flags->maxiter );
        break;
    case JULIA:
        Julia_Settings( flags->boundary_sq, flags->maxiter,
                        flags->julia_r, flags->julia_i );
        break;
    case NEWTON:
        /* Newton_Settings( flags->epsilon, flags->newton.coeff,
                            flags->newton.nterms ); */
        /* NOTE(review): Newton mode falls back to Mandelbrot settings; the
           real Newton setup above is commented out */
        Mbrot_Settings( flags->boundary_sq, flags->maxiter );
        break;
    }

    while (working) {
        MPE_LOG_EVENT( S_WAIT_FOR_MESSAGE, 0, 0 );
        /* block until the master sends either an ASSIGNMENT rect or an
           ALL_DONE shutdown message (the rect payload is ignored then) */
        MPI_Recv( &r, 1, rect_type, MASTER_PROC, MPI_ANY_TAG,
                  MPI_COMM_WORLD, &mesgStatus );
        MPE_LOG_RECEIVE( MASTER_PROC, mesgStatus.MPI_TAG, sizeof( rect ) );
        /* get command from master process */
        MPE_LOG_EVENT( E_WAIT_FOR_MESSAGE, 0, 0 );
        mesgTag = mesgStatus.MPI_TAG;
#if DEBUG
        fprintf( debug_file, "receive: %d\n", mesgTag);
        fflush( debug_file );
#endif
        switch (mesgTag) {
        case ASSIGNMENT:
            /* new rectangle to compute */
#if DEBUG
            fprintf( debug_file, "Assigned (%d %d %d %d)\n",
                     r.l, r.r, r.t, r.b );
            fflush( debug_file );
#endif
            if (r.b-r.t<flags->breakout || r.r-r.l<flags->breakout) {
                /* if smaller than breakout, compute directly */
#if DEBUG
                fprintf( debug_file, "[%d]computing chunk\n", myid );
                fflush( debug_file );
#endif
                MPE_LOG_EVENT( S_COMPUTE, 0, 0 );
                ComputeChunk( flags, &r, pointData, iterData, dataSize,
                              &npoints );
                MPE_LOG_EVENT( E_COMPUTE, 0, 0 );
                /* ask for more work BEFORE drawing, so the master can queue
                   the next assignment while this one is displayed */
                MPI_Send( 0, 0, MPI_INT, MASTER_PROC, READY_FOR_MORE,
                          MPI_COMM_WORLD );
                MPE_LOG_SEND( MASTER_PROC, READY_FOR_MORE, 0 );
                MPE_LOG_EVENT( S_DRAW_CHUNK, 0, 0 );
#if DEBUG
                fprintf( debug_file, "[%d]drawing chunk\n", myid );
                fflush( debug_file );
#endif
                if (flags->no_remote_X) {
                    /* Send master the points to display */
                    block_type = POINTS;
                    MPI_Send( &block_type, 1, MPI_INT, 0, BLOCK_TYPE,
                              MPI_COMM_WORLD );
                    MPI_Send( &npoints, 1, MPI_INT, 0, POINT_COUNT,
                              MPI_COMM_WORLD );
                    /* assumes MPE_Point is exactly three ints (x, y, c) --
                       TODO confirm against the MPE_Point declaration */
                    MPI_Send( pointData, (3 * npoints), MPI_INT, 0,
                              POINT_DATA, MPI_COMM_WORLD );
                    if (flags->with_tracking_win) {
                        MPI_Send( &tracking_color, 1, MPI_INT, 0,
                                  TRACKING_COLOR, MPI_COMM_WORLD );
                    }
                } else {
                    MPE_Draw_points( graph, pointData, npoints );
                    MPE_Update( graph );
                    MPE_LOG_EVENT( E_DRAW_CHUNK, 0, 0 );
                    if (flags->with_tracking_win) {
                        int i;
                        /* recolor the same points in this process's tracking
                           color; pointData is not reused before the next
                           assignment, so overwriting .c is safe */
                        for (i=0; i<npoints; i++) {
                            pointData[i].c = tracking_color;
                        }
                        MPE_Draw_points( tracking_win, pointData, npoints );
                        MPE_Update( tracking_win );
                    }
                }
            } else {
                /* otherwise, compute the boundary */
                MPE_LOG_EVENT( S_COMPUTE, 0, 0 );
#if DEBUG
                fprintf( debug_file, "[%d]computing border\n", myid );
                fflush( debug_file );
#endif
                ComputeBorder( winspecs, flags, &r, pointData, dataSize,
                               &npoints, &isContinuous );
#if DEBUG>1
                {
                    int i;
                    fprintf( debug_file, "computed %d %s points\n", npoints,
                             isContinuous ? "continuous" : "noncontinuous");
                    for (i=0; i<npoints; i++) {
                        fprintf( debug_file, "check computed (%d %d) %d\n",
                                 pointData[i].x, pointData[i].y,
                                 pointData[i].c );
                    }
                }
#endif
                MPE_LOG_EVENT( E_COMPUTE, 0, 0 );
                if (!isContinuous) {
                    /* mixed-color border: hand the two halves back to the
                       master's work queue before reporting ready */
#if DEBUG
                    fprintf( debug_file,
                             "[%d]splitting and sending to master\n", myid );
                    fflush( debug_file );
#endif
                    SplitRect( flags, r );
                }
                MPI_Send( 0, 0, MPI_INT, MASTER_PROC, READY_FOR_MORE,
                          MPI_COMM_WORLD );
                MPE_LOG_SEND( MASTER_PROC, READY_FOR_MORE, 0 );
                if (isContinuous) {
                    /* uniform border: the whole rectangle is one color */
                    MPE_LOG_EVENT( S_DRAW_BLOCK, 0, 0 );
#if DEBUG
                    fprintf( debug_file, "[%d]drawing block\n", myid );
                    fflush( debug_file );
#endif
                    if (flags->no_remote_X) {
                        /* Send the master the rectangle to display */
                        block_type = RECTANGLE;
                        MPI_Send( &block_type, 1, MPI_INT, 0, BLOCK_TYPE,
                                  MPI_COMM_WORLD );
                        /* assumes rect is five ints (l, r, t, b, length) --
                           TODO confirm against the rect declaration */
                        MPI_Send( &r, 5, MPI_INT, 0, RECT_SPEC,
                                  MPI_COMM_WORLD );
                        /* all border points share one color; send the first */
                        MPI_Send( &(pointData->c), 1, MPI_INT, 0, RECT_COLOR,
                                  MPI_COMM_WORLD );
                        if (flags->with_tracking_win) {
                            MPI_Send( &tracking_color, 1, MPI_INT, 0,
                                      TRACKING_COLOR, MPI_COMM_WORLD );
                        }
                    } else {
                        DrawBlock( graph, pointData, &r );
                        MPE_Update( graph );
                        if (flags->with_tracking_win) {
                            /* Color the block to identify who computed it */
                            int i;
                            for (i=0; i<r.length; i++) {
                                pointData[i].c = tracking_color;
                            }
                            DrawBlock( tracking_win, pointData, &r );
                            MPE_Update( tracking_win );
                        }
                    }
                    MPE_LOG_EVENT( E_DRAW_BLOCK, 0, 0 );
                } else {
                    /* non-uniform border: display just the border points;
                       the interior halves were queued via SplitRect above */
                    MPE_LOG_EVENT( S_DRAW_RECT, 0, 0 );
#if DEBUG
                    fprintf( debug_file, "[%d]drawing border\n", myid );
                    fflush( debug_file );
#endif
#if DEBUG>2
                    {
                        int i;
                        for (i=0; i<npoints; i++) {
                            fprintf( debug_file, "drawing (%d %d) %d\n",
                                     pointData[i].x, pointData[i].y,
                                     pointData[i].c );
                        }
                    }
#endif
                    if (flags->no_remote_X) {
                        /* Send master the points to display */
                        block_type = POINTS;
                        MPI_Send( &block_type, 1, MPI_INT, 0, BLOCK_TYPE,
                                  MPI_COMM_WORLD );
                        MPI_Send( &npoints, 1, MPI_INT, 0, POINT_COUNT,
                                  MPI_COMM_WORLD );
                        /* assumes MPE_Point is exactly three ints -- see
                           note on the chunk path above */
                        MPI_Send( pointData, (3 * npoints), MPI_INT, 0,
                                  POINT_DATA, MPI_COMM_WORLD );
                        if (flags->with_tracking_win) {
                            MPI_Send( &tracking_color, 1, MPI_INT, 0,
                                      TRACKING_COLOR, MPI_COMM_WORLD );
                        }
                    } else {
                        /* NOTE(review): this path updates the window without
                           first drawing the border points -- unlike the
                           chunk path there is no
                           MPE_Draw_points( graph, pointData, npoints ) here.
                           Possibly a dropped call; verify against the
                           original source. */
                        MPE_Update( graph );
                        if (flags->with_tracking_win) {
                            int i;
                            /* Color the border to identify the computing
                               process. */
                            for (i=0; i<npoints; i++) {
                                pointData[i].c = tracking_color;
                            }
                            MPE_Draw_points( tracking_win, pointData,
                                             npoints );
                            MPE_Update( tracking_win );
                        }
                        /* NOTE(review): E_DRAW_RECT is logged only on this
                           local-X branch, unlike E_DRAW_BLOCK above which is
                           logged on both branches -- asymmetric logging */
                        MPE_LOG_EVENT( E_DRAW_RECT, 0, 0 );
                    }
                }
            } /* else !breakout */
            break; /* end if case ASSIGNMENT: */
        case ALL_DONE:
            /* master says all rectangles are finished; retire this slave */
#if DEBUG
            fprintf( debug_file, "[%d]all done\n", myid );
            fflush( debug_file );
#endif
            working=0;
            break;
        } /* end of switch */
    } /* end of while (working) */
    free( iterData );
    free( pointData );
}

/*
 * SplitRect - split rectangle `r` into two halves along its longer axis and
 * send both halves (tagged ADD2Q) to the master to be queued for further
 * processing.
 *
 * The halves are inset by one pixel on every outer edge (l+1, r-1, t+1, b-1)
 * because the caller has already computed and displayed r's one-pixel
 * border; only the interior remains to be done.
 *
 * Parameters (K&R style):
 *   flags - global options (currently unreferenced in the body; kept for
 *           signature compatibility with callers)
 *   r     - the rectangle whose interior is to be subdivided (by value)
 */
void
SplitRect( flags, r )
Flags *flags;
rect r;
{
    /* NOTE(review): numRect is declared but never used */
    int xsplit, ysplit, numRect;
    rect rectBuf[2];   /* the two halves shipped to the master */

    /* half-extents; the larger decides which axis to cut */
    xsplit = (r.r-r.l)>>1;
    ysplit = (r.b-r.t)>>1;
    if (xsplit>ysplit) {
        /* split the long side */
        RECT_ASSIGN( rectBuf[0], r.l+1, r.l+xsplit, r.t+1, r.b-1 );
        RECT_ASSIGN( rectBuf[1], r.l+xsplit+1, r.r-1, r.t+1, r.b-1 );
    } else {
        RECT_ASSIGN( rectBuf[0], r.l+1, r.r-1, r.t+1, r.t+ysplit );
        RECT_ASSIGN( rectBuf[1], r.l+1, r.r-1, r.t+ysplit+1, r.b-1 );
    }
    /* precompute each half's border length for the master's bookkeeping */
    rectBuf[0].length = RectBorderLen( rectBuf );
    rectBuf[1].length = RectBorderLen( rectBuf+1 );
    MPI_Send( rectBuf, 2, rect_type, MASTER_PROC, ADD2Q, MPI_COMM_WORLD );
    MPE_LOG_SEND( MASTER_PROC, ADD2Q, sizeof( rect ) * 2 );
    /* send the rectangles */
#if DEBUG
    fprintf( debug_file, "Sent master (%d %d %d %d and %d %d %d %d)\n",
             rectBuf[0].l, rectBuf[0].t, rectBuf[0].r, rectBuf[0].b,
             rectBuf[1].l, rectBuf[1].t, rectBuf[1].r, rectBuf[1].b );
    fflush( debug_file );
#endif
}
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -