/* tcp_echo.c - eCos network test: TCP echo throughput with background-load modelling */
//
// Background-load thread.  Each thread blocks on its own semaphore;
// once enabled it performs 'load_thread_level' units of busy work per
// system tick, re-posting the semaphore so it can be stopped again by
// whoever grabs it.
//
void
net_load(cyg_addrword_t who)
{
    int step;
    for (;;) {
        // Block until this load thread has been enabled
        cyg_semaphore_wait(&load_thread_sem[who]);
        for (step = 0; step < load_thread_level; step++) {
            do_some_random_computation(step, who);
        }
        // Sleep until the next scheduler tick, then re-arm
        cyg_thread_delay(1);
        cyg_semaphore_post(&load_thread_sem[who]);
    }
}
//
// Some arbitrary computation, designed to use up the CPU and cause associated
// cache "thrash" behaviour - part of background load modelling.
//
// One call reads and writes two adjacent slots of a 64K+1 entry static
// table; a static counter advances the access pattern on every call.
//
static void
do_some_random_computation(int p, int id)
{
    // 0x10001 entries so that index idx+1 is valid for any idx in 0..0xffff
    static int footle[0x10001];
    static int counter = 0;
    int idx = ((p << 8) + id + counter++) & 0xffff;
    footle[idx + 1] += footle[idx] + 1;
}
//
// Idle-counter thread.  Spins incrementing 'idle_thread_count' for as
// long as 'idle_thread_sem' is free, re-posting the semaphore on each
// pass so the main thread can stop the count simply by taking it.
//
void
net_idle(cyg_addrword_t param)
{
    for (;;) {
        cyg_semaphore_wait(&idle_thread_sem);
        ++idle_thread_count;
        cyg_semaphore_post(&idle_thread_sem);
    }
}
//
// Main test body.  Listens on two TCP ports, accepts one "source" and
// one "sink" connection, reads the test parameters from the source and
// forwards them to the sink, then echoes params.nbufs buffers of
// params.bufsize bytes from source to sink while a configurable
// background CPU load runs.  Finally reports elapsed time, measured
// idle headroom, and how close the generated load came to the request.
//
static void
echo_test(cyg_addrword_t p)
{
    int s_source, s_sink, e_source, e_sink;
    struct sockaddr_in e_source_addr, e_sink_addr, local;
    int one = 1;
    fd_set in_fds;
    int i, num, len;
    struct test_params params, nparams;
    struct test_status status, nstatus;
    cyg_tick_count_t starttime, stoptime;

    // --- Listening socket for the "source" side ---
    s_source = socket(AF_INET, SOCK_STREAM, 0);
    if (s_source < 0) {
        pexit("stream socket");
    }
    // Address-reuse options must be set *before* bind() to take effect
    if (setsockopt(s_source, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one))) {
        pexit("setsockopt /source/ SO_REUSEADDR");
    }
    if (setsockopt(s_source, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one))) {
        pexit("setsockopt /source/ SO_REUSEPORT");
    }
    memset(&local, 0, sizeof(local));
    local.sin_family = AF_INET;
    local.sin_len = sizeof(local);
    // htons() is the semantically correct direction for sin_port
    // (the original used ntohs(), which yields the same value)
    local.sin_port = htons(SOURCE_PORT);
    local.sin_addr.s_addr = INADDR_ANY;
    if (bind(s_source, (struct sockaddr *) &local, sizeof(local)) < 0) {
        pexit("bind /source/ error");
    }
    if (listen(s_source, SOMAXCONN) < 0) {
        pexit("listen /source/ error");
    }

    // --- Listening socket for the "sink" side ---
    s_sink = socket(AF_INET, SOCK_STREAM, 0);
    if (s_sink < 0) {
        pexit("stream socket");
    }
    // BUGFIX: these two options were previously set after bind(), where
    // SO_REUSEADDR/SO_REUSEPORT cannot influence the bind; reordered to
    // match the /source/ socket above.
    if (setsockopt(s_sink, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one))) {
        pexit("setsockopt /sink/ SO_REUSEADDR");
    }
    if (setsockopt(s_sink, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one))) {
        pexit("setsockopt /sink/ SO_REUSEPORT");
    }
    memset(&local, 0, sizeof(local));
    local.sin_family = AF_INET;
    local.sin_len = sizeof(local);
    local.sin_port = htons(SINK_PORT);
    local.sin_addr.s_addr = INADDR_ANY;
    if (bind(s_sink, (struct sockaddr *) &local, sizeof(local)) < 0) {
        pexit("bind /sink/ error");
    }
    if (listen(s_sink, SOMAXCONN) < 0) {
        pexit("listen /sink/ error");
    }

    e_source = 0; e_sink = 0;
    while (true) {
        // Wait for a connection on either of the ports
        FD_ZERO(&in_fds);
        FD_SET(s_source, &in_fds);
        FD_SET(s_sink, &in_fds);
        num = select(max(s_sink, s_source)+1, &in_fds, 0, 0, 0);
        if (num < 0) {
            // in_fds is undefined after a failed select - retry
            continue;
        }
        if (FD_ISSET(s_source, &in_fds)) {
            len = sizeof(e_source_addr);
            if ((e_source = accept(s_source, (struct sockaddr *)&e_source_addr, &len)) < 0) {
                pexit("accept /source/");
            }
            diag_printf("SOURCE connection from %s:%d\n",
                        inet_ntoa(e_source_addr.sin_addr), ntohs(e_source_addr.sin_port));
        }
        if (FD_ISSET(s_sink, &in_fds)) {
            len = sizeof(e_sink_addr);
            if ((e_sink = accept(s_sink, (struct sockaddr *)&e_sink_addr, &len)) < 0) {
                pexit("accept /sink/");
            }
            diag_printf("SINK connection from %s:%d\n",
                        inet_ntoa(e_sink_addr.sin_addr), ntohs(e_sink_addr.sin_port));
        }
        // Continue with test once a connection is established in both directions
        if ((e_source != 0) && (e_sink != 0)) {
            break;
        }
    }

    // Wait for "source" to tell us the testing parameters (network byte order)
    if (do_read(e_source, &nparams, sizeof(nparams)) != sizeof(nparams)) {
        pexit("Can't read initialization parameters");
    }
    params.nbufs = ntohl(nparams.nbufs);
    params.bufsize = ntohl(nparams.bufsize);
    params.load = ntohl(nparams.load);
    diag_printf("Using %d buffers of %d bytes each, %d%% background load\n",
                params.nbufs, params.bufsize, params.load);
    // Tell the sink what the parameters are (still in network byte order)
    if (do_write(e_sink, &nparams, sizeof(nparams)) != sizeof(nparams)) {
        pexit("Can't write initialization parameters");
    }
    status.ok = 1;
    nstatus.ok = htonl(status.ok);
    // Tell the "source" to start - we're all connected and ready to go!
    if (do_write(e_source, &nstatus, sizeof(nstatus)) != sizeof(nstatus)) {
        pexit("Can't send ACK to 'source' host");
    }

    idle_thread_count = 0;
    cyg_semaphore_post(&idle_thread_sem);  // Start idle thread
    starttime = cyg_current_time();
    start_load(params.load);
    TNR_ON();
    // Echo the data from the source to the sink hosts.  Short reads or
    // writes are reported but the test keeps going with the next buffer.
    for (i = 0; i < params.nbufs; i++) {
        if ((len = do_read(e_source, data_buf, params.bufsize)) != params.bufsize) {
            TNR_OFF();
            diag_printf("Can't read buf #%d: ", i+1);
            if (len < 0) {
                perror("I/O error");
            } else {
                diag_printf("short read - only %d bytes\n", len);
            }
            TNR_ON();
        }
        if ((len = do_write(e_sink, data_buf, params.bufsize)) != params.bufsize) {
            TNR_OFF();
            diag_printf("Can't write buf #%d: ", i+1);
            if (len < 0) {
                perror("I/O error");
            } else {
                diag_printf("short write - only %d bytes\n", len);
            }
            TNR_ON();
        }
    }
    TNR_OFF();

    // Wait for the data to drain and the "sink" to tell us all is OK.
    if (do_read(e_sink, &status, sizeof(status)) != sizeof(status)) {
        pexit("Can't receive ACK from 'sink' host");
    }
    start_load(0);
    cyg_semaphore_wait(&idle_thread_sem);  // Stop idle thread
    stoptime = cyg_current_time();
    stoptime -= starttime;  // time taken in cS
    // Expected idle loops in that time period for an idle system
    // (assumes no_load_idle_count_1_second > 0, i.e. calibration ran)
    starttime = no_load_idle_count_1_second * stoptime / 100;
    diag_printf( "%d ticks elapsed, %d kloops predicted for an idle system\n",
                 (int)stoptime, (int)(starttime/1000) );
    diag_printf( "actual kloops %d, CPU was %d%% idle during transfer\n",
                 (int)(idle_thread_count/1000),
                 (int)(idle_thread_count * 100 / starttime) );

    // Now examine how close that loading actually was: run the load for
    // exactly one second with the idle counter going and compare.
    start_load(params.load);               // Start up a given load
    idle_thread_count = 0;
    cyg_semaphore_post(&idle_thread_sem);  // Start idle thread
    cyg_thread_delay(1*100);               // Pause for one second
    cyg_semaphore_wait(&idle_thread_sem);  // Stop idle thread
    start_load(0);                         // Shut down background load
    i = 100 - ((idle_thread_count * 100) / no_load_idle_count_1_second );
    diag_printf("Final load[%d] = %d => %d%%\n", load_thread_level,
                (int)idle_thread_count, i);
//#ifdef CYGDBG_USE_ASSERTS
#ifdef CYGDBG_NET_TIMING_STATS
    {
        extern void show_net_times(void);
        show_net_times();
    }
#endif
//#endif
}
//
// Test driver thread: bring up the network interfaces, calibrate the
// background-load machinery, run the echo test, then report timing
// activity and exit the test harness.
//
void
net_test(cyg_addrword_t param)
{
    diag_printf("Start TCP test - ECHO mode\n");
    init_all_network_interfaces();
    calibrate_load(DESIRED_BACKGROUND_LOAD);
    TNR_INIT();
#ifdef CYGPKG_SNMPAGENT
    {
        // Optional SNMP agent, only when the package is configured in
        extern void cyg_net_snmp_init(void);
        cyg_net_snmp_init();
    }
#endif
    echo_test(param);
    TNR_PRINT_ACTIVITY();
    cyg_test_exit();
}
//
// System entry point: create the main test thread, the idle-counter
// thread and the background-load threads, then hand control to the
// eCos scheduler (none of the threads run before that point).
//
void
cyg_start(void)
{
    int idx;

    // Main thread which actually runs the test
    cyg_thread_create(MAIN_THREAD_PRIORITY,     // Priority
                      net_test,                 // Entry
                      0,                        // Entry parameter
                      "Network test",           // Name
                      &stack[0],                // Stack
                      STACK_SIZE,               // Size
                      &thread_handle,           // Handle
                      &thread_data);            // Thread data structure
    cyg_thread_resume(thread_handle);

    // Idle-counter environment: semaphore starts taken, so the idle
    // thread stays blocked until the test posts it
    cyg_semaphore_init(&idle_thread_sem, 0);
    cyg_thread_create(IDLE_THREAD_PRIORITY,     // Priority
                      net_idle,                 // Entry
                      0,                        // Entry parameter
                      "Network idle",           // Name
                      &idle_thread_stack[0],    // Stack
                      STACK_SIZE,               // Size
                      &idle_thread_handle,      // Handle
                      &idle_thread_data);       // Thread data structure
    cyg_thread_resume(idle_thread_handle);

    // One background-load thread per slot, each gated by its own semaphore
    for (idx = 0; idx < NUM_LOAD_THREADS; idx++) {
        cyg_semaphore_init(&load_thread_sem[idx], 0);
        cyg_thread_create(LOAD_THREAD_PRIORITY,          // Priority
                          net_load,                      // Entry
                          idx,                           // Entry parameter
                          "Background load",             // Name
                          &load_thread_stack[idx][0],    // Stack
                          STACK_SIZE,                    // Size
                          &load_thread_handle[idx],      // Handle
                          &load_thread_data[idx]);       // Thread data structure
        cyg_thread_resume(load_thread_handle[idx]);
    }

    cyg_scheduler_start();
}
// EOF tcp_echo.c
/* (web-viewer keyboard-shortcut footer removed - not part of the source) */