📄 perf-base.c
/*
** NAME
**   GetPktDropStats
**
** DESCRIPTION
**   Gets the packet drop statistics from the OS.
**   NOTE: Currently only pcap-based sniffing is supported.  Should
**   add native OS calls.
**
** FORMAL INPUT
**   SFBASE *       - ptr to struct
**   SFBASE_STATS * - ptr to struct to fill in with perf stats
**
** FORMAL OUTPUT
**   int - 0 is successful
*/
int GetPktDropStats(SFBASE *sfBase, SFBASE_STATS *sfBaseStats)
{
    /*
    ** Network Interfaces.  Right now we only check
    ** the first interface.
    */
    extern pcap_t *pd;
    struct pcap_stat pcapStats;

    if((!pd)
#ifdef WIN32
       || (pv.readmode_flag)
#endif
      )
    {
        sfBaseStats->pkt_stats.pkts_recv = sfBaseStats->total_packets;
        sfBaseStats->pkt_stats.pkts_drop = 0;
        sfBaseStats->pkt_drop_percent    = 0.0;
        return 0;
    }

    if(pcap_stats(pd, &pcapStats) < 0)
    {
        sfBaseStats->pkt_stats.pkts_recv = sfBaseStats->total_packets;
        sfBaseStats->pkt_stats.pkts_drop = 0;
        sfBaseStats->pkt_drop_percent    = 0.0;
    }
    else
    {
        if( sfBase->iReset == 0 )
        {
            if (pcapStats.ps_recv < (u_int32_t)sfBase->pkt_stats.pkts_recv)
            {
                /* Rollover of the pcap stat value */
                sfBaseStats->pkt_stats.pkts_recv = pcapStats.ps_recv +
                    (UINT32_MAX - (u_int32_t)sfBase->pkt_stats.pkts_recv);
            }
            else
            {
                sfBaseStats->pkt_stats.pkts_recv = pcapStats.ps_recv -
                    (u_int32_t)sfBase->pkt_stats.pkts_recv;
            }

            if (pcapStats.ps_drop < (u_int32_t)sfBase->pkt_stats.pkts_drop)
            {
                /* Rollover of the pcap stat value */
                sfBaseStats->pkt_stats.pkts_drop = pcapStats.ps_drop +
                    (UINT32_MAX - (u_int32_t)sfBase->pkt_stats.pkts_drop);
            }
            else
            {
                sfBaseStats->pkt_stats.pkts_drop = pcapStats.ps_drop -
                    (u_int32_t)sfBase->pkt_stats.pkts_drop;
            }
        }
        else
        {
            sfBaseStats->pkt_stats.pkts_recv = pcapStats.ps_recv;
            sfBaseStats->pkt_stats.pkts_drop = pcapStats.ps_drop;
        }

        sfBaseStats->pkt_drop_percent =
            ((double)sfBaseStats->pkt_stats.pkts_drop /
             (double)sfBaseStats->pkt_stats.pkts_recv) * 100;

        /*
        ** Reset sfBase stats for next go round.
        */
        sfBase->pkt_stats.pkts_recv = pcapStats.ps_recv;
        sfBase->pkt_stats.pkts_drop = pcapStats.ps_drop;
    }

    return 0;
}
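/*
** Added note (not in the original source): a worked example of the
** rollover handling above, assuming pcap's counters are 32-bit.  If the
** previous snapshot stored in sfBase->pkt_stats.pkts_recv was 4294967290
** and pcap_stats() now reports ps_recv == 10, the counter has wrapped,
** so the interval count is computed as
**   10 + (UINT32_MAX - 4294967290) = 10 + 5 = 15 packets,
** instead of the huge value a plain unsigned subtraction would give.
*/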
/*
 * Log Base Perf Stats to File for Use by the MC
 *
 * unixtime (in secs since epoch)
 * %pkts dropped
 * mbits/sec (wire)
 * alerts/sec
 * K-Packets/Sec (wire)
 * Avg Bytes/Pkt (wire)
 * %bytes pattern matched
 * syns/sec
 * synacks/sec
 * new-sessions/sec (stream cache)
 * del-sessions/sec (stream cache)
 * total-sessions open (stream cache)
 * max-sessions (stream cache)
 * streamflushes/sec
 * streamfaults/sec
 * streamtimeouts
 * fragcreates/sec
 * fragcompletes/sec
 * fraginserts/sec
 * fragdeletes/sec
 * fragautofrees/sec
 * fragflushes/sec
 * current-frags open (frag cache)
 * max-frags (frag cache)
 * fragtimeouts
 * fragfaults
 * num cpus (following triple is repeated for each CPU)
 * %user-cpu usage
 * %sys-cpu usage
 * %idle-cpu usage
 * mbits/sec (wire)
 * mbits/sec (ip fragmented)
 * mbits/sec (ip reassembled)
 * mbits/sec (stream rebuilt)
 * mbits/sec (app layer)
 * Avg Bytes/Pkt (wire)
 * Avg Bytes/Pkt (ip fragmented)
 * Avg Bytes/Pkt (ip reassembled)
 * Avg Bytes/Pkt (stream rebuilt)
 * Avg Bytes/Pkt (app layer)
 * K-Packets/Sec (wire)
 * K-Packets/Sec (ip fragmented)
 * K-Packets/Sec (ip reassembled)
 * K-Packets/Sec (stream rebuilt)
 * K-Packets/Sec (app layer)
 *
 */
int LogBasePerfStats(SFBASE_STATS *sfBaseStats, FILE *fh)
{
    double sys = 0.0, usr = 0.0, idle = 0.0;
#ifdef LINUX_SMP
    int iCtr;
#endif

    if( !fh )
        return 0;

    fprintf(fh, "%lu,%.3f,%.1f,%.1f,%.1f,%d,%.2f,",
            (unsigned long)sfBaseStats->time,
            sfBaseStats->pkt_drop_percent,
            sfBaseStats->wire_mbits_per_sec.realtime,
            sfBaseStats->alerts_per_second,
            sfBaseStats->kpackets_per_sec.realtime,
            sfBaseStats->avg_bytes_per_packet,
            sfBaseStats->patmatch_percent);

    /* Session estimation statistics */
    fprintf(fh,
#ifdef WIN32
            "%.1f,%.1f,%.1f,%.1f,%I64i,%I64i,",
#else
            "%.1f,%.1f,%.1f,%.1f,%llu,%llu,",
#endif
            sfBaseStats->syns_per_second,
            sfBaseStats->synacks_per_second,
            sfBaseStats->new_sessions_per_second,
            sfBaseStats->deleted_sessions_per_second,
            sfBaseStats->total_sessions,
            sfBaseStats->max_sessions);

    fprintf(fh,
#ifdef WIN32
            "%.1f,%I64i,%I64i,",
#else
            "%.1f,%llu,%llu,",
#endif
            sfBaseStats->stream_flushes_per_second,
            sfBaseStats->stream_faults,
            sfBaseStats->stream_timeouts);

    fprintf(fh,
#ifdef WIN32
            "%.1f,%.1f,%.1f,%.1f,%.1f,%.1f,%I64i,%I64i,%I64i,%I64i,",
#else
            "%.1f,%.1f,%.1f,%.1f,%.1f,%.1f,%llu,%llu,%llu,%llu,",
#endif
            sfBaseStats->frag_creates_per_second,
            sfBaseStats->frag_completes_per_second,
            sfBaseStats->frag_inserts_per_second,
            sfBaseStats->frag_deletes_per_second,
            sfBaseStats->frag_autofrees_per_second,
            sfBaseStats->frag_flushes_per_second,
            sfBaseStats->current_frags,
            sfBaseStats->max_frags,
            sfBaseStats->frag_timeouts,
            sfBaseStats->frag_faults);

    /* CPU STATS - at the end of output record */
#ifdef LINUX_SMP
    /* First the number of CPUs */
    fprintf(fh, "%d,", sfBaseStats->sfProcPidStats->iCPUs);

    /* Next, stats for each CPU (a triple) */
    for(iCtr = 0; iCtr < sfBaseStats->sfProcPidStats->iCPUs; iCtr++)
    {
        usr  = sfBaseStats->sfProcPidStats->SysCPUs[iCtr].user;
        sys  = sfBaseStats->sfProcPidStats->SysCPUs[iCtr].sys;
        idle = sfBaseStats->sfProcPidStats->SysCPUs[iCtr].idle;

        fprintf(fh, "%.1f,%.1f,%.1f,", usr, sys, idle);
    }
#else
    usr  = sfBaseStats->user_cpu_time;
    sys  = sfBaseStats->system_cpu_time;
    idle = sfBaseStats->idle_cpu_time;

    /* 1 CPU hardcoded */
    fprintf(fh, "1,%.1f,%.1f,%.1f,", usr, sys, idle);
#endif

    /* Stats for MBits/s, Bytes/Pkt, KPkts/s for each of
     * wire, IP Fragmented, IP Reassembled, Stream Reassembled,
     * App Layer (data that reaches protocol decoders).
     */
    fprintf(fh, "%.2f,%.2f,%.2f,%.2f,%.2f,",
            sfBaseStats->wire_mbits_per_sec.realtime,
            sfBaseStats->ipfrag_mbits_per_sec.realtime,
            sfBaseStats->ipreass_mbits_per_sec.realtime,
            sfBaseStats->rebuilt_mbits_per_sec.realtime,
            sfBaseStats->mbits_per_sec.realtime);

    fprintf(fh, "%d,%d,%d,%d,%d,",
            sfBaseStats->avg_bytes_per_wire_packet,
            sfBaseStats->avg_bytes_per_ipfrag_packet,
            sfBaseStats->avg_bytes_per_ipreass_packet,
            sfBaseStats->avg_bytes_per_rebuilt_packet,
            sfBaseStats->avg_bytes_per_packet);

    fprintf(fh, "%.2f,%.2f,%.2f,%.2f,%.2f,",
            sfBaseStats->kpackets_wire_per_sec.realtime,
            sfBaseStats->kpackets_ipfrag_per_sec.realtime,
            sfBaseStats->kpackets_ipreass_per_sec.realtime,
            sfBaseStats->kpackets_rebuilt_per_sec.realtime,
            sfBaseStats->kpackets_per_sec.realtime);

    fprintf(fh, "%llu,", sfBaseStats->pkt_stats.pkts_recv);
    fprintf(fh, "%llu",  sfBaseStats->pkt_stats.pkts_drop);

    fprintf(fh, "\n");
    fflush(fh);

#ifdef LINUX
    //LogScheduler();
#endif

    return 0;
}
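/*
** Illustrative sketch (not part of the original file): one way a caller
** could append a single record with LogBasePerfStats().  The output path
** and the helper name are assumptions for the example; the real perf
** monitor supplies its own FILE handle.
*/
#if 0
static void ExampleAppendBaseRecord(SFBASE_STATS *sfBaseStats)
{
    /* Hypothetical stats file location */
    FILE *fh = fopen("/var/log/snort/snort.stats", "a");

    if(fh != NULL)
    {
        /* Writes one comma-separated line in the field order documented
        ** above LogBasePerfStats, then flushes it for the MC to pick up. */
        LogBasePerfStats(sfBaseStats, fh);
        fclose(fh);
    }
}
#endif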
/*
** NAME
**   DisplayBasePerfStats
**
** DESCRIPTION
**   Output Function.  We can easily code multiple output buffers
**   because all that is received is a SFBASE_STATS struct which
**   holds all the information to output.  This current output
**   function just prints to stdout.
**
** FORMAL INPUTS
**   SFBASE_STATS * - struct with perf information
**   int            - flags for output
**
** FORMAL OUTPUTS
**   int - 0 is successful
*/
int DisplayBasePerfStatsConsole(SFBASE_STATS *sfBaseStats, int iFlags)
{
#ifdef LINUX_SMP
    int iCtr;
#endif

    LogMessage("\n\nSnort Realtime Performance : %s--------------------------\n",
               ctime(&sfBaseStats->time));

    LogMessage("Pkts Recv: %llu\n", sfBaseStats->pkt_stats.pkts_recv);
    LogMessage("Pkts Drop: %llu\n", sfBaseStats->pkt_stats.pkts_drop);
    LogMessage("%% Dropped: %.2f%%\n\n", sfBaseStats->pkt_drop_percent);

    LogMessage("Mbits/Sec: %.2f (wire)\n",
               sfBaseStats->wire_mbits_per_sec.realtime);
    LogMessage("Mbits/Sec: %.2f (ip fragmented)\n",
               sfBaseStats->ipfrag_mbits_per_sec.realtime);
    LogMessage("Mbits/Sec: %.2f (ip reassembled)\n",
               sfBaseStats->ipreass_mbits_per_sec.realtime);
    LogMessage("Mbits/Sec: %.2f (tcp rebuilt)\n",
               sfBaseStats->rebuilt_mbits_per_sec.realtime);
    LogMessage("Mbits/Sec: %.2f (app layer)\n\n",
               sfBaseStats->mbits_per_sec.realtime);

    LogMessage("Bytes/Pkt: %d (wire)\n",
               sfBaseStats->avg_bytes_per_wire_packet);
    LogMessage("Bytes/Pkt: %d (ip fragmented)\n",
               sfBaseStats->avg_bytes_per_ipfrag_packet);
    LogMessage("Bytes/Pkt: %d (ip reassembled)\n",
               sfBaseStats->avg_bytes_per_ipreass_packet);
    LogMessage("Bytes/Pkt: %d (tcp rebuilt)\n",
               sfBaseStats->avg_bytes_per_rebuilt_packet);
    LogMessage("Bytes/Pkt: %d (app layer)\n\n",
               sfBaseStats->avg_bytes_per_packet);

    LogMessage("KPkts/Sec: %.2f (wire)\n",
               sfBaseStats->kpackets_wire_per_sec.realtime);
    LogMessage("KPkts/Sec: %.2f (ip fragmented)\n",
               sfBaseStats->kpackets_ipfrag_per_sec.realtime);
    LogMessage("KPkts/Sec: %.2f (ip reassembled)\n",
               sfBaseStats->kpackets_ipreass_per_sec.realtime);
    LogMessage("KPkts/Sec: %.2f (tcp rebuilt)\n",
               sfBaseStats->kpackets_rebuilt_per_sec.realtime);
    LogMessage("KPkts/Sec: %.2f (app layer)\n\n",
               sfBaseStats->kpackets_per_sec.realtime);

    LogMessage("PatMatch: %.2f%%\n\n", sfBaseStats->patmatch_percent);

    /*
    ** The following ifdefs are for CPU stats dealing with multiple
    ** CPUs in Linux.  Snort will show user, system and idle time for
    ** each CPU.  The methods of calculating this are different though,
    ** since getrusage is broken for multiple CPUs in Linux.  We get the
    ** CPU stats instead from the proc filesystem on Linux.
    */
#ifdef LINUX_SMP
    for(iCtr = 0; iCtr < sfBaseStats->sfProcPidStats->iCPUs; iCtr++)
    {
        LogMessage("CPU%d Usage: %.2f%% (user) %.2f%% (sys) %.2f%% (idle)\n",
                   iCtr,
                   sfBaseStats->sfProcPidStats->SysCPUs[iCtr].user,
                   sfBaseStats->sfProcPidStats->SysCPUs[iCtr].sys,
                   sfBaseStats->sfProcPidStats->SysCPUs[iCtr].idle);
    }
    printf("\n");
#else
    LogMessage("CPU Usage: %.2f%% (user) %.2f%% (sys) %.2f%% (idle)\n\n",
               sfBaseStats->user_cpu_time,
               sfBaseStats->system_cpu_time,
               sfBaseStats->idle_cpu_time);
#endif
    /*
    ** Shows the number of snort alerts per second.
    */
    LogMessage("Alerts/Sec             : %.1f\n", sfBaseStats->alerts_per_second);

    /* Session estimation statistics */
    LogMessage("Syns/Sec               : %.1f\n", sfBaseStats->syns_per_second);
    LogMessage("Syn-Acks/Sec           : %.1f\n", sfBaseStats->synacks_per_second);
    LogMessage("New Cached Sessions/Sec: %.1f\n", sfBaseStats->new_sessions_per_second);
    LogMessage("Cached Sessions Del/Sec: %.1f\n", sfBaseStats->deleted_sessions_per_second);
    LogMessage("Current Cached Sessions: %llu\n", sfBaseStats->total_sessions);
    LogMessage("Max Cached Sessions    : %llu\n", sfBaseStats->max_sessions);

    /* more instrumentation for stream4/frag2 */
    LogMessage("Stream Flushes/Sec     : %.1f\n", sfBaseStats->stream_flushes_per_second);
    LogMessage("Stream Cache Faults/Sec: %llu\n", sfBaseStats->stream_faults);
    LogMessage("Stream Cache Timeouts  : %llu\n", sfBaseStats->stream_timeouts);

    LogMessage("Frag Creates()s/Sec    : %.1f\n", sfBaseStats->frag_creates_per_second);
    LogMessage("Frag Completes()s/Sec  : %.1f\n", sfBaseStats->frag_completes_per_second);
    LogMessage("Frag Inserts()s/Sec    : %.1f\n", sfBaseStats->frag_inserts_per_second);
    LogMessage("Frag Deletes/Sec       : %.1f\n", sfBaseStats->frag_deletes_per_second);
    LogMessage("Frag AutoFrees/Sec     : %.1f\n", sfBaseStats->frag_autofrees_per_second);
    LogMessage("Frag Flushes/Sec       : %.1f\n", sfBaseStats->frag_flushes_per_second);
    LogMessage("Current Cached Frags   : %llu\n", sfBaseStats->current_frags);
    LogMessage("Max Cached Frags       : %llu\n", sfBaseStats->max_frags);
    LogMessage("Frag Timeouts          : %llu\n", sfBaseStats->frag_timeouts);
    LogMessage("Frag Faults            : %llu\n\n", sfBaseStats->frag_faults);

    /*
    ** Snort Maximum Performance Statistics
    ** These statistics calculate the maximum performance that
    ** snort could attain by using the getrusage numbers.  We've
    ** seen in testing that these numbers come close to the actual
    ** throughput for Mbits/Sec and Pkt/Sec.  But note that these
    ** are not hard numbers and rigorous testing is necessary to
    ** establish snort performance on any hardware setting.
    */
    if(iFlags & MAX_PERF_STATS)
    {
        LogMessage("Snort Maximum Performance\n");
        LogMessage("-------------------------\n\n");

        LogMessage("Mbits/Second\n");
        LogMessage("----------------\n");
        LogMessage("Snort:    %.2f\n",   sfBaseStats->mbits_per_sec.usertime);
        LogMessage("Sniffing: %.2f\n",   sfBaseStats->mbits_per_sec.systemtime);
        LogMessage("Combined: %.2f\n\n", sfBaseStats->mbits_per_sec.totaltime);

        LogMessage("uSeconds/Pkt\n");
        LogMessage("----------------\n");
        LogMessage("Snort:    %.2f\n",   sfBaseStats->usecs_per_packet.usertime);
        LogMessage("Sniffing: %.2f\n",   sfBaseStats->usecs_per_packet.systemtime);
        LogMessage("Combined: %.2f\n\n", sfBaseStats->usecs_per_packet.totaltime);

        LogMessage("KPkts/Second\n");
        LogMessage("------------------\n");
        LogMessage("Snort:    %.2f\n",   sfBaseStats->kpackets_per_sec.usertime);
        LogMessage("Sniffing: %.2f\n",   sfBaseStats->kpackets_per_sec.systemtime);
        LogMessage("Combined: %.2f\n\n", sfBaseStats->kpackets_per_sec.totaltime);
    }

    return 0;
}
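/*
** Illustrative sketch (not part of the original file): dumping the current
** counters to the console.  Passing MAX_PERF_STATS (the flag tested above)
** also prints the getrusage-based "Snort Maximum Performance" section;
** passing 0 would skip it.  The wrapper name is an assumption.
*/
#if 0
static void ExampleConsoleDump(SFBASE_STATS *sfBaseStats)
{
    DisplayBasePerfStatsConsole(sfBaseStats, MAX_PERF_STATS);
}
#endif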