
tcpstream.c

Firestorm NIDS is a very high-performance network intrusion detection system (NIDS).

Language: C
Page 1 of 2
/*
 * tcpstream - Firestorm TCP state tracking implementation.
 * Copyright (c) 2002 Gianni Tedesco <gianni@scaramanga.co.uk>
 * Released under the terms of the GNU GPL v2
 *
 * TODO:
 *  o Configure the timeouts
 *  o Implement timeouts for SYN2/FIN states
 *  o Implement TIME_WAIT states
 *  o Re-order out-of-order transmissions
 *  o Intelligent stream reassembly
 *  o Support sack
 *  o Connection re-sync
 *  o Connection pickup ?
 *
 * Should CORRECTLY cope with:
 *  o Re-transmissions
 *  o Bad TCP or IP checksum evasion
 *  o Broadcast/Multicast packets
 *  o PAWS (rfc1323) evasion
 */
#ifndef DONT_INLINE
#define __INLINE__ static inline
#else
#define __INLINE__ static
#endif

#ifndef STATE_DEBUG
#define dmesg(x...)
#else
#define dmesg mesg
static char *state_str[]={
	"CLOSED",
	"ESTABLISHED",
	"SYN_SENT",
	"SYN_RECV",
	"FIN_WAIT1",
	"FIN_WAIT2",
	"TCP_TIME_WAIT",
	"TCP_CLOSE",
	"TCP_CLOSE_WAIT",
	"TCP_LAST_ACK",
	"TCP_LISTEN",
	"TCP_CLOSING"
};
#endif

#include <cleanup.h>
#include "tcpip.h"

#define TCP_HZ 100
#define TCP_PAWS_24DAYS (60 * 60 * 24 * 24)
#define TCP_PAWS_MSL 60
#define TCP_PAWS_WINDOW 60
#define TCP_TMO_SYN1 (90*TCP_HZ)

/* Configuration */
unsigned int tcp_minttl=0;
unsigned int tcp_numstreams=2048;
unsigned int tcp_numflows=1024;
unsigned int tcp_reassemble=0;
unsigned int tcp_stateful=0;

struct arg tcpstream_args[]={
	{"minttl", ARGTYPE_PUINT, NULL, {vp_uint:&tcp_minttl}},
	{"num_streams", ARGTYPE_PBYTES, NULL, {vp_bytes:&tcp_numstreams}},
	{"num_flows", ARGTYPE_PBYTES, NULL, {vp_bytes:&tcp_numflows}},
	{"reassemble", ARGTYPE_PBOOL, NULL, {vp_bool:&tcp_reassemble}},
	{NULL, ARGTYPE_NOP, NULL}
};

/* Token bucket for TCP state violations */
struct tokenbucket tcpstream_tb={
	.cost=(RATE_SEC/1), /* 1 alert per second */
	.burst=(RATE_SEC/1)*10, /* a burst of 10 alerts per second */
};

/* Token bucket for ICMP trying to fiddle with TCP state */
struct tokenbucket icmperr_tb={
	.cost=(RATE_SEC/1), /* 1 alert per second */
	.burst=(RATE_SEC/1)*10, /* a burst of 10 alerts per second */
};

/* Alert generator */
struct generator tcpstream_gen=init_generator("tcpstream", &tcpstream_tb);
struct generator icmperr_gen=init_generator("tcp.icmperr", &icmperr_tb);

/* Alerts */
struct alert alert_tcp1=init_alert("In-window SYN", 1, 0, 5);
struct alert alert_tcp2=init_alert("Data sent on closed stream", 2, 0, 5);
struct alert alert_tcp3=init_alert("PAWS discard", 3, 0, 5);
struct alert alert_icmp1=init_alert("ICMP error from interloper", 2, 0, 5);
struct alert alert_icmp2=init_alert("ICMP error on established connection", 2, 0, 5);

/* 1. Hash table for lookups -- O(n) in pathological cases, can be O(1) */
unsigned int tcp_hashsz=512;
struct tcp_session **tcp_hash=NULL;

/* 2. Slab cache for allocation -- O(1) */
union tcp_union *all_sessions=NULL;
union tcp_union *tcp_next=NULL;

/* 3. LRU list to evict oldest items -- O(1) */
struct tcp_lru lru={
	.next=(struct tcp_session *)&lru,
	.prev=(struct tcp_session *)&lru,
};
/* 4. Timeout list to evict entries on timeout */
struct tcp_tmo syn1={
	.next=(struct tcp_session *)&syn1,
	.prev=(struct tcp_session *)&syn1,
};

/* O(1) allocator for flows */
size_t flow_len=0;
void *flow_cache=NULL;
void *flow_next=NULL;

/* Statistics counters */
unsigned int tcp_packets=0;
unsigned int tcp_state_errs=0;
unsigned int tcp_broadcast=0;
unsigned int tcp_lowttl=0;
unsigned int tcp_timeouts=0;
unsigned int max_concurrent=0;
unsigned int num_active=0;
unsigned int max_flows=0;
unsigned int num_flows=0;

/* Wrap-safe seq/ack calculations */
__INLINE__ int before(u_int32_t s1, u_int32_t s2)
{
	return (int32_t)(s1-s2) < 0;
}

__INLINE__ int after(u_int32_t s1, u_int32_t s2)
{
	return (int32_t)(s2-s1) < 0;
}

__INLINE__ int between(u_int32_t s1, u_int32_t s2, u_int32_t s3)
{
	return s3 - s2 >= s1 - s2; /* is s2<=s1<=s3 ? */
}

__INLINE__ u_int32_t tcp_receive_window(struct tcp_stream *tp)
{
	int32_t win=tp->rcv_wup+tp->rcv_wnd-tp->rcv_nxt;
	if ( win<0 )
		win=0;
	return (u_int32_t)win;
}

__INLINE__ int tcp_sequence(struct tcp_stream *tp, u_int32_t seq, u_int32_t end_seq)
{
	return !before(end_seq, tp->rcv_wup) &&
		!after(seq, tp->rcv_nxt+tcp_receive_window(tp));
}

/* Convert 64bit timestamp to 32bit jiffies */
__INLINE__ unsigned int tcp_jiffies(struct packet *p)
{
	unsigned int ret;
	ret=p->time.tv_sec * TCP_HZ;
	ret+=p->time.tv_usec / (1000000UL/TCP_HZ);
	return ret;
}

/* TMO: Add a TCP session to the head of a timeout list */
__INLINE__ void tcp_tmo_add(struct tcp_tmo *l, struct tcp_session *s)
{
	s->tmo_next=l->next;
	s->tmo_prev=(struct tcp_session *)l;
	l->next->tmo_prev=s;
	l->next=s;
}

/* TMO: Remove a TCP session from a timeout list */
__INLINE__ void tcp_tmo_del(struct tcp_session *s)
{
	if ( s->expire==0 )
		return;
	s->tmo_prev->tmo_next=s->tmo_next;
	s->tmo_next->tmo_prev=s->tmo_prev;
	s->tmo_prev=NULL;
	s->tmo_next=NULL;
	s->expire=0;
}

/* TMO: Check timeouts */
__INLINE__ void tcp_tmo_check(struct packet *pkt)
{
	unsigned int now=tcp_jiffies(pkt);

	while ( syn1.prev!=(struct tcp_session *)&syn1 ) {
		if ( (int)syn1.prev->expire - (int)now > 0 )
			return;
		tcp_free(syn1.prev);
		tcp_timeouts++;
	}
}

/* LRU: Add a TCP session to the head of an LRU list */
__INLINE__ void tcp_lru_add(struct tcp_lru *l, struct tcp_session *s)
{
	s->next=l->next;
	s->prev=(struct tcp_session *)l;
	l->next->prev=s;
	l->next=s;
}

/* LRU: Remove a TCP session from an LRU */
__INLINE__ void tcp_lru_del(struct tcp_session *s)
{
	s->prev->next=s->next;
	s->next->prev=s->prev;
}

/* LRU: Move a TCP session to the front of an LRU */
__INLINE__ void tcp_lru_mtf(struct tcp_lru *l, struct tcp_session *s)
{
	tcp_lru_del(s);
	tcp_lru_add(l,s);
}

/* HASH: Unlink a session from the session hash */
__INLINE__ void tcp_hash_unlink(struct tcp_session *s)
{
	if ( s->hash_next )
		s->hash_next->hash_pprev=s->hash_pprev;
	*s->hash_pprev=s->hash_next;
}

/* HASH: Link a session in to the TCP session hash */
__INLINE__ void tcp_hash_link(struct tcp_session *s)
{
	if ( (s->hash_next=tcp_hash[s->bucket]) ) {
		s->hash_next->hash_pprev=&s->hash_next;
	}
	tcp_hash[s->bucket]=s;
	s->hash_pprev=&tcp_hash[s->bucket];
}

/* HASH: Move to front of hash collision chain */
__INLINE__ void tcp_hash_mtf(struct tcp_session *s)
{
	tcp_hash_unlink(s);
	tcp_hash_link(s);
}
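/*
 * Illustrative sketch (not part of tcpstream.c): how the wrap-safe
 * before()/after()/between() comparisons above behave when a sequence
 * number wraps past 2^32. Casting the unsigned difference to int32_t
 * makes a small post-wrap value compare as "after" a large pre-wrap one.
 * The values below are arbitrary and the demo is compiled out by default.
 */
#ifdef TCPSTREAM_SEQWRAP_DEMO
#include <assert.h>
static void seqwrap_demo(void)
{
	u_int32_t old_seq=0xfffffff0UL;	/* just below the wrap point */
	u_int32_t new_seq=0x00000010UL;	/* 0x20 bytes later, after wrapping */

	assert(before(old_seq, new_seq));		/* old really is earlier */
	assert(after(new_seq, old_seq));		/* new really is later */
	assert(between(new_seq, old_seq, old_seq+0x100));/* inside a wrapping window */
}
#endif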
/* Flow cache allocation, slab-cache stylee */
/* TODO: strategy for when maximum is reached, evict LRU? */
__INLINE__ void *tcp_flow_alloc(void)
{
	void *ret=flow_next;

	if ( !ret ) return ret;

	flow_next=*(void **)flow_next;

	if ( ++num_flows > max_flows )
		max_flows=num_flows;

	return ret;
}

__INLINE__ void tcp_flow_free(void *p)
{
	*(void **)p=flow_next;
	flow_next=p;
	num_flows--;
}

/* Parse TCP options just for timestamps */
static int tcp_fast_options(struct pkt_tcphdr *t, u_int32_t *tsval)
{
	char *tmp, *end;

	/* Return if we don't have any */
	if ( t->doff<<2 <= sizeof(struct pkt_tcphdr)) return 0;

	/* Work out where they begin and end */
	tmp=end=(char *)t;
	tmp+=sizeof(struct pkt_tcphdr);
	end+=(t->doff<<2);

	while ( tmp<end ) {
		if ( *tmp == TCPOPT_EOL || *tmp == TCPOPT_NOP ) {
			tmp++;
			continue;
		}

		if ( !(tmp+1 < end) ) break;

		switch(*tmp) {
		case TCPOPT_TIMESTAMP:
			if ( !(tmp+10 < end) ) break;
			*tsval=ntohl(*((u_int32_t *)(tmp+2)));
			return 1;
		}

		tmp+=*(tmp+1);
	}

	return 0;
}

/* This will parse TCP options for SYN packets */
static void tcp_syn_options(struct tcp_stream *s, struct pkt_tcphdr *t, u_int32_t sec)
{
	char *tmp, *end;

	/* Return if we don't have any */
	if ( t->doff<<2 <= sizeof(struct pkt_tcphdr)) return;

	/* Work out where they begin and end */
	tmp=end=(char *)t;
	tmp+=sizeof(struct pkt_tcphdr);
	end+=(t->doff<<2);

	while ( tmp<end ) {
		if ( *tmp == TCPOPT_EOL || *tmp == TCPOPT_NOP ) {
			tmp++;
			continue;
		}

		if ( !(tmp+1 < end) ) break;

		switch(*tmp) {
		case TCPOPT_SACK_PERMITTED:
			s->flags|=TF_SACK_OK;
			break;
		case TCPOPT_TIMESTAMP:
			s->flags|=TF_TSTAMP_OK;

			/* Only check the bit we want */
			if ( !(tmp+10 < end) ) break;
			s->ts_recent=ntohl(*((u_int32_t *)(tmp+2)));
			s->ts_recent_stamp=sec;
			break;
		case TCPOPT_WSCALE:
			if ( !(tmp+2 < end) ) break;
			s->flags|=TF_WSCALE_OK;

			/* rfc1323: must log error and limit to 14 */
			if ( (s->scale=*(tmp+2)) > 14 ) s->scale=14;
			break;
		}

		tmp+=*(tmp+1);
	}
}

/* Perform a state transition */
__INLINE__ void transition(struct tcp_session *t, int c, int s)
{
#ifdef STATE_DEBUG
	char cip[32],sip[32];
	struct in_addr ca, sa;

	ca.s_addr=t->c_addr;
	sa.s_addr=t->s_addr;

	strncpy(cip, inet_ntoa(ca), sizeof(cip));
	strncpy(sip, inet_ntoa(sa), sizeof(sip));

	dmesg(M_DEBUG, "tcpstream: %s:%u(%s) -> %s:%u(%s)",
		cip, ntohs(t->c_port), state_str[c],
		sip, ntohs(t->s_port), state_str[s]);
#endif
	t->client.state=c;
	t->server.state=s;
}
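/*
 * Illustrative sketch (not part of tcpstream.c): the byte layout the two
 * option parsers above walk. Each option is <kind, length, data...>, except
 * the one-byte NOP (1) and EOL (0) kinds. The numeric kinds are the standard
 * RFC 793/1323 values: 2=MSS, 3=window scale, 4=SACK permitted, 8=timestamps.
 * This example buffer is hypothetical and compiled out by default.
 */
#ifdef TCPSTREAM_OPTIONS_DEMO
static const unsigned char syn_options_demo[]={
	2, 4, 0x05, 0xb4,		/* MSS = 1460 */
	4, 2,				/* SACK permitted */
	8, 10, 0,0,0,1, 0,0,0,0,	/* timestamps: TSval=1, TSecr=0 */
	1,				/* NOP padding */
	3, 3, 7,			/* window scale = 7 */
};
#endif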
/* Hash function. Hashes to the same value even
 * when source and destinations are inverted */
__INLINE__ int tcp_hashfn(u_int32_t saddr, u_int32_t daddr,
	u_int16_t sport, u_int16_t dport)
{
	int h = ((saddr ^ sport) ^ (daddr ^ dport));
	h ^= h>>16;
	h ^= h>>8;
	return h % tcp_hashsz;
}

/* Finds a protocol given a port number */
__INLINE__ struct proto *tcp_find_proto(u_int16_t port)
{
	struct proto_child *pc;

	/* IDs are server ports */
	for(pc=tcp_p.children; pc; pc=pc->next)
	{
		if ( port == pc->id )
			return pc->proto;
	}

	return NULL;
}

/* Return a TCP session descriptor to the cache */
void tcp_free(struct tcp_session *s)
{
	if ( !s ) {
		mesg(M_DEBUG,"tcpstream: severe insanity");
		return;
	}

	/* get the debug message */
	transition(s, 0, 0);

	/* HASH: unhash */
	tcp_hash_unlink(s);

	/* LRU: remove */
	tcp_lru_del(s);

	/* TMO: remove */
	tcp_tmo_del(s);

	if ( s->flow )
		tcp_flow_free(s->flow);

	/* CACHE: free */
	((union tcp_union *)s)->next=tcp_next;
	tcp_next=(union tcp_union *)s;

	num_active--;
}

/* Add a new session, never fails */
static struct tcp_session *tcp_new(struct pkt_iphdr *iph,
	struct pkt_tcphdr *tcph, struct packet *p)
{
	struct tcp_session *ret;

	/* If we have run out of connections, then
	 * just free the oldest one in the LRU */
	if ( !tcp_next ) {
		tcp_free(lru.prev);
		tcp_timeouts++;
	}

	/* CACHE: alloc */
	ret=&tcp_next->s;
	tcp_next=tcp_next->next;

	/* Keep track of max number of concurrent sessions
	 * seen so far */
	if ( ++num_active > max_concurrent )
		max_concurrent=num_active;

	/* Setup the key values */
	ret->c_addr=iph->saddr;
	ret->s_addr=iph->daddr;
	ret->c_port=tcph->sport;
	ret->s_port=tcph->dport;
	ret->bucket=tcp_hashfn(ret->c_addr, ret->s_addr,
		ret->c_port, ret->s_port);

	/* Setup initial state */
	transition(ret, TCP_SYN_SENT, 0);
	ret->server.isn=ntohl(tcph->seq);
	ret->client.snd_una=ret->server.isn+1;
	ret->client.snd_nxt=ret->client.snd_una+1;
	ret->client.rcv_wnd=ntohs(tcph->win);
	ret->server.rcv_nxt=ret->client.snd_una;
	ret->server.rcv_wup=ret->client.snd_una;

	/* server sees client's initial options */
	tcp_syn_options(&ret->server, tcph, p->time.tv_sec);

	if ( (ret->proto=tcp_find_proto(ret->s_port)) ) {
		ret->flow=tcp_flow_alloc();
	}else{
		/* TODO: auto-detect protocol ;) */
		ret->flow=NULL;
	}

	/* Set the timeout */
	ret->expire=tcp_jiffies(p)+TCP_TMO_SYN1;
	tcp_tmo_add(&syn1, ret);

	tcp_lru_add(&lru, ret);
	tcp_hash_link(ret);

	return ret;
}

/* Find a TCP session given a packet */
static struct tcp_session *tcp_find(struct pkt_iphdr *iph,
	struct pkt_tcphdr *tcph,
	int *to_server)
{
	struct tcp_session *s;
	int idx=tcp_hashfn(iph->saddr, iph->daddr,
		tcph->sport, tcph->dport);

	for(s=tcp_hash[idx]; s; s=s->hash_next) {
		if (	s->s_addr==iph->saddr &&
			s->c_addr==iph->daddr &&
			s->s_port==tcph->sport &&
			s->c_port==tcph->dport ) {
			*to_server=0;
			return s;
		}

		if (	s->c_addr==iph->saddr &&
			s->s_addr==iph->daddr &&
			s->c_port==tcph->sport &&
			s->s_port==tcph->dport ) {
			*to_server=1;
			return s;
		}
	}

	return NULL;
}

/* rfc793: Actions to perform when receiving an ACK in
 * an established state */
__INLINE__ void tcp_established(struct tcp_stream *snd,
	struct tcp_stream *rcv,
	u_int32_t seq,
	u_int32_t ack,
	u_int32_t win)
{
	if ( after(ack, rcv->snd_una) && !before(ack, rcv->snd_nxt) ) {
		rcv->snd_una=ack;
		rcv->rcv_nxt=seq+1;
		snd->rcv_wup=ack;
		snd->rcv_wnd=win;
	}

	snd->snd_una=seq;
	snd->snd_nxt=seq+1;
}
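/*
 * Illustrative sketch (not part of tcpstream.c): tcp_find() relies on
 * tcp_hashfn() being symmetric, so packets travelling in either direction
 * of a connection land in the same bucket. XOR is commutative, so swapping
 * (saddr,sport) with (daddr,dport) cannot change the result. The addresses
 * and ports below are arbitrary and the demo is compiled out by default.
 */
#ifdef TCPSTREAM_HASH_DEMO
#include <assert.h>
static void hash_symmetry_demo(void)
{
	u_int32_t c=0x0a000001, s=0xc0a80101;	/* arbitrary client/server addresses */
	u_int16_t cp=0x8235, sp=0x0050;		/* arbitrary client/server ports */

	assert(tcp_hashfn(c, s, cp, sp) == tcp_hashfn(s, c, sp, cp));
}
#endif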
/* A segment contained valid data so attempt
 * stream reassembly -- this is not finished yet */
static void tcpstream_data(	struct tcp_session *s,
	struct tcp_stream *rcv,
	struct pkt_tcphdr *tcph,
	struct tcpseg *tp)
{
	/* char *ptr=(char *)tcph + (tcph->doff<<2); */

	if ( tp->seq==rcv->rcv_nxt ) {
		if (tcp_receive_window(rcv) == 0) {
			/* XXX: Alert here? Sending beyond end of window.
			 * Some buggy stacks do this a lot... */
			return;
		}

		dmesg(M_DEBUG, "tcpstream: In order: %u->%u",
			tp->seq-rcv->isn,
			tp->seq_end-rcv->isn);

		/* XXX: Can only deliver data if rcv is in ESTABLISHED,
		 * FIN_WAIT1 or FIN_WAIT2 -- hrmmm */

		/* TODO: Intelligent reassembly
		 *  1. Application layer plugin receives the data
		 *  2. Application layer plugin signals us to start buffering
		 *  3. Here we aggregate each new packet that comes in
		 *  4. Free buffer if size>max || app signals OK
		 */
		rcv->rcv_nxt=tp->seq_end;
	}else{
		/* A retransmit */
		if ( !after(tp->seq_end,rcv->rcv_nxt) )
			return;
