drcom.c
/*
 * (C) 2008 Zeng Zhaorong
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
/*
 * Derived from the Linux netfilter conntrack code.
 */
#include <linux/module.h>
#include <linux/kmod.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include "daemon_kernel.h"

#define TCPTRACK_VERSION "0.0.1"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Wheelz");
MODULE_DESCRIPTION("Drcom-Kernel " TCPTRACK_VERSION);

enum tcp_state {
    TCP_STATE_NONE = 0,
    TCP_STATE_SYN_SENT,
    TCP_STATE_SYN_RECV,
    TCP_STATE_ESTABLISHED,
    TCP_STATE_FIN_WAIT,
    TCP_STATE_TIME_WAIT,
    TCP_STATE_CLOSE,
    TCP_STATE_CLOSE_WAIT,
    TCP_STATE_LAST_ACK,
    TCP_STATE_LISTEN,
    TCP_STATE_MAX
};

#define SECS *HZ
#define MINS * 60 SECS
#define HOURS * 60 MINS
#define DAYS * 24 HOURS

/* per-state idle timeouts, in jiffies */
static unsigned long tcp_timeouts[] = {
    30 MINS,    /* TCP_STATE_NONE */
    2 MINS,     /* TCP_STATE_SYN_SENT */
    60 SECS,    /* TCP_STATE_SYN_RECV */
    5 DAYS,     /* TCP_STATE_ESTABLISHED */
    2 MINS,     /* TCP_STATE_FIN_WAIT */
    2 MINS,     /* TCP_STATE_TIME_WAIT */
    10 SECS,    /* TCP_STATE_CLOSE */
    60 SECS,    /* TCP_STATE_CLOSE_WAIT */
    30 SECS,    /* TCP_STATE_LAST_ACK */
    2 MINS,     /* TCP_STATE_LISTEN */
};

#define sNO TCP_STATE_NONE
#define sES TCP_STATE_ESTABLISHED
#define sSS TCP_STATE_SYN_SENT
#define sSR TCP_STATE_SYN_RECV
#define sFW TCP_STATE_FIN_WAIT
#define sTW TCP_STATE_TIME_WAIT
#define sCL TCP_STATE_CLOSE
#define sCW TCP_STATE_CLOSE_WAIT
#define sLA TCP_STATE_LAST_ACK
#define sLI TCP_STATE_LISTEN
#define sIV TCP_STATE_MAX

/* state transition table: [direction][packet type][current state] -> new state */
static enum tcp_state tcp_states[2][5][TCP_STATE_MAX] = {
    {
/* ORIGINAL */
/*        sNO, sSS, sSR, sES, sFW, sTW, sCL, sCW, sLA, sLI */
/*syn*/ { sSS, sSS, sSR, sES, sSS, sSS, sSS, sSS, sSS, sLI },
/*fin*/ { sTW, sSS, sTW, sFW, sFW, sTW, sCL, sTW, sLA, sLI },
/*ack*/ { sES, sSS, sES, sES, sFW, sTW, sCL, sCW, sLA, sES },
/*rst*/ { sCL, sSS, sCL, sCL, sCL, sTW, sCL, sCL, sCL, sCL },
/*none*/{ sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV }
    },
    {
/* REPLY */
/*        sNO, sSS, sSR, sES, sFW, sTW, sCL, sCW, sLA, sLI */
/*syn*/ { sSR, sSR, sSR, sES, sSR, sSR, sSR, sSR, sSR, sSR },
/*fin*/ { sCL, sSS, sTW, sCW, sTW, sTW, sCL, sCW, sLA, sLI },
/*ack*/ { sCL, sSS, sSR, sES, sFW, sTW, sCL, sCW, sCL, sLI },
/*rst*/ { sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sLA, sLI },
/*none*/{ sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV }
    }
};

struct tcp_tuple {
    __be32 src_ip;
    __be32 dst_ip;
    __be16 src_port;
    __be16 dst_port;
    u_int8_t dir;
};

struct tcp_tuplehash {
    struct list_head list;
    struct tcp_tuple tuple;
};

/* per-direction sequence-number correction state (netfilter NAT style) */
struct tcp_seq {
    u_int32_t syn_seq;
    u_int32_t correction_pos;
    int16_t offset_before, offset_after;
};

struct tcp_conn {
    struct tcp_tuplehash tuplehash[2];
    atomic_t ref;
    struct timer_list timeout;
    u_int8_t flags;
    enum tcp_state state;
    struct tcp_seq seq[2];
};

/* conn->flags */
#define CONN_F_NEW      0x01
#define CONN_F_AUTHSENT 0x02

/* actions returned by check_tcp_packet() */
#define TODO_NONE      0x00
#define TODO_BYPASS    0x01
#define TODO_SEND_ACK  0x02
#define TODO_SEND_AUTH 0x04

#define CONN_DIR_ORIG  0
#define CONN_DIR_REPLY 1

#define TCP_CONN_HASH_SIZE 32

/* module-wide tracking state (CONN_* constants come from daemon_kernel.h) */
static pid_t conn_pid = 0;
static int conn_autologout = 0;
static struct timer_list conn_keepalive_timer;
static unsigned char conn_auth_data[CONN_AUTH_DATA_LEN];
static struct list_head tcp_conn_hash[TCP_CONN_HASH_SIZE];
static atomic_t tcp_conn_count = ATOMIC_INIT(0);
static struct net_device *track_dev = NULL;
static struct e_address *conn_e_addr = NULL;
static int conn_e_count = 0;
static int track_mode = CONN_MODE_NONE;
static DEFINE_RWLOCK(mode_lock);
static DEFINE_RWLOCK(hash_lock);
static DEFINE_RWLOCK(state_lock);

#if 0
#define DEBUGP printk
#else
#define DEBUGP(format, args...)
#endif

#if 0
static const char *tcp_state_names[] = {
    "NONE", "SYN_SENT", "SYN_RECV", "ESTABLISHED", "FIN_WAIT",
    "TIME_WAIT", "CLOSE", "CLOSE_WAIT", "LAST_ACK", "LISTEN"
};
#endif

static inline int tuple_equal(struct tcp_tuple *t1, struct tcp_tuple *t2)
{
    return (t1->src_ip == t2->src_ip && t1->dst_ip == t2->dst_ip &&
            t1->src_port == t2->src_port && t1->dst_port == t2->dst_port);
}

static inline u_int32_t hash_conn(const struct tcp_tuple *tuple)
{
    return (ntohl(tuple->src_ip + tuple->dst_ip +
                  tuple->src_port + tuple->dst_port) +
            ntohs(tuple->src_port)) % TCP_CONN_HASH_SIZE;
}

static inline struct tcp_conn *tuplehash_to_conn(const struct tcp_tuplehash *hash)
{
    return container_of(hash, struct tcp_conn, tuplehash[hash->tuple.dir]);
}

static inline void conn_get(struct tcp_conn *conn)
{
    if (conn)
        atomic_inc(&conn->ref);
}

static inline void conn_put(struct tcp_conn *conn)
{
    if (conn && atomic_dec_and_test(&conn->ref)) {
        kfree(conn);
        atomic_dec(&tcp_conn_count);
    }
}

/* under state_lock */
static void __conn_refresh_timer(struct tcp_conn *conn, unsigned long timeout)
{
    unsigned long newtime;

    if (conn->flags & CONN_F_NEW) {
        conn->flags &= ~CONN_F_NEW;
        conn->timeout.expires = jiffies + timeout;
        add_timer(&conn->timeout);
    } else {
        newtime = jiffies + timeout;
        if (newtime - conn->timeout.expires >= HZ && del_timer(&conn->timeout)) {
            conn->timeout.expires = newtime;
            add_timer(&conn->timeout);
        }
    }
}

/* timer callback: unhash the connection and drop the hash-list reference */
static void death_by_timeout(unsigned long ul_conn)
{
    struct tcp_conn *conn = (struct tcp_conn *)ul_conn;

    write_lock_bh(&hash_lock);
    list_del(&conn->tuplehash[CONN_DIR_ORIG].list);
    list_del(&conn->tuplehash[CONN_DIR_REPLY].list);
    write_unlock_bh(&hash_lock);
    conn_put(conn);
}

static void conn_tuple_init(struct tcp_conn *conn, struct tcp_tuple *tuple)
{
    struct tcp_tuple *t;

    t = &conn->tuplehash[CONN_DIR_ORIG].tuple;
    t->src_ip = tuple->src_ip;
    t->dst_ip = tuple->dst_ip;
    t->src_port = tuple->src_port;
    t->dst_port = tuple->dst_port;
    t->dir = CONN_DIR_ORIG;

    t = &conn->tuplehash[CONN_DIR_REPLY].tuple;
    t->src_ip = tuple->dst_ip;
    t->dst_ip = tuple->src_ip;
    t->src_port = tuple->dst_port;
    t->dst_port = tuple->src_port;
    t->dir = CONN_DIR_REPLY;
}

static inline struct tcp_conn *get_new_conn(struct tcp_tuple *tuple)
{
    struct tcp_conn *conn;

    conn = kmalloc(sizeof(struct tcp_conn), GFP_ATOMIC);
    if (conn == NULL)
        return NULL;
    memset(conn, 0, sizeof(struct tcp_conn));
    conn->flags = CONN_F_NEW;
    conn_tuple_init(conn, tuple);
    setup_timer(&conn->timeout, death_by_timeout, (unsigned long)conn);
    return conn;
}

static int is_syn_pkt(struct sk_buff *skb)
{
    unsigned int nhoff = skb_network_offset(skb);
    struct iphdr *iph, _iph;
    struct tcphdr *tcph, _tcph;

    iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
    if (iph == NULL)
        return 0;
    tcph = skb_header_pointer(skb, nhoff + (iph->ihl << 2), sizeof(_tcph), &_tcph);
    if (tcph == NULL)
        return 0;
    return (tcph->syn && !tcph->ack);
}

static int tcp_get_tuple(struct sk_buff *skb, struct tcp_tuple *tuple)
{
    struct iphdr _iph, *iph;
    struct tcphdr _hdr, *hp;
    unsigned int nhoff = skb_network_offset(skb);
    unsigned int thoff;

    memset(tuple, 0, sizeof(struct tcp_tuple));
    iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
    if (iph == NULL)
        return 0;
    tuple->src_ip = iph->saddr;
    tuple->dst_ip = iph->daddr;
    thoff = nhoff + (iph->ihl << 2);
    hp = skb_header_pointer(skb, thoff, 8, &_hdr);
    if (hp == NULL)
        return 0;
    tuple->src_port = hp->source;
    tuple->dst_port = hp->dest;
    return 1;
}

static inline void __add_new_hash(struct tcp_tuplehash *hash)
{
    u_int32_t h = hash_conn(&hash->tuple);
    struct list_head *head = &tcp_conn_hash[h];

    list_add_tail(&hash->list, head);
}

static struct tcp_tuplehash *__hash_find(struct tcp_tuple *tuple)
{
    struct tcp_tuplehash *tuplehash;
    struct list_head *head, *pos;
    u_int32_t h;

    h = hash_conn(tuple);
    head = &tcp_conn_hash[h];
    list_for_each(pos, head) {
        tuplehash = list_entry(pos, struct tcp_tuplehash, list);
        if (tuple_equal(&tuplehash->tuple, tuple))
            return tuplehash;
    }
    return NULL;
}

/* find the conn for this skb, creating one on an initial SYN; takes a reference */
static struct tcp_tuplehash *resolve_tcp_conn(struct sk_buff *skb)
{
    struct tcp_tuple tuple;
    struct tcp_conn *conn;
    struct tcp_tuplehash *hash;

    if (!tcp_get_tuple(skb, &tuple))
        return NULL;

    read_lock_bh(&hash_lock);
    hash = __hash_find(&tuple);
    if (hash != NULL) {
        conn = tuplehash_to_conn(hash);
        conn_get(conn);
        read_unlock_bh(&hash_lock);
        return hash;
    }
    read_unlock_bh(&hash_lock);

    /* OK, this is a new connection; create a conn. */
    /* The first packet must be SYN && !ACK. */
    if (!is_syn_pkt(skb))
        return NULL;
    conn = get_new_conn(&tuple);
    if (conn == NULL)
        return NULL;
    conn_get(conn);    /* for this packet */
    conn_get(conn);    /* for hash list */

    write_lock_bh(&hash_lock);
    hash = __hash_find(&tuple);
    if (hash != NULL) {
        /* already added by someone else, but is it possible? */
        struct tcp_conn *conn2 = tuplehash_to_conn(hash);

        conn_get(conn2);
        write_unlock_bh(&hash_lock);
        conn_put(conn);    /* for hash list */
        conn_put(conn);    /* for this packet */
        return hash;
    }
    __add_new_hash(&conn->tuplehash[CONN_DIR_ORIG]);
    __add_new_hash(&conn->tuplehash[CONN_DIR_REPLY]);
    write_unlock_bh(&hash_lock);

    atomic_inc(&tcp_conn_count);
    hash = &conn->tuplehash[CONN_DIR_ORIG];
    return hash;
}

/* rewrite SACK blocks with the same offsets that are applied to ack numbers */
static void sack_adjust(struct sk_buff *skb, struct tcphdr *tcph,
                        unsigned int sackoff, unsigned int sackend,
                        struct tcp_seq *seq)
{
    while (sackoff < sackend) {
        struct tcp_sack_block_wire *sack;
        __be32 new_start_seq, new_end_seq;

        sack = (void *)skb->data + sackoff;
        if (after(ntohl(sack->start_seq) - seq->offset_before, seq->correction_pos))
            new_start_seq = htonl(ntohl(sack->start_seq) - seq->offset_after);
        else
            new_start_seq = htonl(ntohl(sack->start_seq) - seq->offset_before);
        if (after(ntohl(sack->end_seq) - seq->offset_before, seq->correction_pos))
            new_end_seq = htonl(ntohl(sack->end_seq) - seq->offset_after);
        else
            new_end_seq = htonl(ntohl(sack->end_seq) - seq->offset_before);

        DEBUGP("sack_adjust: start_seq: %d->%d, end_seq: %d->%d\n",
               ntohl(sack->start_seq), ntohl(new_start_seq),
               ntohl(sack->end_seq), ntohl(new_end_seq));

        nf_proto_csum_replace4(&tcph->check, skb, sack->start_seq, new_start_seq, 0);
        nf_proto_csum_replace4(&tcph->check, skb, sack->end_seq, new_end_seq, 0);
        sack->start_seq = new_start_seq;
        sack->end_seq = new_end_seq;
        sackoff += sizeof(*sack);
    }
}

static int tcp_sack_adjust(struct tcp_conn *conn, int dir,
                           struct sk_buff *skb, struct tcphdr *tcph)
{
    unsigned int optoff, optend;

    optoff = ip_hdrlen(skb) + sizeof(struct tcphdr);
    optend = ip_hdrlen(skb) + tcph->doff * 4;

    if (!skb_make_writable(skb, optend))
        return 0;

    while (optoff < optend) {
        /* Usually: option, length. */
        unsigned char *op = skb->data + optoff;

        switch (op[0]) {
        case TCPOPT_EOL:
            return 1;
        case TCPOPT_NOP:
            optoff++;
            continue;
        default:
            /* no partial options */
            if (optoff + 1 == optend || optoff + op[1] > optend || op[1] < 2)
                return 0;
            if (op[0] == TCPOPT_SACK &&
                op[1] >= 2 + TCPOLEN_SACK_PERBLOCK &&
                ((op[1] - 2) % TCPOLEN_SACK_PERBLOCK) == 0)
                sack_adjust(skb, tcph, optoff + 2, optoff + op[1],
                            &conn->seq[!dir]);
            optoff += op[1];
        }
    }
    return 1;
}

/* shift seq/ack (and SACK) numbers around correction_pos, netfilter NAT style */
static int tcp_adjust_seq(struct sk_buff *skb, struct tcp_tuplehash *hash)
{
    struct tcp_conn *conn = tuplehash_to_conn(hash);
    int dir = hash->tuple.dir;
    struct tcphdr *tcph;
    __be32 newseq, newack;
    struct tcp_seq *this_way, *other_way;
    int ret = 0;

    read_lock_bh(&state_lock);
    this_way = &conn->seq[dir];
    other_way = &conn->seq[!dir];

    if (!skb_make_writable(skb, ip_hdrlen(skb) + sizeof(*tcph)))
        goto out;
    tcph = (void *)skb->data + ip_hdrlen(skb);

    if (after(ntohl(tcph->seq), this_way->correction_pos))
        newseq = htonl(ntohl(tcph->seq) + this_way->offset_after);
    else
        newseq = htonl(ntohl(tcph->seq) + this_way->offset_before);

    if (after(ntohl(tcph->ack_seq) - other_way->offset_before,
              other_way->correction_pos))
        newack = htonl(ntohl(tcph->ack_seq) - other_way->offset_after);
    else
        newack = htonl(ntohl(tcph->ack_seq) - other_way->offset_before);

    if (newseq != tcph->seq) {
        nf_proto_csum_replace4(&tcph->check, skb, tcph->seq, newseq, 0);
        tcph->seq = newseq;
    }
    if (newack != tcph->ack_seq) {
        nf_proto_csum_replace4(&tcph->check, skb, tcph->ack_seq, newack, 0);
        tcph->ack_seq = newack;
        if (!tcp_sack_adjust(conn, dir, skb, tcph))
            goto out;
    }
    ret = 1;
out:
    read_unlock_bh(&state_lock);
    return ret;
}

/* row index into tcp_states: syn/fin/ack/rst/none */
static unsigned int get_state_index(const struct tcphdr *tcph)
{
    if (tcph->rst)
        return 3;
    else if (tcph->syn)
        return 0;
    else if (tcph->fin)
        return 1;
    else if (tcph->ack)
        return 2;
    else
        return 4;
}

/* return what to do */
static unsigned int check_tcp_packet(struct sk_buff *skb, struct tcp_tuplehash *hash)
{
    enum tcp_state newstate, oldtcpstate;
    struct tcp_conn *conn = tuplehash_to_conn(hash);
    int dir = hash->tuple.dir;
    unsigned int nhoff = skb_network_offset(skb);
    struct iphdr *iph, _iph;
    struct tcphdr *tcph, _tcph;
    unsigned int hdrlen;
    struct tcp_seq *seq;
    u_int8_t todo = TODO_NONE;

    iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
    if (iph == NULL)
        return TODO_NONE;
    tcph = skb_header_pointer(skb, nhoff + (iph->ihl << 2), sizeof(_tcph), &_tcph);
    if (tcph == NULL)
        return TODO_NONE;
    hdrlen = (iph->ihl + tcph->doff) * 4;
    if (skb->len < hdrlen) {
        /* we may not have the options */
        DEBUGP("tcp_packet: Truncated packet.\n");
        return TODO_NONE;
    }

    write_lock_bh(&state_lock);
    oldtcpstate = conn->state;
    newstate = tcp_states[dir][get_state_index(tcph)][oldtcpstate];
    if (newstate == TCP_STATE_MAX) {
        /* invalid */
        DEBUGP("tcp_packet: Invalid dir=%i index=%u state=%s\n",
               dir, get_state_index(tcph), tcp_state_names[conn->state]);
        write_unlock_bh(&state_lock);
        return TODO_NONE;
    }
    conn->state = newstate;

    /* Handshake SYN */
    if (oldtcpstate == TCP_STATE_NONE && dir == CONN_DIR_ORIG &&
        tcph->syn && !tcph->ack) {
        seq = &conn->seq[dir];
        seq->syn_seq = ntohl(tcph->seq);
        seq->correction_pos = seq->syn_seq;
        seq->offset_before = 0;
        seq->offset_after = CONN_AUTH_DATA_LEN;
        todo |= TODO_BYPASS;
    }
    /* Handshake SYN-ACK */
    if (oldtcpstate == TCP_STATE_SYN_SENT && dir == CONN_DIR_REPLY &&
        tcph->syn && tcph->ack) {
        seq = &conn->seq[dir];
        seq->syn_seq = ntohl(tcph->seq);
        seq->correction_pos = seq->syn_seq;
        seq->offset_before = 0;
        seq->offset_after = 0;
        todo |= TODO_BYPASS;
    }
    /* Handshake pure ACK */
    if (oldtcpstate == TCP_STATE_SYN_RECV && skb->len == hdrlen &&
        dir == CONN_DIR_ORIG && tcph->ack && !tcph->syn &&
        ntohl(tcph->ack_seq) == conn->seq[!dir].syn_seq + 1) {
        todo |= TODO_BYPASS;
    }
    /* Handshake ACK with data */
    if (oldtcpstate == TCP_STATE_SYN_RECV && skb->len > hdrlen && dir == CONN_DIR_ORIG
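The sequence rewriting in tcp_adjust_seq mirrors netfilter's NAT sequence adjustment: after CONN_AUTH_DATA_LEN extra bytes are accounted for right behind the client's SYN (correction_pos is the SYN's sequence number), client sequence numbers beyond that point are shifted up by offset_after, and the server's acknowledgements are shifted back down by the same amount. The following is a minimal userspace sketch of that arithmetic only, with made-up numbers and hypothetical names (seq_fixup, fix_seq, fix_ack, AUTH_LEN are illustration names, not part of drcom.c):

#include <stdint.h>
#include <stdio.h>

#define AUTH_LEN 16                   /* stand-in for CONN_AUTH_DATA_LEN */

/* same bookkeeping as struct tcp_seq in drcom.c */
struct seq_fixup {
    uint32_t correction_pos;          /* client SYN sequence number */
    int16_t  offset_before;           /* shift for seqs at/below the SYN */
    int16_t  offset_after;            /* shift once the auth bytes are counted */
};

/* shift a client->server sequence number, as tcp_adjust_seq does */
static uint32_t fix_seq(const struct seq_fixup *f, uint32_t seq)
{
    return seq + ((int32_t)(seq - f->correction_pos) > 0 ? f->offset_after
                                                         : f->offset_before);
}

/* un-shift a server->client acknowledgement number */
static uint32_t fix_ack(const struct seq_fixup *f, uint32_t ack)
{
    uint32_t shifted = ack - f->offset_before;

    return ack - ((int32_t)(shifted - f->correction_pos) > 0 ? f->offset_after
                                                             : f->offset_before);
}

int main(void)
{
    struct seq_fixup f = { .correction_pos = 1000, .offset_before = 0,
                           .offset_after = AUTH_LEN };

    /* the SYN itself (seq == correction_pos) is left alone ...          */
    printf("SYN seq 1000    -> %u\n", fix_seq(&f, 1000));
    /* ... later client data is pushed past the injected auth bytes, and */
    /* the server's ACK for it is pulled back before the client sees it  */
    printf("data seq 1001   -> %u\n", fix_seq(&f, 1001));
    printf("server ack 1017 -> %u\n", fix_ack(&f, 1017));
    return 0;
}

Built with a plain C compiler this prints 1000, 1017, and 1001, i.e. the same shifts tcp_adjust_seq applies in-kernel (checksum fix-ups and SACK handling omitted).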