
📄 optimize.c

📁 This package generates NetFlow records from network traffic. NetFlow can be used for network planning, load balancing, security monitoring, and more.
💻 C
📖 Page 1 of 3
		if (s == 0)
			break;
		next = this_op(s->next);
		if (next == 0)
			break;
		last = next;

		/*
		 * st  M[k]	-->	st  M[k]
		 * ldx M[k]		tax
		 */
		if (s->s.code == BPF_ST &&
		    next->s.code == (BPF_LDX|BPF_MEM) &&
		    s->s.k == next->s.k) {
			done = 0;
			next->s.code = BPF_MISC|BPF_TAX;
		}
		/*
		 * ld  #k	-->	ldx  #k
		 * tax			txa
		 */
		if (s->s.code == (BPF_LD|BPF_IMM) &&
		    next->s.code == (BPF_MISC|BPF_TAX)) {
			s->s.code = BPF_LDX|BPF_IMM;
			next->s.code = BPF_MISC|BPF_TXA;
			done = 0;
		}
		/*
		 * This is an ugly special case, but it happens
		 * when you say tcp[k] or udp[k] where k is a constant.
		 */
		if (s->s.code == (BPF_LD|BPF_IMM)) {
			struct slist *add, *tax, *ild;

			/*
			 * Check that X isn't used on exit from this
			 * block (which the optimizer might cause).
			 * We know the code generator won't generate
			 * any local dependencies.
			 */
			if (ATOMELEM(b->out_use, X_ATOM))
				break;

			if (next->s.code != (BPF_LDX|BPF_MSH|BPF_B))
				add = next;
			else
				add = this_op(next->next);
			if (add == 0 || add->s.code != (BPF_ALU|BPF_ADD|BPF_X))
				break;

			tax = this_op(add->next);
			if (tax == 0 || tax->s.code != (BPF_MISC|BPF_TAX))
				break;

			ild = this_op(tax->next);
			if (ild == 0 || BPF_CLASS(ild->s.code) != BPF_LD ||
			    BPF_MODE(ild->s.code) != BPF_IND)
				break;
			/*
			 * XXX We need to check that X is not
			 * subsequently used.  We know we can eliminate the
			 * accumulator modifications since it is defined
			 * by the last stmt of this sequence.
			 *
			 * We want to turn this sequence:
			 *
			 * (004) ldi     #0x2		{s}
			 * (005) ldxms   [14]		{next}  -- optional
			 * (006) addx			{add}
			 * (007) tax			{tax}
			 * (008) ild     [x+0]		{ild}
			 *
			 * into this sequence:
			 *
			 * (004) nop
			 * (005) ldxms   [14]
			 * (006) nop
			 * (007) nop
			 * (008) ild     [x+2]
			 *
			 */
			ild->s.k += s->s.k;
			s->s.code = NOP;
			add->s.code = NOP;
			tax->s.code = NOP;
			done = 0;
		}
		s = next;
	}
	/*
	 * If we have a subtract to do a comparison, and the X register
	 * is a known constant, we can merge this value into the
	 * comparison.
	 */
	if (last->s.code == (BPF_ALU|BPF_SUB|BPF_X) &&
	    !ATOMELEM(b->out_use, A_ATOM)) {
		val = b->val[X_ATOM];
		if (vmap[val].is_const) {
			int op;

			b->s.k += vmap[val].const_val;
			op = BPF_OP(b->s.code);
			if (op == BPF_JGT || op == BPF_JGE) {
				struct block *t = JT(b);
				JT(b) = JF(b);
				JF(b) = t;
				b->s.k += 0x80000000;
			}
			last->s.code = NOP;
			done = 0;
		} else if (b->s.k == 0) {
			/*
			 * sub x  ->	nop
			 * j  #0	j  x
			 */
			last->s.code = NOP;
			b->s.code = BPF_CLASS(b->s.code) | BPF_OP(b->s.code) |
				BPF_X;
			done = 0;
		}
	}
	/*
	 * Likewise, a constant subtract can be simplified.
	 */
	else if (last->s.code == (BPF_ALU|BPF_SUB|BPF_K) &&
		 !ATOMELEM(b->out_use, A_ATOM)) {
		int op;

		b->s.k += last->s.k;
		last->s.code = NOP;
		op = BPF_OP(b->s.code);
		if (op == BPF_JGT || op == BPF_JGE) {
			struct block *t = JT(b);
			JT(b) = JF(b);
			JF(b) = t;
			b->s.k += 0x80000000;
		}
		done = 0;
	}
	/*
	 * and #k	nop
	 * jeq #0  ->	jset #k
	 */
	if (last->s.code == (BPF_ALU|BPF_AND|BPF_K) &&
	    !ATOMELEM(b->out_use, A_ATOM) && b->s.k == 0) {
		b->s.k = last->s.k;
		b->s.code = BPF_JMP|BPF_K|BPF_JSET;
		last->s.code = NOP;
		done = 0;
		opt_not(b);
	}
	/*
	 * If the accumulator is a known constant, we can compute the
	 * comparison result.
	 */
	val = b->val[A_ATOM];
	if (vmap[val].is_const && BPF_SRC(b->s.code) == BPF_K) {
		bpf_int32 v = vmap[val].const_val;

		switch (BPF_OP(b->s.code)) {

		case BPF_JEQ:
			v = v == b->s.k;
			break;

		case BPF_JGT:
			v = (unsigned)v > b->s.k;
			break;

		case BPF_JGE:
			v = (unsigned)v >= b->s.k;
			break;

		case BPF_JSET:
			v &= b->s.k;
			break;

		default:
			abort();
		}
		if (JF(b) != JT(b))
			done = 0;
		if (v)
			JF(b) = JT(b);
		else
			JT(b) = JF(b);
	}
}

/*
 * Compute the symbolic value of expression of 's', and update
 * anything it defines in the value table 'val'.  If 'alter' is true,
 * do various optimizations.  This code would be cleaner if symbolic
 * evaluation and code transformations weren't folded together.
 */
static void
opt_stmt(s, val, alter)
	struct stmt *s;
	int val[];
	int alter;
{
	int op;
	int v;

	switch (s->code) {

	case BPF_LD|BPF_ABS|BPF_W:
	case BPF_LD|BPF_ABS|BPF_H:
	case BPF_LD|BPF_ABS|BPF_B:
		v = F(s->code, s->k, 0L);
		vstore(s, &val[A_ATOM], v, alter);
		break;

	case BPF_LD|BPF_IND|BPF_W:
	case BPF_LD|BPF_IND|BPF_H:
	case BPF_LD|BPF_IND|BPF_B:
		v = val[X_ATOM];
		if (alter && vmap[v].is_const) {
			s->code = BPF_LD|BPF_ABS|BPF_SIZE(s->code);
			s->k += vmap[v].const_val;
			v = F(s->code, s->k, 0L);
			done = 0;
		}
		else
			v = F(s->code, s->k, v);
		vstore(s, &val[A_ATOM], v, alter);
		break;

	case BPF_LD|BPF_LEN:
		v = F(s->code, 0L, 0L);
		vstore(s, &val[A_ATOM], v, alter);
		break;

	case BPF_LD|BPF_IMM:
		v = K(s->k);
		vstore(s, &val[A_ATOM], v, alter);
		break;

	case BPF_LDX|BPF_IMM:
		v = K(s->k);
		vstore(s, &val[X_ATOM], v, alter);
		break;

	case BPF_LDX|BPF_MSH|BPF_B:
		v = F(s->code, s->k, 0L);
		vstore(s, &val[X_ATOM], v, alter);
		break;

	case BPF_ALU|BPF_NEG:
		if (alter && vmap[val[A_ATOM]].is_const) {
			s->code = BPF_LD|BPF_IMM;
			s->k = -vmap[val[A_ATOM]].const_val;
			val[A_ATOM] = K(s->k);
		}
		else
			val[A_ATOM] = F(s->code, val[A_ATOM], 0L);
		break;

	case BPF_ALU|BPF_ADD|BPF_K:
	case BPF_ALU|BPF_SUB|BPF_K:
	case BPF_ALU|BPF_MUL|BPF_K:
	case BPF_ALU|BPF_DIV|BPF_K:
	case BPF_ALU|BPF_AND|BPF_K:
	case BPF_ALU|BPF_OR|BPF_K:
	case BPF_ALU|BPF_LSH|BPF_K:
	case BPF_ALU|BPF_RSH|BPF_K:
		op = BPF_OP(s->code);
		if (alter) {
			if (s->k == 0) {
				if (op == BPF_ADD || op == BPF_SUB ||
				    op == BPF_LSH || op == BPF_RSH ||
				    op == BPF_OR) {
					s->code = NOP;
					break;
				}
				if (op == BPF_MUL || op == BPF_AND) {
					s->code = BPF_LD|BPF_IMM;
					val[A_ATOM] = K(s->k);
					break;
				}
			}
			if (vmap[val[A_ATOM]].is_const) {
				fold_op(s, val[A_ATOM], K(s->k));
				val[A_ATOM] = K(s->k);
				break;
			}
		}
		val[A_ATOM] = F(s->code, val[A_ATOM], K(s->k));
		break;

	case BPF_ALU|BPF_ADD|BPF_X:
	case BPF_ALU|BPF_SUB|BPF_X:
	case BPF_ALU|BPF_MUL|BPF_X:
	case BPF_ALU|BPF_DIV|BPF_X:
	case BPF_ALU|BPF_AND|BPF_X:
	case BPF_ALU|BPF_OR|BPF_X:
	case BPF_ALU|BPF_LSH|BPF_X:
	case BPF_ALU|BPF_RSH|BPF_X:
		op = BPF_OP(s->code);
		if (alter && vmap[val[X_ATOM]].is_const) {
			if (vmap[val[A_ATOM]].is_const) {
				fold_op(s, val[A_ATOM], val[X_ATOM]);
				val[A_ATOM] = K(s->k);
			}
			else {
				s->code = BPF_ALU|BPF_K|op;
				s->k = vmap[val[X_ATOM]].const_val;
				done = 0;
				val[A_ATOM] =
					F(s->code, val[A_ATOM], K(s->k));
			}
			break;
		}
		/*
		 * Check if we're doing something to an accumulator
		 * that is 0, and simplify.  This may not seem like
		 * much of a simplification but it could open up further
		 * optimizations.
		 * XXX We could also check for mul by 1, and -1, etc.
		 */
		if (alter && vmap[val[A_ATOM]].is_const
		    && vmap[val[A_ATOM]].const_val == 0) {
			if (op == BPF_ADD || op == BPF_OR ||
			    op == BPF_LSH || op == BPF_RSH || op == BPF_SUB) {
				s->code = BPF_MISC|BPF_TXA;
				vstore(s, &val[A_ATOM], val[X_ATOM], alter);
				break;
			}
			else if (op == BPF_MUL || op == BPF_DIV ||
				 op == BPF_AND) {
				s->code = BPF_LD|BPF_IMM;
				s->k = 0;
				vstore(s, &val[A_ATOM], K(s->k), alter);
				break;
			}
			else if (op == BPF_NEG) {
				s->code = NOP;
				break;
			}
		}
		val[A_ATOM] = F(s->code, val[A_ATOM], val[X_ATOM]);
		break;

	case BPF_MISC|BPF_TXA:
		vstore(s, &val[A_ATOM], val[X_ATOM], alter);
		break;

	case BPF_LD|BPF_MEM:
		v = val[s->k];
		if (alter && vmap[v].is_const) {
			s->code = BPF_LD|BPF_IMM;
			s->k = vmap[v].const_val;
			done = 0;
		}
		vstore(s, &val[A_ATOM], v, alter);
		break;

	case BPF_MISC|BPF_TAX:
		vstore(s, &val[X_ATOM], val[A_ATOM], alter);
		break;

	case BPF_LDX|BPF_MEM:
		v = val[s->k];
		if (alter && vmap[v].is_const) {
			s->code = BPF_LDX|BPF_IMM;
			s->k = vmap[v].const_val;
			done = 0;
		}
		vstore(s, &val[X_ATOM], v, alter);
		break;

	case BPF_ST:
		vstore(s, &val[s->k], val[A_ATOM], alter);
		break;

	case BPF_STX:
		vstore(s, &val[s->k], val[X_ATOM], alter);
		break;
	}
}

static void
deadstmt(s, last)
	register struct stmt *s;
	register struct stmt *last[];
{
	register int atom;

	atom = atomuse(s);
	if (atom >= 0) {
		if (atom == AX_ATOM) {
			last[X_ATOM] = 0;
			last[A_ATOM] = 0;
		}
		else
			last[atom] = 0;
	}
	atom = atomdef(s);
	if (atom >= 0) {
		if (last[atom]) {
			done = 0;
			last[atom]->code = NOP;
		}
		last[atom] = s;
	}
}

static void
opt_deadstores(b)
	register struct block *b;
{
	register struct slist *s;
	register int atom;
	struct stmt *last[N_ATOMS];

	memset((char *)last, 0, sizeof last);

	for (s = b->stmts; s != 0; s = s->next)
		deadstmt(&s->s, last);
	deadstmt(&b->s, last);

	for (atom = 0; atom < N_ATOMS; ++atom)
		if (last[atom] && !ATOMELEM(b->out_use, atom)) {
			last[atom]->code = NOP;
			done = 0;
		}
}

static void
opt_blk(b, do_stmts)
	struct block *b;
	int do_stmts;
{
	struct slist *s;
	struct edge *p;
	int i;
	bpf_int32 aval;

	/*
	 * Initialize the atom values.
	 * If we have no predecessors, everything is undefined.
	 * Otherwise, we inherit our values from our predecessors.
	 * If any register has an ambiguous value (i.e. control paths are
	 * merging) give it the undefined value of 0.
	 */
	p = b->in_edges;
	if (p == 0)
		memset((char *)b->val, 0, sizeof(b->val));
	else {
		memcpy((char *)b->val, (char *)p->pred->val, sizeof(b->val));
		while ((p = p->next) != NULL) {
			for (i = 0; i < N_ATOMS; ++i)
				if (b->val[i] != p->pred->val[i])
					b->val[i] = 0;
		}
	}
	aval = b->val[A_ATOM];
	for (s = b->stmts; s; s = s->next)
		opt_stmt(&s->s, b->val, do_stmts);

	/*
	 * This is a special case: if we don't use anything from this
	 * block, and we load the accumulator with a value that is
	 * already there, or if this block is a return,
	 * eliminate all the statements.
	 */
	if (do_stmts &&
	    ((b->out_use == 0 && aval != 0 && b->val[A_ATOM] == aval) ||
	     BPF_CLASS(b->s.code) == BPF_RET)) {
		if (b->stmts != 0) {
			b->stmts = 0;
			done = 0;
		}
	} else {
		opt_peep(b);
		opt_deadstores(b);
	}
	/*
	 * Set up values for branch optimizer.
	 */
	if (BPF_SRC(b->s.code) == BPF_K)
		b->oval = K(b->s.k);
	else
		b->oval = b->val[X_ATOM];
	b->et.code = b->s.code;
	b->ef.code = -b->s.code;
}

/*
 * Return true if any register that is used on exit from 'succ' has
 * an exit value that is different from the corresponding exit value
 * from 'b'.
 */
static int
use_conflict(b, succ)
	struct block *b, *succ;
{
	int atom;
	atomset use = succ->out_use;

	if (use == 0)
		return 0;

	for (atom = 0; atom < N_ATOMS; ++atom)
		if (ATOMELEM(use, atom))
			if (b->val[atom] != succ->val[atom])
				return 1;
	return 0;
}

static struct block *
fold_edge(child, ep)
	struct block *child;
	struct edge *ep;
{
	int sense;
	int aval0, aval1, oval0, oval1;
	int code = ep->code;

	if (code < 0) {
		code = -code;
		sense = 0;
	} else
		sense = 1;

	if (child->s.code != code)
		return 0;

	aval0 = child->val[A_ATOM];
	oval0 = child->oval;
	aval1 = ep->pred->val[A_ATOM];
	oval1 = ep->pred->oval;

	if (aval0 != aval1)
		return 0;

	if (oval0 == oval1)
		/*
		 * The operands are identical, so the
		 * result is true if a true branch was
		 * taken to get here, otherwise false.
		 */
		return sense ? JT(child) : JF(child);

	if (sense && code == (BPF_JMP|BPF_JEQ|BPF_K))
		/*
		 * At this point, we only know the comparison if we
		 * came down the true branch, and it was an equality
		 * comparison with a constant.  We rely on the fact that
		 * distinct constants have distinct value numbers.
		 */
		return JF(child);

	return 0;
}

static void
opt_j(ep)
	struct edge *ep;
{
	register int i, k;
	register struct block *target;

	if (JT(ep->succ) == 0)
		return;

	if (JT(ep->succ) == JF(ep->succ)) {
		/*
		 * Common branch targets can be eliminated, provided
		 * there is no data dependency.
		 */
		if (!use_conflict(ep->pred, ep->succ->et.succ)) {
			done = 0;
			ep->succ = JT(ep->succ);
		}
	}
	/*
	 * For each edge dominator that matches the successor of this
	 * edge, promote the edge successor to its grandchild.
	 *
	 * XXX We violate the set abstraction here in favor of a
	 * reasonably efficient loop.
	 */
 top:
	for (i = 0; i < edgewords; ++i) {
		register bpf_u_int32 x = ep->edom[i];

		while (x != 0) {
			k = ffs(x) - 1;
			x &=~ (1 << k);
			k += i * BITS_PER_WORD;

			target = fold_edge(ep->succ, edges[k]);
			/*
			 * Check that there is no data dependency between
			 * nodes that will be violated if we move the edge.
			 */
			if (target != 0 && !use_conflict(ep->pred, target)) {
				done = 0;
				ep->succ = target;
				if (JT(target) != 0)
					/*
					 * Start over unless we hit a leaf.
					 */
					goto top;
				return;
			}
		}
	}
}

static void
or_pullup(b)
	struct block *b;
{
	int val, at_top;
	struct block *pull;
	struct block **diffp, **samep;
	struct edge *ep;

	ep = b->in_edges;
	if (ep == 0)
		return;

	/*
	 * Make sure each predecessor loads the same value.
	 * XXX why?
	 */
	val = ep->pred->val[A_ATOM];
	for (ep = ep->next; ep != 0; ep = ep->next)
		if (val != ep->pred->val[A_ATOM])
			return;

	if (JT(b->in_edges->pred) == b)
		diffp = &JT(b->in_edges->pred);
	else
		diffp = &JF(b->in_edges->pred);

	at_top = 1;
	while (1) {
		if (*diffp == 0)
			return;

		if (JT(*diffp) != JT(b))
			return;

		if (!SET_MEMBER((*diffp)->dom, b->id))
			return;

		if ((*diffp)->val[A_ATOM] != val)
			break;

		diffp = &JF(*diffp);
		at_top = 0;
	}
	samep = &JF(*diffp);
	while (1) {
		if (*samep == 0)
			return;

		if (JT(*samep) != JT(b))
			return;

		if (!SET_MEMBER((*samep)->dom, b->id))
			return;

		if ((*samep)->val[A_ATOM] == val)
			break;
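The listing above covers the peephole, statement-level, and edge-folding passes of the BPF optimizer. As a rough, self-contained sketch (not part of optimize.c), the following program shows one way these passes can be exercised through the public libpcap API: the same filter expression is compiled twice with pcap_compile(), once with its optimize argument set to 0 and once set to 1, and both programs are printed with bpf_dump(). The filter string, link type, and snapshot length are arbitrary example values.

#include <stdio.h>
#include <pcap.h>

int
main(void)
{
	/* A capture-less handle is enough to drive the compiler. */
	pcap_t *p = pcap_open_dead(DLT_EN10MB, 65535);
	struct bpf_program unopt, opt;
	const char *filter = "tcp dst port 80";	/* arbitrary example filter */

	if (p == NULL)
		return 1;

	/* Compile without running the optimizer in optimize.c. */
	if (pcap_compile(p, &unopt, filter, 0, 0) < 0) {
		fprintf(stderr, "compile: %s\n", pcap_geterr(p));
		return 1;
	}
	/* Compile again with the optimizer enabled. */
	if (pcap_compile(p, &opt, filter, 1, 0) < 0) {
		fprintf(stderr, "compile: %s\n", pcap_geterr(p));
		return 1;
	}

	printf("-- unoptimized --\n");
	bpf_dump(&unopt, 1);	/* 1 = readable listing, as with tcpdump -d */
	printf("-- optimized --\n");
	bpf_dump(&opt, 1);

	pcap_freecode(&unopt);
	pcap_freecode(&opt);
	pcap_close(p);
	return 0;
}

Comparing the two dumps makes the effect of passes such as opt_peep() and opt_deadstores() visible: the optimized program is typically shorter and contains fewer redundant loads and jumps.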
