
📄 kernelenv.c

📁 openswan
💻 C
	nfct->destroy(nfct);
}

void nf_conntrack_get(struct nf_conntrack *nfct)
{
	if (nfct)
		atomic_inc(&nfct->use);
}

void (*ip_ct_attach)(struct sk_buff *, struct sk_buff *);
#endif /* 2.6.9 */

void nf_reset(struct sk_buff *skb)
{
	nf_conntrack_put(skb->nfct);
	skb->nfct = NULL;
#ifdef CONFIG_NETFILTER_DEBUG
	skb->nf_debug = 0;
#endif
}

void nf_reset_debug(struct sk_buff *skb)
{
#ifdef CONFIG_NETFILTER_DEBUG
	skb->nf_debug = 0;
#endif
}

u32 dst_path_metric(struct dst_entry *dst, int metric)
{
	return 1500;
/*	return dst->path->metrics[metric-1]; */
}

u32 dst_pmtu(struct dst_entry *dst)
{
	u32 mtu = dst_path_metric(dst, RTAX_MTU);

	/* Yes, _exactly_. This is paranoia. */
	barrier();
	return mtu;
}

int dst_output(struct sk_buff *skb)
{
	assert(skb);
	assert(skb->dst);
	assert(skb->dst->output);

	return skb->dst->output(skb);
}

int dst_input(struct sk_buff *skb)
{
	assert(skb);
	assert(skb->dst);
	assert(skb->dst->input);

	return skb->dst->input(skb);
}

struct ethhdr *eth_hdr(const struct sk_buff *skb)
{
	return (struct ethhdr *)skb->mac.raw;
}

/* spinlock: use talloc for unreleased lock detection */
void __generic_write_lock(spinlock_t *lock, const char *location)
{
	if (lock->lock)
		panic("write lock (called at %s) already held by %s.\n",
		      location, lock->location);
	lock->lock = -1;
	lock->location = talloc_strdup(__lock_ctx, location);
}

void __generic_write_unlock(spinlock_t *lock, const char *location)
{
	if (lock->lock != -1) {
		fprintf(stderr, "write lock (called at %s) isn't held\n",
		        location);
	}
	lock->lock = 0;
	talloc_free(lock->location);
	lock->location = NULL;
}

void __generic_read_lock(spinlock_t *lock, const char *location)
{
	if (lock->lock == -1)
		panic("read lock (called at %s) already held by %s.\n",
		      location, lock->location);
	lock->lock++;
	talloc_free(lock->location);
	lock->location = talloc_strdup(__lock_ctx, location);
}

void __generic_read_unlock(spinlock_t *lock, const char *location)
{
	if (lock->lock <= 0) {
		fprintf(stderr, "read lock (called at %s) isn't held\n",
		        location);
	}
	lock->lock--;
	if (lock->lock == 0) {
		talloc_free(lock->location);
		lock->location = NULL;
	}
}

/* semaphore */
void __down(struct semaphore *sem, const char *location)
{
	if (!(sem->count)--)
		barf("down() unavailable at %s\n", location);
	field_attach_static(sem, location, NULL);
}

int __down_interruptible(struct semaphore *sem, const char *location)
{
	if (should_i_fail(location))
		return -EINTR;

	if (!(sem->count)--)
		barf("down() unavailable at %s\n", location);
	field_attach_static(sem, location, NULL);
	return 0;
}

void __up(struct semaphore *sem, const char *location)
{
	if (++(sem->count) > sem->limit)
		panic("up() unavailable at %s\n", location);
	field_detach_all(sem);
}

int __down_trylock(struct semaphore *sem, const char *location)
{
	if (sem->count) {
		sem->count--;
		field_attach_static(sem, location, NULL);
		return 0;
	}
	return 1;
}

void sema_init(struct semaphore *sem, int val)
{
	sem->count = val;
	sem->limit = val;
}

/* bitops.h */
int test_bit(int nr, const long *addr)
{
	int	mask;

	addr += nr >> 5;
	mask = 1 << (nr & 0x1f);
	return ((mask & *addr) != 0);
}

int set_bit(int nr, long *addr)
{
	int	mask, retval;

	addr += nr >> 5;
	mask = 1 << (nr & 0x1f);
	cli();
	retval = (mask & *addr) != 0;
	*addr |= mask;
	sti();
	return retval;
}

int clear_bit(int nr, long *addr)
{
	int	mask, retval;

	addr += nr >> 5;
	mask = 1 << (nr & 0x1f);
	cli();
	retval = (mask & *addr) != 0;
	*addr &= ~mask;
	sti();
	return retval;
}
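/*
 * Illustrative sketch, not part of the original file: how the bitops
 * above behave.  The index math assumes 32-bit longs (nr >> 5), so bit
 * 33 lands in the second word.  The demo function name is hypothetical;
 * assert() is already in use elsewhere in this file.
 */
static void bitops_demo(void)
{
	long map[2] = { 0, 0 };

	assert(set_bit(33, map) == 0);		/* was clear; sets bit 1 of map[1] */
	assert(test_bit(33, map));
	assert(clear_bit(33, map) == 1);	/* set_bit/clear_bit return the old value */
	assert(!test_bit(33, map));
}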
/* timer */
LIST_HEAD(__timers);
LIST_HEAD(__running_timers);

void __init_timer(struct timer_list *timer, struct module *owner,
		  const char *function)
{
	timer->magic = TIMER_MAGIC;
	timer->owner = owner;
	timer->ownerfunction = function;
	timer->use = NULL;
}

void __add_timer(struct timer_list *timer, const char *location)
{
	struct timer_list *t;

	/* Keep the queue sorted by expiry time. */
	list_for_each_entry(t, &__timers, entry) {
		if (time_after(t->expires, timer->expires))
			break;
	}
	list_add_tail(&timer->entry, &t->entry);
	timer->use = talloc_strdup(__timer_ctx, location);
}

int __del_timer(struct timer_list *timer, const char *location)
{
	if (!timer->use)
		return 0;

	if (should_i_fail_once(location)) {
		/* Pretend it's running now. */
		list_del(&timer->entry);
		list_add(&timer->entry, &__running_timers);
		return 0;
	}

	list_del(&timer->entry);
	talloc_free(timer->use);
	timer->use = NULL;
	return 1;
}

static bool do_running_timers(const char *cmd)
{
	struct timer_list *t, *next;

	list_for_each_entry_safe(t, next, &__running_timers, entry) {
		list_del(&t->entry);
		talloc_free(t->use);
		t->function(t->data);
	}
	return true;
}

void schedule(void)
{
	do_running_timers("schedule()");
}

static void setup_running_timers(void)
{
#if 0
	tui_register_pre_post_hook(NULL, do_running_timers);
#endif
}
init_call(setup_running_timers);

int timer_pending(const struct timer_list *timer)
{
	/* Straightforward at present: timers are guaranteed to
	   be run at the expiry time. */
	return timer->expires > jiffies;
}

void increment_time(unsigned int inc)
{
	struct list_head *i;
	struct timer_list *t;

	jiffies += inc;

	i = __timers.next;
	while (i != &__timers) {
		t = list_entry(i, struct timer_list, entry);
		if (time_before(jiffies, t->expires))
			break;

		nfsim_log(LOG_UI, "running timer to %s:%s()",
			  t->owner->name, t->ownerfunction);

		i = i->next;
		list_del(&t->entry);
		talloc_free(t->use);
		t->use = NULL;
		t->function(t->data);
	}
}
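/*
 * Illustrative sketch, not part of the original file: a timer queued with
 * __add_timer() fires once increment_time() advances jiffies to its
 * expiry.  The demo names are hypothetical, and a minimal struct module
 * with only a .name field is assumed (increment_time() logs
 * t->owner->name).
 */
static int demo_fired;

static void demo_expire(unsigned long data)
{
	demo_fired = 1;
}

static struct module demo_module = { .name = "demo" };

static void timer_demo(void)
{
	struct timer_list t;

	__init_timer(&t, &demo_module, __func__);
	t.function = demo_expire;
	t.data = 0;
	t.expires = jiffies + 10;
	__add_timer(&t, "timer_demo");
	assert(timer_pending(&t));

	increment_time(10);	/* jiffies reaches t.expires: demo_expire() runs */
	assert(demo_fired && !timer_pending(&t));
}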
/* notifier */
/* static rwlock_t notifier_lock = RW_LOCK_UNLOCKED; */
int notifier_chain_register(struct notifier_block **list,
			    struct notifier_block *n)
{
	/* Detect if they don't unregister. */
	field_attach_static(n, "notifier_chain_register", NULL);

	/* write_lock(&notifier_lock); */
	while (*list) {
		if (n->priority > (*list)->priority)
			break;
		list = &((*list)->next);
	}
	n->next = *list;
	*list = n;
	/* write_unlock(&notifier_lock); */
	return 0;
}

int notifier_chain_unregister(struct notifier_block **nl,
			      struct notifier_block *n)
{
	/* write_lock(&notifier_lock); */
	while ((*nl) != NULL) {
		if ((*nl) == n) {
			*nl = n->next;
			/* write_unlock(&notifier_lock); */
			field_detach_all(n);
			return 0;
		}
		nl = &((*nl)->next);
	}
	/* write_unlock(&notifier_lock); */
	return -ENOENT;
}

int notifier_call_chain(struct notifier_block **n, unsigned long val, void *v)
{
	int ret = NOTIFY_DONE;
	struct notifier_block *nb = *n;

	while (nb) {
		ret = nb->notifier_call(nb, val, v);
		if (ret & NOTIFY_STOP_MASK)
			return ret;
		nb = nb->next;
	}
	return ret;
}

/* random */
void get_random_bytes(void *buf, int nbytes)
{
	while (nbytes--)
		*((char *)buf + nbytes) = random();
}

/* cache */
void *__malloc(unsigned int size, void *ctx, const char *location)
{
	if (should_i_fail(__func__))
		return NULL;

	return _talloc_zero(ctx, size, location);
}

#if 0
kmem_cache_t *kmem_cache_create(const char *name, size_t objsize,
	size_t offset, unsigned long flags,
	void (*ctor)(void *, kmem_cache_t *, unsigned long),
	void (*dtor)(void *, kmem_cache_t *, unsigned long))
{
	kmem_cache_t *cache;

	if (should_i_fail(__func__))
		return NULL;

	cache = talloc(__kmem_cache_ctx, kmem_cache_t);
	cache->name = name;
	cache->objsize = objsize;
	cache->ctor = ctor;
	cache->dtor = dtor;
	INIT_LIST_HEAD(&cache->objs);
	return cache;
}

int kmem_cache_destroy(kmem_cache_t *cache)
{
	talloc_free(cache);
	return 0;
}

void *kmem_cache_alloc(kmem_cache_t *cache, int flags)
{
	struct kmem_cache_obj *obj;

	if (should_i_fail(__func__))
		return NULL;

	obj = talloc(cache, struct kmem_cache_obj);
	obj->ptr = talloc_size(obj, cache->objsize);
	list_add(&obj->entry, &cache->objs);
	return obj->ptr;
}

void kmem_cache_free(kmem_cache_t *cache, void *ptr)
{
	struct kmem_cache_obj *i;

	list_for_each_entry(i, &(cache->objs), entry) {
		if (i->ptr == ptr) {
			list_del(&i->entry);
			talloc_free(i);
			return;
		}
	}
	panic("[cache] attempting to free non-cache memory\n");
}
#endif

unsigned long __get_free_pages(unsigned int gfp_mask, unsigned int order)
{
	return (unsigned long)kmalloc(PAGE_SIZE << order, gfp_mask);
}

void free_pages(unsigned long addr, unsigned int order)
{
	memset((void *)addr, 0, PAGE_SIZE << order);
	kfree((void *)addr);
}

int get_order(unsigned long size)
{
	int order;

	size = (size - 1) >> (PAGE_SHIFT - 1);
	order = -1;
	do {
		size >>= 1;
		order++;
	} while (size);
	return order;
}
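/*
 * Illustrative sketch, not part of the original file: registering on a
 * notifier chain, firing it, and unregistering.  Chains are kept sorted
 * by descending priority, and notifier_call_chain() stops early only
 * when a callback returns a NOTIFY_STOP_MASK value.  Names here are
 * hypothetical.
 */
static int demo_seen;

static int demo_notify(struct notifier_block *nb, unsigned long val, void *v)
{
	demo_seen = (int)val;
	return NOTIFY_DONE;
}

static void notifier_demo(void)
{
	static struct notifier_block *demo_chain;	/* empty chain head */
	struct notifier_block nb = { .notifier_call = demo_notify };

	notifier_chain_register(&demo_chain, &nb);
	notifier_call_chain(&demo_chain, 42, NULL);
	assert(demo_seen == 42);
	assert(notifier_chain_unregister(&demo_chain, &nb) == 0);
}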
/* jhash.h: Jenkins hash support.
 *
 * Copyright (C) 1996 Bob Jenkins (bob_jenkins@burtleburtle.net)
 *
 * http://burtleburtle.net/bob/hash/
 *
 * These are the credits from Bob's sources:
 *
 * lookup2.c, by Bob Jenkins, December 1996, Public Domain.
 * hash(), hash2(), hash3, and mix() are externally useful functions.
 * Routines to test the hash are included if SELF_TEST is defined.
 * You can use this free for any purpose.  It has no warranty.
 *
 * Copyright (C) 2003 David S. Miller (davem@redhat.com)
 *
 * I've modified Bob's hash to be useful in the Linux kernel, and
 * any bugs present are surely my fault.  -DaveM
 */

/* NOTE: Arguments are modified. */
#define __jhash_mix(a, b, c) \
{ \
  a -= b; a -= c; a ^= (c>>13); \
  b -= c; b -= a; b ^= (a<<8); \
  c -= a; c -= b; c ^= (b>>13); \
  a -= b; a -= c; a ^= (c>>12); \
  b -= c; b -= a; b ^= (a<<16); \
  c -= a; c -= b; c ^= (b>>5); \
  a -= b; a -= c; a ^= (c>>3); \
  b -= c; b -= a; b ^= (a<<10); \
  c -= a; c -= b; c ^= (b>>15); \
}

/* The golden ratio: an arbitrary value */
#define JHASH_GOLDEN_RATIO	0x9e3779b9

/* The most generic version, hashes an arbitrary sequence
 * of bytes.  No alignment or length assumptions are made about
 * the input key.
 */
u32 jhash(void *key, u32 length, u32 initval)
{
	u32 a, b, c, len;
	u8 *k = key;

	len = length;
	a = b = JHASH_GOLDEN_RATIO;
	c = initval;

	while (len >= 12) {
		a += (k[0] +((u32)k[1]<<8) +((u32)k[2]<<16) +((u32)k[3]<<24));
		b += (k[4] +((u32)k[5]<<8) +((u32)k[6]<<16) +((u32)k[7]<<24));
		c += (k[8] +((u32)k[9]<<8) +((u32)k[10]<<16)+((u32)k[11]<<24));

		__jhash_mix(a,b,c);

		k += 12;
		len -= 12;
	}

	c += length;
	/* All the case statements fall through to pick up the tail bytes. */
	switch (len) {
	case 11: c += ((u32)k[10]<<24);
	case 10: c += ((u32)k[9]<<16);
	case 9 : c += ((u32)k[8]<<8);
	case 8 : b += ((u32)k[7]<<24);
	case 7 : b += ((u32)k[6]<<16);
	case 6 : b += ((u32)k[5]<<8);
	case 5 : b += k[4];
	case 4 : a += ((u32)k[3]<<24);
	case 3 : a += ((u32)k[2]<<16);
	case 2 : a += ((u32)k[1]<<8);
	case 1 : a += k[0];
	};

	__jhash_mix(a,b,c);

	return c;
}

/* A special optimized version that handles 1 or more u32s.
 * The length parameter here is the number of u32s in the key.
 */
u32 jhash2(u32 *k, u32 length, u32 initval)
{
	u32 a, b, c, len;

	a = b = JHASH_GOLDEN_RATIO;
	c = initval;
	len = length;

	while (len >= 3) {
		a += k[0];
		b += k[1];
		c += k[2];
		__jhash_mix(a, b, c);
		k += 3; len -= 3;
	}

	c += length * 4;
	switch (len) {
	case 2 : b += k[1];
	case 1 : a += k[0];
	};

	__jhash_mix(a,b,c);

	return c;
}

/* A special ultra-optimized version that knows it is hashing exactly
 * 3, 2 or 1 word(s).
 *
 * NOTE: In particular the "c += length; __jhash_mix(a,b,c);" normally
 *       done at the end is not done here.
 */
u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval)
{
	a += JHASH_GOLDEN_RATIO;
	b += JHASH_GOLDEN_RATIO;
	c += initval;

	__jhash_mix(a, b, c);

	return c;
}

u32 jhash_2words(u32 a, u32 b, u32 initval)
{
	return jhash_3words(a, b, 0, initval);
}

u32 jhash_1word(u32 a, u32 initval)
{
	return jhash_3words(a, 0, 0, initval);
}

int request_module(const char *name, ...)
{
	return 0;
}

void kernelenv_init(void)
{
	__vmalloc_ctx = talloc_named_const(nfsim_tallocs, 1, "vmallocs");
	__kmalloc_ctx = talloc_named_const(nfsim_tallocs, 1, "kmallocs");
	__kmalloc_atomic_ctx = talloc_named_const(nfsim_tallocs, 1,
						  "kmallocs (atomic)");
	__skb_ctx = talloc_named_const(nfsim_tallocs, 1, "skbs");
	__kmem_cache_ctx = talloc_named_const(nfsim_tallocs, 1, "kmem caches");
	__lock_ctx = talloc_named_const(nfsim_tallocs, 1, "locks");
	__timer_ctx = talloc_named_const(nfsim_tallocs, 1, "timers");
}

int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr > (unsigned long)-1000L;
}

void atomic_inc(atomic_t *v)
{
	v->counter++;
}

void atomic_dec(atomic_t *v)
{
	v->counter--;
}

int atomic_dec_and_test(atomic_t *v)
{
	return (--(v->counter) == 0);
}
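/*
 * Illustrative sketch, not part of the original file: using the Jenkins
 * hash above to pick a hash-table bucket for an address pair.  The table
 * size and function name are hypothetical; a power-of-two size lets the
 * modulo reduce to a mask.
 */
#define DEMO_HASH_BUCKETS 256

static unsigned int demo_bucket(u32 saddr, u32 daddr, u32 seed)
{
	/* jhash_2words() mixes both words with the per-table seed. */
	return jhash_2words(saddr, daddr, seed) & (DEMO_HASH_BUCKETS - 1);
}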
