random.c

来自「Linux Kernel 2.6.9 for OMAP1710」· C语言 代码 · 共 2,115 行 · 第 1/5 页

C
2,115
字号
 * to optimize a static rotate left of x bits, it doesn't know how to
 * deal with a variable rotate of x bits.  So we use a bit of asm magic.
 */
#if (!defined (__i386__))
/* Portable C fallback: rotate `word` left by i bits (valid for 0 < i < 32). */
static inline __u32 rotate_left(int i, __u32 word)
{
	return (word << i) | (word >> (32 - i));
}
#else
/* i386: a single variable `roll` instruction, count in %cl. */
static inline __u32 rotate_left(int i, __u32 word)
{
	__asm__("roll %%cl,%0"
		:"=r" (word)
		:"0" (word),"c" (i));
	return word;
}
#endif

/*
 * More asm magic....
 *
 * For entropy estimation, we need to do an integral base 2
 * logarithm.
 *
 * Note the "12bits" suffix - this is used for numbers between
 * 0 and 4095 only.  This allows a few shortcuts.
 */
#if 0	/* Slow but clear version */
static inline __u32 int_ln_12bits(__u32 word)
{
	__u32 nbits = 0;

	while (word >>= 1)
		nbits++;
	return nbits;
}
#else	/* Faster (more clever) version, courtesy Colin Plumb */
static inline __u32 int_ln_12bits(__u32 word)
{
	/* Smear msbit right to make an n-bit mask */
	word |= word >> 8;
	word |= word >> 4;
	word |= word >> 2;
	word |= word >> 1;
	/* Remove one bit to make this a logarithm */
	word >>= 1;
	/* Count the bits set in the word (parallel popcount on 12 bits) */
	word -= (word >> 1) & 0x555;
	word = (word & 0x333) + ((word >> 2) & 0x333);
	word += (word >> 4);
	word += (word >> 8);
	return word & 15;
}
#endif

/* Entropy-accounting debug output; compiled out by default. */
#if 0
#define DEBUG_ENT(fmt, arg...) printk(KERN_DEBUG "random: " fmt, ## arg)
#else
#define DEBUG_ENT(fmt, arg...) do {} while (0)
#endif

/**********************************************************************
 *
 * OS independent entropy store.   Here are the functions which handle
 * storing entropy in an entropy pool.
 *
 **********************************************************************/

struct entropy_store {
	/* mostly-read data: */
	struct poolinfo poolinfo;	/* pool size and polynomial tap offsets */
	__u32		*pool;		/* the pool data itself */
	const char	*name;		/* identifier used in debug messages */

	/* read-write data: */
	spinlock_t lock ____cacheline_aligned_in_smp;
	unsigned	add_ptr;	/* next mixing position (counts downward) */
	int		entropy_count;	/* current entropy estimate, in bits */
	int		input_rotate;	/* rotation applied to incoming words */
};

/*
 * Initialize the entropy store.  The input argument is the size of
 * the random pool.
 *
 * Returns a negative error code if there is a problem.
 */
static int create_entropy_store(int size, const char *name,
				struct entropy_store **ret_bucket)
{
	struct	entropy_store	*r;
	struct	poolinfo	*p;
	int	poolwords;

	poolwords = (size + 3) / 4; /* Convert bytes->words */
	/* The pool size must be a multiple of 16 32-bit words */
	poolwords = ((poolwords + 15) / 16) * 16;

	/* Find the tap configuration matching this pool size;
	 * poolinfo_table is terminated by an entry with poolwords == 0. */
	for (p = poolinfo_table; p->poolwords; p++) {
		if (poolwords == p->poolwords)
			break;
	}
	if (p->poolwords == 0)
		return -EINVAL;

	r = kmalloc(sizeof(struct entropy_store), GFP_KERNEL);
	if (!r)
		return -ENOMEM;
	memset (r, 0, sizeof(struct entropy_store));
	r->poolinfo = *p;

	r->pool = kmalloc(POOLBYTES, GFP_KERNEL);
	if (!r->pool) {
		kfree(r);
		return -ENOMEM;
	}
	memset(r->pool, 0, POOLBYTES);
	r->lock = SPIN_LOCK_UNLOCKED;
	r->name = name;
	*ret_bucket = r;
	return 0;
}

/* Clear the entropy pool and associated counters. */
static void clear_entropy_store(struct entropy_store *r)
{
	r->add_ptr = 0;
	r->entropy_count = 0;
	r->input_rotate = 0;
	memset(r->pool, 0, r->poolinfo.POOLBYTES);
}

#ifdef CONFIG_SYSCTL
/* Free a store created by create_entropy_store(). */
static void free_entropy_store(struct entropy_store *r)
{
	if (r->pool)
		kfree(r->pool);
	kfree(r);
}
#endif

/*
 * This function adds a byte into the entropy "pool".  It does not
 * update the entropy estimate.  The caller should call
 * credit_entropy_store if this is appropriate.
 *
 * The pool is stirred with a primitive polynomial of the appropriate
 * degree, and then twisted.  We twist by three bits at a time because
 * it's cheap to do so and helps slightly in the expected case where
 * the entropy is concentrated in the low-order bits.
 */
static void add_entropy_words(struct entropy_store *r, const __u32 *in,
			      int nwords)
{
	/* Twist table for the 3-bit twist applied after each mixing step
	 * (indexed by the low 3 bits of the mixed word). */
	static __u32 const twist_table[8] = {
		         0, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
		0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
	unsigned long i, add_ptr, tap1, tap2, tap3, tap4, tap5;
	int new_rotate, input_rotate;
	/* Used as an index mask, so poolwords must be a power of 2. */
	int wordmask = r->poolinfo.poolwords - 1;
	__u32 w, next_w;
	unsigned long flags;

	/* Taps are constant, so we can load them without holding r->lock.  */
	tap1 = r->poolinfo.tap1;
	tap2 = r->poolinfo.tap2;
	tap3 = r->poolinfo.tap3;
	tap4 = r->poolinfo.tap4;
	tap5 = r->poolinfo.tap5;

	/* Fetch the first input word before taking the lock. */
	next_w = *in++;
	spin_lock_irqsave(&r->lock, flags);
	prefetch_range(r->pool, wordmask);
	input_rotate = r->input_rotate;
	add_ptr = r->add_ptr;

	while (nwords--) {
		w = rotate_left(input_rotate, next_w);
		if (nwords > 0)
			next_w = *in++;	/* read ahead for the next iteration */
		i = add_ptr = (add_ptr - 1) & wordmask;

		/*
		 * Normally, we add 7 bits of rotation to the pool.
		 * At the beginning of the pool, add an extra 7 bits
		 * rotation, so that successive passes spread the
		 * input bits across the pool evenly.
 */
		new_rotate = input_rotate + 14;
		if (i)
			new_rotate = input_rotate + 7;
		input_rotate = new_rotate & 31;

		/* XOR in the various taps */
		w ^= r->pool[(i + tap1) & wordmask];
		w ^= r->pool[(i + tap2) & wordmask];
		w ^= r->pool[(i + tap3) & wordmask];
		w ^= r->pool[(i + tap4) & wordmask];
		w ^= r->pool[(i + tap5) & wordmask];
		w ^= r->pool[i];
		/* Store back with the 3-bit twist applied. */
		r->pool[i] = (w >> 3) ^ twist_table[w & 7];
	}

	r->input_rotate = input_rotate;
	r->add_ptr = add_ptr;

	spin_unlock_irqrestore(&r->lock, flags);
}

/*
 * Credit (or debit) the entropy store with n bits of entropy.
 * The count saturates: it never drops below 0 and never exceeds
 * the pool size in bits (POOLBITS).
 */
static void credit_entropy_store(struct entropy_store *r, int nbits)
{
	unsigned long flags;

	spin_lock_irqsave(&r->lock, flags);

	if (r->entropy_count + nbits < 0) {
		DEBUG_ENT("negative entropy/overflow (%d+%d)\n",
			  r->entropy_count, nbits);
		r->entropy_count = 0;
	} else if (r->entropy_count + nbits > r->poolinfo.POOLBITS) {
		r->entropy_count = r->poolinfo.POOLBITS;
	} else {
		r->entropy_count += nbits;
		if (nbits)
			DEBUG_ENT("Added %d entropy credits to %s, now %d\n",
				  nbits, r->name, r->entropy_count);
	}

	spin_unlock_irqrestore(&r->lock, flags);
}

/**********************************************************************
 *
 * Entropy batch input management
 *
 * We batch entropy to be added to avoid increasing interrupt latency
 *
 **********************************************************************/

/* One queued entropy event: two data words plus its entropy credit. */
struct sample {
	__u32 data[2];
	int credit;
};

static struct sample *batch_entropy_pool, *batch_entropy_copy;
static int	batch_head, batch_tail;	/* ring indices; head == tail means empty */
static spinlock_t batch_lock = SPIN_LOCK_UNLOCKED;
static int	batch_max;	/* ring capacity; 0 until batch_entropy_init() runs */
static void batch_entropy_process(void *private_);
static DECLARE_WORK(batch_work, batch_entropy_process, NULL);

/* note: the size must be a power of 2 */
static int __init batch_entropy_init(int size, struct entropy_store *r)
{
	batch_entropy_pool = kmalloc(size*sizeof(struct sample), GFP_KERNEL);
	if (!batch_entropy_pool)
		return -1;
	batch_entropy_copy = kmalloc(size*sizeof(struct sample), GFP_KERNEL);
	if
 (!batch_entropy_copy) {
		kfree(batch_entropy_pool);
		return -1;
	}
	batch_head = batch_tail = 0;
	batch_work.data = r;	/* store that batch_entropy_process() drains into */
	batch_max = size;
	return 0;
}

/*
 * Changes to the entropy data is put into a queue rather than being added to
 * the entropy counts directly.  This is presumably to avoid doing heavy
 * hashing calculations during an interrupt in add_timer_randomness().
 * Instead, the entropy is only added to the pool by keventd.
 */
void batch_entropy_store(u32 a, u32 b, int num)
{
	int new;
	unsigned long flags;

	/* Not initialized yet -- silently drop the sample. */
	if (!batch_max)
		return;

	spin_lock_irqsave(&batch_lock, flags);

	batch_entropy_pool[batch_head].data[0] = a;
	batch_entropy_pool[batch_head].data[1] = b;
	batch_entropy_pool[batch_head].credit = num;

	/* Kick the worker once the ring is at least half full. */
	if (((batch_head - batch_tail) & (batch_max-1)) >= (batch_max / 2)) {
		/*
		 * Schedule it for the next timer tick:
		 */
		schedule_delayed_work(&batch_work, 1);
	}

	/* Advance head unless the ring is full; when full, the sample
	 * just written is overwritten by the next caller (dropped). */
	new = (batch_head+1) & (batch_max-1);
	if (new == batch_tail) {
		DEBUG_ENT("batch entropy buffer full\n");
	} else {
		batch_head = new;
	}

	spin_unlock_irqrestore(&batch_lock, flags);
}
EXPORT_SYMBOL(batch_entropy_store);

/*
 * Flush out the accumulated entropy operations, adding entropy to the passed
 * store (normally random_state).  If that store has enough entropy, alternate
 * between randomizing the data of the primary and secondary stores.
 */
static void batch_entropy_process(void *private_)
{
	struct entropy_store *r	= (struct entropy_store *) private_, *p;
	int max_entropy = r->poolinfo.POOLBITS;
	unsigned head, tail;

	/* Mixing into the pool is expensive, so copy over the batch
	 * data and release the batch lock. The pool is at least half
	 * full, so don't worry too much about copying only the used
	 * part.
	 */
	spin_lock_irq(&batch_lock);
	memcpy(batch_entropy_copy, batch_entropy_pool,
	       batch_max*sizeof(struct sample));
	head = batch_head;
	tail = batch_tail;
	batch_tail = batch_head;
	spin_unlock_irq(&batch_lock);

	p = r;
	while (head != tail) {
		/* Current store full: switch to the other store. */
		if (r->entropy_count >= max_entropy) {
			r = (r == sec_random_state) ?
random_state :
							sec_random_state;
			max_entropy = r->poolinfo.POOLBITS;
		}
		add_entropy_words(r, batch_entropy_copy[tail].data, 2);
		credit_entropy_store(r, batch_entropy_copy[tail].credit);
		tail = (tail+1) & (batch_max-1);
	}

	/* Wake readers if the original target store now has enough entropy. */
	if (p->entropy_count >= random_read_wakeup_thresh)
		wake_up_interruptible(&random_read_wait);
}

/*********************************************************************
 *
 * Entropy input management
 *
 *********************************************************************/

/* There is one of these per entropy source */
struct timer_rand_state {
	cycles_t	last_time;		/* timestamp of the previous event */
	long		last_delta,last_delta2;	/* first- and second-order deltas */
	unsigned	dont_count_entropy:1;	/* when set, skip the entropy estimate */
};

static struct timer_rand_state keyboard_timer_state;
static struct timer_rand_state mouse_timer_state;
static struct timer_rand_state extract_timer_state;
static struct timer_rand_state *irq_timer_state[NR_IRQS];

/*
 * This function adds entropy to the entropy "pool" by using timing
 * delays.  It uses the timer_rand_state structure to make an estimate
 * of how many bits of entropy this call has added to the pool.
 *
 * The number "num" is also added to the pool - it should somehow describe
 * the type of event which just happened.  This is currently 0-255 for
 * keyboard scan codes, and 256 upwards for interrupts.
 *
 */
static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
{
	cycles_t	time;
	long		delta, delta2, delta3;
	int		entropy = 0;

	preempt_disable();
	/* if over the trickle threshold, use only 1 in 4096 samples */
	if ( random_state->entropy_count > trickle_thresh &&
	     (__get_cpu_var(trickle_count)++ & 0xfff))
		goto out;

	/*
	 * Use get_cycles() if implemented, otherwise fall back to
	 * jiffies.
	 */
	time = get_cycles();
	if (time != 0) {
		/* Fold the high half of a 64-bit cycle counter into num. */
		if (sizeof(time) > 4)
			num ^= (u32)(time >> 32);
	} else {
		time = jiffies;
	}

	/*
	 * Calculate number of bits of randomness we probably added.
	 * We take into account the first, second and third-order deltas
	 * in order to make our estimate.
	 */
	if (!state->dont_count_entropy) {
		delta = time - state->last_time;
		state->last_time = time;

		delta2 = delta - state->last_delta;
		state->last_delta = delta;

		delta3 = delta2 - state->last_delta2;
		state->last_delta2 = delta2;

		if (delta < 0)
			delta = -delta;
		if (delta2 < 0)
			delta2 = -delta2;

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?