Diffstat (limited to 'drivers/char/random.c')
-rw-r--r--  drivers/char/random.c  646
1 files changed, 240 insertions, 406 deletions
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 429b75b..7a744d3 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -255,7 +255,6 @@
#include <linux/fips.h>
#include <linux/ptrace.h>
#include <linux/kmemcheck.h>
-#include <linux/workqueue.h>
#include <linux/irq.h>
#include <asm/processor.h>
@@ -270,28 +269,14 @@
/*
* Configuration information
*/
-#define INPUT_POOL_SHIFT 12
-#define INPUT_POOL_WORDS (1 << (INPUT_POOL_SHIFT-5))
-#define OUTPUT_POOL_SHIFT 10
-#define OUTPUT_POOL_WORDS (1 << (OUTPUT_POOL_SHIFT-5))
-#define SEC_XFER_SIZE 512
-#define EXTRACT_SIZE 10
-
-#define DEBUG_RANDOM_BOOT 0
+#define INPUT_POOL_WORDS 128
+#define OUTPUT_POOL_WORDS 32
+#define SEC_XFER_SIZE 512
+#define EXTRACT_SIZE 10
#define LONGS(x) (((x) + sizeof(unsigned long) - 1)/sizeof(unsigned long))
/*
- * To allow fractional bits to be tracked, the entropy_count field is
- * denominated in units of 1/8th bits.
- *
- * 2*(ENTROPY_SHIFT + log2(poolbits)) must <= 31, or the multiply in
- * credit_entropy_bits() needs to be 64 bits wide.
- */
-#define ENTROPY_SHIFT 3
-#define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT)
-
-/*
* The minimum number of bits of entropy before we wake up a read on
* /dev/random. Should be enough to do a significant reseed.
*/
@@ -302,100 +287,108 @@ static int random_read_wakeup_thresh = 64;
* should wake up processes which are selecting or polling on write
* access to /dev/random.
*/
-static int random_write_wakeup_thresh = 28 * OUTPUT_POOL_WORDS;
+static int random_write_wakeup_thresh = 128;
/*
- * The minimum number of seconds between urandom pool reseeding. We
- * do this to limit the amount of entropy that can be drained from the
- * input pool even if there are heavy demands on /dev/urandom.
+ * When the input pool goes over trickle_thresh, start dropping most
+ * samples to avoid wasting CPU time and reduce lock contention.
*/
-static int random_min_urandom_seed = 60;
+
+static int trickle_thresh __read_mostly = INPUT_POOL_WORDS * 28;
+
+static DEFINE_PER_CPU(int, trickle_count);
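+
With INPUT_POOL_WORDS = 128 the threshold works out to 128 * 28 = 3584 bits,
i.e. the 4096-bit input pool roughly 7/8 full. The per-cpu counter is consumed
by the gate this patch adds to add_timer_randomness() (see that hunk further
down); a condensed sketch of the test:

        /* once above trickle_thresh, keep only ~1 sample in 4096 */
        if (input_pool.entropy_count > trickle_thresh &&
            ((__this_cpu_inc_return(trickle_count) - 1) & 0xfff))
                goto out;               /* drop this sample */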
/*
- * Originally, we used a primitive polynomial of degree .poolwords
- * over GF(2). The taps for various sizes are defined below. They
- * were chosen to be evenly spaced except for the last tap, which is 1
- * to get the twisting happening as fast as possible.
- *
- * For the purposes of better mixing, we use the CRC-32 polynomial as
- * well to make a (modified) twisted Generalized Feedback Shift
- * Register. (See M. Matsumoto & Y. Kurita, 1992. Twisted GFSR
- * generators. ACM Transactions on Modeling and Computer Simulation
- * 2(3):179-194. Also see M. Matsumoto & Y. Kurita, 1994. Twisted
- * GFSR generators II. ACM Transactions on Modeling and Computer
- * Simulation 4:254-266)
- *
- * Thanks to Colin Plumb for suggesting this.
- *
- * The mixing operation is much less sensitive than the output hash,
- * where we use SHA-1. All that we want of mixing operation is that
- * it be a good non-cryptographic hash; i.e. it not produce collisions
- * when fed "random" data of the sort we expect to see. As long as
- * the pool state differs for different inputs, we have preserved the
- * input entropy and done a good job. The fact that an intelligent
- * attacker can construct inputs that will produce controlled
- * alterations to the pool's state is not important because we don't
- * consider such inputs to contribute any randomness. The only
- * property we need with respect to them is that the attacker can't
- * increase his/her knowledge of the pool's state. Since all
- * additions are reversible (knowing the final state and the input,
- * you can reconstruct the initial state), if an attacker has any
- * uncertainty about the initial state, he/she can only shuffle that
- * uncertainty about, but never cause any collisions (which would
- * decrease the uncertainty).
- *
- * Our mixing functions were analyzed by Lacharme, Roeck, Strubel, and
- * Videau in their paper, "The Linux Pseudorandom Number Generator
- * Revisited" (see: http://eprint.iacr.org/2012/251.pdf). In their
- * paper, they point out that we are not using a true Twisted GFSR,
- * since Matsumoto & Kurita used a trinomial feedback polynomial (that
- * is, with only three taps, instead of the six that we are using).
- * As a result, the resulting polynomial is neither primitive nor
- * irreducible, and hence does not have a maximal period over
- * GF(2**32). They suggest a slight change to the generator
- * polynomial which improves the resulting TGFSR polynomial to be
- * irreducible, which we have made here.
+ * A pool of size .poolwords is stirred with a primitive polynomial
+ * of degree .poolwords over GF(2). The taps for various sizes are
+ * defined below. They are chosen to be evenly spaced (minimum RMS
+ * distance from evenly spaced; the numbers in the comments are a
+ * scaled squared error sum) except for the last tap, which is 1 to
+ * get the twisting happening as fast as possible.
*/
static struct poolinfo {
- int poolbitshift, poolwords, poolbytes, poolbits, poolfracbits;
-#define S(x) ilog2(x)+5, (x), (x)*4, (x)*32, (x) << (ENTROPY_SHIFT+5)
+ int poolwords;
int tap1, tap2, tap3, tap4, tap5;
} poolinfo_table[] = {
- /* was: x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 */
- /* x^128 + x^104 + x^76 + x^51 +x^25 + x + 1 */
- { S(128), 104, 76, 51, 25, 1 },
- /* was: x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 */
- /* x^32 + x^26 + x^19 + x^14 + x^7 + x + 1 */
- { S(32), 26, 19, 14, 7, 1 },
+ /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
+ { 128, 103, 76, 51, 25, 1 },
+ /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
+ { 32, 26, 20, 14, 7, 1 },
#if 0
/* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
- { S(2048), 1638, 1231, 819, 411, 1 },
+ { 2048, 1638, 1231, 819, 411, 1 },
/* x^1024 + x^817 + x^615 + x^412 + x^204 + x + 1 -- 290 */
- { S(1024), 817, 615, 412, 204, 1 },
+ { 1024, 817, 615, 412, 204, 1 },
/* x^1024 + x^819 + x^616 + x^410 + x^207 + x^2 + 1 -- 115 */
- { S(1024), 819, 616, 410, 207, 2 },
+ { 1024, 819, 616, 410, 207, 2 },
/* x^512 + x^411 + x^308 + x^208 + x^104 + x + 1 -- 225 */
- { S(512), 411, 308, 208, 104, 1 },
+ { 512, 411, 308, 208, 104, 1 },
/* x^512 + x^409 + x^307 + x^206 + x^102 + x^2 + 1 -- 95 */
- { S(512), 409, 307, 206, 102, 2 },
+ { 512, 409, 307, 206, 102, 2 },
/* x^512 + x^409 + x^309 + x^205 + x^103 + x^2 + 1 -- 95 */
- { S(512), 409, 309, 205, 103, 2 },
+ { 512, 409, 309, 205, 103, 2 },
/* x^256 + x^205 + x^155 + x^101 + x^52 + x + 1 -- 125 */
- { S(256), 205, 155, 101, 52, 1 },
+ { 256, 205, 155, 101, 52, 1 },
/* x^128 + x^103 + x^78 + x^51 + x^27 + x^2 + 1 -- 70 */
- { S(128), 103, 78, 51, 27, 2 },
+ { 128, 103, 78, 51, 27, 2 },
/* x^64 + x^52 + x^39 + x^26 + x^14 + x + 1 -- 15 */
- { S(64), 52, 39, 26, 14, 1 },
+ { 64, 52, 39, 26, 14, 1 },
#endif
};
+#define POOLBITS poolwords*32
+#define POOLBYTES poolwords*4
+
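These two macros are written so they read like extra members of struct
poolinfo; the preprocessor simply splices the arithmetic in after the "->".
For example, as used elsewhere in this patch:

        /* r->poolinfo->POOLBITS expands to r->poolinfo->poolwords*32 */
        if (entropy_count > r->poolinfo->POOLBITS)
                entropy_count = r->poolinfo->POOLBITS;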
+/*
+ * For the purposes of better mixing, we use the CRC-32 polynomial as
+ * well to make a twisted Generalized Feedback Shift Register.
+ *
+ * (See M. Matsumoto & Y. Kurita, 1992. Twisted GFSR generators. ACM
+ * Transactions on Modeling and Computer Simulation 2(3):179-194.
+ * Also see M. Matsumoto & Y. Kurita, 1994. Twisted GFSR generators
+ * II. ACM Transactions on Modeling and Computer Simulation 4:254-266)
+ *
+ * Thanks to Colin Plumb for suggesting this.
+ *
+ * We have not analyzed the resultant polynomial to prove it primitive;
+ * in fact it almost certainly isn't. Nonetheless, the irreducible factors
+ * of a random large-degree polynomial over GF(2) are more than large enough
+ * that periodicity is not a concern.
+ *
+ * The input hash is much less sensitive than the output hash. All
+ * that we want of it is that it be a good non-cryptographic hash;
+ * i.e. it not produce collisions when fed "random" data of the sort
+ * we expect to see. As long as the pool state differs for different
+ * inputs, we have preserved the input entropy and done a good job.
+ * The fact that an intelligent attacker can construct inputs that
+ * will produce controlled alterations to the pool's state is not
+ * important because we don't consider such inputs to contribute any
+ * randomness. The only property we need with respect to them is that
+ * the attacker can't increase his/her knowledge of the pool's state.
+ * Since all additions are reversible (knowing the final state and the
+ * input, you can reconstruct the initial state), if an attacker has
+ * any uncertainty about the initial state, he/she can only shuffle
+ * that uncertainty about, but never cause any collisions (which would
+ * decrease the uncertainty).
+ *
+ * The chosen system lets the state of the pool be (essentially) the input
+ * modulo the generator polynomial. Now, for random primitive polynomials,
+ * this is a universal class of hash functions, meaning that the chance
+ * of a collision is limited by the attacker's knowledge of the generator
+ * polynomial, so if it is chosen at random, an attacker can never force
+ * a collision. Here, we use a fixed polynomial, but we *can* assume that
+ * ###--> it is unknown to the processes generating the input entropy. <-###
+ * Because of this important property, this is a good, collision-resistant
+ * hash; hash collisions will occur no more often than chance.
+ */
+
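The twist mentioned above is what _mix_pool_bytes() does further down in this
file: each input byte is rotated, XORed with the word at the current position
and at the five taps, then shifted right by three bits with the low bits
folded back in through twist_table[] (the CRC-32 reduction). A minimal
userspace sketch of the per-byte step, illustrative only -- it shows a single
tap, and the table values are copied from the kernel's twist_table[], which
this excerpt elides:

        #include <stdint.h>

        static const uint32_t twist_table[8] = {
                0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
                0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };

        static uint32_t rol32(uint32_t w, unsigned int s)
        {
                return (w << s) | (w >> ((32 - s) & 31));
        }

        static void mix_byte(uint32_t *pool, unsigned int poolwords,
                             unsigned int *pos, unsigned int *rot,
                             int tap1, uint8_t b)
        {
                unsigned int wordmask = poolwords - 1;
                unsigned int i = (*pos - 1) & wordmask;
                uint32_t w = rol32(b, *rot & 31);

                w ^= pool[i];
                w ^= pool[(i + tap1) & wordmask];       /* taps 2..5 omitted */
                pool[i] = (w >> 3) ^ twist_table[w & 7];

                *rot += i ? 7 : 14;     /* spread bytes across bit positions */
                *pos = i;
        }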
/*
* Static global variables
*/
@@ -403,6 +396,17 @@ static DECLARE_WAIT_QUEUE_HEAD(random_read_wait);
static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
static struct fasync_struct *fasync;
+static bool debug;
+module_param(debug, bool, 0644);
+#define DEBUG_ENT(fmt, arg...) do { \
+ if (debug) \
+ printk(KERN_DEBUG "random %04d %04d %04d: " \
+ fmt,\
+ input_pool.entropy_count,\
+ blocking_pool.entropy_count,\
+ nonblocking_pool.entropy_count,\
+ ## arg); } while (0)
+
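Because debug is now a module parameter, pool tracing can be turned on at run
time instead of rebuilding with a DEBUG define; with the usual built-in
module_param behaviour it should appear as a writable
/sys/module/random/parameters/debug entry (path assumed, not part of this
patch).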
/**********************************************************************
*
* OS independent entropy store. Here are the functions which handle
@@ -413,26 +417,23 @@ static struct fasync_struct *fasync;
struct entropy_store;
struct entropy_store {
/* read-only data: */
- const struct poolinfo *poolinfo;
+ struct poolinfo *poolinfo;
__u32 *pool;
const char *name;
struct entropy_store *pull;
- struct work_struct push_work;
+ int limit;
/* read-write data: */
- unsigned long last_pulled;
spinlock_t lock;
- unsigned short add_ptr;
- unsigned short input_rotate;
+ unsigned add_ptr;
+ unsigned input_rotate;
int entropy_count;
int entropy_total;
unsigned int initialized:1;
- unsigned int limit:1;
- unsigned int last_data_init:1;
+ bool last_data_init;
__u8 last_data[EXTRACT_SIZE];
};
-static void push_to_pool(struct work_struct *work);
static __u32 input_pool_data[INPUT_POOL_WORDS];
static __u32 blocking_pool_data[OUTPUT_POOL_WORDS];
static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS];
@@ -451,9 +452,7 @@ static struct entropy_store blocking_pool = {
.limit = 1,
.pull = &input_pool,
.lock = __SPIN_LOCK_UNLOCKED(blocking_pool.lock),
- .pool = blocking_pool_data,
- .push_work = __WORK_INITIALIZER(blocking_pool.push_work,
- push_to_pool),
+ .pool = blocking_pool_data
};
static struct entropy_store nonblocking_pool = {
@@ -461,9 +460,7 @@ static struct entropy_store nonblocking_pool = {
.name = "nonblocking",
.pull = &input_pool,
.lock = __SPIN_LOCK_UNLOCKED(nonblocking_pool.lock),
- .pool = nonblocking_pool_data,
- .push_work = __WORK_INITIALIZER(nonblocking_pool.push_work,
- push_to_pool),
+ .pool = nonblocking_pool_data
};
static __u32 const twist_table[8] = {
@@ -501,7 +498,7 @@ static void _mix_pool_bytes(struct entropy_store *r, const void *in,
/* mix one byte at a time to simplify size handling and churn faster */
while (nbytes--) {
- w = rol32(*bytes++, input_rotate);
+ w = rol32(*bytes++, input_rotate & 31);
i = (i - 1) & wordmask;
/* XOR in the various taps */
@@ -521,7 +518,7 @@ static void _mix_pool_bytes(struct entropy_store *r, const void *in,
* rotation, so that successive passes spread the
* input bits across the pool evenly.
*/
- input_rotate = (input_rotate + (i ? 7 : 14)) & 31;
+ input_rotate += i ? 7 : 14;
}
ACCESS_ONCE(r->input_rotate) = input_rotate;
@@ -564,151 +561,62 @@ struct fast_pool {
* collector. It's hardcoded for a 128 bit pool and assumes that any
* locks that might be needed are taken by the caller.
*/
-static void fast_mix(struct fast_pool *f, __u32 input[4])
+static void fast_mix(struct fast_pool *f, const void *in, int nbytes)
{
+ const char *bytes = in;
__u32 w;
+ unsigned i = f->count;
unsigned input_rotate = f->rotate;
- w = rol32(input[0], input_rotate) ^ f->pool[0] ^ f->pool[3];
- f->pool[0] = (w >> 3) ^ twist_table[w & 7];
- input_rotate = (input_rotate + 14) & 31;
- w = rol32(input[1], input_rotate) ^ f->pool[1] ^ f->pool[0];
- f->pool[1] = (w >> 3) ^ twist_table[w & 7];
- input_rotate = (input_rotate + 7) & 31;
- w = rol32(input[2], input_rotate) ^ f->pool[2] ^ f->pool[1];
- f->pool[2] = (w >> 3) ^ twist_table[w & 7];
- input_rotate = (input_rotate + 7) & 31;
- w = rol32(input[3], input_rotate) ^ f->pool[3] ^ f->pool[2];
- f->pool[3] = (w >> 3) ^ twist_table[w & 7];
- input_rotate = (input_rotate + 7) & 31;
-
+ while (nbytes--) {
+ w = rol32(*bytes++, input_rotate & 31) ^ f->pool[i & 3] ^
+ f->pool[(i + 1) & 3];
+ f->pool[i & 3] = (w >> 3) ^ twist_table[w & 7];
+ input_rotate += (i++ & 3) ? 7 : 14;
+ }
+ f->count = i;
f->rotate = input_rotate;
- f->count++;
}
/*
- * Credit (or debit) the entropy store with n bits of entropy.
- * Use credit_entropy_bits_safe() if the value comes from userspace
- * or otherwise should be checked for extreme values.
+ * Credit (or debit) the entropy store with n bits of entropy
*/
static void credit_entropy_bits(struct entropy_store *r, int nbits)
{
int entropy_count, orig;
- const int pool_size = r->poolinfo->poolfracbits;
- int nfrac = nbits << ENTROPY_SHIFT;
if (!nbits)
return;
+ DEBUG_ENT("added %d entropy credits to %s\n", nbits, r->name);
retry:
entropy_count = orig = ACCESS_ONCE(r->entropy_count);
- if (nfrac < 0) {
- /* Debit */
- entropy_count += nfrac;
- } else {
- /*
- * Credit: we have to account for the possibility of
- * overwriting already present entropy. Even in the
- * ideal case of pure Shannon entropy, new contributions
- * approach the full value asymptotically:
- *
- * entropy <- entropy + (pool_size - entropy) *
- * (1 - exp(-add_entropy/pool_size))
- *
- * For add_entropy <= pool_size/2 then
- * (1 - exp(-add_entropy/pool_size)) >=
- * (add_entropy/pool_size)*0.7869...
- * so we can approximate the exponential with
- * 3/4*add_entropy/pool_size and still be on the
- * safe side by adding at most pool_size/2 at a time.
- *
- * The use of pool_size-2 in the while statement is to
- * prevent rounding artifacts from making the loop
- * arbitrarily long; this limits the loop to log2(pool_size)*2
- * turns no matter how large nbits is.
- */
- int pnfrac = nfrac;
- const int s = r->poolinfo->poolbitshift + ENTROPY_SHIFT + 2;
- /* The +2 corresponds to the /4 in the denominator */
-
- do {
- unsigned int anfrac = min(pnfrac, pool_size/2);
- unsigned int add =
- ((pool_size - entropy_count)*anfrac*3) >> s;
-
- entropy_count += add;
- pnfrac -= anfrac;
- } while (unlikely(entropy_count < pool_size-2 && pnfrac));
- }
+ entropy_count += nbits;
if (entropy_count < 0) {
- pr_warn("random: negative entropy/overflow: pool %s count %d\n",
- r->name, entropy_count);
- WARN_ON(1);
+ DEBUG_ENT("negative entropy/overflow\n");
entropy_count = 0;
- } else if (entropy_count > pool_size)
- entropy_count = pool_size;
+ } else if (entropy_count > r->poolinfo->POOLBITS)
+ entropy_count = r->poolinfo->POOLBITS;
if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
goto retry;
- r->entropy_total += nbits;
- if (!r->initialized && r->entropy_total > 128) {
- r->initialized = 1;
- r->entropy_total = 0;
- if (r == &nonblocking_pool) {
- prandom_reseed_late();
- pr_notice("random: %s pool is initialized\n", r->name);
- }
+ if (!r->initialized && nbits > 0) {
+ r->entropy_total += nbits;
+ if (r->entropy_total > 128)
+ r->initialized = 1;
}
- trace_credit_entropy_bits(r->name, nbits,
- entropy_count >> ENTROPY_SHIFT,
+ trace_credit_entropy_bits(r->name, nbits, entropy_count,
r->entropy_total, _RET_IP_);
- if (r == &input_pool) {
- int entropy_bytes = entropy_count >> ENTROPY_SHIFT;
-
- /* should we wake readers? */
- if (entropy_bytes >= random_read_wakeup_thresh) {
- wake_up_interruptible(&random_read_wait);
- kill_fasync(&fasync, SIGIO, POLL_IN);
- }
- /* If the input pool is getting full, send some
- * entropy to the two output pools, flipping back and
- * forth between them, until the output pools are 75%
- * full.
- */
- if (entropy_bytes > random_write_wakeup_thresh &&
- r->initialized &&
- r->entropy_total >= 2*random_read_wakeup_thresh) {
- static struct entropy_store *last = &blocking_pool;
- struct entropy_store *other = &blocking_pool;
-
- if (last == &blocking_pool)
- other = &nonblocking_pool;
- if (other->entropy_count <=
- 3 * other->poolinfo->poolfracbits / 4)
- last = other;
- if (last->entropy_count <=
- 3 * last->poolinfo->poolfracbits / 4) {
- schedule_work(&last->push_work);
- r->entropy_total = 0;
- }
- }
+ /* should we wake readers? */
+ if (r == &input_pool && entropy_count >= random_read_wakeup_thresh) {
+ wake_up_interruptible(&random_read_wait);
+ kill_fasync(&fasync, SIGIO, POLL_IN);
}
}
-static void credit_entropy_bits_safe(struct entropy_store *r, int nbits)
-{
- const int nbits_max = (int)(~0U >> (ENTROPY_SHIFT + 1));
-
- /* Cap the value to avoid overflows */
- nbits = min(nbits, nbits_max);
- nbits = max(nbits, -nbits_max);
-
- credit_entropy_bits(r, nbits);
-}
-
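For comparison, the crediting logic removed above approached the pool size
asymptotically, entropy <- entropy + (pool - entropy) * (1 - exp(-add/pool)),
using 3/4 * add/pool as a safe underestimate of (1 - exp(-add/pool)) for
add <= pool/2. A standalone sketch of that rule in plain bits (the removed
kernel code worked in 1/8th-bit units and replaced the division with a shift):

        static int credit_asymptotic(int entropy, int add, int pool)
        {
                do {
                        int chunk = add < pool / 2 ? add : pool / 2;

                        /* 3/4 * chunk * (pool - entropy) / pool, rounded down */
                        entropy += (int)(((long long)(pool - entropy) * chunk * 3) /
                                         (4LL * pool));
                        add -= chunk;
                } while (entropy < pool - 2 && add > 0);

                return entropy < pool ? entropy : pool;
        }

Crediting 512 bits into an empty 4096-bit pool, for example, adds
3/4 * 512 = 384 bits on the first pass.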
/*********************************************************************
*
* Entropy input management
@@ -722,8 +630,6 @@ struct timer_rand_state {
unsigned dont_count_entropy:1;
};
-#define INIT_TIMER_RAND_STATE { INITIAL_JIFFIES, };
-
/*
* Add device- or boot-specific data to the input and nonblocking
* pools to help initialize them to unique values.
@@ -735,22 +641,15 @@ struct timer_rand_state {
void add_device_randomness(const void *buf, unsigned int size)
{
unsigned long time = random_get_entropy() ^ jiffies;
- unsigned long flags;
- trace_add_device_randomness(size, _RET_IP_);
- spin_lock_irqsave(&input_pool.lock, flags);
- _mix_pool_bytes(&input_pool, buf, size, NULL);
- _mix_pool_bytes(&input_pool, &time, sizeof(time), NULL);
- spin_unlock_irqrestore(&input_pool.lock, flags);
-
- spin_lock_irqsave(&nonblocking_pool.lock, flags);
- _mix_pool_bytes(&nonblocking_pool, buf, size, NULL);
- _mix_pool_bytes(&nonblocking_pool, &time, sizeof(time), NULL);
- spin_unlock_irqrestore(&nonblocking_pool.lock, flags);
+ mix_pool_bytes(&input_pool, buf, size, NULL);
+ mix_pool_bytes(&input_pool, &time, sizeof(time), NULL);
+ mix_pool_bytes(&nonblocking_pool, buf, size, NULL);
+ mix_pool_bytes(&nonblocking_pool, &time, sizeof(time), NULL);
}
EXPORT_SYMBOL(add_device_randomness);
-static struct timer_rand_state input_timer_state = INIT_TIMER_RAND_STATE;
+static struct timer_rand_state input_timer_state;
/*
* This function adds entropy to the entropy "pool" by using timing
@@ -764,7 +663,6 @@ static struct timer_rand_state input_timer_state = INIT_TIMER_RAND_STATE;
*/
static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
{
- struct entropy_store *r;
struct {
long jiffies;
unsigned cycles;
@@ -773,12 +671,15 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
long delta, delta2, delta3;
preempt_disable();
+ /* if over the trickle threshold, use only 1 in 4096 samples */
+ if (input_pool.entropy_count > trickle_thresh &&
+ ((__this_cpu_inc_return(trickle_count) - 1) & 0xfff))
+ goto out;
sample.jiffies = jiffies;
sample.cycles = random_get_entropy();
sample.num = num;
- r = nonblocking_pool.initialized ? &input_pool : &nonblocking_pool;
- mix_pool_bytes(r, &sample, sizeof(sample), NULL);
+ mix_pool_bytes(&input_pool, &sample, sizeof(sample), NULL);
/*
* Calculate number of bits of randomness we probably added.
@@ -812,8 +713,10 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
* Round down by 1 bit on general principles,
* and limit entropy estimate to 12 bits.
*/
- credit_entropy_bits(r, min_t(int, fls(delta>>1), 11));
+ credit_entropy_bits(&input_pool,
+ min_t(int, fls(delta>>1), 11));
}
+out:
preempt_enable();
}
@@ -826,10 +729,10 @@ void add_input_randomness(unsigned int type, unsigned int code,
if (value == last_value)
return;
+ DEBUG_ENT("input event\n");
last_value = value;
add_timer_randomness(&input_timer_state,
(type << 4) ^ code ^ (code >> 4) ^ value);
- trace_add_input_randomness(ENTROPY_BITS(&input_pool));
}
EXPORT_SYMBOL_GPL(add_input_randomness);
@@ -841,21 +744,20 @@ void add_interrupt_randomness(int irq, int irq_flags)
struct fast_pool *fast_pool = &__get_cpu_var(irq_randomness);
struct pt_regs *regs = get_irq_regs();
unsigned long now = jiffies;
- cycles_t cycles = random_get_entropy();
- __u32 input[4], c_high, j_high;
- __u64 ip;
-
- c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0;
- j_high = (sizeof(now) > 4) ? now >> 32 : 0;
- input[0] = cycles ^ j_high ^ irq;
- input[1] = now ^ c_high;
- ip = regs ? instruction_pointer(regs) : _RET_IP_;
- input[2] = ip;
- input[3] = ip >> 32;
+ __u32 input[4], cycles = random_get_entropy();
+
+ input[0] = cycles ^ jiffies;
+ input[1] = irq;
+ if (regs) {
+ __u64 ip = instruction_pointer(regs);
+ input[2] = ip;
+ input[3] = ip >> 32;
+ }
- fast_mix(fast_pool, input);
+ fast_mix(fast_pool, input, sizeof(input));
- if ((fast_pool->count & 63) && !time_after(now, fast_pool->last + HZ))
+ if ((fast_pool->count & 1023) &&
+ !time_after(now, fast_pool->last + HZ))
return;
fast_pool->last = now;
@@ -884,8 +786,10 @@ void add_disk_randomness(struct gendisk *disk)
if (!disk || !disk->random)
return;
/* first major is 1, so we get >= 0x200 here */
+ DEBUG_ENT("disk event %d:%d\n",
+ MAJOR(disk_devt(disk)), MINOR(disk_devt(disk)));
+
add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
- trace_add_disk_randomness(disk_devt(disk), ENTROPY_BITS(&input_pool));
}
#endif
@@ -903,58 +807,30 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
* from the primary pool to the secondary extraction pool. We make
* sure we pull enough for a 'catastrophic reseed'.
*/
-static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes);
static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
{
- if (r->limit == 0 && random_min_urandom_seed) {
- unsigned long now = jiffies;
-
- if (time_before(now,
- r->last_pulled + random_min_urandom_seed * HZ))
- return;
- r->last_pulled = now;
- }
- if (r->pull &&
- r->entropy_count < (nbytes << (ENTROPY_SHIFT + 3)) &&
- r->entropy_count < r->poolinfo->poolfracbits)
- _xfer_secondary_pool(r, nbytes);
-}
-
-static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
-{
__u32 tmp[OUTPUT_POOL_WORDS];
- /* For /dev/random's pool, always leave two wakeup worth's BITS */
- int rsvd = r->limit ? 0 : random_read_wakeup_thresh/4;
- int bytes = nbytes;
-
- /* pull at least as many as BYTES as wakeup BITS */
- bytes = max_t(int, bytes, random_read_wakeup_thresh / 8);
- /* but never more than the buffer size */
- bytes = min_t(int, bytes, sizeof(tmp));
-
- trace_xfer_secondary_pool(r->name, bytes * 8, nbytes * 8,
- ENTROPY_BITS(r), ENTROPY_BITS(r->pull));
- bytes = extract_entropy(r->pull, tmp, bytes,
- random_read_wakeup_thresh / 8, rsvd);
- mix_pool_bytes(r, tmp, bytes, NULL);
- credit_entropy_bits(r, bytes*8);
-}
-
-/*
- * Used as a workqueue function so that when the input pool is getting
- * full, we can "spill over" some entropy to the output pools. That
- * way the output pools can store some of the excess entropy instead
- * of letting it go to waste.
- */
-static void push_to_pool(struct work_struct *work)
-{
- struct entropy_store *r = container_of(work, struct entropy_store,
- push_work);
- BUG_ON(!r);
- _xfer_secondary_pool(r, random_read_wakeup_thresh/8);
- trace_push_to_pool(r->name, r->entropy_count >> ENTROPY_SHIFT,
- r->pull->entropy_count >> ENTROPY_SHIFT);
+ if (r->pull && r->entropy_count < nbytes * 8 &&
+ r->entropy_count < r->poolinfo->POOLBITS) {
+ /* If we're limited, always leave two wakeups' worth of BITS */
+ int rsvd = r->limit ? 0 : random_read_wakeup_thresh/4;
+ int bytes = nbytes;
+
+ /* pull at least as many BYTES as wakeup BITS */
+ bytes = max_t(int, bytes, random_read_wakeup_thresh / 8);
+ /* but never more than the buffer size */
+ bytes = min_t(int, bytes, sizeof(tmp));
+
+ DEBUG_ENT("going to reseed %s with %d bits "
+ "(%zu of %d requested)\n",
+ r->name, bytes * 8, nbytes * 8, r->entropy_count);
+
+ bytes = extract_entropy(r->pull, tmp, bytes,
+ random_read_wakeup_thresh / 8, rsvd);
+ mix_pool_bytes(r, tmp, bytes, NULL);
+ credit_entropy_bits(r, bytes*8);
+ }
}
/*
@@ -974,48 +850,50 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
{
unsigned long flags;
int wakeup_write = 0;
- int have_bytes;
- int entropy_count, orig;
- size_t ibytes;
/* Hold lock while accounting */
spin_lock_irqsave(&r->lock, flags);
- BUG_ON(r->entropy_count > r->poolinfo->poolfracbits);
+ BUG_ON(r->entropy_count > r->poolinfo->POOLBITS);
+ DEBUG_ENT("trying to extract %zu bits from %s\n",
+ nbytes * 8, r->name);
/* Can we pull enough? */
-retry:
- entropy_count = orig = ACCESS_ONCE(r->entropy_count);
- have_bytes = entropy_count >> (ENTROPY_SHIFT + 3);
- ibytes = nbytes;
- if (have_bytes < min + reserved) {
- ibytes = 0;
+ if (r->entropy_count / 8 < min + reserved) {
+ nbytes = 0;
} else {
+ int entropy_count, orig;
+retry:
+ entropy_count = orig = ACCESS_ONCE(r->entropy_count);
/* If limited, never pull more than available */
- if (r->limit && ibytes + reserved >= have_bytes)
- ibytes = have_bytes - reserved;
-
- if (have_bytes >= ibytes + reserved)
- entropy_count -= ibytes << (ENTROPY_SHIFT + 3);
- else
- entropy_count = reserved << (ENTROPY_SHIFT + 3);
-
- if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
- goto retry;
+ if (r->limit && nbytes + reserved >= entropy_count / 8)
+ nbytes = entropy_count/8 - reserved;
+
+ if (entropy_count / 8 >= nbytes + reserved) {
+ entropy_count -= nbytes*8;
+ if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
+ goto retry;
+ } else {
+ entropy_count = reserved;
+ if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
+ goto retry;
+ }
- if ((r->entropy_count >> ENTROPY_SHIFT)
- < random_write_wakeup_thresh)
+ if (entropy_count < random_write_wakeup_thresh)
wakeup_write = 1;
}
+
+ DEBUG_ENT("debiting %zu entropy credits from %s%s\n",
+ nbytes * 8, r->name, r->limit ? "" : " (unlimited)");
+
spin_unlock_irqrestore(&r->lock, flags);
- trace_debit_entropy(r->name, 8 * ibytes);
if (wakeup_write) {
wake_up_interruptible(&random_write_wait);
kill_fasync(&fasync, SIGIO, POLL_OUT);
}
- return ibytes;
+ return nbytes;
}
static void extract_buf(struct entropy_store *r, __u8 *out)
@@ -1023,7 +901,7 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
int i;
union {
__u32 w[5];
- unsigned long l[LONGS(20)];
+ unsigned long l[LONGS(EXTRACT_SIZE)];
} hash;
__u32 workspace[SHA_WORKSPACE_WORDS];
__u8 extract[64];
@@ -1036,17 +914,6 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
sha_transform(hash.w, (__u8 *)(r->pool + i), workspace);
/*
- * If we have a architectural hardware random number
- * generator, mix that in, too.
- */
- for (i = 0; i < LONGS(20); i++) {
- unsigned long v;
- if (!arch_get_random_long(&v))
- break;
- hash.l[i] ^= v;
- }
-
- /*
* We mix the hash back into the pool to prevent backtracking
* attacks (where the attacker knows the state of the pool
* plus the current outputs, and attempts to find previous
@@ -1075,6 +942,17 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
hash.w[1] ^= hash.w[4];
hash.w[2] ^= rol32(hash.w[2], 16);
+ /*
+ * If we have an architectural hardware random number
+ * generator, mix that in, too.
+ */
+ for (i = 0; i < LONGS(EXTRACT_SIZE); i++) {
+ unsigned long v;
+ if (!arch_get_random_long(&v))
+ break;
+ hash.l[i] ^= v;
+ }
+
memcpy(out, &hash, EXTRACT_SIZE);
memset(&hash, 0, sizeof(hash));
}
@@ -1090,10 +968,10 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
if (fips_enabled) {
spin_lock_irqsave(&r->lock, flags);
if (!r->last_data_init) {
- r->last_data_init = 1;
+ r->last_data_init = true;
spin_unlock_irqrestore(&r->lock, flags);
trace_extract_entropy(r->name, EXTRACT_SIZE,
- ENTROPY_BITS(r), _RET_IP_);
+ r->entropy_count, _RET_IP_);
xfer_secondary_pool(r, EXTRACT_SIZE);
extract_buf(r, tmp);
spin_lock_irqsave(&r->lock, flags);
@@ -1102,7 +980,7 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
spin_unlock_irqrestore(&r->lock, flags);
}
- trace_extract_entropy(r->name, nbytes, ENTROPY_BITS(r), _RET_IP_);
+ trace_extract_entropy(r->name, nbytes, r->entropy_count, _RET_IP_);
xfer_secondary_pool(r, nbytes);
nbytes = account(r, nbytes, min, reserved);
@@ -1135,7 +1013,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
ssize_t ret = 0, i;
__u8 tmp[EXTRACT_SIZE];
- trace_extract_entropy_user(r->name, nbytes, ENTROPY_BITS(r), _RET_IP_);
+ trace_extract_entropy_user(r->name, nbytes, r->entropy_count, _RET_IP_);
xfer_secondary_pool(r, nbytes);
nbytes = account(r, nbytes, 0, 0);
@@ -1175,14 +1053,6 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
*/
void get_random_bytes(void *buf, int nbytes)
{
-#if DEBUG_RANDOM_BOOT > 0
- if (unlikely(nonblocking_pool.initialized == 0))
- printk(KERN_NOTICE "random: %pF get_random_bytes called "
- "with %d bits of entropy available\n",
- (void *) _RET_IP_,
- nonblocking_pool.entropy_total);
-#endif
- trace_get_random_bytes(nbytes, _RET_IP_);
extract_entropy(&nonblocking_pool, buf, nbytes, 0, 0);
}
EXPORT_SYMBOL(get_random_bytes);
@@ -1201,7 +1071,7 @@ void get_random_bytes_arch(void *buf, int nbytes)
{
char *p = buf;
- trace_get_random_bytes_arch(nbytes, _RET_IP_);
+ trace_get_random_bytes(nbytes, _RET_IP_);
while (nbytes) {
unsigned long v;
int chunk = min(nbytes, (int)sizeof(unsigned long));
@@ -1235,11 +1105,13 @@ static void init_std_data(struct entropy_store *r)
ktime_t now = ktime_get_real();
unsigned long rv;
- r->last_pulled = jiffies;
+ r->entropy_count = 0;
+ r->entropy_total = 0;
+ r->last_data_init = false;
mix_pool_bytes(r, &now, sizeof(now), NULL);
- for (i = r->poolinfo->poolbytes; i > 0; i -= sizeof(rv)) {
+ for (i = r->poolinfo->POOLBYTES; i > 0; i -= sizeof(rv)) {
if (!arch_get_random_long(&rv))
- rv = random_get_entropy();
+ break;
mix_pool_bytes(r, &rv, sizeof(rv), NULL);
}
mix_pool_bytes(r, utsname(), sizeof(*(utsname())), NULL);
@@ -1262,7 +1134,7 @@ static int rand_initialize(void)
init_std_data(&nonblocking_pool);
return 0;
}
-early_initcall(rand_initialize);
+module_init(rand_initialize);
#ifdef CONFIG_BLOCK
void rand_initialize_disk(struct gendisk *disk)
@@ -1274,10 +1146,8 @@ void rand_initialize_disk(struct gendisk *disk)
* source.
*/
state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
- if (state) {
- state->last_time = INITIAL_JIFFIES;
+ if (state)
disk->random = state;
- }
}
#endif
@@ -1294,6 +1164,8 @@ random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
if (n > SEC_XFER_SIZE)
n = SEC_XFER_SIZE;
+ DEBUG_ENT("reading %zu bits\n", n*8);
+
n = extract_entropy_user(&blocking_pool, buf, n);
if (n < 0) {
@@ -1301,9 +1173,8 @@ random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
break;
}
- trace_random_read(n*8, (nbytes-n)*8,
- ENTROPY_BITS(&blocking_pool),
- ENTROPY_BITS(&input_pool));
+ DEBUG_ENT("read got %zd bits (%zd still needed)\n",
+ n*8, (nbytes-n)*8);
if (n == 0) {
if (file->f_flags & O_NONBLOCK) {
@@ -1311,9 +1182,13 @@ random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
break;
}
+ DEBUG_ENT("sleeping?\n");
+
wait_event_interruptible(random_read_wait,
- ENTROPY_BITS(&input_pool) >=
- random_read_wakeup_thresh);
+ input_pool.entropy_count >=
+ random_read_wakeup_thresh);
+
+ DEBUG_ENT("awake\n");
if (signal_pending(current)) {
retval = -ERESTARTSYS;
@@ -1336,18 +1211,7 @@ random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
static ssize_t
urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
{
- int ret;
-
- if (unlikely(nonblocking_pool.initialized == 0))
- printk_once(KERN_NOTICE "random: %s urandom read "
- "with %d bits of entropy available\n",
- current->comm, nonblocking_pool.entropy_total);
-
- ret = extract_entropy_user(&nonblocking_pool, buf, nbytes);
-
- trace_urandom_read(8 * nbytes, ENTROPY_BITS(&nonblocking_pool),
- ENTROPY_BITS(&input_pool));
- return ret;
+ return extract_entropy_user(&nonblocking_pool, buf, nbytes);
}
static unsigned int
@@ -1358,9 +1222,9 @@ random_poll(struct file *file, poll_table * wait)
poll_wait(file, &random_read_wait, wait);
poll_wait(file, &random_write_wait, wait);
mask = 0;
- if (ENTROPY_BITS(&input_pool) >= random_read_wakeup_thresh)
+ if (input_pool.entropy_count >= random_read_wakeup_thresh)
mask |= POLLIN | POLLRDNORM;
- if (ENTROPY_BITS(&input_pool) < random_write_wakeup_thresh)
+ if (input_pool.entropy_count < random_write_wakeup_thresh)
mask |= POLLOUT | POLLWRNORM;
return mask;
}
@@ -1411,8 +1275,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
switch (cmd) {
case RNDGETENTCNT:
/* inherently racy, no point locking */
- ent_count = ENTROPY_BITS(&input_pool);
- if (put_user(ent_count, p))
+ if (put_user(input_pool.entropy_count, p))
return -EFAULT;
return 0;
case RNDADDTOENTCNT:
@@ -1420,7 +1283,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
return -EPERM;
if (get_user(ent_count, p))
return -EFAULT;
- credit_entropy_bits_safe(&input_pool, ent_count);
+ credit_entropy_bits(&input_pool, ent_count);
return 0;
case RNDADDENTROPY:
if (!capable(CAP_SYS_ADMIN))
@@ -1435,19 +1298,14 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
size);
if (retval < 0)
return retval;
- credit_entropy_bits_safe(&input_pool, ent_count);
+ credit_entropy_bits(&input_pool, ent_count);
return 0;
case RNDZAPENTCNT:
case RNDCLEARPOOL:
- /*
- * Clear the entropy pool counters. We no longer clear
- * the entropy pool, as that's silly.
- */
+ /* Clear the entropy pool counters. */
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- input_pool.entropy_count = 0;
- nonblocking_pool.entropy_count = 0;
- blocking_pool.entropy_count = 0;
+ rand_initialize();
return 0;
default:
return -EINVAL;
@@ -1547,23 +1405,6 @@ static int proc_do_uuid(struct ctl_table *table, int write,
return proc_dostring(&fake_table, write, buffer, lenp, ppos);
}
-/*
- * Return entropy available scaled to integral bits
- */
-static int proc_do_entropy(ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
-{
- ctl_table fake_table;
- int entropy_count;
-
- entropy_count = *(int *)table->data >> ENTROPY_SHIFT;
-
- fake_table.data = &entropy_count;
- fake_table.maxlen = sizeof(entropy_count);
-
- return proc_dointvec(&fake_table, write, buffer, lenp, ppos);
-}
-
static int sysctl_poolsize = INPUT_POOL_WORDS * 32;
extern struct ctl_table random_table[];
struct ctl_table random_table[] = {
@@ -1578,7 +1419,7 @@ struct ctl_table random_table[] = {
.procname = "entropy_avail",
.maxlen = sizeof(int),
.mode = 0444,
- .proc_handler = proc_do_entropy,
+ .proc_handler = proc_dointvec,
.data = &input_pool.entropy_count,
},
{
@@ -1600,13 +1441,6 @@ struct ctl_table random_table[] = {
.extra2 = &max_write_thresh,
},
{
- .procname = "urandom_min_reseed_secs",
- .data = &random_min_urandom_seed,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
- {
.procname = "boot_id",
.data = &sysctl_bootid,
.maxlen = 16,