Merge branch 'entropy'
Merge active entropy generation updates.

This is admittedly partly "for discussion". We need to have a way forward for the boot-time deadlocks where user space ends up waiting for more entropy, but no entropy is forthcoming because the system is entirely idle, just waiting for something to happen.

While this was triggered by what is arguably a user-space bug with GDM/gnome-session asking for secure randomness during early boot, when they didn't even need any such truly secure thing, the issue ends up being that our "getrandom()" interface is prone to that kind of confusion, because people don't think very hard about whether they want to block for sufficient amounts of entropy.

The approach herein is to not just passively wait for entropy to happen, but to start actively collecting it if it is missing. This is not always possible, but if the architecture has a CPU cycle counter, there is a fair amount of noise in the exact timings of reasonably complex loads. We may end up tweaking the load and the entropy estimates, but this should be at least a reasonable starting point.

As part of this, we also revert the revert of the ext4 IO pattern improvement that ended up triggering the reported lack of external entropy.

* getrandom() active entropy waiting:
  Revert "Revert "ext4: make __ext4_get_inode_loc plug""
  random: try to actively add entropy rather than passively wait for it
commit 3f2dc2798b
2 changed files with 64 additions and 1 deletion
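For context (illustrative only, not part of this commit), the getrandom() behavior the merge message complains about is easy to reproduce from user space: without flags the call blocks until the kernel's CRNG is initialized, while GRND_NONBLOCK turns the missing entropy into an EAGAIN error instead.

/* Illustrative user-space sketch -- not part of this commit. */
#include <errno.h>
#include <stdio.h>
#include <sys/random.h>

int main(void)
{
	unsigned char buf[16];

	/* With GRND_NONBLOCK we get -1/EAGAIN instead of blocking
	 * if the CRNG has not been initialized yet. */
	if (getrandom(buf, sizeof(buf), GRND_NONBLOCK) < 0 && errno == EAGAIN)
		fprintf(stderr, "CRNG not ready, would have blocked\n");

	/* Without flags, this blocks until the kernel considers the
	 * CRNG fully seeded -- the early-boot deadlock scenario. */
	if (getrandom(buf, sizeof(buf), 0) < 0)
		return 1;
	return 0;
}

During early boot on an otherwise idle machine, the unflagged call is exactly the wait that wait_for_random_bytes() services on the kernel side, which is what the change below targets.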
drivers/char/random.c
@@ -1732,6 +1732,56 @@ void get_random_bytes(void *buf, int nbytes)
 }
 EXPORT_SYMBOL(get_random_bytes);
 
+
+/*
+ * Each time the timer fires, we expect that we got an unpredictable
+ * jump in the cycle counter. Even if the timer is running on another
+ * CPU, the timer activity will be touching the stack of the CPU that is
+ * generating entropy..
+ *
+ * Note that we don't re-arm the timer in the timer itself - we are
+ * happy to be scheduled away, since that just makes the load more
+ * complex, but we do not want the timer to keep ticking unless the
+ * entropy loop is running.
+ *
+ * So the re-arming always happens in the entropy loop itself.
+ */
+static void entropy_timer(struct timer_list *t)
+{
+	credit_entropy_bits(&input_pool, 1);
+}
+
+/*
+ * If we have an actual cycle counter, see if we can
+ * generate enough entropy with timing noise
+ */
+static void try_to_generate_entropy(void)
+{
+	struct {
+		unsigned long now;
+		struct timer_list timer;
+	} stack;
+
+	stack.now = random_get_entropy();
+
+	/* Slow counter - or none. Don't even bother */
+	if (stack.now == random_get_entropy())
+		return;
+
+	timer_setup_on_stack(&stack.timer, entropy_timer, 0);
+	while (!crng_ready()) {
+		if (!timer_pending(&stack.timer))
+			mod_timer(&stack.timer, jiffies+1);
+		mix_pool_bytes(&input_pool, &stack.now, sizeof(stack.now));
+		schedule();
+		stack.now = random_get_entropy();
+	}
+
+	del_timer_sync(&stack.timer);
+	destroy_timer_on_stack(&stack.timer);
+	mix_pool_bytes(&input_pool, &stack.now, sizeof(stack.now));
+}
+
 /*
  * Wait for the urandom pool to be seeded and thus guaranteed to supply
  * cryptographically secure random numbers. This applies to: the /dev/urandom
@@ -1746,7 +1796,17 @@ int wait_for_random_bytes(void)
 {
 	if (likely(crng_ready()))
 		return 0;
-	return wait_event_interruptible(crng_init_wait, crng_ready());
+
+	do {
+		int ret;
+		ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready(), HZ);
+		if (ret)
+			return ret > 0 ? 0 : ret;
+
+		try_to_generate_entropy();
+	} while (!crng_ready());
+
+	return 0;
 }
 EXPORT_SYMBOL(wait_for_random_bytes);
 
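As a rough user-space analogue of the loop above (illustrative only, not part of the patch; sched_yield() and CLOCK_MONOTONIC stand in for schedule() and random_get_entropy()), the timing noise being harvested can be made visible by sampling a high-resolution clock across scheduling points:

/* Illustrative user-space sketch -- not part of this commit. */
#include <stdio.h>
#include <sched.h>
#include <time.h>

static unsigned long nsec_now(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (unsigned long)ts.tv_sec * 1000000000ul + ts.tv_nsec;
}

int main(void)
{
	unsigned long prev = nsec_now(), mixed = 0;

	for (int i = 0; i < 32; i++) {
		sched_yield();				/* stand-in for schedule() */
		unsigned long now = nsec_now();
		mixed ^= (mixed << 7) ^ (now - prev);	/* crude stand-in for mix_pool_bytes() */
		printf("delta %3d: %lu ns (low byte 0x%02lx)\n",
		       i, now - prev, (now - prev) & 0xff);
		prev = now;
	}
	printf("mixed value: %#lx\n", mixed);
	return 0;
}

On hardware with a usable cycle counter the per-iteration deltas wobble in their low bits even on an idle machine; that is the jitter the kernel loop mixes into the input pool and credits at one bit per timer firing in entropy_timer() above.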
fs/ext4/inode.c
@@ -4551,6 +4551,7 @@ static int __ext4_get_inode_loc(struct inode *inode,
 	struct buffer_head *bh;
 	struct super_block *sb = inode->i_sb;
 	ext4_fsblk_t block;
+	struct blk_plug plug;
 	int inodes_per_block, inode_offset;
 
 	iloc->bh = NULL;
@@ -4639,6 +4640,7 @@ make_io:
 		 * If we need to do any I/O, try to pre-readahead extra
 		 * blocks from the inode table.
 		 */
+		blk_start_plug(&plug);
 		if (EXT4_SB(sb)->s_inode_readahead_blks) {
 			ext4_fsblk_t b, end, table;
 			unsigned num;
@@ -4669,6 +4671,7 @@ make_io:
 		get_bh(bh);
 		bh->b_end_io = end_buffer_read_sync;
 		submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO, bh);
+		blk_finish_plug(&plug);
 		wait_on_buffer(bh);
 		if (!buffer_uptodate(bh)) {
 			EXT4_ERROR_INODE_BLOCK(inode, block,