Dear RT folks!

I'm pleased to announce the v5.17-rc3-rt6 patch set.

Changes since v5.17-rc3-rt5:

  - Update John's printk series. It now supports direct printing from
    irq_work.

  - Correct atomic access to a variable in printk. Patch by John
    Ogness.

  - Add a warning if there is a ksoftirqd wakeup from idle.

  - Jason A. Donenfeld's patches against the random subsystem were
    updated to v4. There is an additional RT-related change on top.

  - The known issue "netconsole triggers WARN" has been removed from
    the list since it also triggers with CONFIG_PREEMPT and v5.17-rc3
    (without the PREEMPT_RT patch).

Known issues

  - Valentin Schneider reported a few splats on ARM64, see
    https://lkml.kernel.org/r/20210810134127.1394269-1-valentin.schneider@xxxxxxx

The delta patch against v5.17-rc3-rt5 is appended below and can be
found here:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.17/incr/patch-5.17-rc3-rt5-rt6.patch.xz

You can get this release via the git tree at:

    git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v5.17-rc3-rt6

The RT patch against v5.17-rc3 can be found here:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.17/older/patch-5.17-rc3-rt6.patch.xz

The split quilt queue is available at:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.17/older/patches-5.17-rc3-rt6.tar.xz

Sebastian

diff --git a/drivers/char/random.c b/drivers/char/random.c
index afcedefb0c1c8..e4bde9c917654 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -404,7 +404,7 @@ struct fast_pool {
 	struct work_struct mix;
 	unsigned long last;
 	u32 pool[4];
-	atomic_t count;
+	unsigned int count;
 	u16 reg_idx;
 };
 #define FAST_POOL_MIX_INFLIGHT (1U << 31)
@@ -1045,25 +1045,33 @@ static u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
 static void mix_interrupt_randomness(struct work_struct *work)
 {
 	struct fast_pool *fast_pool = container_of(work, struct fast_pool, mix);
+	u8 pool[sizeof(fast_pool->pool)];
 
-	fast_pool->last = jiffies;
+	if (unlikely(crng_init == 0)) {
+		size_t ret;
 
-	/* Since this is the result of a trip through the scheduler, xor in
+		ret = crng_fast_load((u8 *)fast_pool->pool, sizeof(fast_pool->pool));
+		if (ret) {
+			WRITE_ONCE(fast_pool->count, 0);
+			fast_pool->last = jiffies;
+			return;
+		}
+	}
+
+	/*
+	 * Since this is the result of a trip through the scheduler, xor in
 	 * a cycle counter. It can't hurt, and might help.
 	 */
 	fast_pool->pool[3] ^= random_get_entropy();
+	/* Copy the pool to the stack so that the mixer always has a consistent view. */
+	memcpy(pool, fast_pool->pool, sizeof(pool));
+	/* We take care to zero out the count only after we're done reading the pool. */
+	WRITE_ONCE(fast_pool->count, 0);
+	fast_pool->last = jiffies;
 
-	if (unlikely(crng_init == 0)) {
-		if (crng_fast_load((u8 *)&fast_pool->pool, sizeof(fast_pool->pool)) > 0)
-			atomic_set(&fast_pool->count, 0);
-		else
-			atomic_and(~FAST_POOL_MIX_INFLIGHT, &fast_pool->count);
-		return;
-	}
-
-	mix_pool_bytes(&fast_pool->pool, sizeof(fast_pool->pool));
-	atomic_set(&fast_pool->count, 0);
+	mix_pool_bytes(pool, sizeof(pool));
 	credit_entropy_bits(1);
+	memzero_explicit(pool, sizeof(pool));
 }
 
 void add_interrupt_randomness(int irq)
@@ -1089,15 +1097,33 @@ void add_interrupt_randomness(int irq)
 	fast_mix(fast_pool);
 	add_interrupt_bench(cycles);
 
+	new_cou>
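
To make the ordering trick in the mix_interrupt_randomness() hunk above
easier to see in isolation, here is a minimal userspace sketch of the same
pattern. It is not the kernel code: fast_pool_demo, mix_demo and
WRITE_ONCE_DEMO are invented names, and the macro is only a crude stand-in
for the kernel's WRITE_ONCE(). The worker snapshots the shared pool onto
the stack, clears ->count only after the copy, and wipes the stack copy
when done:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct fast_pool_demo {
	uint32_t pool[4];
	unsigned int count;	/* plain int, as in the patch; was atomic_t */
};

/* Stand-in for the kernel's WRITE_ONCE(): a single volatile store. */
#define WRITE_ONCE_DEMO(x, val) (*(volatile __typeof__(x) *)&(x) = (val))

static void mix_demo(struct fast_pool_demo *fp)
{
	uint8_t pool[sizeof(fp->pool)];

	/* Copy the pool to the stack so the mixer has a consistent view. */
	memcpy(pool, fp->pool, sizeof(pool));

	/* Zero the count only after we are done reading the pool. */
	WRITE_ONCE_DEMO(fp->count, 0);

	/* Here the kernel would call mix_pool_bytes(pool, sizeof(pool)). */
	for (size_t i = 0; i < sizeof(pool); i++)
		printf("%02x", pool[i]);
	putchar('\n');

	/* Wipe the stack copy, mirroring memzero_explicit(). */
	memset(pool, 0, sizeof(pool));
	__asm__ __volatile__("" : : "r"(pool) : "memory");
}

int main(void)
{
	struct fast_pool_demo fp = { .pool = { 1, 2, 3, 4 }, .count = 64 };

	mix_demo(&fp);
	printf("count after mix: %u\n", fp.count);
	return 0;
}

Clearing ->count last matters because the hard-IRQ side treats a non-zero
count as "mixing already in flight": if the worker zeroed it before the
memcpy(), a new interrupt could refill the pool while it is still being
read and the mixer would see a torn view. In the kernel this gets by
without atomics because the work item is expected to run on the CPU that
owns the per-CPU fast_pool; the sketch above illustrates only the store
ordering, not any cross-CPU guarantee.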