diff mbox series

random: remove mostly unused async readiness notifier

Message ID 20220515131927.474097-1-Jason@zx2c4.com
State New
Headers show
Series random: remove mostly unused async readiness notifier | expand

Commit Message

Jason A. Donenfeld May 15, 2022, 1:19 p.m. UTC
The register_random_ready_notifier() notifier is somewhat complicated,
and was already recently rewritten to use notifier blocks. It is only
used now by one consumer in the kernel, vsprintf.c, for which the async
mechanism is really overly complex for what it actually needs. This
commit removes register_random_ready_notifier() and unregister_random_
ready_notifier(), because it just adds complication with little utility,
and changes vsprintf.c to just check on `!rng_is_initialized() &&
!rng_has_arch_random()`, which will eventually be true. Performance-
wise, that code was already using a static branch, so there's basically
no overhead at all to this change.

Cc: Petr Mladek <pmladek@suse.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
---
 drivers/char/random.c  | 48 --------------------------------
 include/linux/random.h |  2 --
 lib/vsprintf.c         | 63 ++++++++++++++----------------------------
 3 files changed, 20 insertions(+), 93 deletions(-)

Comments

Petr Mladek May 17, 2022, 9:04 a.m. UTC | #1
On Sun 2022-05-15 15:19:27, Jason A. Donenfeld wrote:
> The register_random_ready_notifier() notifier is somewhat complicated,
> and was already recently rewritten to use notifier blocks. It is only
> used now by one consumer in the kernel, vsprintf.c, for which the async
> mechanism is really overly complex for what it actually needs. This
> commit removes register_random_ready_notifier() and unregister_random_
> ready_notifier(), because it just adds complication with little utility,
> and changes vsprintf.c to just check on `!rng_is_initialized() &&
> !rng_has_arch_random()`, which will eventually be true. Performance-
> wise, that code was already using a static branch, so there's basically
> no overhead at all to this change.
> 
> Cc: Petr Mladek <pmladek@suse.com>
> Cc: Steven Rostedt <rostedt@goodmis.org>
> Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
> Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
> ---
>  drivers/char/random.c  | 48 --------------------------------
>  include/linux/random.h |  2 --
>  lib/vsprintf.c         | 63 ++++++++++++++----------------------------
>  3 files changed, 20 insertions(+), 93 deletions(-)
> 
> diff --git a/drivers/char/random.c b/drivers/char/random.c
> index 909c23f66fd8..f601856b7b66 100644
> --- a/drivers/char/random.c
> +++ b/drivers/char/random.c
> @@ -84,8 +84,6 @@ static DEFINE_STATIC_KEY_FALSE(crng_is_ready);
>  /* Various types of waiters for crng_init->CRNG_READY transition. */
>  static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
>  static struct fasync_struct *fasync;
> -static DEFINE_SPINLOCK(random_ready_chain_lock);
> -static RAW_NOTIFIER_HEAD(random_ready_chain);
>  
>  /* Control how we warn userspace. */
>  static struct ratelimit_state urandom_warning =
> @@ -142,51 +140,6 @@ int wait_for_random_bytes(void)
>  }
>  EXPORT_SYMBOL(wait_for_random_bytes);
>  
> -/*
> - * Add a callback function that will be invoked when the input
> - * pool is initialised.
> - *
> - * returns: 0 if callback is successfully added
> - *	    -EALREADY if pool is already initialised (callback not called)
> - */
> -int __cold register_random_ready_notifier(struct notifier_block *nb)
> -{
> -	unsigned long flags;
> -	int ret = -EALREADY;
> -
> -	if (crng_ready())
> -		return ret;
> -
> -	spin_lock_irqsave(&random_ready_chain_lock, flags);
> -	if (!crng_ready())
> -		ret = raw_notifier_chain_register(&random_ready_chain, nb);
> -	spin_unlock_irqrestore(&random_ready_chain_lock, flags);
> -	return ret;
> -}
> -
> -/*
> - * Delete a previously registered readiness callback function.
> - */
> -int __cold unregister_random_ready_notifier(struct notifier_block *nb)
> -{
> -	unsigned long flags;
> -	int ret;
> -
> -	spin_lock_irqsave(&random_ready_chain_lock, flags);
> -	ret = raw_notifier_chain_unregister(&random_ready_chain, nb);
> -	spin_unlock_irqrestore(&random_ready_chain_lock, flags);
> -	return ret;
> -}
> -
> -static void __cold process_random_ready_list(void)
> -{
> -	unsigned long flags;
> -
> -	spin_lock_irqsave(&random_ready_chain_lock, flags);
> -	raw_notifier_call_chain(&random_ready_chain, 0, NULL);
> -	spin_unlock_irqrestore(&random_ready_chain_lock, flags);
> -}
> -
>  #define warn_unseeded_randomness() \
>  	if (IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM) && !crng_ready()) \
>  		printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n", \
> @@ -706,7 +659,6 @@ static void __cold _credit_init_bits(size_t bits)
>  	if (orig < POOL_READY_BITS && new >= POOL_READY_BITS) {
>  		crng_reseed(); /* Sets crng_init to CRNG_READY under base_crng.lock. */
>  		execute_in_process_context(crng_set_ready, &set_ready);
> -		process_random_ready_list();
>  		wake_up_interruptible(&crng_init_wait);
>  		kill_fasync(&fasync, SIGIO, POLL_IN);
>  		pr_notice("crng init done\n");
> diff --git a/include/linux/random.h b/include/linux/random.h
> index 07d93c2a3b5f..fae0c84027fd 100644
> --- a/include/linux/random.h
> +++ b/include/linux/random.h
> @@ -76,8 +76,6 @@ int __init random_init(const char *command_line);
>  bool rng_is_initialized(void);
>  bool rng_has_arch_random(void);
>  int wait_for_random_bytes(void);
> -int register_random_ready_notifier(struct notifier_block *nb);
> -int unregister_random_ready_notifier(struct notifier_block *nb);
>  
>  /* Calls wait_for_random_bytes() and then calls get_random_bytes(buf, nbytes).
>   * Returns the result of the call to wait_for_random_bytes. */
> diff --git a/lib/vsprintf.c b/lib/vsprintf.c
> index 20e9887faaaa..ec67d36abfdb 100644
> --- a/lib/vsprintf.c
> +++ b/lib/vsprintf.c
> @@ -750,60 +750,37 @@ static int __init debug_boot_weak_hash_enable(char *str)
>  }
>  early_param("debug_boot_weak_hash", debug_boot_weak_hash_enable);
>  
> -static DEFINE_STATIC_KEY_TRUE(not_filled_random_ptr_key);
> -static siphash_key_t ptr_key __read_mostly;
> +static DEFINE_STATIC_KEY_FALSE(filled_random_ptr_key);
>  
>  static void enable_ptr_key_workfn(struct work_struct *work)
>  {
> -	get_random_bytes(&ptr_key, sizeof(ptr_key));
> -	/* Needs to run from preemptible context */
> -	static_branch_disable(&not_filled_random_ptr_key);
> +	static_branch_enable(&filled_random_ptr_key);
>  }
>  
> -static DECLARE_WORK(enable_ptr_key_work, enable_ptr_key_workfn);
> -
> -static int fill_random_ptr_key(struct notifier_block *nb,
> -			       unsigned long action, void *data)
> +/* Maps a pointer to a 32 bit unique identifier. */
> +static inline int __ptr_to_hashval(const void *ptr, unsigned long *hashval_out)
>  {
> -	/* This may be in an interrupt handler. */
> -	queue_work(system_unbound_wq, &enable_ptr_key_work);
> -	return 0;
> -}
> -
> -static struct notifier_block random_ready = {
> -	.notifier_call = fill_random_ptr_key
> -};
> +	static siphash_key_t ptr_key __read_mostly;
> +	unsigned long hashval;
>  
> -static int __init initialize_ptr_random(void)
> -{
> -	int ret;
> +	if (!static_branch_likely(&filled_random_ptr_key)) {
> +		static bool filled = false;
> +		static DEFINE_SPINLOCK(filling);
> +		static DECLARE_WORK(enable_ptr_key_work, enable_ptr_key_workfn);
> +		unsigned long flags;
>  
> -	/* Don't bother waiting for RNG to be ready if RDRAND is mixed in already. */
> -	if (rng_has_arch_random()) {
> -		enable_ptr_key_workfn(&enable_ptr_key_work);
> -		return 0;
> -	}
> +		if (!rng_is_initialized() && !rng_has_arch_random())
> +			return -EAGAIN;
>  
> -	ret = register_random_ready_notifier(&random_ready);
> -	if (!ret) {
> -		return 0;
> -	} else if (ret == -EALREADY) {
> -		/* This is in preemptible context */
> -		enable_ptr_key_workfn(&enable_ptr_key_work);
> -		return 0;
> +		spin_lock_irqsave(&filling, flags);
> +		if (!filled) {
> +			get_random_bytes(&ptr_key, sizeof(ptr_key));
> +			queue_work(system_unbound_wq, &enable_ptr_key_work);
> +			filled = true;
> +		}
> +		spin_unlock_irqrestore(&filling, flags);

I would go even further. The workqueue is needed only because we are not
able to switch the static branch in an atomic context.

But the static branch looks like an over-optimization.
vsprintf() is a slow path. It will be enough to use a normal
variable.

Well, it would be nice to check it without the spinlock to keep it
fast and avoid problems with the spin lock during panic().

What about?

/* Maps a pointer to a 32 bit unique identifier. */
static inline int __ptr_to_hashval(const void *ptr, unsigned long *hashval_out)
{
	static siphash_key_t ptr_key __read_mostly;
	static bool filled_random_ptr_key __read_mostly;
	unsigned long hashval;

	if (unlikely(!filled_random_ptr_key)) {
		static DEFINE_SPINLOCK(filling_lock);
		unsigned long flags;

		if (!rng_is_initialized() && !rng_has_arch_random())
			return -EAGAIN;

		spin_lock_irqsave(&filling_lock, flags);
		if (!filled_random_ptr_key) {
			get_random_bytes(&ptr_key, sizeof(ptr_key));
			/*
			 * Make sure that ptr_key is initialized before
			 * we tell the world about it.
			 */
			smp_wmb();
			filled_random_ptr_key = true;
		}
		spin_unlock_irqrestore(&filling_lock, flags);
	}

	/*
	 * filled_random_ptr_key was checked without the spin lock
	 * to make it fast and also safe in panic(). Make sure that
	 * we see the initialized ptr_key.
	 */
	 smp_rmb();

#ifdef CONFIG_64BIT
	hashval = (unsigned long)siphash_1u64((u64)ptr, &ptr_key);

	...


Alternative solution would be to use an atomic variable and
access it using atomic_read_acquire(&filled_random_ptr_key) and
atomic_set_release(&filled_random_ptr_key). It would hide
the memory barriers.

Well, your approach with static_key is fine as well. Feel free
to use:

Acked-by: Petr Mladek <pmladek@suse.com>

I would personally prefer the variant with the explicit
memory barriers because it seems to be the most easy to understand.
And we do not need to take care of workqueues during early boot.

Best Regards,
Petr
Jason A. Donenfeld May 17, 2022, 9:48 a.m. UTC | #2
Hi Petr,

On Tue, May 17, 2022 at 11:04:54AM +0200, Petr Mladek wrote:
> I would go even further. The workqueue is needed only because we are not
> able to switch the static branch in an atomic context.
> 
> But the static branch looks like an over-optimization.
> vsprintf() is a slow path. It will be enough to use a normal
> variable.
> 
> Well, it would be nice to check it without the spinlock to keep it
> fast and avoid problems with the spin lock during panic().
> 
> What about?

That all makes sense to me, but I'm a bit worried about changing too
much from the original design in a commit mostly intended on removing
things from random.c. Maybe we can do the patch I sent here, and then
once that lands in 5.19, we can do some more simplifications as
standalone commits that you can assess. Or if you're adamant about doing
this now, maybe you can send a patch that I can apply on _top_ of this
commit here?

The reason I'm a bit cautious is because I recall the original code from
Tobin way back had some smp_wmb() just like this, but it got removed and
replaced with that static branch. So at least somebody felt differently
about it. Which means it'll probably be a whole discussion with more
people, and I'm probably not the right person to lead that.

> Well, your approach with static_key is fine as well. Feel free
> to use:
> 
> Acked-by: Petr Mladek <pmladek@suse.com>

Okay, I'll do this. And then let's circle around the memory barriers
whenever you feel like it later.

Jason
Petr Mladek May 17, 2022, 10:46 a.m. UTC | #3
On Tue 2022-05-17 11:48:31, Jason A. Donenfeld wrote:
> Hi Petr,
> 
> On Tue, May 17, 2022 at 11:04:54AM +0200, Petr Mladek wrote:
> > I would go even further. The workqueue is needed only because we are not
> > able to switch the static branch in an atomic context.
> > 
> > But the static branch looks like an over-optimization.
> > vsprintf() is a slow path. It will be enough to use a normal
> > variable.
> > 
> > Well, it would be nice to check it without the spinlock to keep it
> > fast and avoid problems with the spin lock during panic().
> > 
> > What about?
> 
> That all makes sense to me, but I'm a bit worried about changing too
> much from the original design in a commit mostly intended on removing
> things from random.c. Maybe we can do the patch I sent here, and then
> once that lands in 5.19, we can do some more simplifications as
> standalone commits that you can assess. Or if you're adamant about doing
> this now, maybe you can send a patch that I can apply on _top_ of this
> commit here?
> 
> The reason I'm a bit cautious is because I recall the original code from
> Tobin way back had some smp_wmb() just like this, but it got removed and
> replaced with that static branch. So at least somebody felt differently
> about it. Which means it'll probably be a whole discussion with more
> people, and I'm probably not the right person to lead that.

Fair enough.

> > Well, your approach with static_key is fine as well. Feel free
> > to use:
> > 
> > Acked-by: Petr Mladek <pmladek@suse.com>
> 
> Okay, I'll do this. And then let's circle around the memory barriers
> whenever you feel like it later.

OK, let's stick with your version in 5.19.

I will later send a patch with the barriers when time permits.
But it will probably be for the next release.

Best Regards,
Petr
Petr Mladek May 18, 2022, 8:54 a.m. UTC | #4
On Sun 2022-05-15 15:19:27, Jason A. Donenfeld wrote:
> The register_random_ready_notifier() notifier is somewhat complicated,
> and was already recently rewritten to use notifier blocks. It is only
> used now by one consumer in the kernel, vsprintf.c, for which the async
> mechanism is really overly complex for what it actually needs. This
> commit removes register_random_ready_notifier() and unregister_random_
> ready_notifier(), because it just adds complication with little utility,
> and changes vsprintf.c to just check on `!rng_is_initialized() &&
> !rng_has_arch_random()`, which will eventually be true. Performance-
> wise, that code was already using a static branch, so there's basically
> no overhead at all to this change.
> 
> --- a/lib/vsprintf.c
> +++ b/lib/vsprintf.c
> @@ -750,60 +750,37 @@ static int __init debug_boot_weak_hash_enable(char *str)
>  }
>  early_param("debug_boot_weak_hash", debug_boot_weak_hash_enable);
>  
> -static DEFINE_STATIC_KEY_TRUE(not_filled_random_ptr_key);
> -static siphash_key_t ptr_key __read_mostly;
> +static DEFINE_STATIC_KEY_FALSE(filled_random_ptr_key);
>  
>  static void enable_ptr_key_workfn(struct work_struct *work)
>  {
> -	get_random_bytes(&ptr_key, sizeof(ptr_key));
> -	/* Needs to run from preemptible context */
> -	static_branch_disable(&not_filled_random_ptr_key);
> +	static_branch_enable(&filled_random_ptr_key);
>  }
>  
> -static DECLARE_WORK(enable_ptr_key_work, enable_ptr_key_workfn);
> -
> -static int fill_random_ptr_key(struct notifier_block *nb,
> -			       unsigned long action, void *data)
> +/* Maps a pointer to a 32 bit unique identifier. */
> +static inline int __ptr_to_hashval(const void *ptr, unsigned long *hashval_out)
>  {
> -	/* This may be in an interrupt handler. */
> -	queue_work(system_unbound_wq, &enable_ptr_key_work);
> -	return 0;
> -}
> -
> -static struct notifier_block random_ready = {
> -	.notifier_call = fill_random_ptr_key
> -};
> +	static siphash_key_t ptr_key __read_mostly;
> +	unsigned long hashval;
>  
> -static int __init initialize_ptr_random(void)
> -{
> -	int ret;
> +	if (!static_branch_likely(&filled_random_ptr_key)) {
> +		static bool filled = false;
> +		static DEFINE_SPINLOCK(filling);
> +		static DECLARE_WORK(enable_ptr_key_work, enable_ptr_key_workfn);
> +		unsigned long flags;
>  
> -	/* Don't bother waiting for RNG to be ready if RDRAND is mixed in already. */
> -	if (rng_has_arch_random()) {
> -		enable_ptr_key_workfn(&enable_ptr_key_work);
> -		return 0;
> -	}
> +		if (!rng_is_initialized() && !rng_has_arch_random())
> +			return -EAGAIN;
>  
> -	ret = register_random_ready_notifier(&random_ready);
> -	if (!ret) {
> -		return 0;
> -	} else if (ret == -EALREADY) {
> -		/* This is in preemptible context */
> -		enable_ptr_key_workfn(&enable_ptr_key_work);
> -		return 0;
> +		spin_lock_irqsave(&filling, flags);

I thought more about this and there is a small risk of a deadlock
when get_random_bytes() or queue_work() or NMI calls
printk()/vsprintf() with %p here.

A simple solution would be to use trylock():

		if (!spin_trylock_irqsave(&filling, flags))
			return -EDEADLK;

Could we do this change, please?

I do not mind if it will be done by re-spinning the original
patch or another patch on top of it.

Best Regards,
Petr


> +		if (!filled) {
> +			get_random_bytes(&ptr_key, sizeof(ptr_key));
> +			queue_work(system_unbound_wq, &enable_ptr_key_work);
> +			filled = true;
> +		}
> +		spin_unlock_irqrestore(&filling, flags);
>  	}
>  
> -	return ret;
> -}
> -early_initcall(initialize_ptr_random);
> -
> -/* Maps a pointer to a 32 bit unique identifier. */
> -static inline int __ptr_to_hashval(const void *ptr, unsigned long *hashval_out)
> -{
> -	unsigned long hashval;
> -
> -	if (static_branch_unlikely(&not_filled_random_ptr_key))
> -		return -EAGAIN;
>  
>  #ifdef CONFIG_64BIT
>  	hashval = (unsigned long)siphash_1u64((u64)ptr, &ptr_key);
Jason A. Donenfeld May 18, 2022, 9:52 a.m. UTC | #5
Hi Petr,

On Wed, May 18, 2022 at 10:54:22AM +0200, Petr Mladek wrote:
> > +		spin_lock_irqsave(&filling, flags);
> 
> I thought more about this and there is a small risk of a deadlock
> when get_random_bytes() or queue_work() or NMI calls
> printk()/vsprintf() with %p here.
> 
> A simple solution would be to use trylock():
> 
> 		if (!spin_trylock_irqsave(&filling, flags))
> 			return -EDEADLK;
> 
> Could we do this change, please?
> 
> I do not mind if it will be done by re-spinning the original
> patch or another patch on top of it.

Interesting consideration. Sure, I'll do exactly that and send a v2.

Jason
Petr Mladek May 19, 2022, 7:10 a.m. UTC | #6
On Wed 2022-05-18 11:56:58, Jason A. Donenfeld wrote:
> The register_random_ready_notifier() notifier is somewhat complicated,
> and was already recently rewritten to use notifier blocks. It is only
> used now by one consumer in the kernel, vsprintf.c, for which the async
> mechanism is really overly complex for what it actually needs. This
> commit removes register_random_ready_notifier() and unregister_random_
> ready_notifier(), because it just adds complication with little utility,
> and changes vsprintf.c to just check on `!rng_is_initialized() &&
> !rng_has_arch_random()`, which will eventually be true. Performance-
> wise, that code was already using a static branch, so there's basically
> no overhead at all to this change.
> 
> Cc: Petr Mladek <pmladek@suse.com>
> Cc: Steven Rostedt <rostedt@goodmis.org>
> Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
> Acked-by: Petr Mladek <pmladek@suse.com> # for vsprintf.c
> Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
> ---
> Changes v1->v2:
> - Use a trylock instead of a spinlock to be NMI safe.

Looks good to me. My ack is already there. Feel free to use:

Reviewed-by: Petr Mladek <pmladek@suse.com>

if you would prefer it.

Thanks a lot for updating the patch.

Best Regards,
Petr
diff mbox series

Patch

diff --git a/drivers/char/random.c b/drivers/char/random.c
index 909c23f66fd8..f601856b7b66 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -84,8 +84,6 @@  static DEFINE_STATIC_KEY_FALSE(crng_is_ready);
 /* Various types of waiters for crng_init->CRNG_READY transition. */
 static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
 static struct fasync_struct *fasync;
-static DEFINE_SPINLOCK(random_ready_chain_lock);
-static RAW_NOTIFIER_HEAD(random_ready_chain);
 
 /* Control how we warn userspace. */
 static struct ratelimit_state urandom_warning =
@@ -142,51 +140,6 @@  int wait_for_random_bytes(void)
 }
 EXPORT_SYMBOL(wait_for_random_bytes);
 
-/*
- * Add a callback function that will be invoked when the input
- * pool is initialised.
- *
- * returns: 0 if callback is successfully added
- *	    -EALREADY if pool is already initialised (callback not called)
- */
-int __cold register_random_ready_notifier(struct notifier_block *nb)
-{
-	unsigned long flags;
-	int ret = -EALREADY;
-
-	if (crng_ready())
-		return ret;
-
-	spin_lock_irqsave(&random_ready_chain_lock, flags);
-	if (!crng_ready())
-		ret = raw_notifier_chain_register(&random_ready_chain, nb);
-	spin_unlock_irqrestore(&random_ready_chain_lock, flags);
-	return ret;
-}
-
-/*
- * Delete a previously registered readiness callback function.
- */
-int __cold unregister_random_ready_notifier(struct notifier_block *nb)
-{
-	unsigned long flags;
-	int ret;
-
-	spin_lock_irqsave(&random_ready_chain_lock, flags);
-	ret = raw_notifier_chain_unregister(&random_ready_chain, nb);
-	spin_unlock_irqrestore(&random_ready_chain_lock, flags);
-	return ret;
-}
-
-static void __cold process_random_ready_list(void)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&random_ready_chain_lock, flags);
-	raw_notifier_call_chain(&random_ready_chain, 0, NULL);
-	spin_unlock_irqrestore(&random_ready_chain_lock, flags);
-}
-
 #define warn_unseeded_randomness() \
 	if (IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM) && !crng_ready()) \
 		printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n", \
@@ -706,7 +659,6 @@  static void __cold _credit_init_bits(size_t bits)
 	if (orig < POOL_READY_BITS && new >= POOL_READY_BITS) {
 		crng_reseed(); /* Sets crng_init to CRNG_READY under base_crng.lock. */
 		execute_in_process_context(crng_set_ready, &set_ready);
-		process_random_ready_list();
 		wake_up_interruptible(&crng_init_wait);
 		kill_fasync(&fasync, SIGIO, POLL_IN);
 		pr_notice("crng init done\n");
diff --git a/include/linux/random.h b/include/linux/random.h
index 07d93c2a3b5f..fae0c84027fd 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -76,8 +76,6 @@  int __init random_init(const char *command_line);
 bool rng_is_initialized(void);
 bool rng_has_arch_random(void);
 int wait_for_random_bytes(void);
-int register_random_ready_notifier(struct notifier_block *nb);
-int unregister_random_ready_notifier(struct notifier_block *nb);
 
 /* Calls wait_for_random_bytes() and then calls get_random_bytes(buf, nbytes).
  * Returns the result of the call to wait_for_random_bytes. */
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 20e9887faaaa..ec67d36abfdb 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -750,60 +750,37 @@  static int __init debug_boot_weak_hash_enable(char *str)
 }
 early_param("debug_boot_weak_hash", debug_boot_weak_hash_enable);
 
-static DEFINE_STATIC_KEY_TRUE(not_filled_random_ptr_key);
-static siphash_key_t ptr_key __read_mostly;
+static DEFINE_STATIC_KEY_FALSE(filled_random_ptr_key);
 
 static void enable_ptr_key_workfn(struct work_struct *work)
 {
-	get_random_bytes(&ptr_key, sizeof(ptr_key));
-	/* Needs to run from preemptible context */
-	static_branch_disable(&not_filled_random_ptr_key);
+	static_branch_enable(&filled_random_ptr_key);
 }
 
-static DECLARE_WORK(enable_ptr_key_work, enable_ptr_key_workfn);
-
-static int fill_random_ptr_key(struct notifier_block *nb,
-			       unsigned long action, void *data)
+/* Maps a pointer to a 32 bit unique identifier. */
+static inline int __ptr_to_hashval(const void *ptr, unsigned long *hashval_out)
 {
-	/* This may be in an interrupt handler. */
-	queue_work(system_unbound_wq, &enable_ptr_key_work);
-	return 0;
-}
-
-static struct notifier_block random_ready = {
-	.notifier_call = fill_random_ptr_key
-};
+	static siphash_key_t ptr_key __read_mostly;
+	unsigned long hashval;
 
-static int __init initialize_ptr_random(void)
-{
-	int ret;
+	if (!static_branch_likely(&filled_random_ptr_key)) {
+		static bool filled = false;
+		static DEFINE_SPINLOCK(filling);
+		static DECLARE_WORK(enable_ptr_key_work, enable_ptr_key_workfn);
+		unsigned long flags;
 
-	/* Don't bother waiting for RNG to be ready if RDRAND is mixed in already. */
-	if (rng_has_arch_random()) {
-		enable_ptr_key_workfn(&enable_ptr_key_work);
-		return 0;
-	}
+		if (!rng_is_initialized() && !rng_has_arch_random())
+			return -EAGAIN;
 
-	ret = register_random_ready_notifier(&random_ready);
-	if (!ret) {
-		return 0;
-	} else if (ret == -EALREADY) {
-		/* This is in preemptible context */
-		enable_ptr_key_workfn(&enable_ptr_key_work);
-		return 0;
+		spin_lock_irqsave(&filling, flags);
+		if (!filled) {
+			get_random_bytes(&ptr_key, sizeof(ptr_key));
+			queue_work(system_unbound_wq, &enable_ptr_key_work);
+			filled = true;
+		}
+		spin_unlock_irqrestore(&filling, flags);
 	}
 
-	return ret;
-}
-early_initcall(initialize_ptr_random);
-
-/* Maps a pointer to a 32 bit unique identifier. */
-static inline int __ptr_to_hashval(const void *ptr, unsigned long *hashval_out)
-{
-	unsigned long hashval;
-
-	if (static_branch_unlikely(&not_filled_random_ptr_key))
-		return -EAGAIN;
 
 #ifdef CONFIG_64BIT
 	hashval = (unsigned long)siphash_1u64((u64)ptr, &ptr_key);