author     Steven Rostedt (VMware) <rostedt@goodmis.org>   2018-05-15 22:24:52 -0400
committer  Steven Rostedt (VMware) <rostedt@goodmis.org>   2018-05-16 09:01:41 -0400
commit     85f4f12d51397f1648e1f4350f77e24039b82d61
tree       7913a89f385849f276a99c2fbfa2a1e345d3ddbd   /lib/vsprintf.c
parent     67b8d5c7081221efa252e111cd52532ec6d4266f
vsprintf: Replace memory barrier with static_key for random_ptr_key update
Reviewing Tobin's patches for getting pointers out early before
entropy has been established, I noticed that there's a lone smp_mb() in
the code. As with most lone memory barriers, this one appears to be
incorrectly used.
We currently basically have this:
	get_random_bytes(&ptr_key, sizeof(ptr_key));
	/*
	 * have_filled_random_ptr_key==true is dependent on get_random_bytes().
	 * ptr_to_id() needs to see have_filled_random_ptr_key==true
	 * after get_random_bytes() returns.
	 */
	smp_mb();
	WRITE_ONCE(have_filled_random_ptr_key, true);
And later we have:
	if (unlikely(!have_filled_random_ptr_key))
		return string(buf, end, "(ptrval)", spec);

	/* Missing memory barrier here. */
	hashval = (unsigned long)siphash_1u64((u64)ptr, &ptr_key);
As the CPU can perform speculative loads, we could have a situation
with the following:
   CPU0                                 CPU1
   ----                                 ----
                                        load ptr_key = 0
   store ptr_key = random
   smp_mb()
   store have_filled_random_ptr_key

                                        load have_filled_random_ptr_key = true

                                        BAD BAD BAD! (you're so bad!)
Because nothing prevents CPU1 from loading ptr_key before loading
have_filled_random_ptr_key.
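For the writer's smp_mb() to order anything for the reader, the reader side
would need a pairing barrier between its two loads, roughly as follows (a
sketch of the required pairing only, not what this patch does):

	if (unlikely(!READ_ONCE(have_filled_random_ptr_key)))
		return string(buf, end, "(ptrval)", spec);

	/* Pairs with the smp_mb() after get_random_bytes() */
	smp_rmb();
	hashval = (unsigned long)siphash_1u64((u64)ptr, &ptr_key);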
This race is very unlikely, but we also can't keep an incorrect smp_mb() in
place. Instead, replace have_filled_random_ptr_key with a static_branch,
not_filled_random_ptr_key, which is initialized to true and switched to false
once we have enough entropy. If the update happens in early boot, the
static_key is updated immediately; otherwise the update has to wait until
entropy has been established, which happens in an interrupt handler that
cannot enable a static_key, since enabling one requires a preemptible
context. In that case a work queue is used to enable it; as entropy already
took too long to establish in the first place, waiting a little longer
shouldn't hurt anything.
The benefit of using the static key is that the unlikely branch in
vsprintf() now becomes a nop.
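In outline, the new scheme looks like this (a condensed sketch assembled from
the patch below, not a complete listing):

	static DEFINE_STATIC_KEY_TRUE(not_filled_random_ptr_key);

	static void enable_ptr_key_workfn(struct work_struct *work)
	{
		get_random_bytes(&ptr_key, sizeof(ptr_key));
		/* Patching the branch requires preemptible context */
		static_branch_disable(&not_filled_random_ptr_key);
	}
	static DECLARE_WORK(enable_ptr_key_work, enable_ptr_key_workfn);

	static void fill_random_ptr_key(struct random_ready_callback *unused)
	{
		/* May run from an interrupt handler: defer to a workqueue */
		queue_work(system_unbound_wq, &enable_ptr_key_work);
	}

	/* In ptr_to_id(), this test compiles down to a nop once the
	 * static key has been disabled: */
	if (static_branch_unlikely(&not_filled_random_ptr_key))
		return string(buf, end, "(ptrval)", spec);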
Link: http://lkml.kernel.org/r/20180515100558.21df515e@gandalf.local.home
Cc: stable@vger.kernel.org
Fixes: ad67b74d2469d ("printk: hash addresses printed with %p")
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Diffstat (limited to 'lib/vsprintf.c')
-rw-r--r--	lib/vsprintf.c	26
1 file changed, 15 insertions(+), 11 deletions(-)
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 30c0cb8cc9bc..23920c5ff728 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -1669,19 +1669,22 @@ char *pointer_string(char *buf, char *end, const void *ptr,
 	return number(buf, end, (unsigned long int)ptr, spec);
 }
 
-static bool have_filled_random_ptr_key __read_mostly;
+static DEFINE_STATIC_KEY_TRUE(not_filled_random_ptr_key);
 static siphash_key_t ptr_key __read_mostly;
 
-static void fill_random_ptr_key(struct random_ready_callback *unused)
+static void enable_ptr_key_workfn(struct work_struct *work)
 {
 	get_random_bytes(&ptr_key, sizeof(ptr_key));
-	/*
-	 * have_filled_random_ptr_key==true is dependent on get_random_bytes().
-	 * ptr_to_id() needs to see have_filled_random_ptr_key==true
-	 * after get_random_bytes() returns.
-	 */
-	smp_mb();
-	WRITE_ONCE(have_filled_random_ptr_key, true);
+	/* Needs to run from preemptible context */
+	static_branch_disable(&not_filled_random_ptr_key);
+}
+
+static DECLARE_WORK(enable_ptr_key_work, enable_ptr_key_workfn);
+
+static void fill_random_ptr_key(struct random_ready_callback *unused)
+{
+	/* This may be in an interrupt handler. */
+	queue_work(system_unbound_wq, &enable_ptr_key_work);
 }
 
 static struct random_ready_callback random_ready = {
@@ -1695,7 +1698,8 @@ static int __init initialize_ptr_random(void)
 	if (!ret) {
 		return 0;
 	} else if (ret == -EALREADY) {
-		fill_random_ptr_key(&random_ready);
+		/* This is in preemptible context */
+		enable_ptr_key_workfn(&enable_ptr_key_work);
 		return 0;
 	}
 
@@ -1709,7 +1713,7 @@ static char *ptr_to_id(char *buf, char *end, void *ptr, struct printf_spec spec)
 	unsigned long hashval;
 	const int default_width = 2 * sizeof(ptr);
 
-	if (unlikely(!have_filled_random_ptr_key)) {
+	if (static_branch_unlikely(&not_filled_random_ptr_key)) {
 		spec.field_width = default_width;
 		/* string length must be less than default_width */
 		return string(buf, end, "(ptrval)", spec);