author	Hannes Frederic Sowa <hannes@stressinduktion.org>	2013-10-19 15:48:55 -0400
committer	David S. Miller <davem@davemloft.net>	2013-10-19 19:45:35 -0400
commit	a48e42920ff38bc90bbf75143fff4555723d4540 (patch)
tree	22143fc9556bf3f864a57012b4954d78c8ab04aa	/net/core/utils.c
parent	a8fab0744585c1ab61009bfc1a1958f28e1c864f (diff)
net: introduce new macro net_get_random_once
net_get_random_once is a new macro which handles the initialization of secret keys. It is possible to call it in the fast path. Only the initialization depends on the spinlock and is rather slow. Otherwise it should get used just before the key is used, to delay the entropy extraction as late as possible and get better randomness. It returns true if the key got initialized.

The usage of static_keys for net_get_random_once is a bit uncommon, so it needs some further explanation why this actually works:

=== In the simple non-HAVE_JUMP_LABEL case we actually have ===
no constraints on using static_key_(true|false) on keys initialized with STATIC_KEY_INIT_(FALSE|TRUE). So this path just expands in favor of the likely case that the initialization is already done. The key is initialized like this:

	___done_key = { .enabled = ATOMIC_INIT(0) }

The check

	if (!static_key_true(&___done_key))			\

expands into (pseudo code)

	if (!likely(___done_key > 0))

so we take the fast path as soon as ___done_key is increased from the helper function.

=== If HAVE_JUMP_LABELs are available this depends ===
on patching of jumps into the prepared NOPs, which is done in jump_label_init at boot-up time (from start_kernel). It is forbidden and dangerous to use net_get_random_once in functions which are called before that!

At compilation time NOPs are generated at the call sites of net_get_random_once. E.g. net/ipv6/inet6_hashtable.c:inet6_ehashfn (we need to call net_get_random_once two times in inet6_ehashfn, so two NOPs):

	71: 0f 1f 44 00 00	nopl 0x0(%rax,%rax,1)
	76: 0f 1f 44 00 00	nopl 0x0(%rax,%rax,1)

Both will be patched to the actual jumps to the end of the function to call __net_get_random_once at boot time, as explained above.

arch_static_branch is optimized and inlined for false as return value and actually also returns false in case the NOP is placed in the instruction stream. So in the fast case we get a "return false". But because we initialize ___done_key with (enabled != (entries & 1)), this call site will get patched up at boot, thus returning true. The final check looks like this:

	if (!static_key_true(&___done_key))			\
		___ret = __net_get_random_once(buf,		\

which expands to

	if (!!static_key_false(&___done_key))			\
		___ret = __net_get_random_once(buf,		\

So we get true at boot time, and as soon as static_key_slow_inc is called on the key it will invert the logic and return false for the fast path. static_key_slow_inc will change the branch because it got initialized with .enabled == 0. After static_key_slow_inc is called on the key, the branch is replaced with a nop again.

=== Misc: ===
The helper defers the increment into a workqueue so we don't have problems calling this code from atomic sections. A separate boolean (___done) guards the case where we enter net_get_random_once again before the increment happened.

Cc: Ingo Molnar <mingo@redhat.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Jason Baron <jbaron@redhat.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Eric Dumazet <edumazet@google.com>
Cc: "David S. Miller" <davem@davemloft.net>
Signed-off-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
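For context, the caller-side macro itself is added elsewhere in the same patch and does not appear in the diff below, which is limited to net/core/utils.c. A minimal sketch of what the message above describes could look like the following; the initializer name ___NET_RANDOM_STATIC_KEY_INIT is illustrative, while the behaviour it stands for (enabled != (entries & 1) under HAVE_JUMP_LABEL, .enabled == 0 otherwise) is taken from the explanation above:

	/* Sketch only -- reconstructed from the commit message, not from
	 * the diff shown below. */
	#ifdef HAVE_JUMP_LABEL
	#define ___NET_RANDOM_STATIC_KEY_INIT ((struct static_key) \
			{ .enabled = ATOMIC_INIT(0), .entries = (void *)1 })
	#else /* !HAVE_JUMP_LABEL */
	#define ___NET_RANDOM_STATIC_KEY_INIT STATIC_KEY_INIT_FALSE
	#endif /* HAVE_JUMP_LABEL */

	#define net_get_random_once(buf, nbytes)			\
		({							\
			bool ___ret = false;				\
			static bool ___done = false;			\
			static struct static_key ___done_key =		\
				___NET_RANDOM_STATIC_KEY_INIT;		\
			if (!static_key_true(&___done_key))		\
				___ret = __net_get_random_once(buf,	\
							       nbytes,	\
							       &___done,	\
							       &___done_key);	\
			___ret;						\
		})

The ({ ... }) statement expression gives every call site its own static ___done and ___done_key, so each secret is initialized exactly once, independently of any other user of the macro.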
Diffstat (limited to 'net/core/utils.c')
-rw-r--r--	net/core/utils.c | 48
1 files changed, 48 insertions(+), 0 deletions(-)
diff --git a/net/core/utils.c b/net/core/utils.c
index aa88e23fc87a..bf09371e19b1 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -338,3 +338,51 @@ void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
 		csum_unfold(*sum)));
 }
 EXPORT_SYMBOL(inet_proto_csum_replace16);
+
+struct __net_random_once_work {
+	struct work_struct work;
+	struct static_key *key;
+};
+
+static void __net_random_once_deferred(struct work_struct *w)
+{
+	struct __net_random_once_work *work =
+		container_of(w, struct __net_random_once_work, work);
+	if (!static_key_enabled(work->key))
+		static_key_slow_inc(work->key);
+	kfree(work);
+}
+
+static void __net_random_once_disable_jump(struct static_key *key)
+{
+	struct __net_random_once_work *w;
+
+	w = kmalloc(sizeof(*w), GFP_ATOMIC);
+	if (!w)
+		return;
+
+	INIT_WORK(&w->work, __net_random_once_deferred);
+	w->key = key;
+	schedule_work(&w->work);
+}
+
+bool __net_get_random_once(void *buf, int nbytes, bool *done,
+			   struct static_key *done_key)
+{
+	static DEFINE_SPINLOCK(lock);
+
+	spin_lock_bh(&lock);
+	if (*done) {
+		spin_unlock_bh(&lock);
+		return false;
+	}
+
+	get_random_bytes(buf, nbytes);
+	*done = true;
+	spin_unlock_bh(&lock);
+
+	__net_random_once_disable_jump(done_key);
+
+	return true;
+}
+EXPORT_SYMBOL(__net_get_random_once);
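A hypothetical call site, modeled on the inet6_ehashfn example mentioned in the commit message: the secret is filled exactly once, lazily, and every later call takes the patched fast path. The function and variable names below are illustrative, not part of this patch, and the header carrying net_get_random_once is assumed here to be linux/net.h:

	#include <linux/jhash.h>
	#include <linux/net.h>	/* assumed home of net_get_random_once() */

	static u32 example_hashfn(const void *data, u32 len)
	{
		/* illustrative per-boot secret, initialized on first use */
		static u32 example_secret;

		net_get_random_once(&example_secret, sizeof(example_secret));
		return jhash(data, len, example_secret);
	}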