author     David S. Miller <davem@davemloft.net>  2013-10-19 19:45:46 -0400
committer  David S. Miller <davem@davemloft.net>  2013-10-19 19:45:46 -0400
commit     7dcade390860712551a4feb080911d5002226188 (patch)
tree       68768a3e760a90b2b13a8e7a83d573c0e63a4a35 /net/core/utils.c
parent     53481da372851a5506deb5247302f75459b472b4 (diff)
parent     e34c9a69970d8664a36b46e6445a7cc879111cfd (diff)
Merge branch 'net_get_random_once'
Hannes Frederic Sowa says:

====================
This series implements support for delaying the initialization of secret
keys, e.g. used for hashing, for as long as possible. This functionality
is implemented by a new macro, net_get_random_once. I already used it to
protect the socket hashes, the syncookie secret (most important) and the
tcp_fastopen secrets.

Changelog:
v2) Use static_keys in net_get_random_once to have as minimal impact to
    the fast-path as possible.
v3) added patch "static_key: WARN on usage before jump_label_init was
    called": Patch "x86/jump_label: expect default_nop if static_key
    gets enabled on boot-up" relaxes the checks for using static_key
    primitives before jump_label_init. So tighten them first.
v4) Update changelog on the patch "static_key: WARN on usage before
    jump_label_init was called"

Included patches:
ipv4: split inet_ehashfn to hash functions per compilation unit
ipv6: split inet6_ehashfn to hash functions per compilation unit
static_key: WARN on usage before jump_label_init was called
x86/jump_label: expect default_nop if static_key gets enabled on boot-up
net: introduce new macro net_get_random_once
inet: split syncookie keys for ipv4 and ipv6 and initialize with net_get_random_once
inet: convert inet_ehash_secret and ipv6_hash_secret to net_get_random_once
tcp: switch tcp_fastopen key generation to net_get_random_once
net: switch net_secret key generation to net_get_random_once
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
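For context, a caller of the new macro looks roughly like the sketch below.
This is a hypothetical example, not code from this merge; the names
example_hash_secret and example_hashfn are illustrative. The point of the
series is that the secret is generated on first use rather than at boot or
module init:

#include <linux/jhash.h>
#include <linux/net.h>

/* Secret for an example hash function, filled in lazily. */
static u32 example_hash_secret __read_mostly;

static u32 example_hashfn(const void *data, u32 len)
{
	/* Generates the secret exactly once, on the first call;
	 * after the static_key flips, this is a no-op branch. */
	net_get_random_once(&example_hash_secret,
			    sizeof(example_hash_secret));
	return jhash(data, len, example_hash_secret);
}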
Diffstat (limited to 'net/core/utils.c')
-rw-r--r--  net/core/utils.c  48
1 file changed, 48 insertions(+), 0 deletions(-)
diff --git a/net/core/utils.c b/net/core/utils.c
index aa88e23fc87a..bf09371e19b1 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -338,3 +338,51 @@ void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
 				 csum_unfold(*sum)));
 }
 EXPORT_SYMBOL(inet_proto_csum_replace16);
+
+struct __net_random_once_work {
+	struct work_struct work;
+	struct static_key *key;
+};
+
+static void __net_random_once_deferred(struct work_struct *w)
+{
+	struct __net_random_once_work *work =
+		container_of(w, struct __net_random_once_work, work);
+	if (!static_key_enabled(work->key))
+		static_key_slow_inc(work->key);
+	kfree(work);
+}
+
+static void __net_random_once_disable_jump(struct static_key *key)
+{
+	struct __net_random_once_work *w;
+
+	w = kmalloc(sizeof(*w), GFP_ATOMIC);
+	if (!w)
+		return;
+
+	INIT_WORK(&w->work, __net_random_once_deferred);
+	w->key = key;
+	schedule_work(&w->work);
+}
+
+bool __net_get_random_once(void *buf, int nbytes, bool *done,
+			   struct static_key *done_key)
+{
+	static DEFINE_SPINLOCK(lock);
+
+	spin_lock_bh(&lock);
+	if (*done) {
+		spin_unlock_bh(&lock);
+		return false;
+	}
+
+	get_random_bytes(buf, nbytes);
+	*done = true;
+	spin_unlock_bh(&lock);
+
+	__net_random_once_disable_jump(done_key);
+
+	return true;
+}
+EXPORT_SYMBOL(__net_get_random_once);
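Note the split: __net_get_random_once() above is the slow path, taken at
most once per call site, while the per-call fast path is the
net_get_random_once() macro added to include/linux/net.h elsewhere in this
series. The key is flipped from a workqueue because static_key_slow_inc()
may sleep while patching code, so it cannot run under the spin_lock_bh()
section or from atomic callers; hence the GFP_ATOMIC allocation plus
schedule_work(). The sketch below captures the shape of the fast path
rather than the exact upstream text (the real macro uses a custom
static_key initializer; see the "net: introduce new macro
net_get_random_once" patch for the precise definition):

#define net_get_random_once(buf, nbytes)				\
	({								\
		bool ___ret = false;					\
		static bool ___done = false;				\
		static struct static_key ___done_key =			\
			STATIC_KEY_INIT_FALSE;				\
		/* Branch is taken until the deferred work enables	\
		 * the key; afterwards it patches down to a no-op. */	\
		if (!static_key_false(&___done_key))			\
			___ret = __net_get_random_once((buf),		\
						       (nbytes),	\
						       &___done,	\
						       &___done_key);	\
		___ret;							\
	})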