author		Theodore Ts'o <tytso@mit.edu>	2012-07-04 10:38:30 -0400
committer	Theodore Ts'o <tytso@mit.edu>	2012-07-14 20:17:43 -0400
commit		902c098a3663de3fa18639efbb71b6080f0bcd3c (patch)
tree		e2f984d5903236d4d593e457fd1279fd24177af2 /drivers/char
parent		775f4b297b780601e61787b766f306ed3e1d23eb (diff)
random: use lockless techniques in the interrupt path
The real-time Linux folks don't like add_interrupt_randomness() taking a spinlock, since it is called in the low-level interrupt routine. Going lockless also reduces the overhead of the random driver's fast path, which is the interrupt collection path.

Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
Cc: stable@vger.kernel.org
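For readers unfamiliar with the pattern, the heart of the change is replacing a spin_lock_irqsave()/spin_unlock_irqrestore() pair around the entropy-count update with a compare-and-swap retry loop (see credit_entropy_bits() in the diff below). The following is a minimal, userspace-only sketch of that pattern; it is not part of the patch, uses C11 atomics as a stand-in for the kernel's ACCESS_ONCE()/cmpxchg(), and its names and POOLBITS value are illustrative only.

/*
 * Userspace sketch of the lock-free update used in credit_entropy_bits():
 * read the current value, compute the clamped new value, and publish it
 * with a compare-and-swap, retrying if another CPU raced in between.
 */
#include <stdatomic.h>

#define POOLBITS 4096			/* assumed pool size, for illustration */

static _Atomic int entropy_count;

static void credit_entropy_bits_sketch(int nbits)
{
	int orig, new;

	if (!nbits)
		return;

	do {
		orig = atomic_load_explicit(&entropy_count, memory_order_relaxed);
		new = orig + nbits;
		if (new < 0)			/* negative entropy/overflow */
			new = 0;
		else if (new > POOLBITS)	/* never credit past the pool size */
			new = POOLBITS;
		/* retry if entropy_count changed under us */
	} while (!atomic_compare_exchange_weak(&entropy_count, &orig, new));
}

The retry loop makes the update safe to call from interrupt context without taking a lock: contention costs only an occasional extra iteration instead of serializing every interrupt on the pool's spinlock.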
Diffstat (limited to 'drivers/char')
-rw-r--r--	drivers/char/random.c	78
1 file changed, 39 insertions(+), 39 deletions(-)
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 9fcceace239c..315feb1f59f3 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -418,9 +418,9 @@ struct entropy_store {
 	/* read-write data: */
 	spinlock_t lock;
 	unsigned add_ptr;
+	unsigned input_rotate;
 	int entropy_count;
 	int entropy_total;
-	int input_rotate;
 	unsigned int initialized:1;
 	__u8 last_data[EXTRACT_SIZE];
 };
@@ -468,26 +468,24 @@ static __u32 const twist_table[8] = {
  * it's cheap to do so and helps slightly in the expected case where
  * the entropy is concentrated in the low-order bits.
  */
-static void mix_pool_bytes_extract(struct entropy_store *r, const void *in,
-				   int nbytes, __u8 out[64])
+static void __mix_pool_bytes(struct entropy_store *r, const void *in,
+			     int nbytes, __u8 out[64])
 {
 	unsigned long i, j, tap1, tap2, tap3, tap4, tap5;
 	int input_rotate;
 	int wordmask = r->poolinfo->poolwords - 1;
 	const char *bytes = in;
 	__u32 w;
-	unsigned long flags;
 
-	/* Taps are constant, so we can load them without holding r->lock. */
 	tap1 = r->poolinfo->tap1;
 	tap2 = r->poolinfo->tap2;
 	tap3 = r->poolinfo->tap3;
 	tap4 = r->poolinfo->tap4;
 	tap5 = r->poolinfo->tap5;
 
-	spin_lock_irqsave(&r->lock, flags);
-	input_rotate = r->input_rotate;
-	i = r->add_ptr;
+	smp_rmb();
+	input_rotate = ACCESS_ONCE(r->input_rotate);
+	i = ACCESS_ONCE(r->add_ptr);
 
 	/* mix one byte at a time to simplify size handling and churn faster */
 	while (nbytes--) {
@@ -514,19 +512,23 @@ static void mix_pool_bytes_extract(struct entropy_store *r, const void *in,
 		input_rotate += i ? 7 : 14;
 	}
 
-	r->input_rotate = input_rotate;
-	r->add_ptr = i;
+	ACCESS_ONCE(r->input_rotate) = input_rotate;
+	ACCESS_ONCE(r->add_ptr) = i;
+	smp_wmb();
 
 	if (out)
 		for (j = 0; j < 16; j++)
 			((__u32 *)out)[j] = r->pool[(i - j) & wordmask];
-
-	spin_unlock_irqrestore(&r->lock, flags);
 }
 
-static void mix_pool_bytes(struct entropy_store *r, const void *in, int bytes)
+static void mix_pool_bytes(struct entropy_store *r, const void *in,
+			   int nbytes, __u8 out[64])
 {
-	mix_pool_bytes_extract(r, in, bytes, NULL);
+	unsigned long flags;
+
+	spin_lock_irqsave(&r->lock, flags);
+	__mix_pool_bytes(r, in, nbytes, out);
+	spin_unlock_irqrestore(&r->lock, flags);
 }
 
 struct fast_pool {
@@ -564,23 +566,22 @@ static void fast_mix(struct fast_pool *f, const void *in, int nbytes)
  */
 static void credit_entropy_bits(struct entropy_store *r, int nbits)
 {
-	unsigned long flags;
-	int entropy_count;
+	int entropy_count, orig;
 
 	if (!nbits)
 		return;
 
-	spin_lock_irqsave(&r->lock, flags);
-
 	DEBUG_ENT("added %d entropy credits to %s\n", nbits, r->name);
-	entropy_count = r->entropy_count;
+retry:
+	entropy_count = orig = ACCESS_ONCE(r->entropy_count);
 	entropy_count += nbits;
 	if (entropy_count < 0) {
 		DEBUG_ENT("negative entropy/overflow\n");
 		entropy_count = 0;
 	} else if (entropy_count > r->poolinfo->POOLBITS)
 		entropy_count = r->poolinfo->POOLBITS;
-	r->entropy_count = entropy_count;
+	if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
+		goto retry;
 
 	if (!r->initialized && nbits > 0) {
 		r->entropy_total += nbits;
@@ -593,7 +594,6 @@ static void credit_entropy_bits(struct entropy_store *r, int nbits)
 		wake_up_interruptible(&random_read_wait);
 		kill_fasync(&fasync, SIGIO, POLL_IN);
 	}
-	spin_unlock_irqrestore(&r->lock, flags);
 }
 
 /*********************************************************************
@@ -680,7 +680,7 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
 	sample.cycles = get_cycles();
 
 	sample.num = num;
-	mix_pool_bytes(&input_pool, &sample, sizeof(sample));
+	mix_pool_bytes(&input_pool, &sample, sizeof(sample), NULL);
 
 	/*
 	 * Calculate number of bits of randomness we probably added.
@@ -764,7 +764,7 @@ void add_interrupt_randomness(int irq, int irq_flags)
 	fast_pool->last = now;
 
 	r = nonblocking_pool.initialized ? &input_pool : &nonblocking_pool;
-	mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool));
+	__mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool), NULL);
 	/*
 	 * If we don't have a valid cycle counter, and we see
 	 * back-to-back timer interrupts, then skip giving credit for
@@ -829,7 +829,7 @@ static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
 
 		bytes = extract_entropy(r->pull, tmp, bytes,
 					random_read_wakeup_thresh / 8, rsvd);
-		mix_pool_bytes(r, tmp, bytes);
+		mix_pool_bytes(r, tmp, bytes, NULL);
 		credit_entropy_bits(r, bytes*8);
 	}
 }
@@ -890,9 +890,11 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
 	int i;
 	__u32 hash[5], workspace[SHA_WORKSPACE_WORDS];
 	__u8 extract[64];
+	unsigned long flags;
 
 	/* Generate a hash across the pool, 16 words (512 bits) at a time */
 	sha_init(hash);
+	spin_lock_irqsave(&r->lock, flags);
 	for (i = 0; i < r->poolinfo->poolwords; i += 16)
 		sha_transform(hash, (__u8 *)(r->pool + i), workspace);
 
@@ -905,7 +907,8 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
 	 * brute-forcing the feedback as hard as brute-forcing the
 	 * hash.
 	 */
-	mix_pool_bytes_extract(r, hash, sizeof(hash), extract);
+	__mix_pool_bytes(r, hash, sizeof(hash), extract);
+	spin_unlock_irqrestore(&r->lock, flags);
 
 	/*
 	 * To avoid duplicates, we atomically extract a portion of the
@@ -928,11 +931,10 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
 }
 
 static ssize_t extract_entropy(struct entropy_store *r, void *buf,
 			       size_t nbytes, int min, int reserved)
 {
 	ssize_t ret = 0, i;
 	__u8 tmp[EXTRACT_SIZE];
-	unsigned long flags;
 
 	xfer_secondary_pool(r, nbytes);
 	nbytes = account(r, nbytes, min, reserved);
@@ -941,6 +943,8 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
 		extract_buf(r, tmp);
 
 		if (fips_enabled) {
+			unsigned long flags;
+
 			spin_lock_irqsave(&r->lock, flags);
 			if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
 				panic("Hardware RNG duplicated output!\n");
@@ -1034,22 +1038,18 @@ EXPORT_SYMBOL(get_random_bytes);
 static void init_std_data(struct entropy_store *r)
 {
 	int i;
-	ktime_t now;
-	unsigned long flags;
+	ktime_t now = ktime_get_real();
+	unsigned long rv;
 
-	spin_lock_irqsave(&r->lock, flags);
 	r->entropy_count = 0;
 	r->entropy_total = 0;
-	spin_unlock_irqrestore(&r->lock, flags);
-
-	now = ktime_get_real();
-	mix_pool_bytes(r, &now, sizeof(now));
-	for (i = r->poolinfo->POOLBYTES; i > 0; i -= sizeof flags) {
-		if (!arch_get_random_long(&flags))
+	mix_pool_bytes(r, &now, sizeof(now), NULL);
+	for (i = r->poolinfo->POOLBYTES; i > 0; i -= sizeof(rv)) {
+		if (!arch_get_random_long(&rv))
 			break;
-		mix_pool_bytes(r, &flags, sizeof(flags));
+		mix_pool_bytes(r, &rv, sizeof(rv), NULL);
 	}
-	mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
+	mix_pool_bytes(r, utsname(), sizeof(*(utsname())), NULL);
 }
 
 static int rand_initialize(void)
@@ -1186,7 +1186,7 @@ write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
 		count -= bytes;
 		p += bytes;
 
-		mix_pool_bytes(r, buf, bytes);
+		mix_pool_bytes(r, buf, bytes, NULL);
 		cond_resched();
 	}
 