Diffstat (limited to 'drivers/char/random.c')
-rw-r--r--	drivers/char/random.c | 297
1 file changed, 142 insertions(+), 155 deletions(-)
diff --git a/drivers/char/random.c b/drivers/char/random.c
index f43c89f7c449..0cf98bd4f2d2 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -272,7 +272,7 @@ static int random_write_wakeup_thresh = 128;
 
 static int trickle_thresh __read_mostly = INPUT_POOL_WORDS * 28;
 
-static DEFINE_PER_CPU(int, trickle_count) = 0;
+static DEFINE_PER_CPU(int, trickle_count);
 
 /*
  * A pool of size .poolwords is stirred with a primitive polynomial
@@ -370,17 +370,19 @@ static struct poolinfo {
  */
 static DECLARE_WAIT_QUEUE_HEAD(random_read_wait);
 static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
+static struct fasync_struct *fasync;
 
 #if 0
-static int debug = 0;
+static int debug;
 module_param(debug, bool, 0644);
-#define DEBUG_ENT(fmt, arg...) do { if (debug) \
-	printk(KERN_DEBUG "random %04d %04d %04d: " \
-	fmt,\
-	input_pool.entropy_count,\
-	blocking_pool.entropy_count,\
-	nonblocking_pool.entropy_count,\
-	## arg); } while (0)
+#define DEBUG_ENT(fmt, arg...) do { \
+	if (debug) \
+		printk(KERN_DEBUG "random %04d %04d %04d: " \
+		fmt,\
+		input_pool.entropy_count,\
+		blocking_pool.entropy_count,\
+		nonblocking_pool.entropy_count,\
+		## arg); } while (0)
 #else
 #define DEBUG_ENT(fmt, arg...) do {} while (0)
 #endif
@@ -394,7 +396,7 @@ module_param(debug, bool, 0644);
 
 struct entropy_store;
 struct entropy_store {
-	/* mostly-read data: */
+	/* read-only data: */
 	struct poolinfo *poolinfo;
 	__u32 *pool;
 	const char *name;
@@ -402,7 +404,7 @@ struct entropy_store {
 	struct entropy_store *pull;
 
 	/* read-write data: */
-	spinlock_t lock ____cacheline_aligned_in_smp;
+	spinlock_t lock;
 	unsigned add_ptr;
 	int entropy_count;
 	int input_rotate;
@@ -438,25 +440,26 @@ static struct entropy_store nonblocking_pool = {
 };
 
 /*
- * This function adds a byte into the entropy "pool". It does not
+ * This function adds bytes into the entropy "pool". It does not
  * update the entropy estimate. The caller should call
- * credit_entropy_store if this is appropriate.
+ * credit_entropy_bits if this is appropriate.
  *
  * The pool is stirred with a primitive polynomial of the appropriate
  * degree, and then twisted. We twist by three bits at a time because
  * it's cheap to do so and helps slightly in the expected case where
  * the entropy is concentrated in the low-order bits.
  */
-static void __add_entropy_words(struct entropy_store *r, const __u32 *in,
-				int nwords, __u32 out[16])
+static void mix_pool_bytes_extract(struct entropy_store *r, const void *in,
+				   int nbytes, __u8 out[64])
 {
 	static __u32 const twist_table[8] = {
 		0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
 		0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
-	unsigned long i, add_ptr, tap1, tap2, tap3, tap4, tap5;
-	int new_rotate, input_rotate;
+	unsigned long i, j, tap1, tap2, tap3, tap4, tap5;
+	int input_rotate;
 	int wordmask = r->poolinfo->poolwords - 1;
-	__u32 w, next_w;
+	const char *bytes = in;
+	__u32 w;
 	unsigned long flags;
 
 	/* Taps are constant, so we can load them without holding r->lock. */
@@ -465,78 +468,76 @@ static void __add_entropy_words(struct entropy_store *r, const __u32 *in,
 	tap3 = r->poolinfo->tap3;
 	tap4 = r->poolinfo->tap4;
 	tap5 = r->poolinfo->tap5;
-	next_w = *in++;
 
 	spin_lock_irqsave(&r->lock, flags);
-	prefetch_range(r->pool, wordmask);
 	input_rotate = r->input_rotate;
-	add_ptr = r->add_ptr;
+	i = r->add_ptr;
 
-	while (nwords--) {
-		w = rol32(next_w, input_rotate);
-		if (nwords > 0)
-			next_w = *in++;
-		i = add_ptr = (add_ptr - 1) & wordmask;
-		/*
-		 * Normally, we add 7 bits of rotation to the pool.
-		 * At the beginning of the pool, add an extra 7 bits
-		 * rotation, so that successive passes spread the
-		 * input bits across the pool evenly.
-		 */
-		new_rotate = input_rotate + 14;
-		if (i)
-			new_rotate = input_rotate + 7;
-		input_rotate = new_rotate & 31;
+	/* mix one byte at a time to simplify size handling and churn faster */
+	while (nbytes--) {
+		w = rol32(*bytes++, input_rotate & 31);
+		i = (i - 1) & wordmask;
 
 		/* XOR in the various taps */
+		w ^= r->pool[i];
 		w ^= r->pool[(i + tap1) & wordmask];
 		w ^= r->pool[(i + tap2) & wordmask];
 		w ^= r->pool[(i + tap3) & wordmask];
 		w ^= r->pool[(i + tap4) & wordmask];
 		w ^= r->pool[(i + tap5) & wordmask];
-		w ^= r->pool[i];
+
+		/* Mix the result back in with a twist */
 		r->pool[i] = (w >> 3) ^ twist_table[w & 7];
+
+		/*
+		 * Normally, we add 7 bits of rotation to the pool.
+		 * At the beginning of the pool, add an extra 7 bits
+		 * rotation, so that successive passes spread the
+		 * input bits across the pool evenly.
+		 */
+		input_rotate += i ? 7 : 14;
 	}
 
 	r->input_rotate = input_rotate;
-	r->add_ptr = add_ptr;
+	r->add_ptr = i;
 
-	if (out) {
-		for (i = 0; i < 16; i++) {
-			out[i] = r->pool[add_ptr];
-			add_ptr = (add_ptr - 1) & wordmask;
-		}
-	}
+	if (out)
+		for (j = 0; j < 16; j++)
+			((__u32 *)out)[j] = r->pool[(i - j) & wordmask];
 
 	spin_unlock_irqrestore(&r->lock, flags);
 }
 
-static inline void add_entropy_words(struct entropy_store *r, const __u32 *in,
-				     int nwords)
+static void mix_pool_bytes(struct entropy_store *r, const void *in, int bytes)
 {
-	__add_entropy_words(r, in, nwords, NULL);
+	mix_pool_bytes_extract(r, in, bytes, NULL);
 }
 
 /*
  * Credit (or debit) the entropy store with n bits of entropy
  */
-static void credit_entropy_store(struct entropy_store *r, int nbits)
+static void credit_entropy_bits(struct entropy_store *r, int nbits)
 {
 	unsigned long flags;
 
+	if (!nbits)
+		return;
+
 	spin_lock_irqsave(&r->lock, flags);
 
-	if (r->entropy_count + nbits < 0) {
-		DEBUG_ENT("negative entropy/overflow (%d+%d)\n",
-			  r->entropy_count, nbits);
+	DEBUG_ENT("added %d entropy credits to %s\n", nbits, r->name);
+	r->entropy_count += nbits;
+	if (r->entropy_count < 0) {
+		DEBUG_ENT("negative entropy/overflow\n");
 		r->entropy_count = 0;
-	} else if (r->entropy_count + nbits > r->poolinfo->POOLBITS) {
+	} else if (r->entropy_count > r->poolinfo->POOLBITS)
 		r->entropy_count = r->poolinfo->POOLBITS;
-	} else {
-		r->entropy_count += nbits;
-		if (nbits)
-			DEBUG_ENT("added %d entropy credits to %s\n",
-				  nbits, r->name);
+
+	/* should we wake readers? */
+	if (r == &input_pool &&
+	    r->entropy_count >= random_read_wakeup_thresh) {
+		wake_up_interruptible(&random_read_wait);
+		kill_fasync(&fasync, SIGIO, POLL_IN);
 	}
 
 	spin_unlock_irqrestore(&r->lock, flags);
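
For illustration only, not part of the patch: the per-byte mixing step that mix_pool_bytes_extract() performs can be modelled in user space roughly as follows. The pool size and the lack of locking are simplifications; the tap positions are the ones random.c's poolinfo table lists for a 128-word pool.

#include <stdint.h>

#define POOLWORDS 128			/* power of two, as in the kernel's input pool */

static uint32_t pool[POOLWORDS];
static unsigned add_ptr;
static unsigned input_rotate;

static uint32_t rol32(uint32_t w, unsigned s)
{
	s &= 31;
	return s ? (w << s) | (w >> (32 - s)) : w;
}

/* Simplified model of one iteration of the mixing loop above: rotate the
 * input byte, XOR it with the pool word being replaced and five tap words,
 * then fold the result back in through the three-bit twist table. */
static void mix_byte(uint8_t b)
{
	static const uint32_t twist_table[8] = {
		0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
		0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
	const unsigned tap1 = 103, tap2 = 76, tap3 = 51, tap4 = 25, tap5 = 1;
	const unsigned wordmask = POOLWORDS - 1;
	unsigned i;
	uint32_t w;

	w = rol32(b, input_rotate);
	i = add_ptr = (add_ptr - 1) & wordmask;

	w ^= pool[i];
	w ^= pool[(i + tap1) & wordmask];
	w ^= pool[(i + tap2) & wordmask];
	w ^= pool[(i + tap3) & wordmask];
	w ^= pool[(i + tap4) & wordmask];
	w ^= pool[(i + tap5) & wordmask];

	/* twist: shift right by three and XOR in a CRC-32-like constant */
	pool[i] = (w >> 3) ^ twist_table[w & 7];

	/* extra rotation at the pool boundary spreads input across the pool */
	input_rotate = (input_rotate + (i ? 7 : 14)) & 31;
}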
@@ -551,7 +552,7 @@ static void credit_entropy_store(struct entropy_store *r, int nbits)
 /* There is one of these per entropy source */
 struct timer_rand_state {
 	cycles_t last_time;
-	long last_delta,last_delta2;
+	long last_delta, last_delta2;
 	unsigned dont_count_entropy:1;
 };
 
@@ -586,7 +587,7 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
 	sample.jiffies = jiffies;
 	sample.cycles = get_cycles();
 	sample.num = num;
-	add_entropy_words(&input_pool, (u32 *)&sample, sizeof(sample)/4);
+	mix_pool_bytes(&input_pool, &sample, sizeof(sample));
 
 	/*
 	 * Calculate number of bits of randomness we probably added.
@@ -620,13 +621,9 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
 		 * Round down by 1 bit on general principles,
 		 * and limit entropy entimate to 12 bits.
 		 */
-		credit_entropy_store(&input_pool,
+		credit_entropy_bits(&input_pool,
 				    min_t(int, fls(delta>>1), 11));
 	}
-
-	if(input_pool.entropy_count >= random_read_wakeup_thresh)
-		wake_up_interruptible(&random_read_wait);
-
 out:
 	preempt_enable();
 }
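
Worked example of the credit arithmetic above (illustrative numbers, not from the patch): for a timing delta of 5000 units, delta>>1 = 2500, fls(2500) = 12 because 2^11 <= 2500 < 2^12, and min_t(int, 12, 11) caps the credit at 11 bits; a delta of 6 would credit only fls(3) = 2 bits.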
@@ -677,7 +674,7 @@ void add_disk_randomness(struct gendisk *disk)
  *
  *********************************************************************/
 
-static ssize_t extract_entropy(struct entropy_store *r, void * buf,
+static ssize_t extract_entropy(struct entropy_store *r, void *buf,
 			       size_t nbytes, int min, int rsvd);
 
 /*
@@ -704,10 +701,10 @@ static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
704 "(%d of %d requested)\n", 701 "(%d of %d requested)\n",
705 r->name, bytes * 8, nbytes * 8, r->entropy_count); 702 r->name, bytes * 8, nbytes * 8, r->entropy_count);
706 703
707 bytes=extract_entropy(r->pull, tmp, bytes, 704 bytes = extract_entropy(r->pull, tmp, bytes,
708 random_read_wakeup_thresh / 8, rsvd); 705 random_read_wakeup_thresh / 8, rsvd);
709 add_entropy_words(r, tmp, (bytes + 3) / 4); 706 mix_pool_bytes(r, tmp, bytes);
710 credit_entropy_store(r, bytes*8); 707 credit_entropy_bits(r, bytes*8);
711 } 708 }
712} 709}
713 710
@@ -744,13 +741,15 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
 		if (r->limit && nbytes + reserved >= r->entropy_count / 8)
 			nbytes = r->entropy_count/8 - reserved;
 
-		if(r->entropy_count / 8 >= nbytes + reserved)
+		if (r->entropy_count / 8 >= nbytes + reserved)
 			r->entropy_count -= nbytes*8;
 		else
 			r->entropy_count = reserved;
 
-		if (r->entropy_count < random_write_wakeup_thresh)
+		if (r->entropy_count < random_write_wakeup_thresh) {
 			wake_up_interruptible(&random_write_wait);
+			kill_fasync(&fasync, SIGIO, POLL_OUT);
+		}
 	}
 
 	DEBUG_ENT("debiting %d entropy credits from %s%s\n",
@@ -764,45 +763,46 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
 static void extract_buf(struct entropy_store *r, __u8 *out)
 {
 	int i;
-	__u32 data[16], buf[5 + SHA_WORKSPACE_WORDS];
+	__u32 hash[5], workspace[SHA_WORKSPACE_WORDS];
+	__u8 extract[64];
+
+	/* Generate a hash across the pool, 16 words (512 bits) at a time */
+	sha_init(hash);
+	for (i = 0; i < r->poolinfo->poolwords; i += 16)
+		sha_transform(hash, (__u8 *)(r->pool + i), workspace);
 
-	sha_init(buf);
 	/*
-	 * As we hash the pool, we mix intermediate values of
-	 * the hash back into the pool. This eliminates
-	 * backtracking attacks (where the attacker knows
-	 * the state of the pool plus the current outputs, and
-	 * attempts to find previous ouputs), unless the hash
-	 * function can be inverted.
+	 * We mix the hash back into the pool to prevent backtracking
+	 * attacks (where the attacker knows the state of the pool
+	 * plus the current outputs, and attempts to find previous
+	 * ouputs), unless the hash function can be inverted. By
+	 * mixing at least a SHA1 worth of hash data back, we make
+	 * brute-forcing the feedback as hard as brute-forcing the
+	 * hash.
 	 */
-	for (i = 0; i < r->poolinfo->poolwords; i += 16) {
-		/* hash blocks of 16 words = 512 bits */
-		sha_transform(buf, (__u8 *)(r->pool + i), buf + 5);
-		/* feed back portion of the resulting hash */
-		add_entropy_words(r, &buf[i % 5], 1);
-	}
+	mix_pool_bytes_extract(r, hash, sizeof(hash), extract);
 
 	/*
-	 * To avoid duplicates, we atomically extract a
-	 * portion of the pool while mixing, and hash one
-	 * final time.
+	 * To avoid duplicates, we atomically extract a portion of the
+	 * pool while mixing, and hash one final time.
 	 */
-	__add_entropy_words(r, &buf[i % 5], 1, data);
-	sha_transform(buf, (__u8 *)data, buf + 5);
+	sha_transform(hash, extract, workspace);
+	memset(extract, 0, sizeof(extract));
+	memset(workspace, 0, sizeof(workspace));
 
 	/*
-	 * In case the hash function has some recognizable
-	 * output pattern, we fold it in half.
+	 * In case the hash function has some recognizable output
+	 * pattern, we fold it in half. Thus, we always feed back
+	 * twice as much data as we output.
 	 */
-
-	buf[0] ^= buf[3];
-	buf[1] ^= buf[4];
-	buf[2] ^= rol32(buf[2], 16);
-	memcpy(out, buf, EXTRACT_SIZE);
-	memset(buf, 0, sizeof(buf));
+	hash[0] ^= hash[3];
+	hash[1] ^= hash[4];
+	hash[2] ^= rol32(hash[2], 16);
+	memcpy(out, hash, EXTRACT_SIZE);
+	memset(hash, 0, sizeof(hash));
 }
 
-static ssize_t extract_entropy(struct entropy_store *r, void * buf,
+static ssize_t extract_entropy(struct entropy_store *r, void *buf,
 			       size_t nbytes, int min, int reserved)
 {
 	ssize_t ret = 0, i;
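
To make the final folding step concrete, here is a stand-alone sketch (not kernel code) of the output half of extract_buf(): fold the five-word SHA-1 state in half so no full digest ever leaves the kernel, copy out EXTRACT_SIZE bytes, and wipe the state.

#include <stdint.h>
#include <string.h>

#define EXTRACT_SIZE 10	/* as in random.c: half of a 20-byte SHA-1 digest */

/* Mirrors the hash[0] ^= hash[3] ... sequence above. */
static void fold_and_copy(uint32_t hash[5], uint8_t out[EXTRACT_SIZE])
{
	hash[0] ^= hash[3];
	hash[1] ^= hash[4];
	hash[2] ^= (hash[2] << 16) | (hash[2] >> 16);	/* rol32(hash[2], 16) */
	memcpy(out, hash, EXTRACT_SIZE);
	memset(hash, 0, 5 * sizeof(uint32_t));
}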
@@ -872,7 +872,6 @@ void get_random_bytes(void *buf, int nbytes)
 {
 	extract_entropy(&nonblocking_pool, buf, nbytes, 0, 0);
 }
-
 EXPORT_SYMBOL(get_random_bytes);
 
 /*
@@ -894,12 +893,11 @@ static void init_std_data(struct entropy_store *r)
 	spin_unlock_irqrestore(&r->lock, flags);
 
 	now = ktime_get_real();
-	add_entropy_words(r, (__u32 *)&now, sizeof(now)/4);
-	add_entropy_words(r, (__u32 *)utsname(),
-			  sizeof(*(utsname()))/4);
+	mix_pool_bytes(r, &now, sizeof(now));
+	mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
 }
 
-static int __init rand_initialize(void)
+static int rand_initialize(void)
 {
 	init_std_data(&input_pool);
 	init_std_data(&blocking_pool);
@@ -940,7 +938,7 @@ void rand_initialize_disk(struct gendisk *disk)
 #endif
 
 static ssize_t
-random_read(struct file * file, char __user * buf, size_t nbytes, loff_t *ppos)
+random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
 {
 	ssize_t n, retval = 0, count = 0;
 
@@ -1002,8 +1000,7 @@ random_read(struct file * file, char __user * buf, size_t nbytes, loff_t *ppos)
 }
 
 static ssize_t
-urandom_read(struct file * file, char __user * buf,
-	     size_t nbytes, loff_t *ppos)
+urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
 {
 	return extract_entropy_user(&nonblocking_pool, buf, nbytes);
 }
@@ -1038,16 +1035,15 @@ write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
 		count -= bytes;
 		p += bytes;
 
-		add_entropy_words(r, buf, (bytes + 3) / 4);
+		mix_pool_bytes(r, buf, bytes);
 		cond_resched();
 	}
 
 	return 0;
 }
 
-static ssize_t
-random_write(struct file * file, const char __user * buffer,
-	     size_t count, loff_t *ppos)
+static ssize_t random_write(struct file *file, const char __user *buffer,
＋			    size_t count, loff_t *ppos)
 {
 	size_t ret;
 	struct inode *inode = file->f_path.dentry->d_inode;
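
For context, write_pool() is what services plain writes to /dev/random and /dev/urandom: the data is mixed in but no entropy is credited (crediting is reserved for the RNDADDENTROPY ioctl). A minimal user-space sketch of that write path, names illustrative:

#include <fcntl.h>
#include <unistd.h>

/* Stir locally gathered bytes into the kernel pool. The pool state changes,
 * but the entropy estimate does not (no credit without RNDADDENTROPY). */
static int feed_pool(const void *buf, size_t len)
{
	int fd = open("/dev/random", O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, buf, len);
	close(fd);
	return n == (ssize_t)len ? 0 : -1;
}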
@@ -1064,9 +1060,7 @@ random_write(struct file * file, const char __user * buffer,
 	return (ssize_t)count;
 }
 
-static int
-random_ioctl(struct inode * inode, struct file * file,
-	     unsigned int cmd, unsigned long arg)
+static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
 {
 	int size, ent_count;
 	int __user *p = (int __user *)arg;
@@ -1074,8 +1068,8 @@ random_ioctl(struct inode * inode, struct file * file,
 
 	switch (cmd) {
 	case RNDGETENTCNT:
-		ent_count = input_pool.entropy_count;
-		if (put_user(ent_count, p))
+		/* inherently racy, no point locking */
+		if (put_user(input_pool.entropy_count, p))
 			return -EFAULT;
 		return 0;
 	case RNDADDTOENTCNT:
@@ -1083,13 +1077,7 @@ random_ioctl(struct inode * inode, struct file * file,
 			return -EPERM;
 		if (get_user(ent_count, p))
 			return -EFAULT;
-		credit_entropy_store(&input_pool, ent_count);
-		/*
-		 * Wake up waiting processes if we have enough
-		 * entropy.
-		 */
-		if (input_pool.entropy_count >= random_read_wakeup_thresh)
-			wake_up_interruptible(&random_read_wait);
+		credit_entropy_bits(&input_pool, ent_count);
 		return 0;
 	case RNDADDENTROPY:
 		if (!capable(CAP_SYS_ADMIN))
@@ -1104,39 +1092,45 @@ random_ioctl(struct inode * inode, struct file * file,
 				    size);
 		if (retval < 0)
 			return retval;
-		credit_entropy_store(&input_pool, ent_count);
-		/*
-		 * Wake up waiting processes if we have enough
-		 * entropy.
-		 */
-		if (input_pool.entropy_count >= random_read_wakeup_thresh)
-			wake_up_interruptible(&random_read_wait);
+		credit_entropy_bits(&input_pool, ent_count);
 		return 0;
 	case RNDZAPENTCNT:
 	case RNDCLEARPOOL:
 		/* Clear the entropy pool counters. */
 		if (!capable(CAP_SYS_ADMIN))
 			return -EPERM;
-		init_std_data(&input_pool);
-		init_std_data(&blocking_pool);
-		init_std_data(&nonblocking_pool);
+		rand_initialize();
 		return 0;
 	default:
 		return -EINVAL;
 	}
 }
 
+static int random_fasync(int fd, struct file *filp, int on)
+{
+	return fasync_helper(fd, filp, on, &fasync);
+}
+
+static int random_release(struct inode *inode, struct file *filp)
+{
+	return fasync_helper(-1, filp, 0, &fasync);
+}
+
 const struct file_operations random_fops = {
 	.read = random_read,
 	.write = random_write,
 	.poll = random_poll,
-	.ioctl = random_ioctl,
+	.unlocked_ioctl = random_ioctl,
+	.fasync = random_fasync,
+	.release = random_release,
 };
 
 const struct file_operations urandom_fops = {
 	.read = urandom_read,
 	.write = random_write,
-	.ioctl = random_ioctl,
+	.unlocked_ioctl = random_ioctl,
+	.fasync = random_fasync,
+	.release = random_release,
 };
 
 /***************************************************************
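
The new .fasync/.release hooks tie into the kill_fasync() calls added earlier: a reader gets SIGIO when credit_entropy_bits() pushes the input pool past random_read_wakeup_thresh (POLL_IN), and a writer gets it when account() drains the pool below random_write_wakeup_thresh (POLL_OUT). A minimal user-space sketch of subscribing to that notification with the standard fcntl() O_ASYNC interface, outside the patch:

#include <fcntl.h>
#include <signal.h>
#include <unistd.h>

static void on_sigio(int sig)
{
	(void)sig;
	/* async-signal-safe: just note that /dev/random has entropy again */
	write(STDOUT_FILENO, "entropy available\n", 18);
}

int main(void)
{
	int fd = open("/dev/random", O_RDONLY | O_NONBLOCK);

	if (fd < 0)
		return 1;

	signal(SIGIO, on_sigio);
	fcntl(fd, F_SETOWN, getpid());			/* route SIGIO to this process */
	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC);	/* enables random_fasync() */

	pause();					/* wait for the notification */
	close(fd);
	return 0;
}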
@@ -1157,7 +1151,6 @@ void generate_random_uuid(unsigned char uuid_out[16])
 	/* Set the UUID variant to DCE */
 	uuid_out[8] = (uuid_out[8] & 0x3F) | 0x80;
 }
-
 EXPORT_SYMBOL(generate_random_uuid);
 
 /********************************************************************
@@ -1339,7 +1332,7 @@ ctl_table random_table[] = {
 
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 
-static __u32 twothirdsMD4Transform (__u32 const buf[4], __u32 const in[12])
+static __u32 twothirdsMD4Transform(__u32 const buf[4], __u32 const in[12])
 {
 	__u32 a = buf[0], b = buf[1], c = buf[2], d = buf[3];
 
@@ -1487,8 +1480,8 @@ __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
 	 */
 
 	memcpy(hash, saddr, 16);
-	hash[4]=((__force u16)sport << 16) + (__force u16)dport;
-	memcpy(&hash[5],keyptr->secret,sizeof(__u32) * 7);
+	hash[4] = ((__force u16)sport << 16) + (__force u16)dport;
+	memcpy(&hash[5], keyptr->secret, sizeof(__u32) * 7);
 
 	seq = twothirdsMD4Transform((const __u32 *)daddr, hash) & HASH_MASK;
 	seq += keyptr->count;
@@ -1538,10 +1531,10 @@ __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
 	 * Note that the words are placed into the starting vector, which is
 	 * then mixed with a partial MD4 over random data.
 	 */
-	hash[0]=(__force u32)saddr;
-	hash[1]=(__force u32)daddr;
-	hash[2]=((__force u16)sport << 16) + (__force u16)dport;
-	hash[3]=keyptr->secret[11];
+	hash[0] = (__force u32)saddr;
+	hash[1] = (__force u32)daddr;
+	hash[2] = ((__force u16)sport << 16) + (__force u16)dport;
+	hash[3] = keyptr->secret[11];
 
 	seq = half_md4_transform(hash, keyptr->secret) & HASH_MASK;
 	seq += keyptr->count;
@@ -1556,10 +1549,7 @@ __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
 	 * Choosing a clock of 64 ns period is OK. (period of 274 s)
 	 */
 	seq += ktime_to_ns(ktime_get_real()) >> 6;
-#if 0
-	printk("init_seq(%lx, %lx, %d, %d) = %d\n",
-	       saddr, daddr, sport, dport, seq);
-#endif
+
 	return seq;
 }
 
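For reference, the 274 s figure in the comment above follows from the 64 ns tick: a 32-bit sequence space advancing once per 64 ns wraps after 2^32 * 64 ns, which is roughly 274.9 s.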
@@ -1582,14 +1572,15 @@ u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport)
 }
 
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr, __be16 dport)
+u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
+			       __be16 dport)
 {
 	struct keydata *keyptr = get_keyptr();
 	u32 hash[12];
 
 	memcpy(hash, saddr, 16);
 	hash[4] = (__force u32)dport;
-	memcpy(&hash[5],keyptr->secret,sizeof(__u32) * 7);
+	memcpy(&hash[5], keyptr->secret, sizeof(__u32) * 7);
 
 	return twothirdsMD4Transform((const __u32 *)daddr, hash);
 }
@@ -1617,13 +1608,9 @@ u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
 
 	seq += ktime_to_ns(ktime_get_real());
 	seq &= (1ull << 48) - 1;
-#if 0
-	printk("dccp init_seq(%lx, %lx, %d, %d) = %d\n",
-	       saddr, daddr, sport, dport, seq);
-#endif
+
 	return seq;
 }
-
 EXPORT_SYMBOL(secure_dccp_sequence_number);
 #endif
 