author     Ian Molton <ian.molton@collabora.co.uk>   2009-12-01 01:47:32 -0500
committer  Herbert Xu <herbert@gondor.apana.org.au>  2009-12-01 01:47:32 -0500
commit     9996508b3353063f2d6c48c1a28a84543d72d70b (patch)
tree       4358f990cf7805caee70be90953b4d267aad5461 /drivers/char/hw_random
parent     2f32bfd834d5d7eb230bcbf39aaacccd2a01d767 (diff)
hwrng: core - Replace u32 in driver API with byte array
This patch implements a new method by which hw_random hardware drivers can pass data to the core more efficiently, using a shared buffer. The old methods have been retained as a compatibility layer until all the drivers have been updated.

Signed-off-by: Ian Molton <ian.molton@collabora.co.uk>
Acked-by: Matt Mackall <mpm@selenic.com>
Acked-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
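[Editor's illustration, not part of this patch: a driver using the new byte-array interface supplies a single read() callback that fills the buffer handed in by the core, instead of the old data_present()/data_read() pair. The sketch below assumes the callback signature added to include/linux/hw_random.h by this series, int (*read)(struct hwrng *rng, void *data, size_t max, bool wait); the "example" device name and the constant-fill "entropy" source are placeholders for real hardware access.]

/*
 * Illustrative sketch only -- not from this patch.  A hypothetical
 * driver implementing the new byte-array API: the core passes in a
 * buffer and we return however many bytes we produced.
 */
#include <linux/hw_random.h>
#include <linux/module.h>
#include <linux/string.h>

static int example_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
{
        /*
         * A real driver would read device registers or a FIFO here,
         * optionally sleeping when @wait is true and no data is ready.
         * This placeholder just fills the buffer with a fixed pattern.
         */
        memset(buf, 0xaa, max);
        return max;
}

static struct hwrng example_rng = {
        .name = "example",
        .read = example_rng_read,  /* new-style callback; no data_present/data_read needed */
};

static int __init example_rng_mod_init(void)
{
        return hwrng_register(&example_rng);
}

static void __exit example_rng_mod_exit(void)
{
        hwrng_unregister(&example_rng);
}

module_init(example_rng_mod_init);
module_exit(example_rng_mod_exit);
MODULE_LICENSE("GPL");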
Diffstat (limited to 'drivers/char/hw_random')
-rw-r--r--  drivers/char/hw_random/core.c  107
1 file changed, 64 insertions, 43 deletions
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index fc93e2fc7c71..82367262f3a8 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -52,7 +52,8 @@
 static struct hwrng *current_rng;
 static LIST_HEAD(rng_list);
 static DEFINE_MUTEX(rng_mutex);
-
+static int data_avail;
+static u8 rng_buffer[SMP_CACHE_BYTES] __cacheline_aligned;
 
 static inline int hwrng_init(struct hwrng *rng)
 {
@@ -67,19 +68,6 @@ static inline void hwrng_cleanup(struct hwrng *rng)
         rng->cleanup(rng);
 }
 
-static inline int hwrng_data_present(struct hwrng *rng, int wait)
-{
-        if (!rng->data_present)
-                return 1;
-        return rng->data_present(rng, wait);
-}
-
-static inline int hwrng_data_read(struct hwrng *rng, u32 *data)
-{
-        return rng->data_read(rng, data);
-}
-
-
 static int rng_dev_open(struct inode *inode, struct file *filp)
 {
         /* enforce read-only access to this chrdev */
@@ -91,54 +79,87 @@ static int rng_dev_open(struct inode *inode, struct file *filp)
         return 0;
 }
 
+static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
+                        int wait) {
+        int present;
+
+        if (rng->read)
+                return rng->read(rng, (void *)buffer, size, wait);
+
+        if (rng->data_present)
+                present = rng->data_present(rng, wait);
+        else
+                present = 1;
+
+        if (present)
+                return rng->data_read(rng, (u32 *)buffer);
+
+        return 0;
+}
+
 static ssize_t rng_dev_read(struct file *filp, char __user *buf,
                             size_t size, loff_t *offp)
 {
-        u32 data;
         ssize_t ret = 0;
         int err = 0;
-        int bytes_read;
+        int bytes_read, len;
 
         while (size) {
-                err = -ERESTARTSYS;
-                if (mutex_lock_interruptible(&rng_mutex))
+                if (mutex_lock_interruptible(&rng_mutex)) {
+                        err = -ERESTARTSYS;
                         goto out;
+                }
+
                 if (!current_rng) {
-                        mutex_unlock(&rng_mutex);
                         err = -ENODEV;
-                        goto out;
+                        goto out_unlock;
                 }
 
-                bytes_read = 0;
-                if (hwrng_data_present(current_rng,
-                                       !(filp->f_flags & O_NONBLOCK)))
-                        bytes_read = hwrng_data_read(current_rng, &data);
-                mutex_unlock(&rng_mutex);
-
-                err = -EAGAIN;
-                if (!bytes_read && (filp->f_flags & O_NONBLOCK))
-                        goto out;
-                if (bytes_read < 0) {
-                        err = bytes_read;
-                        goto out;
+                if (!data_avail) {
+                        bytes_read = rng_get_data(current_rng, rng_buffer,
+                                sizeof(rng_buffer),
+                                !(filp->f_flags & O_NONBLOCK));
+                        if (bytes_read < 0) {
+                                err = bytes_read;
+                                goto out_unlock;
+                        }
+                        data_avail = bytes_read;
                 }
 
-                err = -EFAULT;
-                while (bytes_read && size) {
-                        if (put_user((u8)data, buf++))
-                                goto out;
-                        size--;
-                        ret++;
-                        bytes_read--;
-                        data >>= 8;
+                if (!data_avail) {
+                        if (filp->f_flags & O_NONBLOCK) {
+                                err = -EAGAIN;
+                                goto out_unlock;
+                        }
+                } else {
+                        len = data_avail;
+                        if (len > size)
+                                len = size;
+
+                        data_avail -= len;
+
+                        if (copy_to_user(buf + ret, rng_buffer + data_avail,
+                                                len)) {
+                                err = -EFAULT;
+                                goto out_unlock;
+                        }
+
+                        size -= len;
+                        ret += len;
                 }
 
+                mutex_unlock(&rng_mutex);
+
                 if (need_resched())
                         schedule_timeout_interruptible(1);
-                err = -ERESTARTSYS;
-                if (signal_pending(current))
+
+                if (signal_pending(current)) {
+                        err = -ERESTARTSYS;
                         goto out;
+                }
         }
+out_unlock:
+        mutex_unlock(&rng_mutex);
 out:
         return ret ? : err;
 }
@@ -280,7 +301,7 @@ int hwrng_register(struct hwrng *rng)
         struct hwrng *old_rng, *tmp;
 
         if (rng->name == NULL ||
-            rng->data_read == NULL)
+            (rng->data_read == NULL && rng->read == NULL))
                 goto out;
 
         mutex_lock(&rng_mutex);