path: root/drivers/char/hw_random/core.c
author	Rusty Russell <rusty@rustcorp.com.au>	2014-12-08 03:50:37 -0500
committer	Herbert Xu <herbert@gondor.apana.org.au>	2014-12-22 07:02:39 -0500
commit	3a2c0ba5ad00c018c0bef39a2224aca950aa33f2 (patch)
tree	1fbf69fb820c0fa2e6050f8caaecff424c73bd8d	/drivers/char/hw_random/core.c
parent	1dacb395d68a14825ee48c0843335e3181aea675 (diff)
hwrng: use reference counts on each struct hwrng.
current_rng holds one reference, and we bump it every time we want to do a read from it.

This means we only hold the rng_mutex to grab or drop a reference, so accessing /sys/devices/virtual/misc/hw_random/rng_current doesn't block on read of /dev/hwrng.

Using a kref is overkill (we're always under the rng_mutex), but a standard pattern.

This also solves the problem that the hwrng_fillfn thread was accessing current_rng without a lock, which could change (eg. to NULL) underneath it.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Amos Kong <akong@redhat.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
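As a side note, the scheme the message describes can be sketched in ordinary userspace C: a counter protected by a mutex plays the role of struct kref, current_rng owns one reference, readers briefly take rng_mutex only to add or drop their own reference, and cleanup runs when the count hits zero. This is only an illustration of the pattern under those assumptions, not the kernel implementation; every identifier in it (fake_rng, set_current, get_current_ref, and so on) is invented for the example.

/*
 * Userspace sketch of the refcount scheme described above: current_rng
 * keeps one reference, each reader takes its own, and cleanup runs only
 * when the last reference is dropped.  A plain counter under a pthread
 * mutex stands in for struct kref; all names here are hypothetical.
 */
#include <pthread.h>
#include <stdio.h>

struct fake_rng {
	const char *name;
	int refs;				/* stand-in for struct kref */
	void (*cleanup)(struct fake_rng *rng);
};

static pthread_mutex_t rng_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct fake_rng *current_rng;

/* Drop one reference; run cleanup when it was the last one. */
static void put_ref(struct fake_rng *rng)
{
	if (--rng->refs == 0 && rng->cleanup)
		rng->cleanup(rng);
}

/* Caller holds rng_mutex, analogous to set_current_rng() in the patch. */
static void set_current(struct fake_rng *rng)
{
	rng->refs++;				/* reference owned by current_rng */
	current_rng = rng;
}

/* Caller holds rng_mutex, analogous to drop_current_rng() in the patch. */
static void drop_current(void)
{
	if (!current_rng)
		return;
	put_ref(current_rng);
	current_rng = NULL;
}

/* Readers take rng_mutex only long enough to grab a reference. */
static struct fake_rng *get_current_ref(void)
{
	struct fake_rng *rng;

	pthread_mutex_lock(&rng_mutex);
	rng = current_rng;
	if (rng)
		rng->refs++;
	pthread_mutex_unlock(&rng_mutex);
	return rng;
}

static void put_current_ref(struct fake_rng *rng)
{
	pthread_mutex_lock(&rng_mutex);
	if (rng)
		put_ref(rng);
	pthread_mutex_unlock(&rng_mutex);
}

static void cleanup(struct fake_rng *rng)
{
	printf("cleanup %s after last reference\n", rng->name);
}

int main(void)
{
	struct fake_rng demo = { "demo", 0, cleanup };
	struct fake_rng *rng;

	pthread_mutex_lock(&rng_mutex);
	set_current(&demo);			/* refs = 1 */
	pthread_mutex_unlock(&rng_mutex);

	rng = get_current_ref();		/* reader: refs = 2 */

	pthread_mutex_lock(&rng_mutex);
	drop_current();				/* "unregister": refs = 1 */
	pthread_mutex_unlock(&rng_mutex);

	put_current_ref(rng);			/* refs = 0 -> cleanup runs */
	return 0;
}

Note how the reader never holds rng_mutex across its actual work: it pins the device with a reference instead, which is exactly why a sysfs read of rng_current no longer blocks behind a long read of /dev/hwrng.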
Diffstat (limited to 'drivers/char/hw_random/core.c')
-rw-r--r--	drivers/char/hw_random/core.c	135
1 file changed, 92 insertions(+), 43 deletions(-)
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index b4c0e873d362..089c18dc579e 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -42,6 +42,7 @@
 #include <linux/delay.h>
 #include <linux/slab.h>
 #include <linux/random.h>
+#include <linux/err.h>
 #include <asm/uaccess.h>
 
 
@@ -91,6 +92,60 @@ static void add_early_randomness(struct hwrng *rng)
 	add_device_randomness(bytes, bytes_read);
 }
 
+static inline void cleanup_rng(struct kref *kref)
+{
+	struct hwrng *rng = container_of(kref, struct hwrng, ref);
+
+	if (rng->cleanup)
+		rng->cleanup(rng);
+}
+
+static void set_current_rng(struct hwrng *rng)
+{
+	BUG_ON(!mutex_is_locked(&rng_mutex));
+	kref_get(&rng->ref);
+	current_rng = rng;
+}
+
+static void drop_current_rng(void)
+{
+	BUG_ON(!mutex_is_locked(&rng_mutex));
+	if (!current_rng)
+		return;
+
+	/* decrease last reference for triggering the cleanup */
+	kref_put(&current_rng->ref, cleanup_rng);
+	current_rng = NULL;
+}
+
+/* Returns ERR_PTR(), NULL or refcounted hwrng */
+static struct hwrng *get_current_rng(void)
+{
+	struct hwrng *rng;
+
+	if (mutex_lock_interruptible(&rng_mutex))
+		return ERR_PTR(-ERESTARTSYS);
+
+	rng = current_rng;
+	if (rng)
+		kref_get(&rng->ref);
+
+	mutex_unlock(&rng_mutex);
+	return rng;
+}
+
+static void put_rng(struct hwrng *rng)
+{
+	/*
+	 * Hold rng_mutex here so we serialize in case they set_current_rng
+	 * on rng again immediately.
+	 */
+	mutex_lock(&rng_mutex);
+	if (rng)
+		kref_put(&rng->ref, cleanup_rng);
+	mutex_unlock(&rng_mutex);
+}
+
 static inline int hwrng_init(struct hwrng *rng)
 {
 	if (rng->init) {
@@ -113,12 +168,6 @@ static inline int hwrng_init(struct hwrng *rng)
 	return 0;
 }
 
-static inline void hwrng_cleanup(struct hwrng *rng)
-{
-	if (rng && rng->cleanup)
-		rng->cleanup(rng);
-}
-
 static int rng_dev_open(struct inode *inode, struct file *filp)
 {
 	/* enforce read-only access to this chrdev */
@@ -154,21 +203,22 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
 	ssize_t ret = 0;
 	int err = 0;
 	int bytes_read, len;
+	struct hwrng *rng;
 
 	while (size) {
-		if (mutex_lock_interruptible(&rng_mutex)) {
-			err = -ERESTARTSYS;
+		rng = get_current_rng();
+		if (IS_ERR(rng)) {
+			err = PTR_ERR(rng);
 			goto out;
 		}
-
-		if (!current_rng) {
+		if (!rng) {
 			err = -ENODEV;
-			goto out_unlock;
+			goto out;
 		}
 
 		mutex_lock(&reading_mutex);
 		if (!data_avail) {
-			bytes_read = rng_get_data(current_rng, rng_buffer,
+			bytes_read = rng_get_data(rng, rng_buffer,
 					rng_buffer_size(),
 					!(filp->f_flags & O_NONBLOCK));
 			if (bytes_read < 0) {
@@ -200,8 +250,8 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
 			ret += len;
 		}
 
-		mutex_unlock(&rng_mutex);
 		mutex_unlock(&reading_mutex);
+		put_rng(rng);
 
 		if (need_resched())
 			schedule_timeout_interruptible(1);
@@ -213,12 +263,11 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
 	}
 out:
 	return ret ? : err;
-out_unlock:
-	mutex_unlock(&rng_mutex);
-	goto out;
+
 out_unlock_reading:
 	mutex_unlock(&reading_mutex);
-	goto out_unlock;
+	put_rng(rng);
+	goto out;
 }
 
 
@@ -257,8 +306,8 @@ static ssize_t hwrng_attr_current_store(struct device *dev,
 			err = hwrng_init(rng);
 			if (err)
 				break;
-			hwrng_cleanup(current_rng);
-			current_rng = rng;
+			drop_current_rng();
+			set_current_rng(rng);
 			err = 0;
 			break;
 		}
@@ -272,17 +321,15 @@ static ssize_t hwrng_attr_current_show(struct device *dev,
 				       struct device_attribute *attr,
 				       char *buf)
 {
-	int err;
 	ssize_t ret;
-	const char *name = "none";
+	struct hwrng *rng;
 
-	err = mutex_lock_interruptible(&rng_mutex);
-	if (err)
-		return -ERESTARTSYS;
-	if (current_rng)
-		name = current_rng->name;
-	ret = snprintf(buf, PAGE_SIZE, "%s\n", name);
-	mutex_unlock(&rng_mutex);
+	rng = get_current_rng();
+	if (IS_ERR(rng))
+		return PTR_ERR(rng);
+
+	ret = snprintf(buf, PAGE_SIZE, "%s\n", rng ? rng->name : "none");
+	put_rng(rng);
 
 	return ret;
 }
@@ -353,12 +400,16 @@ static int hwrng_fillfn(void *unused)
 	long rc;
 
 	while (!kthread_should_stop()) {
-		if (!current_rng)
+		struct hwrng *rng;
+
+		rng = get_current_rng();
+		if (IS_ERR(rng) || !rng)
 			break;
 		mutex_lock(&reading_mutex);
-		rc = rng_get_data(current_rng, rng_fillbuf,
+		rc = rng_get_data(rng, rng_fillbuf,
 				  rng_buffer_size(), 1);
 		mutex_unlock(&reading_mutex);
+		put_rng(rng);
 		if (rc <= 0) {
 			pr_warn("hwrng: no data available\n");
 			msleep_interruptible(10000);
@@ -419,14 +470,13 @@ int hwrng_register(struct hwrng *rng)
 		err = hwrng_init(rng);
 		if (err)
 			goto out_unlock;
-		current_rng = rng;
+		set_current_rng(rng);
 	}
 	err = 0;
 	if (!old_rng) {
 		err = register_miscdev();
 		if (err) {
-			hwrng_cleanup(rng);
-			current_rng = NULL;
+			drop_current_rng();
 			goto out_unlock;
 		}
 	}
@@ -453,22 +503,21 @@ EXPORT_SYMBOL_GPL(hwrng_register);
 
 void hwrng_unregister(struct hwrng *rng)
 {
-	int err;
-
 	mutex_lock(&rng_mutex);
 
 	list_del(&rng->list);
 	if (current_rng == rng) {
-		hwrng_cleanup(rng);
-		if (list_empty(&rng_list)) {
-			current_rng = NULL;
-		} else {
-			current_rng = list_entry(rng_list.prev, struct hwrng, list);
-			err = hwrng_init(current_rng);
-			if (err)
-				current_rng = NULL;
+		drop_current_rng();
+		if (!list_empty(&rng_list)) {
+			struct hwrng *tail;
+
+			tail = list_entry(rng_list.prev, struct hwrng, list);
+
+			if (hwrng_init(tail) == 0)
+				set_current_rng(tail);
 		}
 	}
+
 	if (list_empty(&rng_list)) {
 		mutex_unlock(&rng_mutex);
 		unregister_miscdev();