author     Omar Sandoval <osandov@fb.com>                   2018-08-22 00:54:59 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2018-08-22 13:52:46 -0400
commit     0b172f845ff963ab15e2d861dc155e2ab13241e9 (patch)
tree       6d3771499f11584251f78cf918eda90ccfc82de1 /fs/proc/kcore.c
parent     bf53183164dbba00b342df7d2215b33007ed83ed (diff)
proc/kcore: replace kclist_lock rwlock with rwsem
Now we only need kclist_lock from user context and at fs init time, and
the following changes need to sleep while holding the kclist_lock.
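For background, the property that matters here is that a rw_semaphore may be held across operations that sleep (for example a GFP_KERNEL allocation or a faulting copy_to_user()), whereas a rwlock is a spinning lock whose holders must not sleep. Below is a minimal sketch of the read-side pattern this patch converts to; the example_* names and the helper are hypothetical stand-ins, not the real kcore structures.

```c
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Hypothetical stand-ins for kclist_head/kclist_lock and struct kcore_list. */
struct example_entry {
	struct list_head list;
	unsigned long addr;
	size_t size;
};

static LIST_HEAD(example_head);
static DECLARE_RWSEM(example_lock);	/* was: static DEFINE_RWLOCK(example_lock); */

/* Copy the entry covering @addr, if any. */
static struct example_entry *example_lookup_copy(unsigned long addr)
{
	struct example_entry *pos, *copy = NULL;

	down_read(&example_lock);		/* was: read_lock(&example_lock); */
	list_for_each_entry(pos, &example_head, list) {
		if (addr >= pos->addr && addr < pos->addr + pos->size) {
			/*
			 * GFP_KERNEL may sleep: legal while holding a rwsem,
			 * not while holding a spinning rwlock.
			 */
			copy = kmemdup(pos, sizeof(*pos), GFP_KERNEL);
			break;
		}
	}
	up_read(&example_lock);			/* was: read_unlock(&example_lock); */

	return copy;
}
```

Writers take down_write()/up_write() in place of write_lock()/write_unlock(), which is the substitution made in __kcore_update_ram() in the diff below; this patch only swaps the lock type, and the callers that actually sleep under it arrive in the follow-up patches.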
Link: http://lkml.kernel.org/r/521ba449ebe921d905177410fee9222d07882f0d.1531953780.git.osandov@fb.com
Signed-off-by: Omar Sandoval <osandov@fb.com>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Alexey Dobriyan <adobriyan@gmail.com>
Cc: Bhupesh Sharma <bhsharma@redhat.com>
Cc: Eric Biederman <ebiederm@xmission.com>
Cc: James Morse <james.morse@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'fs/proc/kcore.c')
-rw-r--r--  fs/proc/kcore.c | 20 ++++++++++----------
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index e83f15a4f66d..ae43a97d511d 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -59,7 +59,7 @@ struct memelfnote
 };
 
 static LIST_HEAD(kclist_head);
-static DEFINE_RWLOCK(kclist_lock);
+static DECLARE_RWSEM(kclist_lock);
 static int kcore_need_update = 1;
 
 /* This doesn't grab kclist_lock, so it should only be used at init time. */
@@ -117,7 +117,7 @@ static void __kcore_update_ram(struct list_head *list)
 	struct kcore_list *tmp, *pos;
 	LIST_HEAD(garbage);
 
-	write_lock(&kclist_lock);
+	down_write(&kclist_lock);
 	if (xchg(&kcore_need_update, 0)) {
 		list_for_each_entry_safe(pos, tmp, &kclist_head, list) {
 			if (pos->type == KCORE_RAM
@@ -128,7 +128,7 @@ static void __kcore_update_ram(struct list_head *list)
 	} else
 		list_splice(list, &garbage);
 	proc_root_kcore->size = get_kcore_size(&nphdr, &size);
-	write_unlock(&kclist_lock);
+	up_write(&kclist_lock);
 
 	free_kclist_ents(&garbage);
 }
@@ -451,11 +451,11 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
 	int nphdr;
 	unsigned long start;
 
-	read_lock(&kclist_lock);
+	down_read(&kclist_lock);
 	size = get_kcore_size(&nphdr, &elf_buflen);
 
 	if (buflen == 0 || *fpos >= size) {
-		read_unlock(&kclist_lock);
+		up_read(&kclist_lock);
 		return 0;
 	}
 
@@ -472,11 +472,11 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
 			tsz = buflen;
 		elf_buf = kzalloc(elf_buflen, GFP_ATOMIC);
 		if (!elf_buf) {
-			read_unlock(&kclist_lock);
+			up_read(&kclist_lock);
 			return -ENOMEM;
 		}
 		elf_kcore_store_hdr(elf_buf, nphdr, elf_buflen);
-		read_unlock(&kclist_lock);
+		up_read(&kclist_lock);
 		if (copy_to_user(buffer, elf_buf + *fpos, tsz)) {
 			kfree(elf_buf);
 			return -EFAULT;
@@ -491,7 +491,7 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
 		if (buflen == 0)
 			return acc;
 	} else
-		read_unlock(&kclist_lock);
+		up_read(&kclist_lock);
 
 	/*
 	 * Check to see if our file offset matches with any of
@@ -504,12 +504,12 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
 	while (buflen) {
 		struct kcore_list *m;
 
-		read_lock(&kclist_lock);
+		down_read(&kclist_lock);
 		list_for_each_entry(m, &kclist_head, list) {
 			if (start >= m->addr && start < (m->addr+m->size))
 				break;
 		}
-		read_unlock(&kclist_lock);
+		up_read(&kclist_lock);
 
 		if (&m->list == &kclist_head) {
 			if (clear_user(buffer, tsz))