author    Tejun Heo <tj@kernel.org>    2014-09-02 14:46:05 -0400
committer Tejun Heo <tj@kernel.org>    2014-09-02 14:46:05 -0400
commit    fe6bd8c3d28357174587c4fe895d10b00321b692 (patch)
tree      a8425e061a985f998573d819a3092bc201c2a525
parent    b539b87fed37ffc16c89a6bc3beca2d7aed82e1c (diff)
percpu: rename pcpu_reclaim_work to pcpu_balance_work
pcpu_reclaim_work will also be used to populate chunks asynchronously.
Rename it to pcpu_balance_work in preparation.  pcpu_reclaim() is
renamed to pcpu_balance_workfn() and some of its local variables are
renamed too.

This is pure rename.

Signed-off-by: Tejun Heo <tj@kernel.org>
-rw-r--r--    mm/percpu.c    27
1 file changed, 12 insertions(+), 15 deletions(-)
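For context, the patch operates on the kernel's static workqueue pattern: a work item declared with DECLARE_WORK() is queued with schedule_work(), and its handler later runs asynchronously in workqueue (process) context. Below is a minimal sketch of that pattern; the *_example identifiers are hypothetical stand-ins, and only the DECLARE_WORK()/schedule_work() usage mirrors the code being renamed.

/* Sketch only: the _example names are illustrative, not from the patch. */
#include <linux/workqueue.h>

static void balance_example_workfn(struct work_struct *work)
{
	/* runs later, asynchronously, in workqueue context */
}

static DECLARE_WORK(balance_example_work, balance_example_workfn);

static void free_path_example(void)
{
	/* queueing is cheap and non-blocking, so it is safe from a hot free path */
	schedule_work(&balance_example_work);
}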
diff --git a/mm/percpu.c b/mm/percpu.c
index 4f2d58760c9c..28a830590b4c 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -168,9 +168,9 @@ static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
  */
 static int pcpu_nr_empty_pop_pages;
 
-/* reclaim work to release fully free chunks, scheduled from free path */
-static void pcpu_reclaim(struct work_struct *work);
-static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim);
+/* balance work is used to populate or destroy chunks asynchronously */
+static void pcpu_balance_workfn(struct work_struct *work);
+static DECLARE_WORK(pcpu_balance_work, pcpu_balance_workfn);
 
 static bool pcpu_addr_in_first_chunk(void *addr)
 {
@@ -1080,36 +1080,33 @@ void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
 }
 
 /**
- * pcpu_reclaim - reclaim fully free chunks, workqueue function
+ * pcpu_balance_workfn - reclaim fully free chunks, workqueue function
  * @work: unused
  *
  * Reclaim all fully free chunks except for the first one.
- *
- * CONTEXT:
- * workqueue context.
  */
-static void pcpu_reclaim(struct work_struct *work)
+static void pcpu_balance_workfn(struct work_struct *work)
 {
-	LIST_HEAD(todo);
-	struct list_head *head = &pcpu_slot[pcpu_nr_slots - 1];
+	LIST_HEAD(to_free);
+	struct list_head *free_head = &pcpu_slot[pcpu_nr_slots - 1];
 	struct pcpu_chunk *chunk, *next;
 
 	mutex_lock(&pcpu_alloc_mutex);
 	spin_lock_irq(&pcpu_lock);
 
-	list_for_each_entry_safe(chunk, next, head, list) {
+	list_for_each_entry_safe(chunk, next, free_head, list) {
 		WARN_ON(chunk->immutable);
 
 		/* spare the first one */
-		if (chunk == list_first_entry(head, struct pcpu_chunk, list))
+		if (chunk == list_first_entry(free_head, struct pcpu_chunk, list))
 			continue;
 
-		list_move(&chunk->list, &todo);
+		list_move(&chunk->list, &to_free);
 	}
 
 	spin_unlock_irq(&pcpu_lock);
 
-	list_for_each_entry_safe(chunk, next, &todo, list) {
+	list_for_each_entry_safe(chunk, next, &to_free, list) {
 		int rs, re;
 
 		pcpu_for_each_pop_region(chunk, rs, re, 0, pcpu_unit_pages) {
@@ -1163,7 +1160,7 @@ void free_percpu(void __percpu *ptr)
 
 		list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
 			if (pos != chunk) {
-				schedule_work(&pcpu_reclaim_work);
+				schedule_work(&pcpu_balance_work);
 				break;
 			}
 	}
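Beyond the rename, pcpu_balance_workfn() illustrates a locking pattern worth noting: candidates are spliced onto a private list while holding the spinlock, and the slow per-entry work happens only after the lock is dropped. A hedged sketch of that pattern follows; the item_example/lock_example names are assumptions for illustration, not part of the patch.

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct item_example {
	struct list_head list;
};

static LIST_HEAD(live_items_example);
static DEFINE_SPINLOCK(lock_example);

static void drain_example(void)
{
	struct item_example *item, *next;
	LIST_HEAD(to_free);	/* private list, needs no lock once filled */

	/* phase 1: pick candidates quickly while holding the lock */
	spin_lock_irq(&lock_example);
	list_for_each_entry_safe(item, next, &live_items_example, list)
		list_move(&item->list, &to_free);
	spin_unlock_irq(&lock_example);

	/* phase 2: expensive per-item teardown with the lock dropped */
	list_for_each_entry_safe(item, next, &to_free, list) {
		list_del(&item->list);
		kfree(item);
	}
}

The workfn in the diff does exactly this: it moves fully free chunks onto to_free under pcpu_lock, drops the lock, and only then walks each chunk's populated regions.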