author:    Heiko Carstens <heiko.carstens@de.ibm.com>  2011-05-29 06:40:51 -0400
committer: Heiko Carstens <heiko.carstens@de.ibm.com>  2011-05-29 06:40:51 -0400
commit:    3c5cffb66d8ea94832650fcb55194715b0229088 (patch)
tree:      22872361ef884b527855ebe6bf225eaabbae4ca1 /arch/s390
parent:    a43a9d93d40a69eceeb4e4a4c860cc20186d475c (diff)
[S390] mm: fix mmu_gather rework
Quite a few functions that get called from the tlb gather code require that preemption be disabled. So disable preemption inside of the called functions instead.

The only drawback is that rcu_table_freelist_finish() doesn't necessarily get called on the cpu(s) that filled the free lists, so we may see a delay until we finally see an rcu callback. However, over time this shouldn't matter.

So we get rid of lots of "BUG: using smp_processor_id() in preemptible" messages.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
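As background for the change below: __get_cpu_var() assumes the caller has already disabled preemption, whereas get_cpu_var() disables preemption itself and put_cpu_var() re-enables it. A minimal sketch of the reworked rcu_table_freelist_finish() as it appears after this patch (the comments are added here for illustration and are not part of the patch):

void rcu_table_freelist_finish(void)
{
	/*
	 * get_cpu_var() implies preempt_disable(), so this cpu's
	 * per-cpu slot stays stable until put_cpu_var() below.
	 */
	struct rcu_table_freelist **batchp = &get_cpu_var(rcu_table_freelist);
	struct rcu_table_freelist *batch = *batchp;

	if (!batch)
		goto out;
	/* Hand the filled batch to RCU and clear this cpu's slot. */
	call_rcu(&batch->rcu, rcu_table_freelist_callback);
	*batchp = NULL;
out:
	/* put_cpu_var() implies preempt_enable(). */
	put_cpu_var(rcu_table_freelist);
}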
Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/mm/pgtable.c | 23 ++++++++++++++++-------
1 file changed, 16 insertions(+), 7 deletions(-)
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 14c6fae6fe6b..b09763fe5da1 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -71,12 +71,15 @@ static void rcu_table_freelist_callback(struct rcu_head *head)
 
 void rcu_table_freelist_finish(void)
 {
-	struct rcu_table_freelist *batch = __get_cpu_var(rcu_table_freelist);
+	struct rcu_table_freelist **batchp = &get_cpu_var(rcu_table_freelist);
+	struct rcu_table_freelist *batch = *batchp;
 
 	if (!batch)
-		return;
+		goto out;
 	call_rcu(&batch->rcu, rcu_table_freelist_callback);
-	__get_cpu_var(rcu_table_freelist) = NULL;
+	*batchp = NULL;
+out:
+	put_cpu_var(rcu_table_freelist);
 }
 
 static void smp_sync(void *arg)
@@ -141,20 +144,23 @@ void crst_table_free_rcu(struct mm_struct *mm, unsigned long *table)
 {
 	struct rcu_table_freelist *batch;
 
+	preempt_disable();
 	if (atomic_read(&mm->mm_users) < 2 &&
 	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
 		crst_table_free(mm, table);
-		return;
+		goto out;
 	}
 	batch = rcu_table_freelist_get(mm);
 	if (!batch) {
 		smp_call_function(smp_sync, NULL, 1);
 		crst_table_free(mm, table);
-		return;
+		goto out;
 	}
 	batch->table[--batch->crst_index] = table;
 	if (batch->pgt_index >= batch->crst_index)
 		rcu_table_freelist_finish();
+out:
+	preempt_enable();
 }
 
 #ifdef CONFIG_64BIT
@@ -323,16 +329,17 @@ void page_table_free_rcu(struct mm_struct *mm, unsigned long *table)
 	struct page *page;
 	unsigned long bits;
 
+	preempt_disable();
 	if (atomic_read(&mm->mm_users) < 2 &&
 	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
 		page_table_free(mm, table);
-		return;
+		goto out;
 	}
 	batch = rcu_table_freelist_get(mm);
 	if (!batch) {
 		smp_call_function(smp_sync, NULL, 1);
 		page_table_free(mm, table);
-		return;
+		goto out;
 	}
 	bits = (mm->context.has_pgste) ? 3UL : 1UL;
 	bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
@@ -345,6 +352,8 @@ void page_table_free_rcu(struct mm_struct *mm, unsigned long *table)
 	batch->table[batch->pgt_index++] = table;
 	if (batch->pgt_index >= batch->crst_index)
 		rcu_table_freelist_finish();
+out:
+	preempt_enable();
 }
 
 /*
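Both free paths now follow the same shape. A minimal sketch of that pattern, with free_one() as a hypothetical stand-in for crst_table_free()/page_table_free() (names here are illustrative, not from the patch):

void table_free_rcu_pattern(struct mm_struct *mm, unsigned long *table)
{
	struct rcu_table_freelist *batch;

	preempt_disable();	/* smp_processor_id() is now safe to use */
	if (atomic_read(&mm->mm_users) < 2 &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
		free_one(mm, table);	/* hypothetical direct-free helper */
		goto out;
	}
	batch = rcu_table_freelist_get(mm);
	if (!batch) {
		/* no batch available: synchronize the other cpus and free directly */
		smp_call_function(smp_sync, NULL, 1);
		free_one(mm, table);
		goto out;
	}
	/* ... queue the table on the batch, flush when full ... */
out:
	preempt_enable();	/* every exit path re-enables preemption */
}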