| field | value | date |
|---|---|---|
| author | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2008-07-15 21:07:59 -0400 |
| committer | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2008-07-15 21:07:59 -0400 |
| commit | 84c3d4aaec3338201b449034beac41635866bddf (patch) | |
| tree | 3412951682fb2dd4feb8a5532f8efbaf8b345933 /arch/s390/kernel | |
| parent | 43d2548bb2ef7e6d753f91468a746784041e522d (diff) | |
| parent | fafa3a3f16723997f039a0193997464d66dafd8f (diff) | |
Merge commit 'origin/master'
Manual merge of:
arch/powerpc/Kconfig
arch/powerpc/kernel/stacktrace.c
arch/powerpc/mm/slice.c
arch/ppc/kernel/smp.c
Diffstat (limited to 'arch/s390/kernel')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | arch/s390/kernel/smp.c | 22 |
| -rw-r--r-- | arch/s390/kernel/stacktrace.c | 2 |
| -rw-r--r-- | arch/s390/kernel/time.c | 10 |
3 files changed, 16 insertions, 18 deletions
```diff
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 5d4fa4b1c74c..b6781030cfbd 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -109,7 +109,7 @@ static void do_call_function(void)
 }
 
 static void __smp_call_function_map(void (*func) (void *info), void *info,
-			int nonatomic, int wait, cpumask_t map)
+			int wait, cpumask_t map)
 {
 	struct call_data_struct data;
 	int cpu, local = 0;
@@ -162,7 +162,6 @@ out:
  * smp_call_function:
  * @func: the function to run; this must be fast and non-blocking
  * @info: an arbitrary pointer to pass to the function
- * @nonatomic: unused
  * @wait: if true, wait (atomically) until function has completed on other CPUs
  *
  * Run a function on all other CPUs.
@@ -170,15 +169,14 @@ out:
  * You must not call this function with disabled interrupts, from a
  * hardware interrupt handler or from a bottom half.
  */
-int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
-		      int wait)
+int smp_call_function(void (*func) (void *info), void *info, int wait)
 {
 	cpumask_t map;
 
 	spin_lock(&call_lock);
 	map = cpu_online_map;
 	cpu_clear(smp_processor_id(), map);
-	__smp_call_function_map(func, info, nonatomic, wait, map);
+	__smp_call_function_map(func, info, wait, map);
 	spin_unlock(&call_lock);
 	return 0;
 }
@@ -189,7 +187,6 @@ EXPORT_SYMBOL(smp_call_function);
  * @cpu: the CPU where func should run
  * @func: the function to run; this must be fast and non-blocking
  * @info: an arbitrary pointer to pass to the function
- * @nonatomic: unused
  * @wait: if true, wait (atomically) until function has completed on other CPUs
  *
  * Run a function on one processor.
@@ -198,11 +195,10 @@ EXPORT_SYMBOL(smp_call_function);
  * hardware interrupt handler or from a bottom half.
  */
 int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
-			     int nonatomic, int wait)
+			     int wait)
 {
 	spin_lock(&call_lock);
-	__smp_call_function_map(func, info, nonatomic, wait,
-				cpumask_of_cpu(cpu));
+	__smp_call_function_map(func, info, wait, cpumask_of_cpu(cpu));
 	spin_unlock(&call_lock);
 	return 0;
 }
@@ -228,7 +224,7 @@ int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
 {
 	spin_lock(&call_lock);
 	cpu_clear(smp_processor_id(), mask);
-	__smp_call_function_map(func, info, 0, wait, mask);
+	__smp_call_function_map(func, info, wait, mask);
 	spin_unlock(&call_lock);
 	return 0;
 }
@@ -303,7 +299,7 @@ static void smp_ptlb_callback(void *info)
 
 void smp_ptlb_all(void)
 {
-	on_each_cpu(smp_ptlb_callback, NULL, 0, 1);
+	on_each_cpu(smp_ptlb_callback, NULL, 1);
 }
 EXPORT_SYMBOL(smp_ptlb_all);
 #endif /* ! CONFIG_64BIT */
@@ -351,7 +347,7 @@ void smp_ctl_set_bit(int cr, int bit)
 	memset(&parms.orvals, 0, sizeof(parms.orvals));
 	memset(&parms.andvals, 0xff, sizeof(parms.andvals));
 	parms.orvals[cr] = 1 << bit;
-	on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
+	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
 }
 EXPORT_SYMBOL(smp_ctl_set_bit);
 
@@ -365,7 +361,7 @@ void smp_ctl_clear_bit(int cr, int bit)
 	memset(&parms.orvals, 0, sizeof(parms.orvals));
 	memset(&parms.andvals, 0xff, sizeof(parms.andvals));
 	parms.andvals[cr] = ~(1L << bit);
-	on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
+	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
 }
 EXPORT_SYMBOL(smp_ctl_clear_bit);
 
```
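The smp.c hunks above drop the unused `nonatomic` argument from `smp_call_function()`, `smp_call_function_single()` and the `on_each_cpu()` callers, leaving only the function, its `info` cookie and the `wait` flag. A minimal sketch of how a caller changes under this signature; `flush_example()` and `trigger_flush()` are made-up names for illustration, not functions from this commit:

```c
#include <linux/smp.h>

/* Hypothetical callback: must be fast and non-blocking (runs in IPI context). */
static void flush_example(void *info)
{
	/* per-CPU work would go here */
}

/* Must not be called with interrupts disabled or from interrupt context. */
static void trigger_flush(void)
{
	/*
	 * Before this merge the calls carried an extra, unused 'nonatomic'
	 * argument:
	 *     smp_call_function(flush_example, NULL, 0, 1);
	 *     on_each_cpu(flush_example, NULL, 0, 1);
	 * Afterwards only func, info and wait remain:
	 */
	smp_call_function(flush_example, NULL, 1);	/* all other online CPUs, wait */
	on_each_cpu(flush_example, NULL, 1);		/* other CPUs plus this one, wait */
}
```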
```diff
diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c
index 85e46a5d0e08..57571f10270c 100644
--- a/arch/s390/kernel/stacktrace.c
+++ b/arch/s390/kernel/stacktrace.c
@@ -81,6 +81,7 @@ void save_stack_trace(struct stack_trace *trace)
 			S390_lowcore.thread_info,
 			S390_lowcore.thread_info + THREAD_SIZE, 1);
 }
+EXPORT_SYMBOL_GPL(save_stack_trace);
 
 void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
 {
@@ -93,3 +94,4 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
 	if (trace->nr_entries < trace->max_entries)
 		trace->entries[trace->nr_entries++] = ULONG_MAX;
 }
+EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
```
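The stacktrace.c change only adds `EXPORT_SYMBOL_GPL` for `save_stack_trace()` and `save_stack_trace_tsk()`, making them callable from GPL modules on s390. A minimal sketch of a module-side caller, assuming the `struct stack_trace` interface of this kernel generation (`entries`, `max_entries`, `nr_entries`, `skip`); the helper name and trace depth are invented for illustration:

```c
#include <linux/kernel.h>
#include <linux/stacktrace.h>

#define DEMO_DEPTH 16	/* arbitrary example depth */

static void demo_dump_stack(void)
{
	unsigned long entries[DEMO_DEPTH];
	struct stack_trace trace = {
		.entries	= entries,
		.max_entries	= DEMO_DEPTH,
		.skip		= 1,	/* omit this helper itself from the trace */
	};
	unsigned int i;

	save_stack_trace(&trace);	/* now usable from GPL modules */
	for (i = 0; i < trace.nr_entries; i++) {
		if (trace.entries[i] == ULONG_MAX)
			break;	/* the s390 implementation terminates with ULONG_MAX */
		printk(KERN_DEBUG "  [<%016lx>]\n", trace.entries[i]);
	}
}
```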
```diff
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 7418bebb547f..f2cede3947b2 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -707,7 +707,7 @@ static int etr_sync_clock(struct etr_aib *aib, int port)
 	 */
 	memset(&etr_sync, 0, sizeof(etr_sync));
 	preempt_disable();
-	smp_call_function(clock_sync_cpu_start, &etr_sync, 0, 0);
+	smp_call_function(clock_sync_cpu_start, &etr_sync, 0);
 	local_irq_disable();
 	enable_sync_clock();
 
@@ -746,7 +746,7 @@ static int etr_sync_clock(struct etr_aib *aib, int port)
 		rc = -EAGAIN;
 	}
 	local_irq_enable();
-	smp_call_function(clock_sync_cpu_end, NULL, 0, 0);
+	smp_call_function(clock_sync_cpu_end, NULL, 0);
 	preempt_enable();
 	return rc;
 }
@@ -926,7 +926,7 @@ static void etr_work_fn(struct work_struct *work)
 	if (!eacr.ea) {
 		/* Both ports offline. Reset everything. */
 		eacr.dp = eacr.es = eacr.sl = 0;
-		on_each_cpu(disable_sync_clock, NULL, 0, 1);
+		on_each_cpu(disable_sync_clock, NULL, 1);
 		del_timer_sync(&etr_timer);
 		etr_update_eacr(eacr);
 		clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags);
@@ -1432,7 +1432,7 @@ static void stp_work_fn(struct work_struct *work)
 	 */
 	memset(&stp_sync, 0, sizeof(stp_sync));
 	preempt_disable();
-	smp_call_function(clock_sync_cpu_start, &stp_sync, 0, 0);
+	smp_call_function(clock_sync_cpu_start, &stp_sync, 0);
 	local_irq_disable();
 	enable_sync_clock();
 
@@ -1465,7 +1465,7 @@ static void stp_work_fn(struct work_struct *work)
 		stp_sync.in_sync = 1;
 
 	local_irq_enable();
-	smp_call_function(clock_sync_cpu_end, NULL, 0, 0);
+	smp_call_function(clock_sync_cpu_end, NULL, 0);
 	preempt_enable();
 }
 
```