author     Tejun Heo <tj@kernel.org>   2009-08-14 01:41:02 -0400
committer  Tejun Heo <tj@kernel.org>   2009-08-14 01:45:31 -0400
commit     384be2b18a5f9475eab9ca2bdfa95cc1a04ef59c (patch)
tree       04c93f391a1b65c8bf8d7ba8643c07d26c26590a /arch/s390/include
parent     a76761b621bcd8336065c4fe3a74f046858bc34c (diff)
parent     142d44b0dd6741a64a7bdbe029110e7c1dcf1d23 (diff)
Merge branch 'percpu-for-linus' into percpu-for-next
Conflicts:
arch/sparc/kernel/smp_64.c
arch/x86/kernel/cpu/perf_counter.c
arch/x86/kernel/setup_percpu.c
drivers/cpufreq/cpufreq_ondemand.c
mm/percpu.c
Conflicts in the core and arch percpu code mostly come from commit
ed78e1e078dd44249f88b1dd8c76dafb39567161, which substituted nr_cpu_ids
for many uses of num_possible_cpus(). As the for-next branch has moved
all the first chunk allocators into mm/percpu.c, those changes are
moved from the arch code into mm/percpu.c.
Signed-off-by: Tejun Heo <tj@kernel.org>
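For readers outside the percpu work, the substitution behind most of these conflicts is easy to illustrate. The sketch below is illustrative only and not code from this merge; it uses the real kernel symbols nr_cpu_ids, num_possible_cpus() and NR_CPUS, while the array and helper are made up for this note.

/*
 * Illustrative sketch (not part of this merge): code that sizes or walks
 * arrays indexed by CPU id should use nr_cpu_ids, not num_possible_cpus().
 * With a sparse possible mask such as {0, 2, 5}, num_possible_cpus() is 3
 * but the highest CPU id is 5, so nr_cpu_ids (highest possible id + 1) == 6
 * slots are needed.
 */
static void *slot[NR_CPUS];

static size_t percpu_slots_bytes(void)
{
	/* covers every possible CPU id, even with holes in the mask */
	return nr_cpu_ids * sizeof(slot[0]);
	/* num_possible_cpus() * sizeof(slot[0]) could be too small here */
}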
Diffstat (limited to 'arch/s390/include')
-rw-r--r--  arch/s390/include/asm/atomic.h        | 7
-rw-r--r--  arch/s390/include/asm/perf_counter.h  | 2
-rw-r--r--  arch/s390/include/asm/thread_info.h   | 2
-rw-r--r--  arch/s390/include/asm/tlb.h           | 9
4 files changed, 15 insertions, 5 deletions
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index fca9dffcc669..c7d0abfb0f00 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -268,7 +268,12 @@ static __inline__ int atomic64_add_unless(atomic64_t *v,
 #define atomic64_inc_not_zero(v)  atomic64_add_unless((v), 1, 0)
 
 #undef __CSG_LOOP
-#endif
+
+#else /* __s390x__ */
+
+#include <asm-generic/atomic64.h>
+
+#endif /* __s390x__ */
 
 #define smp_mb__before_atomic_dec()	smp_mb()
 #define smp_mb__after_atomic_dec()	smp_mb()
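The net effect of the atomic.h hunk is sketched below in abridged form; the surrounding structure is an assumption for illustration, not the literal file contents. 64-bit builds keep the existing hand-written atomic64_t operations, while 31-bit builds now pull in the generic software implementation from asm-generic/atomic64.h instead of lacking atomic64_t entirely.

/* Abridged sketch of how asm/atomic.h is laid out after this hunk;
 * the real header defines the full atomic64_* API inside the #ifdef. */
#ifdef __s390x__
/* ... native atomic64_t operations built around the CSG loop ... */
#undef __CSG_LOOP

#else /* __s390x__ */

#include <asm-generic/atomic64.h>	/* generic fallback for 31-bit builds */

#endif /* __s390x__ */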
diff --git a/arch/s390/include/asm/perf_counter.h b/arch/s390/include/asm/perf_counter.h
index a7205a3828cb..7015188c2cc2 100644
--- a/arch/s390/include/asm/perf_counter.h
+++ b/arch/s390/include/asm/perf_counter.h
@@ -6,3 +6,5 @@
 
 static inline void set_perf_counter_pending(void) {}
 static inline void clear_perf_counter_pending(void) {}
+
+#define PERF_COUNTER_INDEX_OFFSET 0
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index 925bcc649035..ba1cab9fc1f9 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -61,7 +61,7 @@ struct thread_info {
 	.exec_domain	= &default_exec_domain,	\
 	.flags		= 0,			\
 	.cpu		= 0,			\
-	.preempt_count	= 1,			\
+	.preempt_count	= INIT_PREEMPT_COUNT,	\
 	.restart_block	= {			\
 		.fn = do_no_restart_syscall,	\
 	},					\
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index 3d8a96d39d9d..81150b053689 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -96,7 +96,8 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
  * pte_free_tlb frees a pte table and clears the CRSTE for the
  * page table from the tlb.
  */
-static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte)
+static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
+				unsigned long address)
 {
 	if (!tlb->fullmm) {
 		tlb->array[tlb->nr_ptes++] = pte;
@@ -113,7 +114,8 @@ static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte)
  * as the pgd. pmd_free_tlb checks the asce_limit against 2GB
  * to avoid the double free of the pmd in this case.
  */
-static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
+static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
+				unsigned long address)
 {
 #ifdef __s390x__
 	if (tlb->mm->context.asce_limit <= (1UL << 31))
@@ -134,7 +136,8 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
  * as the pgd. pud_free_tlb checks the asce_limit against 4TB
  * to avoid the double free of the pud in this case.
  */
-static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
+static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
+				unsigned long address)
 {
 #ifdef __s390x__
 	if (tlb->mm->context.asce_limit <= (1UL << 42))
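For context on the new unsigned long address parameter: it mirrors a generic mm change that passes the virtual address of the range whose page-table page is being freed down to the architecture hooks. The s390 bodies above do not use it, but they must accept it so the generic teardown code can pass it. Below is a hedged sketch of such a caller, loosely modeled on free_pte_range() in mm/memory.c of that era; names and details are recalled from memory, not taken from this patch.

static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
			   unsigned long addr)
{
	pgtable_t token = pmd_pgtable(*pmd);

	pmd_clear(pmd);
	/* the address of the range being torn down now reaches the arch hook */
	pte_free_tlb(tlb, token, addr);
	tlb->mm->nr_ptes--;
}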