author     Ingo Molnar <mingo@elte.hu>   2009-04-03 10:35:09 -0400
committer  Ingo Molnar <mingo@elte.hu>   2009-04-03 10:35:09 -0400
commit     484cad34dd667235565c14a40e2f5a8143184aaa (patch)
tree       70bb901671958960a64d7f383c902178b4b49558 /arch
parent     95a38f34635bdf06089de763b4becbc957694977 (diff)
parent     67796bf7dc54c035fd97f2681a72e5d2bf2a234a (diff)
Merge branch 'dma-debug' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/linux-2.6-iommu into x86/urgent
Diffstat (limited to 'arch')
128 files changed, 3744 insertions, 460 deletions
diff --git a/arch/alpha/include/asm/spinlock.h b/arch/alpha/include/asm/spinlock.h
index aeeb125f6851..e38fb95cb335 100644
--- a/arch/alpha/include/asm/spinlock.h
+++ b/arch/alpha/include/asm/spinlock.h
@@ -166,6 +166,9 @@ static inline void __raw_write_unlock(raw_rwlock_t * lock)
 lock->lock = 0;
 }

+#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
+#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+
 #define _raw_spin_relax(lock) cpu_relax()
 #define _raw_read_relax(lock) cpu_relax()
 #define _raw_write_relax(lock) cpu_relax()
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
index 8d0097f10208..3a2fb7a02db4 100644
--- a/arch/alpha/kernel/process.c
+++ b/arch/alpha/kernel/process.c
@@ -272,7 +272,7 @@ alpha_vfork(struct pt_regs *regs)
 */

 int
-copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
+copy_thread(unsigned long clone_flags, unsigned long usp,
 unsigned long unused,
 struct task_struct * p, struct pt_regs * regs)
 {
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index 2b41ebbfa7ff..c13681ac1ede 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -217,6 +217,9 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw)
 /* read_can_lock - would read_trylock() succeed? */
 #define __raw_read_can_lock(x) ((x)->lock < 0x80000000)

+#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
+#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+
 #define _raw_spin_relax(lock) cpu_relax()
 #define _raw_read_relax(lock) cpu_relax()
 #define _raw_write_relax(lock) cpu_relax()
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 2de14e2afdc5..c3265a2e7cd4 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -301,7 +301,7 @@ void release_thread(struct task_struct *dead_task)
 asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");

 int
-copy_thread(int nr, unsigned long clone_flags, unsigned long stack_start,
+copy_thread(unsigned long clone_flags, unsigned long stack_start,
 unsigned long stk_sz, struct task_struct *p, struct pt_regs *regs)
 {
 struct thread_info *thread = task_thread_info(p);
diff --git a/arch/avr32/kernel/process.c b/arch/avr32/kernel/process.c
index 43ae555ecb33..1bbe1da54869 100644
--- a/arch/avr32/kernel/process.c
+++ b/arch/avr32/kernel/process.c
@@ -332,7 +332,7 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)

 asmlinkage void ret_from_fork(void);

-int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
+int copy_thread(unsigned long clone_flags, unsigned long usp,
 unsigned long unused,
 struct task_struct *p, struct pt_regs *regs)
 {
diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c
index 33e2e8993f7f..f49427293ca1 100644
--- a/arch/blackfin/kernel/process.c
+++ b/arch/blackfin/kernel/process.c
@@ -193,7 +193,7 @@ asmlinkage int bfin_clone(struct pt_regs *regs)
 }

 int
-copy_thread(int nr, unsigned long clone_flags,
+copy_thread(unsigned long clone_flags,
 unsigned long usp, unsigned long topstk,
 struct task_struct *p, struct pt_regs *regs)
 {
diff --git a/arch/cris/arch-v10/kernel/process.c b/arch/cris/arch-v10/kernel/process.c
index bd9b3ff63f6c..c4c69cf721e5 100644
--- a/arch/cris/arch-v10/kernel/process.c
+++ b/arch/cris/arch-v10/kernel/process.c
@@ -115,7 +115,7 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
 */
 asmlinkage void ret_from_fork(void);

-int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
+int copy_thread(unsigned long clone_flags, unsigned long usp,
 unsigned long unused,
 struct task_struct *p, struct pt_regs *regs)
 {
diff --git a/arch/cris/arch-v32/kernel/process.c b/arch/cris/arch-v32/kernel/process.c
index ced5b725d9bd..120e7f796fea 100644
--- a/arch/cris/arch-v32/kernel/process.c
+++ b/arch/cris/arch-v32/kernel/process.c
@@ -131,7 +131,7 @@ kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
 extern asmlinkage void ret_from_fork(void);

 int
-copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
+copy_thread(unsigned long clone_flags, unsigned long usp,
 unsigned long unused,
 struct task_struct *p, struct pt_regs *regs)
 {
diff --git a/arch/cris/include/arch-v32/arch/spinlock.h b/arch/cris/include/arch-v32/arch/spinlock.h
index 0d5709b983a1..129756b96661 100644
--- a/arch/cris/include/arch-v32/arch/spinlock.h
+++ b/arch/cris/include/arch-v32/arch/spinlock.h
@@ -121,6 +121,8 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
 return 1;
 }

+#define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock)
+#define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock)

 #define _raw_spin_relax(lock) cpu_relax()
 #define _raw_read_relax(lock) cpu_relax()
diff --git a/arch/cris/kernel/process.c b/arch/cris/kernel/process.c
index 60816e876455..4df0b320d524 100644
--- a/arch/cris/kernel/process.c
+++ b/arch/cris/kernel/process.c
@@ -19,7 +19,6 @@
 #include <asm/system.h>
 #include <linux/module.h>
 #include <linux/spinlock.h>
-#include <linux/fs_struct.h>
 #include <linux/init_task.h>
 #include <linux/sched.h>
 #include <linux/fs.h>
diff --git a/arch/frv/kernel/process.c b/arch/frv/kernel/process.c
index 9583a338e9d6..0de50df74970 100644
--- a/arch/frv/kernel/process.c
+++ b/arch/frv/kernel/process.c
@@ -204,7 +204,7 @@ void prepare_to_copy(struct task_struct *tsk)
 /*
 * set up the kernel stack and exception frames for a new process
 */
-int copy_thread(int nr, unsigned long clone_flags,
+int copy_thread(unsigned long clone_flags,
 unsigned long usp, unsigned long topstk,
 struct task_struct *p, struct pt_regs *regs)
 {
diff --git a/arch/h8300/kernel/process.c b/arch/h8300/kernel/process.c
index a8ef654a5a0b..e2f33d0f9969 100644
--- a/arch/h8300/kernel/process.c
+++ b/arch/h8300/kernel/process.c
@@ -191,7 +191,7 @@ asmlinkage int h8300_clone(struct pt_regs *regs)

 }

-int copy_thread(int nr, unsigned long clone_flags,
+int copy_thread(unsigned long clone_flags,
 unsigned long usp, unsigned long topstk,
 struct task_struct * p, struct pt_regs * regs)
 {
diff --git a/arch/ia64/configs/generic_defconfig b/arch/ia64/configs/generic_defconfig
index a109db30ce55..75645495c2dd 100644
--- a/arch/ia64/configs/generic_defconfig
+++ b/arch/ia64/configs/generic_defconfig
@@ -193,7 +193,6 @@ CONFIG_BOUNCE=y
 CONFIG_NR_QUICK=1
 CONFIG_VIRT_TO_BUS=y
 CONFIG_UNEVICTABLE_LRU=y
-CONFIG_MMU_NOTIFIER=y
 CONFIG_ARCH_SELECT_MEMORY_MODEL=y
 CONFIG_ARCH_DISCONTIGMEM_ENABLE=y
 CONFIG_ARCH_FLATMEM_ENABLE=y
@@ -416,8 +415,6 @@ CONFIG_SGI_IOC4=y
 # CONFIG_ENCLOSURE_SERVICES is not set
 CONFIG_SGI_XP=m
 # CONFIG_HP_ILO is not set
-CONFIG_SGI_GRU=m
-# CONFIG_SGI_GRU_DEBUG is not set
 # CONFIG_C2PORT is not set
 CONFIG_HAVE_IDE=y
 CONFIG_IDE=y
diff --git a/arch/ia64/include/asm/intrinsics.h b/arch/ia64/include/asm/intrinsics.h
index c47830e26cb7..111ed5222892 100644
--- a/arch/ia64/include/asm/intrinsics.h
+++ b/arch/ia64/include/asm/intrinsics.h
@@ -202,7 +202,11 @@ extern long ia64_cmpxchg_called_with_bad_pointer (void);

 #ifndef __ASSEMBLY__
 #if defined(CONFIG_PARAVIRT) && defined(__KERNEL__)
-#define IA64_INTRINSIC_API(name) pv_cpu_ops.name
+#ifdef ASM_SUPPORTED
+# define IA64_INTRINSIC_API(name) paravirt_ ## name
+#else
+# define IA64_INTRINSIC_API(name) pv_cpu_ops.name
+#endif
 #define IA64_INTRINSIC_MACRO(name) paravirt_ ## name
 #else
 #define IA64_INTRINSIC_API(name) ia64_native_ ## name
diff --git a/arch/ia64/include/asm/mmu_context.h b/arch/ia64/include/asm/mmu_context.h
index 040bc87db930..7f2a456603cb 100644
--- a/arch/ia64/include/asm/mmu_context.h
+++ b/arch/ia64/include/asm/mmu_context.h
@@ -87,7 +87,7 @@ get_mmu_context (struct mm_struct *mm)
 /* re-check, now that we've got the lock: */
 context = mm->context;
 if (context == 0) {
-cpus_clear(mm->cpu_vm_mask);
+cpumask_clear(mm_cpumask(mm));
 if (ia64_ctx.next >= ia64_ctx.limit) {
 ia64_ctx.next = find_next_zero_bit(ia64_ctx.bitmap,
 ia64_ctx.max_ctx, ia64_ctx.next);
@@ -166,8 +166,8 @@ activate_context (struct mm_struct *mm)

 do {
 context = get_mmu_context(mm);
-if (!cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
-cpu_set(smp_processor_id(), mm->cpu_vm_mask);
+if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
+cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
 reload_context(context);
 /*
 * in the unlikely event of a TLB-flush by another thread,
diff --git a/arch/ia64/include/asm/module.h b/arch/ia64/include/asm/module.h
index d2da61e4c49b..908eaef42a08 100644
--- a/arch/ia64/include/asm/module.h
+++ b/arch/ia64/include/asm/module.h
@@ -16,6 +16,12 @@ struct mod_arch_specific {
 struct elf64_shdr *got; /* global offset table */
 struct elf64_shdr *opd; /* official procedure descriptors */
 struct elf64_shdr *unwind; /* unwind-table section */
+#ifdef CONFIG_PARAVIRT
+struct elf64_shdr *paravirt_bundles;
+/* paravirt_alt_bundle_patch table */
+struct elf64_shdr *paravirt_insts;
+/* paravirt_alt_inst_patch table */
+#endif
 unsigned long gp; /* global-pointer for module */

 void *core_unw_table; /* core unwind-table cookie returned by unwinder */
diff --git a/arch/ia64/include/asm/native/inst.h b/arch/ia64/include/asm/native/inst.h
index 0a1026cca4fa..d2d46efb3e6e 100644
--- a/arch/ia64/include/asm/native/inst.h
+++ b/arch/ia64/include/asm/native/inst.h
@@ -30,6 +30,9 @@
 #define __paravirt_work_processed_syscall_target \
 ia64_work_processed_syscall

+#define paravirt_fsyscall_table ia64_native_fsyscall_table
+#define paravirt_fsys_bubble_down ia64_native_fsys_bubble_down
+
 #ifdef CONFIG_PARAVIRT_GUEST_ASM_CLOBBER_CHECK
 # define PARAVIRT_POISON 0xdeadbeefbaadf00d
 # define CLOBBER(clob) \
@@ -74,6 +77,11 @@
 (pred) mov reg = psr \
 CLOBBER(clob)

+#define MOV_FROM_ITC(pred, pred_clob, reg, clob) \
+(pred) mov reg = ar.itc \
+CLOBBER(clob) \
+CLOBBER_PRED(pred_clob)
+
 #define MOV_TO_IFA(reg, clob) \
 mov cr.ifa = reg \
 CLOBBER(clob)
@@ -158,6 +166,11 @@
 #define RSM_PSR_DT \
 rsm psr.dt

+#define RSM_PSR_BE_I(clob0, clob1) \
+rsm psr.be | psr.i \
+CLOBBER(clob0) \
+CLOBBER(clob1)
+
 #define SSM_PSR_DT_AND_SRLZ_I \
 ssm psr.dt \
 ;; \
diff --git a/arch/ia64/include/asm/native/patchlist.h b/arch/ia64/include/asm/native/patchlist.h
new file mode 100644
index 000000000000..be16ca9311bf
--- /dev/null
+++ b/arch/ia64/include/asm/native/patchlist.h
@@ -0,0 +1,38 @@
+/******************************************************************************
+ * arch/ia64/include/asm/native/inst.h
+ *
+ * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
+ * VA Linux Systems Japan K.K.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#define __paravirt_start_gate_fsyscall_patchlist \
+__ia64_native_start_gate_fsyscall_patchlist
+#define __paravirt_end_gate_fsyscall_patchlist \
+__ia64_native_end_gate_fsyscall_patchlist
+#define __paravirt_start_gate_brl_fsys_bubble_down_patchlist \
+__ia64_native_start_gate_brl_fsys_bubble_down_patchlist
+#define __paravirt_end_gate_brl_fsys_bubble_down_patchlist \
+__ia64_native_end_gate_brl_fsys_bubble_down_patchlist
+#define __paravirt_start_gate_vtop_patchlist \
+__ia64_native_start_gate_vtop_patchlist
+#define __paravirt_end_gate_vtop_patchlist \
+__ia64_native_end_gate_vtop_patchlist
+#define __paravirt_start_gate_mckinley_e9_patchlist \
+__ia64_native_start_gate_mckinley_e9_patchlist
+#define __paravirt_end_gate_mckinley_e9_patchlist \
+__ia64_native_end_gate_mckinley_e9_patchlist
diff --git a/arch/ia64/include/asm/native/pvchk_inst.h b/arch/ia64/include/asm/native/pvchk_inst.h
index b8e6eb1090d7..8d72962ec838 100644
--- a/arch/ia64/include/asm/native/pvchk_inst.h
+++ b/arch/ia64/include/asm/native/pvchk_inst.h
@@ -180,6 +180,11 @@
 IS_PRED_IN(pred) \
 IS_RREG_OUT(reg) \
 IS_RREG_CLOB(clob)
+#define MOV_FROM_ITC(pred, pred_clob, reg, clob) \
+IS_PRED_IN(pred) \
+IS_PRED_CLOB(pred_clob) \
+IS_RREG_OUT(reg) \
+IS_RREG_CLOB(clob)
 #define MOV_TO_IFA(reg, clob) \
 IS_RREG_IN(reg) \
 IS_RREG_CLOB(clob)
@@ -246,6 +251,9 @@
 IS_RREG_CLOB(clob2)
 #define RSM_PSR_DT \
 nop 0
+#define RSM_PSR_BE_I(clob0, clob1) \
+IS_RREG_CLOB(clob0) \
+IS_RREG_CLOB(clob1)
 #define SSM_PSR_DT_AND_SRLZ_I \
 nop 0
 #define BSW_0(clob0, clob1, clob2) \
diff --git a/arch/ia64/include/asm/paravirt.h b/arch/ia64/include/asm/paravirt.h
index 2bf3636473fe..2eb0a981a09a 100644
--- a/arch/ia64/include/asm/paravirt.h
+++ b/arch/ia64/include/asm/paravirt.h
@@ -22,6 +22,56 @@
 #ifndef __ASM_PARAVIRT_H
 #define __ASM_PARAVIRT_H

+#ifndef __ASSEMBLY__
+/******************************************************************************
+ * fsys related addresses
+ */
+struct pv_fsys_data {
+unsigned long *fsyscall_table;
+void *fsys_bubble_down;
+};
+
+extern struct pv_fsys_data pv_fsys_data;
+
+unsigned long *paravirt_get_fsyscall_table(void);
+char *paravirt_get_fsys_bubble_down(void);
+
+/******************************************************************************
+ * patchlist addresses for gate page
+ */
+enum pv_gate_patchlist {
+PV_GATE_START_FSYSCALL,
+PV_GATE_END_FSYSCALL,
+
+PV_GATE_START_BRL_FSYS_BUBBLE_DOWN,
+PV_GATE_END_BRL_FSYS_BUBBLE_DOWN,
+
+PV_GATE_START_VTOP,
+PV_GATE_END_VTOP,
+
+PV_GATE_START_MCKINLEY_E9,
+PV_GATE_END_MCKINLEY_E9,
+};
+
+struct pv_patchdata {
+unsigned long start_fsyscall_patchlist;
+unsigned long end_fsyscall_patchlist;
+unsigned long start_brl_fsys_bubble_down_patchlist;
+unsigned long end_brl_fsys_bubble_down_patchlist;
+unsigned long start_vtop_patchlist;
+unsigned long end_vtop_patchlist;
+unsigned long start_mckinley_e9_patchlist;
+unsigned long end_mckinley_e9_patchlist;
+
+void *gate_section;
+};
+
+extern struct pv_patchdata pv_patchdata;
+
+unsigned long paravirt_get_gate_patchlist(enum pv_gate_patchlist type);
+void *paravirt_get_gate_section(void);
+#endif
+
 #ifdef CONFIG_PARAVIRT_GUEST

 #define PARAVIRT_HYPERVISOR_TYPE_DEFAULT 0
@@ -68,6 +118,14 @@ struct pv_init_ops {
 int (*arch_setup_nomca)(void);

 void (*post_smp_prepare_boot_cpu)(void);
+
+#ifdef ASM_SUPPORTED
+unsigned long (*patch_bundle)(void *sbundle, void *ebundle,
+unsigned long type);
+unsigned long (*patch_inst)(unsigned long stag, unsigned long etag,
+unsigned long type);
+#endif
+void (*patch_branch)(unsigned long tag, unsigned long type);
 };

 extern struct pv_init_ops pv_init_ops;
@@ -210,6 +268,8 @@ struct pv_time_ops {
 int (*do_steal_accounting)(unsigned long *new_itm);

 void (*clocksource_resume)(void);
+
+unsigned long long (*sched_clock)(void);
 };

 extern struct pv_time_ops pv_time_ops;
@@ -227,6 +287,11 @@ paravirt_do_steal_accounting(unsigned long *new_itm)
 return pv_time_ops.do_steal_accounting(new_itm);
 }

+static inline unsigned long long paravirt_sched_clock(void)
+{
+return pv_time_ops.sched_clock();
+}
+
 #endif /* !__ASSEMBLY__ */

 #else
diff --git a/arch/ia64/include/asm/paravirt_patch.h b/arch/ia64/include/asm/paravirt_patch.h
new file mode 100644
index 000000000000..128ff5db6e67
--- /dev/null
+++ b/arch/ia64/include/asm/paravirt_patch.h
@@ -0,0 +1,143 @@
+/******************************************************************************
+ * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
+ * VA Linux Systems Japan K.K.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifndef __ASM_PARAVIRT_PATCH_H
+#define __ASM_PARAVIRT_PATCH_H
+
+#ifdef __ASSEMBLY__
+
+.section .paravirt_branches, "a"
+.previous
+#define PARAVIRT_PATCH_SITE_BR(type) \
+{ \
+[1:] ; \
+br.cond.sptk.many 2f ; \
+nop.b 0 ; \
+nop.b 0;; ; \
+} ; \
+2: \
+.xdata8 ".paravirt_branches", 1b, type
+
+#else
+
+#include <linux/stringify.h>
+#include <asm/intrinsics.h>
+
+/* for binary patch */
+struct paravirt_patch_site_bundle {
+void *sbundle;
+void *ebundle;
+unsigned long type;
+};
+
+/* label means the beginning of new bundle */
+#define paravirt_alt_bundle(instr, privop) \
+"\t998:\n" \
+"\t" instr "\n" \
+"\t999:\n" \
+"\t.pushsection .paravirt_bundles, \"a\"\n" \
+"\t.popsection\n" \
+"\t.xdata8 \".paravirt_bundles\", 998b, 999b, " \
+__stringify(privop) "\n"
+
+
+struct paravirt_patch_bundle_elem {
+const void *sbundle;
+const void *ebundle;
+unsigned long type;
+};
+
+
+struct paravirt_patch_site_inst {
+unsigned long stag;
+unsigned long etag;
+unsigned long type;
+};
+
+#define paravirt_alt_inst(instr, privop) \
+"\t[998:]\n" \
+"\t" instr "\n" \
+"\t[999:]\n" \
+"\t.pushsection .paravirt_insts, \"a\"\n" \
+"\t.popsection\n" \
+"\t.xdata8 \".paravirt_insts\", 998b, 999b, " \
+__stringify(privop) "\n"
+
+struct paravirt_patch_site_branch {
+unsigned long tag;
+unsigned long type;
+};
+
+struct paravirt_patch_branch_target {
+const void *entry;
+unsigned long type;
+};
+
+void
+__paravirt_patch_apply_branch(
+unsigned long tag, unsigned long type,
+const struct paravirt_patch_branch_target *entries,
+unsigned int nr_entries);
+
+void
+paravirt_patch_reloc_br(unsigned long tag, const void *target);
+
+void
+paravirt_patch_reloc_brl(unsigned long tag, const void *target);
+
+
+#if defined(ASM_SUPPORTED) && defined(CONFIG_PARAVIRT)
+unsigned long
+ia64_native_patch_bundle(void *sbundle, void *ebundle, unsigned long type);
+
+unsigned long
+__paravirt_patch_apply_bundle(void *sbundle, void *ebundle, unsigned long type,
+const struct paravirt_patch_bundle_elem *elems,
+unsigned long nelems,
+const struct paravirt_patch_bundle_elem **found);
+
+void
+paravirt_patch_apply_bundle(const struct paravirt_patch_site_bundle *start,
+const struct paravirt_patch_site_bundle *end);
+
+void
+paravirt_patch_apply_inst(const struct paravirt_patch_site_inst *start,
+const struct paravirt_patch_site_inst *end);
+
+void paravirt_patch_apply(void);
+#else
+#define paravirt_patch_apply_bundle(start, end) do { } while (0)
+#define paravirt_patch_apply_inst(start, end) do { } while (0)
+#define paravirt_patch_apply() do { } while (0)
+#endif
+
+#endif /* !__ASSEMBLEY__ */
+
+#endif /* __ASM_PARAVIRT_PATCH_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "linux"
+ * c-basic-offset: 8
+ * tab-width: 8
+ * indent-tabs-mode: t
+ * End:
+ */
diff --git a/arch/ia64/include/asm/paravirt_privop.h b/arch/ia64/include/asm/paravirt_privop.h
index 33c8e55f5775..3d2951130b5f 100644
--- a/arch/ia64/include/asm/paravirt_privop.h
+++ b/arch/ia64/include/asm/paravirt_privop.h
@@ -33,7 +33,7 @@
 */

 struct pv_cpu_ops {
-void (*fc)(unsigned long addr);
+void (*fc)(void *addr);
 unsigned long (*thash)(unsigned long addr);
 unsigned long (*get_cpuid)(int index);
 unsigned long (*get_pmd)(int index);
@@ -60,12 +60,18 @@ extern unsigned long ia64_native_getreg_func(int regnum);
 /* Instructions paravirtualized for performance */
 /************************************************/

+#ifndef ASM_SUPPORTED
+#define paravirt_ssm_i() pv_cpu_ops.ssm_i()
+#define paravirt_rsm_i() pv_cpu_ops.rsm_i()
+#define __paravirt_getreg() pv_cpu_ops.getreg()
+#endif
+
 /* mask for ia64_native_ssm/rsm() must be constant.("i" constraing).
 * static inline function doesn't satisfy it. */
 #define paravirt_ssm(mask) \
 do { \
 if ((mask) == IA64_PSR_I) \
-pv_cpu_ops.ssm_i(); \
+paravirt_ssm_i(); \
 else \
 ia64_native_ssm(mask); \
 } while (0)
@@ -73,7 +79,7 @@ extern unsigned long ia64_native_getreg_func(int regnum);
 #define paravirt_rsm(mask) \
 do { \
 if ((mask) == IA64_PSR_I) \
-pv_cpu_ops.rsm_i(); \
+paravirt_rsm_i(); \
 else \
 ia64_native_rsm(mask); \
 } while (0)
@@ -86,7 +92,7 @@ extern unsigned long ia64_native_getreg_func(int regnum);
 if ((reg) == _IA64_REG_IP) \
 res = ia64_native_getreg(_IA64_REG_IP); \
 else \
-res = pv_cpu_ops.getreg(reg); \
+res = __paravirt_getreg(reg); \
 res; \
 })

@@ -112,6 +118,12 @@ void paravirt_cpu_asm_init(const struct pv_cpu_asm_switch *cpu_asm_switch);

 #endif /* CONFIG_PARAVIRT */

+#if defined(CONFIG_PARAVIRT) && defined(ASM_SUPPORTED)
+#define paravirt_dv_serialize_data() ia64_dv_serialize_data()
+#else
+#define paravirt_dv_serialize_data() /* nothing */
+#endif
+
 /* these routines utilize privilege-sensitive or performance-sensitive
 * privileged instructions so the code must be replaced with
 * paravirtualized versions */
@@ -121,4 +133,349 @@ void paravirt_cpu_asm_init(const struct pv_cpu_asm_switch *cpu_asm_switch);
 IA64_PARAVIRT_ASM_FUNC(work_processed_syscall)
 #define ia64_leave_kernel IA64_PARAVIRT_ASM_FUNC(leave_kernel)

+
+#if defined(CONFIG_PARAVIRT)
+/******************************************************************************
+ * binary patching infrastructure
+ */
+#define PARAVIRT_PATCH_TYPE_FC 1
+#define PARAVIRT_PATCH_TYPE_THASH 2
+#define PARAVIRT_PATCH_TYPE_GET_CPUID 3
+#define PARAVIRT_PATCH_TYPE_GET_PMD 4
+#define PARAVIRT_PATCH_TYPE_PTCGA 5
+#define PARAVIRT_PATCH_TYPE_GET_RR 6
+#define PARAVIRT_PATCH_TYPE_SET_RR 7
+#define PARAVIRT_PATCH_TYPE_SET_RR0_TO_RR4 8
+#define PARAVIRT_PATCH_TYPE_SSM_I 9
+#define PARAVIRT_PATCH_TYPE_RSM_I 10
+#define PARAVIRT_PATCH_TYPE_GET_PSR_I 11
+#define PARAVIRT_PATCH_TYPE_INTRIN_LOCAL_IRQ_RESTORE 12
+
+/* PARAVIRT_PATY_TYPE_[GS]ETREG + _IA64_REG_xxx */
+#define PARAVIRT_PATCH_TYPE_GETREG 0x10000000
+#define PARAVIRT_PATCH_TYPE_SETREG 0x20000000
+
+/*
+ * struct task_struct* (*ia64_switch_to)(void* next_task);
+ * void *ia64_leave_syscall;
+ * void *ia64_work_processed_syscall
+ * void *ia64_leave_kernel;
+ */
+
+#define PARAVIRT_PATCH_TYPE_BR_START 0x30000000
+#define PARAVIRT_PATCH_TYPE_BR_SWITCH_TO \
+(PARAVIRT_PATCH_TYPE_BR_START + 0)
+#define PARAVIRT_PATCH_TYPE_BR_LEAVE_SYSCALL \
+(PARAVIRT_PATCH_TYPE_BR_START + 1)
+#define PARAVIRT_PATCH_TYPE_BR_WORK_PROCESSED_SYSCALL \
+(PARAVIRT_PATCH_TYPE_BR_START + 2)
+#define PARAVIRT_PATCH_TYPE_BR_LEAVE_KERNEL \
+(PARAVIRT_PATCH_TYPE_BR_START + 3)
+
+#ifdef ASM_SUPPORTED
+#include <asm/paravirt_patch.h>
+
+/*
+ * pv_cpu_ops calling stub.
+ * normal function call convension can't be written by gcc
+ * inline assembly.
+ *
+ * from the caller's point of view,
+ * the following registers will be clobbered.
+ * r2, r3
+ * r8-r15
+ * r16, r17
+ * b6, b7
+ * p6-p15
+ * ar.ccv
+ *
+ * from the callee's point of view ,
+ * the following registers can be used.
+ * r2, r3: scratch
+ * r8: scratch, input argument0 and return value
+ * r0-r15: scratch, input argument1-5
+ * b6: return pointer
+ * b7: scratch
+ * p6-p15: scratch
+ * ar.ccv: scratch
+ *
+ * other registers must not be changed. especially
+ * b0: rp: preserved. gcc ignores b0 in clobbered register.
+ * r16: saved gp
+ */
+/* 5 bundles */
+#define __PARAVIRT_BR \
+";;\n" \
+"{ .mlx\n" \
+"nop 0\n" \
+"movl r2 = %[op_addr]\n"/* get function pointer address */ \
+";;\n" \
+"}\n" \
+"1:\n" \
+"{ .mii\n" \
+"ld8 r2 = [r2]\n" /* load function descriptor address */ \
+"mov r17 = ip\n" /* get ip to calc return address */ \
+"mov r16 = gp\n" /* save gp */ \
+";;\n" \
+"}\n" \
+"{ .mii\n" \
+"ld8 r3 = [r2], 8\n" /* load entry address */ \
+"adds r17 = 1f - 1b, r17\n" /* calculate return address */ \
+";;\n" \
+"mov b7 = r3\n" /* set entry address */ \
+"}\n" \
+"{ .mib\n" \
+"ld8 gp = [r2]\n" /* load gp value */ \
+"mov b6 = r17\n" /* set return address */ \
+"br.cond.sptk.few b7\n" /* intrinsics are very short isns */ \
+"}\n" \
+"1:\n" \
+"{ .mii\n" \
+"mov gp = r16\n" /* restore gp value */ \
+"nop 0\n" \
+"nop 0\n" \
+";;\n" \
+"}\n"
+
+#define PARAVIRT_OP(op) \
+[op_addr] "i"(&pv_cpu_ops.op)
+
+#define PARAVIRT_TYPE(type) \
+PARAVIRT_PATCH_TYPE_ ## type
+
+#define PARAVIRT_REG_CLOBBERS0 \
+"r2", "r3", /*"r8",*/ "r9", "r10", "r11", "r14", \
+"r15", "r16", "r17"
+
+#define PARAVIRT_REG_CLOBBERS1 \
+"r2","r3", /*"r8",*/ "r9", "r10", "r11", "r14", \
+"r15", "r16", "r17"
+
+#define PARAVIRT_REG_CLOBBERS2 \
+"r2", "r3", /*"r8", "r9",*/ "r10", "r11", "r14", \
+"r15", "r16", "r17"
+
+#define PARAVIRT_REG_CLOBBERS5 \
+"r2", "r3", /*"r8", "r9", "r10", "r11", "r14",*/ \
+"r15", "r16", "r17"
+
+#define PARAVIRT_BR_CLOBBERS \
+"b6", "b7"
+
+#define PARAVIRT_PR_CLOBBERS \
+"p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15"
+
+#define PARAVIRT_AR_CLOBBERS \
+"ar.ccv"
+
+#define PARAVIRT_CLOBBERS0 \
+PARAVIRT_REG_CLOBBERS0, \
+PARAVIRT_BR_CLOBBERS, \
+PARAVIRT_PR_CLOBBERS, \
+PARAVIRT_AR_CLOBBERS, \
+"memory"
+
+#define PARAVIRT_CLOBBERS1 \
+PARAVIRT_REG_CLOBBERS1, \
+PARAVIRT_BR_CLOBBERS, \
+PARAVIRT_PR_CLOBBERS, \
+PARAVIRT_AR_CLOBBERS, \
+"memory"
+
+#define PARAVIRT_CLOBBERS2 \
+PARAVIRT_REG_CLOBBERS2, \
+PARAVIRT_BR_CLOBBERS, \
+PARAVIRT_PR_CLOBBERS, \
+PARAVIRT_AR_CLOBBERS, \
+"memory"
+
+#define PARAVIRT_CLOBBERS5 \
+PARAVIRT_REG_CLOBBERS5, \
+PARAVIRT_BR_CLOBBERS, \
+PARAVIRT_PR_CLOBBERS, \
+PARAVIRT_AR_CLOBBERS, \
+"memory"
+
+#define PARAVIRT_BR0(op, type) \
+register unsigned long ia64_clobber asm ("r8"); \
+asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \
+PARAVIRT_TYPE(type)) \
+: "=r"(ia64_clobber) \
+: PARAVIRT_OP(op) \
+: PARAVIRT_CLOBBERS0)
+
+#define PARAVIRT_BR0_RET(op, type) \
+register unsigned long ia64_intri_res asm ("r8"); \
+asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \
+PARAVIRT_TYPE(type)) \
+: "=r"(ia64_intri_res) \
+: PARAVIRT_OP(op) \
+: PARAVIRT_CLOBBERS0)
+
+#define PARAVIRT_BR1(op, type, arg1) \
+register unsigned long __##arg1 asm ("r8") = arg1; \
+register unsigned long ia64_clobber asm ("r8"); \
+asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \
+PARAVIRT_TYPE(type)) \
+: "=r"(ia64_clobber) \
+: PARAVIRT_OP(op), "0"(__##arg1) \
+: PARAVIRT_CLOBBERS1)
+
+#define PARAVIRT_BR1_RET(op, type, arg1) \
+register unsigned long ia64_intri_res asm ("r8"); \
+register unsigned long __##arg1 asm ("r8") = arg1; \
+asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \
+PARAVIRT_TYPE(type)) \
+: "=r"(ia64_intri_res) \
+: PARAVIRT_OP(op), "0"(__##arg1) \
+: PARAVIRT_CLOBBERS1)
+
+#define PARAVIRT_BR1_VOID(op, type, arg1) \
+register void *__##arg1 asm ("r8") = arg1; \
+register unsigned long ia64_clobber asm ("r8"); \
+asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \
+PARAVIRT_TYPE(type)) \
+: "=r"(ia64_clobber) \
+: PARAVIRT_OP(op), "0"(__##arg1) \
+: PARAVIRT_CLOBBERS1)
+
+#define PARAVIRT_BR2(op, type, arg1, arg2) \
+register unsigned long __##arg1 asm ("r8") = arg1; \
+register unsigned long __##arg2 asm ("r9") = arg2; \
+register unsigned long ia64_clobber1 asm ("r8"); \
+register unsigned long ia64_clobber2 asm ("r9"); \
+asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \
+PARAVIRT_TYPE(type)) \
+: "=r"(ia64_clobber1), "=r"(ia64_clobber2) \
+: PARAVIRT_OP(op), "0"(__##arg1), "1"(__##arg2) \
+: PARAVIRT_CLOBBERS2)
+
+
+#define PARAVIRT_DEFINE_CPU_OP0(op, type) \
+static inline void \
+paravirt_ ## op (void) \
+{ \
+PARAVIRT_BR0(op, type); \
+}
+
+#define PARAVIRT_DEFINE_CPU_OP0_RET(op, type) \
+static inline unsigned long \
+paravirt_ ## op (void) \
+{ \
+PARAVIRT_BR0_RET(op, type); \
+return ia64_intri_res; \
+}
+
+#define PARAVIRT_DEFINE_CPU_OP1_VOID(op, type) \
+static inline void \
+paravirt_ ## op (void *arg1) \
+{ \
+PARAVIRT_BR1_VOID(op, type, arg1); \
+}
+
+#define PARAVIRT_DEFINE_CPU_OP1(op, type) \
+static inline void \
+paravirt_ ## op (unsigned long arg1) \
+{ \
+PARAVIRT_BR1(op, type, arg1); \
+}
+
+#define PARAVIRT_DEFINE_CPU_OP1_RET(op, type) \
+static inline unsigned long \
+paravirt_ ## op (unsigned long arg1) \
+{ \
+PARAVIRT_BR1_RET(op, type, arg1); \
+return ia64_intri_res; \
+}
+
+#define PARAVIRT_DEFINE_CPU_OP2(op, type) \
+static inline void \
+paravirt_ ## op (unsigned long arg1, \
+unsigned long arg2) \
+{ \
+PARAVIRT_BR2(op, type, arg1, arg2); \
+}
+
+
+PARAVIRT_DEFINE_CPU_OP1_VOID(fc, FC);
+PARAVIRT_DEFINE_CPU_OP1_RET(thash, THASH)
+PARAVIRT_DEFINE_CPU_OP1_RET(get_cpuid, GET_CPUID)
+PARAVIRT_DEFINE_CPU_OP1_RET(get_pmd, GET_PMD)
+PARAVIRT_DEFINE_CPU_OP2(ptcga, PTCGA)
+PARAVIRT_DEFINE_CPU_OP1_RET(get_rr, GET_RR)
+PARAVIRT_DEFINE_CPU_OP2(set_rr, SET_RR)
+PARAVIRT_DEFINE_CPU_OP0(ssm_i, SSM_I)
+PARAVIRT_DEFINE_CPU_OP0(rsm_i, RSM_I)
+PARAVIRT_DEFINE_CPU_OP0_RET(get_psr_i, GET_PSR_I)
+PARAVIRT_DEFINE_CPU_OP1(intrin_local_irq_restore, INTRIN_LOCAL_IRQ_RESTORE)
+
+static inline void
+paravirt_set_rr0_to_rr4(unsigned long val0, unsigned long val1,
+unsigned long val2, unsigned long val3,
+unsigned long val4)
+{
+register unsigned long __val0 asm ("r8") = val0;
+register unsigned long __val1 asm ("r9") = val1;
+register unsigned long __val2 asm ("r10") = val2;
+register unsigned long __val3 asm ("r11") = val3;
+register unsigned long __val4 asm ("r14") = val4;
+
+register unsigned long ia64_clobber0 asm ("r8");
+register unsigned long ia64_clobber1 asm ("r9");
+register unsigned long ia64_clobber2 asm ("r10");
+register unsigned long ia64_clobber3 asm ("r11");
+register unsigned long ia64_clobber4 asm ("r14");
+
+asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,
+PARAVIRT_TYPE(SET_RR0_TO_RR4))
+: "=r"(ia64_clobber0),
+"=r"(ia64_clobber1),
+"=r"(ia64_clobber2),
+"=r"(ia64_clobber3),
+"=r"(ia64_clobber4)
+: PARAVIRT_OP(set_rr0_to_rr4),
+"0"(__val0), "1"(__val1), "2"(__val2),
+"3"(__val3), "4"(__val4)
+: PARAVIRT_CLOBBERS5);
+}
+
+/* unsigned long paravirt_getreg(int reg) */
+#define __paravirt_getreg(reg) \
+({ \
+register unsigned long ia64_intri_res asm ("r8"); \
+register unsigned long __reg asm ("r8") = (reg); \
+\
+BUILD_BUG_ON(!__builtin_constant_p(reg)); \
+asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \
+PARAVIRT_TYPE(GETREG) \
++ (reg)) \
+: "=r"(ia64_intri_res) \
+: PARAVIRT_OP(getreg), "0"(__reg) \
+: PARAVIRT_CLOBBERS1); \
+\
+ia64_intri_res; \
+})
+
+/* void paravirt_setreg(int reg, unsigned long val) */
+#define paravirt_setreg(reg, val) \
+do { \
+register unsigned long __val asm ("r8") = val; \
+register unsigned long __reg asm ("r9") = reg; \
+register unsigned long ia64_clobber1 asm ("r8"); \
+register unsigned long ia64_clobber2 asm ("r9"); \
+\
+BUILD_BUG_ON(!__builtin_constant_p(reg)); \
+asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \
+PARAVIRT_TYPE(SETREG) \
++ (reg)) \
+: "=r"(ia64_clobber1), \
+"=r"(ia64_clobber2) \
+: PARAVIRT_OP(setreg), \
+"1"(__reg), "0"(__val) \
+: PARAVIRT_CLOBBERS2); \
+} while (0)
+
+#endif /* ASM_SUPPORTED */
+#endif /* CONFIG_PARAVIRT && ASM_SUPPOTED */
+
 #endif /* _ASM_IA64_PARAVIRT_PRIVOP_H */
diff --git a/arch/ia64/include/asm/smp.h b/arch/ia64/include/asm/smp.h
index 21c402365d0e..598408336251 100644
--- a/arch/ia64/include/asm/smp.h
+++ b/arch/ia64/include/asm/smp.h
@@ -126,7 +126,8 @@ extern void identify_siblings (struct cpuinfo_ia64 *);
 extern int is_multithreading_enabled(void);

 extern void arch_send_call_function_single_ipi(int cpu);
-extern void arch_send_call_function_ipi(cpumask_t mask);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask

 #else /* CONFIG_SMP */

diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
index 0229fb95fb38..13ab71576bc7 100644
--- a/arch/ia64/include/asm/spinlock.h
+++ b/arch/ia64/include/asm/spinlock.h
@@ -120,6 +120,38 @@ do { \
 #define __raw_read_can_lock(rw) (*(volatile int *)(rw) >= 0)
 #define __raw_write_can_lock(rw) (*(volatile int *)(rw) == 0)

+#ifdef ASM_SUPPORTED
+
+static __always_inline void
+__raw_read_lock_flags(raw_rwlock_t *lock, unsigned long flags)
+{
+__asm__ __volatile__ (
+"tbit.nz p6, p0 = %1,%2\n"
+"br.few 3f\n"
+"1:\n"
+"fetchadd4.rel r2 = [%0], -1;;\n"
+"(p6) ssm psr.i\n"
+"2:\n"
+"hint @pause\n"
+"ld4 r2 = [%0];;\n"
+"cmp4.lt p7,p0 = r2, r0\n"
+"(p7) br.cond.spnt.few 2b\n"
+"(p6) rsm psr.i\n"
+";;\n"
+"3:\n"
+"fetchadd4.acq r2 = [%0], 1;;\n"
+"cmp4.lt p7,p0 = r2, r0\n"
+"(p7) br.cond.spnt.few 1b\n"
+: : "r"(lock), "r"(flags), "i"(IA64_PSR_I_BIT)
+: "p6", "p7", "r2", "memory");
+}
+
+#define __raw_read_lock(lock) __raw_read_lock_flags(lock, 0)
+
+#else /* !ASM_SUPPORTED */
+
+#define __raw_read_lock_flags(rw, flags) __raw_read_lock(rw)
+
 #define __raw_read_lock(rw) \
 do { \
 raw_rwlock_t *__read_lock_ptr = (rw); \
@@ -131,6 +163,8 @@ do { \
 } \
 } while (0)

+#endif /* !ASM_SUPPORTED */
+
 #define __raw_read_unlock(rw) \
 do { \
 raw_rwlock_t *__read_lock_ptr = (rw); \
@@ -138,20 +172,33 @@ do { \
 } while (0)

 #ifdef ASM_SUPPORTED
-#define __raw_write_lock(rw) \
-do { \
-__asm__ __volatile__ ( \
-"mov ar.ccv = r0\n" \
-"dep r29 = -1, r0, 31, 1;;\n" \
-"1:\n" \
-"ld4 r2 = [%0];;\n" \
-"cmp4.eq p0,p7 = r0,r2\n" \
-"(p7) br.cond.spnt.few 1b \n" \
-"cmpxchg4.acq r2 = [%0], r29, ar.ccv;;\n" \
-"cmp4.eq p0,p7 = r0, r2\n" \
-"(p7) br.cond.spnt.few 1b;;\n" \
-:: "r"(rw) : "ar.ccv", "p7", "r2", "r29", "memory"); \
-} while(0)
+
+static __always_inline void
+__raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags)
+{
+__asm__ __volatile__ (
+"tbit.nz p6, p0 = %1, %2\n"
+"mov ar.ccv = r0\n"
+"dep r29 = -1, r0, 31, 1\n"
+"br.few 3f;;\n"
+"1:\n"
+"(p6) ssm psr.i\n"
+"2:\n"
+"hint @pause\n"
+"ld4 r2 = [%0];;\n"
+"cmp4.eq p0,p7 = r0, r2\n"
+"(p7) br.cond.spnt.few 2b\n"
+"(p6) rsm psr.i\n"
+";;\n"
+"3:\n"
+"cmpxchg4.acq r2 = [%0], r29, ar.ccv;;\n"
+"cmp4.eq p0,p7 = r0, r2\n"
+"(p7) br.cond.spnt.few 1b;;\n"
+: : "r"(lock), "r"(flags), "i"(IA64_PSR_I_BIT)
+: "ar.ccv", "p6", "p7", "r2", "r29", "memory");
+}
+
+#define __raw_write_lock(rw) __raw_write_lock_flags(rw, 0)

 #define __raw_write_trylock(rw) \
 ({ \
@@ -174,6 +221,8 @@ static inline void __raw_write_unlock(raw_rwlock_t *x)

 #else /* !ASM_SUPPORTED */

+#define __raw_write_lock_flags(l, flags) __raw_write_lock(l)
+
 #define __raw_write_lock(l) \
 ({ \
 __u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1); \
diff --git a/arch/ia64/include/asm/timex.h b/arch/ia64/include/asm/timex.h
index 4e03cfe74a0c..86c7db861180 100644
--- a/arch/ia64/include/asm/timex.h
+++ b/arch/ia64/include/asm/timex.h
@@ -40,5 +40,6 @@ get_cycles (void)
 }

 extern void ia64_cpu_local_tick (void);
+extern unsigned long long ia64_native_sched_clock (void);

 #endif /* _ASM_IA64_TIMEX_H */
diff --git a/arch/ia64/include/asm/topology.h b/arch/ia64/include/asm/topology.h
index f260dcf21515..7b4c8c70b2d1 100644
--- a/arch/ia64/include/asm/topology.h
+++ b/arch/ia64/include/asm/topology.h
@@ -112,11 +112,6 @@ void build_cpu_to_node_map(void);

 extern void arch_fix_phys_package_id(int num, u32 slot);

-#define pcibus_to_cpumask(bus) (pcibus_to_node(bus) == -1 ? \
-CPU_MASK_ALL : \
-node_to_cpumask(pcibus_to_node(bus)) \
-)
-
 #define cpumask_of_pcibus(bus) (pcibus_to_node(bus) == -1 ? \
 cpu_all_mask : \
 cpumask_of_node(pcibus_to_node(bus)))
diff --git a/arch/ia64/include/asm/uv/uv_hub.h b/arch/ia64/include/asm/uv/uv_hub.h
index f607018af4a1..53e9dfacd073 100644
--- a/arch/ia64/include/asm/uv/uv_hub.h
+++ b/arch/ia64/include/asm/uv/uv_hub.h
@@ -305,5 +305,11 @@ static inline int uv_num_possible_blades(void)
 return 1;
 }

+static inline void uv_hub_send_ipi(int pnode, int apicid, int vector)
+{
+/* not currently needed on ia64 */
+}
+
+
 #endif /* __ASM_IA64_UV_HUB__ */

diff --git a/arch/ia64/include/asm/uv/uv_mmrs.h b/arch/ia64/include/asm/uv/uv_mmrs.h
index c149ef085437..fe0b8f05e1a8 100644
--- a/arch/ia64/include/asm/uv/uv_mmrs.h
+++ b/arch/ia64/include/asm/uv/uv_mmrs.h
@@ -8,8 +8,8 @@
 * Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved.
 */

-#ifndef __ASM_IA64_UV_MMRS__
-#define __ASM_IA64_UV_MMRS__
+#ifndef _ASM_IA64_UV_UV_MMRS_H
+#define _ASM_IA64_UV_UV_MMRS_H

 #define UV_MMR_ENABLE (1UL << 63)

@@ -243,6 +243,158 @@ union uvh_event_occurred0_u {
 #define UVH_EVENT_OCCURRED0_ALIAS_32 0x005f0

 /* ========================================================================= */
+/* UVH_GR0_TLB_INT0_CONFIG */
+/* ========================================================================= */
+#define UVH_GR0_TLB_INT0_CONFIG 0x61b00UL
+
+#define UVH_GR0_TLB_INT0_CONFIG_VECTOR_SHFT 0
+#define UVH_GR0_TLB_INT0_CONFIG_VECTOR_MASK 0x00000000000000ffUL
+#define UVH_GR0_TLB_INT0_CONFIG_DM_SHFT 8
+#define UVH_GR0_TLB_INT0_CONFIG_DM_MASK 0x0000000000000700UL
+#define UVH_GR0_TLB_INT0_CONFIG_DESTMODE_SHFT 11
+#define UVH_GR0_TLB_INT0_CONFIG_DESTMODE_MASK 0x0000000000000800UL
+#define UVH_GR0_TLB_INT0_CONFIG_STATUS_SHFT 12
+#define UVH_GR0_TLB_INT0_CONFIG_STATUS_MASK 0x0000000000001000UL
+#define UVH_GR0_TLB_INT0_CONFIG_P_SHFT 13
+#define UVH_GR0_TLB_INT0_CONFIG_P_MASK 0x0000000000002000UL
+#define UVH_GR0_TLB_INT0_CONFIG_T_SHFT 15
+#define UVH_GR0_TLB_INT0_CONFIG_T_MASK 0x0000000000008000UL
+#define UVH_GR0_TLB_INT0_CONFIG_M_SHFT 16
+#define UVH_GR0_TLB_INT0_CONFIG_M_MASK 0x0000000000010000UL
+#define UVH_GR0_TLB_INT0_CONFIG_APIC_ID_SHFT 32
+#define UVH_GR0_TLB_INT0_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
+
+union uvh_gr0_tlb_int0_config_u {
+unsigned long v;
+struct uvh_gr0_tlb_int0_config_s {
+unsigned long vector_ : 8; /* RW */
+unsigned long dm : 3; /* RW */
+unsigned long destmode : 1; /* RW */
+unsigned long status : 1; /* RO */
+unsigned long p : 1; /* RO */
+unsigned long rsvd_14 : 1; /* */
+unsigned long t : 1; /* RO */
+unsigned long m : 1; /* RW */
+unsigned long rsvd_17_31: 15; /* */
+unsigned long apic_id : 32; /* RW */
+} s;
+};
+
+/* ========================================================================= */
+/* UVH_GR0_TLB_INT1_CONFIG */
+/* ========================================================================= */
+#define UVH_GR0_TLB_INT1_CONFIG 0x61b40UL
+
+#define UVH_GR0_TLB_INT1_CONFIG_VECTOR_SHFT 0
+#define UVH_GR0_TLB_INT1_CONFIG_VECTOR_MASK 0x00000000000000ffUL
+#define UVH_GR0_TLB_INT1_CONFIG_DM_SHFT 8
+#define UVH_GR0_TLB_INT1_CONFIG_DM_MASK 0x0000000000000700UL
+#define UVH_GR0_TLB_INT1_CONFIG_DESTMODE_SHFT 11
+#define UVH_GR0_TLB_INT1_CONFIG_DESTMODE_MASK 0x0000000000000800UL
+#define UVH_GR0_TLB_INT1_CONFIG_STATUS_SHFT 12
+#define UVH_GR0_TLB_INT1_CONFIG_STATUS_MASK 0x0000000000001000UL
+#define UVH_GR0_TLB_INT1_CONFIG_P_SHFT 13
+#define UVH_GR0_TLB_INT1_CONFIG_P_MASK 0x0000000000002000UL
+#define UVH_GR0_TLB_INT1_CONFIG_T_SHFT 15
+#define UVH_GR0_TLB_INT1_CONFIG_T_MASK 0x0000000000008000UL
+#define UVH_GR0_TLB_INT1_CONFIG_M_SHFT 16
+#define UVH_GR0_TLB_INT1_CONFIG_M_MASK 0x0000000000010000UL
+#define UVH_GR0_TLB_INT1_CONFIG_APIC_ID_SHFT 32
+#define UVH_GR0_TLB_INT1_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
+
+union uvh_gr0_tlb_int1_config_u {
+unsigned long v;
+struct uvh_gr0_tlb_int1_config_s {
+unsigned long vector_ : 8; /* RW */
+unsigned long dm : 3; /* RW */
+unsigned long destmode : 1; /* RW */
+unsigned long status : 1; /* RO */
+unsigned long p : 1; /* RO */
+unsigned long rsvd_14 : 1; /* */
+unsigned long t : 1; /* RO */
+unsigned long m : 1; /* RW */
+unsigned long rsvd_17_31: 15; /* */
+unsigned long apic_id : 32; /* RW */
+} s;
+};
+
+/* ========================================================================= */
+/* UVH_GR1_TLB_INT0_CONFIG */
+/* ========================================================================= */
+#define UVH_GR1_TLB_INT0_CONFIG 0x61f00UL
+
+#define UVH_GR1_TLB_INT0_CONFIG_VECTOR_SHFT 0
+#define UVH_GR1_TLB_INT0_CONFIG_VECTOR_MASK 0x00000000000000ffUL
+#define UVH_GR1_TLB_INT0_CONFIG_DM_SHFT 8
+#define UVH_GR1_TLB_INT0_CONFIG_DM_MASK 0x0000000000000700UL
+#define UVH_GR1_TLB_INT0_CONFIG_DESTMODE_SHFT 11
+#define UVH_GR1_TLB_INT0_CONFIG_DESTMODE_MASK 0x0000000000000800UL
+#define UVH_GR1_TLB_INT0_CONFIG_STATUS_SHFT 12
+#define UVH_GR1_TLB_INT0_CONFIG_STATUS_MASK 0x0000000000001000UL
+#define UVH_GR1_TLB_INT0_CONFIG_P_SHFT 13
+#define UVH_GR1_TLB_INT0_CONFIG_P_MASK 0x0000000000002000UL
+#define UVH_GR1_TLB_INT0_CONFIG_T_SHFT 15
+#define UVH_GR1_TLB_INT0_CONFIG_T_MASK 0x0000000000008000UL
+#define UVH_GR1_TLB_INT0_CONFIG_M_SHFT 16
+#define UVH_GR1_TLB_INT0_CONFIG_M_MASK 0x0000000000010000UL
+#define UVH_GR1_TLB_INT0_CONFIG_APIC_ID_SHFT 32
+#define UVH_GR1_TLB_INT0_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
+
+union uvh_gr1_tlb_int0_config_u {
+unsigned long v;
+struct uvh_gr1_tlb_int0_config_s {
+unsigned long vector_ : 8; /* RW */
+unsigned long dm : 3; /* RW */
+unsigned long destmode : 1; /* RW */
+unsigned long status : 1; /* RO */
+unsigned long p : 1; /* RO */
+unsigned long rsvd_14 : 1; /* */
+unsigned long t : 1; /* RO */
+unsigned long m : 1; /* RW */
+unsigned long rsvd_17_31: 15; /* */
+unsigned long apic_id : 32; /* RW */
+} s;
+};
+
+/* ========================================================================= */
+/* UVH_GR1_TLB_INT1_CONFIG */
+/* ========================================================================= */
+#define UVH_GR1_TLB_INT1_CONFIG 0x61f40UL
+
+#define UVH_GR1_TLB_INT1_CONFIG_VECTOR_SHFT 0
+#define UVH_GR1_TLB_INT1_CONFIG_VECTOR_MASK 0x00000000000000ffUL
+#define UVH_GR1_TLB_INT1_CONFIG_DM_SHFT 8
+#define UVH_GR1_TLB_INT1_CONFIG_DM_MASK 0x0000000000000700UL
368 | #define UVH_GR1_TLB_INT1_CONFIG_DESTMODE_SHFT 11 | ||
369 | #define UVH_GR1_TLB_INT1_CONFIG_DESTMODE_MASK 0x0000000000000800UL | ||
370 | #define UVH_GR1_TLB_INT1_CONFIG_STATUS_SHFT 12 | ||
371 | #define UVH_GR1_TLB_INT1_CONFIG_STATUS_MASK 0x0000000000001000UL | ||
372 | #define UVH_GR1_TLB_INT1_CONFIG_P_SHFT 13 | ||
373 | #define UVH_GR1_TLB_INT1_CONFIG_P_MASK 0x0000000000002000UL | ||
374 | #define UVH_GR1_TLB_INT1_CONFIG_T_SHFT 15 | ||
375 | #define UVH_GR1_TLB_INT1_CONFIG_T_MASK 0x0000000000008000UL | ||
376 | #define UVH_GR1_TLB_INT1_CONFIG_M_SHFT 16 | ||
377 | #define UVH_GR1_TLB_INT1_CONFIG_M_MASK 0x0000000000010000UL | ||
378 | #define UVH_GR1_TLB_INT1_CONFIG_APIC_ID_SHFT 32 | ||
379 | #define UVH_GR1_TLB_INT1_CONFIG_APIC_ID_MASK 0xffffffff00000000UL | ||
380 | |||
381 | union uvh_gr1_tlb_int1_config_u { | ||
382 | unsigned long v; | ||
383 | struct uvh_gr1_tlb_int1_config_s { | ||
384 | unsigned long vector_ : 8; /* RW */ | ||
385 | unsigned long dm : 3; /* RW */ | ||
386 | unsigned long destmode : 1; /* RW */ | ||
387 | unsigned long status : 1; /* RO */ | ||
388 | unsigned long p : 1; /* RO */ | ||
389 | unsigned long rsvd_14 : 1; /* */ | ||
390 | unsigned long t : 1; /* RO */ | ||
391 | unsigned long m : 1; /* RW */ | ||
392 | unsigned long rsvd_17_31: 15; /* */ | ||
393 | unsigned long apic_id : 32; /* RW */ | ||
394 | } s; | ||
395 | }; | ||
396 | |||
397 | /* ========================================================================= */ | ||
246 | /* UVH_INT_CMPB */ | 398 | /* UVH_INT_CMPB */ |
247 | /* ========================================================================= */ | 399 | /* ========================================================================= */ |
248 | #define UVH_INT_CMPB 0x22080UL | 400 | #define UVH_INT_CMPB 0x22080UL |
@@ -670,4 +822,4 @@ union uvh_si_alias2_overlay_config_u { | |||
670 | }; | 822 | }; |
671 | 823 | ||
672 | 824 | ||
673 | #endif /* __ASM_IA64_UV_MMRS__ */ | 825 | #endif /* _ASM_IA64_UV_UV_MMRS_H */ |
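Illustrative sketch (not part of this patch): the UVH_GR*_TLB_INT*_CONFIG definitions above pair a raw 64-bit value with a bit-field view, in the usual UV MMR style. Assuming the header as added here, a caller could compose a value for UVH_GR0_TLB_INT0_CONFIG as follows; the function name is hypothetical.

#include <asm/uv/uv_mmrs.h>

/* Build the 64-bit value to program into UVH_GR0_TLB_INT0_CONFIG. */
static unsigned long example_tlb_int0_value(int vector, unsigned long apic_id)
{
	union uvh_gr0_tlb_int0_config_u cfg = { .v = 0 };

	cfg.s.vector_ = vector;		/* bits 7:0, interrupt vector */
	cfg.s.dm      = 0;		/* bits 10:8, fixed delivery mode */
	cfg.s.m       = 0;		/* bit 16, not masked */
	cfg.s.apic_id = apic_id;	/* bits 63:32, destination */

	return cfg.v;			/* written to the MMR by platform code */
}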
diff --git a/arch/ia64/include/asm/xen/hypervisor.h b/arch/ia64/include/asm/xen/hypervisor.h index 7a804e80fc67..e425227a418e 100644 --- a/arch/ia64/include/asm/xen/hypervisor.h +++ b/arch/ia64/include/asm/xen/hypervisor.h | |||
@@ -33,9 +33,6 @@ | |||
33 | #ifndef _ASM_IA64_XEN_HYPERVISOR_H | 33 | #ifndef _ASM_IA64_XEN_HYPERVISOR_H |
34 | #define _ASM_IA64_XEN_HYPERVISOR_H | 34 | #define _ASM_IA64_XEN_HYPERVISOR_H |
35 | 35 | ||
36 | #ifdef CONFIG_XEN | ||
37 | |||
38 | #include <linux/init.h> | ||
39 | #include <xen/interface/xen.h> | 36 | #include <xen/interface/xen.h> |
40 | #include <xen/interface/version.h> /* to compile feature.c */ | 37 | #include <xen/interface/version.h> /* to compile feature.c */ |
41 | #include <xen/features.h> /* to compile xen-netfront.c */ | 38 | #include <xen/features.h> /* to compile xen-netfront.c */ | ||
@@ -43,22 +40,32 @@ | |||
43 | 40 | ||
44 | /* xen_domain_type is set before executing any C code by early_xen_setup */ | 41 | /* xen_domain_type is set before executing any C code by early_xen_setup */ |
45 | enum xen_domain_type { | 42 | enum xen_domain_type { |
46 | XEN_NATIVE, | 43 | XEN_NATIVE, /* running on bare hardware */ |
47 | XEN_PV_DOMAIN, | 44 | XEN_PV_DOMAIN, /* running in a PV domain */ |
48 | XEN_HVM_DOMAIN, | 45 | XEN_HVM_DOMAIN, /* running in a Xen hvm domain */ |
49 | }; | 46 | }; |
50 | 47 | ||
48 | #ifdef CONFIG_XEN | ||
51 | extern enum xen_domain_type xen_domain_type; | 49 | extern enum xen_domain_type xen_domain_type; |
50 | #else | ||
51 | #define xen_domain_type XEN_NATIVE | ||
52 | #endif | ||
52 | 53 | ||
53 | #define xen_domain() (xen_domain_type != XEN_NATIVE) | 54 | #define xen_domain() (xen_domain_type != XEN_NATIVE) |
54 | #define xen_pv_domain() (xen_domain_type == XEN_PV_DOMAIN) | 55 | #define xen_pv_domain() (xen_domain() && \ |
55 | #define xen_initial_domain() (xen_pv_domain() && \ | 56 | xen_domain_type == XEN_PV_DOMAIN) |
57 | #define xen_hvm_domain() (xen_domain() && \ | ||
58 | xen_domain_type == XEN_HVM_DOMAIN) | ||
59 | |||
60 | #ifdef CONFIG_XEN_DOM0 | ||
61 | #define xen_initial_domain() (xen_pv_domain() && \ | ||
56 | (xen_start_info->flags & SIF_INITDOMAIN)) | 62 | (xen_start_info->flags & SIF_INITDOMAIN)) |
57 | #define xen_hvm_domain() (xen_domain_type == XEN_HVM_DOMAIN) | 63 | #else |
64 | #define xen_initial_domain() (0) | ||
65 | #endif | ||
58 | 66 | ||
59 | /* deprecated. remove this */ | ||
60 | #define is_running_on_xen() (xen_domain_type == XEN_PV_DOMAIN) | ||
61 | 67 | ||
68 | #ifdef CONFIG_XEN | ||
62 | extern struct shared_info *HYPERVISOR_shared_info; | 69 | extern struct shared_info *HYPERVISOR_shared_info; |
63 | extern struct start_info *xen_start_info; | 70 | extern struct start_info *xen_start_info; |
64 | 71 | ||
@@ -74,16 +81,6 @@ void force_evtchn_callback(void); | |||
74 | 81 | ||
75 | /* For setup_arch() in arch/ia64/kernel/setup.c */ | 82 | /* For setup_arch() in arch/ia64/kernel/setup.c */ |
76 | void xen_ia64_enable_opt_feature(void); | 83 | void xen_ia64_enable_opt_feature(void); |
77 | |||
78 | #else /* CONFIG_XEN */ | ||
79 | |||
80 | #define xen_domain() (0) | ||
81 | #define xen_pv_domain() (0) | ||
82 | #define xen_initial_domain() (0) | ||
83 | #define xen_hvm_domain() (0) | ||
84 | #define is_running_on_xen() (0) /* deprecated. remove this */ | ||
85 | #endif | 84 | #endif |
86 | 85 | ||
87 | #define is_initial_xendomain() (0) /* deprecated. remove this */ | ||
88 | |||
89 | #endif /* _ASM_IA64_XEN_HYPERVISOR_H */ | 86 | #endif /* _ASM_IA64_XEN_HYPERVISOR_H */ |
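Illustrative sketch (not part of this patch): after this restructuring the predicates are usable from generic ia64 code whether or not CONFIG_XEN is set, since xen_domain_type falls back to the constant XEN_NATIVE when Xen support is compiled out. A possible use, with a hypothetical function name:

#include <linux/kernel.h>
#include <asm/xen/hypervisor.h>

static void example_report_domain(void)
{
	if (!xen_domain()) {
		printk(KERN_INFO "running on bare hardware\n");
		return;
	}
	if (xen_pv_domain())
		printk(KERN_INFO "Xen PV domain%s\n",
		       xen_initial_domain() ? " (dom0)" : "");
	else if (xen_hvm_domain())
		printk(KERN_INFO "Xen HVM domain\n");
}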
diff --git a/arch/ia64/include/asm/xen/inst.h b/arch/ia64/include/asm/xen/inst.h index 19c2ae1d878a..c53a47611208 100644 --- a/arch/ia64/include/asm/xen/inst.h +++ b/arch/ia64/include/asm/xen/inst.h | |||
@@ -33,6 +33,9 @@ | |||
33 | #define __paravirt_work_processed_syscall_target \ | 33 | #define __paravirt_work_processed_syscall_target \ |
34 | xen_work_processed_syscall | 34 | xen_work_processed_syscall |
35 | 35 | ||
36 | #define paravirt_fsyscall_table xen_fsyscall_table | ||
37 | #define paravirt_fsys_bubble_down xen_fsys_bubble_down | ||
38 | |||
36 | #define MOV_FROM_IFA(reg) \ | 39 | #define MOV_FROM_IFA(reg) \ |
37 | movl reg = XSI_IFA; \ | 40 | movl reg = XSI_IFA; \ |
38 | ;; \ | 41 | ;; \ |
@@ -110,6 +113,27 @@ | |||
110 | .endm | 113 | .endm |
111 | #define MOV_FROM_PSR(pred, reg, clob) __MOV_FROM_PSR pred, reg, clob | 114 | #define MOV_FROM_PSR(pred, reg, clob) __MOV_FROM_PSR pred, reg, clob |
112 | 115 | ||
116 | /* assuming ar.itc is read with interrupts disabled. */ | ||
117 | #define MOV_FROM_ITC(pred, pred_clob, reg, clob) \ | ||
118 | (pred) movl clob = XSI_ITC_OFFSET; \ | ||
119 | ;; \ | ||
120 | (pred) ld8 clob = [clob]; \ | ||
121 | (pred) mov reg = ar.itc; \ | ||
122 | ;; \ | ||
123 | (pred) add reg = reg, clob; \ | ||
124 | ;; \ | ||
125 | (pred) movl clob = XSI_ITC_LAST; \ | ||
126 | ;; \ | ||
127 | (pred) ld8 clob = [clob]; \ | ||
128 | ;; \ | ||
129 | (pred) cmp.geu.unc pred_clob, p0 = clob, reg; \ | ||
130 | ;; \ | ||
131 | (pred_clob) add reg = 1, clob; \ | ||
132 | ;; \ | ||
133 | (pred) movl clob = XSI_ITC_LAST; \ | ||
134 | ;; \ | ||
135 | (pred) st8 [clob] = reg | ||
136 | |||
113 | 137 | ||
114 | #define MOV_TO_IFA(reg, clob) \ | 138 | #define MOV_TO_IFA(reg, clob) \ |
115 | movl clob = XSI_IFA; \ | 139 | movl clob = XSI_IFA; \ |
@@ -362,6 +386,10 @@ | |||
362 | #define RSM_PSR_DT \ | 386 | #define RSM_PSR_DT \ |
363 | XEN_HYPER_RSM_PSR_DT | 387 | XEN_HYPER_RSM_PSR_DT |
364 | 388 | ||
389 | #define RSM_PSR_BE_I(clob0, clob1) \ | ||
390 | RSM_PSR_I(p0, clob0, clob1); \ | ||
391 | rum psr.be | ||
392 | |||
365 | #define SSM_PSR_DT_AND_SRLZ_I \ | 393 | #define SSM_PSR_DT_AND_SRLZ_I \ |
366 | XEN_HYPER_SSM_PSR_DT | 394 | XEN_HYPER_SSM_PSR_DT |
367 | 395 | ||
diff --git a/arch/ia64/include/asm/xen/interface.h b/arch/ia64/include/asm/xen/interface.h index f00fab40854d..e951e740bdf2 100644 --- a/arch/ia64/include/asm/xen/interface.h +++ b/arch/ia64/include/asm/xen/interface.h | |||
@@ -209,6 +209,15 @@ struct mapped_regs { | |||
209 | unsigned long krs[8]; /* kernel registers */ | 209 | unsigned long krs[8]; /* kernel registers */ |
210 | unsigned long tmp[16]; /* temp registers | 210 | unsigned long tmp[16]; /* temp registers |
211 | (e.g. for hyperprivops) */ | 211 | (e.g. for hyperprivops) */ |
212 | |||
213 | /* itc paravirtualization | ||
214 | * vAR.ITC = mAR.ITC + itc_offset | ||
215 | * itc_last is one which was lastly passed to | ||
216 | * the guest OS in order to prevent it from | ||
217 | * going backwords. | ||
218 | */ | ||
219 | unsigned long itc_offset; | ||
220 | unsigned long itc_last; | ||
212 | }; | 221 | }; |
213 | }; | 222 | }; |
214 | }; | 223 | }; |
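Illustrative sketch (not part of this patch) of the arithmetic these two fields support; the real implementation is the MOV_FROM_ITC assembly macro added to asm/xen/inst.h earlier in this series. Assuming struct mapped_regs as extended here: the guest-visible ITC is the hardware ITC plus itc_offset, clamped so it never moves backwards past the last value returned to the guest.

static unsigned long example_virtual_itc(struct mapped_regs *r,
					 unsigned long hw_itc)
{
	unsigned long vitc = hw_itc + r->itc_offset;

	if (r->itc_last >= vitc)	/* would appear to go backwards */
		vitc = r->itc_last + 1;
	r->itc_last = vitc;		/* remember what the guest last saw */
	return vitc;
}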
diff --git a/arch/ia64/include/asm/xen/minstate.h b/arch/ia64/include/asm/xen/minstate.h index 4d92d9bbda7b..c57fa910f2c9 100644 --- a/arch/ia64/include/asm/xen/minstate.h +++ b/arch/ia64/include/asm/xen/minstate.h | |||
@@ -1,3 +1,12 @@ | |||
1 | |||
2 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
3 | /* read ar.itc in advance, and use it before leaving bank 0 */ | ||
4 | #define XEN_ACCOUNT_GET_STAMP \ | ||
5 | MOV_FROM_ITC(pUStk, p6, r20, r2); | ||
6 | #else | ||
7 | #define XEN_ACCOUNT_GET_STAMP | ||
8 | #endif | ||
9 | |||
1 | /* | 10 | /* |
2 | * DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves | 11 | * DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves |
3 | * the minimum state necessary that allows us to turn psr.ic back | 12 | * the minimum state necessary that allows us to turn psr.ic back |
@@ -123,7 +132,7 @@ | |||
123 | ;; \ | 132 | ;; \ |
124 | .mem.offset 0,0; st8.spill [r16]=r2,16; \ | 133 | .mem.offset 0,0; st8.spill [r16]=r2,16; \ |
125 | .mem.offset 8,0; st8.spill [r17]=r3,16; \ | 134 | .mem.offset 8,0; st8.spill [r17]=r3,16; \ |
126 | ACCOUNT_GET_STAMP \ | 135 | XEN_ACCOUNT_GET_STAMP \ |
127 | adds r2=IA64_PT_REGS_R16_OFFSET,r1; \ | 136 | adds r2=IA64_PT_REGS_R16_OFFSET,r1; \ |
128 | ;; \ | 137 | ;; \ |
129 | EXTRA; \ | 138 | EXTRA; \ |
diff --git a/arch/ia64/include/asm/xen/patchlist.h b/arch/ia64/include/asm/xen/patchlist.h new file mode 100644 index 000000000000..eae944e88846 --- /dev/null +++ b/arch/ia64/include/asm/xen/patchlist.h | |||
@@ -0,0 +1,38 @@ | |||
1 | /****************************************************************************** | ||
2 | * arch/ia64/include/asm/xen/patchlist.h | ||
3 | * | ||
4 | * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> | ||
5 | * VA Linux Systems Japan K.K. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | #define __paravirt_start_gate_fsyscall_patchlist \ | ||
24 | __xen_start_gate_fsyscall_patchlist | ||
25 | #define __paravirt_end_gate_fsyscall_patchlist \ | ||
26 | __xen_end_gate_fsyscall_patchlist | ||
27 | #define __paravirt_start_gate_brl_fsys_bubble_down_patchlist \ | ||
28 | __xen_start_gate_brl_fsys_bubble_down_patchlist | ||
29 | #define __paravirt_end_gate_brl_fsys_bubble_down_patchlist \ | ||
30 | __xen_end_gate_brl_fsys_bubble_down_patchlist | ||
31 | #define __paravirt_start_gate_vtop_patchlist \ | ||
32 | __xen_start_gate_vtop_patchlist | ||
33 | #define __paravirt_end_gate_vtop_patchlist \ | ||
34 | __xen_end_gate_vtop_patchlist | ||
35 | #define __paravirt_start_gate_mckinley_e9_patchlist \ | ||
36 | __xen_start_gate_mckinley_e9_patchlist | ||
37 | #define __paravirt_end_gate_mckinley_e9_patchlist \ | ||
38 | __xen_end_gate_mckinley_e9_patchlist | ||
diff --git a/arch/ia64/include/asm/xen/privop.h b/arch/ia64/include/asm/xen/privop.h index 71ec7546e100..fb4ec5e0b066 100644 --- a/arch/ia64/include/asm/xen/privop.h +++ b/arch/ia64/include/asm/xen/privop.h | |||
@@ -55,6 +55,8 @@ | |||
55 | #define XSI_BANK1_R16 (XSI_BASE + XSI_BANK1_R16_OFS) | 55 | #define XSI_BANK1_R16 (XSI_BASE + XSI_BANK1_R16_OFS) |
56 | #define XSI_BANKNUM (XSI_BASE + XSI_BANKNUM_OFS) | 56 | #define XSI_BANKNUM (XSI_BASE + XSI_BANKNUM_OFS) |
57 | #define XSI_IHA (XSI_BASE + XSI_IHA_OFS) | 57 | #define XSI_IHA (XSI_BASE + XSI_IHA_OFS) |
58 | #define XSI_ITC_OFFSET (XSI_BASE + XSI_ITC_OFFSET_OFS) | ||
59 | #define XSI_ITC_LAST (XSI_BASE + XSI_ITC_LAST_OFS) | ||
58 | #endif | 60 | #endif |
59 | 61 | ||
60 | #ifndef __ASSEMBLY__ | 62 | #ifndef __ASSEMBLY__ |
@@ -67,7 +69,7 @@ | |||
67 | * may have different semantics depending on whether they are executed | 69 | * may have different semantics depending on whether they are executed |
68 | * at PL0 vs PL!=0. When paravirtualized, these instructions mustn't | 70 | * at PL0 vs PL!=0. When paravirtualized, these instructions mustn't |
69 | * be allowed to execute directly, lest incorrect semantics result. */ | 71 | * be allowed to execute directly, lest incorrect semantics result. */ |
70 | extern void xen_fc(unsigned long addr); | 72 | extern void xen_fc(void *addr); |
71 | extern unsigned long xen_thash(unsigned long addr); | 73 | extern unsigned long xen_thash(unsigned long addr); |
72 | 74 | ||
73 | /* Note that "ttag" and "cover" are also privilege-sensitive; "ttag" | 75 | /* Note that "ttag" and "cover" are also privilege-sensitive; "ttag" |
@@ -80,8 +82,10 @@ extern unsigned long xen_thash(unsigned long addr); | |||
80 | extern unsigned long xen_get_cpuid(int index); | 82 | extern unsigned long xen_get_cpuid(int index); |
81 | extern unsigned long xen_get_pmd(int index); | 83 | extern unsigned long xen_get_pmd(int index); |
82 | 84 | ||
85 | #ifndef ASM_SUPPORTED | ||
83 | extern unsigned long xen_get_eflag(void); /* see xen_ia64_getreg */ | 86 | extern unsigned long xen_get_eflag(void); /* see xen_ia64_getreg */ |
84 | extern void xen_set_eflag(unsigned long); /* see xen_ia64_setreg */ | 87 | extern void xen_set_eflag(unsigned long); /* see xen_ia64_setreg */ |
88 | #endif | ||
85 | 89 | ||
86 | /************************************************/ | 90 | /************************************************/ |
87 | /* Instructions paravirtualized for performance */ | 91 | /* Instructions paravirtualized for performance */ |
@@ -106,6 +110,7 @@ extern void xen_set_eflag(unsigned long); /* see xen_ia64_setreg */ | |||
106 | #define xen_get_virtual_pend() \ | 110 | #define xen_get_virtual_pend() \ |
107 | (*(((uint8_t *)XEN_MAPPEDREGS->interrupt_mask_addr) - 1)) | 111 | (*(((uint8_t *)XEN_MAPPEDREGS->interrupt_mask_addr) - 1)) |
108 | 112 | ||
113 | #ifndef ASM_SUPPORTED | ||
109 | /* Although all privileged operations can be left to trap and will | 114 | /* Although all privileged operations can be left to trap and will |
110 | * be properly handled by Xen, some are frequent enough that we use | 115 | * be properly handled by Xen, some are frequent enough that we use |
111 | * hyperprivops for performance. */ | 116 | * hyperprivops for performance. */ |
@@ -123,6 +128,7 @@ extern void xen_set_rr0_to_rr4(unsigned long val0, unsigned long val1, | |||
123 | unsigned long val4); | 128 | unsigned long val4); |
124 | extern void xen_set_kr(unsigned long index, unsigned long val); | 129 | extern void xen_set_kr(unsigned long index, unsigned long val); |
125 | extern void xen_ptcga(unsigned long addr, unsigned long size); | 130 | extern void xen_ptcga(unsigned long addr, unsigned long size); |
131 | #endif /* !ASM_SUPPORTED */ | ||
126 | 132 | ||
127 | #endif /* !__ASSEMBLY__ */ | 133 | #endif /* !__ASSEMBLY__ */ |
128 | 134 | ||
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile index f2778f2c4fd9..5628e9a990a6 100644 --- a/arch/ia64/kernel/Makefile +++ b/arch/ia64/kernel/Makefile | |||
@@ -5,7 +5,7 @@ | |||
5 | extra-y := head.o init_task.o vmlinux.lds | 5 | extra-y := head.o init_task.o vmlinux.lds |
6 | 6 | ||
7 | obj-y := acpi.o entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64.o \ | 7 | obj-y := acpi.o entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64.o \ |
8 | irq_lsapic.o ivt.o machvec.o pal.o patch.o process.o perfmon.o ptrace.o sal.o \ | 8 | irq_lsapic.o ivt.o machvec.o pal.o paravirt_patchlist.o patch.o process.o perfmon.o ptrace.o sal.o \ |
9 | salinfo.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \ | 9 | salinfo.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \ |
10 | unwind.o mca.o mca_asm.o topology.o dma-mapping.o | 10 | unwind.o mca.o mca_asm.o topology.o dma-mapping.o |
11 | 11 | ||
@@ -36,7 +36,8 @@ obj-$(CONFIG_PCI_MSI) += msi_ia64.o | |||
36 | mca_recovery-y += mca_drv.o mca_drv_asm.o | 36 | mca_recovery-y += mca_drv.o mca_drv_asm.o |
37 | obj-$(CONFIG_IA64_MC_ERR_INJECT)+= err_inject.o | 37 | obj-$(CONFIG_IA64_MC_ERR_INJECT)+= err_inject.o |
38 | 38 | ||
39 | obj-$(CONFIG_PARAVIRT) += paravirt.o paravirtentry.o | 39 | obj-$(CONFIG_PARAVIRT) += paravirt.o paravirtentry.o \ |
40 | paravirt_patch.o | ||
40 | 41 | ||
41 | obj-$(CONFIG_IA64_ESI) += esi.o | 42 | obj-$(CONFIG_IA64_ESI) += esi.o |
42 | ifneq ($(CONFIG_IA64_ESI),) | 43 | ifneq ($(CONFIG_IA64_ESI),) |
@@ -45,35 +46,13 @@ endif | |||
45 | obj-$(CONFIG_DMAR) += pci-dma.o | 46 | obj-$(CONFIG_DMAR) += pci-dma.o |
46 | obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o | 47 | obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o |
47 | 48 | ||
48 | # The gate DSO image is built using a special linker script. | ||
49 | targets += gate.so gate-syms.o | ||
50 | |||
51 | extra-y += gate.so gate-syms.o gate.lds gate.o | ||
52 | |||
53 | # fp_emulate() expects f2-f5,f16-f31 to contain the user-level state. | 49 | # fp_emulate() expects f2-f5,f16-f31 to contain the user-level state. |
54 | CFLAGS_traps.o += -mfixed-range=f2-f5,f16-f31 | 50 | CFLAGS_traps.o += -mfixed-range=f2-f5,f16-f31 |
55 | 51 | ||
56 | CPPFLAGS_gate.lds := -P -C -U$(ARCH) | 52 | # The gate DSO image is built using a special linker script. |
57 | 53 | include $(srctree)/arch/ia64/kernel/Makefile.gate | |
58 | quiet_cmd_gate = GATE $@ | 54 | # tell that this is compiled for native
59 | cmd_gate = $(CC) -nostdlib $(GATECFLAGS_$(@F)) -Wl,-T,$(filter-out FORCE,$^) -o $@ | 55 | CPPFLAGS_gate.lds += -D__IA64_GATE_PARAVIRTUALIZED_NATIVE |
60 | |||
61 | GATECFLAGS_gate.so = -shared -s -Wl,-soname=linux-gate.so.1 \ | ||
62 | $(call ld-option, -Wl$(comma)--hash-style=sysv) | ||
63 | $(obj)/gate.so: $(obj)/gate.lds $(obj)/gate.o FORCE | ||
64 | $(call if_changed,gate) | ||
65 | |||
66 | $(obj)/built-in.o: $(obj)/gate-syms.o | ||
67 | $(obj)/built-in.o: ld_flags += -R $(obj)/gate-syms.o | ||
68 | |||
69 | GATECFLAGS_gate-syms.o = -r | ||
70 | $(obj)/gate-syms.o: $(obj)/gate.lds $(obj)/gate.o FORCE | ||
71 | $(call if_changed,gate) | ||
72 | |||
73 | # gate-data.o contains the gate DSO image as data in section .data.gate. | ||
74 | # We must build gate.so before we can assemble it. | ||
75 | # Note: kbuild does not track this dependency due to usage of .incbin | ||
76 | $(obj)/gate-data.o: $(obj)/gate.so | ||
77 | 56 | ||
78 | # Calculate NR_IRQ = max(IA64_NATIVE_NR_IRQS, XEN_NR_IRQS, ...) based on config | 57 | # Calculate NR_IRQ = max(IA64_NATIVE_NR_IRQS, XEN_NR_IRQS, ...) based on config |
79 | define sed-y | 58 | define sed-y |
@@ -109,9 +88,9 @@ include/asm-ia64/nr-irqs.h: arch/$(SRCARCH)/kernel/nr-irqs.s | |||
109 | clean-files += $(objtree)/include/asm-ia64/nr-irqs.h | 88 | clean-files += $(objtree)/include/asm-ia64/nr-irqs.h |
110 | 89 | ||
111 | # | 90 | # |
112 | # native ivt.S and entry.S | 91 | # native ivt.S, entry.S and fsys.S |
113 | # | 92 | # |
114 | ASM_PARAVIRT_OBJS = ivt.o entry.o | 93 | ASM_PARAVIRT_OBJS = ivt.o entry.o fsys.o |
115 | define paravirtualized_native | 94 | define paravirtualized_native |
116 | AFLAGS_$(1) += -D__IA64_ASM_PARAVIRTUALIZED_NATIVE | 95 | AFLAGS_$(1) += -D__IA64_ASM_PARAVIRTUALIZED_NATIVE |
117 | AFLAGS_pvchk-sed-$(1) += -D__IA64_ASM_PARAVIRTUALIZED_PVCHECK | 96 | AFLAGS_pvchk-sed-$(1) += -D__IA64_ASM_PARAVIRTUALIZED_PVCHECK |
diff --git a/arch/ia64/kernel/Makefile.gate b/arch/ia64/kernel/Makefile.gate new file mode 100644 index 000000000000..1d87f84069b3 --- /dev/null +++ b/arch/ia64/kernel/Makefile.gate | |||
@@ -0,0 +1,27 @@ | |||
1 | # The gate DSO image is built using a special linker script. | ||
2 | |||
3 | targets += gate.so gate-syms.o | ||
4 | |||
5 | extra-y += gate.so gate-syms.o gate.lds gate.o | ||
6 | |||
7 | CPPFLAGS_gate.lds := -P -C -U$(ARCH) | ||
8 | |||
9 | quiet_cmd_gate = GATE $@ | ||
10 | cmd_gate = $(CC) -nostdlib $(GATECFLAGS_$(@F)) -Wl,-T,$(filter-out FORCE,$^) -o $@ | ||
11 | |||
12 | GATECFLAGS_gate.so = -shared -s -Wl,-soname=linux-gate.so.1 \ | ||
13 | $(call ld-option, -Wl$(comma)--hash-style=sysv) | ||
14 | $(obj)/gate.so: $(obj)/gate.lds $(obj)/gate.o FORCE | ||
15 | $(call if_changed,gate) | ||
16 | |||
17 | $(obj)/built-in.o: $(obj)/gate-syms.o | ||
18 | $(obj)/built-in.o: ld_flags += -R $(obj)/gate-syms.o | ||
19 | |||
20 | GATECFLAGS_gate-syms.o = -r | ||
21 | $(obj)/gate-syms.o: $(obj)/gate.lds $(obj)/gate.o FORCE | ||
22 | $(call if_changed,gate) | ||
23 | |||
24 | # gate-data.o contains the gate DSO image as data in section .data.gate. | ||
25 | # We must build gate.so before we can assemble it. | ||
26 | # Note: kbuild does not track this dependency due to usage of .incbin | ||
27 | $(obj)/gate-data.o: $(obj)/gate.so | ||
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c index bdef2ce38c8b..5510317db37b 100644 --- a/arch/ia64/kernel/acpi.c +++ b/arch/ia64/kernel/acpi.c | |||
@@ -890,7 +890,7 @@ __init void prefill_possible_map(void) | |||
890 | possible, max((possible - available_cpus), 0)); | 890 | possible, max((possible - available_cpus), 0)); |
891 | 891 | ||
892 | for (i = 0; i < possible; i++) | 892 | for (i = 0; i < possible; i++) |
893 | cpu_set(i, cpu_possible_map); | 893 | set_cpu_possible(i, true); |
894 | } | 894 | } |
895 | 895 | ||
896 | int acpi_map_lsapic(acpi_handle handle, int *pcpu) | 896 | int acpi_map_lsapic(acpi_handle handle, int *pcpu) |
@@ -928,9 +928,9 @@ int acpi_map_lsapic(acpi_handle handle, int *pcpu) | |||
928 | buffer.length = ACPI_ALLOCATE_BUFFER; | 928 | buffer.length = ACPI_ALLOCATE_BUFFER; |
929 | buffer.pointer = NULL; | 929 | buffer.pointer = NULL; |
930 | 930 | ||
931 | cpus_complement(tmp_map, cpu_present_map); | 931 | cpumask_complement(&tmp_map, cpu_present_mask); |
932 | cpu = first_cpu(tmp_map); | 932 | cpu = cpumask_first(&tmp_map); |
933 | if (cpu >= NR_CPUS) | 933 | if (cpu >= nr_cpu_ids) |
934 | return -EINVAL; | 934 | return -EINVAL; |
935 | 935 | ||
936 | acpi_map_cpu2node(handle, cpu, physid); | 936 | acpi_map_cpu2node(handle, cpu, physid); |
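Illustrative sketch (not part of this patch) of the cpumask idiom the hunk above switches to: the accessor helpers replace direct manipulation of the old cpumask_t globals, and nr_cpu_ids replaces NR_CPUS as the limit. The function name is hypothetical.

#include <linux/cpumask.h>
#include <linux/errno.h>

/* Find a CPU number that is not yet present, or fail. */
static int example_pick_unused_cpu(void)
{
	cpumask_t tmp_map;
	int cpu;

	cpumask_complement(&tmp_map, cpu_present_mask);
	cpu = cpumask_first(&tmp_map);
	if (cpu >= nr_cpu_ids)
		return -EINVAL;
	return cpu;
}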
diff --git a/arch/ia64/kernel/asm-offsets.c b/arch/ia64/kernel/asm-offsets.c index 742dbb1d5a4f..af5650169043 100644 --- a/arch/ia64/kernel/asm-offsets.c +++ b/arch/ia64/kernel/asm-offsets.c | |||
@@ -316,5 +316,7 @@ void foo(void) | |||
316 | DEFINE_MAPPED_REG_OFS(XSI_BANK1_R16_OFS, bank1_regs[0]); | 316 | DEFINE_MAPPED_REG_OFS(XSI_BANK1_R16_OFS, bank1_regs[0]); |
317 | DEFINE_MAPPED_REG_OFS(XSI_B0NATS_OFS, vbnat); | 317 | DEFINE_MAPPED_REG_OFS(XSI_B0NATS_OFS, vbnat); |
318 | DEFINE_MAPPED_REG_OFS(XSI_B1NATS_OFS, vnat); | 318 | DEFINE_MAPPED_REG_OFS(XSI_B1NATS_OFS, vnat); |
319 | DEFINE_MAPPED_REG_OFS(XSI_ITC_OFFSET_OFS, itc_offset); | ||
320 | DEFINE_MAPPED_REG_OFS(XSI_ITC_LAST_OFS, itc_last); | ||
319 | #endif /* CONFIG_XEN */ | 321 | #endif /* CONFIG_XEN */ |
320 | } | 322 | } |
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c index efaff15d8cf1..7ef80e8161ce 100644 --- a/arch/ia64/kernel/efi.c +++ b/arch/ia64/kernel/efi.c | |||
@@ -456,6 +456,7 @@ efi_map_pal_code (void) | |||
456 | GRANULEROUNDDOWN((unsigned long) pal_vaddr), | 456 | GRANULEROUNDDOWN((unsigned long) pal_vaddr), |
457 | pte_val(pfn_pte(__pa(pal_vaddr) >> PAGE_SHIFT, PAGE_KERNEL)), | 457 | pte_val(pfn_pte(__pa(pal_vaddr) >> PAGE_SHIFT, PAGE_KERNEL)), |
458 | IA64_GRANULE_SHIFT); | 458 | IA64_GRANULE_SHIFT); |
459 | paravirt_dv_serialize_data(); | ||
459 | ia64_set_psr(psr); /* restore psr */ | 460 | ia64_set_psr(psr); /* restore psr */ |
460 | } | 461 | } |
461 | 462 | ||
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S index e5341e2c1175..ccfdeee9d89f 100644 --- a/arch/ia64/kernel/entry.S +++ b/arch/ia64/kernel/entry.S | |||
@@ -735,7 +735,7 @@ GLOBAL_ENTRY(__paravirt_leave_syscall) | |||
735 | __paravirt_work_processed_syscall: | 735 | __paravirt_work_processed_syscall: |
736 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | 736 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING |
737 | adds r2=PT(LOADRS)+16,r12 | 737 | adds r2=PT(LOADRS)+16,r12 |
738 | (pUStk) mov.m r22=ar.itc // fetch time at leave | 738 | MOV_FROM_ITC(pUStk, p9, r22, r19) // fetch time at leave |
739 | adds r18=TI_FLAGS+IA64_TASK_SIZE,r13 | 739 | adds r18=TI_FLAGS+IA64_TASK_SIZE,r13 |
740 | ;; | 740 | ;; |
741 | (p6) ld4 r31=[r18] // load current_thread_info()->flags | 741 | (p6) ld4 r31=[r18] // load current_thread_info()->flags |
@@ -984,7 +984,7 @@ GLOBAL_ENTRY(__paravirt_leave_kernel) | |||
984 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | 984 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING |
985 | .pred.rel.mutex pUStk,pKStk | 985 | .pred.rel.mutex pUStk,pKStk |
986 | MOV_FROM_PSR(pKStk, r22, r29) // M2 read PSR now that interrupts are disabled | 986 | MOV_FROM_PSR(pKStk, r22, r29) // M2 read PSR now that interrupts are disabled |
987 | (pUStk) mov.m r22=ar.itc // M fetch time at leave | 987 | MOV_FROM_ITC(pUStk, p9, r22, r29) // M fetch time at leave |
988 | nop.i 0 | 988 | nop.i 0 |
989 | ;; | 989 | ;; |
990 | #else | 990 | #else |
diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S index c1625c7e1779..3567d54f8cee 100644 --- a/arch/ia64/kernel/fsys.S +++ b/arch/ia64/kernel/fsys.S | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <asm/unistd.h> | 25 | #include <asm/unistd.h> |
26 | 26 | ||
27 | #include "entry.h" | 27 | #include "entry.h" |
28 | #include "paravirt_inst.h" | ||
28 | 29 | ||
29 | /* | 30 | /* |
30 | * See Documentation/ia64/fsys.txt for details on fsyscalls. | 31 | * See Documentation/ia64/fsys.txt for details on fsyscalls. |
@@ -279,7 +280,7 @@ ENTRY(fsys_gettimeofday) | |||
279 | (p9) cmp.eq p13,p0 = 0,r30 // if mmio_ptr, clear p13 jitter control | 280 | (p9) cmp.eq p13,p0 = 0,r30 // if mmio_ptr, clear p13 jitter control |
280 | ;; | 281 | ;; |
281 | .pred.rel.mutex p8,p9 | 282 | .pred.rel.mutex p8,p9 |
282 | (p8) mov r2 = ar.itc // CPU_TIMER. 36 clocks latency!!! | 283 | MOV_FROM_ITC(p8, p6, r2, r10) // CPU_TIMER. 36 clocks latency!!! |
283 | (p9) ld8 r2 = [r30] // MMIO_TIMER. Could also have latency issues.. | 284 | (p9) ld8 r2 = [r30] // MMIO_TIMER. Could also have latency issues.. |
284 | (p13) ld8 r25 = [r19] // get itc_lastcycle value | 285 | (p13) ld8 r25 = [r19] // get itc_lastcycle value |
285 | ld8 r9 = [r22],IA64_TIMESPEC_TV_NSEC_OFFSET // tv_sec | 286 | ld8 r9 = [r22],IA64_TIMESPEC_TV_NSEC_OFFSET // tv_sec |
@@ -418,7 +419,7 @@ EX(.fail_efault, ld8 r14=[r33]) // r14 <- *set | |||
418 | mov r17=(1 << (SIGKILL - 1)) | (1 << (SIGSTOP - 1)) | 419 | mov r17=(1 << (SIGKILL - 1)) | (1 << (SIGSTOP - 1)) |
419 | ;; | 420 | ;; |
420 | 421 | ||
421 | rsm psr.i // mask interrupt delivery | 422 | RSM_PSR_I(p0, r18, r19) // mask interrupt delivery |
422 | mov ar.ccv=0 | 423 | mov ar.ccv=0 |
423 | andcm r14=r14,r17 // filter out SIGKILL & SIGSTOP | 424 | andcm r14=r14,r17 // filter out SIGKILL & SIGSTOP |
424 | 425 | ||
@@ -491,7 +492,7 @@ EX(.fail_efault, ld8 r14=[r33]) // r14 <- *set | |||
491 | #ifdef CONFIG_SMP | 492 | #ifdef CONFIG_SMP |
492 | st4.rel [r31]=r0 // release the lock | 493 | st4.rel [r31]=r0 // release the lock |
493 | #endif | 494 | #endif |
494 | ssm psr.i | 495 | SSM_PSR_I(p0, p9, r31) |
495 | ;; | 496 | ;; |
496 | 497 | ||
497 | srlz.d // ensure psr.i is set again | 498 | srlz.d // ensure psr.i is set again |
@@ -513,7 +514,7 @@ EX(.fail_efault, (p15) st8 [r34]=r3) | |||
513 | #ifdef CONFIG_SMP | 514 | #ifdef CONFIG_SMP |
514 | st4.rel [r31]=r0 // release the lock | 515 | st4.rel [r31]=r0 // release the lock |
515 | #endif | 516 | #endif |
516 | ssm psr.i | 517 | SSM_PSR_I(p0, p9, r17) |
517 | ;; | 518 | ;; |
518 | srlz.d | 519 | srlz.d |
519 | br.sptk.many fsys_fallback_syscall // with signal pending, do the heavy-weight syscall | 520 | br.sptk.many fsys_fallback_syscall // with signal pending, do the heavy-weight syscall |
@@ -521,7 +522,7 @@ EX(.fail_efault, (p15) st8 [r34]=r3) | |||
521 | #ifdef CONFIG_SMP | 522 | #ifdef CONFIG_SMP |
522 | .lock_contention: | 523 | .lock_contention: |
523 | /* Rather than spinning here, fall back on doing a heavy-weight syscall. */ | 524 | /* Rather than spinning here, fall back on doing a heavy-weight syscall. */ |
524 | ssm psr.i | 525 | SSM_PSR_I(p0, p9, r17) |
525 | ;; | 526 | ;; |
526 | srlz.d | 527 | srlz.d |
527 | br.sptk.many fsys_fallback_syscall | 528 | br.sptk.many fsys_fallback_syscall |
@@ -592,17 +593,17 @@ ENTRY(fsys_fallback_syscall) | |||
592 | adds r17=-1024,r15 | 593 | adds r17=-1024,r15 |
593 | movl r14=sys_call_table | 594 | movl r14=sys_call_table |
594 | ;; | 595 | ;; |
595 | rsm psr.i | 596 | RSM_PSR_I(p0, r26, r27) |
596 | shladd r18=r17,3,r14 | 597 | shladd r18=r17,3,r14 |
597 | ;; | 598 | ;; |
598 | ld8 r18=[r18] // load normal (heavy-weight) syscall entry-point | 599 | ld8 r18=[r18] // load normal (heavy-weight) syscall entry-point |
599 | mov r29=psr // read psr (12 cyc load latency) | 600 | MOV_FROM_PSR(p0, r29, r26) // read psr (12 cyc load latency) |
600 | mov r27=ar.rsc | 601 | mov r27=ar.rsc |
601 | mov r21=ar.fpsr | 602 | mov r21=ar.fpsr |
602 | mov r26=ar.pfs | 603 | mov r26=ar.pfs |
603 | END(fsys_fallback_syscall) | 604 | END(fsys_fallback_syscall) |
604 | /* FALL THROUGH */ | 605 | /* FALL THROUGH */ |
605 | GLOBAL_ENTRY(fsys_bubble_down) | 606 | GLOBAL_ENTRY(paravirt_fsys_bubble_down) |
606 | .prologue | 607 | .prologue |
607 | .altrp b6 | 608 | .altrp b6 |
608 | .body | 609 | .body |
@@ -640,7 +641,7 @@ GLOBAL_ENTRY(fsys_bubble_down) | |||
640 | * | 641 | * |
641 | * PSR.BE : already is turned off in __kernel_syscall_via_epc() | 642 | * PSR.BE : already is turned off in __kernel_syscall_via_epc() |
642 | * PSR.AC : don't care (kernel normally turns PSR.AC on) | 643 | * PSR.AC : don't care (kernel normally turns PSR.AC on) |
643 | * PSR.I : already turned off by the time fsys_bubble_down gets | 644 | * PSR.I : already turned off by the time paravirt_fsys_bubble_down gets |
644 | * invoked | 645 | * invoked |
645 | * PSR.DFL: always 0 (kernel never turns it on) | 646 | * PSR.DFL: always 0 (kernel never turns it on) |
646 | * PSR.DFH: don't care --- kernel never touches f32-f127 on its own | 647 | * PSR.DFH: don't care --- kernel never touches f32-f127 on its own |
@@ -650,7 +651,7 @@ GLOBAL_ENTRY(fsys_bubble_down) | |||
650 | * PSR.DB : don't care --- kernel never enables kernel-level | 651 | * PSR.DB : don't care --- kernel never enables kernel-level |
651 | * breakpoints | 652 | * breakpoints |
652 | * PSR.TB : must be 0 already; if it wasn't zero on entry to | 653 | * PSR.TB : must be 0 already; if it wasn't zero on entry to |
653 | * __kernel_syscall_via_epc, the branch to fsys_bubble_down | 654 | * __kernel_syscall_via_epc, the branch to paravirt_fsys_bubble_down |
654 | * will trigger a taken branch; the taken-trap-handler then | 655 | * will trigger a taken branch; the taken-trap-handler then |
655 | * converts the syscall into a break-based system-call. | 656 | * converts the syscall into a break-based system-call. |
656 | */ | 657 | */ |
@@ -683,7 +684,7 @@ GLOBAL_ENTRY(fsys_bubble_down) | |||
683 | ;; | 684 | ;; |
684 | mov ar.rsc=0 // M2 set enforced lazy mode, pl 0, LE, loadrs=0 | 685 | mov ar.rsc=0 // M2 set enforced lazy mode, pl 0, LE, loadrs=0 |
685 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | 686 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING |
686 | mov.m r30=ar.itc // M get cycle for accounting | 687 | MOV_FROM_ITC(p0, p6, r30, r23) // M get cycle for accounting |
687 | #else | 688 | #else |
688 | nop.m 0 | 689 | nop.m 0 |
689 | #endif | 690 | #endif |
@@ -734,21 +735,21 @@ GLOBAL_ENTRY(fsys_bubble_down) | |||
734 | mov rp=r14 // I0 set the real return addr | 735 | mov rp=r14 // I0 set the real return addr |
735 | and r3=_TIF_SYSCALL_TRACEAUDIT,r3 // A | 736 | and r3=_TIF_SYSCALL_TRACEAUDIT,r3 // A |
736 | ;; | 737 | ;; |
737 | ssm psr.i // M2 we're on kernel stacks now, reenable irqs | 738 | SSM_PSR_I(p0, p6, r22) // M2 we're on kernel stacks now, reenable irqs |
738 | cmp.eq p8,p0=r3,r0 // A | 739 | cmp.eq p8,p0=r3,r0 // A |
739 | (p10) br.cond.spnt.many ia64_ret_from_syscall // B return if bad call-frame or r15 is a NaT | 740 | (p10) br.cond.spnt.many ia64_ret_from_syscall // B return if bad call-frame or r15 is a NaT |
740 | 741 | ||
741 | nop.m 0 | 742 | nop.m 0 |
742 | (p8) br.call.sptk.many b6=b6 // B (ignore return address) | 743 | (p8) br.call.sptk.many b6=b6 // B (ignore return address) |
743 | br.cond.spnt ia64_trace_syscall // B | 744 | br.cond.spnt ia64_trace_syscall // B |
744 | END(fsys_bubble_down) | 745 | END(paravirt_fsys_bubble_down) |
745 | 746 | ||
746 | .rodata | 747 | .rodata |
747 | .align 8 | 748 | .align 8 |
748 | .globl fsyscall_table | 749 | .globl paravirt_fsyscall_table |
749 | 750 | ||
750 | data8 fsys_bubble_down | 751 | data8 paravirt_fsys_bubble_down |
751 | fsyscall_table: | 752 | paravirt_fsyscall_table: |
752 | data8 fsys_ni_syscall | 753 | data8 fsys_ni_syscall |
753 | data8 0 // exit // 1025 | 754 | data8 0 // exit // 1025 |
754 | data8 0 // read | 755 | data8 0 // read |
@@ -1033,4 +1034,4 @@ fsyscall_table: | |||
1033 | 1034 | ||
1034 | // fill in zeros for the remaining entries | 1035 | // fill in zeros for the remaining entries |
1035 | .zero: | 1036 | .zero: |
1036 | .space fsyscall_table + 8*NR_syscalls - .zero, 0 | 1037 | .space paravirt_fsyscall_table + 8*NR_syscalls - .zero, 0 |
diff --git a/arch/ia64/kernel/gate.S b/arch/ia64/kernel/gate.S index 74b1ccce4e84..cf5e0a105e16 100644 --- a/arch/ia64/kernel/gate.S +++ b/arch/ia64/kernel/gate.S | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <asm/sigcontext.h> | 13 | #include <asm/sigcontext.h> |
14 | #include <asm/system.h> | 14 | #include <asm/system.h> |
15 | #include <asm/unistd.h> | 15 | #include <asm/unistd.h> |
16 | #include "paravirt_inst.h" | ||
16 | 17 | ||
17 | /* | 18 | /* |
18 | * We can't easily refer to symbols inside the kernel. To avoid full runtime relocation, | 19 | * We can't easily refer to symbols inside the kernel. To avoid full runtime relocation, |
@@ -48,87 +49,6 @@ GLOBAL_ENTRY(__kernel_syscall_via_break) | |||
48 | } | 49 | } |
49 | END(__kernel_syscall_via_break) | 50 | END(__kernel_syscall_via_break) |
50 | 51 | ||
51 | /* | ||
52 | * On entry: | ||
53 | * r11 = saved ar.pfs | ||
54 | * r15 = system call # | ||
55 | * b0 = saved return address | ||
56 | * b6 = return address | ||
57 | * On exit: | ||
58 | * r11 = saved ar.pfs | ||
59 | * r15 = system call # | ||
60 | * b0 = saved return address | ||
61 | * all other "scratch" registers: undefined | ||
62 | * all "preserved" registers: same as on entry | ||
63 | */ | ||
64 | |||
65 | GLOBAL_ENTRY(__kernel_syscall_via_epc) | ||
66 | .prologue | ||
67 | .altrp b6 | ||
68 | .body | ||
69 | { | ||
70 | /* | ||
71 | * Note: the kernel cannot assume that the first two instructions in this | ||
72 | * bundle get executed. The remaining code must be safe even if | ||
73 | * they do not get executed. | ||
74 | */ | ||
75 | adds r17=-1024,r15 // A | ||
76 | mov r10=0 // A default to successful syscall execution | ||
77 | epc // B causes split-issue | ||
78 | } | ||
79 | ;; | ||
80 | rsm psr.be | psr.i // M2 (5 cyc to srlz.d) | ||
81 | LOAD_FSYSCALL_TABLE(r14) // X | ||
82 | ;; | ||
83 | mov r16=IA64_KR(CURRENT) // M2 (12 cyc) | ||
84 | shladd r18=r17,3,r14 // A | ||
85 | mov r19=NR_syscalls-1 // A | ||
86 | ;; | ||
87 | lfetch [r18] // M0|1 | ||
88 | mov r29=psr // M2 (12 cyc) | ||
89 | // If r17 is a NaT, p6 will be zero | ||
90 | cmp.geu p6,p7=r19,r17 // A (sysnr > 0 && sysnr < 1024+NR_syscalls)? | ||
91 | ;; | ||
92 | mov r21=ar.fpsr // M2 (12 cyc) | ||
93 | tnat.nz p10,p9=r15 // I0 | ||
94 | mov.i r26=ar.pfs // I0 (would stall anyhow due to srlz.d...) | ||
95 | ;; | ||
96 | srlz.d // M0 (forces split-issue) ensure PSR.BE==0 | ||
97 | (p6) ld8 r18=[r18] // M0|1 | ||
98 | nop.i 0 | ||
99 | ;; | ||
100 | nop.m 0 | ||
101 | (p6) tbit.z.unc p8,p0=r18,0 // I0 (dual-issues with "mov b7=r18"!) | ||
102 | nop.i 0 | ||
103 | ;; | ||
104 | (p8) ssm psr.i | ||
105 | (p6) mov b7=r18 // I0 | ||
106 | (p8) br.dptk.many b7 // B | ||
107 | |||
108 | mov r27=ar.rsc // M2 (12 cyc) | ||
109 | /* | ||
110 | * brl.cond doesn't work as intended because the linker would convert this branch | ||
111 | * into a branch to a PLT. Perhaps there will be a way to avoid this with some | ||
112 | * future version of the linker. In the meantime, we just use an indirect branch | ||
113 | * instead. | ||
114 | */ | ||
115 | #ifdef CONFIG_ITANIUM | ||
116 | (p6) add r14=-8,r14 // r14 <- addr of fsys_bubble_down entry | ||
117 | ;; | ||
118 | (p6) ld8 r14=[r14] // r14 <- fsys_bubble_down | ||
119 | ;; | ||
120 | (p6) mov b7=r14 | ||
121 | (p6) br.sptk.many b7 | ||
122 | #else | ||
123 | BRL_COND_FSYS_BUBBLE_DOWN(p6) | ||
124 | #endif | ||
125 | ssm psr.i | ||
126 | mov r10=-1 | ||
127 | (p10) mov r8=EINVAL | ||
128 | (p9) mov r8=ENOSYS | ||
129 | FSYS_RETURN | ||
130 | END(__kernel_syscall_via_epc) | ||
131 | |||
132 | # define ARG0_OFF (16 + IA64_SIGFRAME_ARG0_OFFSET) | 52 | # define ARG0_OFF (16 + IA64_SIGFRAME_ARG0_OFFSET) |
133 | # define ARG1_OFF (16 + IA64_SIGFRAME_ARG1_OFFSET) | 53 | # define ARG1_OFF (16 + IA64_SIGFRAME_ARG1_OFFSET) |
134 | # define ARG2_OFF (16 + IA64_SIGFRAME_ARG2_OFFSET) | 54 | # define ARG2_OFF (16 + IA64_SIGFRAME_ARG2_OFFSET) |
@@ -374,3 +294,92 @@ restore_rbs: | |||
374 | // invala not necessary as that will happen when returning to user-mode | 294 | // invala not necessary as that will happen when returning to user-mode |
375 | br.cond.sptk back_from_restore_rbs | 295 | br.cond.sptk back_from_restore_rbs |
376 | END(__kernel_sigtramp) | 296 | END(__kernel_sigtramp) |
297 | |||
298 | /* | ||
299 | * On entry: | ||
300 | * r11 = saved ar.pfs | ||
301 | * r15 = system call # | ||
302 | * b0 = saved return address | ||
303 | * b6 = return address | ||
304 | * On exit: | ||
305 | * r11 = saved ar.pfs | ||
306 | * r15 = system call # | ||
307 | * b0 = saved return address | ||
308 | * all other "scratch" registers: undefined | ||
309 | * all "preserved" registers: same as on entry | ||
310 | */ | ||
311 | |||
312 | GLOBAL_ENTRY(__kernel_syscall_via_epc) | ||
313 | .prologue | ||
314 | .altrp b6 | ||
315 | .body | ||
316 | { | ||
317 | /* | ||
318 | * Note: the kernel cannot assume that the first two instructions in this | ||
319 | * bundle get executed. The remaining code must be safe even if | ||
320 | * they do not get executed. | ||
321 | */ | ||
322 | adds r17=-1024,r15 // A | ||
323 | mov r10=0 // A default to successful syscall execution | ||
324 | epc // B causes split-issue | ||
325 | } | ||
326 | ;; | ||
327 | RSM_PSR_BE_I(r20, r22) // M2 (5 cyc to srlz.d) | ||
328 | LOAD_FSYSCALL_TABLE(r14) // X | ||
329 | ;; | ||
330 | mov r16=IA64_KR(CURRENT) // M2 (12 cyc) | ||
331 | shladd r18=r17,3,r14 // A | ||
332 | mov r19=NR_syscalls-1 // A | ||
333 | ;; | ||
334 | lfetch [r18] // M0|1 | ||
335 | MOV_FROM_PSR(p0, r29, r8) // M2 (12 cyc) | ||
336 | // If r17 is a NaT, p6 will be zero | ||
337 | cmp.geu p6,p7=r19,r17 // A (sysnr > 0 && sysnr < 1024+NR_syscalls)? | ||
338 | ;; | ||
339 | mov r21=ar.fpsr // M2 (12 cyc) | ||
340 | tnat.nz p10,p9=r15 // I0 | ||
341 | mov.i r26=ar.pfs // I0 (would stall anyhow due to srlz.d...) | ||
342 | ;; | ||
343 | srlz.d // M0 (forces split-issue) ensure PSR.BE==0 | ||
344 | (p6) ld8 r18=[r18] // M0|1 | ||
345 | nop.i 0 | ||
346 | ;; | ||
347 | nop.m 0 | ||
348 | (p6) tbit.z.unc p8,p0=r18,0 // I0 (dual-issues with "mov b7=r18"!) | ||
349 | nop.i 0 | ||
350 | ;; | ||
351 | SSM_PSR_I(p8, p14, r25) | ||
352 | (p6) mov b7=r18 // I0 | ||
353 | (p8) br.dptk.many b7 // B | ||
354 | |||
355 | mov r27=ar.rsc // M2 (12 cyc) | ||
356 | /* | ||
357 | * brl.cond doesn't work as intended because the linker would convert this branch | ||
358 | * into a branch to a PLT. Perhaps there will be a way to avoid this with some | ||
359 | * future version of the linker. In the meantime, we just use an indirect branch | ||
360 | * instead. | ||
361 | */ | ||
362 | #ifdef CONFIG_ITANIUM | ||
363 | (p6) add r14=-8,r14 // r14 <- addr of fsys_bubble_down entry | ||
364 | ;; | ||
365 | (p6) ld8 r14=[r14] // r14 <- fsys_bubble_down | ||
366 | ;; | ||
367 | (p6) mov b7=r14 | ||
368 | (p6) br.sptk.many b7 | ||
369 | #else | ||
370 | BRL_COND_FSYS_BUBBLE_DOWN(p6) | ||
371 | #endif | ||
372 | SSM_PSR_I(p0, p14, r10) | ||
373 | mov r10=-1 | ||
374 | (p10) mov r8=EINVAL | ||
375 | (p9) mov r8=ENOSYS | ||
376 | FSYS_RETURN | ||
377 | |||
378 | #ifdef CONFIG_PARAVIRT | ||
379 | /* | ||
380 | * padd to make the size of this symbol constant | ||
381 | * independent of paravirtualization. | ||
382 | */ | ||
383 | .align PAGE_SIZE / 8 | ||
384 | #endif | ||
385 | END(__kernel_syscall_via_epc) | ||
diff --git a/arch/ia64/kernel/gate.lds.S b/arch/ia64/kernel/gate.lds.S index 3cb1abc00e24..88c64ed47c36 100644 --- a/arch/ia64/kernel/gate.lds.S +++ b/arch/ia64/kernel/gate.lds.S | |||
@@ -7,6 +7,7 @@ | |||
7 | 7 | ||
8 | 8 | ||
9 | #include <asm/system.h> | 9 | #include <asm/system.h> |
10 | #include "paravirt_patchlist.h" | ||
10 | 11 | ||
11 | SECTIONS | 12 | SECTIONS |
12 | { | 13 | { |
@@ -33,21 +34,21 @@ SECTIONS | |||
33 | . = GATE_ADDR + 0x600; | 34 | . = GATE_ADDR + 0x600; |
34 | 35 | ||
35 | .data.patch : { | 36 | .data.patch : { |
36 | __start_gate_mckinley_e9_patchlist = .; | 37 | __paravirt_start_gate_mckinley_e9_patchlist = .; |
37 | *(.data.patch.mckinley_e9) | 38 | *(.data.patch.mckinley_e9) |
38 | __end_gate_mckinley_e9_patchlist = .; | 39 | __paravirt_end_gate_mckinley_e9_patchlist = .; |
39 | 40 | ||
40 | __start_gate_vtop_patchlist = .; | 41 | __paravirt_start_gate_vtop_patchlist = .; |
41 | *(.data.patch.vtop) | 42 | *(.data.patch.vtop) |
42 | __end_gate_vtop_patchlist = .; | 43 | __paravirt_end_gate_vtop_patchlist = .; |
43 | 44 | ||
44 | __start_gate_fsyscall_patchlist = .; | 45 | __paravirt_start_gate_fsyscall_patchlist = .; |
45 | *(.data.patch.fsyscall_table) | 46 | *(.data.patch.fsyscall_table) |
46 | __end_gate_fsyscall_patchlist = .; | 47 | __paravirt_end_gate_fsyscall_patchlist = .; |
47 | 48 | ||
48 | __start_gate_brl_fsys_bubble_down_patchlist = .; | 49 | __paravirt_start_gate_brl_fsys_bubble_down_patchlist = .; |
49 | *(.data.patch.brl_fsys_bubble_down) | 50 | *(.data.patch.brl_fsys_bubble_down) |
50 | __end_gate_brl_fsys_bubble_down_patchlist = .; | 51 | __paravirt_end_gate_brl_fsys_bubble_down_patchlist = .; |
51 | } :readable | 52 | } :readable |
52 | 53 | ||
53 | .IA_64.unwind_info : { *(.IA_64.unwind_info*) } | 54 | .IA_64.unwind_info : { *(.IA_64.unwind_info*) } |
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S index 59301c472800..23f846de62d5 100644 --- a/arch/ia64/kernel/head.S +++ b/arch/ia64/kernel/head.S | |||
@@ -1050,7 +1050,7 @@ END(ia64_delay_loop) | |||
1050 | * except that the multiplication and the shift are done with 128-bit | 1050 | * except that the multiplication and the shift are done with 128-bit |
1051 | * intermediate precision so that we can produce a full 64-bit result. | 1051 | * intermediate precision so that we can produce a full 64-bit result. |
1052 | */ | 1052 | */ |
1053 | GLOBAL_ENTRY(sched_clock) | 1053 | GLOBAL_ENTRY(ia64_native_sched_clock) |
1054 | addl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0 | 1054 | addl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0 |
1055 | mov.m r9=ar.itc // fetch cycle-counter (35 cyc) | 1055 | mov.m r9=ar.itc // fetch cycle-counter (35 cyc) |
1056 | ;; | 1056 | ;; |
@@ -1066,7 +1066,13 @@ GLOBAL_ENTRY(sched_clock) | |||
1066 | ;; | 1066 | ;; |
1067 | shrp r8=r9,r8,IA64_NSEC_PER_CYC_SHIFT | 1067 | shrp r8=r9,r8,IA64_NSEC_PER_CYC_SHIFT |
1068 | br.ret.sptk.many rp | 1068 | br.ret.sptk.many rp |
1069 | END(sched_clock) | 1069 | END(ia64_native_sched_clock) |
1070 | #ifndef CONFIG_PARAVIRT | ||
1071 | //unsigned long long | ||
1072 | //sched_clock(void) __attribute__((alias("ia64_native_sched_clock"))); | ||
1073 | .global sched_clock | ||
1074 | sched_clock = ia64_native_sched_clock | ||
1075 | #endif | ||
1070 | 1076 | ||
1071 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | 1077 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING |
1072 | GLOBAL_ENTRY(cycle_to_cputime) | 1078 | GLOBAL_ENTRY(cycle_to_cputime) |
diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S index f675d8e33853..ec9a5fdfa1b9 100644 --- a/arch/ia64/kernel/ivt.S +++ b/arch/ia64/kernel/ivt.S | |||
@@ -804,7 +804,7 @@ ENTRY(break_fault) | |||
804 | /////////////////////////////////////////////////////////////////////// | 804 | /////////////////////////////////////////////////////////////////////// |
805 | st1 [r16]=r0 // M2|3 clear current->thread.on_ustack flag | 805 | st1 [r16]=r0 // M2|3 clear current->thread.on_ustack flag |
806 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | 806 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING |
807 | mov.m r30=ar.itc // M get cycle for accounting | 807 | MOV_FROM_ITC(p0, p14, r30, r18) // M get cycle for accounting |
808 | #else | 808 | #else |
809 | mov b6=r30 // I0 setup syscall handler branch reg early | 809 | mov b6=r30 // I0 setup syscall handler branch reg early |
810 | #endif | 810 | #endif |
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index bab1de2d2f6a..8f33a8840422 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c | |||
@@ -1456,9 +1456,9 @@ ia64_mca_cmc_int_caller(int cmc_irq, void *arg) | |||
1456 | 1456 | ||
1457 | ia64_mca_cmc_int_handler(cmc_irq, arg); | 1457 | ia64_mca_cmc_int_handler(cmc_irq, arg); |
1458 | 1458 | ||
1459 | for (++cpuid ; cpuid < NR_CPUS && !cpu_online(cpuid) ; cpuid++); | 1459 | cpuid = cpumask_next(cpuid+1, cpu_online_mask); |
1460 | 1460 | ||
1461 | if (cpuid < NR_CPUS) { | 1461 | if (cpuid < nr_cpu_ids) { |
1462 | platform_send_ipi(cpuid, IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0); | 1462 | platform_send_ipi(cpuid, IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0); |
1463 | } else { | 1463 | } else { |
1464 | /* If no log record, switch out of polling mode */ | 1464 | /* If no log record, switch out of polling mode */ |
@@ -1525,7 +1525,7 @@ ia64_mca_cpe_int_caller(int cpe_irq, void *arg) | |||
1525 | 1525 | ||
1526 | ia64_mca_cpe_int_handler(cpe_irq, arg); | 1526 | ia64_mca_cpe_int_handler(cpe_irq, arg); |
1527 | 1527 | ||
1528 | for (++cpuid ; cpuid < NR_CPUS && !cpu_online(cpuid) ; cpuid++); | 1528 | cpuid = cpumask_next(cpuid+1, cpu_online_mask); |
1529 | 1529 | ||
1530 | if (cpuid < NR_CPUS) { | 1530 | if (cpuid < NR_CPUS) { |
1531 | platform_send_ipi(cpuid, IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0); | 1531 | platform_send_ipi(cpuid, IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0); |
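Illustrative sketch (not part of this patch): cpumask_next() replaces the open-coded scan over NR_CPUS, with nr_cpu_ids as the "no further CPU" sentinel. Hypothetical helper:

#include <linux/cpumask.h>

/* Return the next online CPU after 'cpuid', or -1 if there is none. */
static int example_next_online_cpu(int cpuid)
{
	int next = cpumask_next(cpuid, cpu_online_mask);

	return next < nr_cpu_ids ? next : -1;
}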
diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c index aaa7d901521f..da3b0cf495a3 100644 --- a/arch/ia64/kernel/module.c +++ b/arch/ia64/kernel/module.c | |||
@@ -446,6 +446,14 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings, | |||
446 | mod->arch.opd = s; | 446 | mod->arch.opd = s; |
447 | else if (strcmp(".IA_64.unwind", secstrings + s->sh_name) == 0) | 447 | else if (strcmp(".IA_64.unwind", secstrings + s->sh_name) == 0) |
448 | mod->arch.unwind = s; | 448 | mod->arch.unwind = s; |
449 | #ifdef CONFIG_PARAVIRT | ||
450 | else if (strcmp(".paravirt_bundles", | ||
451 | secstrings + s->sh_name) == 0) | ||
452 | mod->arch.paravirt_bundles = s; | ||
453 | else if (strcmp(".paravirt_insts", | ||
454 | secstrings + s->sh_name) == 0) | ||
455 | mod->arch.paravirt_insts = s; | ||
456 | #endif | ||
449 | 457 | ||
450 | if (!mod->arch.core_plt || !mod->arch.init_plt || !mod->arch.got || !mod->arch.opd) { | 458 | if (!mod->arch.core_plt || !mod->arch.init_plt || !mod->arch.got || !mod->arch.opd) { |
451 | printk(KERN_ERR "%s: sections missing\n", mod->name); | 459 | printk(KERN_ERR "%s: sections missing\n", mod->name); |
@@ -525,8 +533,7 @@ get_ltoff (struct module *mod, uint64_t value, int *okp) | |||
525 | goto found; | 533 | goto found; |
526 | 534 | ||
527 | /* Not enough GOT entries? */ | 535 | /* Not enough GOT entries? */ |
528 | if (e >= (struct got_entry *) (mod->arch.got->sh_addr + mod->arch.got->sh_size)) | 536 | BUG_ON(e >= (struct got_entry *) (mod->arch.got->sh_addr + mod->arch.got->sh_size)); |
529 | BUG(); | ||
530 | 537 | ||
531 | e->val = value; | 538 | e->val = value; |
532 | ++mod->arch.next_got_entry; | 539 | ++mod->arch.next_got_entry; |
@@ -921,6 +928,30 @@ module_finalize (const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mo | |||
921 | DEBUGP("%s: init: entry=%p\n", __func__, mod->init); | 928 | DEBUGP("%s: init: entry=%p\n", __func__, mod->init); |
922 | if (mod->arch.unwind) | 929 | if (mod->arch.unwind) |
923 | register_unwind_table(mod); | 930 | register_unwind_table(mod); |
931 | #ifdef CONFIG_PARAVIRT | ||
932 | if (mod->arch.paravirt_bundles) { | ||
933 | struct paravirt_patch_site_bundle *start = | ||
934 | (struct paravirt_patch_site_bundle *) | ||
935 | mod->arch.paravirt_bundles->sh_addr; | ||
936 | struct paravirt_patch_site_bundle *end = | ||
937 | (struct paravirt_patch_site_bundle *) | ||
938 | (mod->arch.paravirt_bundles->sh_addr + | ||
939 | mod->arch.paravirt_bundles->sh_size); | ||
940 | |||
941 | paravirt_patch_apply_bundle(start, end); | ||
942 | } | ||
943 | if (mod->arch.paravirt_insts) { | ||
944 | struct paravirt_patch_site_inst *start = | ||
945 | (struct paravirt_patch_site_inst *) | ||
946 | mod->arch.paravirt_insts->sh_addr; | ||
947 | struct paravirt_patch_site_inst *end = | ||
948 | (struct paravirt_patch_site_inst *) | ||
949 | (mod->arch.paravirt_insts->sh_addr + | ||
950 | mod->arch.paravirt_insts->sh_size); | ||
951 | |||
952 | paravirt_patch_apply_inst(start, end); | ||
953 | } | ||
954 | #endif | ||
924 | return 0; | 955 | return 0; |
925 | } | 956 | } |
926 | 957 | ||
diff --git a/arch/ia64/kernel/paravirt.c b/arch/ia64/kernel/paravirt.c index 9f14c16f6369..a21d7bb9c69c 100644 --- a/arch/ia64/kernel/paravirt.c +++ b/arch/ia64/kernel/paravirt.c | |||
@@ -46,13 +46,23 @@ struct pv_info pv_info = { | |||
46 | * initialization hooks. | 46 | * initialization hooks. |
47 | */ | 47 | */ |
48 | 48 | ||
49 | struct pv_init_ops pv_init_ops; | 49 | static void __init |
50 | ia64_native_patch_branch(unsigned long tag, unsigned long type); | ||
51 | |||
52 | struct pv_init_ops pv_init_ops = | ||
53 | { | ||
54 | #ifdef ASM_SUPPORTED | ||
55 | .patch_bundle = ia64_native_patch_bundle, | ||
56 | #endif | ||
57 | .patch_branch = ia64_native_patch_branch, | ||
58 | }; | ||
50 | 59 | ||
51 | /*************************************************************************** | 60 | /*************************************************************************** |
52 | * pv_cpu_ops | 61 | * pv_cpu_ops |
53 | * intrinsics hooks. | 62 | * intrinsics hooks. |
54 | */ | 63 | */ |
55 | 64 | ||
65 | #ifndef ASM_SUPPORTED | ||
56 | /* ia64_native_xxx are macros so that we have to make them real functions */ | 66 | /* ia64_native_xxx are macros so that we have to make them real functions */ |
57 | 67 | ||
58 | #define DEFINE_VOID_FUNC1(name) \ | 68 | #define DEFINE_VOID_FUNC1(name) \ |
@@ -60,7 +70,14 @@ struct pv_init_ops pv_init_ops; | |||
60 | ia64_native_ ## name ## _func(unsigned long arg) \ | 70 | ia64_native_ ## name ## _func(unsigned long arg) \ |
61 | { \ | 71 | { \ |
62 | ia64_native_ ## name(arg); \ | 72 | ia64_native_ ## name(arg); \ |
63 | } \ | 73 | } |
74 | |||
75 | #define DEFINE_VOID_FUNC1_VOID(name) \ | ||
76 | static void \ | ||
77 | ia64_native_ ## name ## _func(void *arg) \ | ||
78 | { \ | ||
79 | ia64_native_ ## name(arg); \ | ||
80 | } | ||
64 | 81 | ||
65 | #define DEFINE_VOID_FUNC2(name) \ | 82 | #define DEFINE_VOID_FUNC2(name) \ |
66 | static void \ | 83 | static void \ |
@@ -68,7 +85,7 @@ struct pv_init_ops pv_init_ops; | |||
68 | unsigned long arg1) \ | 85 | unsigned long arg1) \ |
69 | { \ | 86 | { \ |
70 | ia64_native_ ## name(arg0, arg1); \ | 87 | ia64_native_ ## name(arg0, arg1); \ |
71 | } \ | 88 | } |
72 | 89 | ||
73 | #define DEFINE_FUNC0(name) \ | 90 | #define DEFINE_FUNC0(name) \ |
74 | static unsigned long \ | 91 | static unsigned long \ |
@@ -84,7 +101,7 @@ struct pv_init_ops pv_init_ops; | |||
84 | return ia64_native_ ## name(arg); \ | 101 | return ia64_native_ ## name(arg); \ |
85 | } \ | 102 | } \ |
86 | 103 | ||
87 | DEFINE_VOID_FUNC1(fc); | 104 | DEFINE_VOID_FUNC1_VOID(fc); |
88 | DEFINE_VOID_FUNC1(intrin_local_irq_restore); | 105 | DEFINE_VOID_FUNC1(intrin_local_irq_restore); |
89 | 106 | ||
90 | DEFINE_VOID_FUNC2(ptcga); | 107 | DEFINE_VOID_FUNC2(ptcga); |
@@ -274,6 +291,266 @@ ia64_native_setreg_func(int regnum, unsigned long val) | |||
274 | break; | 291 | break; |
275 | } | 292 | } |
276 | } | 293 | } |
294 | #else | ||
295 | |||
296 | #define __DEFINE_FUNC(name, code) \ | ||
297 | extern const char ia64_native_ ## name ## _direct_start[]; \ | ||
298 | extern const char ia64_native_ ## name ## _direct_end[]; \ | ||
299 | asm (".align 32\n" \ | ||
300 | ".proc ia64_native_" #name "_func\n" \ | ||
301 | "ia64_native_" #name "_func:\n" \ | ||
302 | "ia64_native_" #name "_direct_start:\n" \ | ||
303 | code \ | ||
304 | "ia64_native_" #name "_direct_end:\n" \ | ||
305 | "br.cond.sptk.many b6\n" \ | ||
306 | ".endp ia64_native_" #name "_func\n") | ||
307 | |||
308 | #define DEFINE_VOID_FUNC0(name, code) \ | ||
309 | extern void \ | ||
310 | ia64_native_ ## name ## _func(void); \ | ||
311 | __DEFINE_FUNC(name, code) | ||
312 | |||
313 | #define DEFINE_VOID_FUNC1(name, code) \ | ||
314 | extern void \ | ||
315 | ia64_native_ ## name ## _func(unsigned long arg); \ | ||
316 | __DEFINE_FUNC(name, code) | ||
317 | |||
318 | #define DEFINE_VOID_FUNC1_VOID(name, code) \ | ||
319 | extern void \ | ||
320 | ia64_native_ ## name ## _func(void *arg); \ | ||
321 | __DEFINE_FUNC(name, code) | ||
322 | |||
323 | #define DEFINE_VOID_FUNC2(name, code) \ | ||
324 | extern void \ | ||
325 | ia64_native_ ## name ## _func(unsigned long arg0, \ | ||
326 | unsigned long arg1); \ | ||
327 | __DEFINE_FUNC(name, code) | ||
328 | |||
329 | #define DEFINE_FUNC0(name, code) \ | ||
330 | extern unsigned long \ | ||
331 | ia64_native_ ## name ## _func(void); \ | ||
332 | __DEFINE_FUNC(name, code) | ||
333 | |||
334 | #define DEFINE_FUNC1(name, type, code) \ | ||
335 | extern unsigned long \ | ||
336 | ia64_native_ ## name ## _func(type arg); \ | ||
337 | __DEFINE_FUNC(name, code) | ||
338 | |||
339 | DEFINE_VOID_FUNC1_VOID(fc, | ||
340 | "fc r8\n"); | ||
341 | DEFINE_VOID_FUNC1(intrin_local_irq_restore, | ||
342 | ";;\n" | ||
343 | " cmp.ne p6, p7 = r8, r0\n" | ||
344 | ";;\n" | ||
345 | "(p6) ssm psr.i\n" | ||
346 | "(p7) rsm psr.i\n" | ||
347 | ";;\n" | ||
348 | "(p6) srlz.d\n"); | ||
349 | |||
350 | DEFINE_VOID_FUNC2(ptcga, | ||
351 | "ptc.ga r8, r9\n"); | ||
352 | DEFINE_VOID_FUNC2(set_rr, | ||
353 | "mov rr[r8] = r9\n"); | ||
354 | |||
355 | /* ia64_native_getreg(_IA64_REG_PSR) & IA64_PSR_I */ | ||
356 | DEFINE_FUNC0(get_psr_i, | ||
357 | "mov r2 = " __stringify(1 << IA64_PSR_I_BIT) "\n" | ||
358 | "mov r8 = psr\n" | ||
359 | ";;\n" | ||
360 | "and r8 = r2, r8\n"); | ||
361 | |||
362 | DEFINE_FUNC1(thash, unsigned long, | ||
363 | "thash r8 = r8\n"); | ||
364 | DEFINE_FUNC1(get_cpuid, int, | ||
365 | "mov r8 = cpuid[r8]\n"); | ||
366 | DEFINE_FUNC1(get_pmd, int, | ||
367 | "mov r8 = pmd[r8]\n"); | ||
368 | DEFINE_FUNC1(get_rr, unsigned long, | ||
369 | "mov r8 = rr[r8]\n"); | ||
370 | |||
371 | DEFINE_VOID_FUNC0(ssm_i, | ||
372 | "ssm psr.i\n"); | ||
373 | DEFINE_VOID_FUNC0(rsm_i, | ||
374 | "rsm psr.i\n"); | ||
375 | |||
376 | extern void | ||
377 | ia64_native_set_rr0_to_rr4_func(unsigned long val0, unsigned long val1, | ||
378 | unsigned long val2, unsigned long val3, | ||
379 | unsigned long val4); | ||
380 | __DEFINE_FUNC(set_rr0_to_rr4, | ||
381 | "mov rr[r0] = r8\n" | ||
382 | "movl r2 = 0x2000000000000000\n" | ||
383 | ";;\n" | ||
384 | "mov rr[r2] = r9\n" | ||
385 | "shl r3 = r2, 1\n" /* movl r3 = 0x4000000000000000 */ | ||
386 | ";;\n" | ||
387 | "add r2 = r2, r3\n" /* movl r2 = 0x6000000000000000 */ | ||
388 | "mov rr[r3] = r10\n" | ||
389 | ";;\n" | ||
390 | "mov rr[r2] = r11\n" | ||
391 | "shl r3 = r3, 1\n" /* movl r3 = 0x8000000000000000 */ | ||
392 | ";;\n" | ||
393 | "mov rr[r3] = r14\n"); | ||
394 | |||
395 | extern unsigned long ia64_native_getreg_func(int regnum); | ||
396 | asm(".global ia64_native_getreg_func\n"); | ||
397 | #define __DEFINE_GET_REG(id, reg) \ | ||
398 | "mov r2 = " __stringify(_IA64_REG_ ## id) "\n" \ | ||
399 | ";;\n" \ | ||
400 | "cmp.eq p6, p0 = r2, r8\n" \ | ||
401 | ";;\n" \ | ||
402 | "(p6) mov r8 = " #reg "\n" \ | ||
403 | "(p6) br.cond.sptk.many b6\n" \ | ||
404 | ";;\n" | ||
405 | #define __DEFINE_GET_AR(id, reg) __DEFINE_GET_REG(AR_ ## id, ar.reg) | ||
406 | #define __DEFINE_GET_CR(id, reg) __DEFINE_GET_REG(CR_ ## id, cr.reg) | ||
407 | |||
408 | __DEFINE_FUNC(getreg, | ||
409 | __DEFINE_GET_REG(GP, gp) | ||
410 | /*__DEFINE_GET_REG(IP, ip)*/ /* returned ip value shouldn't be constant */ | ||
411 | __DEFINE_GET_REG(PSR, psr) | ||
412 | __DEFINE_GET_REG(TP, tp) | ||
413 | __DEFINE_GET_REG(SP, sp) | ||
414 | |||
415 | __DEFINE_GET_REG(AR_KR0, ar0) | ||
416 | __DEFINE_GET_REG(AR_KR1, ar1) | ||
417 | __DEFINE_GET_REG(AR_KR2, ar2) | ||
418 | __DEFINE_GET_REG(AR_KR3, ar3) | ||
419 | __DEFINE_GET_REG(AR_KR4, ar4) | ||
420 | __DEFINE_GET_REG(AR_KR5, ar5) | ||
421 | __DEFINE_GET_REG(AR_KR6, ar6) | ||
422 | __DEFINE_GET_REG(AR_KR7, ar7) | ||
423 | __DEFINE_GET_AR(RSC, rsc) | ||
424 | __DEFINE_GET_AR(BSP, bsp) | ||
425 | __DEFINE_GET_AR(BSPSTORE, bspstore) | ||
426 | __DEFINE_GET_AR(RNAT, rnat) | ||
427 | __DEFINE_GET_AR(FCR, fcr) | ||
428 | __DEFINE_GET_AR(EFLAG, eflag) | ||
429 | __DEFINE_GET_AR(CSD, csd) | ||
430 | __DEFINE_GET_AR(SSD, ssd) | ||
431 | __DEFINE_GET_REG(AR_CFLAG, ar27) | ||
432 | __DEFINE_GET_AR(FSR, fsr) | ||
433 | __DEFINE_GET_AR(FIR, fir) | ||
434 | __DEFINE_GET_AR(FDR, fdr) | ||
435 | __DEFINE_GET_AR(CCV, ccv) | ||
436 | __DEFINE_GET_AR(UNAT, unat) | ||
437 | __DEFINE_GET_AR(FPSR, fpsr) | ||
438 | __DEFINE_GET_AR(ITC, itc) | ||
439 | __DEFINE_GET_AR(PFS, pfs) | ||
440 | __DEFINE_GET_AR(LC, lc) | ||
441 | __DEFINE_GET_AR(EC, ec) | ||
442 | |||
443 | __DEFINE_GET_CR(DCR, dcr) | ||
444 | __DEFINE_GET_CR(ITM, itm) | ||
445 | __DEFINE_GET_CR(IVA, iva) | ||
446 | __DEFINE_GET_CR(PTA, pta) | ||
447 | __DEFINE_GET_CR(IPSR, ipsr) | ||
448 | __DEFINE_GET_CR(ISR, isr) | ||
449 | __DEFINE_GET_CR(IIP, iip) | ||
450 | __DEFINE_GET_CR(IFA, ifa) | ||
451 | __DEFINE_GET_CR(ITIR, itir) | ||
452 | __DEFINE_GET_CR(IIPA, iipa) | ||
453 | __DEFINE_GET_CR(IFS, ifs) | ||
454 | __DEFINE_GET_CR(IIM, iim) | ||
455 | __DEFINE_GET_CR(IHA, iha) | ||
456 | __DEFINE_GET_CR(LID, lid) | ||
457 | __DEFINE_GET_CR(IVR, ivr) | ||
458 | __DEFINE_GET_CR(TPR, tpr) | ||
459 | __DEFINE_GET_CR(EOI, eoi) | ||
460 | __DEFINE_GET_CR(IRR0, irr0) | ||
461 | __DEFINE_GET_CR(IRR1, irr1) | ||
462 | __DEFINE_GET_CR(IRR2, irr2) | ||
463 | __DEFINE_GET_CR(IRR3, irr3) | ||
464 | __DEFINE_GET_CR(ITV, itv) | ||
465 | __DEFINE_GET_CR(PMV, pmv) | ||
466 | __DEFINE_GET_CR(CMCV, cmcv) | ||
467 | __DEFINE_GET_CR(LRR0, lrr0) | ||
468 | __DEFINE_GET_CR(LRR1, lrr1) | ||
469 | |||
470 | "mov r8 = -1\n" /* unsupported case */ | ||
471 | ); | ||
472 | |||
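Note on the generated getreg/setreg dispatchers above: with ASM_SUPPORTED the register number arrives in r8 (r9 for setreg), each __DEFINE_GET_REG/__DEFINE_GET_AR/__DEFINE_GET_CR step compares it against one _IA64_REG_* constant and, on a match, moves that register into r8 and returns through b6; otherwise control falls through to the next comparison and finally to the "mov r8 = -1" unsupported case. A minimal C equivalent of that chain, purely as a sketch (the name getreg_sketch is illustrative, not part of the patch):

static unsigned long getreg_sketch(int regnum)
{
	switch (regnum) {			/* one case per __DEFINE_GET_* entry */
	case _IA64_REG_GP:	return ia64_getreg(_IA64_REG_GP);
	case _IA64_REG_PSR:	return ia64_getreg(_IA64_REG_PSR);
	case _IA64_REG_SP:	return ia64_getreg(_IA64_REG_SP);
	/* ... AR and CR cases elided ... */
	default:		return -1UL;	/* unsupported case */
	}
}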
473 | extern void ia64_native_setreg_func(int regnum, unsigned long val); | ||
474 | asm(".global ia64_native_setreg_func\n"); | ||
475 | #define __DEFINE_SET_REG(id, reg) \ | ||
476 | "mov r2 = " __stringify(_IA64_REG_ ## id) "\n" \ | ||
477 | ";;\n" \ | ||
478 | "cmp.eq p6, p0 = r2, r9\n" \ | ||
479 | ";;\n" \ | ||
480 | "(p6) mov " #reg " = r8\n" \ | ||
481 | "(p6) br.cond.sptk.many b6\n" \ | ||
482 | ";;\n" | ||
483 | #define __DEFINE_SET_AR(id, reg) __DEFINE_SET_REG(AR_ ## id, ar.reg) | ||
484 | #define __DEFINE_SET_CR(id, reg) __DEFINE_SET_REG(CR_ ## id, cr.reg) | ||
485 | __DEFINE_FUNC(setreg, | ||
486 | "mov r2 = " __stringify(_IA64_REG_PSR_L) "\n" | ||
487 | ";;\n" | ||
488 | "cmp.eq p6, p0 = r2, r9\n" | ||
489 | ";;\n" | ||
490 | "(p6) mov psr.l = r8\n" | ||
491 | #ifdef HAVE_SERIALIZE_DIRECTIVE | ||
492 | ".serialize.data\n" | ||
493 | #endif | ||
494 | "(p6) br.cond.sptk.many b6\n" | ||
495 | __DEFINE_SET_REG(GP, gp) | ||
496 | __DEFINE_SET_REG(SP, sp) | ||
497 | |||
498 | __DEFINE_SET_REG(AR_KR0, ar0) | ||
499 | __DEFINE_SET_REG(AR_KR1, ar1) | ||
500 | __DEFINE_SET_REG(AR_KR2, ar2) | ||
501 | __DEFINE_SET_REG(AR_KR3, ar3) | ||
502 | __DEFINE_SET_REG(AR_KR4, ar4) | ||
503 | __DEFINE_SET_REG(AR_KR5, ar5) | ||
504 | __DEFINE_SET_REG(AR_KR6, ar6) | ||
505 | __DEFINE_SET_REG(AR_KR7, ar7) | ||
506 | __DEFINE_SET_AR(RSC, rsc) | ||
507 | __DEFINE_SET_AR(BSP, bsp) | ||
508 | __DEFINE_SET_AR(BSPSTORE, bspstore) | ||
509 | __DEFINE_SET_AR(RNAT, rnat) | ||
510 | __DEFINE_SET_AR(FCR, fcr) | ||
511 | __DEFINE_SET_AR(EFLAG, eflag) | ||
512 | __DEFINE_SET_AR(CSD, csd) | ||
513 | __DEFINE_SET_AR(SSD, ssd) | ||
514 | __DEFINE_SET_REG(AR_CFLAG, ar27) | ||
515 | __DEFINE_SET_AR(FSR, fsr) | ||
516 | __DEFINE_SET_AR(FIR, fir) | ||
517 | __DEFINE_SET_AR(FDR, fdr) | ||
518 | __DEFINE_SET_AR(CCV, ccv) | ||
519 | __DEFINE_SET_AR(UNAT, unat) | ||
520 | __DEFINE_SET_AR(FPSR, fpsr) | ||
521 | __DEFINE_SET_AR(ITC, itc) | ||
522 | __DEFINE_SET_AR(PFS, pfs) | ||
523 | __DEFINE_SET_AR(LC, lc) | ||
524 | __DEFINE_SET_AR(EC, ec) | ||
525 | |||
526 | __DEFINE_SET_CR(DCR, dcr) | ||
527 | __DEFINE_SET_CR(ITM, itm) | ||
528 | __DEFINE_SET_CR(IVA, iva) | ||
529 | __DEFINE_SET_CR(PTA, pta) | ||
530 | __DEFINE_SET_CR(IPSR, ipsr) | ||
531 | __DEFINE_SET_CR(ISR, isr) | ||
532 | __DEFINE_SET_CR(IIP, iip) | ||
533 | __DEFINE_SET_CR(IFA, ifa) | ||
534 | __DEFINE_SET_CR(ITIR, itir) | ||
535 | __DEFINE_SET_CR(IIPA, iipa) | ||
536 | __DEFINE_SET_CR(IFS, ifs) | ||
537 | __DEFINE_SET_CR(IIM, iim) | ||
538 | __DEFINE_SET_CR(IHA, iha) | ||
539 | __DEFINE_SET_CR(LID, lid) | ||
540 | __DEFINE_SET_CR(IVR, ivr) | ||
541 | __DEFINE_SET_CR(TPR, tpr) | ||
542 | __DEFINE_SET_CR(EOI, eoi) | ||
543 | __DEFINE_SET_CR(IRR0, irr0) | ||
544 | __DEFINE_SET_CR(IRR1, irr1) | ||
545 | __DEFINE_SET_CR(IRR2, irr2) | ||
546 | __DEFINE_SET_CR(IRR3, irr3) | ||
547 | __DEFINE_SET_CR(ITV, itv) | ||
548 | __DEFINE_SET_CR(PMV, pmv) | ||
549 | __DEFINE_SET_CR(CMCV, cmcv) | ||
550 | __DEFINE_SET_CR(LRR0, lrr0) | ||
551 | __DEFINE_SET_CR(LRR1, lrr1) | ||
552 | ); | ||
553 | #endif | ||
277 | 554 | ||
278 | struct pv_cpu_ops pv_cpu_ops = { | 555 | struct pv_cpu_ops pv_cpu_ops = { |
279 | .fc = ia64_native_fc_func, | 556 | .fc = ia64_native_fc_func, |
@@ -366,4 +643,258 @@ ia64_native_do_steal_accounting(unsigned long *new_itm) | |||
366 | 643 | ||
367 | struct pv_time_ops pv_time_ops = { | 644 | struct pv_time_ops pv_time_ops = { |
368 | .do_steal_accounting = ia64_native_do_steal_accounting, | 645 | .do_steal_accounting = ia64_native_do_steal_accounting, |
646 | .sched_clock = ia64_native_sched_clock, | ||
647 | }; | ||
648 | |||
649 | /*************************************************************************** | ||
650 | * binary patching | ||
651 | * pv_init_ops.patch_bundle | ||
652 | */ | ||
653 | |||
654 | #ifdef ASM_SUPPORTED | ||
655 | #define IA64_NATIVE_PATCH_DEFINE_GET_REG(name, reg) \ | ||
656 | __DEFINE_FUNC(get_ ## name, \ | ||
657 | ";;\n" \ | ||
658 | "mov r8 = " #reg "\n" \ | ||
659 | ";;\n") | ||
660 | |||
661 | #define IA64_NATIVE_PATCH_DEFINE_SET_REG(name, reg) \ | ||
662 | __DEFINE_FUNC(set_ ## name, \ | ||
663 | ";;\n" \ | ||
664 | "mov " #reg " = r8\n" \ | ||
665 | ";;\n") | ||
666 | |||
667 | #define IA64_NATIVE_PATCH_DEFINE_REG(name, reg) \ | ||
668 | IA64_NATIVE_PATCH_DEFINE_GET_REG(name, reg); \ | ||
669 | IA64_NATIVE_PATCH_DEFINE_SET_REG(name, reg) \ | ||
670 | |||
671 | #define IA64_NATIVE_PATCH_DEFINE_AR(name, reg) \ | ||
672 | IA64_NATIVE_PATCH_DEFINE_REG(ar_ ## name, ar.reg) | ||
673 | |||
674 | #define IA64_NATIVE_PATCH_DEFINE_CR(name, reg) \ | ||
675 | IA64_NATIVE_PATCH_DEFINE_REG(cr_ ## name, cr.reg) | ||
676 | |||
677 | |||
678 | IA64_NATIVE_PATCH_DEFINE_GET_REG(psr, psr); | ||
679 | IA64_NATIVE_PATCH_DEFINE_GET_REG(tp, tp); | ||
680 | |||
681 | /* IA64_NATIVE_PATCH_DEFINE_SET_REG(psr_l, psr.l); */ | ||
682 | __DEFINE_FUNC(set_psr_l, | ||
683 | ";;\n" | ||
684 | "mov psr.l = r8\n" | ||
685 | #ifdef HAVE_SERIALIZE_DIRECTIVE | ||
686 | ".serialize.data\n" | ||
687 | #endif | ||
688 | ";;\n"); | ||
689 | |||
690 | IA64_NATIVE_PATCH_DEFINE_REG(gp, gp); | ||
691 | IA64_NATIVE_PATCH_DEFINE_REG(sp, sp); | ||
692 | |||
693 | IA64_NATIVE_PATCH_DEFINE_REG(kr0, ar0); | ||
694 | IA64_NATIVE_PATCH_DEFINE_REG(kr1, ar1); | ||
695 | IA64_NATIVE_PATCH_DEFINE_REG(kr2, ar2); | ||
696 | IA64_NATIVE_PATCH_DEFINE_REG(kr3, ar3); | ||
697 | IA64_NATIVE_PATCH_DEFINE_REG(kr4, ar4); | ||
698 | IA64_NATIVE_PATCH_DEFINE_REG(kr5, ar5); | ||
699 | IA64_NATIVE_PATCH_DEFINE_REG(kr6, ar6); | ||
700 | IA64_NATIVE_PATCH_DEFINE_REG(kr7, ar7); | ||
701 | |||
702 | IA64_NATIVE_PATCH_DEFINE_AR(rsc, rsc); | ||
703 | IA64_NATIVE_PATCH_DEFINE_AR(bsp, bsp); | ||
704 | IA64_NATIVE_PATCH_DEFINE_AR(bspstore, bspstore); | ||
705 | IA64_NATIVE_PATCH_DEFINE_AR(rnat, rnat); | ||
706 | IA64_NATIVE_PATCH_DEFINE_AR(fcr, fcr); | ||
707 | IA64_NATIVE_PATCH_DEFINE_AR(eflag, eflag); | ||
708 | IA64_NATIVE_PATCH_DEFINE_AR(csd, csd); | ||
709 | IA64_NATIVE_PATCH_DEFINE_AR(ssd, ssd); | ||
710 | IA64_NATIVE_PATCH_DEFINE_REG(ar27, ar27); | ||
711 | IA64_NATIVE_PATCH_DEFINE_AR(fsr, fsr); | ||
712 | IA64_NATIVE_PATCH_DEFINE_AR(fir, fir); | ||
713 | IA64_NATIVE_PATCH_DEFINE_AR(fdr, fdr); | ||
714 | IA64_NATIVE_PATCH_DEFINE_AR(ccv, ccv); | ||
715 | IA64_NATIVE_PATCH_DEFINE_AR(unat, unat); | ||
716 | IA64_NATIVE_PATCH_DEFINE_AR(fpsr, fpsr); | ||
717 | IA64_NATIVE_PATCH_DEFINE_AR(itc, itc); | ||
718 | IA64_NATIVE_PATCH_DEFINE_AR(pfs, pfs); | ||
719 | IA64_NATIVE_PATCH_DEFINE_AR(lc, lc); | ||
720 | IA64_NATIVE_PATCH_DEFINE_AR(ec, ec); | ||
721 | |||
722 | IA64_NATIVE_PATCH_DEFINE_CR(dcr, dcr); | ||
723 | IA64_NATIVE_PATCH_DEFINE_CR(itm, itm); | ||
724 | IA64_NATIVE_PATCH_DEFINE_CR(iva, iva); | ||
725 | IA64_NATIVE_PATCH_DEFINE_CR(pta, pta); | ||
726 | IA64_NATIVE_PATCH_DEFINE_CR(ipsr, ipsr); | ||
727 | IA64_NATIVE_PATCH_DEFINE_CR(isr, isr); | ||
728 | IA64_NATIVE_PATCH_DEFINE_CR(iip, iip); | ||
729 | IA64_NATIVE_PATCH_DEFINE_CR(ifa, ifa); | ||
730 | IA64_NATIVE_PATCH_DEFINE_CR(itir, itir); | ||
731 | IA64_NATIVE_PATCH_DEFINE_CR(iipa, iipa); | ||
732 | IA64_NATIVE_PATCH_DEFINE_CR(ifs, ifs); | ||
733 | IA64_NATIVE_PATCH_DEFINE_CR(iim, iim); | ||
734 | IA64_NATIVE_PATCH_DEFINE_CR(iha, iha); | ||
735 | IA64_NATIVE_PATCH_DEFINE_CR(lid, lid); | ||
736 | IA64_NATIVE_PATCH_DEFINE_CR(ivr, ivr); | ||
737 | IA64_NATIVE_PATCH_DEFINE_CR(tpr, tpr); | ||
738 | IA64_NATIVE_PATCH_DEFINE_CR(eoi, eoi); | ||
739 | IA64_NATIVE_PATCH_DEFINE_CR(irr0, irr0); | ||
740 | IA64_NATIVE_PATCH_DEFINE_CR(irr1, irr1); | ||
741 | IA64_NATIVE_PATCH_DEFINE_CR(irr2, irr2); | ||
742 | IA64_NATIVE_PATCH_DEFINE_CR(irr3, irr3); | ||
743 | IA64_NATIVE_PATCH_DEFINE_CR(itv, itv); | ||
744 | IA64_NATIVE_PATCH_DEFINE_CR(pmv, pmv); | ||
745 | IA64_NATIVE_PATCH_DEFINE_CR(cmcv, cmcv); | ||
746 | IA64_NATIVE_PATCH_DEFINE_CR(lrr0, lrr0); | ||
747 | IA64_NATIVE_PATCH_DEFINE_CR(lrr1, lrr1); | ||
748 | |||
749 | static const struct paravirt_patch_bundle_elem ia64_native_patch_bundle_elems[] | ||
750 | __initdata_or_module = | ||
751 | { | ||
752 | #define IA64_NATIVE_PATCH_BUNDLE_ELEM(name, type) \ | ||
753 | { \ | ||
754 | (void*)ia64_native_ ## name ## _direct_start, \ | ||
755 | (void*)ia64_native_ ## name ## _direct_end, \ | ||
756 | PARAVIRT_PATCH_TYPE_ ## type, \ | ||
757 | } | ||
758 | |||
759 | IA64_NATIVE_PATCH_BUNDLE_ELEM(fc, FC), | ||
760 | IA64_NATIVE_PATCH_BUNDLE_ELEM(thash, THASH), | ||
761 | IA64_NATIVE_PATCH_BUNDLE_ELEM(get_cpuid, GET_CPUID), | ||
762 | IA64_NATIVE_PATCH_BUNDLE_ELEM(get_pmd, GET_PMD), | ||
763 | IA64_NATIVE_PATCH_BUNDLE_ELEM(ptcga, PTCGA), | ||
764 | IA64_NATIVE_PATCH_BUNDLE_ELEM(get_rr, GET_RR), | ||
765 | IA64_NATIVE_PATCH_BUNDLE_ELEM(set_rr, SET_RR), | ||
766 | IA64_NATIVE_PATCH_BUNDLE_ELEM(set_rr0_to_rr4, SET_RR0_TO_RR4), | ||
767 | IA64_NATIVE_PATCH_BUNDLE_ELEM(ssm_i, SSM_I), | ||
768 | IA64_NATIVE_PATCH_BUNDLE_ELEM(rsm_i, RSM_I), | ||
769 | IA64_NATIVE_PATCH_BUNDLE_ELEM(get_psr_i, GET_PSR_I), | ||
770 | IA64_NATIVE_PATCH_BUNDLE_ELEM(intrin_local_irq_restore, | ||
771 | INTRIN_LOCAL_IRQ_RESTORE), | ||
772 | |||
773 | #define IA64_NATIVE_PATCH_BUNDLE_ELEM_GETREG(name, reg) \ | ||
774 | { \ | ||
775 | (void*)ia64_native_get_ ## name ## _direct_start, \ | ||
776 | (void*)ia64_native_get_ ## name ## _direct_end, \ | ||
777 | PARAVIRT_PATCH_TYPE_GETREG + _IA64_REG_ ## reg, \ | ||
778 | } | ||
779 | |||
780 | #define IA64_NATIVE_PATCH_BUNDLE_ELEM_SETREG(name, reg) \ | ||
781 | { \ | ||
782 | (void*)ia64_native_set_ ## name ## _direct_start, \ | ||
783 | (void*)ia64_native_set_ ## name ## _direct_end, \ | ||
784 | PARAVIRT_PATCH_TYPE_SETREG + _IA64_REG_ ## reg, \ | ||
785 | } | ||
786 | |||
787 | #define IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(name, reg) \ | ||
788 | IA64_NATIVE_PATCH_BUNDLE_ELEM_GETREG(name, reg), \ | ||
789 | IA64_NATIVE_PATCH_BUNDLE_ELEM_SETREG(name, reg) \ | ||
790 | |||
791 | #define IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(name, reg) \ | ||
792 | IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(ar_ ## name, AR_ ## reg) | ||
793 | |||
794 | #define IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(name, reg) \ | ||
795 | IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(cr_ ## name, CR_ ## reg) | ||
796 | |||
797 | IA64_NATIVE_PATCH_BUNDLE_ELEM_GETREG(psr, PSR), | ||
798 | IA64_NATIVE_PATCH_BUNDLE_ELEM_GETREG(tp, TP), | ||
799 | |||
800 | IA64_NATIVE_PATCH_BUNDLE_ELEM_SETREG(psr_l, PSR_L), | ||
801 | |||
802 | IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(gp, GP), | ||
803 | IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(sp, SP), | ||
804 | |||
805 | IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr0, AR_KR0), | ||
806 | IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr1, AR_KR1), | ||
807 | IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr2, AR_KR2), | ||
808 | IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr3, AR_KR3), | ||
809 | IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr4, AR_KR4), | ||
810 | IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr5, AR_KR5), | ||
811 | IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr6, AR_KR6), | ||
812 | IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr7, AR_KR7), | ||
813 | |||
814 | IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(rsc, RSC), | ||
815 | IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(bsp, BSP), | ||
816 | IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(bspstore, BSPSTORE), | ||
817 | IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(rnat, RNAT), | ||
818 | IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(fcr, FCR), | ||
819 | IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(eflag, EFLAG), | ||
820 | IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(csd, CSD), | ||
821 | IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(ssd, SSD), | ||
822 | IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(ar27, AR_CFLAG), | ||
823 | IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(fsr, FSR), | ||
824 | IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(fir, FIR), | ||
825 | IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(fdr, FDR), | ||
826 | IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(ccv, CCV), | ||
827 | IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(unat, UNAT), | ||
828 | IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(fpsr, FPSR), | ||
829 | IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(itc, ITC), | ||
830 | IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(pfs, PFS), | ||
831 | IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(lc, LC), | ||
832 | IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(ec, EC), | ||
833 | |||
834 | IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(dcr, DCR), | ||
835 | IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(itm, ITM), | ||
836 | IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(iva, IVA), | ||
837 | IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(pta, PTA), | ||
838 | IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(ipsr, IPSR), | ||
839 | IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(isr, ISR), | ||
840 | IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(iip, IIP), | ||
841 | IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(ifa, IFA), | ||
842 | IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(itir, ITIR), | ||
843 | IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(iipa, IIPA), | ||
844 | IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(ifs, IFS), | ||
845 | IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(iim, IIM), | ||
846 | IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(iha, IHA), | ||
847 | IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(lid, LID), | ||
848 | IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(ivr, IVR), | ||
849 | IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(tpr, TPR), | ||
850 | IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(eoi, EOI), | ||
851 | IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(irr0, IRR0), | ||
852 | IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(irr1, IRR1), | ||
853 | IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(irr2, IRR2), | ||
854 | IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(irr3, IRR3), | ||
855 | IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(itv, ITV), | ||
856 | IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(pmv, PMV), | ||
857 | IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(cmcv, CMCV), | ||
858 | IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(lrr0, LRR0), | ||
859 | IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(lrr1, LRR1), | ||
369 | }; | 860 | }; |
861 | |||
862 | unsigned long __init_or_module | ||
863 | ia64_native_patch_bundle(void *sbundle, void *ebundle, unsigned long type) | ||
864 | { | ||
865 | const unsigned long nelems = sizeof(ia64_native_patch_bundle_elems) / | ||
866 | sizeof(ia64_native_patch_bundle_elems[0]); | ||
867 | |||
868 | return __paravirt_patch_apply_bundle(sbundle, ebundle, type, | ||
869 | ia64_native_patch_bundle_elems, | ||
870 | nelems, NULL); | ||
871 | } | ||
872 | #endif /* ASM_SUPPORTED */ | ||
873 | |||
874 | extern const char ia64_native_switch_to[]; | ||
875 | extern const char ia64_native_leave_syscall[]; | ||
876 | extern const char ia64_native_work_processed_syscall[]; | ||
877 | extern const char ia64_native_leave_kernel[]; | ||
878 | |||
879 | const struct paravirt_patch_branch_target ia64_native_branch_target[] | ||
880 | __initconst = { | ||
881 | #define PARAVIRT_BR_TARGET(name, type) \ | ||
882 | { \ | ||
883 | ia64_native_ ## name, \ | ||
884 | PARAVIRT_PATCH_TYPE_BR_ ## type, \ | ||
885 | } | ||
886 | PARAVIRT_BR_TARGET(switch_to, SWITCH_TO), | ||
887 | PARAVIRT_BR_TARGET(leave_syscall, LEAVE_SYSCALL), | ||
888 | PARAVIRT_BR_TARGET(work_processed_syscall, WORK_PROCESSED_SYSCALL), | ||
889 | PARAVIRT_BR_TARGET(leave_kernel, LEAVE_KERNEL), | ||
890 | }; | ||
891 | |||
892 | static void __init | ||
893 | ia64_native_patch_branch(unsigned long tag, unsigned long type) | ||
894 | { | ||
895 | const unsigned long nelem = | ||
896 | sizeof(ia64_native_branch_target) / | ||
897 | sizeof(ia64_native_branch_target[0]); | ||
898 | __paravirt_patch_apply_branch(tag, type, | ||
899 | ia64_native_branch_target, nelem); | ||
900 | } | ||
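The __initdata_or_module table above is the heart of native bundle patching: each element records the [start, end) of one native snippet emitted by __DEFINE_FUNC together with its PARAVIRT_PATCH_TYPE_* value, and ia64_native_patch_bundle() simply forwards the table to __paravirt_patch_apply_bundle() (added in paravirt_patch.c below), which copies the matching snippet over the patch site. Roughly, one element has this shape (a sketch inferred from the initializers and the p->sbundle/p->ebundle/p->type uses; the real declaration lives in the paravirt headers):

struct paravirt_patch_bundle_elem {
	const void	*sbundle;	/* start of the native replacement code */
	const void	*ebundle;	/* end of the native replacement code   */
	unsigned long	type;		/* PARAVIRT_PATCH_TYPE_* it implements  */
};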
diff --git a/arch/ia64/kernel/paravirt_patch.c b/arch/ia64/kernel/paravirt_patch.c new file mode 100644 index 000000000000..bfdfef1b1ffd --- /dev/null +++ b/arch/ia64/kernel/paravirt_patch.c | |||
@@ -0,0 +1,514 @@ | |||
1 | /****************************************************************************** | ||
2 | * linux/arch/ia64/kernel/paravirt_patch.c | ||
3 | * | ||
4 | * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> | ||
5 | * VA Linux Systems Japan K.K. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | #include <linux/init.h> | ||
24 | #include <asm/intrinsics.h> | ||
25 | #include <asm/kprobes.h> | ||
26 | #include <asm/paravirt.h> | ||
27 | #include <asm/paravirt_patch.h> | ||
28 | |||
29 | typedef union ia64_inst { | ||
30 | struct { | ||
31 | unsigned long long qp : 6; | ||
32 | unsigned long long : 31; | ||
33 | unsigned long long opcode : 4; | ||
34 | unsigned long long reserved : 23; | ||
35 | } generic; | ||
36 | unsigned long long l; | ||
37 | } ia64_inst_t; | ||
38 | |||
39 | /* | ||
40 | * flush_icache_range() can't be used here: we get here before | ||
41 | * cpu_init(), which initializes ia64_i_cache_stride_shift, and | ||
42 | * flush_icache_range() relies on that value. | ||
43 | */ | ||
44 | void __init_or_module | ||
45 | paravirt_flush_i_cache_range(const void *instr, unsigned long size) | ||
46 | { | ||
47 | extern void paravirt_fc_i(const void *addr); | ||
48 | unsigned long i; | ||
49 | |||
50 | for (i = 0; i < size; i += sizeof(bundle_t)) | ||
51 | paravirt_fc_i(instr + i); | ||
52 | } | ||
53 | |||
54 | bundle_t* __init_or_module | ||
55 | paravirt_get_bundle(unsigned long tag) | ||
56 | { | ||
57 | return (bundle_t *)(tag & ~3UL); | ||
58 | } | ||
59 | |||
60 | unsigned long __init_or_module | ||
61 | paravirt_get_slot(unsigned long tag) | ||
62 | { | ||
63 | return tag & 3UL; | ||
64 | } | ||
65 | |||
66 | unsigned long __init_or_module | ||
67 | paravirt_get_num_inst(unsigned long stag, unsigned long etag) | ||
68 | { | ||
69 | bundle_t *sbundle = paravirt_get_bundle(stag); | ||
70 | unsigned long sslot = paravirt_get_slot(stag); | ||
71 | bundle_t *ebundle = paravirt_get_bundle(etag); | ||
72 | unsigned long eslot = paravirt_get_slot(etag); | ||
73 | |||
74 | return (ebundle - sbundle) * 3 + eslot - sslot + 1; | ||
75 | } | ||
76 | |||
77 | unsigned long __init_or_module | ||
78 | paravirt_get_next_tag(unsigned long tag) | ||
79 | { | ||
80 | unsigned long slot = paravirt_get_slot(tag); | ||
81 | |||
82 | switch (slot) { | ||
83 | case 0: | ||
84 | case 1: | ||
85 | return tag + 1; | ||
86 | case 2: { | ||
87 | bundle_t *bundle = paravirt_get_bundle(tag); | ||
88 | return (unsigned long)(bundle + 1); | ||
89 | } | ||
90 | default: | ||
91 | BUG(); | ||
92 | } | ||
93 | /* NOTREACHED */ | ||
94 | } | ||
95 | |||
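A patch-site "tag" used throughout this file is the address of a 16-byte-aligned bundle with the slot index (0..2) stored in its two low bits, which is why paravirt_get_bundle() masks with ~3UL and paravirt_get_slot() masks with 3UL. A worked example of paravirt_get_num_inst(), with illustrative addresses only:

/*
 *   stag = 0xa000000100000010 | 1   ->  bundle ...010, slot 1
 *   etag = 0xa000000100000020 | 0   ->  the next bundle, slot 0
 *   (ebundle - sbundle) * 3 + eslot - sslot + 1 = 1*3 + 0 - 1 + 1 = 3
 * i.e. slots 1 and 2 of the first bundle plus slot 0 of the second.
 */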
96 | ia64_inst_t __init_or_module | ||
97 | paravirt_read_slot0(const bundle_t *bundle) | ||
98 | { | ||
99 | ia64_inst_t inst; | ||
100 | inst.l = bundle->quad0.slot0; | ||
101 | return inst; | ||
102 | } | ||
103 | |||
104 | ia64_inst_t __init_or_module | ||
105 | paravirt_read_slot1(const bundle_t *bundle) | ||
106 | { | ||
107 | ia64_inst_t inst; | ||
108 | inst.l = bundle->quad0.slot1_p0 | | ||
109 | ((unsigned long long)bundle->quad1.slot1_p1 << 18UL); | ||
110 | return inst; | ||
111 | } | ||
112 | |||
113 | ia64_inst_t __init_or_module | ||
114 | paravirt_read_slot2(const bundle_t *bundle) | ||
115 | { | ||
116 | ia64_inst_t inst; | ||
117 | inst.l = bundle->quad1.slot2; | ||
118 | return inst; | ||
119 | } | ||
120 | |||
121 | ia64_inst_t __init_or_module | ||
122 | paravirt_read_inst(unsigned long tag) | ||
123 | { | ||
124 | bundle_t *bundle = paravirt_get_bundle(tag); | ||
125 | unsigned long slot = paravirt_get_slot(tag); | ||
126 | |||
127 | switch (slot) { | ||
128 | case 0: | ||
129 | return paravirt_read_slot0(bundle); | ||
130 | case 1: | ||
131 | return paravirt_read_slot1(bundle); | ||
132 | case 2: | ||
133 | return paravirt_read_slot2(bundle); | ||
134 | default: | ||
135 | BUG(); | ||
136 | } | ||
137 | /* NOTREACHED */ | ||
138 | } | ||
139 | |||
140 | void __init_or_module | ||
141 | paravirt_write_slot0(bundle_t *bundle, ia64_inst_t inst) | ||
142 | { | ||
143 | bundle->quad0.slot0 = inst.l; | ||
144 | } | ||
145 | |||
146 | void __init_or_module | ||
147 | paravirt_write_slot1(bundle_t *bundle, ia64_inst_t inst) | ||
148 | { | ||
149 | bundle->quad0.slot1_p0 = inst.l; | ||
150 | bundle->quad1.slot1_p1 = inst.l >> 18UL; | ||
151 | } | ||
152 | |||
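The slot accessors above reflect the IA-64 bundle layout: a bundle is 128 bits holding a 5-bit template and three 41-bit instruction slots, so slot 1 straddles the two 64-bit halves -- its low 18 bits sit at the top of quad0 and its upper 23 bits at the bottom of quad1, which is exactly the 18-bit shift used by paravirt_read_slot1()/paravirt_write_slot1(). The bit budget, as a comment-style reminder (architecture background, not part of the patch):

/*
 *   quad0: template(5) + slot0(41) + slot1 low bits(18)  = 64
 *   quad1: slot1 high bits(23)     + slot2(41)           = 64
 */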
153 | void __init_or_module | ||
154 | paravirt_write_slot2(bundle_t *bundle, ia64_inst_t inst) | ||
155 | { | ||
156 | bundle->quad1.slot2 = inst.l; | ||
157 | } | ||
158 | |||
159 | void __init_or_module | ||
160 | paravirt_write_inst(unsigned long tag, ia64_inst_t inst) | ||
161 | { | ||
162 | bundle_t *bundle = paravirt_get_bundle(tag); | ||
163 | unsigned long slot = paravirt_get_slot(tag); | ||
164 | |||
165 | switch (slot) { | ||
166 | case 0: | ||
167 | paravirt_write_slot0(bundle, inst); | ||
168 | break; | ||
169 | case 1: | ||
170 | paravirt_write_slot1(bundle, inst); | ||
171 | break; | ||
172 | case 2: | ||
173 | paravirt_write_slot2(bundle, inst); | ||
174 | break; | ||
175 | default: | ||
176 | BUG(); | ||
177 | break; | ||
178 | } | ||
179 | paravirt_flush_i_cache_range(bundle, sizeof(*bundle)); | ||
180 | } | ||
181 | |||
182 | /* for debug */ | ||
183 | void | ||
184 | paravirt_print_bundle(const bundle_t *bundle) | ||
185 | { | ||
186 | const unsigned long *quad = (const unsigned long *)bundle; | ||
187 | ia64_inst_t slot0 = paravirt_read_slot0(bundle); | ||
188 | ia64_inst_t slot1 = paravirt_read_slot1(bundle); | ||
189 | ia64_inst_t slot2 = paravirt_read_slot2(bundle); | ||
190 | |||
191 | printk(KERN_DEBUG | ||
192 | "bundle 0x%p 0x%016lx 0x%016lx\n", bundle, quad[0], quad[1]); | ||
193 | printk(KERN_DEBUG | ||
194 | "bundle template 0x%x\n", | ||
195 | bundle->quad0.template); | ||
196 | printk(KERN_DEBUG | ||
197 | "slot0 0x%lx slot1_p0 0x%lx slot1_p1 0x%lx slot2 0x%lx\n", | ||
198 | (unsigned long)bundle->quad0.slot0, | ||
199 | (unsigned long)bundle->quad0.slot1_p0, | ||
200 | (unsigned long)bundle->quad1.slot1_p1, | ||
201 | (unsigned long)bundle->quad1.slot2); | ||
202 | printk(KERN_DEBUG | ||
203 | "slot0 0x%016llx slot1 0x%016llx slot2 0x%016llx\n", | ||
204 | slot0.l, slot1.l, slot2.l); | ||
205 | } | ||
206 | |||
207 | static int noreplace_paravirt __init_or_module = 0; | ||
208 | |||
209 | static int __init setup_noreplace_paravirt(char *str) | ||
210 | { | ||
211 | noreplace_paravirt = 1; | ||
212 | return 1; | ||
213 | } | ||
214 | __setup("noreplace-paravirt", setup_noreplace_paravirt); | ||
215 | |||
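As on x86, the noreplace-paravirt option provides a runtime escape hatch: when it appears on the kernel command line, the __setup() handler above sets the flag and the paravirt_patch_apply_{bundle,inst,branch}() routines below return before touching any patch site. For example (illustrative command line):

	vmlinuz root=/dev/sda2 ... noreplace-paravirt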
216 | #ifdef ASM_SUPPORTED | ||
217 | static void __init_or_module | ||
218 | fill_nop_bundle(void *sbundle, void *ebundle) | ||
219 | { | ||
220 | extern const char paravirt_nop_bundle[]; | ||
221 | extern const unsigned long paravirt_nop_bundle_size; | ||
222 | |||
223 | void *bundle = sbundle; | ||
224 | |||
225 | BUG_ON((((unsigned long)sbundle) % sizeof(bundle_t)) != 0); | ||
226 | BUG_ON((((unsigned long)ebundle) % sizeof(bundle_t)) != 0); | ||
227 | |||
228 | while (bundle < ebundle) { | ||
229 | memcpy(bundle, paravirt_nop_bundle, paravirt_nop_bundle_size); | ||
230 | |||
231 | bundle += paravirt_nop_bundle_size; | ||
232 | } | ||
233 | } | ||
234 | |||
235 | /* helper function */ | ||
236 | unsigned long __init_or_module | ||
237 | __paravirt_patch_apply_bundle(void *sbundle, void *ebundle, unsigned long type, | ||
238 | const struct paravirt_patch_bundle_elem *elems, | ||
239 | unsigned long nelems, | ||
240 | const struct paravirt_patch_bundle_elem **found) | ||
241 | { | ||
242 | unsigned long used = 0; | ||
243 | unsigned long i; | ||
244 | |||
245 | BUG_ON((((unsigned long)sbundle) % sizeof(bundle_t)) != 0); | ||
246 | BUG_ON((((unsigned long)ebundle) % sizeof(bundle_t)) != 0); | ||
247 | |||
248 | found = NULL; | ||
249 | for (i = 0; i < nelems; i++) { | ||
250 | const struct paravirt_patch_bundle_elem *p = &elems[i]; | ||
251 | if (p->type == type) { | ||
252 | unsigned long need = p->ebundle - p->sbundle; | ||
253 | unsigned long room = ebundle - sbundle; | ||
254 | |||
255 | if (found != NULL) | ||
256 | *found = p; | ||
257 | |||
258 | if (room < need) { | ||
259 | /* no room to replace. skip it */ | ||
260 | printk(KERN_DEBUG | ||
261 | "the space is too small to put " | ||
262 | "bundles. type %ld need %ld room %ld\n", | ||
263 | type, need, room); | ||
264 | break; | ||
265 | } | ||
266 | |||
267 | used = need; | ||
268 | memcpy(sbundle, p->sbundle, used); | ||
269 | break; | ||
270 | } | ||
271 | } | ||
272 | |||
273 | return used; | ||
274 | } | ||
275 | |||
276 | void __init_or_module | ||
277 | paravirt_patch_apply_bundle(const struct paravirt_patch_site_bundle *start, | ||
278 | const struct paravirt_patch_site_bundle *end) | ||
279 | { | ||
280 | const struct paravirt_patch_site_bundle *p; | ||
281 | |||
282 | if (noreplace_paravirt) | ||
283 | return; | ||
284 | if (pv_init_ops.patch_bundle == NULL) | ||
285 | return; | ||
286 | |||
287 | for (p = start; p < end; p++) { | ||
288 | unsigned long used; | ||
289 | |||
290 | used = (*pv_init_ops.patch_bundle)(p->sbundle, p->ebundle, | ||
291 | p->type); | ||
292 | if (used == 0) | ||
293 | continue; | ||
294 | |||
295 | fill_nop_bundle(p->sbundle + used, p->ebundle); | ||
296 | paravirt_flush_i_cache_range(p->sbundle, | ||
297 | p->ebundle - p->sbundle); | ||
298 | } | ||
299 | ia64_sync_i(); | ||
300 | ia64_srlz_i(); | ||
301 | } | ||
302 | |||
303 | /* | ||
304 | * nop.i, nop.m and nop.f instructions share the same format, | ||
305 | * but nop.b has a different format. | ||
306 | * nop.b is not supported here for now. | ||
307 | */ | ||
308 | static void __init_or_module | ||
309 | fill_nop_inst(unsigned long stag, unsigned long etag) | ||
310 | { | ||
311 | extern const bundle_t paravirt_nop_mfi_inst_bundle[]; | ||
312 | unsigned long tag; | ||
313 | const ia64_inst_t nop_inst = | ||
314 | paravirt_read_slot0(paravirt_nop_mfi_inst_bundle); | ||
315 | |||
316 | for (tag = stag; tag < etag; tag = paravirt_get_next_tag(tag)) | ||
317 | paravirt_write_inst(tag, nop_inst); | ||
318 | } | ||
319 | |||
320 | void __init_or_module | ||
321 | paravirt_patch_apply_inst(const struct paravirt_patch_site_inst *start, | ||
322 | const struct paravirt_patch_site_inst *end) | ||
323 | { | ||
324 | const struct paravirt_patch_site_inst *p; | ||
325 | |||
326 | if (noreplace_paravirt) | ||
327 | return; | ||
328 | if (pv_init_ops.patch_inst == NULL) | ||
329 | return; | ||
330 | |||
331 | for (p = start; p < end; p++) { | ||
332 | unsigned long tag; | ||
333 | bundle_t *sbundle; | ||
334 | bundle_t *ebundle; | ||
335 | |||
336 | tag = (*pv_init_ops.patch_inst)(p->stag, p->etag, p->type); | ||
337 | if (tag == p->stag) | ||
338 | continue; | ||
339 | |||
340 | fill_nop_inst(tag, p->etag); | ||
341 | sbundle = paravirt_get_bundle(p->stag); | ||
342 | ebundle = paravirt_get_bundle(p->etag) + 1; | ||
343 | paravirt_flush_i_cache_range(sbundle, (ebundle - sbundle) * | ||
344 | sizeof(bundle_t)); | ||
345 | } | ||
346 | ia64_sync_i(); | ||
347 | ia64_srlz_i(); | ||
348 | } | ||
349 | #endif /* ASM_SUPPORTED */ | ||
350 | |||
351 | /* brl.cond.sptk.many <target64> X3 */ | ||
352 | typedef union inst_x3_op { | ||
353 | ia64_inst_t inst; | ||
354 | struct { | ||
355 | unsigned long qp: 6; | ||
356 | unsigned long btyp: 3; | ||
357 | unsigned long unused: 3; | ||
358 | unsigned long p: 1; | ||
359 | unsigned long imm20b: 20; | ||
360 | unsigned long wh: 2; | ||
361 | unsigned long d: 1; | ||
362 | unsigned long i: 1; | ||
363 | unsigned long opcode: 4; | ||
364 | }; | ||
365 | unsigned long l; | ||
366 | } inst_x3_op_t; | ||
367 | |||
368 | typedef union inst_x3_imm { | ||
369 | ia64_inst_t inst; | ||
370 | struct { | ||
371 | unsigned long unused: 2; | ||
372 | unsigned long imm39: 39; | ||
373 | }; | ||
374 | unsigned long l; | ||
375 | } inst_x3_imm_t; | ||
376 | |||
377 | void __init_or_module | ||
378 | paravirt_patch_reloc_brl(unsigned long tag, const void *target) | ||
379 | { | ||
380 | unsigned long tag_op = paravirt_get_next_tag(tag); | ||
381 | unsigned long tag_imm = tag; | ||
382 | bundle_t *bundle = paravirt_get_bundle(tag); | ||
383 | |||
384 | ia64_inst_t inst_op = paravirt_read_inst(tag_op); | ||
385 | ia64_inst_t inst_imm = paravirt_read_inst(tag_imm); | ||
386 | |||
387 | inst_x3_op_t inst_x3_op = { .l = inst_op.l }; | ||
388 | inst_x3_imm_t inst_x3_imm = { .l = inst_imm.l }; | ||
389 | |||
390 | unsigned long imm60 = | ||
391 | ((unsigned long)target - (unsigned long)bundle) >> 4; | ||
392 | |||
393 | BUG_ON(paravirt_get_slot(tag) != 1); /* MLX */ | ||
394 | BUG_ON(((unsigned long)target & (sizeof(bundle_t) - 1)) != 0); | ||
395 | |||
396 | /* imm60[59] 1bit */ | ||
397 | inst_x3_op.i = (imm60 >> 59) & 1; | ||
398 | /* imm60[19:0] 20bit */ | ||
399 | inst_x3_op.imm20b = imm60 & ((1UL << 20) - 1); | ||
400 | /* imm60[58:20] 39bit */ | ||
401 | inst_x3_imm.imm39 = (imm60 >> 20) & ((1UL << 39) - 1); | ||
402 | |||
403 | inst_op.l = inst_x3_op.l; | ||
404 | inst_imm.l = inst_x3_imm.l; | ||
405 | |||
406 | paravirt_write_inst(tag_op, inst_op); | ||
407 | paravirt_write_inst(tag_imm, inst_imm); | ||
408 | } | ||
409 | |||
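The relocation above targets the MLX brl.cond form: the 60-bit IP-relative displacement (in bundle units, hence the >> 4) is scattered over the i bit and the imm20b field of the slot-2 instruction plus the imm39 field held in slot 1, which is why the BUG_ON() insists the tag points at slot 1. A worked example with illustrative addresses:

/*
 *   bundle = 0xa000000100010000, target = 0xa000000100090000
 *   imm60  = (target - bundle) >> 4 = 0x80000 >> 4 = 0x8000
 *   i      = imm60 >> 59            = 0
 *   imm20b = imm60 & 0xfffff        = 0x8000
 *   imm39  = imm60 >> 20            = 0
 * and the CPU reassembles the target as bundle + (imm60 << 4).
 */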
410 | /* br.cond.sptk.many <target25> B1 */ | ||
411 | typedef union inst_b1 { | ||
412 | ia64_inst_t inst; | ||
413 | struct { | ||
414 | unsigned long qp: 6; | ||
415 | unsigned long btype: 3; | ||
416 | unsigned long unused: 3; | ||
417 | unsigned long p: 1; | ||
418 | unsigned long imm20b: 20; | ||
419 | unsigned long wh: 2; | ||
420 | unsigned long d: 1; | ||
421 | unsigned long s: 1; | ||
422 | unsigned long opcode: 4; | ||
423 | }; | ||
424 | unsigned long l; | ||
425 | } inst_b1_t; | ||
426 | |||
427 | void __init | ||
428 | paravirt_patch_reloc_br(unsigned long tag, const void *target) | ||
429 | { | ||
430 | bundle_t *bundle = paravirt_get_bundle(tag); | ||
431 | ia64_inst_t inst = paravirt_read_inst(tag); | ||
432 | unsigned long target25 = (unsigned long)target - (unsigned long)bundle; | ||
433 | inst_b1_t inst_b1; | ||
434 | |||
435 | BUG_ON(((unsigned long)target & (sizeof(bundle_t) - 1)) != 0); | ||
436 | |||
437 | inst_b1.l = inst.l; | ||
438 | if (target25 & (1UL << 63)) | ||
439 | inst_b1.s = 1; | ||
440 | else | ||
441 | inst_b1.s = 0; | ||
442 | |||
443 | inst_b1.imm20b = target25 >> 4; | ||
444 | inst.l = inst_b1.l; | ||
445 | |||
446 | paravirt_write_inst(tag, inst); | ||
447 | } | ||
448 | |||
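The short B1 br.cond form patched here carries only a sign bit plus a 20-bit bundle-unit displacement, giving a reach of roughly +/-16 MB around the patch site, which is why the long-range brl variant above exists for arbitrary 64-bit targets. As a quick check (illustrative arithmetic):

/*  reach = (1UL << 20) * sizeof(bundle_t) = 1048576 * 16 bytes = 16 MB each way  */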
449 | void __init | ||
450 | __paravirt_patch_apply_branch( | ||
451 | unsigned long tag, unsigned long type, | ||
452 | const struct paravirt_patch_branch_target *entries, | ||
453 | unsigned int nr_entries) | ||
454 | { | ||
455 | unsigned int i; | ||
456 | for (i = 0; i < nr_entries; i++) { | ||
457 | if (entries[i].type == type) { | ||
458 | paravirt_patch_reloc_br(tag, entries[i].entry); | ||
459 | break; | ||
460 | } | ||
461 | } | ||
462 | } | ||
463 | |||
464 | static void __init | ||
465 | paravirt_patch_apply_branch(const struct paravirt_patch_site_branch *start, | ||
466 | const struct paravirt_patch_site_branch *end) | ||
467 | { | ||
468 | const struct paravirt_patch_site_branch *p; | ||
469 | |||
470 | if (noreplace_paravirt) | ||
471 | return; | ||
472 | if (pv_init_ops.patch_branch == NULL) | ||
473 | return; | ||
474 | |||
475 | for (p = start; p < end; p++) | ||
476 | (*pv_init_ops.patch_branch)(p->tag, p->type); | ||
477 | |||
478 | ia64_sync_i(); | ||
479 | ia64_srlz_i(); | ||
480 | } | ||
481 | |||
482 | void __init | ||
483 | paravirt_patch_apply(void) | ||
484 | { | ||
485 | extern const char __start_paravirt_bundles[]; | ||
486 | extern const char __stop_paravirt_bundles[]; | ||
487 | extern const char __start_paravirt_insts[]; | ||
488 | extern const char __stop_paravirt_insts[]; | ||
489 | extern const char __start_paravirt_branches[]; | ||
490 | extern const char __stop_paravirt_branches[]; | ||
491 | |||
492 | paravirt_patch_apply_bundle((const struct paravirt_patch_site_bundle *) | ||
493 | __start_paravirt_bundles, | ||
494 | (const struct paravirt_patch_site_bundle *) | ||
495 | __stop_paravirt_bundles); | ||
496 | paravirt_patch_apply_inst((const struct paravirt_patch_site_inst *) | ||
497 | __start_paravirt_insts, | ||
498 | (const struct paravirt_patch_site_inst *) | ||
499 | __stop_paravirt_insts); | ||
500 | paravirt_patch_apply_branch((const struct paravirt_patch_site_branch *) | ||
501 | __start_paravirt_branches, | ||
502 | (const struct paravirt_patch_site_branch *) | ||
503 | __stop_paravirt_branches); | ||
504 | } | ||
505 | |||
506 | /* | ||
507 | * Local variables: | ||
508 | * mode: C | ||
509 | * c-set-style: "linux" | ||
510 | * c-basic-offset: 8 | ||
511 | * tab-width: 8 | ||
512 | * indent-tabs-mode: t | ||
513 | * End: | ||
514 | */ | ||
diff --git a/arch/ia64/kernel/paravirt_patchlist.c b/arch/ia64/kernel/paravirt_patchlist.c new file mode 100644 index 000000000000..b28082a95d45 --- /dev/null +++ b/arch/ia64/kernel/paravirt_patchlist.c | |||
@@ -0,0 +1,79 @@ | |||
1 | /****************************************************************************** | ||
2 | * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> | ||
3 | * VA Linux Systems Japan K.K. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; either version 2 of the License, or | ||
8 | * (at your option) any later version. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
18 | * | ||
19 | */ | ||
20 | |||
21 | #include <linux/bug.h> | ||
22 | #include <asm/paravirt.h> | ||
23 | |||
24 | #define DECLARE(name) \ | ||
25 | extern unsigned long \ | ||
26 | __ia64_native_start_gate_##name##_patchlist[]; \ | ||
27 | extern unsigned long \ | ||
28 | __ia64_native_end_gate_##name##_patchlist[] | ||
29 | |||
30 | DECLARE(fsyscall); | ||
31 | DECLARE(brl_fsys_bubble_down); | ||
32 | DECLARE(vtop); | ||
33 | DECLARE(mckinley_e9); | ||
34 | |||
35 | extern unsigned long __start_gate_section[]; | ||
36 | |||
37 | #define ASSIGN(name) \ | ||
38 | .start_##name##_patchlist = \ | ||
39 | (unsigned long)__ia64_native_start_gate_##name##_patchlist, \ | ||
40 | .end_##name##_patchlist = \ | ||
41 | (unsigned long)__ia64_native_end_gate_##name##_patchlist | ||
42 | |||
43 | struct pv_patchdata pv_patchdata __initdata = { | ||
44 | ASSIGN(fsyscall), | ||
45 | ASSIGN(brl_fsys_bubble_down), | ||
46 | ASSIGN(vtop), | ||
47 | ASSIGN(mckinley_e9), | ||
48 | |||
49 | .gate_section = (void*)__start_gate_section, | ||
50 | }; | ||
51 | |||
52 | |||
53 | unsigned long __init | ||
54 | paravirt_get_gate_patchlist(enum pv_gate_patchlist type) | ||
55 | { | ||
56 | |||
57 | #define CASE(NAME, name) \ | ||
58 | case PV_GATE_START_##NAME: \ | ||
59 | return pv_patchdata.start_##name##_patchlist; \ | ||
60 | case PV_GATE_END_##NAME: \ | ||
61 | return pv_patchdata.end_##name##_patchlist; \ | ||
62 | |||
63 | switch (type) { | ||
64 | CASE(FSYSCALL, fsyscall); | ||
65 | CASE(BRL_FSYS_BUBBLE_DOWN, brl_fsys_bubble_down); | ||
66 | CASE(VTOP, vtop); | ||
67 | CASE(MCKINLEY_E9, mckinley_e9); | ||
68 | default: | ||
69 | BUG(); | ||
70 | break; | ||
71 | } | ||
72 | return 0; | ||
73 | } | ||
74 | |||
75 | void * __init | ||
76 | paravirt_get_gate_section(void) | ||
77 | { | ||
78 | return pv_patchdata.gate_section; | ||
79 | } | ||
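The CASE() macro above keeps the type-to-bounds switch compact; for one name it expands as sketched below, so ia64_patch_gate() (see the patch.c hunk further down) can request either bound of a gate patchlist by its PV_GATE_* value:

/* CASE(FSYSCALL, fsyscall) expands, roughly, to: */
	case PV_GATE_START_FSYSCALL:
		return pv_patchdata.start_fsyscall_patchlist;
	case PV_GATE_END_FSYSCALL:
		return pv_patchdata.end_fsyscall_patchlist;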
diff --git a/arch/ia64/kernel/paravirt_patchlist.h b/arch/ia64/kernel/paravirt_patchlist.h new file mode 100644 index 000000000000..0684aa6c6507 --- /dev/null +++ b/arch/ia64/kernel/paravirt_patchlist.h | |||
@@ -0,0 +1,28 @@ | |||
1 | /****************************************************************************** | ||
2 | * linux/arch/ia64/kernel/paravirt_patchlist.h | ||
3 | * | ||
4 | * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> | ||
5 | * VA Linux Systems Japan K.K. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | #if defined(__IA64_GATE_PARAVIRTUALIZED_XEN) | ||
24 | #include <asm/xen/patchlist.h> | ||
25 | #else | ||
26 | #include <asm/native/patchlist.h> | ||
27 | #endif | ||
28 | |||
diff --git a/arch/ia64/kernel/paravirtentry.S b/arch/ia64/kernel/paravirtentry.S index 2f42fcb9776a..6158560d7f17 100644 --- a/arch/ia64/kernel/paravirtentry.S +++ b/arch/ia64/kernel/paravirtentry.S | |||
@@ -20,8 +20,11 @@ | |||
20 | * | 20 | * |
21 | */ | 21 | */ |
22 | 22 | ||
23 | #include <linux/init.h> | ||
23 | #include <asm/asmmacro.h> | 24 | #include <asm/asmmacro.h> |
24 | #include <asm/asm-offsets.h> | 25 | #include <asm/asm-offsets.h> |
26 | #include <asm/paravirt_privop.h> | ||
27 | #include <asm/paravirt_patch.h> | ||
25 | #include "entry.h" | 28 | #include "entry.h" |
26 | 29 | ||
27 | #define DATA8(sym, init_value) \ | 30 | #define DATA8(sym, init_value) \ |
@@ -32,29 +35,87 @@ | |||
32 | data8 init_value ; \ | 35 | data8 init_value ; \ |
33 | .popsection | 36 | .popsection |
34 | 37 | ||
35 | #define BRANCH(targ, reg, breg) \ | 38 | #define BRANCH(targ, reg, breg, type) \ |
36 | movl reg=targ ; \ | 39 | PARAVIRT_PATCH_SITE_BR(PARAVIRT_PATCH_TYPE_BR_ ## type) ; \ |
37 | ;; \ | 40 | ;; \ |
38 | ld8 reg=[reg] ; \ | 41 | movl reg=targ ; \ |
39 | ;; \ | 42 | ;; \ |
40 | mov breg=reg ; \ | 43 | ld8 reg=[reg] ; \ |
44 | ;; \ | ||
45 | mov breg=reg ; \ | ||
41 | br.cond.sptk.many breg | 46 | br.cond.sptk.many breg |
42 | 47 | ||
43 | #define BRANCH_PROC(sym, reg, breg) \ | 48 | #define BRANCH_PROC(sym, reg, breg, type) \ |
44 | DATA8(paravirt_ ## sym ## _targ, ia64_native_ ## sym) ; \ | 49 | DATA8(paravirt_ ## sym ## _targ, ia64_native_ ## sym) ; \ |
45 | GLOBAL_ENTRY(paravirt_ ## sym) ; \ | 50 | GLOBAL_ENTRY(paravirt_ ## sym) ; \ |
46 | BRANCH(paravirt_ ## sym ## _targ, reg, breg) ; \ | 51 | BRANCH(paravirt_ ## sym ## _targ, reg, breg, type) ; \ |
47 | END(paravirt_ ## sym) | 52 | END(paravirt_ ## sym) |
48 | 53 | ||
49 | #define BRANCH_PROC_UNWINFO(sym, reg, breg) \ | 54 | #define BRANCH_PROC_UNWINFO(sym, reg, breg, type) \ |
50 | DATA8(paravirt_ ## sym ## _targ, ia64_native_ ## sym) ; \ | 55 | DATA8(paravirt_ ## sym ## _targ, ia64_native_ ## sym) ; \ |
51 | GLOBAL_ENTRY(paravirt_ ## sym) ; \ | 56 | GLOBAL_ENTRY(paravirt_ ## sym) ; \ |
52 | PT_REGS_UNWIND_INFO(0) ; \ | 57 | PT_REGS_UNWIND_INFO(0) ; \ |
53 | BRANCH(paravirt_ ## sym ## _targ, reg, breg) ; \ | 58 | BRANCH(paravirt_ ## sym ## _targ, reg, breg, type) ; \ |
54 | END(paravirt_ ## sym) | 59 | END(paravirt_ ## sym) |
55 | 60 | ||
56 | 61 | ||
57 | BRANCH_PROC(switch_to, r22, b7) | 62 | BRANCH_PROC(switch_to, r22, b7, SWITCH_TO) |
58 | BRANCH_PROC_UNWINFO(leave_syscall, r22, b7) | 63 | BRANCH_PROC_UNWINFO(leave_syscall, r22, b7, LEAVE_SYSCALL) |
59 | BRANCH_PROC(work_processed_syscall, r2, b7) | 64 | BRANCH_PROC(work_processed_syscall, r2, b7, WORK_PROCESSED_SYSCALL) |
60 | BRANCH_PROC_UNWINFO(leave_kernel, r22, b7) | 65 | BRANCH_PROC_UNWINFO(leave_kernel, r22, b7, LEAVE_KERNEL) |
66 | |||
67 | |||
68 | #ifdef CONFIG_MODULES | ||
69 | #define __INIT_OR_MODULE .text | ||
70 | #define __INITDATA_OR_MODULE .data | ||
71 | #else | ||
72 | #define __INIT_OR_MODULE __INIT | ||
73 | #define __INITDATA_OR_MODULE __INITDATA | ||
74 | #endif /* CONFIG_MODULES */ | ||
75 | |||
76 | __INIT_OR_MODULE | ||
77 | GLOBAL_ENTRY(paravirt_fc_i) | ||
78 | fc.i r32 | ||
79 | br.ret.sptk.many rp | ||
80 | END(paravirt_fc_i) | ||
81 | __FINIT | ||
82 | |||
83 | __INIT_OR_MODULE | ||
84 | .align 32 | ||
85 | GLOBAL_ENTRY(paravirt_nop_b_inst_bundle) | ||
86 | { | ||
87 | nop.b 0 | ||
88 | nop.b 0 | ||
89 | nop.b 0 | ||
90 | } | ||
91 | END(paravirt_nop_b_inst_bundle) | ||
92 | __FINIT | ||
93 | |||
94 | /* NOTE: nop.[mfi] share the same format */ | ||
95 | __INIT_OR_MODULE | ||
96 | GLOBAL_ENTRY(paravirt_nop_mfi_inst_bundle) | ||
97 | { | ||
98 | nop.m 0 | ||
99 | nop.f 0 | ||
100 | nop.i 0 | ||
101 | } | ||
102 | END(paravirt_nop_mfi_inst_bundle) | ||
103 | __FINIT | ||
104 | |||
105 | __INIT_OR_MODULE | ||
106 | GLOBAL_ENTRY(paravirt_nop_bundle) | ||
107 | paravirt_nop_bundle_start: | ||
108 | { | ||
109 | nop 0 | ||
110 | nop 0 | ||
111 | nop 0 | ||
112 | } | ||
113 | paravirt_nop_bundle_end: | ||
114 | END(paravirt_nop_bundle) | ||
115 | __FINIT | ||
116 | |||
117 | __INITDATA_OR_MODULE | ||
118 | .align 8 | ||
119 | .global paravirt_nop_bundle_size | ||
120 | paravirt_nop_bundle_size: | ||
121 | data8 paravirt_nop_bundle_end - paravirt_nop_bundle_start | ||
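With the extra type argument, every BRANCH_PROC stub is now also registered as a branch patch site, so on native boot the load-and-indirect-branch sequence can be rewritten into a direct branch to the ia64_native_* entry. For reference, BRANCH_PROC(switch_to, r22, b7, SWITCH_TO) expands roughly to the following (a sketch; the DATA8 section directives and the body of PARAVIRT_PATCH_SITE_BR are defined elsewhere and elided here):

paravirt_switch_to_targ:
	data8 ia64_native_switch_to
GLOBAL_ENTRY(paravirt_switch_to)
	PARAVIRT_PATCH_SITE_BR(PARAVIRT_PATCH_TYPE_BR_SWITCH_TO)
	;;
	movl r22 = paravirt_switch_to_targ
	;;
	ld8 r22 = [r22]
	;;
	mov b7 = r22
	br.cond.sptk.many b7
END(paravirt_switch_to)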
diff --git a/arch/ia64/kernel/patch.c b/arch/ia64/kernel/patch.c index b83b2c516008..68a1311db806 100644 --- a/arch/ia64/kernel/patch.c +++ b/arch/ia64/kernel/patch.c | |||
@@ -7,6 +7,7 @@ | |||
7 | #include <linux/init.h> | 7 | #include <linux/init.h> |
8 | #include <linux/string.h> | 8 | #include <linux/string.h> |
9 | 9 | ||
10 | #include <asm/paravirt.h> | ||
10 | #include <asm/patch.h> | 11 | #include <asm/patch.h> |
11 | #include <asm/processor.h> | 12 | #include <asm/processor.h> |
12 | #include <asm/sections.h> | 13 | #include <asm/sections.h> |
@@ -169,16 +170,35 @@ ia64_patch_mckinley_e9 (unsigned long start, unsigned long end) | |||
169 | ia64_srlz_i(); | 170 | ia64_srlz_i(); |
170 | } | 171 | } |
171 | 172 | ||
173 | extern unsigned long ia64_native_fsyscall_table[NR_syscalls]; | ||
174 | extern char ia64_native_fsys_bubble_down[]; | ||
175 | struct pv_fsys_data pv_fsys_data __initdata = { | ||
176 | .fsyscall_table = (unsigned long *)ia64_native_fsyscall_table, | ||
177 | .fsys_bubble_down = (void *)ia64_native_fsys_bubble_down, | ||
178 | }; | ||
179 | |||
180 | unsigned long * __init | ||
181 | paravirt_get_fsyscall_table(void) | ||
182 | { | ||
183 | return pv_fsys_data.fsyscall_table; | ||
184 | } | ||
185 | |||
186 | char * __init | ||
187 | paravirt_get_fsys_bubble_down(void) | ||
188 | { | ||
189 | return pv_fsys_data.fsys_bubble_down; | ||
190 | } | ||
191 | |||
172 | static void __init | 192 | static void __init |
173 | patch_fsyscall_table (unsigned long start, unsigned long end) | 193 | patch_fsyscall_table (unsigned long start, unsigned long end) |
174 | { | 194 | { |
175 | extern unsigned long fsyscall_table[NR_syscalls]; | 195 | u64 fsyscall_table = (u64)paravirt_get_fsyscall_table(); |
176 | s32 *offp = (s32 *) start; | 196 | s32 *offp = (s32 *) start; |
177 | u64 ip; | 197 | u64 ip; |
178 | 198 | ||
179 | while (offp < (s32 *) end) { | 199 | while (offp < (s32 *) end) { |
180 | ip = (u64) ia64_imva((char *) offp + *offp); | 200 | ip = (u64) ia64_imva((char *) offp + *offp); |
181 | ia64_patch_imm64(ip, (u64) fsyscall_table); | 201 | ia64_patch_imm64(ip, fsyscall_table); |
182 | ia64_fc((void *) ip); | 202 | ia64_fc((void *) ip); |
183 | ++offp; | 203 | ++offp; |
184 | } | 204 | } |
@@ -189,7 +209,7 @@ patch_fsyscall_table (unsigned long start, unsigned long end) | |||
189 | static void __init | 209 | static void __init |
190 | patch_brl_fsys_bubble_down (unsigned long start, unsigned long end) | 210 | patch_brl_fsys_bubble_down (unsigned long start, unsigned long end) |
191 | { | 211 | { |
192 | extern char fsys_bubble_down[]; | 212 | u64 fsys_bubble_down = (u64)paravirt_get_fsys_bubble_down(); |
193 | s32 *offp = (s32 *) start; | 213 | s32 *offp = (s32 *) start; |
194 | u64 ip; | 214 | u64 ip; |
195 | 215 | ||
@@ -207,13 +227,13 @@ patch_brl_fsys_bubble_down (unsigned long start, unsigned long end) | |||
207 | void __init | 227 | void __init |
208 | ia64_patch_gate (void) | 228 | ia64_patch_gate (void) |
209 | { | 229 | { |
210 | # define START(name) ((unsigned long) __start_gate_##name##_patchlist) | 230 | # define START(name) paravirt_get_gate_patchlist(PV_GATE_START_##name) |
211 | # define END(name) ((unsigned long)__end_gate_##name##_patchlist) | 231 | # define END(name) paravirt_get_gate_patchlist(PV_GATE_END_##name) |
212 | 232 | ||
213 | patch_fsyscall_table(START(fsyscall), END(fsyscall)); | 233 | patch_fsyscall_table(START(FSYSCALL), END(FSYSCALL)); |
214 | patch_brl_fsys_bubble_down(START(brl_fsys_bubble_down), END(brl_fsys_bubble_down)); | 234 | patch_brl_fsys_bubble_down(START(BRL_FSYS_BUBBLE_DOWN), END(BRL_FSYS_BUBBLE_DOWN)); |
215 | ia64_patch_vtop(START(vtop), END(vtop)); | 235 | ia64_patch_vtop(START(VTOP), END(VTOP)); |
216 | ia64_patch_mckinley_e9(START(mckinley_e9), END(mckinley_e9)); | 236 | ia64_patch_mckinley_e9(START(MCKINLEY_E9), END(MCKINLEY_E9)); |
217 | } | 237 | } |
218 | 238 | ||
219 | void ia64_patch_phys_stack_reg(unsigned long val) | 239 | void ia64_patch_phys_stack_reg(unsigned long val) |
@@ -229,7 +249,7 @@ void ia64_patch_phys_stack_reg(unsigned long val) | |||
229 | while (offp < end) { | 249 | while (offp < end) { |
230 | ip = (u64) offp + *offp; | 250 | ip = (u64) offp + *offp; |
231 | ia64_patch(ip, mask, imm); | 251 | ia64_patch(ip, mask, imm); |
232 | ia64_fc(ip); | 252 | ia64_fc((void *)ip); |
233 | ++offp; | 253 | ++offp; |
234 | } | 254 | } |
235 | ia64_sync_i(); | 255 | ia64_sync_i(); |
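The gate patchlists walked above are arrays of 32-bit self-relative offsets, each pointing at an instruction whose 64-bit immediate must be filled in; the change here is only that the value now comes from pv_fsys_data (defaulting to the native fsyscall table and bubble-down entry) instead of a hard-wired symbol, so a hypervisor can substitute its own before ia64_patch_gate() runs. The walk idiom in isolation, as a sketch:

	s32 *offp;
	for (offp = (s32 *)start; offp < (s32 *)end; offp++) {
		u64 ip = (u64)ia64_imva((char *)offp + *offp); /* offset -> address */
		ia64_patch_imm64(ip, new_value);   /* rewrite the 64-bit immediate */
		ia64_fc((void *)ip);               /* flush the modified cache line */
	}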
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index 5c0f408cfd71..8a06dc480594 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c | |||
@@ -5603,7 +5603,7 @@ pfm_interrupt_handler(int irq, void *arg) | |||
5603 | * /proc/perfmon interface, for debug only | 5603 | * /proc/perfmon interface, for debug only |
5604 | */ | 5604 | */ |
5605 | 5605 | ||
5606 | #define PFM_PROC_SHOW_HEADER ((void *)NR_CPUS+1) | 5606 | #define PFM_PROC_SHOW_HEADER ((void *)nr_cpu_ids+1) |
5607 | 5607 | ||
5608 | static void * | 5608 | static void * |
5609 | pfm_proc_start(struct seq_file *m, loff_t *pos) | 5609 | pfm_proc_start(struct seq_file *m, loff_t *pos) |
@@ -5612,7 +5612,7 @@ pfm_proc_start(struct seq_file *m, loff_t *pos) | |||
5612 | return PFM_PROC_SHOW_HEADER; | 5612 | return PFM_PROC_SHOW_HEADER; |
5613 | } | 5613 | } |
5614 | 5614 | ||
5615 | while (*pos <= NR_CPUS) { | 5615 | while (*pos <= nr_cpu_ids) { |
5616 | if (cpu_online(*pos - 1)) { | 5616 | if (cpu_online(*pos - 1)) { |
5617 | return (void *)*pos; | 5617 | return (void *)*pos; |
5618 | } | 5618 | } |
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c index c57162705147..5d7c0e5b9e76 100644 --- a/arch/ia64/kernel/process.c +++ b/arch/ia64/kernel/process.c | |||
@@ -413,7 +413,7 @@ ia64_load_extra (struct task_struct *task) | |||
413 | * so there is nothing to worry about. | 413 | * so there is nothing to worry about. |
414 | */ | 414 | */ |
415 | int | 415 | int |
416 | copy_thread (int nr, unsigned long clone_flags, | 416 | copy_thread(unsigned long clone_flags, |
417 | unsigned long user_stack_base, unsigned long user_stack_size, | 417 | unsigned long user_stack_base, unsigned long user_stack_size, |
418 | struct task_struct *p, struct pt_regs *regs) | 418 | struct task_struct *p, struct pt_regs *regs) |
419 | { | 419 | { |
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c index ecb9eb78d687..7053c55b7649 100644 --- a/arch/ia64/kernel/salinfo.c +++ b/arch/ia64/kernel/salinfo.c | |||
@@ -317,7 +317,7 @@ retry: | |||
317 | } | 317 | } |
318 | 318 | ||
319 | n = data->cpu_check; | 319 | n = data->cpu_check; |
320 | for (i = 0; i < NR_CPUS; i++) { | 320 | for (i = 0; i < nr_cpu_ids; i++) { |
321 | if (cpu_isset(n, data->cpu_event)) { | 321 | if (cpu_isset(n, data->cpu_event)) { |
322 | if (!cpu_online(n)) { | 322 | if (!cpu_online(n)) { |
323 | cpu_clear(n, data->cpu_event); | 323 | cpu_clear(n, data->cpu_event); |
@@ -326,7 +326,7 @@ retry: | |||
326 | cpu = n; | 326 | cpu = n; |
327 | break; | 327 | break; |
328 | } | 328 | } |
329 | if (++n == NR_CPUS) | 329 | if (++n == nr_cpu_ids) |
330 | n = 0; | 330 | n = 0; |
331 | } | 331 | } |
332 | 332 | ||
@@ -337,7 +337,7 @@ retry: | |||
337 | 337 | ||
338 | /* for next read, start checking at next CPU */ | 338 | /* for next read, start checking at next CPU */ |
339 | data->cpu_check = cpu; | 339 | data->cpu_check = cpu; |
340 | if (++data->cpu_check == NR_CPUS) | 340 | if (++data->cpu_check == nr_cpu_ids) |
341 | data->cpu_check = 0; | 341 | data->cpu_check = 0; |
342 | 342 | ||
343 | snprintf(cmd, sizeof(cmd), "read %d\n", cpu); | 343 | snprintf(cmd, sizeof(cmd), "read %d\n", cpu); |
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c index 865af27c7737..714066aeda7f 100644 --- a/arch/ia64/kernel/setup.c +++ b/arch/ia64/kernel/setup.c | |||
@@ -52,6 +52,7 @@ | |||
52 | #include <asm/meminit.h> | 52 | #include <asm/meminit.h> |
53 | #include <asm/page.h> | 53 | #include <asm/page.h> |
54 | #include <asm/paravirt.h> | 54 | #include <asm/paravirt.h> |
55 | #include <asm/paravirt_patch.h> | ||
55 | #include <asm/patch.h> | 56 | #include <asm/patch.h> |
56 | #include <asm/pgtable.h> | 57 | #include <asm/pgtable.h> |
57 | #include <asm/processor.h> | 58 | #include <asm/processor.h> |
@@ -537,6 +538,7 @@ setup_arch (char **cmdline_p) | |||
537 | paravirt_arch_setup_early(); | 538 | paravirt_arch_setup_early(); |
538 | 539 | ||
539 | ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist); | 540 | ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist); |
541 | paravirt_patch_apply(); | ||
540 | 542 | ||
541 | *cmdline_p = __va(ia64_boot_param->command_line); | 543 | *cmdline_p = __va(ia64_boot_param->command_line); |
542 | strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE); | 544 | strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE); |
@@ -730,10 +732,10 @@ static void * | |||
730 | c_start (struct seq_file *m, loff_t *pos) | 732 | c_start (struct seq_file *m, loff_t *pos) |
731 | { | 733 | { |
732 | #ifdef CONFIG_SMP | 734 | #ifdef CONFIG_SMP |
733 | while (*pos < NR_CPUS && !cpu_isset(*pos, cpu_online_map)) | 735 | while (*pos < nr_cpu_ids && !cpu_online(*pos)) |
734 | ++*pos; | 736 | ++*pos; |
735 | #endif | 737 | #endif |
736 | return *pos < NR_CPUS ? cpu_data(*pos) : NULL; | 738 | return *pos < nr_cpu_ids ? cpu_data(*pos) : NULL; |
737 | } | 739 | } |
738 | 740 | ||
739 | static void * | 741 | static void * |
@@ -1016,8 +1018,7 @@ cpu_init (void) | |||
1016 | | IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC)); | 1018 | | IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC)); |
1017 | atomic_inc(&init_mm.mm_count); | 1019 | atomic_inc(&init_mm.mm_count); |
1018 | current->active_mm = &init_mm; | 1020 | current->active_mm = &init_mm; |
1019 | if (current->mm) | 1021 | BUG_ON(current->mm); |
1020 | BUG(); | ||
1021 | 1022 | ||
1022 | ia64_mmu_init(ia64_imva(cpu_data)); | 1023 | ia64_mmu_init(ia64_imva(cpu_data)); |
1023 | ia64_mca_cpu_init(ia64_imva(cpu_data)); | 1024 | ia64_mca_cpu_init(ia64_imva(cpu_data)); |
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c index da8f020d82c1..2ea4199d9c57 100644 --- a/arch/ia64/kernel/smp.c +++ b/arch/ia64/kernel/smp.c | |||
@@ -166,11 +166,11 @@ send_IPI_allbutself (int op) | |||
166 | * Called with preemption disabled. | 166 | * Called with preemption disabled. |
167 | */ | 167 | */ |
168 | static inline void | 168 | static inline void |
169 | send_IPI_mask(cpumask_t mask, int op) | 169 | send_IPI_mask(const struct cpumask *mask, int op) |
170 | { | 170 | { |
171 | unsigned int cpu; | 171 | unsigned int cpu; |
172 | 172 | ||
173 | for_each_cpu_mask(cpu, mask) { | 173 | for_each_cpu(cpu, mask) { |
174 | send_IPI_single(cpu, op); | 174 | send_IPI_single(cpu, op); |
175 | } | 175 | } |
176 | } | 176 | } |
@@ -316,7 +316,7 @@ void arch_send_call_function_single_ipi(int cpu) | |||
316 | send_IPI_single(cpu, IPI_CALL_FUNC_SINGLE); | 316 | send_IPI_single(cpu, IPI_CALL_FUNC_SINGLE); |
317 | } | 317 | } |
318 | 318 | ||
319 | void arch_send_call_function_ipi(cpumask_t mask) | 319 | void arch_send_call_function_ipi_mask(const struct cpumask *mask) |
320 | { | 320 | { |
321 | send_IPI_mask(mask, IPI_CALL_FUNC); | 321 | send_IPI_mask(mask, IPI_CALL_FUNC); |
322 | } | 322 | } |
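The smp.c hunk moves the IPI path from a by-value cpumask_t walked with for_each_cpu_mask() to a const struct cpumask * walked with for_each_cpu(). A minimal sketch of the new convention; send_one_ipi() is a hypothetical stand-in for the arch-specific send_IPI_single():

#include <linux/cpumask.h>

void send_one_ipi(unsigned int cpu, int op);	/* assumed arch helper */

static void send_ipi_to_mask(const struct cpumask *mask, int op)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)		/* iterates only the bits set in *mask */
		send_one_ipi(cpu, op);
}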
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c index 52290547c85b..7700e23034bb 100644 --- a/arch/ia64/kernel/smpboot.c +++ b/arch/ia64/kernel/smpboot.c | |||
@@ -581,14 +581,14 @@ smp_build_cpu_map (void) | |||
581 | 581 | ||
582 | ia64_cpu_to_sapicid[0] = boot_cpu_id; | 582 | ia64_cpu_to_sapicid[0] = boot_cpu_id; |
583 | cpus_clear(cpu_present_map); | 583 | cpus_clear(cpu_present_map); |
584 | cpu_set(0, cpu_present_map); | 584 | set_cpu_present(0, true); |
585 | cpu_set(0, cpu_possible_map); | 585 | set_cpu_possible(0, true); |
586 | for (cpu = 1, i = 0; i < smp_boot_data.cpu_count; i++) { | 586 | for (cpu = 1, i = 0; i < smp_boot_data.cpu_count; i++) { |
587 | sapicid = smp_boot_data.cpu_phys_id[i]; | 587 | sapicid = smp_boot_data.cpu_phys_id[i]; |
588 | if (sapicid == boot_cpu_id) | 588 | if (sapicid == boot_cpu_id) |
589 | continue; | 589 | continue; |
590 | cpu_set(cpu, cpu_present_map); | 590 | set_cpu_present(cpu, true); |
591 | cpu_set(cpu, cpu_possible_map); | 591 | set_cpu_possible(cpu, true); |
592 | ia64_cpu_to_sapicid[cpu] = sapicid; | 592 | ia64_cpu_to_sapicid[cpu] = sapicid; |
593 | cpu++; | 593 | cpu++; |
594 | } | 594 | } |
@@ -626,12 +626,9 @@ smp_prepare_cpus (unsigned int max_cpus) | |||
626 | */ | 626 | */ |
627 | if (!max_cpus) { | 627 | if (!max_cpus) { |
628 | printk(KERN_INFO "SMP mode deactivated.\n"); | 628 | printk(KERN_INFO "SMP mode deactivated.\n"); |
629 | cpus_clear(cpu_online_map); | 629 | init_cpu_online(cpumask_of(0)); |
630 | cpus_clear(cpu_present_map); | 630 | init_cpu_present(cpumask_of(0)); |
631 | cpus_clear(cpu_possible_map); | 631 | init_cpu_possible(cpumask_of(0)); |
632 | cpu_set(0, cpu_online_map); | ||
633 | cpu_set(0, cpu_present_map); | ||
634 | cpu_set(0, cpu_possible_map); | ||
635 | return; | 632 | return; |
636 | } | 633 | } |
637 | } | 634 | } |
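The smpboot.c changes stop writing cpu_present_map / cpu_possible_map directly: individual CPUs are registered with set_cpu_present()/set_cpu_possible(), and the !max_cpus case collapses all three maps to CPU 0 via init_cpu_*(cpumask_of(0)). A condensed sketch of the same shape (register_boot_cpus() is an illustrative wrapper, not a kernel function):

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/cpumask.h>

static void __init register_boot_cpus(int count, int smp_disabled)
{
	int cpu;

	if (smp_disabled) {
		/* single-CPU mode: every map contains exactly CPU 0 */
		init_cpu_online(cpumask_of(0));
		init_cpu_present(cpumask_of(0));
		init_cpu_possible(cpumask_of(0));
		return;
	}

	for (cpu = 0; cpu < count; cpu++) {
		set_cpu_present(cpu, true);
		set_cpu_possible(cpu, true);
	}
}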
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c index d6747bae52d8..641c8b61c4f1 100644 --- a/arch/ia64/kernel/time.c +++ b/arch/ia64/kernel/time.c | |||
@@ -51,6 +51,15 @@ EXPORT_SYMBOL(last_cli_ip); | |||
51 | #endif | 51 | #endif |
52 | 52 | ||
53 | #ifdef CONFIG_PARAVIRT | 53 | #ifdef CONFIG_PARAVIRT |
54 | /* We need to define a real function for sched_clock, to override the | ||
55 | weak default version */ | ||
56 | unsigned long long sched_clock(void) | ||
57 | { | ||
58 | return paravirt_sched_clock(); | ||
59 | } | ||
60 | #endif | ||
61 | |||
62 | #ifdef CONFIG_PARAVIRT | ||
54 | static void | 63 | static void |
55 | paravirt_clocksource_resume(void) | 64 | paravirt_clocksource_resume(void) |
56 | { | 65 | { |
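The time.c hunk works because the generic kernel provides sched_clock() as a weak symbol, so any non-weak definition in arch code wins at link time. A minimal two-file sketch of that mechanism (read_arch_clock_ns() is a hypothetical stand-in for paravirt_sched_clock(); the weak body shown is a placeholder, not the kernel's actual fallback):

/* generic_clock.c: the default the kernel ships, marked weak */
unsigned long long __attribute__((weak)) sched_clock(void)
{
	return 0;	/* placeholder body for the sketch */
}

/* arch_clock.c: a plain (strong) definition; the linker prefers it */
extern unsigned long long read_arch_clock_ns(void);	/* hypothetical */

unsigned long long sched_clock(void)
{
	return read_arch_clock_ns();
}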
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S index 3765efc5f963..4a95e86b9ac2 100644 --- a/arch/ia64/kernel/vmlinux.lds.S +++ b/arch/ia64/kernel/vmlinux.lds.S | |||
@@ -169,6 +169,30 @@ SECTIONS | |||
169 | __end___mckinley_e9_bundles = .; | 169 | __end___mckinley_e9_bundles = .; |
170 | } | 170 | } |
171 | 171 | ||
172 | #if defined(CONFIG_PARAVIRT) | ||
173 | . = ALIGN(16); | ||
174 | .paravirt_bundles : AT(ADDR(.paravirt_bundles) - LOAD_OFFSET) | ||
175 | { | ||
176 | __start_paravirt_bundles = .; | ||
177 | *(.paravirt_bundles) | ||
178 | __stop_paravirt_bundles = .; | ||
179 | } | ||
180 | . = ALIGN(16); | ||
181 | .paravirt_insts : AT(ADDR(.paravirt_insts) - LOAD_OFFSET) | ||
182 | { | ||
183 | __start_paravirt_insts = .; | ||
184 | *(.paravirt_insts) | ||
185 | __stop_paravirt_insts = .; | ||
186 | } | ||
187 | . = ALIGN(16); | ||
188 | .paravirt_branches : AT(ADDR(.paravirt_branches) - LOAD_OFFSET) | ||
189 | { | ||
190 | __start_paravirt_branches = .; | ||
191 | *(.paravirt_branches) | ||
192 | __stop_paravirt_branches = .; | ||
193 | } | ||
194 | #endif | ||
195 | |||
172 | #if defined(CONFIG_IA64_GENERIC) | 196 | #if defined(CONFIG_IA64_GENERIC) |
173 | /* Machine Vector */ | 197 | /* Machine Vector */ |
174 | . = ALIGN(16); | 198 | . = ALIGN(16); |
@@ -201,6 +225,12 @@ SECTIONS | |||
201 | __start_gate_section = .; | 225 | __start_gate_section = .; |
202 | *(.data.gate) | 226 | *(.data.gate) |
203 | __stop_gate_section = .; | 227 | __stop_gate_section = .; |
228 | #ifdef CONFIG_XEN | ||
229 | . = ALIGN(PAGE_SIZE); | ||
230 | __xen_start_gate_section = .; | ||
231 | *(.data.gate.xen) | ||
232 | __xen_stop_gate_section = .; | ||
233 | #endif | ||
204 | } | 234 | } |
205 | . = ALIGN(PAGE_SIZE); /* make sure the gate page doesn't expose | 235 | . = ALIGN(PAGE_SIZE); /* make sure the gate page doesn't expose |
206 | * kernel data | 236 | * kernel data |
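Each new linker-script section above follows the usual pattern: the script defines __start_*/__stop_* symbols around the collected input sections, and C code walks the region between them. An illustrative consumer sketch (the 16-byte bundle stride is ia64-specific; apply_one_bundle() is a hypothetical per-entry hook, not a function from this series):

#include <linux/init.h>

extern const unsigned long __start_paravirt_bundles[];
extern const unsigned long __stop_paravirt_bundles[];

void apply_one_bundle(const unsigned long *bundle);	/* hypothetical */

static void __init walk_paravirt_bundles(void)
{
	const unsigned long *p;

	/* an ia64 bundle is 16 bytes, i.e. two 64-bit words per entry */
	for (p = __start_paravirt_bundles; p < __stop_paravirt_bundles; p += 2)
		apply_one_bundle(p);
}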
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c index 076b00d1dbff..28af6a731bb8 100644 --- a/arch/ia64/kvm/kvm-ia64.c +++ b/arch/ia64/kvm/kvm-ia64.c | |||
@@ -70,7 +70,7 @@ static void kvm_flush_icache(unsigned long start, unsigned long len) | |||
70 | int l; | 70 | int l; |
71 | 71 | ||
72 | for (l = 0; l < (len + 32); l += 32) | 72 | for (l = 0; l < (len + 32); l += 32) |
73 | ia64_fc(start + l); | 73 | ia64_fc((void *)(start + l)); |
74 | 74 | ||
75 | ia64_sync_i(); | 75 | ia64_sync_i(); |
76 | ia64_srlz_i(); | 76 | ia64_srlz_i(); |
diff --git a/arch/ia64/kvm/vcpu.c b/arch/ia64/kvm/vcpu.c index d4d280505878..a18ee17b9192 100644 --- a/arch/ia64/kvm/vcpu.c +++ b/arch/ia64/kvm/vcpu.c | |||
@@ -386,7 +386,7 @@ void set_rse_reg(struct kvm_pt_regs *regs, unsigned long r1, | |||
386 | else | 386 | else |
387 | *rnat_addr = (*rnat_addr) & (~nat_mask); | 387 | *rnat_addr = (*rnat_addr) & (~nat_mask); |
388 | 388 | ||
389 | ia64_setreg(_IA64_REG_AR_BSPSTORE, bspstore); | 389 | ia64_setreg(_IA64_REG_AR_BSPSTORE, (unsigned long)bspstore); |
390 | ia64_setreg(_IA64_REG_AR_RNAT, rnat); | 390 | ia64_setreg(_IA64_REG_AR_RNAT, rnat); |
391 | } | 391 | } |
392 | local_irq_restore(psr); | 392 | local_irq_restore(psr); |
diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c index 38232b37668b..2c2501f13159 100644 --- a/arch/ia64/kvm/vtlb.c +++ b/arch/ia64/kvm/vtlb.c | |||
@@ -210,6 +210,7 @@ void thash_vhpt_insert(struct kvm_vcpu *v, u64 pte, u64 itir, u64 va, int type) | |||
210 | phy_pte &= ~PAGE_FLAGS_RV_MASK; | 210 | phy_pte &= ~PAGE_FLAGS_RV_MASK; |
211 | psr = ia64_clear_ic(); | 211 | psr = ia64_clear_ic(); |
212 | ia64_itc(type, va, phy_pte, itir_ps(itir)); | 212 | ia64_itc(type, va, phy_pte, itir_ps(itir)); |
213 | paravirt_dv_serialize_data(); | ||
213 | ia64_set_psr(psr); | 214 | ia64_set_psr(psr); |
214 | } | 215 | } |
215 | 216 | ||
@@ -456,6 +457,7 @@ void thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir, | |||
456 | phy_pte &= ~PAGE_FLAGS_RV_MASK; | 457 | phy_pte &= ~PAGE_FLAGS_RV_MASK; |
457 | psr = ia64_clear_ic(); | 458 | psr = ia64_clear_ic(); |
458 | ia64_itc(type, ifa, phy_pte, ps); | 459 | ia64_itc(type, ifa, phy_pte, ps); |
460 | paravirt_dv_serialize_data(); | ||
459 | ia64_set_psr(psr); | 461 | ia64_set_psr(psr); |
460 | } | 462 | } |
461 | if (!(pte&VTLB_PTE_IO)) | 463 | if (!(pte&VTLB_PTE_IO)) |
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index 56e12903973c..c0f3bee69042 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <asm/uaccess.h> | 35 | #include <asm/uaccess.h> |
36 | #include <asm/unistd.h> | 36 | #include <asm/unistd.h> |
37 | #include <asm/mca.h> | 37 | #include <asm/mca.h> |
38 | #include <asm/paravirt.h> | ||
38 | 39 | ||
39 | DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); | 40 | DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); |
40 | 41 | ||
@@ -259,6 +260,7 @@ put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot) | |||
259 | static void __init | 260 | static void __init |
260 | setup_gate (void) | 261 | setup_gate (void) |
261 | { | 262 | { |
263 | void *gate_section; | ||
262 | struct page *page; | 264 | struct page *page; |
263 | 265 | ||
264 | /* | 266 | /* |
@@ -266,10 +268,11 @@ setup_gate (void) | |||
266 | * headers etc. and once execute-only page to enable | 268 | * headers etc. and once execute-only page to enable |
267 | * privilege-promotion via "epc": | 269 | * privilege-promotion via "epc": |
268 | */ | 270 | */ |
269 | page = virt_to_page(ia64_imva(__start_gate_section)); | 271 | gate_section = paravirt_get_gate_section(); |
272 | page = virt_to_page(ia64_imva(gate_section)); | ||
270 | put_kernel_page(page, GATE_ADDR, PAGE_READONLY); | 273 | put_kernel_page(page, GATE_ADDR, PAGE_READONLY); |
271 | #ifdef HAVE_BUGGY_SEGREL | 274 | #ifdef HAVE_BUGGY_SEGREL |
272 | page = virt_to_page(ia64_imva(__start_gate_section + PAGE_SIZE)); | 275 | page = virt_to_page(ia64_imva(gate_section + PAGE_SIZE)); |
273 | put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE); | 276 | put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE); |
274 | #else | 277 | #else |
275 | put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE); | 278 | put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE); |
@@ -633,8 +636,7 @@ mem_init (void) | |||
633 | #endif | 636 | #endif |
634 | 637 | ||
635 | #ifdef CONFIG_FLATMEM | 638 | #ifdef CONFIG_FLATMEM |
636 | if (!mem_map) | 639 | BUG_ON(!mem_map); |
637 | BUG(); | ||
638 | max_mapnr = max_low_pfn; | 640 | max_mapnr = max_low_pfn; |
639 | #endif | 641 | #endif |
640 | 642 | ||
@@ -667,8 +669,8 @@ mem_init (void) | |||
667 | * code can tell them apart. | 669 | * code can tell them apart. |
668 | */ | 670 | */ |
669 | for (i = 0; i < NR_syscalls; ++i) { | 671 | for (i = 0; i < NR_syscalls; ++i) { |
670 | extern unsigned long fsyscall_table[NR_syscalls]; | ||
671 | extern unsigned long sys_call_table[NR_syscalls]; | 672 | extern unsigned long sys_call_table[NR_syscalls]; |
673 | unsigned long *fsyscall_table = paravirt_get_fsyscall_table(); | ||
672 | 674 | ||
673 | if (!fsyscall_table[i] || nolwsys) | 675 | if (!fsyscall_table[i] || nolwsys) |
674 | fsyscall_table[i] = sys_call_table[i] | 1; | 676 | fsyscall_table[i] = sys_call_table[i] | 1; |
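The BUG_ON() conversion in mem_init() recurs throughout the rest of this series (io_common.c, io_init.c, sn/kernel/setup.c, sn_hwperf.c, pcibr_dma.c): an open-coded "if (!p) BUG();" becomes the one-line assertion BUG_ON(!p) with identical behaviour. Side by side, as a sketch:

#include <linux/bug.h>

/* before: condition and trap on separate lines */
void check_old(void *p)
{
	if (!p)
		BUG();
}

/* after: the same trap, written as an assertion */
void check_new(void *p)
{
	BUG_ON(!p);
}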
diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c index bd9818a36b47..b9f3d7bbb338 100644 --- a/arch/ia64/mm/tlb.c +++ b/arch/ia64/mm/tlb.c | |||
@@ -309,7 +309,7 @@ flush_tlb_range (struct vm_area_struct *vma, unsigned long start, | |||
309 | 309 | ||
310 | preempt_disable(); | 310 | preempt_disable(); |
311 | #ifdef CONFIG_SMP | 311 | #ifdef CONFIG_SMP |
312 | if (mm != current->active_mm || cpus_weight(mm->cpu_vm_mask) != 1) { | 312 | if (mm != current->active_mm || cpumask_weight(mm_cpumask(mm)) != 1) { |
313 | platform_global_tlb_purge(mm, start, end, nbits); | 313 | platform_global_tlb_purge(mm, start, end, nbits); |
314 | preempt_enable(); | 314 | preempt_enable(); |
315 | return; | 315 | return; |
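flush_tlb_range() now reaches the mask through the mm_cpumask() accessor instead of touching mm->cpu_vm_mask directly, which yields a struct cpumask * that the cpumask_* helpers accept. A minimal sketch of the test it performs:

#include <linux/mm_types.h>
#include <linux/cpumask.h>

/* true when the mm has been active on exactly one CPU */
static int mm_on_single_cpu(struct mm_struct *mm)
{
	return cpumask_weight(mm_cpumask(mm)) == 1;
}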
diff --git a/arch/ia64/scripts/pvcheck.sed b/arch/ia64/scripts/pvcheck.sed index ba66ac2e4c60..e59809a3fc01 100644 --- a/arch/ia64/scripts/pvcheck.sed +++ b/arch/ia64/scripts/pvcheck.sed | |||
@@ -17,6 +17,7 @@ s/mov.*=.*cr\.iip/.warning \"cr.iip should not used directly\"/g | |||
17 | s/mov.*=.*cr\.ivr/.warning \"cr.ivr should not used directly\"/g | 17 | s/mov.*=.*cr\.ivr/.warning \"cr.ivr should not used directly\"/g |
18 | s/mov.*=[^\.]*psr/.warning \"psr should not used directly\"/g # avoid ar.fpsr | 18 | s/mov.*=[^\.]*psr/.warning \"psr should not used directly\"/g # avoid ar.fpsr |
19 | s/mov.*=.*ar\.eflags/.warning \"ar.eflags should not used directly\"/g | 19 | s/mov.*=.*ar\.eflags/.warning \"ar.eflags should not used directly\"/g |
20 | s/mov.*=.*ar\.itc.*/.warning \"ar.itc should not used directly\"/g | ||
20 | s/mov.*cr\.ifa.*=.*/.warning \"cr.ifa should not used directly\"/g | 21 | s/mov.*cr\.ifa.*=.*/.warning \"cr.ifa should not used directly\"/g |
21 | s/mov.*cr\.itir.*=.*/.warning \"cr.itir should not used directly\"/g | 22 | s/mov.*cr\.itir.*=.*/.warning \"cr.itir should not used directly\"/g |
22 | s/mov.*cr\.iha.*=.*/.warning \"cr.iha should not used directly\"/g | 23 | s/mov.*cr\.iha.*=.*/.warning \"cr.iha should not used directly\"/g |
diff --git a/arch/ia64/sn/kernel/io_common.c b/arch/ia64/sn/kernel/io_common.c index 0d4ffa4da1da..57f280dd9def 100644 --- a/arch/ia64/sn/kernel/io_common.c +++ b/arch/ia64/sn/kernel/io_common.c | |||
@@ -135,8 +135,7 @@ static s64 sn_device_fixup_war(u64 nasid, u64 widget, int device, | |||
135 | } | 135 | } |
136 | 136 | ||
137 | war_list = kzalloc(DEV_PER_WIDGET * sizeof(*war_list), GFP_KERNEL); | 137 | war_list = kzalloc(DEV_PER_WIDGET * sizeof(*war_list), GFP_KERNEL); |
138 | if (!war_list) | 138 | BUG_ON(!war_list); |
139 | BUG(); | ||
140 | 139 | ||
141 | SAL_CALL_NOLOCK(isrv, SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST, | 140 | SAL_CALL_NOLOCK(isrv, SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST, |
142 | nasid, widget, __pa(war_list), 0, 0, 0 ,0); | 141 | nasid, widget, __pa(war_list), 0, 0, 0 ,0); |
@@ -180,23 +179,20 @@ sn_common_hubdev_init(struct hubdev_info *hubdev) | |||
180 | sizeof(struct sn_flush_device_kernel *); | 179 | sizeof(struct sn_flush_device_kernel *); |
181 | hubdev->hdi_flush_nasid_list.widget_p = | 180 | hubdev->hdi_flush_nasid_list.widget_p = |
182 | kzalloc(size, GFP_KERNEL); | 181 | kzalloc(size, GFP_KERNEL); |
183 | if (!hubdev->hdi_flush_nasid_list.widget_p) | 182 | BUG_ON(!hubdev->hdi_flush_nasid_list.widget_p); |
184 | BUG(); | ||
185 | 183 | ||
186 | for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++) { | 184 | for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++) { |
187 | size = DEV_PER_WIDGET * | 185 | size = DEV_PER_WIDGET * |
188 | sizeof(struct sn_flush_device_kernel); | 186 | sizeof(struct sn_flush_device_kernel); |
189 | sn_flush_device_kernel = kzalloc(size, GFP_KERNEL); | 187 | sn_flush_device_kernel = kzalloc(size, GFP_KERNEL); |
190 | if (!sn_flush_device_kernel) | 188 | BUG_ON(!sn_flush_device_kernel); |
191 | BUG(); | ||
192 | 189 | ||
193 | dev_entry = sn_flush_device_kernel; | 190 | dev_entry = sn_flush_device_kernel; |
194 | for (device = 0; device < DEV_PER_WIDGET; | 191 | for (device = 0; device < DEV_PER_WIDGET; |
195 | device++, dev_entry++) { | 192 | device++, dev_entry++) { |
196 | size = sizeof(struct sn_flush_device_common); | 193 | size = sizeof(struct sn_flush_device_common); |
197 | dev_entry->common = kzalloc(size, GFP_KERNEL); | 194 | dev_entry->common = kzalloc(size, GFP_KERNEL); |
198 | if (!dev_entry->common) | 195 | BUG_ON(!dev_entry->common); |
199 | BUG(); | ||
200 | if (sn_prom_feature_available(PRF_DEVICE_FLUSH_LIST)) | 196 | if (sn_prom_feature_available(PRF_DEVICE_FLUSH_LIST)) |
201 | status = sal_get_device_dmaflush_list( | 197 | status = sal_get_device_dmaflush_list( |
202 | hubdev->hdi_nasid, widget, device, | 198 | hubdev->hdi_nasid, widget, device, |
@@ -326,8 +322,7 @@ sn_common_bus_fixup(struct pci_bus *bus, | |||
326 | */ | 322 | */ |
327 | controller->platform_data = kzalloc(sizeof(struct sn_platform_data), | 323 | controller->platform_data = kzalloc(sizeof(struct sn_platform_data), |
328 | GFP_KERNEL); | 324 | GFP_KERNEL); |
329 | if (controller->platform_data == NULL) | 325 | BUG_ON(controller->platform_data == NULL); |
330 | BUG(); | ||
331 | sn_platform_data = | 326 | sn_platform_data = |
332 | (struct sn_platform_data *) controller->platform_data; | 327 | (struct sn_platform_data *) controller->platform_data; |
333 | sn_platform_data->provider_soft = provider_soft; | 328 | sn_platform_data->provider_soft = provider_soft; |
diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c index e2eb2da60f96..ee774c366a06 100644 --- a/arch/ia64/sn/kernel/io_init.c +++ b/arch/ia64/sn/kernel/io_init.c | |||
@@ -128,8 +128,7 @@ sn_legacy_pci_window_fixup(struct pci_controller *controller, | |||
128 | { | 128 | { |
129 | controller->window = kcalloc(2, sizeof(struct pci_window), | 129 | controller->window = kcalloc(2, sizeof(struct pci_window), |
130 | GFP_KERNEL); | 130 | GFP_KERNEL); |
131 | if (controller->window == NULL) | 131 | BUG_ON(controller->window == NULL); |
132 | BUG(); | ||
133 | controller->window[0].offset = legacy_io; | 132 | controller->window[0].offset = legacy_io; |
134 | controller->window[0].resource.name = "legacy_io"; | 133 | controller->window[0].resource.name = "legacy_io"; |
135 | controller->window[0].resource.flags = IORESOURCE_IO; | 134 | controller->window[0].resource.flags = IORESOURCE_IO; |
@@ -168,8 +167,7 @@ sn_pci_window_fixup(struct pci_dev *dev, unsigned int count, | |||
168 | idx = controller->windows; | 167 | idx = controller->windows; |
169 | new_count = controller->windows + count; | 168 | new_count = controller->windows + count; |
170 | new_window = kcalloc(new_count, sizeof(struct pci_window), GFP_KERNEL); | 169 | new_window = kcalloc(new_count, sizeof(struct pci_window), GFP_KERNEL); |
171 | if (new_window == NULL) | 170 | BUG_ON(new_window == NULL); |
172 | BUG(); | ||
173 | if (controller->window) { | 171 | if (controller->window) { |
174 | memcpy(new_window, controller->window, | 172 | memcpy(new_window, controller->window, |
175 | sizeof(struct pci_window) * controller->windows); | 173 | sizeof(struct pci_window) * controller->windows); |
@@ -222,8 +220,7 @@ sn_io_slot_fixup(struct pci_dev *dev) | |||
222 | (u64) __pa(pcidev_info), | 220 | (u64) __pa(pcidev_info), |
223 | (u64) __pa(sn_irq_info)); | 221 | (u64) __pa(sn_irq_info)); |
224 | 222 | ||
225 | if (status) | 223 | BUG_ON(status); /* Cannot get platform pci device information */ |
226 | BUG(); /* Cannot get platform pci device information */ | ||
227 | 224 | ||
228 | 225 | ||
229 | /* Copy over PIO Mapped Addresses */ | 226 | /* Copy over PIO Mapped Addresses */ |
@@ -307,8 +304,7 @@ sn_pci_controller_fixup(int segment, int busnum, struct pci_bus *bus) | |||
307 | prom_bussoft_ptr = __va(prom_bussoft_ptr); | 304 | prom_bussoft_ptr = __va(prom_bussoft_ptr); |
308 | 305 | ||
309 | controller = kzalloc(sizeof(*controller), GFP_KERNEL); | 306 | controller = kzalloc(sizeof(*controller), GFP_KERNEL); |
310 | if (!controller) | 307 | BUG_ON(!controller); |
311 | BUG(); | ||
312 | controller->segment = segment; | 308 | controller->segment = segment; |
313 | 309 | ||
314 | /* | 310 | /* |
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c index 02c5b8a9fb60..e456f062f241 100644 --- a/arch/ia64/sn/kernel/setup.c +++ b/arch/ia64/sn/kernel/setup.c | |||
@@ -732,8 +732,7 @@ void __init build_cnode_tables(void) | |||
732 | kl_config_hdr_t *klgraph_header; | 732 | kl_config_hdr_t *klgraph_header; |
733 | nasid = cnodeid_to_nasid(node); | 733 | nasid = cnodeid_to_nasid(node); |
734 | klgraph_header = ia64_sn_get_klconfig_addr(nasid); | 734 | klgraph_header = ia64_sn_get_klconfig_addr(nasid); |
735 | if (klgraph_header == NULL) | 735 | BUG_ON(klgraph_header == NULL); |
736 | BUG(); | ||
737 | brd = NODE_OFFSET_TO_LBOARD(nasid, klgraph_header->ch_board_info); | 736 | brd = NODE_OFFSET_TO_LBOARD(nasid, klgraph_header->ch_board_info); |
738 | while (brd) { | 737 | while (brd) { |
739 | if (board_needs_cnode(brd->brd_type) && physical_node_map[brd->brd_nasid] < 0) { | 738 | if (board_needs_cnode(brd->brd_type) && physical_node_map[brd->brd_nasid] < 0) { |
@@ -750,7 +749,7 @@ nasid_slice_to_cpuid(int nasid, int slice) | |||
750 | { | 749 | { |
751 | long cpu; | 750 | long cpu; |
752 | 751 | ||
753 | for (cpu = 0; cpu < NR_CPUS; cpu++) | 752 | for (cpu = 0; cpu < nr_cpu_ids; cpu++) |
754 | if (cpuid_to_nasid(cpu) == nasid && | 753 | if (cpuid_to_nasid(cpu) == nasid && |
755 | cpuid_to_slice(cpu) == slice) | 754 | cpuid_to_slice(cpu) == slice) |
756 | return cpu; | 755 | return cpu; |
diff --git a/arch/ia64/sn/kernel/sn2/sn2_smp.c b/arch/ia64/sn/kernel/sn2/sn2_smp.c index e585f9a2afb9..1176506b2bae 100644 --- a/arch/ia64/sn/kernel/sn2/sn2_smp.c +++ b/arch/ia64/sn/kernel/sn2/sn2_smp.c | |||
@@ -133,7 +133,7 @@ sn2_ipi_flush_all_tlb(struct mm_struct *mm) | |||
133 | unsigned long itc; | 133 | unsigned long itc; |
134 | 134 | ||
135 | itc = ia64_get_itc(); | 135 | itc = ia64_get_itc(); |
136 | smp_flush_tlb_cpumask(mm->cpu_vm_mask); | 136 | smp_flush_tlb_cpumask(*mm_cpumask(mm)); |
137 | itc = ia64_get_itc() - itc; | 137 | itc = ia64_get_itc() - itc; |
138 | __get_cpu_var(ptcstats).shub_ipi_flushes_itc_clocks += itc; | 138 | __get_cpu_var(ptcstats).shub_ipi_flushes_itc_clocks += itc; |
139 | __get_cpu_var(ptcstats).shub_ipi_flushes++; | 139 | __get_cpu_var(ptcstats).shub_ipi_flushes++; |
@@ -182,7 +182,7 @@ sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start, | |||
182 | nodes_clear(nodes_flushed); | 182 | nodes_clear(nodes_flushed); |
183 | i = 0; | 183 | i = 0; |
184 | 184 | ||
185 | for_each_cpu_mask(cpu, mm->cpu_vm_mask) { | 185 | for_each_cpu(cpu, mm_cpumask(mm)) { |
186 | cnode = cpu_to_node(cpu); | 186 | cnode = cpu_to_node(cpu); |
187 | node_set(cnode, nodes_flushed); | 187 | node_set(cnode, nodes_flushed); |
188 | lcpu = cpu; | 188 | lcpu = cpu; |
@@ -461,7 +461,7 @@ bool sn_cpu_disable_allowed(int cpu) | |||
461 | 461 | ||
462 | static void *sn2_ptc_seq_start(struct seq_file *file, loff_t * offset) | 462 | static void *sn2_ptc_seq_start(struct seq_file *file, loff_t * offset) |
463 | { | 463 | { |
464 | if (*offset < NR_CPUS) | 464 | if (*offset < nr_cpu_ids) |
465 | return offset; | 465 | return offset; |
466 | return NULL; | 466 | return NULL; |
467 | } | 467 | } |
@@ -469,7 +469,7 @@ static void *sn2_ptc_seq_start(struct seq_file *file, loff_t * offset) | |||
469 | static void *sn2_ptc_seq_next(struct seq_file *file, void *data, loff_t * offset) | 469 | static void *sn2_ptc_seq_next(struct seq_file *file, void *data, loff_t * offset) |
470 | { | 470 | { |
471 | (*offset)++; | 471 | (*offset)++; |
472 | if (*offset < NR_CPUS) | 472 | if (*offset < nr_cpu_ids) |
473 | return offset; | 473 | return offset; |
474 | return NULL; | 474 | return NULL; |
475 | } | 475 | } |
@@ -491,7 +491,7 @@ static int sn2_ptc_seq_show(struct seq_file *file, void *data) | |||
491 | seq_printf(file, "# ptctest %d, flushopt %d\n", sn2_ptctest, sn2_flush_opt); | 491 | seq_printf(file, "# ptctest %d, flushopt %d\n", sn2_ptctest, sn2_flush_opt); |
492 | } | 492 | } |
493 | 493 | ||
494 | if (cpu < NR_CPUS && cpu_online(cpu)) { | 494 | if (cpu < nr_cpu_ids && cpu_online(cpu)) { |
495 | stat = &per_cpu(ptcstats, cpu); | 495 | stat = &per_cpu(ptcstats, cpu); |
496 | seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n", cpu, stat->ptc_l, | 496 | seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n", cpu, stat->ptc_l, |
497 | stat->change_rid, stat->shub_ptc_flushes, stat->nodes_flushed, | 497 | stat->change_rid, stat->shub_ptc_flushes, stat->nodes_flushed, |
@@ -554,7 +554,7 @@ static int __init sn2_ptc_init(void) | |||
554 | 554 | ||
555 | proc_sn2_ptc = proc_create(PTC_BASENAME, 0444, | 555 | proc_sn2_ptc = proc_create(PTC_BASENAME, 0444, |
556 | NULL, &proc_sn2_ptc_operations); | 556 | NULL, &proc_sn2_ptc_operations); |
557 | if (!&proc_sn2_ptc_operations) { | 557 | if (!proc_sn2_ptc) { |
558 | printk(KERN_ERR "unable to create %s proc entry", PTC_BASENAME); | 558 | printk(KERN_ERR "unable to create %s proc entry", PTC_BASENAME); |
559 | return -EINVAL; | 559 | return -EINVAL; |
560 | } | 560 | } |
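Besides the nr_cpu_ids conversions, the sn2_ptc_init() hunk fixes a real bug: the old test took the address of the file_operations structure, which is never NULL, instead of checking what proc_create() returned. A sketch of the corrected shape (names are illustrative; the original prints an error and returns -EINVAL):

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>

static const struct file_operations my_ptc_fops;	/* fops body omitted */

static int __init my_ptc_init(void)
{
	struct proc_dir_entry *pde;

	pde = proc_create("sn2_ptc", 0444, NULL, &my_ptc_fops);
	if (!pde)		/* test the returned entry, not &my_ptc_fops */
		return -ENOMEM;
	return 0;
}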
diff --git a/arch/ia64/sn/kernel/sn2/sn_hwperf.c b/arch/ia64/sn/kernel/sn2/sn_hwperf.c index be339477f906..9e6491cf72bd 100644 --- a/arch/ia64/sn/kernel/sn2/sn_hwperf.c +++ b/arch/ia64/sn/kernel/sn2/sn_hwperf.c | |||
@@ -275,8 +275,7 @@ static int sn_hwperf_get_nearest_node_objdata(struct sn_hwperf_object_info *objb | |||
275 | 275 | ||
276 | /* get its interconnect topology */ | 276 | /* get its interconnect topology */ |
277 | sz = op->ports * sizeof(struct sn_hwperf_port_info); | 277 | sz = op->ports * sizeof(struct sn_hwperf_port_info); |
278 | if (sz > sizeof(ptdata)) | 278 | BUG_ON(sz > sizeof(ptdata)); |
279 | BUG(); | ||
280 | e = ia64_sn_hwperf_op(sn_hwperf_master_nasid, | 279 | e = ia64_sn_hwperf_op(sn_hwperf_master_nasid, |
281 | SN_HWPERF_ENUM_PORTS, nodeobj->id, sz, | 280 | SN_HWPERF_ENUM_PORTS, nodeobj->id, sz, |
282 | (u64)&ptdata, 0, 0, NULL); | 281 | (u64)&ptdata, 0, 0, NULL); |
@@ -310,8 +309,7 @@ static int sn_hwperf_get_nearest_node_objdata(struct sn_hwperf_object_info *objb | |||
310 | if (router && (!found_cpu || !found_mem)) { | 309 | if (router && (!found_cpu || !found_mem)) { |
311 | /* search for a node connected to the same router */ | 310 | /* search for a node connected to the same router */ |
312 | sz = router->ports * sizeof(struct sn_hwperf_port_info); | 311 | sz = router->ports * sizeof(struct sn_hwperf_port_info); |
313 | if (sz > sizeof(ptdata)) | 312 | BUG_ON(sz > sizeof(ptdata)); |
314 | BUG(); | ||
315 | e = ia64_sn_hwperf_op(sn_hwperf_master_nasid, | 313 | e = ia64_sn_hwperf_op(sn_hwperf_master_nasid, |
316 | SN_HWPERF_ENUM_PORTS, router->id, sz, | 314 | SN_HWPERF_ENUM_PORTS, router->id, sz, |
317 | (u64)&ptdata, 0, 0, NULL); | 315 | (u64)&ptdata, 0, 0, NULL); |
@@ -612,7 +610,7 @@ static int sn_hwperf_op_cpu(struct sn_hwperf_op_info *op_info) | |||
612 | op_info->a->arg &= SN_HWPERF_ARG_OBJID_MASK; | 610 | op_info->a->arg &= SN_HWPERF_ARG_OBJID_MASK; |
613 | 611 | ||
614 | if (cpu != SN_HWPERF_ARG_ANY_CPU) { | 612 | if (cpu != SN_HWPERF_ARG_ANY_CPU) { |
615 | if (cpu >= NR_CPUS || !cpu_online(cpu)) { | 613 | if (cpu >= nr_cpu_ids || !cpu_online(cpu)) { |
616 | r = -EINVAL; | 614 | r = -EINVAL; |
617 | goto out; | 615 | goto out; |
618 | } | 616 | } |
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_dma.c b/arch/ia64/sn/pci/pcibr/pcibr_dma.c index 060df4aa9916..c659ad5613a0 100644 --- a/arch/ia64/sn/pci/pcibr/pcibr_dma.c +++ b/arch/ia64/sn/pci/pcibr/pcibr_dma.c | |||
@@ -256,9 +256,7 @@ void sn_dma_flush(u64 addr) | |||
256 | 256 | ||
257 | hubinfo = (NODEPDA(nasid_to_cnodeid(nasid)))->pdinfo; | 257 | hubinfo = (NODEPDA(nasid_to_cnodeid(nasid)))->pdinfo; |
258 | 258 | ||
259 | if (!hubinfo) { | 259 | BUG_ON(!hubinfo); |
260 | BUG(); | ||
261 | } | ||
262 | 260 | ||
263 | flush_nasid_list = &hubinfo->hdi_flush_nasid_list; | 261 | flush_nasid_list = &hubinfo->hdi_flush_nasid_list; |
264 | if (flush_nasid_list->widget_p == NULL) | 262 | if (flush_nasid_list->widget_p == NULL) |
diff --git a/arch/ia64/xen/Makefile b/arch/ia64/xen/Makefile index 0ad0224693d9..e6f4a0a74228 100644 --- a/arch/ia64/xen/Makefile +++ b/arch/ia64/xen/Makefile | |||
@@ -3,14 +3,29 @@ | |||
3 | # | 3 | # |
4 | 4 | ||
5 | obj-y := hypercall.o xenivt.o xensetup.o xen_pv_ops.o irq_xen.o \ | 5 | obj-y := hypercall.o xenivt.o xensetup.o xen_pv_ops.o irq_xen.o \ |
6 | hypervisor.o xencomm.o xcom_hcall.o grant-table.o time.o suspend.o | 6 | hypervisor.o xencomm.o xcom_hcall.o grant-table.o time.o suspend.o \ |
7 | gate-data.o | ||
7 | 8 | ||
8 | obj-$(CONFIG_IA64_GENERIC) += machvec.o | 9 | obj-$(CONFIG_IA64_GENERIC) += machvec.o |
9 | 10 | ||
11 | # The gate DSO image is built using a special linker script. | ||
12 | include $(srctree)/arch/ia64/kernel/Makefile.gate | ||
13 | |||
14 | # tell the build these objects are compiled for xen | ||
15 | CPPFLAGS_gate.lds += -D__IA64_GATE_PARAVIRTUALIZED_XEN | ||
16 | AFLAGS_gate.o += -D__IA64_ASM_PARAVIRTUALIZED_XEN -D__IA64_GATE_PARAVIRTUALIZED_XEN | ||
17 | |||
18 | # use the same source file as the native kernel. | ||
19 | $(obj)/gate.o: $(src)/../kernel/gate.S FORCE | ||
20 | $(call if_changed_dep,as_o_S) | ||
21 | $(obj)/gate.lds: $(src)/../kernel/gate.lds.S FORCE | ||
22 | $(call if_changed_dep,cpp_lds_S) | ||
23 | |||
24 | |||
10 | AFLAGS_xenivt.o += -D__IA64_ASM_PARAVIRTUALIZED_XEN | 25 | AFLAGS_xenivt.o += -D__IA64_ASM_PARAVIRTUALIZED_XEN |
11 | 26 | ||
12 | # xen multi compile | 27 | # xen multi compile |
13 | ASM_PARAVIRT_MULTI_COMPILE_SRCS = ivt.S entry.S | 28 | ASM_PARAVIRT_MULTI_COMPILE_SRCS = ivt.S entry.S fsys.S |
14 | ASM_PARAVIRT_OBJS = $(addprefix xen-,$(ASM_PARAVIRT_MULTI_COMPILE_SRCS:.S=.o)) | 29 | ASM_PARAVIRT_OBJS = $(addprefix xen-,$(ASM_PARAVIRT_MULTI_COMPILE_SRCS:.S=.o)) |
15 | obj-y += $(ASM_PARAVIRT_OBJS) | 30 | obj-y += $(ASM_PARAVIRT_OBJS) |
16 | define paravirtualized_xen | 31 | define paravirtualized_xen |
diff --git a/arch/ia64/xen/gate-data.S b/arch/ia64/xen/gate-data.S new file mode 100644 index 000000000000..7d4830afc91d --- /dev/null +++ b/arch/ia64/xen/gate-data.S | |||
@@ -0,0 +1,3 @@ | |||
1 | .section .data.gate.xen, "aw" | ||
2 | |||
3 | .incbin "arch/ia64/xen/gate.so" | ||
diff --git a/arch/ia64/xen/hypercall.S b/arch/ia64/xen/hypercall.S index 45e02bb64a92..e32dae444dd6 100644 --- a/arch/ia64/xen/hypercall.S +++ b/arch/ia64/xen/hypercall.S | |||
@@ -9,6 +9,7 @@ | |||
9 | #include <asm/intrinsics.h> | 9 | #include <asm/intrinsics.h> |
10 | #include <asm/xen/privop.h> | 10 | #include <asm/xen/privop.h> |
11 | 11 | ||
12 | #ifdef __INTEL_COMPILER | ||
12 | /* | 13 | /* |
13 | * Hypercalls without parameter. | 14 | * Hypercalls without parameter. |
14 | */ | 15 | */ |
@@ -72,6 +73,7 @@ GLOBAL_ENTRY(xen_set_rr0_to_rr4) | |||
72 | br.ret.sptk.many rp | 73 | br.ret.sptk.many rp |
73 | ;; | 74 | ;; |
74 | END(xen_set_rr0_to_rr4) | 75 | END(xen_set_rr0_to_rr4) |
76 | #endif | ||
75 | 77 | ||
76 | GLOBAL_ENTRY(xen_send_ipi) | 78 | GLOBAL_ENTRY(xen_send_ipi) |
77 | mov r14=r32 | 79 | mov r14=r32 |
diff --git a/arch/ia64/xen/time.c b/arch/ia64/xen/time.c index 68d6204c3f16..fb8332690179 100644 --- a/arch/ia64/xen/time.c +++ b/arch/ia64/xen/time.c | |||
@@ -175,10 +175,58 @@ static void xen_itc_jitter_data_reset(void) | |||
175 | } while (unlikely(ret != lcycle)); | 175 | } while (unlikely(ret != lcycle)); |
176 | } | 176 | } |
177 | 177 | ||
178 | /* based on xen_sched_clock() in arch/x86/xen/time.c. */ | ||
179 | /* | ||
180 | * This relies on HAVE_UNSTABLE_SCHED_CLOCK. If it can't be defined, | ||
181 | * something similar logic should be implemented here. | ||
182 | */ | ||
183 | /* | ||
184 | * Xen sched_clock implementation. Returns the number of unstolen | ||
185 | * nanoseconds, which is nanoseconds the VCPU spent in RUNNING+BLOCKED | ||
186 | * states. | ||
187 | */ | ||
188 | static unsigned long long xen_sched_clock(void) | ||
189 | { | ||
190 | struct vcpu_runstate_info runstate; | ||
191 | |||
192 | unsigned long long now; | ||
193 | unsigned long long offset; | ||
194 | unsigned long long ret; | ||
195 | |||
196 | /* | ||
197 | * Ideally sched_clock should be called on a per-cpu basis | ||
198 | * anyway, so preempt should already be disabled, but that's | ||
199 | * not current practice at the moment. | ||
200 | */ | ||
201 | preempt_disable(); | ||
202 | |||
203 | /* | ||
204 | * both ia64_native_sched_clock() and xen's runstate are | ||
205 | * based on the machine AR.ITC, so the difference between them is meaningful. | ||
206 | */ | ||
207 | now = ia64_native_sched_clock(); | ||
208 | |||
209 | get_runstate_snapshot(&runstate); | ||
210 | |||
211 | WARN_ON(runstate.state != RUNSTATE_running); | ||
212 | |||
213 | offset = 0; | ||
214 | if (now > runstate.state_entry_time) | ||
215 | offset = now - runstate.state_entry_time; | ||
216 | ret = runstate.time[RUNSTATE_blocked] + | ||
217 | runstate.time[RUNSTATE_running] + | ||
218 | offset; | ||
219 | |||
220 | preempt_enable(); | ||
221 | |||
222 | return ret; | ||
223 | } | ||
224 | |||
178 | struct pv_time_ops xen_time_ops __initdata = { | 225 | struct pv_time_ops xen_time_ops __initdata = { |
179 | .init_missing_ticks_accounting = xen_init_missing_ticks_accounting, | 226 | .init_missing_ticks_accounting = xen_init_missing_ticks_accounting, |
180 | .do_steal_accounting = xen_do_steal_accounting, | 227 | .do_steal_accounting = xen_do_steal_accounting, |
181 | .clocksource_resume = xen_itc_jitter_data_reset, | 228 | .clocksource_resume = xen_itc_jitter_data_reset, |
229 | .sched_clock = xen_sched_clock, | ||
182 | }; | 230 | }; |
183 | 231 | ||
184 | /* Called after suspend, to resume time. */ | 232 | /* Called after suspend, to resume time. */ |
diff --git a/arch/ia64/xen/xen_pv_ops.c b/arch/ia64/xen/xen_pv_ops.c index 936cff3c96e0..5e2270a999fa 100644 --- a/arch/ia64/xen/xen_pv_ops.c +++ b/arch/ia64/xen/xen_pv_ops.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/irq.h> | 24 | #include <linux/irq.h> |
25 | #include <linux/kernel.h> | 25 | #include <linux/kernel.h> |
26 | #include <linux/pm.h> | 26 | #include <linux/pm.h> |
27 | #include <linux/unistd.h> | ||
27 | 28 | ||
28 | #include <asm/xen/hypervisor.h> | 29 | #include <asm/xen/hypervisor.h> |
29 | #include <asm/xen/xencomm.h> | 30 | #include <asm/xen/xencomm.h> |
@@ -153,6 +154,13 @@ xen_post_smp_prepare_boot_cpu(void) | |||
153 | xen_setup_vcpu_info_placement(); | 154 | xen_setup_vcpu_info_placement(); |
154 | } | 155 | } |
155 | 156 | ||
157 | #ifdef ASM_SUPPORTED | ||
158 | static unsigned long __init_or_module | ||
159 | xen_patch_bundle(void *sbundle, void *ebundle, unsigned long type); | ||
160 | #endif | ||
161 | static void __init | ||
162 | xen_patch_branch(unsigned long tag, unsigned long type); | ||
163 | |||
156 | static const struct pv_init_ops xen_init_ops __initconst = { | 164 | static const struct pv_init_ops xen_init_ops __initconst = { |
157 | .banner = xen_banner, | 165 | .banner = xen_banner, |
158 | 166 | ||
@@ -163,6 +171,53 @@ static const struct pv_init_ops xen_init_ops __initconst = { | |||
163 | .arch_setup_nomca = xen_arch_setup_nomca, | 171 | .arch_setup_nomca = xen_arch_setup_nomca, |
164 | 172 | ||
165 | .post_smp_prepare_boot_cpu = xen_post_smp_prepare_boot_cpu, | 173 | .post_smp_prepare_boot_cpu = xen_post_smp_prepare_boot_cpu, |
174 | #ifdef ASM_SUPPORTED | ||
175 | .patch_bundle = xen_patch_bundle, | ||
176 | #endif | ||
177 | .patch_branch = xen_patch_branch, | ||
178 | }; | ||
179 | |||
180 | /*************************************************************************** | ||
181 | * pv_fsys_data | ||
182 | * addresses for fsys | ||
183 | */ | ||
184 | |||
185 | extern unsigned long xen_fsyscall_table[NR_syscalls]; | ||
186 | extern char xen_fsys_bubble_down[]; | ||
187 | struct pv_fsys_data xen_fsys_data __initdata = { | ||
188 | .fsyscall_table = (unsigned long *)xen_fsyscall_table, | ||
189 | .fsys_bubble_down = (void *)xen_fsys_bubble_down, | ||
190 | }; | ||
191 | |||
192 | /*************************************************************************** | ||
193 | * pv_patchdata | ||
194 | * patchdata addresses | ||
195 | */ | ||
196 | |||
197 | #define DECLARE(name) \ | ||
198 | extern unsigned long __xen_start_gate_##name##_patchlist[]; \ | ||
199 | extern unsigned long __xen_end_gate_##name##_patchlist[] | ||
200 | |||
201 | DECLARE(fsyscall); | ||
202 | DECLARE(brl_fsys_bubble_down); | ||
203 | DECLARE(vtop); | ||
204 | DECLARE(mckinley_e9); | ||
205 | |||
206 | extern unsigned long __xen_start_gate_section[]; | ||
207 | |||
208 | #define ASSIGN(name) \ | ||
209 | .start_##name##_patchlist = \ | ||
210 | (unsigned long)__xen_start_gate_##name##_patchlist, \ | ||
211 | .end_##name##_patchlist = \ | ||
212 | (unsigned long)__xen_end_gate_##name##_patchlist | ||
213 | |||
214 | static struct pv_patchdata xen_patchdata __initdata = { | ||
215 | ASSIGN(fsyscall), | ||
216 | ASSIGN(brl_fsys_bubble_down), | ||
217 | ASSIGN(vtop), | ||
218 | ASSIGN(mckinley_e9), | ||
219 | |||
220 | .gate_section = (void*)__xen_start_gate_section, | ||
166 | }; | 221 | }; |
167 | 222 | ||
168 | /*************************************************************************** | 223 | /*************************************************************************** |
@@ -170,6 +225,76 @@ static const struct pv_init_ops xen_init_ops __initconst = { | |||
170 | * intrinsics hooks. | 225 | * intrinsics hooks. |
171 | */ | 226 | */ |
172 | 227 | ||
228 | #ifndef ASM_SUPPORTED | ||
229 | static void | ||
230 | xen_set_itm_with_offset(unsigned long val) | ||
231 | { | ||
232 | /* ia64_cpu_local_tick() calls this with interrupt enabled. */ | ||
233 | /* WARN_ON(!irqs_disabled()); */ | ||
234 | xen_set_itm(val - XEN_MAPPEDREGS->itc_offset); | ||
235 | } | ||
236 | |||
237 | static unsigned long | ||
238 | xen_get_itm_with_offset(void) | ||
239 | { | ||
240 | /* unused at this moment */ | ||
241 | printk(KERN_DEBUG "%s is called.\n", __func__); | ||
242 | |||
243 | WARN_ON(!irqs_disabled()); | ||
244 | return ia64_native_getreg(_IA64_REG_CR_ITM) + | ||
245 | XEN_MAPPEDREGS->itc_offset; | ||
246 | } | ||
247 | |||
248 | /* ia64_set_itc() is only called by | ||
249 | * cpu_init() with ia64_set_itc(0) and ia64_sync_itc(). | ||
250 | * So XEN_MAPPEDREGS->itc_offset can be considered almost constant. | ||
251 | */ | ||
252 | static void | ||
253 | xen_set_itc(unsigned long val) | ||
254 | { | ||
255 | unsigned long mitc; | ||
256 | |||
257 | WARN_ON(!irqs_disabled()); | ||
258 | mitc = ia64_native_getreg(_IA64_REG_AR_ITC); | ||
259 | XEN_MAPPEDREGS->itc_offset = val - mitc; | ||
260 | XEN_MAPPEDREGS->itc_last = val; | ||
261 | } | ||
262 | |||
263 | static unsigned long | ||
264 | xen_get_itc(void) | ||
265 | { | ||
266 | unsigned long res; | ||
267 | unsigned long itc_offset; | ||
268 | unsigned long itc_last; | ||
269 | unsigned long ret_itc_last; | ||
270 | |||
271 | itc_offset = XEN_MAPPEDREGS->itc_offset; | ||
272 | do { | ||
273 | itc_last = XEN_MAPPEDREGS->itc_last; | ||
274 | res = ia64_native_getreg(_IA64_REG_AR_ITC); | ||
275 | res += itc_offset; | ||
276 | if (itc_last >= res) | ||
277 | res = itc_last + 1; | ||
278 | ret_itc_last = cmpxchg(&XEN_MAPPEDREGS->itc_last, | ||
279 | itc_last, res); | ||
280 | } while (unlikely(ret_itc_last != itc_last)); | ||
281 | return res; | ||
282 | |||
283 | #if 0 | ||
284 | /* ia64_itc_udelay() calls ia64_get_itc() with interrupt enabled. | ||
285 | Should it be paravirtualized instead? */ | ||
286 | WARN_ON(!irqs_disabled()); | ||
287 | itc_offset = XEN_MAPPEDREGS->itc_offset; | ||
288 | itc_last = XEN_MAPPEDREGS->itc_last; | ||
289 | res = ia64_native_getreg(_IA64_REG_AR_ITC); | ||
290 | res += itc_offset; | ||
291 | if (itc_last >= res) | ||
292 | res = itc_last + 1; | ||
293 | XEN_MAPPEDREGS->itc_last = res; | ||
294 | return res; | ||
295 | #endif | ||
296 | } | ||
297 | |||
173 | static void xen_setreg(int regnum, unsigned long val) | 298 | static void xen_setreg(int regnum, unsigned long val) |
174 | { | 299 | { |
175 | switch (regnum) { | 300 | switch (regnum) { |
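The cmpxchg() loop in xen_get_itc() above is the standard way to publish a clock value that must never move backwards: recompute, clamp against the last published value, and retry if another CPU published first. The same idea as a stand-alone user-space C11 sketch (read_raw_counter() is a hypothetical stand-in for ar.itc plus itc_offset):

#include <stdatomic.h>
#include <stdint.h>

extern uint64_t read_raw_counter(void);		/* hypothetical raw source */

static _Atomic uint64_t last_published;

uint64_t monotonic_read(void)
{
	uint64_t last, val;

	do {
		last = atomic_load(&last_published);
		val = read_raw_counter();
		if (last >= val)	/* raced or skewed: step past the last value */
			val = last + 1;
	} while (!atomic_compare_exchange_weak(&last_published, &last, val));

	return val;
}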
@@ -181,11 +306,14 @@ static void xen_setreg(int regnum, unsigned long val) | |||
181 | xen_set_eflag(val); | 306 | xen_set_eflag(val); |
182 | break; | 307 | break; |
183 | #endif | 308 | #endif |
309 | case _IA64_REG_AR_ITC: | ||
310 | xen_set_itc(val); | ||
311 | break; | ||
184 | case _IA64_REG_CR_TPR: | 312 | case _IA64_REG_CR_TPR: |
185 | xen_set_tpr(val); | 313 | xen_set_tpr(val); |
186 | break; | 314 | break; |
187 | case _IA64_REG_CR_ITM: | 315 | case _IA64_REG_CR_ITM: |
188 | xen_set_itm(val); | 316 | xen_set_itm_with_offset(val); |
189 | break; | 317 | break; |
190 | case _IA64_REG_CR_EOI: | 318 | case _IA64_REG_CR_EOI: |
191 | xen_eoi(val); | 319 | xen_eoi(val); |
@@ -209,6 +337,12 @@ static unsigned long xen_getreg(int regnum) | |||
209 | res = xen_get_eflag(); | 337 | res = xen_get_eflag(); |
210 | break; | 338 | break; |
211 | #endif | 339 | #endif |
340 | case _IA64_REG_AR_ITC: | ||
341 | res = xen_get_itc(); | ||
342 | break; | ||
343 | case _IA64_REG_CR_ITM: | ||
344 | res = xen_get_itm_with_offset(); | ||
345 | break; | ||
212 | case _IA64_REG_CR_IVR: | 346 | case _IA64_REG_CR_IVR: |
213 | res = xen_get_ivr(); | 347 | res = xen_get_ivr(); |
214 | break; | 348 | break; |
@@ -259,8 +393,417 @@ xen_intrin_local_irq_restore(unsigned long mask) | |||
259 | else | 393 | else |
260 | xen_rsm_i(); | 394 | xen_rsm_i(); |
261 | } | 395 | } |
396 | #else | ||
397 | #define __DEFINE_FUNC(name, code) \ | ||
398 | extern const char xen_ ## name ## _direct_start[]; \ | ||
399 | extern const char xen_ ## name ## _direct_end[]; \ | ||
400 | asm (".align 32\n" \ | ||
401 | ".proc xen_" #name "\n" \ | ||
402 | "xen_" #name ":\n" \ | ||
403 | "xen_" #name "_direct_start:\n" \ | ||
404 | code \ | ||
405 | "xen_" #name "_direct_end:\n" \ | ||
406 | "br.cond.sptk.many b6\n" \ | ||
407 | ".endp xen_" #name "\n") | ||
408 | |||
409 | #define DEFINE_VOID_FUNC0(name, code) \ | ||
410 | extern void \ | ||
411 | xen_ ## name (void); \ | ||
412 | __DEFINE_FUNC(name, code) | ||
413 | |||
414 | #define DEFINE_VOID_FUNC1(name, code) \ | ||
415 | extern void \ | ||
416 | xen_ ## name (unsigned long arg); \ | ||
417 | __DEFINE_FUNC(name, code) | ||
418 | |||
419 | #define DEFINE_VOID_FUNC1_VOID(name, code) \ | ||
420 | extern void \ | ||
421 | xen_ ## name (void *arg); \ | ||
422 | __DEFINE_FUNC(name, code) | ||
423 | |||
424 | #define DEFINE_VOID_FUNC2(name, code) \ | ||
425 | extern void \ | ||
426 | xen_ ## name (unsigned long arg0, \ | ||
427 | unsigned long arg1); \ | ||
428 | __DEFINE_FUNC(name, code) | ||
262 | 429 | ||
263 | static const struct pv_cpu_ops xen_cpu_ops __initdata = { | 430 | #define DEFINE_FUNC0(name, code) \ |
431 | extern unsigned long \ | ||
432 | xen_ ## name (void); \ | ||
433 | __DEFINE_FUNC(name, code) | ||
434 | |||
435 | #define DEFINE_FUNC1(name, type, code) \ | ||
436 | extern unsigned long \ | ||
437 | xen_ ## name (type arg); \ | ||
438 | __DEFINE_FUNC(name, code) | ||
439 | |||
440 | #define XEN_PSR_I_ADDR_ADDR (XSI_BASE + XSI_PSR_I_ADDR_OFS) | ||
441 | |||
442 | /* | ||
443 | * static void xen_set_itm_with_offset(unsigned long val) | ||
444 | * xen_set_itm(val - XEN_MAPPEDREGS->itc_offset); | ||
445 | */ | ||
446 | /* 2 bundles */ | ||
447 | DEFINE_VOID_FUNC1(set_itm_with_offset, | ||
448 | "mov r2 = " __stringify(XSI_BASE) " + " | ||
449 | __stringify(XSI_ITC_OFFSET_OFS) "\n" | ||
450 | ";;\n" | ||
451 | "ld8 r3 = [r2]\n" | ||
452 | ";;\n" | ||
453 | "sub r8 = r8, r3\n" | ||
454 | "break " __stringify(HYPERPRIVOP_SET_ITM) "\n"); | ||
455 | |||
456 | /* | ||
457 | * static unsigned long xen_get_itm_with_offset(void) | ||
458 | * return ia64_native_getreg(_IA64_REG_CR_ITM) + XEN_MAPPEDREGS->itc_offset; | ||
459 | */ | ||
460 | /* 2 bundles */ | ||
461 | DEFINE_FUNC0(get_itm_with_offset, | ||
462 | "mov r2 = " __stringify(XSI_BASE) " + " | ||
463 | __stringify(XSI_ITC_OFFSET_OFS) "\n" | ||
464 | ";;\n" | ||
465 | "ld8 r3 = [r2]\n" | ||
466 | "mov r8 = cr.itm\n" | ||
467 | ";;\n" | ||
468 | "add r8 = r8, r2\n"); | ||
469 | |||
470 | /* | ||
471 | * static void xen_set_itc(unsigned long val) | ||
472 | * unsigned long mitc; | ||
473 | * | ||
474 | * WARN_ON(!irqs_disabled()); | ||
475 | * mitc = ia64_native_getreg(_IA64_REG_AR_ITC); | ||
476 | * XEN_MAPPEDREGS->itc_offset = val - mitc; | ||
477 | * XEN_MAPPEDREGS->itc_last = val; | ||
478 | */ | ||
479 | /* 2 bundles */ | ||
480 | DEFINE_VOID_FUNC1(set_itc, | ||
481 | "mov r2 = " __stringify(XSI_BASE) " + " | ||
482 | __stringify(XSI_ITC_LAST_OFS) "\n" | ||
483 | "mov r3 = ar.itc\n" | ||
484 | ";;\n" | ||
485 | "sub r3 = r8, r3\n" | ||
486 | "st8 [r2] = r8, " | ||
487 | __stringify(XSI_ITC_LAST_OFS) " - " | ||
488 | __stringify(XSI_ITC_OFFSET_OFS) "\n" | ||
489 | ";;\n" | ||
490 | "st8 [r2] = r3\n"); | ||
491 | |||
492 | /* | ||
493 | * static unsigned long xen_get_itc(void) | ||
494 | * unsigned long res; | ||
495 | * unsigned long itc_offset; | ||
496 | * unsigned long itc_last; | ||
497 | * unsigned long ret_itc_last; | ||
498 | * | ||
499 | * itc_offset = XEN_MAPPEDREGS->itc_offset; | ||
500 | * do { | ||
501 | * itc_last = XEN_MAPPEDREGS->itc_last; | ||
502 | * res = ia64_native_getreg(_IA64_REG_AR_ITC); | ||
503 | * res += itc_offset; | ||
504 | * if (itc_last >= res) | ||
505 | * res = itc_last + 1; | ||
506 | * ret_itc_last = cmpxchg(&XEN_MAPPEDREGS->itc_last, | ||
507 | * itc_last, res); | ||
508 | * } while (unlikely(ret_itc_last != itc_last)); | ||
509 | * return res; | ||
510 | */ | ||
511 | /* 5 bundles */ | ||
512 | DEFINE_FUNC0(get_itc, | ||
513 | "mov r2 = " __stringify(XSI_BASE) " + " | ||
514 | __stringify(XSI_ITC_OFFSET_OFS) "\n" | ||
515 | ";;\n" | ||
516 | "ld8 r9 = [r2], " __stringify(XSI_ITC_LAST_OFS) " - " | ||
517 | __stringify(XSI_ITC_OFFSET_OFS) "\n" | ||
518 | /* r9 = itc_offset */ | ||
519 | /* r2 = XSI_ITC_OFFSET */ | ||
520 | "888:\n" | ||
521 | "mov r8 = ar.itc\n" /* res = ar.itc */ | ||
522 | ";;\n" | ||
523 | "ld8 r3 = [r2]\n" /* r3 = itc_last */ | ||
524 | "add r8 = r8, r9\n" /* res = ar.itc + itc_offset */ | ||
525 | ";;\n" | ||
526 | "cmp.gtu p6, p0 = r3, r8\n" | ||
527 | ";;\n" | ||
528 | "(p6) add r8 = 1, r3\n" /* if (itc_last > res) itc_last + 1 */ | ||
529 | ";;\n" | ||
530 | "mov ar.ccv = r8\n" | ||
531 | ";;\n" | ||
532 | "cmpxchg8.acq r10 = [r2], r8, ar.ccv\n" | ||
533 | ";;\n" | ||
534 | "cmp.ne p6, p0 = r10, r3\n" | ||
535 | "(p6) hint @pause\n" | ||
536 | "(p6) br.cond.spnt 888b\n"); | ||
537 | |||
538 | DEFINE_VOID_FUNC1_VOID(fc, | ||
539 | "break " __stringify(HYPERPRIVOP_FC) "\n"); | ||
540 | |||
541 | /* | ||
542 | * psr_i_addr_addr = XEN_PSR_I_ADDR_ADDR | ||
543 | * masked_addr = *psr_i_addr_addr | ||
544 | * pending_intr_addr = masked_addr - 1 | ||
545 | * if (val & IA64_PSR_I) { | ||
546 | * masked = *masked_addr | ||
547 | * *masked_addr = 0:xen_set_virtual_psr_i(1) | ||
548 | * compiler barrier | ||
549 | * if (masked) { | ||
550 | * uint8_t pending = *pending_intr_addr; | ||
551 | * if (pending) | ||
552 | * XEN_HYPER_SSM_I | ||
553 | * } | ||
554 | * } else { | ||
555 | * *masked_addr = 1:xen_set_virtual_psr_i(0) | ||
556 | * } | ||
557 | */ | ||
558 | /* 6 bundles */ | ||
559 | DEFINE_VOID_FUNC1(intrin_local_irq_restore, | ||
560 | /* r8 = input value: 0 or IA64_PSR_I | ||
561 | * p6 = (flags & IA64_PSR_I) | ||
562 | * = if clause | ||
563 | * p7 = !(flags & IA64_PSR_I) | ||
564 | * = else clause | ||
565 | */ | ||
566 | "cmp.ne p6, p7 = r8, r0\n" | ||
567 | "mov r9 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n" | ||
568 | ";;\n" | ||
569 | /* r9 = XEN_PSR_I_ADDR */ | ||
570 | "ld8 r9 = [r9]\n" | ||
571 | ";;\n" | ||
572 | |||
573 | /* r10 = masked previous value */ | ||
574 | "(p6) ld1.acq r10 = [r9]\n" | ||
575 | ";;\n" | ||
576 | |||
577 | /* p8 = !masked interrupt masked previously? */ | ||
578 | "(p6) cmp.ne.unc p8, p0 = r10, r0\n" | ||
579 | |||
580 | /* p7 = else clause */ | ||
581 | "(p7) mov r11 = 1\n" | ||
582 | ";;\n" | ||
583 | /* masked = 1 */ | ||
584 | "(p7) st1.rel [r9] = r11\n" | ||
585 | |||
586 | /* p6 = if clause */ | ||
587 | /* masked = 0 | ||
588 | * r9 = masked_addr - 1 | ||
589 | * = pending_intr_addr | ||
590 | */ | ||
591 | "(p8) st1.rel [r9] = r0, -1\n" | ||
592 | ";;\n" | ||
593 | /* r8 = pending_intr */ | ||
594 | "(p8) ld1.acq r11 = [r9]\n" | ||
595 | ";;\n" | ||
596 | /* p9 = interrupt pending? */ | ||
597 | "(p8) cmp.ne.unc p9, p10 = r11, r0\n" | ||
598 | ";;\n" | ||
599 | "(p10) mf\n" | ||
600 | /* issue hypercall to trigger interrupt */ | ||
601 | "(p9) break " __stringify(HYPERPRIVOP_SSM_I) "\n"); | ||
602 | |||
603 | DEFINE_VOID_FUNC2(ptcga, | ||
604 | "break " __stringify(HYPERPRIVOP_PTC_GA) "\n"); | ||
605 | DEFINE_VOID_FUNC2(set_rr, | ||
606 | "break " __stringify(HYPERPRIVOP_SET_RR) "\n"); | ||
607 | |||
608 | /* | ||
609 | * tmp = XEN_MAPPEDREGS->interrupt_mask_addr = XEN_PSR_I_ADDR_ADDR; | ||
610 | * tmp = *tmp | ||
611 | * tmp = *tmp; | ||
612 | * psr_i = tmp? 0: IA64_PSR_I; | ||
613 | */ | ||
614 | /* 4 bundles */ | ||
615 | DEFINE_FUNC0(get_psr_i, | ||
616 | "mov r9 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n" | ||
617 | ";;\n" | ||
618 | "ld8 r9 = [r9]\n" /* r9 = XEN_PSR_I_ADDR */ | ||
619 | "mov r8 = 0\n" /* psr_i = 0 */ | ||
620 | ";;\n" | ||
621 | "ld1.acq r9 = [r9]\n" /* r9 = XEN_PSR_I */ | ||
622 | ";;\n" | ||
623 | "cmp.eq.unc p6, p0 = r9, r0\n" /* p6 = (XEN_PSR_I != 0) */ | ||
624 | ";;\n" | ||
625 | "(p6) mov r8 = " __stringify(1 << IA64_PSR_I_BIT) "\n"); | ||
626 | |||
627 | DEFINE_FUNC1(thash, unsigned long, | ||
628 | "break " __stringify(HYPERPRIVOP_THASH) "\n"); | ||
629 | DEFINE_FUNC1(get_cpuid, int, | ||
630 | "break " __stringify(HYPERPRIVOP_GET_CPUID) "\n"); | ||
631 | DEFINE_FUNC1(get_pmd, int, | ||
632 | "break " __stringify(HYPERPRIVOP_GET_PMD) "\n"); | ||
633 | DEFINE_FUNC1(get_rr, unsigned long, | ||
634 | "break " __stringify(HYPERPRIVOP_GET_RR) "\n"); | ||
635 | |||
636 | /* | ||
637 | * void xen_privop_ssm_i(void) | ||
638 | * | ||
639 | * int masked = !xen_get_virtual_psr_i(); | ||
640 | * // masked = *(*XEN_MAPPEDREGS->interrupt_mask_addr) | ||
641 | * xen_set_virtual_psr_i(1) | ||
642 | * // *(*XEN_MAPPEDREGS->interrupt_mask_addr) = 0 | ||
643 | * // compiler barrier | ||
644 | * if (masked) { | ||
645 | * uint8_t* pend_int_addr = | ||
646 | * (uint8_t*)(*XEN_MAPPEDREGS->interrupt_mask_addr) - 1; | ||
647 | * uint8_t pending = *pend_int_addr; | ||
648 | * if (pending) | ||
649 | * XEN_HYPER_SSM_I | ||
650 | * } | ||
651 | */ | ||
652 | /* 4 bundles */ | ||
653 | DEFINE_VOID_FUNC0(ssm_i, | ||
654 | "mov r8 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n" | ||
655 | ";;\n" | ||
656 | "ld8 r8 = [r8]\n" /* r8 = XEN_PSR_I_ADDR */ | ||
657 | ";;\n" | ||
658 | "ld1.acq r9 = [r8]\n" /* r9 = XEN_PSR_I */ | ||
659 | ";;\n" | ||
660 | "st1.rel [r8] = r0, -1\n" /* psr_i = 0. enable interrupt | ||
661 | * r8 = XEN_PSR_I_ADDR - 1 | ||
662 | * = pend_int_addr | ||
663 | */ | ||
664 | "cmp.eq.unc p0, p6 = r9, r0\n"/* p6 = !XEN_PSR_I | ||
665 | * previously interrupt | ||
666 | * masked? | ||
667 | */ | ||
668 | ";;\n" | ||
669 | "(p6) ld1.acq r8 = [r8]\n" /* r8 = xen_pend_int */ | ||
670 | ";;\n" | ||
671 | "(p6) cmp.eq.unc p6, p7 = r8, r0\n" /*interrupt pending?*/ | ||
672 | ";;\n" | ||
673 | /* issue hypercall to get interrupt */ | ||
674 | "(p7) break " __stringify(HYPERPRIVOP_SSM_I) "\n" | ||
675 | ";;\n"); | ||
676 | |||
677 | /* | ||
678 | * psr_i_addr_addr = XEN_MAPPEDREGS->interrupt_mask_addr | ||
679 | * = XEN_PSR_I_ADDR_ADDR; | ||
680 | * psr_i_addr = *psr_i_addr_addr; | ||
681 | * *psr_i_addr = 1; | ||
682 | */ | ||
683 | /* 2 bundles */ | ||
684 | DEFINE_VOID_FUNC0(rsm_i, | ||
685 | "mov r8 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n" | ||
686 | /* r8 = XEN_PSR_I_ADDR */ | ||
687 | "mov r9 = 1\n" | ||
688 | ";;\n" | ||
689 | "ld8 r8 = [r8]\n" /* r8 = XEN_PSR_I */ | ||
690 | ";;\n" | ||
691 | "st1.rel [r8] = r9\n"); /* XEN_PSR_I = 1 */ | ||
692 | |||
693 | extern void | ||
694 | xen_set_rr0_to_rr4(unsigned long val0, unsigned long val1, | ||
695 | unsigned long val2, unsigned long val3, | ||
696 | unsigned long val4); | ||
697 | __DEFINE_FUNC(set_rr0_to_rr4, | ||
698 | "break " __stringify(HYPERPRIVOP_SET_RR0_TO_RR4) "\n"); | ||
699 | |||
700 | |||
701 | extern unsigned long xen_getreg(int regnum); | ||
702 | #define __DEFINE_GET_REG(id, privop) \ | ||
703 | "mov r2 = " __stringify(_IA64_REG_ ## id) "\n" \ | ||
704 | ";;\n" \ | ||
705 | "cmp.eq p6, p0 = r2, r8\n" \ | ||
706 | ";;\n" \ | ||
707 | "(p6) break " __stringify(HYPERPRIVOP_GET_ ## privop) "\n" \ | ||
708 | "(p6) br.cond.sptk.many b6\n" \ | ||
709 | ";;\n" | ||
710 | |||
711 | __DEFINE_FUNC(getreg, | ||
712 | __DEFINE_GET_REG(PSR, PSR) | ||
713 | #ifdef CONFIG_IA32_SUPPORT | ||
714 | __DEFINE_GET_REG(AR_EFLAG, EFLAG) | ||
715 | #endif | ||
716 | |||
717 | /* get_itc */ | ||
718 | "mov r2 = " __stringify(_IA64_REG_AR_ITC) "\n" | ||
719 | ";;\n" | ||
720 | "cmp.eq p6, p0 = r2, r8\n" | ||
721 | ";;\n" | ||
722 | "(p6) br.cond.spnt xen_get_itc\n" | ||
723 | ";;\n" | ||
724 | |||
725 | /* get itm */ | ||
726 | "mov r2 = " __stringify(_IA64_REG_CR_ITM) "\n" | ||
727 | ";;\n" | ||
728 | "cmp.eq p6, p0 = r2, r8\n" | ||
729 | ";;\n" | ||
730 | "(p6) br.cond.spnt xen_get_itm_with_offset\n" | ||
731 | ";;\n" | ||
732 | |||
733 | __DEFINE_GET_REG(CR_IVR, IVR) | ||
734 | __DEFINE_GET_REG(CR_TPR, TPR) | ||
735 | |||
736 | /* fall back */ | ||
737 | "movl r2 = ia64_native_getreg_func\n" | ||
738 | ";;\n" | ||
739 | "mov b7 = r2\n" | ||
740 | ";;\n" | ||
741 | "br.cond.sptk.many b7\n"); | ||
742 | |||
743 | extern void xen_setreg(int regnum, unsigned long val); | ||
744 | #define __DEFINE_SET_REG(id, privop) \ | ||
745 | "mov r2 = " __stringify(_IA64_REG_ ## id) "\n" \ | ||
746 | ";;\n" \ | ||
747 | "cmp.eq p6, p0 = r2, r9\n" \ | ||
748 | ";;\n" \ | ||
749 | "(p6) break " __stringify(HYPERPRIVOP_ ## privop) "\n" \ | ||
750 | "(p6) br.cond.sptk.many b6\n" \ | ||
751 | ";;\n" | ||
752 | |||
753 | __DEFINE_FUNC(setreg, | ||
754 | /* kr0 .. kr7 */ | ||
755 | /* | ||
756 | * if (_IA64_REG_AR_KR0 <= regnum && | ||
757 | * regnum <= _IA64_REG_AR_KR7) { | ||
758 | * register __index asm ("r8") = regnum - _IA64_REG_AR_KR0 | ||
759 | * register __val asm ("r9") = val | ||
760 | * "break HYPERPRIVOP_SET_KR" | ||
761 | * } | ||
762 | */ | ||
763 | "mov r17 = r9\n" | ||
764 | "mov r2 = " __stringify(_IA64_REG_AR_KR0) "\n" | ||
765 | ";;\n" | ||
766 | "cmp.ge p6, p0 = r9, r2\n" | ||
767 | "sub r17 = r17, r2\n" | ||
768 | ";;\n" | ||
769 | "(p6) cmp.ge.unc p7, p0 = " | ||
770 | __stringify(_IA64_REG_AR_KR7) " - " __stringify(_IA64_REG_AR_KR0) | ||
771 | ", r17\n" | ||
772 | ";;\n" | ||
773 | "(p7) mov r9 = r8\n" | ||
774 | ";;\n" | ||
775 | "(p7) mov r8 = r17\n" | ||
776 | "(p7) break " __stringify(HYPERPRIVOP_SET_KR) "\n" | ||
777 | |||
778 | /* set itm */ | ||
779 | "mov r2 = " __stringify(_IA64_REG_CR_ITM) "\n" | ||
780 | ";;\n" | ||
781 | "cmp.eq p6, p0 = r2, r8\n" | ||
782 | ";;\n" | ||
783 | "(p6) br.cond.spnt xen_set_itm_with_offset\n" | ||
784 | |||
785 | /* set itc */ | ||
786 | "mov r2 = " __stringify(_IA64_REG_AR_ITC) "\n" | ||
787 | ";;\n" | ||
788 | "cmp.eq p6, p0 = r2, r8\n" | ||
789 | ";;\n" | ||
790 | "(p6) br.cond.spnt xen_set_itc\n" | ||
791 | |||
792 | #ifdef CONFIG_IA32_SUPPORT | ||
793 | __DEFINE_SET_REG(AR_EFLAG, SET_EFLAG) | ||
794 | #endif | ||
795 | __DEFINE_SET_REG(CR_TPR, SET_TPR) | ||
796 | __DEFINE_SET_REG(CR_EOI, EOI) | ||
797 | |||
798 | /* fall back */ | ||
799 | "movl r2 = ia64_native_setreg_func\n" | ||
800 | ";;\n" | ||
801 | "mov b7 = r2\n" | ||
802 | ";;\n" | ||
803 | "br.cond.sptk.many b7\n"); | ||
804 | #endif | ||
805 | |||
806 | static const struct pv_cpu_ops xen_cpu_ops __initconst = { | ||
264 | .fc = xen_fc, | 807 | .fc = xen_fc, |
265 | .thash = xen_thash, | 808 | .thash = xen_thash, |
266 | .get_cpuid = xen_get_cpuid, | 809 | .get_cpuid = xen_get_cpuid, |
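The stubs generated through __DEFINE_FUNC above all reduce to a "break <hyperprivop>" whose immediate is produced by __stringify() (from <linux/stringify.h>) inside a top-level asm block. A stripped-down sketch of one such stub; the symbol name is illustrative, and the real stubs above branch back through b6 per the paravirt calling convention, whereas this sketch uses a plain return:

#include <linux/stringify.h>
#include <asm/xen/privop.h>	/* HYPERPRIVOP_GET_TPR, as used above */

extern unsigned long xen_get_tpr_stub(void);

asm(".align 32\n"
    ".proc xen_get_tpr_stub\n"
    "xen_get_tpr_stub:\n"
    /* the hyperprivop number becomes the break immediate at preprocess time */
    "break " __stringify(HYPERPRIVOP_GET_TPR) "\n"
    ";;\n"
    "br.ret.sptk.many rp\n"
    ".endp xen_get_tpr_stub\n");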
@@ -337,7 +880,7 @@ xen_iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val) | |||
337 | HYPERVISOR_physdev_op(PHYSDEVOP_apic_write, &apic_op); | 880 | HYPERVISOR_physdev_op(PHYSDEVOP_apic_write, &apic_op); |
338 | } | 881 | } |
339 | 882 | ||
340 | static const struct pv_iosapic_ops xen_iosapic_ops __initconst = { | 883 | static struct pv_iosapic_ops xen_iosapic_ops __initdata = { |
341 | .pcat_compat_init = xen_pcat_compat_init, | 884 | .pcat_compat_init = xen_pcat_compat_init, |
342 | .__get_irq_chip = xen_iosapic_get_irq_chip, | 885 | .__get_irq_chip = xen_iosapic_get_irq_chip, |
343 | 886 | ||
@@ -355,6 +898,8 @@ xen_setup_pv_ops(void) | |||
355 | xen_info_init(); | 898 | xen_info_init(); |
356 | pv_info = xen_info; | 899 | pv_info = xen_info; |
357 | pv_init_ops = xen_init_ops; | 900 | pv_init_ops = xen_init_ops; |
901 | pv_fsys_data = xen_fsys_data; | ||
902 | pv_patchdata = xen_patchdata; | ||
358 | pv_cpu_ops = xen_cpu_ops; | 903 | pv_cpu_ops = xen_cpu_ops; |
359 | pv_iosapic_ops = xen_iosapic_ops; | 904 | pv_iosapic_ops = xen_iosapic_ops; |
360 | pv_irq_ops = xen_irq_ops; | 905 | pv_irq_ops = xen_irq_ops; |
@@ -362,3 +907,252 @@ xen_setup_pv_ops(void) | |||
362 | 907 | ||
363 | paravirt_cpu_asm_init(&xen_cpu_asm_switch); | 908 | paravirt_cpu_asm_init(&xen_cpu_asm_switch); |
364 | } | 909 | } |
910 | |||
911 | #ifdef ASM_SUPPORTED | ||
912 | /*************************************************************************** | ||
913 | * binary patching | ||
914 | * pv_init_ops.patch_bundle | ||
915 | */ | ||
916 | |||
917 | #define DEFINE_FUNC_GETREG(name, privop) \ | ||
918 | DEFINE_FUNC0(get_ ## name, \ | ||
919 | "break "__stringify(HYPERPRIVOP_GET_ ## privop) "\n") | ||
920 | |||
921 | DEFINE_FUNC_GETREG(psr, PSR); | ||
922 | DEFINE_FUNC_GETREG(eflag, EFLAG); | ||
923 | DEFINE_FUNC_GETREG(ivr, IVR); | ||
924 | DEFINE_FUNC_GETREG(tpr, TPR); | ||
925 | |||
926 | #define DEFINE_FUNC_SET_KR(n) \ | ||
927 | DEFINE_VOID_FUNC0(set_kr ## n, \ | ||
928 | ";;\n" \ | ||
929 | "mov r9 = r8\n" \ | ||
930 | "mov r8 = " #n "\n" \ | ||
931 | "break " __stringify(HYPERPRIVOP_SET_KR) "\n") | ||
932 | |||
933 | DEFINE_FUNC_SET_KR(0); | ||
934 | DEFINE_FUNC_SET_KR(1); | ||
935 | DEFINE_FUNC_SET_KR(2); | ||
936 | DEFINE_FUNC_SET_KR(3); | ||
937 | DEFINE_FUNC_SET_KR(4); | ||
938 | DEFINE_FUNC_SET_KR(5); | ||
939 | DEFINE_FUNC_SET_KR(6); | ||
940 | DEFINE_FUNC_SET_KR(7); | ||
941 | |||
942 | #define __DEFINE_FUNC_SETREG(name, privop) \ | ||
943 | DEFINE_VOID_FUNC0(name, \ | ||
944 | "break "__stringify(HYPERPRIVOP_ ## privop) "\n") | ||
945 | |||
946 | #define DEFINE_FUNC_SETREG(name, privop) \ | ||
947 | __DEFINE_FUNC_SETREG(set_ ## name, SET_ ## privop) | ||
948 | |||
949 | DEFINE_FUNC_SETREG(eflag, EFLAG); | ||
950 | DEFINE_FUNC_SETREG(tpr, TPR); | ||
951 | __DEFINE_FUNC_SETREG(eoi, EOI); | ||
952 | |||
953 | extern const char xen_check_events[]; | ||
954 | extern const char __xen_intrin_local_irq_restore_direct_start[]; | ||
955 | extern const char __xen_intrin_local_irq_restore_direct_end[]; | ||
956 | extern const unsigned long __xen_intrin_local_irq_restore_direct_reloc; | ||
957 | |||
958 | asm ( | ||
959 | ".align 32\n" | ||
960 | ".proc xen_check_events\n" | ||
961 | "xen_check_events:\n" | ||
962 | /* masked = 0 | ||
963 | * r9 = masked_addr - 1 | ||
964 | * = pending_intr_addr | ||
965 | */ | ||
966 | "st1.rel [r9] = r0, -1\n" | ||
967 | ";;\n" | ||
968 | /* r11 = pending_intr */ | ||
969 | "ld1.acq r11 = [r9]\n" | ||
970 | ";;\n" | ||
971 | /* p9 = interrupt pending? */ | ||
972 | "cmp.ne p9, p10 = r11, r0\n" | ||
973 | ";;\n" | ||
974 | "(p10) mf\n" | ||
975 | /* issue hypercall to trigger interrupt */ | ||
976 | "(p9) break " __stringify(HYPERPRIVOP_SSM_I) "\n" | ||
977 | "br.cond.sptk.many b6\n" | ||
978 | ".endp xen_check_events\n" | ||
979 | "\n" | ||
980 | ".align 32\n" | ||
981 | ".proc __xen_intrin_local_irq_restore_direct\n" | ||
982 | "__xen_intrin_local_irq_restore_direct:\n" | ||
983 | "__xen_intrin_local_irq_restore_direct_start:\n" | ||
984 | "1:\n" | ||
985 | "{\n" | ||
986 | "cmp.ne p6, p7 = r8, r0\n" | ||
987 | "mov r17 = ip\n" /* get ip to calc return address */ | ||
988 | "mov r9 = "__stringify(XEN_PSR_I_ADDR_ADDR) "\n" | ||
989 | ";;\n" | ||
990 | "}\n" | ||
991 | "{\n" | ||
992 | /* r9 = XEN_PSR_I_ADDR */ | ||
993 | "ld8 r9 = [r9]\n" | ||
994 | ";;\n" | ||
995 | /* r10 = masked previous value */ | ||
996 | "(p6) ld1.acq r10 = [r9]\n" | ||
997 | "adds r17 = 1f - 1b, r17\n" /* calculate return address */ | ||
998 | ";;\n" | ||
999 | "}\n" | ||
1000 | "{\n" | ||
1001 | /* p8 = unmasking now and interrupts were masked before? */ | ||
1002 | "(p6) cmp.ne.unc p8, p0 = r10, r0\n" | ||
1003 | "\n" | ||
1004 | /* p7 = else clause */ | ||
1005 | "(p7) mov r11 = 1\n" | ||
1006 | ";;\n" | ||
1007 | "(p8) mov b6 = r17\n" /* set return address */ | ||
1008 | "}\n" | ||
1009 | "{\n" | ||
1010 | /* masked = 1 */ | ||
1011 | "(p7) st1.rel [r9] = r11\n" | ||
1012 | "\n" | ||
1013 | "[99:]\n" | ||
1014 | "(p8) brl.cond.dptk.few xen_check_events\n" | ||
1015 | "}\n" | ||
1016 | /* pv calling stub is 5 bundles. fill nop to adjust return address */ | ||
1017 | "{\n" | ||
1018 | "nop 0\n" | ||
1019 | "nop 0\n" | ||
1020 | "nop 0\n" | ||
1021 | "}\n" | ||
1022 | "1:\n" | ||
1023 | "__xen_intrin_local_irq_restore_direct_end:\n" | ||
1024 | ".endp __xen_intrin_local_irq_restore_direct\n" | ||
1025 | "\n" | ||
1026 | ".align 8\n" | ||
1027 | "__xen_intrin_local_irq_restore_direct_reloc:\n" | ||
1028 | "data8 99b\n" | ||
1029 | ); | ||
1030 | |||
1031 | static struct paravirt_patch_bundle_elem xen_patch_bundle_elems[] | ||
1032 | __initdata_or_module = | ||
1033 | { | ||
1034 | #define XEN_PATCH_BUNDLE_ELEM(name, type) \ | ||
1035 | { \ | ||
1036 | (void*)xen_ ## name ## _direct_start, \ | ||
1037 | (void*)xen_ ## name ## _direct_end, \ | ||
1038 | PARAVIRT_PATCH_TYPE_ ## type, \ | ||
1039 | } | ||
1040 | |||
1041 | XEN_PATCH_BUNDLE_ELEM(fc, FC), | ||
1042 | XEN_PATCH_BUNDLE_ELEM(thash, THASH), | ||
1043 | XEN_PATCH_BUNDLE_ELEM(get_cpuid, GET_CPUID), | ||
1044 | XEN_PATCH_BUNDLE_ELEM(get_pmd, GET_PMD), | ||
1045 | XEN_PATCH_BUNDLE_ELEM(ptcga, PTCGA), | ||
1046 | XEN_PATCH_BUNDLE_ELEM(get_rr, GET_RR), | ||
1047 | XEN_PATCH_BUNDLE_ELEM(set_rr, SET_RR), | ||
1048 | XEN_PATCH_BUNDLE_ELEM(set_rr0_to_rr4, SET_RR0_TO_RR4), | ||
1049 | XEN_PATCH_BUNDLE_ELEM(ssm_i, SSM_I), | ||
1050 | XEN_PATCH_BUNDLE_ELEM(rsm_i, RSM_I), | ||
1051 | XEN_PATCH_BUNDLE_ELEM(get_psr_i, GET_PSR_I), | ||
1052 | { | ||
1053 | (void*)__xen_intrin_local_irq_restore_direct_start, | ||
1054 | (void*)__xen_intrin_local_irq_restore_direct_end, | ||
1055 | PARAVIRT_PATCH_TYPE_INTRIN_LOCAL_IRQ_RESTORE, | ||
1056 | }, | ||
1057 | |||
1058 | #define XEN_PATCH_BUNDLE_ELEM_GETREG(name, reg) \ | ||
1059 | { \ | ||
1060 | xen_get_ ## name ## _direct_start, \ | ||
1061 | xen_get_ ## name ## _direct_end, \ | ||
1062 | PARAVIRT_PATCH_TYPE_GETREG + _IA64_REG_ ## reg, \ | ||
1063 | } | ||
1064 | |||
1065 | XEN_PATCH_BUNDLE_ELEM_GETREG(psr, PSR), | ||
1066 | XEN_PATCH_BUNDLE_ELEM_GETREG(eflag, AR_EFLAG), | ||
1067 | |||
1068 | XEN_PATCH_BUNDLE_ELEM_GETREG(ivr, CR_IVR), | ||
1069 | XEN_PATCH_BUNDLE_ELEM_GETREG(tpr, CR_TPR), | ||
1070 | |||
1071 | XEN_PATCH_BUNDLE_ELEM_GETREG(itc, AR_ITC), | ||
1072 | XEN_PATCH_BUNDLE_ELEM_GETREG(itm_with_offset, CR_ITM), | ||
1073 | |||
1074 | |||
1075 | #define __XEN_PATCH_BUNDLE_ELEM_SETREG(name, reg) \ | ||
1076 | { \ | ||
1077 | xen_ ## name ## _direct_start, \ | ||
1078 | xen_ ## name ## _direct_end, \ | ||
1079 | PARAVIRT_PATCH_TYPE_SETREG + _IA64_REG_ ## reg, \ | ||
1080 | } | ||
1081 | |||
1082 | #define XEN_PATCH_BUNDLE_ELEM_SETREG(name, reg) \ | ||
1083 | __XEN_PATCH_BUNDLE_ELEM_SETREG(set_ ## name, reg) | ||
1084 | |||
1085 | XEN_PATCH_BUNDLE_ELEM_SETREG(kr0, AR_KR0), | ||
1086 | XEN_PATCH_BUNDLE_ELEM_SETREG(kr1, AR_KR1), | ||
1087 | XEN_PATCH_BUNDLE_ELEM_SETREG(kr2, AR_KR2), | ||
1088 | XEN_PATCH_BUNDLE_ELEM_SETREG(kr3, AR_KR3), | ||
1089 | XEN_PATCH_BUNDLE_ELEM_SETREG(kr4, AR_KR4), | ||
1090 | XEN_PATCH_BUNDLE_ELEM_SETREG(kr5, AR_KR5), | ||
1091 | XEN_PATCH_BUNDLE_ELEM_SETREG(kr6, AR_KR6), | ||
1092 | XEN_PATCH_BUNDLE_ELEM_SETREG(kr7, AR_KR7), | ||
1093 | |||
1094 | XEN_PATCH_BUNDLE_ELEM_SETREG(eflag, AR_EFLAG), | ||
1095 | XEN_PATCH_BUNDLE_ELEM_SETREG(tpr, CR_TPR), | ||
1096 | __XEN_PATCH_BUNDLE_ELEM_SETREG(eoi, CR_EOI), | ||
1097 | |||
1098 | XEN_PATCH_BUNDLE_ELEM_SETREG(itc, AR_ITC), | ||
1099 | XEN_PATCH_BUNDLE_ELEM_SETREG(itm_with_offset, CR_ITM), | ||
1100 | }; | ||
1101 | |||
1102 | static unsigned long __init_or_module | ||
1103 | xen_patch_bundle(void *sbundle, void *ebundle, unsigned long type) | ||
1104 | { | ||
1105 | const unsigned long nelems = sizeof(xen_patch_bundle_elems) / | ||
1106 | sizeof(xen_patch_bundle_elems[0]); | ||
1107 | unsigned long used; | ||
1108 | const struct paravirt_patch_bundle_elem *found; | ||
1109 | |||
1110 | used = __paravirt_patch_apply_bundle(sbundle, ebundle, type, | ||
1111 | xen_patch_bundle_elems, nelems, | ||
1112 | &found); | ||
1113 | |||
1114 | if (found == NULL) | ||
1115 | /* fallback */ | ||
1116 | return ia64_native_patch_bundle(sbundle, ebundle, type); | ||
1117 | if (used == 0) | ||
1118 | return used; | ||
1119 | |||
1120 | /* relocation */ | ||
1121 | switch (type) { | ||
1122 | case PARAVIRT_PATCH_TYPE_INTRIN_LOCAL_IRQ_RESTORE: { | ||
1123 | unsigned long reloc = | ||
1124 | __xen_intrin_local_irq_restore_direct_reloc; | ||
1125 | unsigned long reloc_offset = reloc - (unsigned long) | ||
1126 | __xen_intrin_local_irq_restore_direct_start; | ||
1127 | unsigned long tag = (unsigned long)sbundle + reloc_offset; | ||
1128 | paravirt_patch_reloc_brl(tag, xen_check_events); | ||
1129 | break; | ||
1130 | } | ||
1131 | default: | ||
1132 | /* nothing */ | ||
1133 | break; | ||
1134 | } | ||
1135 | return used; | ||
1136 | } | ||
1137 | #endif /* ASM_SUPPORTED */ | ||
1138 | |||
1139 | const struct paravirt_patch_branch_target xen_branch_target[] | ||
1140 | __initconst = { | ||
1141 | #define PARAVIRT_BR_TARGET(name, type) \ | ||
1142 | { \ | ||
1143 | &xen_ ## name, \ | ||
1144 | PARAVIRT_PATCH_TYPE_BR_ ## type, \ | ||
1145 | } | ||
1146 | PARAVIRT_BR_TARGET(switch_to, SWITCH_TO), | ||
1147 | PARAVIRT_BR_TARGET(leave_syscall, LEAVE_SYSCALL), | ||
1148 | PARAVIRT_BR_TARGET(work_processed_syscall, WORK_PROCESSED_SYSCALL), | ||
1149 | PARAVIRT_BR_TARGET(leave_kernel, LEAVE_KERNEL), | ||
1150 | }; | ||
1151 | |||
1152 | static void __init | ||
1153 | xen_patch_branch(unsigned long tag, unsigned long type) | ||
1154 | { | ||
1155 | const unsigned long nelem = | ||
1156 | sizeof(xen_branch_target) / sizeof(xen_branch_target[0]); | ||
1157 | __paravirt_patch_apply_branch(tag, type, xen_branch_target, nelem); | ||
1158 | } | ||
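The bundle-patching path above resolves each call site by patch type: xen_patch_bundle() hands the xen_patch_bundle_elems[] table to the common __paravirt_patch_apply_bundle() helper and falls back to ia64_native_patch_bundle() when no element matches. As a rough sketch (the field names sbundle/ebundle/type are inferred from the initializer order above, and find_patch_elem() is an invented name, not the in-tree helper), the lookup amounts to:

        /* illustrative only; the real work is done by the common paravirt
         * patch code. A NULL result makes xen_patch_bundle() fall back to
         * the native patcher. */
        static const struct paravirt_patch_bundle_elem *
        find_patch_elem(const struct paravirt_patch_bundle_elem *elems,
                        unsigned long nelems, unsigned long type)
        {
                unsigned long i;

                /* first element whose type matches the call site; its
                 * sbundle..ebundle range is then copied over the site */
                for (i = 0; i < nelems; i++)
                        if (elems[i].type == type)
                                return &elems[i];
                return NULL;
        }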
diff --git a/arch/m32r/kernel/process.c b/arch/m32r/kernel/process.c index 7103d91e1a2f..3e876f0baebc 100644 --- a/arch/m32r/kernel/process.c +++ b/arch/m32r/kernel/process.c | |||
@@ -225,7 +225,7 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu) | |||
225 | return 0; /* Task didn't use the fpu at all. */ | 225 | return 0; /* Task didn't use the fpu at all. */ |
226 | } | 226 | } |
227 | 227 | ||
228 | int copy_thread(int nr, unsigned long clone_flags, unsigned long spu, | 228 | int copy_thread(unsigned long clone_flags, unsigned long spu, |
229 | unsigned long unused, struct task_struct *tsk, struct pt_regs *regs) | 229 | unsigned long unused, struct task_struct *tsk, struct pt_regs *regs) |
230 | { | 230 | { |
231 | struct pt_regs *childregs = task_pt_regs(tsk); | 231 | struct pt_regs *childregs = task_pt_regs(tsk); |
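The first argument dropped from copy_thread() here (and in the sibling hunks throughout this series) was unused by every architecture, so the change is confined to the prototype; the generic fork path simply stops passing it. A minimal sketch of the updated call site, with variable and label names assumed rather than quoted from kernel/fork.c:

        /* inside copy_process(), after the arch-independent setup */
        retval = copy_thread(clone_flags, stack_start, stack_size, p, regs);
        if (retval)
                goto bad_fork_cleanup;  /* illustrative label; error path elided */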
diff --git a/arch/m68k/kernel/process.c b/arch/m68k/kernel/process.c index 632ce016014d..ec37fb56c127 100644 --- a/arch/m68k/kernel/process.c +++ b/arch/m68k/kernel/process.c | |||
@@ -233,7 +233,7 @@ asmlinkage int m68k_clone(struct pt_regs *regs) | |||
233 | parent_tidptr, child_tidptr); | 233 | parent_tidptr, child_tidptr); |
234 | } | 234 | } |
235 | 235 | ||
236 | int copy_thread(int nr, unsigned long clone_flags, unsigned long usp, | 236 | int copy_thread(unsigned long clone_flags, unsigned long usp, |
237 | unsigned long unused, | 237 | unsigned long unused, |
238 | struct task_struct * p, struct pt_regs * regs) | 238 | struct task_struct * p, struct pt_regs * regs) |
239 | { | 239 | { |
diff --git a/arch/m68knommu/kernel/process.c b/arch/m68knommu/kernel/process.c index 3f2d7745f31e..1e96c6eb6312 100644 --- a/arch/m68knommu/kernel/process.c +++ b/arch/m68knommu/kernel/process.c | |||
@@ -199,7 +199,7 @@ asmlinkage int m68k_clone(struct pt_regs *regs) | |||
199 | return do_fork(clone_flags, newsp, regs, 0, NULL, NULL); | 199 | return do_fork(clone_flags, newsp, regs, 0, NULL, NULL); |
200 | } | 200 | } |
201 | 201 | ||
202 | int copy_thread(int nr, unsigned long clone_flags, | 202 | int copy_thread(unsigned long clone_flags, |
203 | unsigned long usp, unsigned long topstk, | 203 | unsigned long usp, unsigned long topstk, |
204 | struct task_struct * p, struct pt_regs * regs) | 204 | struct task_struct * p, struct pt_regs * regs) |
205 | { | 205 | { |
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h index 10e82441b496..5b60a09a0f08 100644 --- a/arch/mips/include/asm/spinlock.h +++ b/arch/mips/include/asm/spinlock.h | |||
@@ -480,6 +480,8 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw) | |||
480 | return ret; | 480 | return ret; |
481 | } | 481 | } |
482 | 482 | ||
483 | #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) | ||
484 | #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) | ||
483 | 485 | ||
484 | #define _raw_spin_relax(lock) cpu_relax() | 486 | #define _raw_spin_relax(lock) cpu_relax() |
485 | #define _raw_read_relax(lock) cpu_relax() | 487 | #define _raw_read_relax(lock) cpu_relax() |
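The new __raw_read_lock_flags()/__raw_write_lock_flags() hooks exist so that an architecture may re-enable interrupts from the saved flags while it spins waiting for the lock; architectures without that optimization, as in the hunks here, simply alias them to the plain lock operations. A hedged sketch of the intended read_lock_irqsave()-style pattern at the generic layer (simplified, not quoted from kernel/spinlock.c; the raw_lock member access is assumed):

        unsigned long flags;

        local_irq_save(flags);
        /* with the plain alias this is exactly __raw_read_lock(); an arch
         * implementation may briefly restore 'flags' while waiting */
        __raw_read_lock_flags(&lock->raw_lock, flags);
        /* ... read-side critical section ... */
        __raw_read_unlock(&lock->raw_lock);
        local_irq_restore(flags);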
diff --git a/arch/mips/include/asm/unistd.h b/arch/mips/include/asm/unistd.h index a73e1531e151..40005010827c 100644 --- a/arch/mips/include/asm/unistd.h +++ b/arch/mips/include/asm/unistd.h | |||
@@ -350,16 +350,18 @@ | |||
350 | #define __NR_dup3 (__NR_Linux + 327) | 350 | #define __NR_dup3 (__NR_Linux + 327) |
351 | #define __NR_pipe2 (__NR_Linux + 328) | 351 | #define __NR_pipe2 (__NR_Linux + 328) |
352 | #define __NR_inotify_init1 (__NR_Linux + 329) | 352 | #define __NR_inotify_init1 (__NR_Linux + 329) |
353 | #define __NR_preadv (__NR_Linux + 330) | ||
354 | #define __NR_pwritev (__NR_Linux + 331) | ||
353 | 355 | ||
354 | /* | 356 | /* |
355 | * Offset of the last Linux o32 flavoured syscall | 357 | * Offset of the last Linux o32 flavoured syscall |
356 | */ | 358 | */ |
357 | #define __NR_Linux_syscalls 329 | 359 | #define __NR_Linux_syscalls 331 |
358 | 360 | ||
359 | #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ | 361 | #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ |
360 | 362 | ||
361 | #define __NR_O32_Linux 4000 | 363 | #define __NR_O32_Linux 4000 |
362 | #define __NR_O32_Linux_syscalls 329 | 364 | #define __NR_O32_Linux_syscalls 331 |
363 | 365 | ||
364 | #if _MIPS_SIM == _MIPS_SIM_ABI64 | 366 | #if _MIPS_SIM == _MIPS_SIM_ABI64 |
365 | 367 | ||
@@ -656,16 +658,18 @@ | |||
656 | #define __NR_dup3 (__NR_Linux + 286) | 658 | #define __NR_dup3 (__NR_Linux + 286) |
657 | #define __NR_pipe2 (__NR_Linux + 287) | 659 | #define __NR_pipe2 (__NR_Linux + 287) |
658 | #define __NR_inotify_init1 (__NR_Linux + 288) | 660 | #define __NR_inotify_init1 (__NR_Linux + 288) |
661 | #define __NR_preadv (__NR_Linux + 289) | ||
662 | #define __NR_pwritev (__NR_Linux + 290) | ||
659 | 663 | ||
660 | /* | 664 | /* |
661 | * Offset of the last Linux 64-bit flavoured syscall | 665 | * Offset of the last Linux 64-bit flavoured syscall |
662 | */ | 666 | */ |
663 | #define __NR_Linux_syscalls 288 | 667 | #define __NR_Linux_syscalls 290 |
664 | 668 | ||
665 | #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ | 669 | #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ |
666 | 670 | ||
667 | #define __NR_64_Linux 5000 | 671 | #define __NR_64_Linux 5000 |
668 | #define __NR_64_Linux_syscalls 288 | 672 | #define __NR_64_Linux_syscalls 290 |
669 | 673 | ||
670 | #if _MIPS_SIM == _MIPS_SIM_NABI32 | 674 | #if _MIPS_SIM == _MIPS_SIM_NABI32 |
671 | 675 | ||
@@ -966,16 +970,18 @@ | |||
966 | #define __NR_dup3 (__NR_Linux + 290) | 970 | #define __NR_dup3 (__NR_Linux + 290) |
967 | #define __NR_pipe2 (__NR_Linux + 291) | 971 | #define __NR_pipe2 (__NR_Linux + 291) |
968 | #define __NR_inotify_init1 (__NR_Linux + 292) | 972 | #define __NR_inotify_init1 (__NR_Linux + 292) |
973 | #define __NR_preadv (__NR_Linux + 293) | ||
974 | #define __NR_pwritev (__NR_Linux + 294) | ||
969 | 975 | ||
970 | /* | 976 | /* |
971 | * Offset of the last N32 flavoured syscall | 977 | * Offset of the last N32 flavoured syscall |
972 | */ | 978 | */ |
973 | #define __NR_Linux_syscalls 292 | 979 | #define __NR_Linux_syscalls 294 |
974 | 980 | ||
975 | #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ | 981 | #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ |
976 | 982 | ||
977 | #define __NR_N32_Linux 6000 | 983 | #define __NR_N32_Linux 6000 |
978 | #define __NR_N32_Linux_syscalls 292 | 984 | #define __NR_N32_Linux_syscalls 294 |
979 | 985 | ||
980 | #ifdef __KERNEL__ | 986 | #ifdef __KERNEL__ |
981 | 987 | ||
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index ca2e4026ad20..1eaaa450e20c 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c | |||
@@ -99,7 +99,7 @@ void flush_thread(void) | |||
99 | { | 99 | { |
100 | } | 100 | } |
101 | 101 | ||
102 | int copy_thread(int nr, unsigned long clone_flags, unsigned long usp, | 102 | int copy_thread(unsigned long clone_flags, unsigned long usp, |
103 | unsigned long unused, struct task_struct *p, struct pt_regs *regs) | 103 | unsigned long unused, struct task_struct *p, struct pt_regs *regs) |
104 | { | 104 | { |
105 | struct thread_info *ti = task_thread_info(p); | 105 | struct thread_info *ti = task_thread_info(p); |
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index 9ab70c3b5be6..0b31b9bda048 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S | |||
@@ -650,6 +650,8 @@ einval: li v0, -ENOSYS | |||
650 | sys sys_dup3 3 | 650 | sys sys_dup3 3 |
651 | sys sys_pipe2 2 | 651 | sys sys_pipe2 2 |
652 | sys sys_inotify_init1 1 | 652 | sys sys_inotify_init1 1 |
653 | sys sys_preadv 6 /* 4330 */ | ||
654 | sys sys_pwritev 6 | ||
653 | .endm | 655 | .endm |
654 | 656 | ||
655 | /* We pre-compute the number of _instruction_ bytes needed to | 657 | /* We pre-compute the number of _instruction_ bytes needed to |
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S index 9b4698667154..c647fd6e722f 100644 --- a/arch/mips/kernel/scall64-64.S +++ b/arch/mips/kernel/scall64-64.S | |||
@@ -487,4 +487,6 @@ sys_call_table: | |||
487 | PTR sys_dup3 | 487 | PTR sys_dup3 |
488 | PTR sys_pipe2 | 488 | PTR sys_pipe2 |
489 | PTR sys_inotify_init1 | 489 | PTR sys_inotify_init1 |
490 | PTR sys_preadv | ||
491 | PTR sys_pwritev /* 5390 */ | ||
490 | .size sys_call_table,.-sys_call_table | 492 | .size sys_call_table,.-sys_call_table |
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index f61d6b0e5731..c2c16ef9218f 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S | |||
@@ -413,4 +413,6 @@ EXPORT(sysn32_call_table) | |||
413 | PTR sys_dup3 /* 5290 */ | 413 | PTR sys_dup3 /* 5290 */ |
414 | PTR sys_pipe2 | 414 | PTR sys_pipe2 |
415 | PTR sys_inotify_init1 | 415 | PTR sys_inotify_init1 |
416 | PTR sys_preadv | ||
417 | PTR sys_pwritev | ||
416 | .size sysn32_call_table,.-sysn32_call_table | 418 | .size sysn32_call_table,.-sysn32_call_table |
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index 60997f1f69d4..002fac27021e 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S | |||
@@ -533,4 +533,6 @@ sys_call_table: | |||
533 | PTR sys_dup3 | 533 | PTR sys_dup3 |
534 | PTR sys_pipe2 | 534 | PTR sys_pipe2 |
535 | PTR sys_inotify_init1 | 535 | PTR sys_inotify_init1 |
536 | PTR compat_sys_preadv /* 4330 */ | ||
537 | PTR compat_sys_pwritev | ||
536 | .size sys_call_table,.-sys_call_table | 538 | .size sys_call_table,.-sys_call_table |
diff --git a/arch/mn10300/kernel/process.c b/arch/mn10300/kernel/process.c index b28c9a60445b..234cf344cdce 100644 --- a/arch/mn10300/kernel/process.c +++ b/arch/mn10300/kernel/process.c | |||
@@ -193,7 +193,7 @@ void prepare_to_copy(struct task_struct *tsk) | |||
193 | * set up the kernel stack for a new thread and copy arch-specific thread | 193 | * set up the kernel stack for a new thread and copy arch-specific thread |
194 | * control information | 194 | * control information |
195 | */ | 195 | */ |
196 | int copy_thread(int nr, unsigned long clone_flags, | 196 | int copy_thread(unsigned long clone_flags, |
197 | unsigned long c_usp, unsigned long ustk_size, | 197 | unsigned long c_usp, unsigned long ustk_size, |
198 | struct task_struct *p, struct pt_regs *kregs) | 198 | struct task_struct *p, struct pt_regs *kregs) |
199 | { | 199 | { |
diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h index f3d2090a18dc..fae03e136fa8 100644 --- a/arch/parisc/include/asm/spinlock.h +++ b/arch/parisc/include/asm/spinlock.h | |||
@@ -187,6 +187,9 @@ static __inline__ int __raw_write_can_lock(raw_rwlock_t *rw) | |||
187 | return !rw->counter; | 187 | return !rw->counter; |
188 | } | 188 | } |
189 | 189 | ||
190 | #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) | ||
191 | #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) | ||
192 | |||
190 | #define _raw_spin_relax(lock) cpu_relax() | 193 | #define _raw_spin_relax(lock) cpu_relax() |
191 | #define _raw_read_relax(lock) cpu_relax() | 194 | #define _raw_read_relax(lock) cpu_relax() |
192 | #define _raw_write_relax(lock) cpu_relax() | 195 | #define _raw_write_relax(lock) cpu_relax() |
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c index b80e02a4d81d..8aa591ed9127 100644 --- a/arch/parisc/kernel/process.c +++ b/arch/parisc/kernel/process.c | |||
@@ -263,7 +263,7 @@ sys_vfork(struct pt_regs *regs) | |||
263 | } | 263 | } |
264 | 264 | ||
265 | int | 265 | int |
266 | copy_thread(int nr, unsigned long clone_flags, unsigned long usp, | 266 | copy_thread(unsigned long clone_flags, unsigned long usp, |
267 | unsigned long unused, /* in ia64 this is "user_stack_size" */ | 267 | unsigned long unused, /* in ia64 this is "user_stack_size" */ |
268 | struct task_struct * p, struct pt_regs * pregs) | 268 | struct task_struct * p, struct pt_regs * pregs) |
269 | { | 269 | { |
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug index 6aa0b5e087cd..a1098e23221f 100644 --- a/arch/powerpc/Kconfig.debug +++ b/arch/powerpc/Kconfig.debug | |||
@@ -27,16 +27,6 @@ config DEBUG_STACK_USAGE | |||
27 | 27 | ||
28 | This option will slow down process creation somewhat. | 28 | This option will slow down process creation somewhat. |
29 | 29 | ||
30 | config DEBUG_PAGEALLOC | ||
31 | bool "Debug page memory allocations" | ||
32 | depends on DEBUG_KERNEL && !HIBERNATION | ||
33 | depends on ARCH_SUPPORTS_DEBUG_PAGEALLOC | ||
34 | help | ||
35 | Unmap pages from the kernel linear mapping after free_pages(). | ||
36 | This results in a large slowdown, but helps to find certain types | ||
37 | of memory corruptions. | ||
38 | |||
39 | |||
40 | config HCALL_STATS | 30 | config HCALL_STATS |
41 | bool "Hypervisor call instrumentation" | 31 | bool "Hypervisor call instrumentation" |
42 | depends on PPC_PSERIES && DEBUG_FS | 32 | depends on PPC_PSERIES && DEBUG_FS |
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h index 36864364e601..c3b193121f81 100644 --- a/arch/powerpc/include/asm/spinlock.h +++ b/arch/powerpc/include/asm/spinlock.h | |||
@@ -287,6 +287,9 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw) | |||
287 | rw->lock = 0; | 287 | rw->lock = 0; |
288 | } | 288 | } |
289 | 289 | ||
290 | #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) | ||
291 | #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) | ||
292 | |||
290 | #define _raw_spin_relax(lock) __spin_yield(lock) | 293 | #define _raw_spin_relax(lock) __spin_yield(lock) |
291 | #define _raw_read_relax(lock) __rw_yield(lock) | 294 | #define _raw_read_relax(lock) __rw_yield(lock) |
292 | #define _raw_write_relax(lock) __rw_yield(lock) | 295 | #define _raw_write_relax(lock) __rw_yield(lock) |
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index eac064948780..7b44a33f03c2 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c | |||
@@ -598,7 +598,7 @@ void prepare_to_copy(struct task_struct *tsk) | |||
598 | /* | 598 | /* |
599 | * Copy a thread.. | 599 | * Copy a thread.. |
600 | */ | 600 | */ |
601 | int copy_thread(int nr, unsigned long clone_flags, unsigned long usp, | 601 | int copy_thread(unsigned long clone_flags, unsigned long usp, |
602 | unsigned long unused, struct task_struct *p, | 602 | unsigned long unused, struct task_struct *p, |
603 | struct pt_regs *regs) | 603 | struct pt_regs *regs) |
604 | { | 604 | { |
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c index d3694498f3af..819e59f6f7c7 100644 --- a/arch/powerpc/kernel/vio.c +++ b/arch/powerpc/kernel/vio.c | |||
@@ -482,7 +482,7 @@ static void vio_cmo_balance(struct work_struct *work) | |||
482 | cmo->excess.size = cmo->entitled - cmo->reserve.size; | 482 | cmo->excess.size = cmo->entitled - cmo->reserve.size; |
483 | cmo->excess.free = cmo->excess.size - need; | 483 | cmo->excess.free = cmo->excess.size - need; |
484 | 484 | ||
485 | cancel_delayed_work(container_of(work, struct delayed_work, work)); | 485 | cancel_delayed_work(to_delayed_work(work)); |
486 | spin_unlock_irqrestore(&vio_cmo.lock, flags); | 486 | spin_unlock_irqrestore(&vio_cmo.lock, flags); |
487 | } | 487 | } |
488 | 488 | ||
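The open-coded container_of() in vio_cmo_balance() is replaced by to_delayed_work(), which recovers the delayed_work that embeds the work_struct handed to the callback. Its expected shape (an assumption about <linux/workqueue.h>, not part of this hunk) is simply:

        static inline struct delayed_work *to_delayed_work(struct work_struct *work)
        {
                return container_of(work, struct delayed_work, work);
        }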
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c index 64f068540d0d..706eb5c7e2ee 100644 --- a/arch/powerpc/platforms/cell/spufs/inode.c +++ b/arch/powerpc/platforms/cell/spufs/inode.c | |||
@@ -635,7 +635,7 @@ long spufs_create(struct nameidata *nd, unsigned int flags, mode_t mode, | |||
635 | if (dentry->d_inode) | 635 | if (dentry->d_inode) |
636 | goto out_dput; | 636 | goto out_dput; |
637 | 637 | ||
638 | mode &= ~current->fs->umask; | 638 | mode &= ~current_umask(); |
639 | 639 | ||
640 | if (flags & SPU_CREATE_GANG) | 640 | if (flags & SPU_CREATE_GANG) |
641 | ret = spufs_create_gang(nd->path.dentry->d_inode, | 641 | ret = spufs_create_gang(nd->path.dentry->d_inode, |
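spufs_create() now masks the mode with current_umask() instead of reaching into current->fs directly. Behaviourally the helper is expected to amount to the sketch below (an assumption; the real accessor may also handle locking):

        /* assumed behaviour of the helper, not its in-tree definition */
        static inline int current_umask(void)
        {
                return current->fs->umask;
        }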
diff --git a/arch/s390/Kconfig.debug b/arch/s390/Kconfig.debug index 7e297a3cde34..2283933a9a93 100644 --- a/arch/s390/Kconfig.debug +++ b/arch/s390/Kconfig.debug | |||
@@ -6,13 +6,4 @@ config TRACE_IRQFLAGS_SUPPORT | |||
6 | 6 | ||
7 | source "lib/Kconfig.debug" | 7 | source "lib/Kconfig.debug" |
8 | 8 | ||
9 | config DEBUG_PAGEALLOC | ||
10 | bool "Debug page memory allocations" | ||
11 | depends on DEBUG_KERNEL | ||
12 | depends on ARCH_SUPPORTS_DEBUG_PAGEALLOC | ||
13 | help | ||
14 | Unmap pages from the kernel linear mapping after free_pages(). | ||
15 | This results in a slowdown, but helps to find certain types of | ||
16 | memory corruptions. | ||
17 | |||
18 | endmenu | 9 | endmenu |
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h index df84ae96915f..f3861b09ebb0 100644 --- a/arch/s390/include/asm/spinlock.h +++ b/arch/s390/include/asm/spinlock.h | |||
@@ -172,6 +172,9 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw) | |||
172 | return _raw_write_trylock_retry(rw); | 172 | return _raw_write_trylock_retry(rw); |
173 | } | 173 | } |
174 | 174 | ||
175 | #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) | ||
176 | #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) | ||
177 | |||
175 | #define _raw_read_relax(lock) cpu_relax() | 178 | #define _raw_read_relax(lock) cpu_relax() |
176 | #define _raw_write_relax(lock) cpu_relax() | 179 | #define _raw_write_relax(lock) cpu_relax() |
177 | 180 | ||
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index b48e961a38f6..a3acd8e60aff 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c | |||
@@ -160,7 +160,7 @@ void release_thread(struct task_struct *dead_task) | |||
160 | { | 160 | { |
161 | } | 161 | } |
162 | 162 | ||
163 | int copy_thread(int nr, unsigned long clone_flags, unsigned long new_stackp, | 163 | int copy_thread(unsigned long clone_flags, unsigned long new_stackp, |
164 | unsigned long unused, | 164 | unsigned long unused, |
165 | struct task_struct *p, struct pt_regs *regs) | 165 | struct task_struct *p, struct pt_regs *regs) |
166 | { | 166 | { |
diff --git a/arch/sh/include/asm/spinlock.h b/arch/sh/include/asm/spinlock.h index e793181d64da..60283565f89b 100644 --- a/arch/sh/include/asm/spinlock.h +++ b/arch/sh/include/asm/spinlock.h | |||
@@ -216,6 +216,9 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw) | |||
216 | return (oldval > (RW_LOCK_BIAS - 1)); | 216 | return (oldval > (RW_LOCK_BIAS - 1)); |
217 | } | 217 | } |
218 | 218 | ||
219 | #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) | ||
220 | #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) | ||
221 | |||
219 | #define _raw_spin_relax(lock) cpu_relax() | 222 | #define _raw_spin_relax(lock) cpu_relax() |
220 | #define _raw_read_relax(lock) cpu_relax() | 223 | #define _raw_read_relax(lock) cpu_relax() |
221 | #define _raw_write_relax(lock) cpu_relax() | 224 | #define _raw_write_relax(lock) cpu_relax() |
diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c index ddafbbbab2ab..694bc15f84fd 100644 --- a/arch/sh/kernel/process_32.c +++ b/arch/sh/kernel/process_32.c | |||
@@ -170,7 +170,7 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu) | |||
170 | 170 | ||
171 | asmlinkage void ret_from_fork(void); | 171 | asmlinkage void ret_from_fork(void); |
172 | 172 | ||
173 | int copy_thread(int nr, unsigned long clone_flags, unsigned long usp, | 173 | int copy_thread(unsigned long clone_flags, unsigned long usp, |
174 | unsigned long unused, | 174 | unsigned long unused, |
175 | struct task_struct *p, struct pt_regs *regs) | 175 | struct task_struct *p, struct pt_regs *regs) |
176 | { | 176 | { |
diff --git a/arch/sh/kernel/process_64.c b/arch/sh/kernel/process_64.c index c90c7e5e5fee..96be839040f8 100644 --- a/arch/sh/kernel/process_64.c +++ b/arch/sh/kernel/process_64.c | |||
@@ -425,7 +425,7 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu) | |||
425 | 425 | ||
426 | asmlinkage void ret_from_fork(void); | 426 | asmlinkage void ret_from_fork(void); |
427 | 427 | ||
428 | int copy_thread(int nr, unsigned long clone_flags, unsigned long usp, | 428 | int copy_thread(unsigned long clone_flags, unsigned long usp, |
429 | unsigned long unused, | 429 | unsigned long unused, |
430 | struct task_struct *p, struct pt_regs *regs) | 430 | struct task_struct *p, struct pt_regs *regs) |
431 | { | 431 | { |
diff --git a/arch/sparc/Kconfig.debug b/arch/sparc/Kconfig.debug index d001b42041a5..90d5fe223a74 100644 --- a/arch/sparc/Kconfig.debug +++ b/arch/sparc/Kconfig.debug | |||
@@ -22,15 +22,6 @@ config DEBUG_DCFLUSH | |||
22 | config STACK_DEBUG | 22 | config STACK_DEBUG |
23 | bool "Stack Overflow Detection Support" | 23 | bool "Stack Overflow Detection Support" |
24 | 24 | ||
25 | config DEBUG_PAGEALLOC | ||
26 | bool "Debug page memory allocations" | ||
27 | depends on DEBUG_KERNEL && !HIBERNATION | ||
28 | depends on ARCH_SUPPORTS_DEBUG_PAGEALLOC | ||
29 | help | ||
30 | Unmap pages from the kernel linear mapping after free_pages(). | ||
31 | This results in a large slowdown, but helps to find certain types | ||
32 | of memory corruptions. | ||
33 | |||
34 | config MCOUNT | 25 | config MCOUNT |
35 | bool | 26 | bool |
36 | depends on SPARC64 | 27 | depends on SPARC64 |
diff --git a/arch/sparc/include/asm/spinlock_32.h b/arch/sparc/include/asm/spinlock_32.h index bf2d532593e3..46f91ab66a50 100644 --- a/arch/sparc/include/asm/spinlock_32.h +++ b/arch/sparc/include/asm/spinlock_32.h | |||
@@ -177,6 +177,8 @@ static inline int __read_trylock(raw_rwlock_t *rw) | |||
177 | #define __raw_write_unlock(rw) do { (rw)->lock = 0; } while(0) | 177 | #define __raw_write_unlock(rw) do { (rw)->lock = 0; } while(0) |
178 | 178 | ||
179 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) | 179 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) |
180 | #define __raw_read_lock_flags(rw, flags) __raw_read_lock(rw) | ||
181 | #define __raw_write_lock_flags(rw, flags) __raw_write_lock(rw) | ||
180 | 182 | ||
181 | #define _raw_spin_relax(lock) cpu_relax() | 183 | #define _raw_spin_relax(lock) cpu_relax() |
182 | #define _raw_read_relax(lock) cpu_relax() | 184 | #define _raw_read_relax(lock) cpu_relax() |
diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h index c4d274d330e9..f6b2b92ad8d2 100644 --- a/arch/sparc/include/asm/spinlock_64.h +++ b/arch/sparc/include/asm/spinlock_64.h | |||
@@ -211,9 +211,11 @@ static int inline __write_trylock(raw_rwlock_t *lock) | |||
211 | } | 211 | } |
212 | 212 | ||
213 | #define __raw_read_lock(p) __read_lock(p) | 213 | #define __raw_read_lock(p) __read_lock(p) |
214 | #define __raw_read_lock_flags(p, f) __read_lock(p) | ||
214 | #define __raw_read_trylock(p) __read_trylock(p) | 215 | #define __raw_read_trylock(p) __read_trylock(p) |
215 | #define __raw_read_unlock(p) __read_unlock(p) | 216 | #define __raw_read_unlock(p) __read_unlock(p) |
216 | #define __raw_write_lock(p) __write_lock(p) | 217 | #define __raw_write_lock(p) __write_lock(p) |
218 | #define __raw_write_lock_flags(p, f) __write_lock(p) | ||
217 | #define __raw_write_unlock(p) __write_unlock(p) | 219 | #define __raw_write_unlock(p) __write_unlock(p) |
218 | #define __raw_write_trylock(p) __write_trylock(p) | 220 | #define __raw_write_trylock(p) __write_trylock(p) |
219 | 221 | ||
diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c index f4bee35a1b46..2830b415e214 100644 --- a/arch/sparc/kernel/process_32.c +++ b/arch/sparc/kernel/process_32.c | |||
@@ -455,7 +455,7 @@ asmlinkage int sparc_do_fork(unsigned long clone_flags, | |||
455 | */ | 455 | */ |
456 | extern void ret_from_fork(void); | 456 | extern void ret_from_fork(void); |
457 | 457 | ||
458 | int copy_thread(int nr, unsigned long clone_flags, unsigned long sp, | 458 | int copy_thread(unsigned long clone_flags, unsigned long sp, |
459 | unsigned long unused, | 459 | unsigned long unused, |
460 | struct task_struct *p, struct pt_regs *regs) | 460 | struct task_struct *p, struct pt_regs *regs) |
461 | { | 461 | { |
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c index a73954b87f0a..4041f94e7724 100644 --- a/arch/sparc/kernel/process_64.c +++ b/arch/sparc/kernel/process_64.c | |||
@@ -561,7 +561,7 @@ asmlinkage long sparc_do_fork(unsigned long clone_flags, | |||
561 | * Parent --> %o0 == childs pid, %o1 == 0 | 561 | * Parent --> %o0 == childs pid, %o1 == 0 |
562 | * Child --> %o0 == parents pid, %o1 == 1 | 562 | * Child --> %o0 == parents pid, %o1 == 1 |
563 | */ | 563 | */ |
564 | int copy_thread(int nr, unsigned long clone_flags, unsigned long sp, | 564 | int copy_thread(unsigned long clone_flags, unsigned long sp, |
565 | unsigned long unused, | 565 | unsigned long unused, |
566 | struct task_struct *p, struct pt_regs *regs) | 566 | struct task_struct *p, struct pt_regs *regs) |
567 | { | 567 | { |
diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c index 434224e2229f..434ba121e3c5 100644 --- a/arch/um/drivers/net_kern.c +++ b/arch/um/drivers/net_kern.c | |||
@@ -757,7 +757,7 @@ static int uml_inetaddr_event(struct notifier_block *this, unsigned long event, | |||
757 | void (*proc)(unsigned char *, unsigned char *, void *); | 757 | void (*proc)(unsigned char *, unsigned char *, void *); |
758 | unsigned char addr_buf[4], netmask_buf[4]; | 758 | unsigned char addr_buf[4], netmask_buf[4]; |
759 | 759 | ||
760 | if (dev->open != uml_net_open) | 760 | if (dev->netdev_ops->ndo_open != uml_net_open) |
761 | return NOTIFY_DONE; | 761 | return NOTIFY_DONE; |
762 | 762 | ||
763 | lp = netdev_priv(dev); | 763 | lp = netdev_priv(dev); |
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c index a1c6d07cac3e..4a28a1568d85 100644 --- a/arch/um/kernel/process.c +++ b/arch/um/kernel/process.c | |||
@@ -179,7 +179,7 @@ void fork_handler(void) | |||
179 | userspace(¤t->thread.regs.regs); | 179 | userspace(¤t->thread.regs.regs); |
180 | } | 180 | } |
181 | 181 | ||
182 | int copy_thread(int nr, unsigned long clone_flags, unsigned long sp, | 182 | int copy_thread(unsigned long clone_flags, unsigned long sp, |
183 | unsigned long stack_top, struct task_struct * p, | 183 | unsigned long stack_top, struct task_struct * p, |
184 | struct pt_regs *regs) | 184 | struct pt_regs *regs) |
185 | { | 185 | { |
diff --git a/arch/um/kernel/syscall.c b/arch/um/kernel/syscall.c index c4df705b8359..a4625c7b2bf9 100644 --- a/arch/um/kernel/syscall.c +++ b/arch/um/kernel/syscall.c | |||
@@ -127,7 +127,8 @@ int kernel_execve(const char *filename, char *const argv[], char *const envp[]) | |||
127 | 127 | ||
128 | fs = get_fs(); | 128 | fs = get_fs(); |
129 | set_fs(KERNEL_DS); | 129 | set_fs(KERNEL_DS); |
130 | ret = um_execve(filename, argv, envp); | 130 | ret = um_execve((char *)filename, (char __user *__user *)argv, |
131 | (char __user *__user *) envp); | ||
131 | set_fs(fs); | 132 | set_fs(fs); |
132 | 133 | ||
133 | return ret; | 134 | return ret; |
diff --git a/arch/um/sys-i386/sys_call_table.S b/arch/um/sys-i386/sys_call_table.S index 00e5f5203eea..c6260dd6ebb9 100644 --- a/arch/um/sys-i386/sys_call_table.S +++ b/arch/um/sys-i386/sys_call_table.S | |||
@@ -9,6 +9,17 @@ | |||
9 | 9 | ||
10 | #define old_mmap old_mmap_i386 | 10 | #define old_mmap old_mmap_i386 |
11 | 11 | ||
12 | #define ptregs_fork sys_fork | ||
13 | #define ptregs_execve sys_execve | ||
14 | #define ptregs_iopl sys_iopl | ||
15 | #define ptregs_vm86old sys_vm86old | ||
16 | #define ptregs_sigreturn sys_sigreturn | ||
17 | #define ptregs_clone sys_clone | ||
18 | #define ptregs_vm86 sys_vm86 | ||
19 | #define ptregs_rt_sigreturn sys_rt_sigreturn | ||
20 | #define ptregs_sigaltstack sys_sigaltstack | ||
21 | #define ptregs_vfork sys_vfork | ||
22 | |||
12 | .section .rodata,"a" | 23 | .section .rodata,"a" |
13 | 24 | ||
14 | #include "../../x86/kernel/syscall_table_32.S" | 25 | #include "../../x86/kernel/syscall_table_32.S" |
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug index a345cb5447a8..d8359e73317f 100644 --- a/arch/x86/Kconfig.debug +++ b/arch/x86/Kconfig.debug | |||
@@ -72,15 +72,6 @@ config DEBUG_STACK_USAGE | |||
72 | 72 | ||
73 | This option will slow down process creation somewhat. | 73 | This option will slow down process creation somewhat. |
74 | 74 | ||
75 | config DEBUG_PAGEALLOC | ||
76 | bool "Debug page memory allocations" | ||
77 | depends on DEBUG_KERNEL | ||
78 | depends on ARCH_SUPPORTS_DEBUG_PAGEALLOC | ||
79 | ---help--- | ||
80 | Unmap pages from the kernel linear mapping after free_pages(). | ||
81 | This results in a large slowdown, but helps to find certain types | ||
82 | of memory corruptions. | ||
83 | |||
84 | config DEBUG_PER_CPU_MAPS | 75 | config DEBUG_PER_CPU_MAPS |
85 | bool "Debug access to per_cpu maps" | 76 | bool "Debug access to per_cpu maps" |
86 | depends on DEBUG_KERNEL | 77 | depends on DEBUG_KERNEL |
diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c index 8c3c25f35578..5054c2ddd1a0 100644 --- a/arch/x86/boot/memory.c +++ b/arch/x86/boot/memory.c | |||
@@ -2,6 +2,7 @@ | |||
2 | * | 2 | * |
3 | * Copyright (C) 1991, 1992 Linus Torvalds | 3 | * Copyright (C) 1991, 1992 Linus Torvalds |
4 | * Copyright 2007 rPath, Inc. - All Rights Reserved | 4 | * Copyright 2007 rPath, Inc. - All Rights Reserved |
5 | * Copyright 2009 Intel Corporation; author H. Peter Anvin | ||
5 | * | 6 | * |
6 | * This file is part of the Linux kernel, and is made available under | 7 | * This file is part of the Linux kernel, and is made available under |
7 | * the terms of the GNU General Public License version 2. | 8 | * the terms of the GNU General Public License version 2. |
@@ -16,24 +17,38 @@ | |||
16 | 17 | ||
17 | #define SMAP 0x534d4150 /* ASCII "SMAP" */ | 18 | #define SMAP 0x534d4150 /* ASCII "SMAP" */ |
18 | 19 | ||
20 | struct e820_ext_entry { | ||
21 | struct e820entry std; | ||
22 | u32 ext_flags; | ||
23 | } __attribute__((packed)); | ||
24 | |||
19 | static int detect_memory_e820(void) | 25 | static int detect_memory_e820(void) |
20 | { | 26 | { |
21 | int count = 0; | 27 | int count = 0; |
22 | u32 next = 0; | 28 | u32 next = 0; |
23 | u32 size, id; | 29 | u32 size, id, edi; |
24 | u8 err; | 30 | u8 err; |
25 | struct e820entry *desc = boot_params.e820_map; | 31 | struct e820entry *desc = boot_params.e820_map; |
32 | static struct e820_ext_entry buf; /* static so it is zeroed */ | ||
33 | |||
34 | /* | ||
35 | * Set this here so that if the BIOS doesn't change this field | ||
36 | * but still doesn't change %ecx, we're still okay... | ||
37 | */ | ||
38 | buf.ext_flags = 1; | ||
26 | 39 | ||
27 | do { | 40 | do { |
28 | size = sizeof(struct e820entry); | 41 | size = sizeof buf; |
29 | 42 | ||
30 | /* Important: %edx is clobbered by some BIOSes, | 43 | /* Important: %edx and %esi are clobbered by some BIOSes, |
31 | so it must be either used for the error output | 44 | so they must be either used for the error output |
32 | or explicitly marked clobbered. */ | 45 | or explicitly marked clobbered. Given that, assume there |
33 | asm("int $0x15; setc %0" | 46 | is something out there clobbering %ebp and %edi, too. */ |
47 | asm("pushl %%ebp; int $0x15; popl %%ebp; setc %0" | ||
34 | : "=d" (err), "+b" (next), "=a" (id), "+c" (size), | 48 | : "=d" (err), "+b" (next), "=a" (id), "+c" (size), |
35 | "=m" (*desc) | 49 | "=D" (edi), "+m" (buf) |
36 | : "D" (desc), "d" (SMAP), "a" (0xe820)); | 50 | : "D" (&buf), "d" (SMAP), "a" (0xe820) |
51 | : "esi"); | ||
37 | 52 | ||
38 | /* BIOSes which terminate the chain with CF = 1 as opposed | 53 | /* BIOSes which terminate the chain with CF = 1 as opposed |
39 | to %ebx = 0 don't always report the SMAP signature on | 54 | to %ebx = 0 don't always report the SMAP signature on |
@@ -51,8 +66,14 @@ static int detect_memory_e820(void) | |||
51 | break; | 66 | break; |
52 | } | 67 | } |
53 | 68 | ||
69 | /* ACPI 3.0 added the extended flags support. If bit 0 | ||
70 | in the extended flags is zero, we're supposed to simply | ||
71 | ignore the entry -- a backwards incompatible change! */ | ||
72 | if (size > 20 && !(buf.ext_flags & 1)) | ||
73 | continue; | ||
74 | |||
75 | *desc++ = buf.std; | ||
54 | count++; | 76 | count++; |
55 | desc++; | ||
56 | } while (next && count < ARRAY_SIZE(boot_params.e820_map)); | 77 | } while (next && count < ARRAY_SIZE(boot_params.e820_map)); |
57 | 78 | ||
58 | return boot_params.e820_entries = count; | 79 | return boot_params.e820_entries = count; |
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S index db0c803170ab..a505202086e8 100644 --- a/arch/x86/ia32/ia32entry.S +++ b/arch/x86/ia32/ia32entry.S | |||
@@ -828,4 +828,6 @@ ia32_sys_call_table: | |||
828 | .quad sys_dup3 /* 330 */ | 828 | .quad sys_dup3 /* 330 */ |
829 | .quad sys_pipe2 | 829 | .quad sys_pipe2 |
830 | .quad sys_inotify_init1 | 830 | .quad sys_inotify_init1 |
831 | .quad compat_sys_preadv | ||
832 | .quad compat_sys_pwritev | ||
831 | ia32_syscall_end: | 833 | ia32_syscall_end: |
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h index a0301bfeb954..e545ea01abcf 100644 --- a/arch/x86/include/asm/pci.h +++ b/arch/x86/include/asm/pci.h | |||
@@ -89,12 +89,40 @@ extern void pci_iommu_alloc(void); | |||
89 | /* MSI arch hook */ | 89 | /* MSI arch hook */ |
90 | #define arch_setup_msi_irqs arch_setup_msi_irqs | 90 | #define arch_setup_msi_irqs arch_setup_msi_irqs |
91 | 91 | ||
92 | #endif /* __KERNEL__ */ | 92 | #define PCI_DMA_BUS_IS_PHYS (dma_ops->is_phys) |
93 | |||
94 | #if defined(CONFIG_X86_64) || defined(CONFIG_DMA_API_DEBUG) | ||
95 | |||
96 | #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ | ||
97 | dma_addr_t ADDR_NAME; | ||
98 | #define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \ | ||
99 | __u32 LEN_NAME; | ||
100 | #define pci_unmap_addr(PTR, ADDR_NAME) \ | ||
101 | ((PTR)->ADDR_NAME) | ||
102 | #define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \ | ||
103 | (((PTR)->ADDR_NAME) = (VAL)) | ||
104 | #define pci_unmap_len(PTR, LEN_NAME) \ | ||
105 | ((PTR)->LEN_NAME) | ||
106 | #define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ | ||
107 | (((PTR)->LEN_NAME) = (VAL)) | ||
93 | 108 | ||
94 | #ifdef CONFIG_X86_32 | ||
95 | # include "pci_32.h" | ||
96 | #else | 109 | #else |
97 | # include "pci_64.h" | 110 | |
111 | #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME[0]; | ||
112 | #define DECLARE_PCI_UNMAP_LEN(LEN_NAME) unsigned LEN_NAME[0]; | ||
113 | #define pci_unmap_addr(PTR, ADDR_NAME) sizeof((PTR)->ADDR_NAME) | ||
114 | #define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \ | ||
115 | do { break; } while (pci_unmap_addr(PTR, ADDR_NAME)) | ||
116 | #define pci_unmap_len(PTR, LEN_NAME) sizeof((PTR)->LEN_NAME) | ||
117 | #define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ | ||
118 | do { break; } while (pci_unmap_len(PTR, LEN_NAME)) | ||
119 | |||
120 | #endif | ||
121 | |||
122 | #endif /* __KERNEL__ */ | ||
123 | |||
124 | #ifdef CONFIG_X86_64 | ||
125 | #include "pci_64.h" | ||
98 | #endif | 126 | #endif |
99 | 127 | ||
100 | /* implement the pci_ DMA API in terms of the generic device dma_ one */ | 128 | /* implement the pci_ DMA API in terms of the generic device dma_ one */ |
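With the unmap helpers now shared between 32- and 64-bit in pci.h, driver usage stays the same as documented for the DMA API: declare the bookkeeping fields in the driver's per-buffer state and go through the accessors, so that configurations where unmap is a no-op pay no space cost. A hedged sketch (the struct name and the 'pdev', 'skb', 'entry' and 'size' variables are illustrative, not taken from this patch):

        struct ring_entry {
                struct sk_buff *skb;
                DECLARE_PCI_UNMAP_ADDR(mapping)
                DECLARE_PCI_UNMAP_LEN(len)
        };

        /* map: remember address and length for the later unmap */
        dma_addr_t dma = pci_map_single(pdev, skb->data, size, PCI_DMA_TODEVICE);
        pci_unmap_addr_set(entry, mapping, dma);
        pci_unmap_len_set(entry, len, size);

        /* unmap: on 32-bit without CONFIG_DMA_API_DEBUG the fields and these
         * accessors compile away, per the #else branch above */
        pci_unmap_single(pdev, pci_unmap_addr(entry, mapping),
                         pci_unmap_len(entry, len), PCI_DMA_TODEVICE);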
diff --git a/arch/x86/include/asm/pci_32.h b/arch/x86/include/asm/pci_32.h deleted file mode 100644 index 6f1213a6ef4f..000000000000 --- a/arch/x86/include/asm/pci_32.h +++ /dev/null | |||
@@ -1,34 +0,0 @@ | |||
1 | #ifndef _ASM_X86_PCI_32_H | ||
2 | #define _ASM_X86_PCI_32_H | ||
3 | |||
4 | |||
5 | #ifdef __KERNEL__ | ||
6 | |||
7 | |||
8 | /* Dynamic DMA mapping stuff. | ||
9 | * i386 has everything mapped statically. | ||
10 | */ | ||
11 | |||
12 | struct pci_dev; | ||
13 | |||
14 | /* The PCI address space does equal the physical memory | ||
15 | * address space. The networking and block device layers use | ||
16 | * this boolean for bounce buffer decisions. | ||
17 | */ | ||
18 | #define PCI_DMA_BUS_IS_PHYS (1) | ||
19 | |||
20 | /* pci_unmap_{page,single} is a nop so... */ | ||
21 | #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME[0]; | ||
22 | #define DECLARE_PCI_UNMAP_LEN(LEN_NAME) unsigned LEN_NAME[0]; | ||
23 | #define pci_unmap_addr(PTR, ADDR_NAME) sizeof((PTR)->ADDR_NAME) | ||
24 | #define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \ | ||
25 | do { break; } while (pci_unmap_addr(PTR, ADDR_NAME)) | ||
26 | #define pci_unmap_len(PTR, LEN_NAME) sizeof((PTR)->LEN_NAME) | ||
27 | #define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ | ||
28 | do { break; } while (pci_unmap_len(PTR, LEN_NAME)) | ||
29 | |||
30 | |||
31 | #endif /* __KERNEL__ */ | ||
32 | |||
33 | |||
34 | #endif /* _ASM_X86_PCI_32_H */ | ||
diff --git a/arch/x86/include/asm/pci_64.h b/arch/x86/include/asm/pci_64.h index 4da207982777..ae5e40f67daf 100644 --- a/arch/x86/include/asm/pci_64.h +++ b/arch/x86/include/asm/pci_64.h | |||
@@ -24,28 +24,6 @@ extern int (*pci_config_write)(int seg, int bus, int dev, int fn, | |||
24 | 24 | ||
25 | extern void dma32_reserve_bootmem(void); | 25 | extern void dma32_reserve_bootmem(void); |
26 | 26 | ||
27 | /* The PCI address space does equal the physical memory | ||
28 | * address space. The networking and block device layers use | ||
29 | * this boolean for bounce buffer decisions | ||
30 | * | ||
31 | * On AMD64 it mostly equals, but we set it to zero if a hardware | ||
32 | * IOMMU (gart) of sotware IOMMU (swiotlb) is available. | ||
33 | */ | ||
34 | #define PCI_DMA_BUS_IS_PHYS (dma_ops->is_phys) | ||
35 | |||
36 | #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ | ||
37 | dma_addr_t ADDR_NAME; | ||
38 | #define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \ | ||
39 | __u32 LEN_NAME; | ||
40 | #define pci_unmap_addr(PTR, ADDR_NAME) \ | ||
41 | ((PTR)->ADDR_NAME) | ||
42 | #define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \ | ||
43 | (((PTR)->ADDR_NAME) = (VAL)) | ||
44 | #define pci_unmap_len(PTR, LEN_NAME) \ | ||
45 | ((PTR)->LEN_NAME) | ||
46 | #define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ | ||
47 | (((PTR)->LEN_NAME) = (VAL)) | ||
48 | |||
49 | #endif /* __KERNEL__ */ | 27 | #endif /* __KERNEL__ */ |
50 | 28 | ||
51 | #endif /* _ASM_X86_PCI_64_H */ | 29 | #endif /* _ASM_X86_PCI_64_H */ |
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h index 3a5696656680..e5e6caffec87 100644 --- a/arch/x86/include/asm/spinlock.h +++ b/arch/x86/include/asm/spinlock.h | |||
@@ -295,6 +295,9 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw) | |||
295 | : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory"); | 295 | : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory"); |
296 | } | 296 | } |
297 | 297 | ||
298 | #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) | ||
299 | #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) | ||
300 | |||
298 | #define _raw_spin_relax(lock) cpu_relax() | 301 | #define _raw_spin_relax(lock) cpu_relax() |
299 | #define _raw_read_relax(lock) cpu_relax() | 302 | #define _raw_read_relax(lock) cpu_relax() |
300 | #define _raw_write_relax(lock) cpu_relax() | 303 | #define _raw_write_relax(lock) cpu_relax() |
diff --git a/arch/x86/include/asm/unistd_32.h b/arch/x86/include/asm/unistd_32.h index f2bba78430a4..6e72d74cf8dc 100644 --- a/arch/x86/include/asm/unistd_32.h +++ b/arch/x86/include/asm/unistd_32.h | |||
@@ -338,6 +338,8 @@ | |||
338 | #define __NR_dup3 330 | 338 | #define __NR_dup3 330 |
339 | #define __NR_pipe2 331 | 339 | #define __NR_pipe2 331 |
340 | #define __NR_inotify_init1 332 | 340 | #define __NR_inotify_init1 332 |
341 | #define __NR_preadv 333 | ||
342 | #define __NR_pwritev 334 | ||
341 | 343 | ||
342 | #ifdef __KERNEL__ | 344 | #ifdef __KERNEL__ |
343 | 345 | ||
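For userspace built against updated headers, the new 32-bit numbers can be exercised directly; the sketch below is illustrative only (it assumes an already-open descriptor 'fd') and passes the 64-bit file offset as the low/high word pair that sys_preadv() takes on 32-bit:

        #include <sys/syscall.h>
        #include <sys/uio.h>
        #include <unistd.h>

        char a[16], b[16];
        struct iovec iov[2] = { { a, sizeof(a) }, { b, sizeof(b) } };
        /* __NR_preadv == 333 per the hunk above; offset 0 passed as (low, high) */
        long n = syscall(__NR_preadv, fd, iov, 2, 0, 0);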
diff --git a/arch/x86/include/asm/unistd_64.h b/arch/x86/include/asm/unistd_64.h index d2e415e6666f..f81829462325 100644 --- a/arch/x86/include/asm/unistd_64.h +++ b/arch/x86/include/asm/unistd_64.h | |||
@@ -653,6 +653,10 @@ __SYSCALL(__NR_dup3, sys_dup3) | |||
653 | __SYSCALL(__NR_pipe2, sys_pipe2) | 653 | __SYSCALL(__NR_pipe2, sys_pipe2) |
654 | #define __NR_inotify_init1 294 | 654 | #define __NR_inotify_init1 294 |
655 | __SYSCALL(__NR_inotify_init1, sys_inotify_init1) | 655 | __SYSCALL(__NR_inotify_init1, sys_inotify_init1) |
656 | #define __NR_preadv 295 | ||
657 | __SYSCALL(__NR_preadv, sys_preadv) | ||
658 | #define __NR_pwritev 296 | ||
659 | __SYSCALL(__NR_pwritev, sys_pwritev) | ||
656 | 660 | ||
657 | 661 | ||
658 | #ifndef __NO_STUBS | 662 | #ifndef __NO_STUBS |
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h index 9f4dfba33b28..d3a98ea1062e 100644 --- a/arch/x86/include/asm/uv/uv_hub.h +++ b/arch/x86/include/asm/uv/uv_hub.h | |||
@@ -11,11 +11,13 @@ | |||
11 | #ifndef _ASM_X86_UV_UV_HUB_H | 11 | #ifndef _ASM_X86_UV_UV_HUB_H |
12 | #define _ASM_X86_UV_UV_HUB_H | 12 | #define _ASM_X86_UV_UV_HUB_H |
13 | 13 | ||
14 | #ifdef CONFIG_X86_64 | ||
14 | #include <linux/numa.h> | 15 | #include <linux/numa.h> |
15 | #include <linux/percpu.h> | 16 | #include <linux/percpu.h> |
16 | #include <linux/timer.h> | 17 | #include <linux/timer.h> |
17 | #include <asm/types.h> | 18 | #include <asm/types.h> |
18 | #include <asm/percpu.h> | 19 | #include <asm/percpu.h> |
20 | #include <asm/uv/uv_mmrs.h> | ||
19 | 21 | ||
20 | 22 | ||
21 | /* | 23 | /* |
@@ -397,6 +399,7 @@ static inline void uv_set_scir_bits(unsigned char value) | |||
397 | uv_write_local_mmr8(uv_hub_info->scir.offset, value); | 399 | uv_write_local_mmr8(uv_hub_info->scir.offset, value); |
398 | } | 400 | } |
399 | } | 401 | } |
402 | |||
400 | static inline void uv_set_cpu_scir_bits(int cpu, unsigned char value) | 403 | static inline void uv_set_cpu_scir_bits(int cpu, unsigned char value) |
401 | { | 404 | { |
402 | if (uv_cpu_hub_info(cpu)->scir.state != value) { | 405 | if (uv_cpu_hub_info(cpu)->scir.state != value) { |
@@ -405,4 +408,15 @@ static inline void uv_set_cpu_scir_bits(int cpu, unsigned char value) | |||
405 | } | 408 | } |
406 | } | 409 | } |
407 | 410 | ||
411 | static inline void uv_hub_send_ipi(int pnode, int apicid, int vector) | ||
412 | { | ||
413 | unsigned long val; | ||
414 | |||
415 | val = (1UL << UVH_IPI_INT_SEND_SHFT) | | ||
416 | ((apicid & 0x3f) << UVH_IPI_INT_APIC_ID_SHFT) | | ||
417 | (vector << UVH_IPI_INT_VECTOR_SHFT); | ||
418 | uv_write_global_mmr64(pnode, UVH_IPI_INT, val); | ||
419 | } | ||
420 | |||
421 | #endif /* CONFIG_X86_64 */ | ||
408 | #endif /* _ASM_X86_UV_UV_HUB_H */ | 422 | #endif /* _ASM_X86_UV_UV_HUB_H */ |
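The new uv_hub_send_ipi() composes a single UVH_IPI_INT write from a pnode, an APIC id and a vector. A hedged example of kicking one CPU (the pnode/apicid lookups are assumptions about the surrounding UV support code, not part of this hunk):

        int apicid = per_cpu(x86_cpu_to_apicid, cpu);
        int pnode  = uv_apicid_to_pnode(apicid);

        uv_hub_send_ipi(pnode, apicid, vector);  /* e.g. a TLB-shootdown vector */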
diff --git a/arch/x86/include/asm/uv/uv_mmrs.h b/arch/x86/include/asm/uv/uv_mmrs.h index dd627793a234..db68ac8a5ac2 100644 --- a/arch/x86/include/asm/uv/uv_mmrs.h +++ b/arch/x86/include/asm/uv/uv_mmrs.h | |||
@@ -1,3 +1,4 @@ | |||
1 | |||
1 | /* | 2 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public | 3 | * This file is subject to the terms and conditions of the GNU General Public |
3 | * License. See the file "COPYING" in the main directory of this archive | 4 | * License. See the file "COPYING" in the main directory of this archive |
@@ -243,6 +244,158 @@ union uvh_event_occurred0_u { | |||
243 | #define UVH_EVENT_OCCURRED0_ALIAS_32 0x005f0 | 244 | #define UVH_EVENT_OCCURRED0_ALIAS_32 0x005f0 |
244 | 245 | ||
245 | /* ========================================================================= */ | 246 | /* ========================================================================= */ |
247 | /* UVH_GR0_TLB_INT0_CONFIG */ | ||
248 | /* ========================================================================= */ | ||
249 | #define UVH_GR0_TLB_INT0_CONFIG 0x61b00UL | ||
250 | |||
251 | #define UVH_GR0_TLB_INT0_CONFIG_VECTOR_SHFT 0 | ||
252 | #define UVH_GR0_TLB_INT0_CONFIG_VECTOR_MASK 0x00000000000000ffUL | ||
253 | #define UVH_GR0_TLB_INT0_CONFIG_DM_SHFT 8 | ||
254 | #define UVH_GR0_TLB_INT0_CONFIG_DM_MASK 0x0000000000000700UL | ||
255 | #define UVH_GR0_TLB_INT0_CONFIG_DESTMODE_SHFT 11 | ||
256 | #define UVH_GR0_TLB_INT0_CONFIG_DESTMODE_MASK 0x0000000000000800UL | ||
257 | #define UVH_GR0_TLB_INT0_CONFIG_STATUS_SHFT 12 | ||
258 | #define UVH_GR0_TLB_INT0_CONFIG_STATUS_MASK 0x0000000000001000UL | ||
259 | #define UVH_GR0_TLB_INT0_CONFIG_P_SHFT 13 | ||
260 | #define UVH_GR0_TLB_INT0_CONFIG_P_MASK 0x0000000000002000UL | ||
261 | #define UVH_GR0_TLB_INT0_CONFIG_T_SHFT 15 | ||
262 | #define UVH_GR0_TLB_INT0_CONFIG_T_MASK 0x0000000000008000UL | ||
263 | #define UVH_GR0_TLB_INT0_CONFIG_M_SHFT 16 | ||
264 | #define UVH_GR0_TLB_INT0_CONFIG_M_MASK 0x0000000000010000UL | ||
265 | #define UVH_GR0_TLB_INT0_CONFIG_APIC_ID_SHFT 32 | ||
266 | #define UVH_GR0_TLB_INT0_CONFIG_APIC_ID_MASK 0xffffffff00000000UL | ||
267 | |||
268 | union uvh_gr0_tlb_int0_config_u { | ||
269 | unsigned long v; | ||
270 | struct uvh_gr0_tlb_int0_config_s { | ||
271 | unsigned long vector_ : 8; /* RW */ | ||
272 | unsigned long dm : 3; /* RW */ | ||
273 | unsigned long destmode : 1; /* RW */ | ||
274 | unsigned long status : 1; /* RO */ | ||
275 | unsigned long p : 1; /* RO */ | ||
276 | unsigned long rsvd_14 : 1; /* */ | ||
277 | unsigned long t : 1; /* RO */ | ||
278 | unsigned long m : 1; /* RW */ | ||
279 | unsigned long rsvd_17_31: 15; /* */ | ||
280 | unsigned long apic_id : 32; /* RW */ | ||
281 | } s; | ||
282 | }; | ||
283 | |||
284 | /* ========================================================================= */ | ||
285 | /* UVH_GR0_TLB_INT1_CONFIG */ | ||
286 | /* ========================================================================= */ | ||
287 | #define UVH_GR0_TLB_INT1_CONFIG 0x61b40UL | ||
288 | |||
289 | #define UVH_GR0_TLB_INT1_CONFIG_VECTOR_SHFT 0 | ||
290 | #define UVH_GR0_TLB_INT1_CONFIG_VECTOR_MASK 0x00000000000000ffUL | ||
291 | #define UVH_GR0_TLB_INT1_CONFIG_DM_SHFT 8 | ||
292 | #define UVH_GR0_TLB_INT1_CONFIG_DM_MASK 0x0000000000000700UL | ||
293 | #define UVH_GR0_TLB_INT1_CONFIG_DESTMODE_SHFT 11 | ||
294 | #define UVH_GR0_TLB_INT1_CONFIG_DESTMODE_MASK 0x0000000000000800UL | ||
295 | #define UVH_GR0_TLB_INT1_CONFIG_STATUS_SHFT 12 | ||
296 | #define UVH_GR0_TLB_INT1_CONFIG_STATUS_MASK 0x0000000000001000UL | ||
297 | #define UVH_GR0_TLB_INT1_CONFIG_P_SHFT 13 | ||
298 | #define UVH_GR0_TLB_INT1_CONFIG_P_MASK 0x0000000000002000UL | ||
299 | #define UVH_GR0_TLB_INT1_CONFIG_T_SHFT 15 | ||
300 | #define UVH_GR0_TLB_INT1_CONFIG_T_MASK 0x0000000000008000UL | ||
301 | #define UVH_GR0_TLB_INT1_CONFIG_M_SHFT 16 | ||
302 | #define UVH_GR0_TLB_INT1_CONFIG_M_MASK 0x0000000000010000UL | ||
303 | #define UVH_GR0_TLB_INT1_CONFIG_APIC_ID_SHFT 32 | ||
304 | #define UVH_GR0_TLB_INT1_CONFIG_APIC_ID_MASK 0xffffffff00000000UL | ||
305 | |||
306 | union uvh_gr0_tlb_int1_config_u { | ||
307 | unsigned long v; | ||
308 | struct uvh_gr0_tlb_int1_config_s { | ||
309 | unsigned long vector_ : 8; /* RW */ | ||
310 | unsigned long dm : 3; /* RW */ | ||
311 | unsigned long destmode : 1; /* RW */ | ||
312 | unsigned long status : 1; /* RO */ | ||
313 | unsigned long p : 1; /* RO */ | ||
314 | unsigned long rsvd_14 : 1; /* */ | ||
315 | unsigned long t : 1; /* RO */ | ||
316 | unsigned long m : 1; /* RW */ | ||
317 | unsigned long rsvd_17_31: 15; /* */ | ||
318 | unsigned long apic_id : 32; /* RW */ | ||
319 | } s; | ||
320 | }; | ||
321 | |||
322 | /* ========================================================================= */ | ||
323 | /* UVH_GR1_TLB_INT0_CONFIG */ | ||
324 | /* ========================================================================= */ | ||
325 | #define UVH_GR1_TLB_INT0_CONFIG 0x61f00UL | ||
326 | |||
327 | #define UVH_GR1_TLB_INT0_CONFIG_VECTOR_SHFT 0 | ||
328 | #define UVH_GR1_TLB_INT0_CONFIG_VECTOR_MASK 0x00000000000000ffUL | ||
329 | #define UVH_GR1_TLB_INT0_CONFIG_DM_SHFT 8 | ||
330 | #define UVH_GR1_TLB_INT0_CONFIG_DM_MASK 0x0000000000000700UL | ||
331 | #define UVH_GR1_TLB_INT0_CONFIG_DESTMODE_SHFT 11 | ||
332 | #define UVH_GR1_TLB_INT0_CONFIG_DESTMODE_MASK 0x0000000000000800UL | ||
333 | #define UVH_GR1_TLB_INT0_CONFIG_STATUS_SHFT 12 | ||
334 | #define UVH_GR1_TLB_INT0_CONFIG_STATUS_MASK 0x0000000000001000UL | ||
335 | #define UVH_GR1_TLB_INT0_CONFIG_P_SHFT 13 | ||
336 | #define UVH_GR1_TLB_INT0_CONFIG_P_MASK 0x0000000000002000UL | ||
337 | #define UVH_GR1_TLB_INT0_CONFIG_T_SHFT 15 | ||
338 | #define UVH_GR1_TLB_INT0_CONFIG_T_MASK 0x0000000000008000UL | ||
339 | #define UVH_GR1_TLB_INT0_CONFIG_M_SHFT 16 | ||
340 | #define UVH_GR1_TLB_INT0_CONFIG_M_MASK 0x0000000000010000UL | ||
341 | #define UVH_GR1_TLB_INT0_CONFIG_APIC_ID_SHFT 32 | ||
342 | #define UVH_GR1_TLB_INT0_CONFIG_APIC_ID_MASK 0xffffffff00000000UL | ||
343 | |||
344 | union uvh_gr1_tlb_int0_config_u { | ||
345 | unsigned long v; | ||
346 | struct uvh_gr1_tlb_int0_config_s { | ||
347 | unsigned long vector_ : 8; /* RW */ | ||
348 | unsigned long dm : 3; /* RW */ | ||
349 | unsigned long destmode : 1; /* RW */ | ||
350 | unsigned long status : 1; /* RO */ | ||
351 | unsigned long p : 1; /* RO */ | ||
352 | unsigned long rsvd_14 : 1; /* */ | ||
353 | unsigned long t : 1; /* RO */ | ||
354 | unsigned long m : 1; /* RW */ | ||
355 | unsigned long rsvd_17_31: 15; /* */ | ||
356 | unsigned long apic_id : 32; /* RW */ | ||
357 | } s; | ||
358 | }; | ||
359 | |||
360 | /* ========================================================================= */ | ||
361 | /* UVH_GR1_TLB_INT1_CONFIG */ | ||
362 | /* ========================================================================= */ | ||
363 | #define UVH_GR1_TLB_INT1_CONFIG 0x61f40UL | ||
364 | |||
365 | #define UVH_GR1_TLB_INT1_CONFIG_VECTOR_SHFT 0 | ||
366 | #define UVH_GR1_TLB_INT1_CONFIG_VECTOR_MASK 0x00000000000000ffUL | ||
367 | #define UVH_GR1_TLB_INT1_CONFIG_DM_SHFT 8 | ||
368 | #define UVH_GR1_TLB_INT1_CONFIG_DM_MASK 0x0000000000000700UL | ||
369 | #define UVH_GR1_TLB_INT1_CONFIG_DESTMODE_SHFT 11 | ||
370 | #define UVH_GR1_TLB_INT1_CONFIG_DESTMODE_MASK 0x0000000000000800UL | ||
371 | #define UVH_GR1_TLB_INT1_CONFIG_STATUS_SHFT 12 | ||
372 | #define UVH_GR1_TLB_INT1_CONFIG_STATUS_MASK 0x0000000000001000UL | ||
373 | #define UVH_GR1_TLB_INT1_CONFIG_P_SHFT 13 | ||
374 | #define UVH_GR1_TLB_INT1_CONFIG_P_MASK 0x0000000000002000UL | ||
375 | #define UVH_GR1_TLB_INT1_CONFIG_T_SHFT 15 | ||
376 | #define UVH_GR1_TLB_INT1_CONFIG_T_MASK 0x0000000000008000UL | ||
377 | #define UVH_GR1_TLB_INT1_CONFIG_M_SHFT 16 | ||
378 | #define UVH_GR1_TLB_INT1_CONFIG_M_MASK 0x0000000000010000UL | ||
379 | #define UVH_GR1_TLB_INT1_CONFIG_APIC_ID_SHFT 32 | ||
380 | #define UVH_GR1_TLB_INT1_CONFIG_APIC_ID_MASK 0xffffffff00000000UL | ||
381 | |||
382 | union uvh_gr1_tlb_int1_config_u { | ||
383 | unsigned long v; | ||
384 | struct uvh_gr1_tlb_int1_config_s { | ||
385 | unsigned long vector_ : 8; /* RW */ | ||
386 | unsigned long dm : 3; /* RW */ | ||
387 | unsigned long destmode : 1; /* RW */ | ||
388 | unsigned long status : 1; /* RO */ | ||
389 | unsigned long p : 1; /* RO */ | ||
390 | unsigned long rsvd_14 : 1; /* */ | ||
391 | unsigned long t : 1; /* RO */ | ||
392 | unsigned long m : 1; /* RW */ | ||
393 | unsigned long rsvd_17_31: 15; /* */ | ||
394 | unsigned long apic_id : 32; /* RW */ | ||
395 | } s; | ||
396 | }; | ||
397 | |||
398 | /* ========================================================================= */ | ||
246 | /* UVH_INT_CMPB */ | 399 | /* UVH_INT_CMPB */ |
247 | /* ========================================================================= */ | 400 | /* ========================================================================= */ |
248 | #define UVH_INT_CMPB 0x22080UL | 401 | #define UVH_INT_CMPB 0x22080UL |
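The four new GRx_TLB_INTx_CONFIG registers added above share one layout: an APIC-style interrupt configuration word (vector, delivery mode, destination mode, status, mask bit, destination APIC id). A minimal sketch of how such a register could be programmed with the new union and macros follows; the helper name example_program_gr0_tlb_int0() and the chosen field values are illustrative only and not part of this patch, while uv_write_global_mmr64() is the existing UV MMR write accessor.

static void example_program_gr0_tlb_int0(int pnode, int vector,
					 unsigned long apicid)
{
	union uvh_gr0_tlb_int0_config_u cfg;

	cfg.v = 0;
	cfg.s.vector_  = vector;	/* bits  7:0  - interrupt vector      */
	cfg.s.dm       = 0;		/* bits 10:8  - delivery mode: fixed  */
	cfg.s.destmode = 0;		/* bit  11    - physical dest mode    */
	cfg.s.m        = 0;		/* bit  16    - not masked            */
	cfg.s.apic_id  = apicid;	/* bits 63:32 - destination APIC id   */

	/* The shift/mask macros build the same value without the union: */
	WARN_ON(cfg.v !=
		(((unsigned long)vector << UVH_GR0_TLB_INT0_CONFIG_VECTOR_SHFT) |
		 (apicid << UVH_GR0_TLB_INT0_CONFIG_APIC_ID_SHFT)));

	uv_write_global_mmr64(pnode, UVH_GR0_TLB_INT0_CONFIG, cfg.v);
}

The GR0/GR1 INT1 variants differ only in their MMR offsets (0x61b40, 0x61f00, 0x61f40), so the same pattern applies to all four.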
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index 1bd6da1f8fad..1248318436e8 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c | |||
@@ -118,17 +118,12 @@ static int uv_wakeup_secondary(int phys_apicid, unsigned long start_rip) | |||
118 | 118 | ||
119 | static void uv_send_IPI_one(int cpu, int vector) | 119 | static void uv_send_IPI_one(int cpu, int vector) |
120 | { | 120 | { |
121 | unsigned long val, apicid; | 121 | unsigned long apicid; |
122 | int pnode; | 122 | int pnode; |
123 | 123 | ||
124 | apicid = per_cpu(x86_cpu_to_apicid, cpu); | 124 | apicid = per_cpu(x86_cpu_to_apicid, cpu); |
125 | pnode = uv_apicid_to_pnode(apicid); | 125 | pnode = uv_apicid_to_pnode(apicid); |
126 | 126 | uv_hub_send_ipi(pnode, apicid, vector); | |
127 | val = (1UL << UVH_IPI_INT_SEND_SHFT) | | ||
128 | (apicid << UVH_IPI_INT_APIC_ID_SHFT) | | ||
129 | (vector << UVH_IPI_INT_VECTOR_SHFT); | ||
130 | |||
131 | uv_write_global_mmr64(pnode, UVH_IPI_INT, val); | ||
132 | } | 127 | } |
133 | 128 | ||
134 | static void uv_send_IPI_mask(const struct cpumask *mask, int vector) | 129 | static void uv_send_IPI_mask(const struct cpumask *mask, int vector) |
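uv_send_IPI_one() now delegates the MMR write to uv_hub_send_ipi(), which this series introduces in the UV hub header. Judging purely from the lines removed above, the helper folds the same three-field composition into one place; the sketch below is reconstructed from that removed code and is not the actual body of uv_hub_send_ipi(), which may additionally handle destination-mode details.

/* Sketch reconstructed from the removed open-coded sequence. */
static inline void uv_hub_send_ipi_sketch(int pnode, int apicid, int vector)
{
	unsigned long val;

	val = (1UL << UVH_IPI_INT_SEND_SHFT) |
	      ((unsigned long)apicid << UVH_IPI_INT_APIC_ID_SHFT) |
	      ((unsigned long)vector << UVH_IPI_INT_VECTOR_SHFT);

	uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
}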
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 14014d766cad..76f8f84043a2 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c | |||
@@ -245,7 +245,7 @@ void prepare_to_copy(struct task_struct *tsk) | |||
245 | unlazy_fpu(tsk); | 245 | unlazy_fpu(tsk); |
246 | } | 246 | } |
247 | 247 | ||
248 | int copy_thread(int nr, unsigned long clone_flags, unsigned long sp, | 248 | int copy_thread(unsigned long clone_flags, unsigned long sp, |
249 | unsigned long unused, | 249 | unsigned long unused, |
250 | struct task_struct *p, struct pt_regs *regs) | 250 | struct task_struct *p, struct pt_regs *regs) |
251 | { | 251 | { |
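This hunk and the matching process_64.c and xtensa hunks below make the same mechanical change: copy_thread() loses its never-used first parameter. The per-architecture bodies are untouched; only the generic caller drops the leading argument. An illustrative call site is sketched here under the assumption that copy_process() in kernel/fork.c passes the remaining arguments through unchanged; the surrounding code and the error label are not part of this diff.

	/* copy_process(), kernel/fork.c - sketch of the updated call: */
	retval = copy_thread(clone_flags, stack_start, stack_size, p, regs);
	if (retval)
		goto out_cleanup;	/* hypothetical error label */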
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index abb7e6a7f0c6..b751a41392b1 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c | |||
@@ -278,7 +278,7 @@ void prepare_to_copy(struct task_struct *tsk) | |||
278 | unlazy_fpu(tsk); | 278 | unlazy_fpu(tsk); |
279 | } | 279 | } |
280 | 280 | ||
281 | int copy_thread(int nr, unsigned long clone_flags, unsigned long sp, | 281 | int copy_thread(unsigned long clone_flags, unsigned long sp, |
282 | unsigned long unused, | 282 | unsigned long unused, |
283 | struct task_struct *p, struct pt_regs *regs) | 283 | struct task_struct *p, struct pt_regs *regs) |
284 | { | 284 | { |
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index 19378715f415..b7cc21bc6ae0 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c | |||
@@ -1455,6 +1455,6 @@ asmregparm void syscall_trace_leave(struct pt_regs *regs) | |||
1455 | * system call instruction. | 1455 | * system call instruction. |
1456 | */ | 1456 | */ |
1457 | if (test_thread_flag(TIF_SINGLESTEP) && | 1457 | if (test_thread_flag(TIF_SINGLESTEP) && |
1458 | tracehook_consider_fatal_signal(current, SIGTRAP, SIG_DFL)) | 1458 | tracehook_consider_fatal_signal(current, SIGTRAP)) |
1459 | send_sigtrap(current, regs, 0, TRAP_BRKPT); | 1459 | send_sigtrap(current, regs, 0, TRAP_BRKPT); |
1460 | } | 1460 | } |
diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S index 3bdb64829b82..ff5c8736b491 100644 --- a/arch/x86/kernel/syscall_table_32.S +++ b/arch/x86/kernel/syscall_table_32.S | |||
@@ -332,3 +332,5 @@ ENTRY(sys_call_table) | |||
332 | .long sys_dup3 /* 330 */ | 332 | .long sys_dup3 /* 330 */ |
333 | .long sys_pipe2 | 333 | .long sys_pipe2 |
334 | .long sys_inotify_init1 | 334 | .long sys_inotify_init1 |
335 | .long sys_preadv | ||
336 | .long sys_pwritev | ||
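The two new entries wire sys_preadv and sys_pwritev into the 32-bit syscall table as numbers 333 and 334 (slots count from 0 at the top of sys_call_table, as the /* 330 */ comment on sys_dup3 indicates). Below is a userspace sketch that exercises preadv through the raw syscall number, for a libc that does not yet provide a wrapper; the file name and the low/high split of the 64-bit offset on 32-bit x86 are assumptions of this example.

/* Userspace illustration only; 32-bit x86, pre-preadv libc headers. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <sys/uio.h>
#include <unistd.h>

#ifndef __NR_preadv
#define __NR_preadv 333			/* slot added above */
#endif

int main(void)
{
	char hdr[16], rest[64];
	struct iovec iov[2] = {
		{ .iov_base = hdr,  .iov_len = sizeof(hdr)  },
		{ .iov_base = rest, .iov_len = sizeof(rest) },
	};
	int fd = open("/etc/hostname", O_RDONLY);	/* any readable file */
	long n;

	if (fd < 0)
		return 1;
	/* Offset 0, passed as low/high halves (assumed calling convention). */
	n = syscall(__NR_preadv, fd, iov, 2, 0UL, 0UL);
	printf("preadv read %ld bytes\n", n);
	close(fd);
	return n < 0;
}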
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c index 9185597eb6a0..031f36685710 100644 --- a/arch/xtensa/kernel/process.c +++ b/arch/xtensa/kernel/process.c | |||
@@ -172,7 +172,7 @@ void prepare_to_copy(struct task_struct *tsk) | |||
172 | * childregs. | 172 | * childregs. |
173 | */ | 173 | */ |
174 | 174 | ||
175 | int copy_thread(int nr, unsigned long clone_flags, unsigned long usp, | 175 | int copy_thread(unsigned long clone_flags, unsigned long usp, |
176 | unsigned long unused, | 176 | unsigned long unused, |
177 | struct task_struct * p, struct pt_regs * regs) | 177 | struct task_struct * p, struct pt_regs * regs) |
178 | { | 178 | { |