Diffstat (limited to 'arch')
26 files changed, 386 insertions, 142 deletions
diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h
index 7f36d00600b4..feb988a7ec37 100644
--- a/arch/arm/include/asm/highmem.h
+++ b/arch/arm/include/asm/highmem.h
@@ -11,7 +11,11 @@
 
 #define kmap_prot		PAGE_KERNEL
 
-#define flush_cache_kmaps()	flush_cache_all()
+#define flush_cache_kmaps() \
+	do { \
+		if (cache_is_vivt()) \
+			flush_cache_all(); \
+	} while (0)
 
 extern pte_t *pkmap_page_table;
 
@@ -21,11 +25,20 @@ extern void *kmap_high(struct page *page);
 extern void *kmap_high_get(struct page *page);
 extern void kunmap_high(struct page *page);
 
+extern void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte);
+extern void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte);
+
+/*
+ * The following functions are already defined by <linux/highmem.h>
+ * when CONFIG_HIGHMEM is not set.
+ */
+#ifdef CONFIG_HIGHMEM
 extern void *kmap(struct page *page);
 extern void kunmap(struct page *page);
 extern void *kmap_atomic(struct page *page, enum km_type type);
 extern void kunmap_atomic(void *kvaddr, enum km_type type);
 extern void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
 extern struct page *kmap_atomic_to_page(const void *ptr);
+#endif
 
 #endif
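The two new prototypes are a save/restore pair; the arm/mm callers later in this diff (dma-mapping.c and flush.c) use them in the pattern sketched below. The wrapper function name and the bare "page" argument are placeholders, not part of the patch:

	/*
	 * Sketch of the intended calling pattern for the new helpers,
	 * mirroring the dma-mapping.c and flush.c hunks later in this diff.
	 */
	static void flush_one_highmem_page(struct page *page)
	{
		pte_t saved_pte;
		void *vaddr = kmap_high_l1_vipt(page, &saved_pte);

		__cpuc_flush_dcache_area(vaddr, PAGE_SIZE); /* maintenance via the temporary alias */
		kunmap_high_l1_vipt(page, saved_pte);       /* put back whatever the fixmap held */
	}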
diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
index c019949a5189..c4b2ea3fbe42 100644
--- a/arch/arm/include/asm/kmap_types.h
+++ b/arch/arm/include/asm/kmap_types.h
@@ -18,6 +18,7 @@ enum km_type {
 	KM_IRQ1,
 	KM_SOFTIRQ0,
 	KM_SOFTIRQ1,
+	KM_L1_CACHE,
 	KM_L2_CACHE,
 	KM_TYPE_NR
 };
diff --git a/arch/arm/include/asm/ucontext.h b/arch/arm/include/asm/ucontext.h
index bf65e9f4525d..47f023aa8495 100644
--- a/arch/arm/include/asm/ucontext.h
+++ b/arch/arm/include/asm/ucontext.h
@@ -59,23 +59,22 @@ struct iwmmxt_sigframe {
 #endif /* CONFIG_IWMMXT */
 
 #ifdef CONFIG_VFP
-#if __LINUX_ARM_ARCH__ < 6
-/* For ARM pre-v6, we use fstmiax and fldmiax.  This adds one extra
- * word after the registers, and a word of padding at the end for
- * alignment. */
 #define VFP_MAGIC		0x56465001
-#define VFP_STORAGE_SIZE	152
-#else
-#define VFP_MAGIC		0x56465002
-#define VFP_STORAGE_SIZE	144
-#endif
 
 struct vfp_sigframe
 {
 	unsigned long		magic;
 	unsigned long		size;
-	union vfp_state		storage;
-};
+	struct user_vfp		ufp;
+	struct user_vfp_exc	ufp_exc;
+} __attribute__((__aligned__(8)));
+
+/*
+ * 8 bytes for magic and size, 264 bytes for ufp, 12 bytes for ufp_exc,
+ * 4 bytes of padding.
+ */
+#define VFP_STORAGE_SIZE	sizeof(struct vfp_sigframe)
+
 #endif /* CONFIG_VFP */
 
 /*
@@ -91,7 +90,7 @@ struct aux_sigframe {
 #ifdef CONFIG_IWMMXT
 	struct iwmmxt_sigframe	iwmmxt;
 #endif
-#if 0 && defined CONFIG_VFP /* Not yet saved. */
+#ifdef CONFIG_VFP
 	struct vfp_sigframe	vfp;
 #endif
 	/* Something that isn't a valid magic number for any coprocessor.  */
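The size comment can be double-checked from the struct layouts: fpregs forces 8-byte alignment, so user_vfp is 32*8 + 4 = 260 bytes padded to 264, and 8 + 264 + 12 = 284 rounds up to 288 under the aligned(8) attribute. A build-time restatement (a sketch only; the BUILD_BUG_ON placement is illustrative, not part of the patch):

	/* Build-time restatement of the size comment above. */
	BUILD_BUG_ON(sizeof(struct user_vfp) != 264);
	BUILD_BUG_ON(sizeof(struct user_vfp_exc) != 12);
	BUILD_BUG_ON(VFP_STORAGE_SIZE != 288);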
diff --git a/arch/arm/include/asm/user.h b/arch/arm/include/asm/user.h
index df95e050f9dd..05ac4b06876a 100644
--- a/arch/arm/include/asm/user.h
+++ b/arch/arm/include/asm/user.h
@@ -83,11 +83,21 @@ struct user{
 
 /*
  * User specific VFP registers. If only VFPv2 is present, registers 16 to 31
- * are ignored by the ptrace system call.
+ * are ignored by the ptrace system call and the signal handler.
 */
 struct user_vfp {
 	unsigned long long fpregs[32];
 	unsigned long fpscr;
 };
 
+/*
+ * VFP exception registers exposed to user space during signal delivery.
+ * Fields not relevant to the current VFP architecture are ignored.
+ */
+struct user_vfp_exc {
+	unsigned long fpexc;
+	unsigned long fpinst;
+	unsigned long fpinst2;
+};
+
 #endif /* _ARM_USER_H */
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index e7714f367eb8..907d5a620bca 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -18,6 +18,7 @@
 #include <asm/cacheflush.h>
 #include <asm/ucontext.h>
 #include <asm/unistd.h>
+#include <asm/vfp.h>
 
 #include "ptrace.h"
 #include "signal.h"
@@ -175,6 +176,90 @@ static int restore_iwmmxt_context(struct iwmmxt_sigframe *frame)
 
 #endif
 
+#ifdef CONFIG_VFP
+
+static int preserve_vfp_context(struct vfp_sigframe __user *frame)
+{
+	struct thread_info *thread = current_thread_info();
+	struct vfp_hard_struct *h = &thread->vfpstate.hard;
+	const unsigned long magic = VFP_MAGIC;
+	const unsigned long size = VFP_STORAGE_SIZE;
+	int err = 0;
+
+	vfp_sync_hwstate(thread);
+	__put_user_error(magic, &frame->magic, err);
+	__put_user_error(size, &frame->size, err);
+
+	/*
+	 * Copy the floating point registers. There can be unused
+	 * registers; see asm/hwcap.h for details.
+	 */
+	err |= __copy_to_user(&frame->ufp.fpregs, &h->fpregs,
+			      sizeof(h->fpregs));
+	/*
+	 * Copy the status and control register.
+	 */
+	__put_user_error(h->fpscr, &frame->ufp.fpscr, err);
+
+	/*
+	 * Copy the exception registers.
+	 */
+	__put_user_error(h->fpexc, &frame->ufp_exc.fpexc, err);
+	__put_user_error(h->fpinst, &frame->ufp_exc.fpinst, err);
+	__put_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err);
+
+	return err ? -EFAULT : 0;
+}
+
+static int restore_vfp_context(struct vfp_sigframe __user *frame)
+{
+	struct thread_info *thread = current_thread_info();
+	struct vfp_hard_struct *h = &thread->vfpstate.hard;
+	unsigned long magic;
+	unsigned long size;
+	unsigned long fpexc;
+	int err = 0;
+
+	__get_user_error(magic, &frame->magic, err);
+	__get_user_error(size, &frame->size, err);
+
+	if (err)
+		return -EFAULT;
+	if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
+		return -EINVAL;
+
+	/*
+	 * Copy the floating point registers. There can be unused
+	 * registers; see asm/hwcap.h for details.
+	 */
+	err |= __copy_from_user(&h->fpregs, &frame->ufp.fpregs,
+				sizeof(h->fpregs));
+	/*
+	 * Copy the status and control register.
+	 */
+	__get_user_error(h->fpscr, &frame->ufp.fpscr, err);
+
+	/*
+	 * Sanitise and restore the exception registers.
+	 */
+	__get_user_error(fpexc, &frame->ufp_exc.fpexc, err);
+	/* Ensure the VFP is enabled. */
+	fpexc |= FPEXC_EN;
+	/* Ensure FPINST2 is invalid and the exception flag is cleared. */
+	fpexc &= ~(FPEXC_EX | FPEXC_FP2V);
+	h->fpexc = fpexc;
+
+	__get_user_error(h->fpinst, &frame->ufp_exc.fpinst, err);
+	__get_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err);
+
+	if (!err)
+		vfp_flush_hwstate(thread);
+
+	return err ? -EFAULT : 0;
+}
+
+#endif
+
 /*
  * Do a signal return; undo the signal stack. These are aligned to 64-bit.
  */
@@ -233,8 +318,8 @@ static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
 	err |= restore_iwmmxt_context(&aux->iwmmxt);
 #endif
 #ifdef CONFIG_VFP
-//	if (err == 0)
-//		err |= vfp_restore_state(&sf->aux.vfp);
+	if (err == 0)
+		err |= restore_vfp_context(&aux->vfp);
 #endif
 
 	return err;
@@ -348,8 +433,8 @@ setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
 	err |= preserve_iwmmxt_context(&aux->iwmmxt);
 #endif
 #ifdef CONFIG_VFP
-	if (err == 0)
-		err |= vfp_save_state(&sf->aux.vfp);
+	if (err == 0)
+		err |= preserve_vfp_context(&aux->vfp);
 #endif
 	__put_user_error(0, &aux->end_magic, err);
 
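On the user-space side, the saved block can be located in the frame's aux area by the magic/size header that restore_vfp_context() validates. A hypothetical sketch (the uc_regspace traversal and the assumption that the VFP block comes first are illustrative only, not a documented ABI):

	/* Hypothetical user-space sketch; not part of the patch. */
	static void handler(int sig, siginfo_t *info, void *ctx)
	{
		struct ucontext *uc = ctx;
		unsigned long *p = (unsigned long *)uc->uc_regspace;

		/* Each coprocessor block starts with its magic and size. */
		if (p[0] == 0x56465001 /* VFP_MAGIC */) {
			struct user_vfp *ufp = (struct user_vfp *)&p[2];
			/* ufp->fpregs[] and ufp->fpscr hold the pre-signal state */
		}
	}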
diff --git a/arch/arm/mach-at91/Makefile b/arch/arm/mach-at91/Makefile
index 027dd570dcc3..d4004557532a 100644
--- a/arch/arm/mach-at91/Makefile
+++ b/arch/arm/mach-at91/Makefile
@@ -16,8 +16,8 @@ obj-$(CONFIG_ARCH_AT91SAM9261)	+= at91sam9261.o at91sam926x_time.o at91sam9261_d
 obj-$(CONFIG_ARCH_AT91SAM9G10)	+= at91sam9261.o at91sam926x_time.o at91sam9261_devices.o sam9_smc.o
 obj-$(CONFIG_ARCH_AT91SAM9263)	+= at91sam9263.o at91sam926x_time.o at91sam9263_devices.o sam9_smc.o
 obj-$(CONFIG_ARCH_AT91SAM9RL)	+= at91sam9rl.o at91sam926x_time.o at91sam9rl_devices.o sam9_smc.o
 obj-$(CONFIG_ARCH_AT91SAM9G20)	+= at91sam9260.o at91sam926x_time.o at91sam9260_devices.o sam9_smc.o
 obj-$(CONFIG_ARCH_AT91SAM9G45)	+= at91sam9g45.o at91sam926x_time.o at91sam9g45_devices.o sam9_smc.o
 obj-$(CONFIG_ARCH_AT91CAP9)	+= at91cap9.o at91sam926x_time.o at91cap9_devices.o sam9_smc.o
 obj-$(CONFIG_ARCH_AT572D940HF)	+= at572d940hf.o at91sam926x_time.o at572d940hf_devices.o sam9_smc.o
 obj-$(CONFIG_ARCH_AT91X40)	+= at91x40.o at91x40_time.o
diff --git a/arch/arm/mach-at91/pm_slowclock.S b/arch/arm/mach-at91/pm_slowclock.S
index 9fcbd6ca0090..9c5b48e68a71 100644
--- a/arch/arm/mach-at91/pm_slowclock.S
+++ b/arch/arm/mach-at91/pm_slowclock.S
@@ -175,8 +175,6 @@ ENTRY(at91_slow_clock)
 	orr	r3, r3, #(1 << 29)		/* bit 29 always set */
 	str	r3, [r1, #(AT91_CKGR_PLLAR - AT91_PMC)]
 
-	wait_pllalock
-
 	/* Save PLLB setting and disable it */
 	ldr	r3, [r1, #(AT91_CKGR_PLLBR - AT91_PMC)]
 	str	r3, .saved_pllbr
@@ -184,8 +182,6 @@ ENTRY(at91_slow_clock)
 	mov	r3, #AT91_PMC_PLLCOUNT
 	str	r3, [r1, #(AT91_CKGR_PLLBR - AT91_PMC)]
 
-	wait_pllblock
-
 	/* Turn off the main oscillator */
 	ldr	r3, [r1, #(AT91_CKGR_MOR - AT91_PMC)]
 	bic	r3, r3, #AT91_PMC_MOSCEN
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 8bca4dea6dfa..f55fa1044f72 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -41,14 +41,7 @@ static void v6_copy_user_highpage_nonaliasing(struct page *to,
 	kfrom = kmap_atomic(from, KM_USER0);
 	kto = kmap_atomic(to, KM_USER1);
 	copy_page(kto, kfrom);
-#ifdef CONFIG_HIGHMEM
-	/*
-	 * kmap_atomic() doesn't set the page virtual address, and
-	 * kunmap_atomic() takes care of cache flushing already.
-	 */
-	if (page_address(to) != NULL)
-#endif
-		__cpuc_flush_dcache_area(kto, PAGE_SIZE);
+	__cpuc_flush_dcache_area(kto, PAGE_SIZE);
 	kunmap_atomic(kto, KM_USER1);
 	kunmap_atomic(kfrom, KM_USER0);
 }
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 1351edc0b26f..13fa536d82e6 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -464,6 +464,11 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
 			vaddr += offset;
 			op(vaddr, len, dir);
 			kunmap_high(page);
+		} else if (cache_is_vipt()) {
+			pte_t saved_pte;
+			vaddr = kmap_high_l1_vipt(page, &saved_pte);
+			op(vaddr + offset, len, dir);
+			kunmap_high_l1_vipt(page, saved_pte);
 		}
 	} else {
 		vaddr = page_address(page) + offset;
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index e34f095e2090..c6844cb9b508 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -13,6 +13,7 @@
 
 #include <asm/cacheflush.h>
 #include <asm/cachetype.h>
+#include <asm/highmem.h>
 #include <asm/smp_plat.h>
 #include <asm/system.h>
 #include <asm/tlbflush.h>
@@ -152,21 +153,25 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
 
 void __flush_dcache_page(struct address_space *mapping, struct page *page)
 {
-	void *addr = page_address(page);
-
 	/*
 	 * Writeback any data associated with the kernel mapping of this
 	 * page.  This ensures that data in the physical page is mutually
 	 * coherent with the kernel's mapping.
 	 */
-#ifdef CONFIG_HIGHMEM
-	/*
-	 * kmap_atomic() doesn't set the page virtual address, and
-	 * kunmap_atomic() takes care of cache flushing already.
-	 */
-	if (addr)
-#endif
-		__cpuc_flush_dcache_area(addr, PAGE_SIZE);
+	if (!PageHighMem(page)) {
+		__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
+	} else {
+		void *addr = kmap_high_get(page);
+		if (addr) {
+			__cpuc_flush_dcache_area(addr, PAGE_SIZE);
+			kunmap_high(page);
+		} else if (cache_is_vipt()) {
+			pte_t saved_pte;
+			addr = kmap_high_l1_vipt(page, &saved_pte);
+			__cpuc_flush_dcache_area(addr, PAGE_SIZE);
+			kunmap_high_l1_vipt(page, saved_pte);
+		}
+	}
 
 	/*
 	 * If this is a page cache page, and we have an aliasing VIPT cache,
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index 2be1ec7c1b41..77b030f5ec09 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -79,7 +79,8 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
 	unsigned int idx = type + KM_TYPE_NR * smp_processor_id();
 
 	if (kvaddr >= (void *)FIXADDR_START) {
-		__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
+		if (cache_is_vivt())
+			__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
 #ifdef CONFIG_DEBUG_HIGHMEM
 		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
 		set_pte_ext(TOP_PTE(vaddr), __pte(0), 0);
@@ -124,3 +125,87 @@ struct page *kmap_atomic_to_page(const void *ptr)
 	pte = TOP_PTE(vaddr);
 	return pte_page(*pte);
 }
+
+#ifdef CONFIG_CPU_CACHE_VIPT
+
+#include <linux/percpu.h>
+
+/*
+ * The VIVT cache of a highmem page is always flushed before the page
+ * is unmapped. Hence unmapped highmem pages need no cache maintenance
+ * in that case.
+ *
+ * However unmapped pages may still be cached with a VIPT cache, and
+ * it is not possible to perform cache maintenance on them using physical
+ * addresses unfortunately. So we have no choice but to set up a temporary
+ * virtual mapping for that purpose.
+ *
+ * Yet this VIPT cache maintenance may be triggered from DMA support
+ * functions which are possibly called from interrupt context. As we don't
+ * want to keep interrupts disabled all the time when such maintenance is
+ * taking place, we therefore allow for some reentrancy by preserving and
+ * restoring the previous fixmap entry before the interrupted context is
+ * resumed. If the reentrancy depth is 0 then there is no need to restore
+ * the previous fixmap, and leaving the current one in place allows it to
+ * be reused the next time without a TLB flush (common with DMA).
+ */
+
+static DEFINE_PER_CPU(int, kmap_high_l1_vipt_depth);
+
+void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte)
+{
+	unsigned int idx, cpu = smp_processor_id();
+	int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
+	unsigned long vaddr, flags;
+	pte_t pte, *ptep;
+
+	idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
+	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+	ptep = TOP_PTE(vaddr);
+	pte = mk_pte(page, kmap_prot);
+
+	if (!in_interrupt())
+		preempt_disable();
+
+	raw_local_irq_save(flags);
+	(*depth)++;
+	if (pte_val(*ptep) == pte_val(pte)) {
+		*saved_pte = pte;
+	} else {
+		*saved_pte = *ptep;
+		set_pte_ext(ptep, pte, 0);
+		local_flush_tlb_kernel_page(vaddr);
+	}
+	raw_local_irq_restore(flags);
+
+	return (void *)vaddr;
+}
+
+void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte)
+{
+	unsigned int idx, cpu = smp_processor_id();
+	int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
+	unsigned long vaddr, flags;
+	pte_t pte, *ptep;
+
+	idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
+	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+	ptep = TOP_PTE(vaddr);
+	pte = mk_pte(page, kmap_prot);
+
+	BUG_ON(pte_val(*ptep) != pte_val(pte));
+	BUG_ON(*depth <= 0);
+
+	raw_local_irq_save(flags);
+	(*depth)--;
+	if (*depth != 0 && pte_val(pte) != pte_val(saved_pte)) {
+		set_pte_ext(ptep, saved_pte, 0);
+		local_flush_tlb_kernel_page(vaddr);
+	}
+	raw_local_irq_restore(flags);
+
+	if (!in_interrupt())
+		preempt_enable();
+}
+
+#endif /* CONFIG_CPU_CACHE_VIPT */
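The per-CPU depth counter is what makes the pair safe to nest; an illustrative interleaving (hypothetical pages, not from the patch):

	/* task context */
	void *va = kmap_high_l1_vipt(pageA, &pteA);   /* depth 0 -> 1; fixmap -> pageA */

		/* interrupt: DMA completion does its own maintenance */
		void *vb = kmap_high_l1_vipt(pageB, &pteB); /* depth 1 -> 2; pteB saves pageA's pte */
		/* ... cache ops on vb ... */
		kunmap_high_l1_vipt(pageB, pteB);           /* depth back to 1; pageA's pte restored */

	/* task context resumes, its mapping intact */
	kunmap_high_l1_vipt(pageA, pteA);             /* depth 0; entry left in place for cheap reuse */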
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 4223d086aa17..241c24a1c18f 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1054,10 +1054,12 @@ void setup_mm_for_reboot(char mode)
 	pgd_t *pgd;
 	int i;
 
-	if (current->mm && current->mm->pgd)
-		pgd = current->mm->pgd;
-	else
-		pgd = init_mm.pgd;
+	/*
+	 * We need access to user-mode page tables here. For kernel threads
+	 * we don't have any user-mode mappings so we use the context that we
+	 * "borrowed".
+	 */
+	pgd = current->active_mm->pgd;
 
 	base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT;
 	if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
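The one-liner relies on a standard scheduler invariant rather than anything introduced here; restated as a sketch:

	/*
	 * user task:      current->mm == current->active_mm
	 * kernel thread:  current->mm == NULL, but current->active_mm
	 *                 is the mm "borrowed" from the last user task,
	 *                 whose page tables are still live.
	 *
	 * Either way active_mm->pgd names usable page tables, so the
	 * old init_mm fallback is unnecessary.
	 */
	pgd = current->active_mm->pgd;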
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index a420cb949328..315a540c7ce5 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -428,26 +428,6 @@ static void vfp_pm_init(void)
 static inline void vfp_pm_init(void) { }
 #endif /* CONFIG_PM */
 
-/*
- * Synchronise the hardware VFP state of a thread other than current with the
- * saved one. This function is used by the ptrace mechanism.
- */
-#ifdef CONFIG_SMP
-void vfp_sync_hwstate(struct thread_info *thread)
-{
-}
-
-void vfp_flush_hwstate(struct thread_info *thread)
-{
-	/*
-	 * On SMP systems, the VFP state is automatically saved at every
-	 * context switch. We mark the thread VFP state as belonging to a
-	 * non-existent CPU so that the saved one will be reloaded when
-	 * needed.
-	 */
-	thread->vfpstate.hard.cpu = NR_CPUS;
-}
-#else
 void vfp_sync_hwstate(struct thread_info *thread)
 {
 	unsigned int cpu = get_cpu();
@@ -490,9 +470,18 @@ void vfp_flush_hwstate(struct thread_info *thread)
 		last_VFP_context[cpu] = NULL;
 	}
 
+#ifdef CONFIG_SMP
+	/*
+	 * For SMP we still have to take care of the case where the thread
+	 * migrates to another CPU and then back to the original CPU on which
+	 * the last VFP user is still the same thread. Mark the thread VFP
+	 * state as belonging to a non-existent CPU so that the saved one will
+	 * be reloaded in the above case.
+	 */
+	thread->vfpstate.hard.cpu = NR_CPUS;
+#endif
 	put_cpu();
 }
-#endif
 
 #include <linux/smp.h>
 
diff --git a/arch/m68k/include/asm/atomic_mm.h b/arch/m68k/include/asm/atomic_mm.h
index 88b7af20a996..d9d2ed647435 100644
--- a/arch/m68k/include/asm/atomic_mm.h
+++ b/arch/m68k/include/asm/atomic_mm.h
@@ -148,14 +148,18 @@ static inline int atomic_xchg(atomic_t *v, int new)
 static inline int atomic_sub_and_test(int i, atomic_t *v)
 {
 	char c;
-	__asm__ __volatile__("subl %2,%1; seq %0" : "=d" (c), "+m" (*v): "g" (i));
+	__asm__ __volatile__("subl %2,%1; seq %0"
+			     : "=d" (c), "+m" (*v)
+			     : "id" (i));
 	return c != 0;
 }
 
 static inline int atomic_add_negative(int i, atomic_t *v)
 {
 	char c;
-	__asm__ __volatile__("addl %2,%1; smi %0" : "=d" (c), "+m" (*v): "g" (i));
+	__asm__ __volatile__("addl %2,%1; smi %0"
+			     : "=d" (c), "+m" (*v)
+			     : "id" (i));
 	return c != 0;
 }
 
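The constraint change matters because "g" also permits memory and address-register operands, but with a memory destination the 68k add/sub instructions only encode a data register or an immediate as the source; "id" says exactly that. A sketch of the failure mode (hypothetical assembly, for illustration):

	/*
	 * With "g", gcc was free to pick source forms the assembler rejects:
	 *
	 *	subl	12(%fp),(%a0)	| memory-to-memory: not encodable
	 *	subl	%a2,(%a0)	| address-register source: not encodable
	 *
	 * With "id" the source is an immediate or a data register:
	 *
	 *	subl	#1,(%a0)	| valid
	 *	subl	%d1,(%a0)	| valid
	 */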
diff --git a/arch/m68k/include/asm/sigcontext.h b/arch/m68k/include/asm/sigcontext.h
index 1320eaa4cc2a..a29dd74a17cb 100644
--- a/arch/m68k/include/asm/sigcontext.h
+++ b/arch/m68k/include/asm/sigcontext.h
@@ -17,13 +17,11 @@ struct sigcontext {
 #ifndef __uClinux__
 # ifdef __mcoldfire__
 	unsigned long	sc_fpregs[2][2];	/* room for two fp registers */
-	unsigned long	sc_fpcntl[3];
-	unsigned char	sc_fpstate[16+6*8];
 # else
 	unsigned long	sc_fpregs[2*3];		/* room for two fp registers */
+# endif
 	unsigned long	sc_fpcntl[3];
 	unsigned char	sc_fpstate[216];
-# endif
 #endif
 };
 
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index 59b4556a5b92..e790bc1fbfa3 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -626,7 +626,7 @@ ia32_sys_call_table:
 	.quad stub32_sigreturn
 	.quad stub32_clone		/* 120 */
 	.quad sys_setdomainname
-	.quad sys_uname
+	.quad sys_newuname
 	.quad sys_modify_ldt
 	.quad compat_sys_adjtimex
 	.quad sys32_mprotect		/* 125 */
diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h
index ba19ad4c47d0..86a0ff0aeac7 100644
--- a/arch/x86/include/asm/amd_iommu_types.h
+++ b/arch/x86/include/asm/amd_iommu_types.h
@@ -21,6 +21,7 @@
 #define _ASM_X86_AMD_IOMMU_TYPES_H
 
 #include <linux/types.h>
+#include <linux/mutex.h>
 #include <linux/list.h>
 #include <linux/spinlock.h>
 
@@ -140,6 +141,7 @@
 
 /* constants to configure the command buffer */
 #define CMD_BUFFER_SIZE    8192
+#define CMD_BUFFER_UNINITIALIZED 1
 #define CMD_BUFFER_ENTRIES 512
 #define MMIO_CMD_SIZE_SHIFT 56
 #define MMIO_CMD_SIZE_512 (0x9ULL << MMIO_CMD_SIZE_SHIFT)
@@ -237,6 +239,7 @@ struct protection_domain {
 	struct list_head list;  /* for list of all protection domains */
 	struct list_head dev_list; /* List of all devices in this domain */
 	spinlock_t lock;	/* mostly used to lock the page table*/
+	struct mutex api_lock;	/* protect page tables in the iommu-api path */
 	u16 id;			/* the domain id written to the device table */
 	int mode;		/* paging mode (0-6 levels) */
 	u64 *pt_root;		/* page table root pointer */
diff --git a/arch/x86/include/asm/lguest_hcall.h b/arch/x86/include/asm/lguest_hcall.h
index ba0eed8aa1a6..b60f2924c413 100644
--- a/arch/x86/include/asm/lguest_hcall.h
+++ b/arch/x86/include/asm/lguest_hcall.h
@@ -28,22 +28,39 @@
 
 #ifndef __ASSEMBLY__
 #include <asm/hw_irq.h>
-#include <asm/kvm_para.h>
 
 /*G:030
  * But first, how does our Guest contact the Host to ask for privileged
 * operations?  There are two ways: the direct way is to make a "hypercall",
 * to make requests of the Host Itself.
 *
- * We use the KVM hypercall mechanism, though completely different hypercall
- * numbers. Seventeen hypercalls are available: the hypercall number is put in
- * the %eax register, and the arguments (when required) are placed in %ebx,
- * %ecx, %edx and %esi.  If a return value makes sense, it's returned in %eax.
+ * Our hypercall mechanism uses the highest unused trap code (traps 32 and
+ * above are used by real hardware interrupts).  Seventeen hypercalls are
+ * available: the hypercall number is put in the %eax register, and the
+ * arguments (when required) are placed in %ebx, %ecx, %edx and %esi.
+ * If a return value makes sense, it's returned in %eax.
 *
 * Grossly invalid calls result in Sudden Death at the hands of the vengeful
 * Host, rather than returning failure.  This reflects Winston Churchill's
 * definition of a gentleman: "someone who is only rude intentionally".
-:*/
+ */
+static inline unsigned long
+hcall(unsigned long call,
+      unsigned long arg1, unsigned long arg2, unsigned long arg3,
+      unsigned long arg4)
+{
+	/* "int" is the Intel instruction to trigger a trap. */
+	asm volatile("int $" __stringify(LGUEST_TRAP_ENTRY)
+		     /* The call in %eax (aka "a") might be overwritten */
+		     : "=a"(call)
+		       /* The arguments are in %eax, %ebx, %ecx, %edx & %esi */
+		     : "a"(call), "b"(arg1), "c"(arg2), "d"(arg3), "S"(arg4)
+		       /* "memory" means this might write somewhere in memory.
+			* This isn't true for all calls, but it's safe to tell
+			* gcc that it might happen so it doesn't get clever. */
+		     : "memory");
+	return call;
+}
 
 /* Can't use our min() macro here: needs to be a constant */
 #define LGUEST_IRQS (NR_IRQS < 32 ? NR_IRQS: 32)
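Callers pad unused argument slots with zeroes, as the boot.c conversions later in this diff show; two examples taken directly from those hunks:

	/* No-argument hypercall: halt until the next interrupt. */
	hcall(LHCALL_HALT, 0, 0, 0, 0);

	/* Two-argument hypercall: power off, passing a physical address
	 * and a shutdown reason code. */
	hcall(LHCALL_SHUTDOWN, __pa("Power down"), LGUEST_SHUTDOWN_POWEROFF, 0, 0);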
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index f3dadb571d9b..f854d89b7edf 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -118,7 +118,7 @@ static bool check_device(struct device *dev)
 		return false;
 
 	/* No device or no PCI device */
-	if (!dev || dev->bus != &pci_bus_type)
+	if (dev->bus != &pci_bus_type)
 		return false;
 
 	devid = get_device_id(dev);
@@ -392,6 +392,7 @@ static int __iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
 	u32 tail, head;
 	u8 *target;
 
+	WARN_ON(iommu->cmd_buf_size & CMD_BUFFER_UNINITIALIZED);
 	tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
 	target = iommu->cmd_buf + tail;
 	memcpy_toio(target, cmd, sizeof(*cmd));
@@ -2186,7 +2187,7 @@ static void prealloc_protection_domains(void)
 	struct dma_ops_domain *dma_dom;
 	u16 devid;
 
-	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
+	for_each_pci_dev(dev) {
 
 		/* Do we handle this device? */
 		if (!check_device(&dev->dev))
@@ -2298,7 +2299,7 @@ static void cleanup_domain(struct protection_domain *domain)
 	list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) {
 		struct device *dev = dev_data->dev;
 
-		do_detach(dev);
+		__detach_device(dev);
 		atomic_set(&dev_data->bind, 0);
 	}
 
@@ -2327,6 +2328,7 @@ static struct protection_domain *protection_domain_alloc(void)
 		return NULL;
 
 	spin_lock_init(&domain->lock);
+	mutex_init(&domain->api_lock);
 	domain->id = domain_id_alloc();
 	if (!domain->id)
 		goto out_err;
@@ -2379,9 +2381,7 @@ static void amd_iommu_domain_destroy(struct iommu_domain *dom)
 
 	free_pagetable(domain);
 
-	domain_id_free(domain->id);
-
-	kfree(domain);
+	protection_domain_free(domain);
 
 	dom->priv = NULL;
 }
@@ -2456,6 +2456,8 @@ static int amd_iommu_map_range(struct iommu_domain *dom,
 	iova  &= PAGE_MASK;
 	paddr &= PAGE_MASK;
 
+	mutex_lock(&domain->api_lock);
+
 	for (i = 0; i < npages; ++i) {
 		ret = iommu_map_page(domain, iova, paddr, prot, PM_MAP_4k);
 		if (ret)
@@ -2465,6 +2467,8 @@ static int amd_iommu_map_range(struct iommu_domain *dom,
 		paddr += PAGE_SIZE;
 	}
 
+	mutex_unlock(&domain->api_lock);
+
 	return 0;
 }
 
@@ -2477,12 +2481,16 @@ static void amd_iommu_unmap_range(struct iommu_domain *dom,
 
 	iova &= PAGE_MASK;
 
+	mutex_lock(&domain->api_lock);
+
 	for (i = 0; i < npages; ++i) {
 		iommu_unmap_page(domain, iova, PM_MAP_4k);
 		iova  += PAGE_SIZE;
 	}
 
 	iommu_flush_tlb_pde(domain);
+
+	mutex_unlock(&domain->api_lock);
 }
 
 static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index 42f5350b908f..6360abf993d4 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -138,9 +138,9 @@ int amd_iommus_present;
 bool amd_iommu_np_cache __read_mostly;
 
 /*
- * Set to true if ACPI table parsing and hardware initialization went properly
+ * The ACPI table parsing functions set this variable on an error
 */
-static bool amd_iommu_initialized;
+static int __initdata amd_iommu_init_err;
 
 /*
  * List of protection domains - used during resume
@@ -391,9 +391,11 @@ static int __init find_last_devid_acpi(struct acpi_table_header *table)
 	 */
 	for (i = 0; i < table->length; ++i)
 		checksum += p[i];
-	if (checksum != 0)
+	if (checksum != 0) {
 		/* ACPI table corrupt */
-		return -ENODEV;
+		amd_iommu_init_err = -ENODEV;
+		return 0;
+	}
 
 	p += IVRS_HEADER_LENGTH;
 
@@ -436,7 +438,7 @@ static u8 * __init alloc_command_buffer(struct amd_iommu *iommu)
 	if (cmd_buf == NULL)
 		return NULL;
 
-	iommu->cmd_buf_size = CMD_BUFFER_SIZE;
+	iommu->cmd_buf_size = CMD_BUFFER_SIZE | CMD_BUFFER_UNINITIALIZED;
 
 	return cmd_buf;
 }
@@ -472,12 +474,13 @@ static void iommu_enable_command_buffer(struct amd_iommu *iommu)
 		    &entry, sizeof(entry));
 
 	amd_iommu_reset_cmd_buffer(iommu);
+	iommu->cmd_buf_size &= ~(CMD_BUFFER_UNINITIALIZED);
 }
 
 static void __init free_command_buffer(struct amd_iommu *iommu)
 {
 	free_pages((unsigned long)iommu->cmd_buf,
-		   get_order(iommu->cmd_buf_size));
+		   get_order(iommu->cmd_buf_size & ~(CMD_BUFFER_UNINITIALIZED)));
 }
 
 /* allocates the memory where the IOMMU will log its events to */
@@ -920,11 +923,16 @@ static int __init init_iommu_all(struct acpi_table_header *table)
 				    h->mmio_phys);
 
 			iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
-			if (iommu == NULL)
-				return -ENOMEM;
+			if (iommu == NULL) {
+				amd_iommu_init_err = -ENOMEM;
+				return 0;
+			}
+
 			ret = init_iommu_one(iommu, h);
-			if (ret)
-				return ret;
+			if (ret) {
+				amd_iommu_init_err = ret;
+				return 0;
+			}
 			break;
 		default:
 			break;
@@ -934,8 +942,6 @@ static int __init init_iommu_all(struct acpi_table_header *table)
 	}
 	WARN_ON(p != end);
 
-	amd_iommu_initialized = true;
-
 	return 0;
 }
 
@@ -1211,6 +1217,10 @@ static int __init amd_iommu_init(void)
 	if (acpi_table_parse("IVRS", find_last_devid_acpi) != 0)
 		return -ENODEV;
 
+	ret = amd_iommu_init_err;
+	if (ret)
+		goto out;
+
 	dev_table_size     = tbl_size(DEV_TABLE_ENTRY_SIZE);
 	alias_table_size   = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
 	rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);
@@ -1270,12 +1280,19 @@ static int __init amd_iommu_init(void)
 	if (acpi_table_parse("IVRS", init_iommu_all) != 0)
 		goto free;
 
-	if (!amd_iommu_initialized)
+	if (amd_iommu_init_err) {
+		ret = amd_iommu_init_err;
 		goto free;
+	}
 
 	if (acpi_table_parse("IVRS", init_memory_definitions) != 0)
 		goto free;
 
+	if (amd_iommu_init_err) {
+		ret = amd_iommu_init_err;
+		goto free;
+	}
+
 	ret = sysdev_class_register(&amd_iommu_sysdev_class);
 	if (ret)
 		goto free;
@@ -1288,6 +1305,8 @@ static int __init amd_iommu_init(void)
 	if (ret)
 		goto free;
 
+	enable_iommus();
+
 	if (iommu_pass_through)
 		ret = amd_iommu_init_passthrough();
 	else
@@ -1300,8 +1319,6 @@ static int __init amd_iommu_init(void)
 
 	amd_iommu_init_notifier();
 
-	enable_iommus();
-
 	if (iommu_pass_through)
 		goto out;
 
@@ -1315,6 +1332,7 @@ out:
 	return ret;
 
 free:
+	disable_iommus();
 
 	amd_iommu_uninit_devices();
 
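Since CMD_BUFFER_SIZE (8192) leaves bit 0 clear, the new CMD_BUFFER_UNINITIALIZED flag can live in the low bit of cmd_buf_size; collecting the lifecycle implied by the hunks above into one sketch:

	/* allocation: record the size, but mark the buffer not yet live */
	iommu->cmd_buf_size = CMD_BUFFER_SIZE | CMD_BUFFER_UNINITIALIZED;

	/* enable: hardware has been pointed at the buffer, clear the flag */
	iommu->cmd_buf_size &= ~(CMD_BUFFER_UNINITIALIZED);

	/* queueing: catch commands issued before the buffer is live */
	WARN_ON(iommu->cmd_buf_size & CMD_BUFFER_UNINITIALIZED);

	/* free: mask the flag off before computing the allocation order */
	free_pages((unsigned long)iommu->cmd_buf,
		   get_order(iommu->cmd_buf_size & ~(CMD_BUFFER_UNINITIALIZED)));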
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index 3704997e8b25..b5d8b0bcf235 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -393,6 +393,7 @@ void __init gart_iommu_hole_init(void)
 	for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) {
 		int bus;
 		int dev_base, dev_limit;
+		u32 ctl;
 
 		bus = bus_dev_ranges[i].bus;
 		dev_base = bus_dev_ranges[i].dev_base;
@@ -406,7 +407,19 @@ void __init gart_iommu_hole_init(void)
 			gart_iommu_aperture = 1;
 			x86_init.iommu.iommu_init = gart_iommu_init;
 
-			aper_order = (read_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL) >> 1) & 7;
+			ctl = read_pci_config(bus, slot, 3,
+					      AMD64_GARTAPERTURECTL);
+
+			/*
+			 * Before we do anything else disable the GART. It may
+			 * still be enabled if we boot into a crash-kernel here.
+			 * Reconfiguring the GART while it is enabled could have
+			 * unknown side-effects.
+			 */
+			ctl &= ~GARTEN;
+			write_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL, ctl);
+
+			aper_order = (ctl >> 1) & 7;
 			aper_size = (32 * 1024 * 1024) << aper_order;
 			aper_base = read_pci_config(bus, slot, 3, AMD64_GARTAPERTUREBASE) & 0x7fff;
 			aper_base <<= 25;
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index a4849c10a77e..ebd4c51d096a 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -27,7 +27,6 @@
 #include <asm/cpu.h>
 #include <asm/reboot.h>
 #include <asm/virtext.h>
-#include <asm/x86_init.h>
 
 #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
 
@@ -103,10 +102,5 @@ void native_machine_crash_shutdown(struct pt_regs *regs)
 #ifdef CONFIG_HPET_TIMER
 	hpet_disable();
 #endif
-
-#ifdef CONFIG_X86_64
-	x86_platform.iommu_shutdown();
-#endif
-
 	crash_save_cpu(regs, safe_smp_processor_id());
 }
diff --git a/arch/x86/kernel/dumpstack.h b/arch/x86/kernel/dumpstack.h
index e39e77168a37..e1a93be4fd44 100644
--- a/arch/x86/kernel/dumpstack.h
+++ b/arch/x86/kernel/dumpstack.h
@@ -14,6 +14,8 @@
 #define get_bp(bp) asm("movq %%rbp, %0" : "=r" (bp) :)
 #endif
 
+#include <linux/uaccess.h>
+
 extern void
 show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
 		unsigned long *stack, unsigned long bp, char *log_lvl);
@@ -42,8 +44,10 @@ static inline unsigned long rewind_frame_pointer(int n)
 	get_bp(frame);
 
 #ifdef CONFIG_FRAME_POINTER
-	while (n--)
-		frame = frame->next_frame;
+	while (n--) {
+		if (probe_kernel_address(&frame->next_frame, frame))
+			break;
+	}
 #endif
 
 	return (unsigned long)frame;
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index 68cd24f9deae..0f7f130caa67 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -565,6 +565,9 @@ static void enable_gart_translations(void)
 
 		enable_gart_translation(dev, __pa(agp_gatt_table));
 	}
+
+	/* Flush the GART-TLB to remove stale entries */
+	k8_flush_garts();
 }
 
 /*
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 7e59dc1d3fc2..2bdf628066bd 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -115,7 +115,7 @@ static void async_hcall(unsigned long call, unsigned long arg1,
 	local_irq_save(flags);
 	if (lguest_data.hcall_status[next_call] != 0xFF) {
 		/* Table full, so do normal hcall which will flush table. */
-		kvm_hypercall4(call, arg1, arg2, arg3, arg4);
+		hcall(call, arg1, arg2, arg3, arg4);
 	} else {
 		lguest_data.hcalls[next_call].arg0 = call;
 		lguest_data.hcalls[next_call].arg1 = arg1;
@@ -145,46 +145,45 @@ static void async_hcall(unsigned long call, unsigned long arg1,
 * So, when we're in lazy mode, we call async_hcall() to store the call for
 * future processing:
 */
-static void lazy_hcall1(unsigned long call,
-			unsigned long arg1)
+static void lazy_hcall1(unsigned long call, unsigned long arg1)
 {
 	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
-		kvm_hypercall1(call, arg1);
+		hcall(call, arg1, 0, 0, 0);
 	else
 		async_hcall(call, arg1, 0, 0, 0);
 }
 
 /* You can imagine what lazy_hcall2, 3 and 4 look like. :*/
 static void lazy_hcall2(unsigned long call,
 			unsigned long arg1,
 			unsigned long arg2)
 {
 	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
-		kvm_hypercall2(call, arg1, arg2);
+		hcall(call, arg1, arg2, 0, 0);
 	else
 		async_hcall(call, arg1, arg2, 0, 0);
 }
 
 static void lazy_hcall3(unsigned long call,
 			unsigned long arg1,
 			unsigned long arg2,
 			unsigned long arg3)
 {
 	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
-		kvm_hypercall3(call, arg1, arg2, arg3);
+		hcall(call, arg1, arg2, arg3, 0);
 	else
 		async_hcall(call, arg1, arg2, arg3, 0);
 }
 
 #ifdef CONFIG_X86_PAE
 static void lazy_hcall4(unsigned long call,
 			unsigned long arg1,
 			unsigned long arg2,
 			unsigned long arg3,
 			unsigned long arg4)
 {
 	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
-		kvm_hypercall4(call, arg1, arg2, arg3, arg4);
+		hcall(call, arg1, arg2, arg3, arg4);
 	else
 		async_hcall(call, arg1, arg2, arg3, arg4);
 }
@@ -196,13 +195,13 @@ static void lazy_hcall4(unsigned long call,
 :*/
 static void lguest_leave_lazy_mmu_mode(void)
 {
-	kvm_hypercall0(LHCALL_FLUSH_ASYNC);
+	hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0, 0);
 	paravirt_leave_lazy_mmu();
 }
 
 static void lguest_end_context_switch(struct task_struct *next)
 {
-	kvm_hypercall0(LHCALL_FLUSH_ASYNC);
+	hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0, 0);
 	paravirt_end_context_switch(next);
 }
 
@@ -286,7 +285,7 @@ static void lguest_write_idt_entry(gate_desc *dt,
 	/* Keep the local copy up to date. */
 	native_write_idt_entry(dt, entrynum, g);
 	/* Tell Host about this new entry. */
-	kvm_hypercall3(LHCALL_LOAD_IDT_ENTRY, entrynum, desc[0], desc[1]);
+	hcall(LHCALL_LOAD_IDT_ENTRY, entrynum, desc[0], desc[1], 0);
 }
 
 /*
@@ -300,7 +299,7 @@ static void lguest_load_idt(const struct desc_ptr *desc)
 	struct desc_struct *idt = (void *)desc->address;
 
 	for (i = 0; i < (desc->size+1)/8; i++)
-		kvm_hypercall3(LHCALL_LOAD_IDT_ENTRY, i, idt[i].a, idt[i].b);
+		hcall(LHCALL_LOAD_IDT_ENTRY, i, idt[i].a, idt[i].b, 0);
 }
 
 /*
@@ -321,7 +320,7 @@ static void lguest_load_gdt(const struct desc_ptr *desc)
 	struct desc_struct *gdt = (void *)desc->address;
 
 	for (i = 0; i < (desc->size+1)/8; i++)
-		kvm_hypercall3(LHCALL_LOAD_GDT_ENTRY, i, gdt[i].a, gdt[i].b);
+		hcall(LHCALL_LOAD_GDT_ENTRY, i, gdt[i].a, gdt[i].b, 0);
 }
 
 /*
@@ -334,8 +333,8 @@ static void lguest_write_gdt_entry(struct desc_struct *dt, int entrynum,
 {
 	native_write_gdt_entry(dt, entrynum, desc, type);
 	/* Tell Host about this new entry. */
-	kvm_hypercall3(LHCALL_LOAD_GDT_ENTRY, entrynum,
-		       dt[entrynum].a, dt[entrynum].b);
+	hcall(LHCALL_LOAD_GDT_ENTRY, entrynum,
+	      dt[entrynum].a, dt[entrynum].b, 0);
 }
 
 /*
@@ -931,7 +930,7 @@ static int lguest_clockevent_set_next_event(unsigned long delta,
 	}
 
 	/* Please wake us this far in the future. */
-	kvm_hypercall1(LHCALL_SET_CLOCKEVENT, delta);
+	hcall(LHCALL_SET_CLOCKEVENT, delta, 0, 0, 0);
 	return 0;
 }
 
@@ -942,7 +941,7 @@ static void lguest_clockevent_set_mode(enum clock_event_mode mode,
 	case CLOCK_EVT_MODE_UNUSED:
 	case CLOCK_EVT_MODE_SHUTDOWN:
 		/* A 0 argument shuts the clock down. */
-		kvm_hypercall0(LHCALL_SET_CLOCKEVENT);
+		hcall(LHCALL_SET_CLOCKEVENT, 0, 0, 0, 0);
 		break;
 	case CLOCK_EVT_MODE_ONESHOT:
 		/* This is what we expect. */
@@ -1100,7 +1099,7 @@ static void set_lguest_basic_apic_ops(void)
 /* STOP!  Until an interrupt comes in. */
 static void lguest_safe_halt(void)
 {
-	kvm_hypercall0(LHCALL_HALT);
+	hcall(LHCALL_HALT, 0, 0, 0, 0);
 }
 
 /*
@@ -1112,8 +1111,8 @@ static void lguest_safe_halt(void)
 */
 static void lguest_power_off(void)
 {
-	kvm_hypercall2(LHCALL_SHUTDOWN, __pa("Power down"),
-		       LGUEST_SHUTDOWN_POWEROFF);
+	hcall(LHCALL_SHUTDOWN, __pa("Power down"),
+	      LGUEST_SHUTDOWN_POWEROFF, 0, 0);
 }
 
 /*
@@ -1123,7 +1122,7 @@ static void lguest_power_off(void)
 */
 static int lguest_panic(struct notifier_block *nb, unsigned long l, void *p)
 {
-	kvm_hypercall2(LHCALL_SHUTDOWN, __pa(p), LGUEST_SHUTDOWN_POWEROFF);
+	hcall(LHCALL_SHUTDOWN, __pa(p), LGUEST_SHUTDOWN_POWEROFF, 0, 0);
 	/* The hcall won't return, but to keep gcc happy, we're "done". */
 	return NOTIFY_DONE;
 }
@@ -1162,7 +1161,7 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
 		len = sizeof(scratch) - 1;
 	scratch[len] = '\0';
 	memcpy(scratch, buf, len);
-	kvm_hypercall1(LHCALL_NOTIFY, __pa(scratch));
+	hcall(LHCALL_NOTIFY, __pa(scratch), 0, 0, 0);
 
 	/* This routine returns the number of bytes actually written. */
 	return len;
@@ -1174,7 +1173,7 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
 */
 static void lguest_restart(char *reason)
 {
-	kvm_hypercall2(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART);
+	hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
 }
 
 /*G:050
diff --git a/arch/x86/lguest/i386_head.S b/arch/x86/lguest/i386_head.S
index 27eac0faee48..4f420c2f2d55 100644
--- a/arch/x86/lguest/i386_head.S
+++ b/arch/x86/lguest/i386_head.S
@@ -32,7 +32,7 @@ ENTRY(lguest_entry)
 	 */
 	movl $LHCALL_LGUEST_INIT, %eax
 	movl $lguest_data - __PAGE_OFFSET, %ebx
-	.byte 0x0f,0x01,0xc1 /* KVM_HYPERCALL */
+	int $LGUEST_TRAP_ENTRY
 
 	/* Set up the initial stack so we can run C code. */
 	movl $(init_thread_union+THREAD_SIZE),%esp
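For reference, the dropped byte sequence 0x0f,0x01,0xc1 encodes VMCALL, the KVM hypercall instruction; the boot stub now enters the Host through the same software interrupt the C-level hcall() wrapper uses (annotated restatement of the lines above, not new code):

	movl	$LHCALL_LGUEST_INIT, %eax	# hypercall number, as before
	int	$LGUEST_TRAP_ENTRY		# was: .byte 0x0f,0x01,0xc1 (vmcall)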