Diffstat (limited to 'arch/powerpc')
-rw-r--r--   arch/powerpc/Makefile                   | 16
-rw-r--r--   arch/powerpc/kernel/ppc_ksyms.c         |  6
-rw-r--r--   arch/powerpc/kernel/process.c           | 62
-rw-r--r--   arch/powerpc/kernel/prom_init.c         |  2
-rw-r--r--   arch/powerpc/kernel/vdso.c              |  6
-rw-r--r--   arch/powerpc/mm/hugetlbpage.c           |  2
-rw-r--r--   arch/powerpc/platforms/iseries/iommu.c  |  2
-rw-r--r--   arch/powerpc/platforms/pseries/iommu.c  |  2
-rw-r--r--   arch/powerpc/sysdev/dart.h              |  2
-rw-r--r--   arch/powerpc/sysdev/u3_iommu.c          |  4
10 files changed, 46 insertions, 58 deletions
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 98f67c78d1bd..a13eb575f834 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -61,15 +61,17 @@ endif
 LDFLAGS_vmlinux	:= -Bstatic
 
 # The -Iarch/$(ARCH)/include is temporary while we are merging
-CPPFLAGS	+= -Iarch/$(ARCH) -Iarch/$(ARCH)/include
-AFLAGS		+= -Iarch/$(ARCH)
-CFLAGS		+= -Iarch/$(ARCH) -msoft-float -pipe
+CPPFLAGS-$(CONFIG_PPC32) := -Iarch/$(ARCH) -Iarch/$(ARCH)/include
+AFLAGS-$(CONFIG_PPC32)	:= -Iarch/$(ARCH)
 CFLAGS-$(CONFIG_PPC64)	:= -mminimal-toc -mtraceback=none -mcall-aixdesc
-CFLAGS-$(CONFIG_PPC32)	:= -ffixed-r2 -mmultiple
-CFLAGS		+= $(CFLAGS-y)
+CFLAGS-$(CONFIG_PPC32)	:= -Iarch/$(ARCH) -ffixed-r2 -mmultiple
+CPPFLAGS	+= $(CPPFLAGS-y)
+AFLAGS		+= $(AFLAGS-y)
+CFLAGS		+= -msoft-float -pipe $(CFLAGS-y)
 CPP		= $(CC) -E $(CFLAGS)
 # Temporary hack until we have migrated to asm-powerpc
-LINUXINCLUDE	+= -Iarch/$(ARCH)/include
+LINUXINCLUDE-$(CONFIG_PPC32)	:= -Iarch/$(ARCH)/include
+LINUXINCLUDE	+= $(LINUXINCLUDE-y)
 
 CHECKFLAGS	+= -m$(SZ) -D__powerpc__ -D__powerpc$(SZ)__
 
@@ -173,11 +175,13 @@ archclean:
 
 archprepare: checkbin
 
+ifeq ($(CONFIG_PPC32),y)
 # Temporary hack until we have migrated to asm-powerpc
 include/asm: arch/$(ARCH)/include/asm
 arch/$(ARCH)/include/asm: FORCE
 	$(Q)if [ ! -d arch/$(ARCH)/include ]; then mkdir -p arch/$(ARCH)/include; fi
 	$(Q)ln -fsn $(srctree)/include/asm-$(OLDARCH) arch/$(ARCH)/include/asm
+endif
 
 # Use the file '.tmp_gas_check' for binutils tests, as gas won't output
 # to stdout and these checks are run even on install targets.
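For context, the hunk above relies on the kbuild flag-accumulation idiom: a variable named FOO-$(CONFIG_X) expands to FOO-y when the option is built in, so its value is picked up by a later FOO += $(FOO-y), and is silently ignored otherwise. A minimal standalone sketch of that idiom follows; the CONFIG value and flags are condensed for illustration, not taken verbatim from the kernel build.

# Sketch of the conditional flag-accumulation idiom (illustrative only).
# With CONFIG_PPC32=y the first assignment lands in CFLAGS-y; the
# CONFIG_PPC64 line lands in an unused variable (CFLAGS-) and is ignored.
CONFIG_PPC32 := y

CFLAGS-$(CONFIG_PPC32) := -Iarch/ppc -ffixed-r2 -mmultiple
CFLAGS-$(CONFIG_PPC64) := -mminimal-toc

CFLAGS += -msoft-float -pipe $(CFLAGS-y)

all:
	@echo CFLAGS = $(CFLAGS)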
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index af4d1bc9a2eb..94db25708456 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -163,15 +163,13 @@ EXPORT_SYMBOL(giveup_altivec);
 EXPORT_SYMBOL(giveup_spe);
 #endif /* CONFIG_SPE */
 
-#ifdef CONFIG_PPC64
-EXPORT_SYMBOL(__flush_icache_range);
-#else
+#ifndef CONFIG_PPC64
 EXPORT_SYMBOL(flush_instruction_cache);
-EXPORT_SYMBOL(flush_icache_range);
 EXPORT_SYMBOL(flush_tlb_kernel_range);
 EXPORT_SYMBOL(flush_tlb_page);
 EXPORT_SYMBOL(_tlbie);
 #endif
+EXPORT_SYMBOL(__flush_icache_range);
 EXPORT_SYMBOL(flush_dcache_range);
 
 #ifdef CONFIG_SMP
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index a5a7542a8ff3..105d5609ff57 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -201,6 +201,28 @@ int dump_spe(struct pt_regs *regs, elf_vrregset_t *evrregs)
 }
 #endif /* CONFIG_SPE */
 
+/*
+ * If we are doing lazy switching of CPU state (FP, altivec or SPE),
+ * and the current task has some state, discard it.
+ */
+static inline void discard_lazy_cpu_state(void)
+{
+#ifndef CONFIG_SMP
+	preempt_disable();
+	if (last_task_used_math == current)
+		last_task_used_math = NULL;
+#ifdef CONFIG_ALTIVEC
+	if (last_task_used_altivec == current)
+		last_task_used_altivec = NULL;
+#endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_SPE
+	if (last_task_used_spe == current)
+		last_task_used_spe = NULL;
+#endif
+	preempt_enable();
+#endif /* CONFIG_SMP */
+}
+
 int set_dabr(unsigned long dabr)
 {
 	if (ppc_md.set_dabr)
@@ -434,19 +456,7 @@ void show_regs(struct pt_regs * regs)
 void exit_thread(void)
 {
 	kprobe_flush_task(current);
-
-#ifndef CONFIG_SMP
-	if (last_task_used_math == current)
-		last_task_used_math = NULL;
-#ifdef CONFIG_ALTIVEC
-	if (last_task_used_altivec == current)
-		last_task_used_altivec = NULL;
-#endif /* CONFIG_ALTIVEC */
-#ifdef CONFIG_SPE
-	if (last_task_used_spe == current)
-		last_task_used_spe = NULL;
-#endif
-#endif /* CONFIG_SMP */
+	discard_lazy_cpu_state();
 }
 
 void flush_thread(void)
@@ -458,18 +468,7 @@ void flush_thread(void)
 	t->flags ^= (_TIF_ABI_PENDING | _TIF_32BIT);
 #endif
 
-#ifndef CONFIG_SMP
-	if (last_task_used_math == current)
-		last_task_used_math = NULL;
-#ifdef CONFIG_ALTIVEC
-	if (last_task_used_altivec == current)
-		last_task_used_altivec = NULL;
-#endif /* CONFIG_ALTIVEC */
-#ifdef CONFIG_SPE
-	if (last_task_used_spe == current)
-		last_task_used_spe = NULL;
-#endif
-#endif /* CONFIG_SMP */
+	discard_lazy_cpu_state();
 
 #ifdef CONFIG_PPC64	/* for now */
 	if (current->thread.dabr) {
@@ -635,18 +634,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
 	}
 #endif
 
-#ifndef CONFIG_SMP
-	if (last_task_used_math == current)
-		last_task_used_math = NULL;
-#ifdef CONFIG_ALTIVEC
-	if (last_task_used_altivec == current)
-		last_task_used_altivec = NULL;
-#endif
-#ifdef CONFIG_SPE
-	if (last_task_used_spe == current)
-		last_task_used_spe = NULL;
-#endif
-#endif /* CONFIG_SMP */
+	discard_lazy_cpu_state();
 	memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
 	current->thread.fpscr.val = 0;
 #ifdef CONFIG_ALTIVEC
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 4ce0105c308e..bcdc209dca85 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -265,7 +265,7 @@ static int __init call_prom_ret(const char *service, int nargs, int nret,
 	va_end(list);
 
 	for (i = 0; i < nret; i++)
-		rets[nargs+i] = 0;
+		args.args[nargs+i] = 0;
 
 	if (enter_prom(&args, RELOC(prom_entry)) < 0)
 		return PROM_ERROR;
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index b44b36e0c293..f0c47dab0903 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -145,8 +145,7 @@ static void dump_vdso_pages(struct vm_area_struct * vma)
 		struct page *pg = virt_to_page(vdso32_kbase +
 					       i*PAGE_SIZE);
 		struct page *upg = (vma && vma->vm_mm) ?
-			follow_page(vma->vm_mm, vma->vm_start +
-				    i*PAGE_SIZE, 0)
+			follow_page(vma, vma->vm_start + i*PAGE_SIZE, 0)
 			: NULL;
 		dump_one_vdso_page(pg, upg);
 	}
@@ -157,8 +156,7 @@ static void dump_vdso_pages(struct vm_area_struct * vma)
 		struct page *pg = virt_to_page(vdso64_kbase +
 					       i*PAGE_SIZE);
 		struct page *upg = (vma && vma->vm_mm) ?
-			follow_page(vma->vm_mm, vma->vm_start +
-				    i*PAGE_SIZE, 0)
+			follow_page(vma, vma->vm_start + i*PAGE_SIZE, 0)
 			: NULL;
 		dump_one_vdso_page(pg, upg);
 	}
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index f867bba893ca..6bc9dbad7dea 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -295,7 +295,7 @@ int prepare_hugepage_range(unsigned long addr, unsigned long len)
 	if (addr < 0x100000000UL)
 		err = open_low_hpage_areas(current->mm,
 					LOW_ESID_MASK(addr, len));
-	if ((addr + len) >= 0x100000000UL)
+	if ((addr + len) > 0x100000000UL)
 		err = open_high_hpage_areas(current->mm,
 					HTLB_AREA_MASK(addr, len));
 	if (err) {
diff --git a/arch/powerpc/platforms/iseries/iommu.c b/arch/powerpc/platforms/iseries/iommu.c
index bf081b345820..2b54eeb2c899 100644
--- a/arch/powerpc/platforms/iseries/iommu.c
+++ b/arch/powerpc/platforms/iseries/iommu.c
@@ -3,7 +3,7 @@
  *
  * Rewrite, cleanup:
  *
- * Copyright (C) 2004 Olof Johansson <olof@austin.ibm.com>, IBM Corporation
+ * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
  *
  * Dynamic DMA mapping support, iSeries-specific parts.
  *
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 97ba5214417f..c78f2b290a73 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -5,7 +5,7 @@
  *
  * Rewrite, cleanup:
  *
- * Copyright (C) 2004 Olof Johansson <olof@austin.ibm.com>, IBM Corporation
+ * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
  *
  * Dynamic DMA mapping support, pSeries-specific parts, both SMP and LPAR.
  *
diff --git a/arch/powerpc/sysdev/dart.h b/arch/powerpc/sysdev/dart.h
index ea8f0d9eed8a..33ed9ed7fc1e 100644
--- a/arch/powerpc/sysdev/dart.h
+++ b/arch/powerpc/sysdev/dart.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2004 Olof Johansson <olof@austin.ibm.com>, IBM Corporation
+ * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
diff --git a/arch/powerpc/sysdev/u3_iommu.c b/arch/powerpc/sysdev/u3_iommu.c
index f32baf7f4693..5c1a26a6d00c 100644
--- a/arch/powerpc/sysdev/u3_iommu.c
+++ b/arch/powerpc/sysdev/u3_iommu.c
@@ -1,11 +1,11 @@
 /*
  * arch/powerpc/sysdev/u3_iommu.c
  *
- * Copyright (C) 2004 Olof Johansson <olof@austin.ibm.com>, IBM Corporation
+ * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
  *
  * Based on pSeries_iommu.c:
  * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
- * Copyright (C) 2004 Olof Johansson <olof@austin.ibm.com>, IBM Corporation
+ * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
  *
  * Dynamic DMA mapping support, Apple U3 & IBM CPC925 "DART" iommu.
  *