 arch/powerpc/Kconfig                  | 13 -------------
 arch/powerpc/include/asm/ppc-opcode.h |  6 +++---
 arch/powerpc/include/asm/syscall.h    |  6 +++---
 arch/powerpc/kernel/head_fsl_booke.S  |  7 ++++---
 arch/powerpc/kernel/iommu.c           |  7 +------
 arch/powerpc/kernel/setup_32.c        |  6 ------
 arch/powerpc/kernel/setup_64.c        |  6 ------
 arch/powerpc/mm/mem.c                 |  6 ++++++
 8 files changed, 17 insertions(+), 40 deletions(-)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 8a54eb8e3768..2e19500921f9 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -313,19 +313,6 @@ config 8XX_MINIMAL_FPEMU
 
 	  It is recommended that you build a soft-float userspace instead.
 
-config IOMMU_VMERGE
-	bool "Enable IOMMU virtual merging"
-	depends on PPC64
-	default y
-	help
-	  Cause IO segments sent to a device for DMA to be merged virtually
-	  by the IOMMU when they happen to have been allocated contiguously.
-	  This doesn't add pressure to the IOMMU allocator. However, some
-	  drivers don't support getting large merged segments coming back
-	  from *_map_sg().
-
-	  Most drivers don't have this problem; it is safe to say Y here.
-
 config IOMMU_HELPER
 	def_bool PPC64
 
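Note on the hunk above: the removed help text's caveat is about drivers that assume dma_map_sg() hands back one DMA segment per input entry. With virtual merging, adjacent entries can come back fused, so a correct driver iterates over the returned count and the per-entry DMA lengths. A minimal sketch of the consuming pattern (program_hw_segment() is a hypothetical driver hook, not from this patch):

	int i, count = dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);
	struct scatterlist *s;

	/* honour the returned segment count, not the original nents */
	for_each_sg(sg, s, count, i)
		program_hw_segment(dev, sg_dma_address(s), sg_dma_len(s));

With the option gone, merging becomes the unconditional default on PPC64; the iommu.c hunk below drops the matching #ifdef.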
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index aea714797590..d553bbeb726c 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -25,7 +25,7 @@
 #define PPC_INST_LDARX			0x7c0000a8
 #define PPC_INST_LSWI			0x7c0004aa
 #define PPC_INST_LSWX			0x7c00042a
-#define PPC_INST_LWARX			0x7c000029
+#define PPC_INST_LWARX			0x7c000028
 #define PPC_INST_LWSYNC			0x7c2004ac
 #define PPC_INST_LXVD2X			0x7c000698
 #define PPC_INST_MCRXR			0x7c000400
@@ -62,8 +62,8 @@
 #define __PPC_T_TLB(t)	(((t) & 0x3) << 21)
 #define __PPC_WC(w)	(((w) & 0x3) << 21)
 /*
- * Only use the larx hint bit on 64bit CPUs. Once we verify it doesn't have
- * any side effects on all 32bit processors, we can do this all the time.
+ * Only use the larx hint bit on 64bit CPUs. e500v1/v2 based CPUs will treat a
+ * larx with EH set as an illegal instruction.
  */
 #ifdef CONFIG_PPC64
 #define __PPC_EH(eh)	(((eh) & 0x1) << 0)
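The two hunks above are one fix: 0x7c000029 had the EH reservation hint (bit 0) hard-wired into the LWARX base opcode, which is fatal on e500v1/v2 where lwarx with EH=1 traps as an illegal instruction. With the base corrected to 0x7c000028, the hint is only ORed in through __PPC_EH(), and only on CONFIG_PPC64. Roughly how the pieces compose elsewhere in ppc-opcode.h (helper shapes quoted from memory, so treat this as a sketch):

	#define ___PPC_RT(t)	(((t) & 0x1f) << 21)	/* target register field */
	#define ___PPC_RA(a)	(((a) & 0x1f) << 16)
	#define ___PPC_RB(b)	(((b) & 0x1f) << 11)

	/* lwarx rT,rA,rB[,EH] - bit 0 of the base stays clear, so EH is
	 * added only when requested (and compiled out on 32-bit). */
	#define PPC_LWARX(t, a, b, eh)	stringify_in_c(.long PPC_INST_LWARX | \
					___PPC_RT(t) | ___PPC_RA(a) | \
					___PPC_RB(b) | __PPC_EH(eh))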
diff --git a/arch/powerpc/include/asm/syscall.h b/arch/powerpc/include/asm/syscall.h
index efa7f0b879f3..23913e902fc3 100644
--- a/arch/powerpc/include/asm/syscall.h
+++ b/arch/powerpc/include/asm/syscall.h
@@ -30,7 +30,7 @@ static inline void syscall_rollback(struct task_struct *task,
 static inline long syscall_get_error(struct task_struct *task,
 				     struct pt_regs *regs)
 {
-	return (regs->ccr & 0x1000) ? -regs->gpr[3] : 0;
+	return (regs->ccr & 0x10000000) ? -regs->gpr[3] : 0;
 }
 
 static inline long syscall_get_return_value(struct task_struct *task,
@@ -44,10 +44,10 @@ static inline void syscall_set_return_value(struct task_struct *task,
 					    int error, long val)
 {
 	if (error) {
-		regs->ccr |= 0x1000L;
+		regs->ccr |= 0x10000000L;
 		regs->gpr[3] = -error;
 	} else {
-		regs->ccr &= ~0x1000L;
+		regs->ccr &= ~0x10000000L;
 		regs->gpr[3] = val;
 	}
 }
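Both masks above are the same fix: in the 32-bit CR image saved in pt_regs->ccr, CR0 occupies the top nibble (LT, GT, EQ, SO in bits 31..28), so the summary-overflow flag the syscall exit path sets on error is bit 28, mask 0x10000000. The old 0x1000 (bit 12) actually pointed into CR4's field. The same test with a named constant, for clarity (CCR_CR0_SO is a hypothetical name, not introduced by the patch):

	#define CCR_CR0_SO	0x10000000UL	/* CR0[SO] in the saved CR image */

	static inline int syscall_failed(struct pt_regs *regs)
	{
		return (regs->ccr & CCR_CR0_SO) != 0;
	}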
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 25793bb0e782..725526547994 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -747,9 +747,6 @@ finish_tlb_load:
 #else
 	rlwimi	r12, r11, 26, 27, 31	/* extract WIMGE from pte */
 #endif
-#ifdef CONFIG_SMP
-	ori	r12, r12, MAS2_M
-#endif
 	mtspr	SPRN_MAS2, r12
 
 #ifdef CONFIG_PTE_64BIT
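With the unconditional `ori r12, r12, MAS2_M` gone, the M (memory-coherence) attribute now comes only from the WIMGE bits extracted from the PTE above, so SMP kernels rely on _PAGE_COHERENT being set in the page tables instead of forcing coherence onto every TLB entry. For reference, the WIMGE attributes sit in the low bits of MAS2 (values per the e500 core manual; the kernel's Book-E MMU header carries matching MAS2_* defines):

	#define MAS2_W	0x00000010	/* Write-through */
	#define MAS2_I	0x00000008	/* cache-Inhibited */
	#define MAS2_M	0x00000004	/* Memory coherence required */
	#define MAS2_G	0x00000002	/* Guarded */
	#define MAS2_E	0x00000001	/* little-Endian */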
@@ -887,13 +884,17 @@ KernelSPE:
 	lwz	r3,_MSR(r1)
 	oris	r3,r3,MSR_SPE@h
 	stw	r3,_MSR(r1)	/* enable use of SPE after return */
+#ifdef CONFIG_PRINTK
 	lis	r3,87f@h
 	ori	r3,r3,87f@l
 	mr	r4,r2		/* current */
 	lwz	r5,_NIP(r1)
 	bl	printk
+#endif
 	b	ret_from_except
+#ifdef CONFIG_PRINTK
 87:	.string	"SPE used in kernel (task=%p, pc=%x) \n"
+#endif
 	.align	4,0
 
 #endif /* CONFIG_SPE */
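The CONFIG_PRINTK guards matter because, unlike C callers (where printk() collapses to a static inline no-op when CONFIG_PRINTK=n), the assembly `bl printk` references the real symbol and would fail to link. Guarding the call and its 87: string together keeps both out of printk-less kernels. Approximately what the guarded asm amounts to in C:

	#ifdef CONFIG_PRINTK
		printk("SPE used in kernel (task=%p, pc=%x) \n",
		       current, (unsigned int)regs->nip);
	#endif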
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 5547ae6e6b0b..ec94f906ea43 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -42,12 +42,7 @@
 
 #define DBG(...)
 
-#ifdef CONFIG_IOMMU_VMERGE
-static int novmerge = 0;
-#else
-static int novmerge = 1;
-#endif
-
+static int novmerge;
 static int protect4gb = 1;
 
 static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);
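`static int novmerge;` relies on static storage being zero-initialized, so merging is now on by default everywhere, matching the old IOMMU_VMERGE=y default, and the explicit `= 0` is dropped per kernel style. The boot-time escape hatch this file has long carried should still apply; from memory, the handler looks roughly like this (a sketch, not part of this patch):

	static int __init setup_iommu(char *str)
	{
		if (!strcmp(str, "novmerge"))
			novmerge = 1;	/* iommu=novmerge: one mapping per sg entry */
		else if (!strcmp(str, "vmerge"))
			novmerge = 0;	/* iommu=vmerge: allow virtual merging */
		return 1;
	}
	__setup("iommu=", setup_iommu);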
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index b152de3e64d4..8f58986c2ad9 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -39,7 +39,6 @@
 #include <asm/serial.h>
 #include <asm/udbg.h>
 #include <asm/mmu_context.h>
-#include <asm/swiotlb.h>
 
 #include "setup.h"
 
@@ -343,11 +342,6 @@ void __init setup_arch(char **cmdline_p)
 		ppc_md.setup_arch();
 	if ( ppc_md.progress ) ppc_md.progress("arch: exit", 0x3eab);
 
-#ifdef CONFIG_SWIOTLB
-	if (ppc_swiotlb_enable)
-		swiotlb_init(1);
-#endif
-
 	paging_init();
 
 	/* Initialize the MMU context management stuff */
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 63547394048c..914389158a9b 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -61,7 +61,6 @@
 #include <asm/xmon.h>
 #include <asm/udbg.h>
 #include <asm/kexec.h>
-#include <asm/swiotlb.h>
 #include <asm/mmu_context.h>
 
 #include "setup.h"
@@ -541,11 +540,6 @@ void __init setup_arch(char **cmdline_p)
 	if (ppc_md.setup_arch)
 		ppc_md.setup_arch();
 
-#ifdef CONFIG_SWIOTLB
-	if (ppc_swiotlb_enable)
-		swiotlb_init(1);
-#endif
-
 	paging_init();
 
 	/* Initialize the MMU context management stuff */
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 311224cdb7ad..448f972b22f5 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -48,6 +48,7 @@
 #include <asm/sparsemem.h>
 #include <asm/vdso.h>
 #include <asm/fixmap.h>
+#include <asm/swiotlb.h>
 
 #include "mmu_decl.h"
 
@@ -320,6 +321,11 @@ void __init mem_init(void)
 	struct page *page;
 	unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;
 
+#ifdef CONFIG_SWIOTLB
+	if (ppc_swiotlb_enable)
+		swiotlb_init(1);
+#endif
+
 	num_physpages = lmb.memory.size >> PAGE_SHIFT;
 	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
 
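Net effect of the last three files: the swiotlb_init(1) call (the argument is the verbose flag) moves out of the two per-word-size setup_arch() copies into the shared mem_init(), so it runs once for both 32- and 64-bit kernels after the final memory layout is known. Platforms opt in by setting ppc_swiotlb_enable before mem_init() runs; a sketch of the usual trigger in a board file (assumed shape, not taken from this patch):

	#include <linux/lmb.h>
	#include <asm/swiotlb.h>

	static void __init board_setup_arch(void)
	{
	#ifdef CONFIG_SWIOTLB
		/* RAM above 4GB is unreachable through a 32-bit DMA mask,
		 * so request the bounce-buffer pool. */
		if (lmb_end_of_DRAM() > 0xffffffffull)
			ppc_swiotlb_enable = 1;
	#endif
	}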