Diffstat (limited to 'arch/powerpc/include')
 58 files changed, 1295 insertions(+), 927 deletions(-)
diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h
index 107d9b915e33..37c32aba79b7 100644
--- a/arch/powerpc/include/asm/code-patching.h
+++ b/arch/powerpc/include/asm/code-patching.h
@@ -11,9 +11,7 @@
  */
 
 #include <asm/types.h>
-
-#define PPC_NOP_INSTR 0x60000000
-#define PPC_LWSYNC_INSTR 0x7c2004ac
+#include <asm/ppc-opcode.h>
 
 /* Flags for create_branch:
  * "b" == create_branch(addr, target, 0);
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index 4911104791c3..80f315e8a421 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -145,6 +145,7 @@ extern const char *powerpc_base_platform;
 #define CPU_FTR_USE_TB ASM_CONST(0x0000000000000040)
 #define CPU_FTR_L2CSR ASM_CONST(0x0000000000000080)
 #define CPU_FTR_601 ASM_CONST(0x0000000000000100)
+#define CPU_FTR_DBELL ASM_CONST(0x0000000000000200)
 #define CPU_FTR_CAN_NAP ASM_CONST(0x0000000000000400)
 #define CPU_FTR_L3CR ASM_CONST(0x0000000000000800)
 #define CPU_FTR_L3_DISABLE_NAP ASM_CONST(0x0000000000001000)
@@ -241,9 +242,11 @@ extern const char *powerpc_base_platform;
 /* We need to mark all pages as being coherent if we're SMP or we have a
  * 74[45]x and an MPC107 host bridge. Also 83xx and PowerQUICC II
  * require it for PCI "streaming/prefetch" to work properly.
+ * This is also required by 52xx family.
  */
 #if defined(CONFIG_SMP) || defined(CONFIG_MPC10X_BRIDGE) \
-	|| defined(CONFIG_PPC_83xx) || defined(CONFIG_8260)
+	|| defined(CONFIG_PPC_83xx) || defined(CONFIG_8260) \
+	|| defined(CONFIG_PPC_MPC52xx)
 #define CPU_FTR_COMMON CPU_FTR_NEED_COHERENT
 #else
 #define CPU_FTR_COMMON 0
@@ -373,7 +376,8 @@ extern const char *powerpc_base_platform;
 	    CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE)
 #define CPU_FTRS_E500MC (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \
 	    CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NODSISRALIGN | \
-	    CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE)
+	    CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
+	    CPU_FTR_DBELL)
 #define CPU_FTRS_GENERIC_32 (CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN)
 
 /* 64-bit CPUs */
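
For orientation, feature bits such as the new CPU_FTR_DBELL are tested at run time with cpu_has_feature(), which is the existing cputable.h API; the wrapper below is a minimal sketch invented for illustration, not code from this commit:

    /* Hypothetical guard: only use msgsnd-based doorbell IPIs when the
     * CPU advertises the feature (set for e500mc in the hunk above). */
    static inline int can_use_doorbell_ipi(void)
    {
            return cpu_has_feature(CPU_FTR_DBELL);
    }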
diff --git a/arch/powerpc/include/asm/dbell.h b/arch/powerpc/include/asm/dbell.h new file mode 100644 index 000000000000..501189a543d1 --- /dev/null +++ b/arch/powerpc/include/asm/dbell.h | |||
@@ -0,0 +1,43 @@ | |||
1 | /* | ||
2 | * Copyright 2009 Freescale Semicondutor, Inc. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation; either version | ||
7 | * 2 of the License, or (at your option) any later version. | ||
8 | * | ||
9 | * provides masks and opcode images for use by code generation, emulation | ||
10 | * and for instructions that older assemblers might not know about | ||
11 | */ | ||
12 | #ifndef _ASM_POWERPC_DBELL_H | ||
13 | #define _ASM_POWERPC_DBELL_H | ||
14 | |||
15 | #include <linux/smp.h> | ||
16 | #include <linux/threads.h> | ||
17 | |||
18 | #include <asm/ppc-opcode.h> | ||
19 | |||
20 | #define PPC_DBELL_MSG_BRDCAST (0x04000000) | ||
21 | #define PPC_DBELL_TYPE(x) (((x) & 0xf) << 28) | ||
22 | enum ppc_dbell { | ||
23 | PPC_DBELL = 0, /* doorbell */ | ||
24 | PPC_DBELL_CRIT = 1, /* critical doorbell */ | ||
25 | PPC_G_DBELL = 2, /* guest doorbell */ | ||
26 | PPC_G_DBELL_CRIT = 3, /* guest critical doorbell */ | ||
27 | PPC_G_DBELL_MC = 4, /* guest mcheck doorbell */ | ||
28 | }; | ||
29 | |||
30 | #ifdef CONFIG_SMP | ||
31 | extern unsigned long dbell_smp_message[NR_CPUS]; | ||
32 | extern void smp_dbell_message_pass(int target, int msg); | ||
33 | #endif | ||
34 | |||
35 | static inline void ppc_msgsnd(enum ppc_dbell type, u32 flags, u32 tag) | ||
36 | { | ||
37 | u32 msg = PPC_DBELL_TYPE(type) | (flags & PPC_DBELL_MSG_BRDCAST) | | ||
38 | (tag & 0x07ffffff); | ||
39 | |||
40 | __asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg)); | ||
41 | } | ||
42 | |||
43 | #endif /* _ASM_POWERPC_DBELL_H */ | ||
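
A minimal sketch of how the smp_dbell_message_pass() helper declared above could sit on top of ppc_msgsnd(). This is an assumption from the definitions in this header alone, not the actual arch/powerpc implementation; in particular, using the tag to address the target core is assumed:

    /* Sketch: mark the message pending for the target CPU, then send a
     * regular doorbell whose tag selects that CPU. The receiver's
     * doorbell handler would test-and-clear dbell_smp_message[]. */
    void smp_dbell_message_pass(int target, int msg)
    {
            set_bit(msg, &dbell_smp_message[target]);
            smp_mb();       /* publish the bit before the doorbell fires */
            ppc_msgsnd(PPC_DBELL, 0, target);
    }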
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index 86cef7ddc8d5..c69f2b5f0cc4 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -109,18 +109,8 @@ static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
  * only ISA DMA device we support is the floppy and we have a hack
  * in the floppy driver directly to get a device for us.
  */
-
-	if (unlikely(dev == NULL) || dev->archdata.dma_ops == NULL) {
-#ifdef CONFIG_PPC64
+	if (unlikely(dev == NULL))
 		return NULL;
-#else
-		/* Use default on 32-bit if dma_ops is not set up */
-		/* TODO: Long term, we should fix drivers so that dev and
-		 * archdata dma_ops are set up for all buses.
-		 */
-		return &dma_direct_ops;
-#endif
-	}
 
 	return dev->archdata.dma_ops;
 }
diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
index b5600ce6055e..1a856b15226e 100644
--- a/arch/powerpc/include/asm/elf.h
+++ b/arch/powerpc/include/asm/elf.h
@@ -8,6 +8,7 @@
 #endif
 
 #include <linux/types.h>
+
 #include <asm/ptrace.h>
 #include <asm/cputable.h>
 #include <asm/auxvec.h>
@@ -178,7 +179,8 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
    the loader. We need to make sure that it is out of the way of the program
    that it will "exec", and that there is sufficient room for the brk. */
 
-#define ELF_ET_DYN_BASE (0x20000000)
+extern unsigned long randomize_et_dyn(unsigned long base);
+#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
 
 /*
  * Our registers are always unsigned longs, whether we're a 32 bit
@@ -270,6 +272,14 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
 				       int uses_interp);
 #define VDSO_AUX_ENT(a,b) NEW_AUX_ENT(a,b);
 
+/* 1GB for 64bit, 8MB for 32bit */
+#define STACK_RND_MASK (is_32bit_task() ? \
+	(0x7ff >> (PAGE_SHIFT - 12)) : \
+	(0x3ffff >> (PAGE_SHIFT - 12)))
+
+extern unsigned long arch_randomize_brk(struct mm_struct *mm);
+#define arch_randomize_brk arch_randomize_brk
+
 #endif /* __KERNEL__ */
 
 /*
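
The STACK_RND_MASK arithmetic is easiest to check with 4K pages, where the shift drops out. A hypothetical helper (not part of the header) computing the randomization range in bytes:

    /* With PAGE_SHIFT == 12: 32-bit -> (0x7ff + 1) << 12 = 8MB,
     * 64-bit -> (0x3ffff + 1) << 12 = 1GB. Larger page sizes shrink
     * the mask, so the byte range stays constant. */
    static unsigned long stack_rnd_bytes(int is32, unsigned int page_shift)
    {
            unsigned long mask = is32 ? (0x7ff >> (page_shift - 12))
                                      : (0x3ffff >> (page_shift - 12));
            return (mask + 1UL) << page_shift;
    }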
diff --git a/arch/powerpc/include/asm/fixmap.h b/arch/powerpc/include/asm/fixmap.h
index 8428b38a3d30..d60fd18f428c 100644
--- a/arch/powerpc/include/asm/fixmap.h
+++ b/arch/powerpc/include/asm/fixmap.h
@@ -61,7 +61,7 @@ extern void __set_fixmap (enum fixed_addresses idx,
  * Some hardware wants to get fixmapped without caching.
  */
 #define set_fixmap_nocache(idx, phys) \
-		__set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
+		__set_fixmap(idx, phys, PAGE_KERNEL_NCG)
 
 #define clear_fixmap(idx) \
 		__set_fixmap(idx, 0, __pgprot(0))
diff --git a/arch/powerpc/include/asm/ftrace.h b/arch/powerpc/include/asm/ftrace.h
index e5f2ae8362f7..dde1296b8b41 100644
--- a/arch/powerpc/include/asm/ftrace.h
+++ b/arch/powerpc/include/asm/ftrace.h
@@ -5,7 +5,44 @@
 #define MCOUNT_ADDR ((long)(_mcount))
 #define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */
 
-#ifndef __ASSEMBLY__
+#ifdef __ASSEMBLY__
+
+/* Based off of objdump output from glibc */
+
+#define MCOUNT_SAVE_FRAME		\
+	stwu	r1,-48(r1);		\
+	stw	r3, 12(r1);		\
+	stw	r4, 16(r1);		\
+	stw	r5, 20(r1);		\
+	stw	r6, 24(r1);		\
+	mflr	r3;			\
+	lwz	r4, 52(r1);		\
+	mfcr	r5;			\
+	stw	r7, 28(r1);		\
+	stw	r8, 32(r1);		\
+	stw	r9, 36(r1);		\
+	stw	r10,40(r1);		\
+	stw	r3, 44(r1);		\
+	stw	r5, 8(r1)
+
+#define MCOUNT_RESTORE_FRAME		\
+	lwz	r6, 8(r1);		\
+	lwz	r0, 44(r1);		\
+	lwz	r3, 12(r1);		\
+	mtctr	r0;			\
+	lwz	r4, 16(r1);		\
+	mtcr	r6;			\
+	lwz	r5, 20(r1);		\
+	lwz	r6, 24(r1);		\
+	lwz	r0, 52(r1);		\
+	lwz	r7, 28(r1);		\
+	lwz	r8, 32(r1);		\
+	mtlr	r0;			\
+	lwz	r9, 36(r1);		\
+	lwz	r10,40(r1);		\
+	addi	r1, r1, 48
+
+#else /* !__ASSEMBLY__ */
 extern void _mcount(void);
 
 #ifdef CONFIG_DYNAMIC_FTRACE
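
As a reading aid, a hypothetical C view of the 48-byte frame that MCOUNT_SAVE_FRAME builds (the struct and field names are invented; the macros work on raw r1 offsets):

    struct mcount_frame {
            unsigned long back_chain;  /*  0(r1): old r1, written by stwu */
            unsigned long lr_save;     /*  4(r1): ABI LR save word, unused here */
            unsigned long cr;          /*  8(r1): CR, staged through r5 */
            unsigned long gpr[8];      /* 12..40(r1): volatile args r3-r10 */
            unsigned long lr;          /* 44(r1): LR = callsite in the traced function */
    };  /* sizeof == 48; the lwz from 52(r1) reads the parent frame's LR save slot */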
diff --git a/arch/powerpc/include/asm/highmem.h b/arch/powerpc/include/asm/highmem.h
index 04e4a620952e..684a73f4324f 100644
--- a/arch/powerpc/include/asm/highmem.h
+++ b/arch/powerpc/include/asm/highmem.h
@@ -24,6 +24,7 @@
 
 #include <linux/init.h>
 #include <linux/interrupt.h>
+#include <linux/highmem.h>
 #include <asm/kmap_types.h>
 #include <asm/tlbflush.h>
 #include <asm/page.h>
@@ -39,15 +40,15 @@ extern pte_t *pkmap_page_table;
  * chunk of RAM.
  */
 /*
- * We use one full pte table with 4K pages. And with 16K/64K pages pte
- * table covers enough memory (32MB and 512MB resp.) that both FIXMAP
- * and PKMAP can be placed in single pte table. We use 1024 pages for
- * PKMAP in case of 16K/64K pages.
+ * We use one full pte table with 4K pages. And with 16K/64K/256K pages pte
+ * table covers enough memory (32MB/512MB/2GB resp.), so that both FIXMAP
+ * and PKMAP can be placed in a single pte table. We use 512 pages for PKMAP
+ * in case of 16K/64K/256K page sizes.
  */
 #ifdef CONFIG_PPC_4K_PAGES
 #define PKMAP_ORDER PTE_SHIFT
 #else
-#define PKMAP_ORDER 10
+#define PKMAP_ORDER 9
 #endif
 #define LAST_PKMAP (1 << PKMAP_ORDER)
 #ifndef CONFIG_PPC_4K_PAGES
@@ -94,12 +95,13 @@ static inline void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
 	if (!PageHighMem(page))
 		return page_address(page);
 
+	debug_kmap_atomic(type);
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
 	BUG_ON(!pte_none(*(kmap_pte-idx)));
 #endif
-	__set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot));
+	__set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot), 1);
 	local_flush_tlb_page(NULL, vaddr);
 
 	return (void*) vaddr;
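
To make the PKMAP resizing above concrete, the address-space arithmetic (assuming 4-byte PTEs, so PTE_SHIFT == 10 in the 4K case):

    /* LAST_PKMAP = 1 << PKMAP_ORDER, and each slot maps one page:
     *   4K pages:   1024 slots * 4K   = 4MB
     *   64K pages:   512 slots * 64K  = 32MB
     *   256K pages:  512 slots * 256K = 128MB
     * small enough that FIXMAP and PKMAP still share one PTE page. */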
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index e10f151c3db6..b43076ff92c9 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -129,7 +129,7 @@ static inline int irqs_disabled_flags(unsigned long flags)
  * interrupt-retrigger: should we handle this via lost interrupts and IPIs
  * or should we not care like we do now ? --BenH.
  */
-struct hw_interrupt_type;
+struct irq_chip;
 
 #ifdef CONFIG_PERF_COUNTERS
 static inline unsigned long get_perf_counter_pending(void)
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index 494cd8b0a278..001f2f11c19b 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -632,6 +632,9 @@ static inline void iosync(void)
  *   ioremap_flags and cannot be hooked (but can be used by a hook on one
  *   of the previous ones)
  *
+ * * __ioremap_caller is the same as above but takes an explicit caller
+ *   reference rather than using __builtin_return_address(0)
+ *
  * * __iounmap, is the low level implementation used by iounmap and cannot
  *   be hooked (but can be used by a hook on iounmap)
  *
@@ -646,6 +649,9 @@ extern void iounmap(volatile void __iomem *addr);
 
 extern void __iomem *__ioremap(phys_addr_t, unsigned long size,
			       unsigned long flags);
+extern void __iomem *__ioremap_caller(phys_addr_t, unsigned long size,
+				      unsigned long flags, void *caller);
+
 extern void __iounmap(volatile void __iomem *addr);
 
 extern void __iomem * __ioremap_at(phys_addr_t pa, void *ea,
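
A sketch of the intended calling convention for the new entry point: a wrapper that cannot itself be attributed records its own call site so the mapping is charged to the real caller. The wrapper name is invented; only __ioremap_caller() comes from the header:

    void __iomem *my_ioremap(phys_addr_t addr, unsigned long size,
                             unsigned long flags)
    {
            /* attribute the mapping to my_ioremap()'s caller */
            return __ioremap_caller(addr, size, flags,
                                    __builtin_return_address(0));
    }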
diff --git a/arch/powerpc/include/asm/kvm.h b/arch/powerpc/include/asm/kvm.h
index 4e0cf65f7f5a..bb2de6aa5ce0 100644
--- a/arch/powerpc/include/asm/kvm.h
+++ b/arch/powerpc/include/asm/kvm.h
@@ -52,4 +52,11 @@ struct kvm_fpu {
 	__u64 fpr[32];
 };
 
+struct kvm_debug_exit_arch {
+};
+
+/* for KVM_SET_GUEST_DEBUG */
+struct kvm_guest_debug_arch {
+};
+
 #endif /* __LINUX_KVM_POWERPC_H */
diff --git a/arch/powerpc/include/asm/kvm_44x.h b/arch/powerpc/include/asm/kvm_44x.h
index f49031b632ca..d22d39942a92 100644
--- a/arch/powerpc/include/asm/kvm_44x.h
+++ b/arch/powerpc/include/asm/kvm_44x.h
@@ -28,6 +28,13 @@
  * need to find some way of advertising it. */
 #define KVM44x_GUEST_TLB_SIZE 64
 
+struct kvmppc_44x_tlbe {
+	u32 tid; /* Only the low 8 bits are used. */
+	u32 word0;
+	u32 word1;
+	u32 word2;
+};
+
 struct kvmppc_44x_shadow_ref {
 	struct page *page;
 	u16 gtlb_index;
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index 2197764796d9..56bfae59837f 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -42,7 +42,12 @@
 #define BOOKE_INTERRUPT_DTLB_MISS 13
 #define BOOKE_INTERRUPT_ITLB_MISS 14
 #define BOOKE_INTERRUPT_DEBUG 15
-#define BOOKE_MAX_INTERRUPT 15
+
+/* E500 */
+#define BOOKE_INTERRUPT_SPE_UNAVAIL 32
+#define BOOKE_INTERRUPT_SPE_FP_DATA 33
+#define BOOKE_INTERRUPT_SPE_FP_ROUND 34
+#define BOOKE_INTERRUPT_PERFORMANCE_MONITOR 35
 
 #define RESUME_FLAG_NV (1<<0) /* Reload guest nonvolatile state? */
 #define RESUME_FLAG_HOST (1<<1) /* Resume host? */
diff --git a/arch/powerpc/include/asm/kvm_e500.h b/arch/powerpc/include/asm/kvm_e500.h new file mode 100644 index 000000000000..9d497ce49726 --- /dev/null +++ b/arch/powerpc/include/asm/kvm_e500.h | |||
@@ -0,0 +1,67 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2008 Freescale Semiconductor, Inc. All rights reserved. | ||
3 | * | ||
4 | * Author: Yu Liu, <yu.liu@freescale.com> | ||
5 | * | ||
6 | * Description: | ||
7 | * This file is derived from arch/powerpc/include/asm/kvm_44x.h, | ||
8 | * by Hollis Blanchard <hollisb@us.ibm.com>. | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License, version 2, as | ||
12 | * published by the Free Software Foundation. | ||
13 | */ | ||
14 | |||
15 | #ifndef __ASM_KVM_E500_H__ | ||
16 | #define __ASM_KVM_E500_H__ | ||
17 | |||
18 | #include <linux/kvm_host.h> | ||
19 | |||
20 | #define BOOKE_INTERRUPT_SIZE 36 | ||
21 | |||
22 | #define E500_PID_NUM 3 | ||
23 | #define E500_TLB_NUM 2 | ||
24 | |||
25 | struct tlbe{ | ||
26 | u32 mas1; | ||
27 | u32 mas2; | ||
28 | u32 mas3; | ||
29 | u32 mas7; | ||
30 | }; | ||
31 | |||
32 | struct kvmppc_vcpu_e500 { | ||
33 | /* Unmodified copy of the guest's TLB. */ | ||
34 | struct tlbe *guest_tlb[E500_TLB_NUM]; | ||
35 | /* TLB that's actually used when the guest is running. */ | ||
36 | struct tlbe *shadow_tlb[E500_TLB_NUM]; | ||
37 | /* Pages which are referenced in the shadow TLB. */ | ||
38 | struct page **shadow_pages[E500_TLB_NUM]; | ||
39 | |||
40 | unsigned int guest_tlb_size[E500_TLB_NUM]; | ||
41 | unsigned int shadow_tlb_size[E500_TLB_NUM]; | ||
42 | unsigned int guest_tlb_nv[E500_TLB_NUM]; | ||
43 | |||
44 | u32 host_pid[E500_PID_NUM]; | ||
45 | u32 pid[E500_PID_NUM]; | ||
46 | |||
47 | u32 mas0; | ||
48 | u32 mas1; | ||
49 | u32 mas2; | ||
50 | u32 mas3; | ||
51 | u32 mas4; | ||
52 | u32 mas5; | ||
53 | u32 mas6; | ||
54 | u32 mas7; | ||
55 | u32 l1csr1; | ||
56 | u32 hid0; | ||
57 | u32 hid1; | ||
58 | |||
59 | struct kvm_vcpu vcpu; | ||
60 | }; | ||
61 | |||
62 | static inline struct kvmppc_vcpu_e500 *to_e500(struct kvm_vcpu *vcpu) | ||
63 | { | ||
64 | return container_of(vcpu, struct kvmppc_vcpu_e500, vcpu); | ||
65 | } | ||
66 | |||
67 | #endif /* __ASM_KVM_E500_H__ */ | ||
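
The to_e500() accessor relies on the generic kvm_vcpu being embedded in the container struct; a small illustration (the checking function is invented for the example):

    /* container_of() subtracts offsetof(struct kvmppc_vcpu_e500, vcpu)
     * from the member pointer, so the round trip is exact: */
    static void check_to_e500(struct kvmppc_vcpu_e500 *e500)
    {
            struct kvm_vcpu *vcpu = &e500->vcpu;    /* generic view */
            BUG_ON(to_e500(vcpu) != e500);
    }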
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index c1e436fe7738..dfdf13c9fefd 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -64,13 +64,6 @@ struct kvm_vcpu_stat {
 	u32 halt_wakeup;
 };
 
-struct kvmppc_44x_tlbe {
-	u32 tid; /* Only the low 8 bits are used. */
-	u32 word0;
-	u32 word1;
-	u32 word2;
-};
-
 enum kvm_exit_types {
 	MMIO_EXITS,
 	DCR_EXITS,
@@ -118,11 +111,6 @@ struct kvm_arch {
 struct kvm_vcpu_arch {
 	u32 host_stack;
 	u32 host_pid;
-	u32 host_dbcr0;
-	u32 host_dbcr1;
-	u32 host_dbcr2;
-	u32 host_iac[4];
-	u32 host_msr;
 
 	u64 fpr[32];
 	ulong gpr[32];
@@ -157,7 +145,7 @@ struct kvm_vcpu_arch {
 	u32 tbu;
 	u32 tcr;
 	u32 tsr;
-	u32 ivor[16];
+	u32 ivor[64];
 	ulong ivpr;
 	u32 pir;
 
@@ -170,6 +158,7 @@ struct kvm_vcpu_arch {
 	u32 ccr1;
 	u32 dbcr0;
 	u32 dbcr1;
+	u32 dbsr;
 
 #ifdef CONFIG_KVM_EXIT_TIMING
 	struct kvmppc_exit_timing timing_exit;
@@ -200,10 +189,4 @@ struct kvm_vcpu_arch {
 	unsigned long pending_exceptions;
 };
 
-struct kvm_guest_debug {
-	int enabled;
-	unsigned long bp[4];
-	int singlestep;
-};
-
 #endif /* __POWERPC_KVM_HOST_H__ */
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 36d2a50a8487..2c6ee349df5e 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -52,13 +52,19 @@ extern int kvmppc_emulate_instruction(struct kvm_run *run,
 extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
 extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
 
+/* Core-specific hooks */
+
 extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
-			   u64 asid, u32 flags, u32 max_bytes,
			   unsigned int gtlb_idx);
 extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
 extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
-
-/* Core-specific hooks */
+extern void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu);
+extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
+extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
+extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
+			      gva_t eaddr);
+extern void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu);
+extern void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu);
 
 extern struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm,
						unsigned int id);
@@ -71,9 +77,6 @@ extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
 extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
 extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);
 
-extern void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu);
-extern void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu);
-
 extern void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu);
 extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
 extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu);
diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h
index 25aaa97facd8..68235f7e4a8f 100644
--- a/arch/powerpc/include/asm/lppaca.h
+++ b/arch/powerpc/include/asm/lppaca.h
@@ -97,7 +97,7 @@ struct lppaca {
 	u64 saved_gpr4;			// Saved GPR4			x28-x2F
 	u64 saved_gpr5;			// Saved GPR5			x30-x37
 
-	u8 reserved4;			// Reserved			x38-x38
+	u8 dtl_enable_mask;		// Dispatch Trace Log mask	x38-x38
 	u8 donate_dedicated_cpu;	// Donate dedicated CPU cycles	x39-x39
 	u8 fpregs_in_use;		// FP regs in use		x3A-x3A
 	u8 pmcregs_in_use;		// PMC regs in use		x3B-x3B
@@ -133,8 +133,10 @@ struct lppaca {
 //=============================================================================
 // CACHE_LINE_4-5 0x0180 - 0x027F Contains PMC interrupt data
 //=============================================================================
-	u32 page_ins;			// CMO Hint - # page ins by OS	x00-x04
-	u8 pmc_save_area[252];		// PMC interrupt Area		x04-xFF
+	u32 page_ins;			// CMO Hint - # page ins by OS	x00-x03
+	u8 reserved8[148];		// Reserved			x04-x97
+	volatile u64 dtl_idx;		// Dispatch Trace Log head idx	x98-x9F
+	u8 reserved9[96];		// Reserved			xA0-xFF
 } __attribute__((__aligned__(0x400)));
 
 extern struct lppaca lppaca[];
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index 2740c44ff717..0efdb1dfdc5f 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -90,7 +90,7 @@ struct machdep_calls {
 	void (*tce_flush)(struct iommu_table *tbl);
 
 	void __iomem * (*ioremap)(phys_addr_t addr, unsigned long size,
-				  unsigned long flags);
+				  unsigned long flags, void *caller);
 	void (*iounmap)(volatile void __iomem *token);
 
 #ifdef CONFIG_PM
@@ -327,8 +327,6 @@ extern void __devinit smp_generic_take_timebase(void);
  */
 /* Print a boot progress message. */
 void ppc64_boot_msg(unsigned int src, const char *msg);
-/* Print a termination message (print only -- does not stop the kernel) */
-void ppc64_terminate_msg(unsigned int src, const char *msg);
 
 static inline void log_error(char *buf, unsigned int err_type, int fatal)
 {
diff --git a/arch/powerpc/include/asm/mmu-44x.h b/arch/powerpc/include/asm/mmu-44x.h
index 27cc6fdcd3b7..3c86576bfefa 100644
--- a/arch/powerpc/include/asm/mmu-44x.h
+++ b/arch/powerpc/include/asm/mmu-44x.h
@@ -83,6 +83,8 @@ typedef struct {
 #define PPC44x_TLBE_SIZE PPC44x_TLB_16K
 #elif (PAGE_SHIFT == 16)
 #define PPC44x_TLBE_SIZE PPC44x_TLB_64K
+#elif (PAGE_SHIFT == 18)
+#define PPC44x_TLBE_SIZE PPC44x_TLB_256K
 #else
 #error "Unsupported PAGE_SIZE"
 #endif
diff --git a/arch/powerpc/include/asm/mmu-fsl-booke.h b/arch/powerpc/include/asm/mmu-book3e.h
index 3f941c0f7e8e..7e74cff81d86 100644
--- a/arch/powerpc/include/asm/mmu-fsl-booke.h
+++ b/arch/powerpc/include/asm/mmu-book3e.h
@@ -1,26 +1,42 @@
-#ifndef _ASM_POWERPC_MMU_FSL_BOOKE_H_
-#define _ASM_POWERPC_MMU_FSL_BOOKE_H_
+#ifndef _ASM_POWERPC_MMU_BOOK3E_H_
+#define _ASM_POWERPC_MMU_BOOK3E_H_
 /*
- * Freescale Book-E MMU support
+ * Freescale Book-E/Book-3e (ISA 2.06+) MMU support
 */
 
-/* Book-E defined page sizes */
-#define BOOKE_PAGESZ_1K		0
-#define BOOKE_PAGESZ_4K		1
-#define BOOKE_PAGESZ_16K	2
-#define BOOKE_PAGESZ_64K	3
-#define BOOKE_PAGESZ_256K	4
-#define BOOKE_PAGESZ_1M		5
-#define BOOKE_PAGESZ_4M		6
-#define BOOKE_PAGESZ_16M	7
-#define BOOKE_PAGESZ_64M	8
-#define BOOKE_PAGESZ_256M	9
-#define BOOKE_PAGESZ_1GB	10
-#define BOOKE_PAGESZ_4GB	11
-#define BOOKE_PAGESZ_16GB	12
-#define BOOKE_PAGESZ_64GB	13
-#define BOOKE_PAGESZ_256GB	14
-#define BOOKE_PAGESZ_1TB	15
+/* Book-3e defined page sizes */
+#define BOOK3E_PAGESZ_1K	0
+#define BOOK3E_PAGESZ_2K	1
+#define BOOK3E_PAGESZ_4K	2
+#define BOOK3E_PAGESZ_8K	3
+#define BOOK3E_PAGESZ_16K	4
+#define BOOK3E_PAGESZ_32K	5
+#define BOOK3E_PAGESZ_64K	6
+#define BOOK3E_PAGESZ_128K	7
+#define BOOK3E_PAGESZ_256K	8
+#define BOOK3E_PAGESZ_512K	9
+#define BOOK3E_PAGESZ_1M	10
+#define BOOK3E_PAGESZ_2M	11
+#define BOOK3E_PAGESZ_4M	12
+#define BOOK3E_PAGESZ_8M	13
+#define BOOK3E_PAGESZ_16M	14
+#define BOOK3E_PAGESZ_32M	15
+#define BOOK3E_PAGESZ_64M	16
+#define BOOK3E_PAGESZ_128M	17
+#define BOOK3E_PAGESZ_256M	18
+#define BOOK3E_PAGESZ_512M	19
+#define BOOK3E_PAGESZ_1GB	20
+#define BOOK3E_PAGESZ_2GB	21
+#define BOOK3E_PAGESZ_4GB	22
+#define BOOK3E_PAGESZ_8GB	23
+#define BOOK3E_PAGESZ_16GB	24
+#define BOOK3E_PAGESZ_32GB	25
+#define BOOK3E_PAGESZ_64GB	26
+#define BOOK3E_PAGESZ_128GB	27
+#define BOOK3E_PAGESZ_256GB	28
+#define BOOK3E_PAGESZ_512GB	29
+#define BOOK3E_PAGESZ_1TB	30
+#define BOOK3E_PAGESZ_2TB	31
 
 #define MAS0_TLBSEL(x)	((x << 28) & 0x30000000)
 #define MAS0_ESEL(x)	((x << 16) & 0x0FFF0000)
@@ -29,8 +45,9 @@
 #define MAS1_VALID	0x80000000
 #define MAS1_IPROT	0x40000000
 #define MAS1_TID(x)	((x << 16) & 0x3FFF0000)
+#define MAS1_IND	0x00002000
 #define MAS1_TS		0x00001000
-#define MAS1_TSIZE(x)	((x << 8) & 0x00000F00)
+#define MAS1_TSIZE(x)	((x << 7) & 0x00000F80)
 
 #define MAS2_EPN	0xFFFFF000
 #define MAS2_X0		0x00000040
@@ -40,7 +57,7 @@
 #define MAS2_M		0x00000004
 #define MAS2_G		0x00000002
 #define MAS2_E		0x00000001
-#define MAS2_EPN_MASK(size)	(~0 << (2*(size) + 10))
+#define MAS2_EPN_MASK(size)	(~0 << (size + 10))
 #define MAS2_VAL(addr, size, flags)	((addr) & MAS2_EPN_MASK(size) | (flags))
 
 #define MAS3_RPN	0xFFFFF000
@@ -56,7 +73,7 @@
 #define MAS3_SR		0x00000001
 
 #define MAS4_TLBSELD(x)	MAS0_TLBSEL(x)
-#define MAS4_TIDDSEL	0x000F0000
+#define MAS4_INDD	0x00008000
 #define MAS4_TSIZED(x)	MAS1_TSIZE(x)
 #define MAS4_X0D	0x00000040
 #define MAS4_X1D	0x00000020
@@ -68,6 +85,7 @@
 
 #define MAS6_SPID0	0x3FFF0000
 #define MAS6_SPID1	0x00007FFE
+#define MAS6_ISIZE(x)	MAS1_TSIZE(x)
 #define MAS6_SAS	0x00000001
 #define MAS6_SPID	MAS6_SPID0
 
@@ -75,6 +93,8 @@
 
 #ifndef __ASSEMBLY__
 
+extern unsigned int tlbcam_index;
+
 typedef struct {
 	unsigned int id;
 	unsigned int active;
@@ -82,4 +102,4 @@ typedef struct {
 } mm_context_t;
 #endif /* !__ASSEMBLY__ */
 
-#endif /* _ASM_POWERPC_MMU_FSL_BOOKE_H_ */
+#endif /* _ASM_POWERPC_MMU_BOOK3E_H_ */
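
The TSIZE re-encoding is the crux of the table above: the old FSL field encoded the page size as 4^TSIZE KB in a 4-bit field at bit 8, while Book-3e encodes 2^TSIZE KB in a 5-bit field at bit 7, which is also why MAS2_EPN_MASK drops its factor of two. A worked check for a 4K page:

    /* old: BOOKE_PAGESZ_4K  = 1 -> epn mask = ~0 << (2*1 + 10) = ~0 << 12
     * new: BOOK3E_PAGESZ_4K = 2 -> epn mask = ~0 << (2 + 10)   = ~0 << 12
     * Same mask for the sizes both schemes share; the new scheme also
     * reaches the intermediate powers of two (2K, 8K, 32K, ...). */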
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index 68b752626808..98c104a09961 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -284,8 +284,6 @@ extern void add_gpage(unsigned long addr, unsigned long page_size,
		      unsigned long number_of_pages);
 extern void demote_segment_4k(struct mm_struct *mm, unsigned long addr);
 
-extern void htab_initialize(void);
-extern void htab_initialize_secondary(void);
 extern void hpte_init_native(void);
 extern void hpte_init_lpar(void);
 extern void hpte_init_iSeries(void);
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index 6e7639911318..cbf154387091 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -36,9 +36,9 @@
 */
 #define MMU_FTR_USE_TLBIVAX_BCAST ASM_CONST(0x00040000)
 
-/* Enable use of tlbilx invalidate-by-PID variant.
+/* Enable use of tlbilx invalidate instructions.
 */
-#define MMU_FTR_USE_TLBILX_PID ASM_CONST(0x00080000)
+#define MMU_FTR_USE_TLBILX ASM_CONST(0x00080000)
 
 /* This indicates that the processor cannot handle multiple outstanding
 * broadcast tlbivax or tlbsync. This makes the code use a spinlock
@@ -46,6 +46,12 @@
 */
 #define MMU_FTR_LOCK_BCAST_INVAL ASM_CONST(0x00100000)
 
+/* This indicates that the processor doesn't handle way selection
+ * properly and needs SW to track and update the LRU state. This
+ * is specific to an erratum on e300c2/c3/c4 class parts
+ */
+#define MMU_FTR_NEED_DTLB_SW_LRU ASM_CONST(0x00200000)
+
 #ifndef __ASSEMBLY__
 #include <asm/cputable.h>
 
@@ -56,6 +62,10 @@ static inline int mmu_has_feature(unsigned long feature)
 
 extern unsigned int __start___mmu_ftr_fixup, __stop___mmu_ftr_fixup;
 
+/* MMU initialization (64-bit only for now) */
+extern void early_init_mmu(void);
+extern void early_init_mmu_secondary(void);
+
 #endif /* !__ASSEMBLY__ */
 
 
@@ -71,9 +81,9 @@ extern unsigned int __start___mmu_ftr_fixup, __stop___mmu_ftr_fixup;
 #elif defined(CONFIG_44x)
 /* 44x-style software loaded TLB */
 #  include <asm/mmu-44x.h>
-#elif defined(CONFIG_FSL_BOOKE)
-/* Freescale Book-E software loaded TLB */
-#  include <asm/mmu-fsl-booke.h>
+#elif defined(CONFIG_PPC_BOOK3E_MMU)
+/* Freescale Book-E software loaded TLB or Book-3e (ISA 2.06+) MMU */
+#  include <asm/mmu-book3e.h>
 #elif defined (CONFIG_PPC_8xx)
 /* Motorola/Freescale 8xx software loaded TLB */
 #  include <asm/mmu-8xx.h>
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index ab4f19263c42..b7063669f972 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -31,7 +31,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
 {
 	/* Mark this context has been used on the new CPU */
-	cpu_set(smp_processor_id(), next->cpu_vm_mask);
+	cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
 
 	/* 32-bit keeps track of the current PGDIR in the thread struct */
 #ifdef CONFIG_PPC32
diff --git a/arch/powerpc/include/asm/mmzone.h b/arch/powerpc/include/asm/mmzone.h
index 19f299b7e256..35acac90c8ca 100644
--- a/arch/powerpc/include/asm/mmzone.h
+++ b/arch/powerpc/include/asm/mmzone.h
@@ -8,6 +8,7 @@
 #define _ASM_MMZONE_H_
 #ifdef __KERNEL__
 
+#include <linux/cpumask.h>
 
 /*
  * generic non-linear memory support:
diff --git a/arch/powerpc/include/asm/mpc52xx.h b/arch/powerpc/include/asm/mpc52xx.h
index 81a23932a160..52e049cd9e68 100644
--- a/arch/powerpc/include/asm/mpc52xx.h
+++ b/arch/powerpc/include/asm/mpc52xx.h
@@ -273,6 +273,7 @@ extern void mpc5200_setup_xlb_arbiter(void);
 extern void mpc52xx_declare_of_platform_devices(void);
 extern void mpc52xx_map_common_devices(void);
 extern int mpc52xx_set_psc_clkdiv(int psc_id, int clkdiv);
+extern unsigned int mpc52xx_get_xtal_freq(struct device_node *node);
 extern void mpc52xx_restart(char *cmd);
 
 /* mpc52xx_pic.c */
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index 197d569f5bd3..32cbf16f10ea 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -19,12 +19,14 @@
 #include <asm/kdump.h>
 
 /*
- * On regular PPC32 page size is 4K (but we support 4K/16K/64K pages
+ * On regular PPC32 page size is 4K (but we support 4K/16K/64K/256K pages
  * on PPC44x). For PPC64 we support either 4K or 64K software
  * page size. When using 64K pages however, whether we are really supporting
  * 64K pages in HW or not is irrelevant to those definitions.
  */
-#if defined(CONFIG_PPC_64K_PAGES)
+#if defined(CONFIG_PPC_256K_PAGES)
+#define PAGE_SHIFT 18
+#elif defined(CONFIG_PPC_64K_PAGES)
 #define PAGE_SHIFT 16
 #elif defined(CONFIG_PPC_16K_PAGES)
 #define PAGE_SHIFT 14
diff --git a/arch/powerpc/include/asm/page_32.h b/arch/powerpc/include/asm/page_32.h
index 1458d9500381..a0e3f6e6b4ee 100644
--- a/arch/powerpc/include/asm/page_32.h
+++ b/arch/powerpc/include/asm/page_32.h
@@ -19,7 +19,11 @@
 #define PTE_FLAGS_OFFSET 0
 #endif
 
+#ifdef CONFIG_PPC_256K_PAGES
+#define PTE_SHIFT (PAGE_SHIFT - PTE_T_LOG2 - 2)	/* 1/4 of a page */
+#else
 #define PTE_SHIFT (PAGE_SHIFT - PTE_T_LOG2)	/* full page */
+#endif
 
 #ifndef __ASSEMBLY__
 /*
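
Why a quarter page of PTEs suffices with 256K pages (assuming 4-byte PTEs, i.e. PTE_T_LOG2 == 2 and no CONFIG_PTE_64BIT):

    /* PTE_SHIFT = 18 - 2 - 2 = 14  ->  2^14 PTEs per table
     * coverage  = 2^14 entries * 2^18 bytes/page = 2^32 bytes,
     * i.e. one quarter-page table already spans the whole 32-bit
     * address space; a full page of PTEs would be 4x too large. */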
diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
index 3548159a1beb..ba17d5d90a49 100644
--- a/arch/powerpc/include/asm/pci.h
+++ b/arch/powerpc/include/asm/pci.h
@@ -114,6 +114,10 @@ extern int pci_domain_nr(struct pci_bus *bus);
 /* Decide whether to display the domain number in /proc */
 extern int pci_proc_domain(struct pci_bus *bus);
 
+/* MSI arch hooks */
+#define arch_setup_msi_irqs arch_setup_msi_irqs
+#define arch_teardown_msi_irqs arch_teardown_msi_irqs
+#define arch_msi_check_device arch_msi_check_device
 
 struct vm_area_struct;
 /* Map a range of PCI memory or I/O space for a device into user space */
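
Defining these three symbols tells the generic MSI layer that the architecture supplies its own hook implementations rather than the weak defaults. For orientation, the hook prototypes of this kernel generation were roughly as follows (see include/linux/msi.h in the same tree for the authoritative declarations):

    int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
    void arch_teardown_msi_irqs(struct pci_dev *dev);
    int arch_msi_check_device(struct pci_dev *dev, int nvec, int type);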
diff --git a/arch/powerpc/include/asm/pgtable-ppc32.h b/arch/powerpc/include/asm/pgtable-ppc32.h index 820b5f0a35ce..ba45c997830f 100644 --- a/arch/powerpc/include/asm/pgtable-ppc32.h +++ b/arch/powerpc/include/asm/pgtable-ppc32.h | |||
@@ -19,55 +19,6 @@ extern int icache_44x_need_flush; | |||
19 | #endif /* __ASSEMBLY__ */ | 19 | #endif /* __ASSEMBLY__ */ |
20 | 20 | ||
21 | /* | 21 | /* |
22 | * The PowerPC MMU uses a hash table containing PTEs, together with | ||
23 | * a set of 16 segment registers (on 32-bit implementations), to define | ||
24 | * the virtual to physical address mapping. | ||
25 | * | ||
26 | * We use the hash table as an extended TLB, i.e. a cache of currently | ||
27 | * active mappings. We maintain a two-level page table tree, much | ||
28 | * like that used by the i386, for the sake of the Linux memory | ||
29 | * management code. Low-level assembler code in hashtable.S | ||
30 | * (procedure hash_page) is responsible for extracting ptes from the | ||
31 | * tree and putting them into the hash table when necessary, and | ||
32 | * updating the accessed and modified bits in the page table tree. | ||
33 | */ | ||
34 | |||
35 | /* | ||
36 | * The PowerPC MPC8xx uses a TLB with hardware assisted, software tablewalk. | ||
37 | * We also use the two level tables, but we can put the real bits in them | ||
38 | * needed for the TLB and tablewalk. These definitions require Mx_CTR.PPM = 0, | ||
39 | * Mx_CTR.PPCS = 0, and MD_CTR.TWAM = 1. The level 2 descriptor has | ||
40 | * additional page protection (when Mx_CTR.PPCS = 1) that allows TLB hit | ||
41 | * based upon user/super access. The TLB does not have accessed nor write | ||
42 | * protect. We assume that if the TLB get loaded with an entry it is | ||
43 | * accessed, and overload the changed bit for write protect. We use | ||
44 | * two bits in the software pte that are supposed to be set to zero in | ||
45 | * the TLB entry (24 and 25) for these indicators. Although the level 1 | ||
46 | * descriptor contains the guarded and writethrough/copyback bits, we can | ||
47 | * set these at the page level since they get copied from the Mx_TWC | ||
48 | * register when the TLB entry is loaded. We will use bit 27 for guard, since | ||
49 | * that is where it exists in the MD_TWC, and bit 26 for writethrough. | ||
50 | * These will get masked from the level 2 descriptor at TLB load time, and | ||
51 | * copied to the MD_TWC before it gets loaded. | ||
52 | * Large page sizes added. We currently support two sizes, 4K and 8M. | ||
53 | * This also allows a TLB hander optimization because we can directly | ||
54 | * load the PMD into MD_TWC. The 8M pages are only used for kernel | ||
55 | * mapping of well known areas. The PMD (PGD) entries contain control | ||
56 | * flags in addition to the address, so care must be taken that the | ||
57 | * software no longer assumes these are only pointers. | ||
58 | */ | ||
59 | |||
60 | /* | ||
61 | * At present, all PowerPC 400-class processors share a similar TLB | ||
62 | * architecture. The instruction and data sides share a unified, | ||
63 | * 64-entry, fully-associative TLB which is maintained totally under | ||
64 | * software control. In addition, the instruction side has a | ||
65 | * hardware-managed, 4-entry, fully-associative TLB which serves as a | ||
66 | * first level to the shared TLB. These two TLBs are known as the UTLB | ||
67 | * and ITLB, respectively (see "mmu.h" for definitions). | ||
68 | */ | ||
69 | |||
70 | /* | ||
71 | * The normal case is that PTEs are 32-bits and we have a 1-page | 22 | * The normal case is that PTEs are 32-bits and we have a 1-page |
72 | * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages. -- paulus | 23 | * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages. -- paulus |
73 | * | 24 | * |
@@ -135,409 +86,22 @@ extern int icache_44x_need_flush; | |||
135 | */ | 86 | */ |
136 | 87 | ||
137 | #if defined(CONFIG_40x) | 88 | #if defined(CONFIG_40x) |
138 | 89 | #include <asm/pte-40x.h> | |
139 | /* There are several potential gotchas here. The 40x hardware TLBLO | ||
140 | field looks like this: | ||
141 | |||
142 | 0 1 2 3 4 ... 18 19 20 21 22 23 24 25 26 27 28 29 30 31 | ||
143 | RPN..................... 0 0 EX WR ZSEL....... W I M G | ||
144 | |||
145 | Where possible we make the Linux PTE bits match up with this | ||
146 | |||
147 | - bits 20 and 21 must be cleared, because we use 4k pages (40x can | ||
148 | support down to 1k pages), this is done in the TLBMiss exception | ||
149 | handler. | ||
150 | - We use only zones 0 (for kernel pages) and 1 (for user pages) | ||
151 | of the 16 available. Bit 24-26 of the TLB are cleared in the TLB | ||
152 | miss handler. Bit 27 is PAGE_USER, thus selecting the correct | ||
153 | zone. | ||
154 | - PRESENT *must* be in the bottom two bits because swap cache | ||
155 | entries use the top 30 bits. Because 40x doesn't support SMP | ||
156 | anyway, M is irrelevant so we borrow it for PAGE_PRESENT. Bit 30 | ||
157 | is cleared in the TLB miss handler before the TLB entry is loaded. | ||
158 | - All other bits of the PTE are loaded into TLBLO without | ||
159 | modification, leaving us only the bits 20, 21, 24, 25, 26, 30 for | ||
160 | software PTE bits. We actually use use bits 21, 24, 25, and | ||
161 | 30 respectively for the software bits: ACCESSED, DIRTY, RW, and | ||
162 | PRESENT. | ||
163 | */ | ||
164 | |||
165 | /* Definitions for 40x embedded chips. */ | ||
166 | #define _PAGE_GUARDED 0x001 /* G: page is guarded from prefetch */ | ||
167 | #define _PAGE_FILE 0x001 /* when !present: nonlinear file mapping */ | ||
168 | #define _PAGE_PRESENT 0x002 /* software: PTE contains a translation */ | ||
169 | #define _PAGE_NO_CACHE 0x004 /* I: caching is inhibited */ | ||
170 | #define _PAGE_WRITETHRU 0x008 /* W: caching is write-through */ | ||
171 | #define _PAGE_USER 0x010 /* matches one of the zone permission bits */ | ||
172 | #define _PAGE_RW 0x040 /* software: Writes permitted */ | ||
173 | #define _PAGE_DIRTY 0x080 /* software: dirty page */ | ||
174 | #define _PAGE_HWWRITE 0x100 /* hardware: Dirty & RW, set in exception */ | ||
175 | #define _PAGE_HWEXEC 0x200 /* hardware: EX permission */ | ||
176 | #define _PAGE_ACCESSED 0x400 /* software: R: page referenced */ | ||
177 | |||
178 | #define _PMD_PRESENT 0x400 /* PMD points to page of PTEs */ | ||
179 | #define _PMD_BAD 0x802 | ||
180 | #define _PMD_SIZE 0x0e0 /* size field, != 0 for large-page PMD entry */ | ||
181 | #define _PMD_SIZE_4M 0x0c0 | ||
182 | #define _PMD_SIZE_16M 0x0e0 | ||
183 | #define PMD_PAGE_SIZE(pmdval) (1024 << (((pmdval) & _PMD_SIZE) >> 4)) | ||
184 | |||
185 | /* Until my rework is finished, 40x still needs atomic PTE updates */ | ||
186 | #define PTE_ATOMIC_UPDATES 1 | ||
187 | |||
188 | #elif defined(CONFIG_44x) | 90 | #elif defined(CONFIG_44x) |
189 | /* | 91 | #include <asm/pte-44x.h> |
190 | * Definitions for PPC440 | ||
191 | * | ||
192 | * Because of the 3 word TLB entries to support 36-bit addressing, | ||
193 | * the attribute are difficult to map in such a fashion that they | ||
194 | * are easily loaded during exception processing. I decided to | ||
195 | * organize the entry so the ERPN is the only portion in the | ||
196 | * upper word of the PTE and the attribute bits below are packed | ||
197 | * in as sensibly as they can be in the area below a 4KB page size | ||
198 | * oriented RPN. This at least makes it easy to load the RPN and | ||
199 | * ERPN fields in the TLB. -Matt | ||
200 | * | ||
201 | * Note that these bits preclude future use of a page size | ||
202 | * less than 4KB. | ||
203 | * | ||
204 | * | ||
205 | * PPC 440 core has following TLB attribute fields; | ||
206 | * | ||
207 | * TLB1: | ||
208 | * 0 1 2 3 4 ... 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 | ||
209 | * RPN................................. - - - - - - ERPN....... | ||
210 | * | ||
211 | * TLB2: | ||
212 | * 0 1 2 3 4 ... 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 | ||
213 | * - - - - - - U0 U1 U2 U3 W I M G E - UX UW UR SX SW SR | ||
214 | * | ||
215 | * Newer 440 cores (440x6 as used on AMCC 460EX/460GT) have additional | ||
216 | * TLB2 storage attibute fields. Those are: | ||
217 | * | ||
218 | * TLB2: | ||
219 | * 0...10 11 12 13 14 15 16...31 | ||
220 | * no change WL1 IL1I IL1D IL2I IL2D no change | ||
221 | * | ||
222 | * There are some constrains and options, to decide mapping software bits | ||
223 | * into TLB entry. | ||
224 | * | ||
225 | * - PRESENT *must* be in the bottom three bits because swap cache | ||
226 | * entries use the top 29 bits for TLB2. | ||
227 | * | ||
228 | * - FILE *must* be in the bottom three bits because swap cache | ||
229 | * entries use the top 29 bits for TLB2. | ||
230 | * | ||
231 | * - CACHE COHERENT bit (M) has no effect on original PPC440 cores, | ||
232 | * because it doesn't support SMP. However, some later 460 variants | ||
233 | * have -some- form of SMP support and so I keep the bit there for | ||
234 | * future use | ||
235 | * | ||
236 | * With the PPC 44x Linux implementation, the 0-11th LSBs of the PTE are used | ||
237 | * for memory protection related functions (see PTE structure in | ||
238 | * include/asm-ppc/mmu.h). The _PAGE_XXX definitions in this file map to the | ||
239 | * above bits. Note that the bit values are CPU specific, not architecture | ||
240 | * specific. | ||
241 | * | ||
242 | * The kernel PTE entry holds an arch-dependent swp_entry structure under | ||
243 | * certain situations. In other words, in such situations some portion of | ||
244 | * the PTE bits are used as a swp_entry. In the PPC implementation, the | ||
245 | * 3-24th LSB are shared with swp_entry, however the 0-2nd three LSB still | ||
246 | * hold protection values. That means the three protection bits are | ||
247 | * reserved for both PTE and SWAP entry at the most significant three | ||
248 | * LSBs. | ||
249 | * | ||
250 | * There are three protection bits available for SWAP entry: | ||
251 | * _PAGE_PRESENT | ||
252 | * _PAGE_FILE | ||
253 | * _PAGE_HASHPTE (if HW has) | ||
254 | * | ||
255 | * So those three bits have to be inside of 0-2nd LSB of PTE. | ||
256 | * | ||
257 | */ | ||
258 | |||
259 | #define _PAGE_PRESENT 0x00000001 /* S: PTE valid */ | ||
260 | #define _PAGE_RW 0x00000002 /* S: Write permission */ | ||
261 | #define _PAGE_FILE 0x00000004 /* S: nonlinear file mapping */ | ||
262 | #define _PAGE_HWEXEC 0x00000004 /* H: Execute permission */ | ||
263 | #define _PAGE_ACCESSED 0x00000008 /* S: Page referenced */ | ||
264 | #define _PAGE_DIRTY 0x00000010 /* S: Page dirty */ | ||
265 | #define _PAGE_SPECIAL 0x00000020 /* S: Special page */ | ||
266 | #define _PAGE_USER 0x00000040 /* S: User page */ | ||
267 | #define _PAGE_ENDIAN 0x00000080 /* H: E bit */ | ||
268 | #define _PAGE_GUARDED 0x00000100 /* H: G bit */ | ||
269 | #define _PAGE_COHERENT 0x00000200 /* H: M bit */ | ||
270 | #define _PAGE_NO_CACHE 0x00000400 /* H: I bit */ | ||
271 | #define _PAGE_WRITETHRU 0x00000800 /* H: W bit */ | ||
272 | |||
273 | /* TODO: Add large page lowmem mapping support */ | ||
274 | #define _PMD_PRESENT 0 | ||
275 | #define _PMD_PRESENT_MASK (PAGE_MASK) | ||
276 | #define _PMD_BAD (~PAGE_MASK) | ||
277 | |||
278 | /* ERPN in a PTE never gets cleared, ignore it */ | ||
279 | #define _PTE_NONE_MASK 0xffffffff00000000ULL | ||
280 | |||
281 | #define __HAVE_ARCH_PTE_SPECIAL | ||
282 | |||
283 | #elif defined(CONFIG_FSL_BOOKE) | 92 | #elif defined(CONFIG_FSL_BOOKE) |
284 | /* | 93 | #include <asm/pte-fsl-booke.h> |
285 | MMU Assist Register 3: | ||
286 | |||
287 | 32 33 34 35 36 ... 50 51 52 53 54 55 56 57 58 59 60 61 62 63 | ||
288 | RPN...................... 0 0 U0 U1 U2 U3 UX SX UW SW UR SR | ||
289 | |||
290 | - PRESENT *must* be in the bottom three bits because swap cache | ||
291 | entries use the top 29 bits. | ||
292 | |||
293 | - FILE *must* be in the bottom three bits because swap cache | ||
294 | entries use the top 29 bits. | ||
295 | */ | ||
296 | |||
297 | /* Definitions for FSL Book-E Cores */ | ||
298 | #define _PAGE_PRESENT 0x00001 /* S: PTE contains a translation */ | ||
299 | #define _PAGE_USER 0x00002 /* S: User page (maps to UR) */ | ||
300 | #define _PAGE_FILE 0x00002 /* S: when !present: nonlinear file mapping */ | ||
301 | #define _PAGE_RW 0x00004 /* S: Write permission (SW) */ | ||
302 | #define _PAGE_DIRTY 0x00008 /* S: Page dirty */ | ||
303 | #define _PAGE_HWEXEC 0x00010 /* H: SX permission */ | ||
304 | #define _PAGE_ACCESSED 0x00020 /* S: Page referenced */ | ||
305 | |||
306 | #define _PAGE_ENDIAN 0x00040 /* H: E bit */ | ||
307 | #define _PAGE_GUARDED 0x00080 /* H: G bit */ | ||
308 | #define _PAGE_COHERENT 0x00100 /* H: M bit */ | ||
309 | #define _PAGE_NO_CACHE 0x00200 /* H: I bit */ | ||
310 | #define _PAGE_WRITETHRU 0x00400 /* H: W bit */ | ||
311 | #define _PAGE_SPECIAL 0x00800 /* S: Special page */ | ||
312 | |||
313 | #ifdef CONFIG_PTE_64BIT | ||
314 | /* ERPN in a PTE never gets cleared, ignore it */ | ||
315 | #define _PTE_NONE_MASK 0xffffffffffff0000ULL | ||
316 | #endif | ||
317 | |||
318 | #define _PMD_PRESENT 0 | ||
319 | #define _PMD_PRESENT_MASK (PAGE_MASK) | ||
320 | #define _PMD_BAD (~PAGE_MASK) | ||
321 | |||
322 | #define __HAVE_ARCH_PTE_SPECIAL | ||
323 | |||
324 | #elif defined(CONFIG_8xx) | 94 | #elif defined(CONFIG_8xx) |
325 | /* Definitions for 8xx embedded chips. */ | 95 | #include <asm/pte-8xx.h> |
326 | #define _PAGE_PRESENT 0x0001 /* Page is valid */ | ||
327 | #define _PAGE_FILE 0x0002 /* when !present: nonlinear file mapping */ | ||
328 | #define _PAGE_NO_CACHE 0x0002 /* I: cache inhibit */ | ||
329 | #define _PAGE_SHARED 0x0004 /* No ASID (context) compare */ | ||
330 | |||
331 | /* These five software bits must be masked out when the entry is loaded | ||
332 | * into the TLB. | ||
333 | */ | ||
334 | #define _PAGE_EXEC 0x0008 /* software: i-cache coherency required */ | ||
335 | #define _PAGE_GUARDED 0x0010 /* software: guarded access */ | ||
336 | #define _PAGE_DIRTY 0x0020 /* software: page changed */ | ||
337 | #define _PAGE_RW 0x0040 /* software: user write access allowed */ | ||
338 | #define _PAGE_ACCESSED 0x0080 /* software: page referenced */ | ||
339 | |||
340 | /* Setting any bits in the nibble with the following two controls will | ||
341 | * require a TLB exception handler change. It is assumed that unused bits | ||
342 | * are always zero. | ||
343 | */ | ||
344 | #define _PAGE_HWWRITE 0x0100 /* h/w write enable: never set in Linux PTE */ | ||
345 | #define _PAGE_USER 0x0800 /* One of the PP bits, the other is USER&~RW */ | ||
346 | |||
347 | #define _PMD_PRESENT 0x0001 | ||
348 | #define _PMD_BAD 0x0ff0 | ||
349 | #define _PMD_PAGE_MASK 0x000c | ||
350 | #define _PMD_PAGE_8M 0x000c | ||
351 | |||
352 | #define _PTE_NONE_MASK _PAGE_ACCESSED | ||
353 | |||
354 | /* Until my rework is finished, 8xx still needs atomic PTE updates */ | ||
355 | #define PTE_ATOMIC_UPDATES 1 | ||
356 | |||
357 | #else /* CONFIG_6xx */ | 96 | #else /* CONFIG_6xx */ |
358 | /* Definitions for 60x, 740/750, etc. */ | 97 | #include <asm/pte-hash32.h> |
359 | #define _PAGE_PRESENT 0x001 /* software: pte contains a translation */ | ||
360 | #define _PAGE_HASHPTE 0x002 /* hash_page has made an HPTE for this pte */ | ||
361 | #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */ | ||
362 | #define _PAGE_USER 0x004 /* usermode access allowed */ | ||
363 | #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */ | ||
364 | #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */ | ||
365 | #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */ | ||
366 | #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */ | ||
367 | #define _PAGE_DIRTY 0x080 /* C: page changed */ | ||
368 | #define _PAGE_ACCESSED 0x100 /* R: page referenced */ | ||
369 | #define _PAGE_EXEC 0x200 /* software: i-cache coherency required */ | ||
370 | #define _PAGE_RW 0x400 /* software: user write access allowed */ | ||
371 | #define _PAGE_SPECIAL 0x800 /* software: Special page */ | ||
372 | |||
373 | #ifdef CONFIG_PTE_64BIT | ||
374 | /* We never clear the high word of the pte */ | ||
375 | #define _PTE_NONE_MASK (0xffffffff00000000ULL | _PAGE_HASHPTE) | ||
376 | #else | ||
377 | #define _PTE_NONE_MASK _PAGE_HASHPTE | ||
378 | #endif | 98 | #endif |
379 | 99 | ||
380 | #define _PMD_PRESENT 0 | 100 | /* And here we include common definitions */ |
381 | #define _PMD_PRESENT_MASK (PAGE_MASK) | 101 | #include <asm/pte-common.h> |
382 | #define _PMD_BAD (~PAGE_MASK) | ||
383 | |||
384 | /* Hash table based platforms need atomic updates of the linux PTE */ | ||
385 | #define PTE_ATOMIC_UPDATES 1 | ||
386 | |||
387 | #define __HAVE_ARCH_PTE_SPECIAL | ||
388 | |||
389 | #endif | ||
390 | |||
391 | /* | ||
392 | * Some bits are only used on some cpu families... | ||
393 | */ | ||
394 | #ifndef _PAGE_HASHPTE | ||
395 | #define _PAGE_HASHPTE 0 | ||
396 | #endif | ||
397 | #ifndef _PTE_NONE_MASK | ||
398 | #define _PTE_NONE_MASK 0 | ||
399 | #endif | ||
400 | #ifndef _PAGE_SHARED | ||
401 | #define _PAGE_SHARED 0 | ||
402 | #endif | ||
403 | #ifndef _PAGE_HWWRITE | ||
404 | #define _PAGE_HWWRITE 0 | ||
405 | #endif | ||
406 | #ifndef _PAGE_HWEXEC | ||
407 | #define _PAGE_HWEXEC 0 | ||
408 | #endif | ||
409 | #ifndef _PAGE_EXEC | ||
410 | #define _PAGE_EXEC 0 | ||
411 | #endif | ||
412 | #ifndef _PAGE_ENDIAN | ||
413 | #define _PAGE_ENDIAN 0 | ||
414 | #endif | ||
415 | #ifndef _PAGE_COHERENT | ||
416 | #define _PAGE_COHERENT 0 | ||
417 | #endif | ||
418 | #ifndef _PAGE_WRITETHRU | ||
419 | #define _PAGE_WRITETHRU 0 | ||
420 | #endif | ||
421 | #ifndef _PAGE_SPECIAL | ||
422 | #define _PAGE_SPECIAL 0 | ||
423 | #endif | ||
424 | #ifndef _PMD_PRESENT_MASK | ||
425 | #define _PMD_PRESENT_MASK _PMD_PRESENT | ||
426 | #endif | ||
427 | #ifndef _PMD_SIZE | ||
428 | #define _PMD_SIZE 0 | ||
429 | #define PMD_PAGE_SIZE(pmd) bad_call_to_PMD_PAGE_SIZE() | ||
430 | #endif | ||
431 | |||
432 | #define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | \ | ||
433 | _PAGE_SPECIAL) | ||
434 | |||
435 | |||
436 | #define PAGE_PROT_BITS (_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \ | ||
437 | _PAGE_WRITETHRU | _PAGE_ENDIAN | \ | ||
438 | _PAGE_USER | _PAGE_ACCESSED | \ | ||
439 | _PAGE_RW | _PAGE_HWWRITE | _PAGE_DIRTY | \ | ||
440 | _PAGE_EXEC | _PAGE_HWEXEC) | ||
441 | |||
442 | /* | ||
443 | * We define two sets of base protection bits: one for basic pages (i.e., | ||
444 | * cacheable kernel and user pages) and one for non-cacheable | ||
445 | * pages. We always set _PAGE_COHERENT when SMP is enabled or when | ||
446 | * the processor might need it for DMA coherency. | ||
447 | */ | ||
448 | #if defined(CONFIG_SMP) || defined(CONFIG_PPC_STD_MMU) | ||
449 | #define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_COHERENT) | ||
450 | #else | ||
451 | #define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED) | ||
452 | #endif | ||
453 | #define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_NO_CACHE) | ||
454 | |||
455 | #define _PAGE_WRENABLE (_PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE) | ||
456 | #define _PAGE_KERNEL (_PAGE_BASE | _PAGE_SHARED | _PAGE_WRENABLE) | ||
457 | #define _PAGE_KERNEL_NC (_PAGE_BASE_NC | _PAGE_SHARED | _PAGE_WRENABLE) | ||
458 | |||
459 | #ifdef CONFIG_PPC_STD_MMU | ||
460 | /* On standard PPC MMU, no user access implies kernel read/write access, | ||
461 | * so to write-protect kernel memory we must turn on user access */ | ||
462 | #define _PAGE_KERNEL_RO (_PAGE_BASE | _PAGE_SHARED | _PAGE_USER) | ||
463 | #else | ||
464 | #define _PAGE_KERNEL_RO (_PAGE_BASE | _PAGE_SHARED) | ||
465 | #endif | ||
466 | |||
467 | #define _PAGE_IO (_PAGE_KERNEL_NC | _PAGE_GUARDED) | ||
468 | #define _PAGE_RAM (_PAGE_KERNEL | _PAGE_HWEXEC) | ||
469 | |||
470 | #if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\ | ||
471 | defined(CONFIG_KPROBES) | ||
472 | /* We want the debuggers to be able to set breakpoints anywhere, so | ||
473 | * don't write-protect the kernel text */ | ||
474 | #define _PAGE_RAM_TEXT _PAGE_RAM | ||
475 | #else | ||
476 | #define _PAGE_RAM_TEXT (_PAGE_KERNEL_RO | _PAGE_HWEXEC) | ||
477 | #endif | ||
478 | |||
479 | #define PAGE_NONE __pgprot(_PAGE_BASE) | ||
480 | #define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER) | ||
481 | #define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC) | ||
482 | #define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW) | ||
483 | #define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC) | ||
484 | #define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER) | ||
485 | #define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC) | ||
486 | |||
487 | #define PAGE_KERNEL __pgprot(_PAGE_RAM) | ||
488 | #define PAGE_KERNEL_NOCACHE __pgprot(_PAGE_IO) | ||
489 | |||
490 | /* | ||
491 | * The PowerPC can only do execute protection on a segment (256MB) basis, | ||
492 | * not on a page basis. So we consider execute permission the same as read. | ||
493 | * Also, write permissions imply read permissions. | ||
494 | * This is the closest we can get. | ||
495 | */ | ||
496 | #define __P000 PAGE_NONE | ||
497 | #define __P001 PAGE_READONLY_X | ||
498 | #define __P010 PAGE_COPY | ||
499 | #define __P011 PAGE_COPY_X | ||
500 | #define __P100 PAGE_READONLY | ||
501 | #define __P101 PAGE_READONLY_X | ||
502 | #define __P110 PAGE_COPY | ||
503 | #define __P111 PAGE_COPY_X | ||
504 | |||
505 | #define __S000 PAGE_NONE | ||
506 | #define __S001 PAGE_READONLY_X | ||
507 | #define __S010 PAGE_SHARED | ||
508 | #define __S011 PAGE_SHARED_X | ||
509 | #define __S100 PAGE_READONLY | ||
510 | #define __S101 PAGE_READONLY_X | ||
511 | #define __S110 PAGE_SHARED | ||
512 | #define __S111 PAGE_SHARED_X | ||
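These __P*/__S* tables are consumed by the generic mm layer, which builds a 16-entry protection_map[] out of them; a hedged sketch of how a mapping's vm_flags pick one entry (protection_map and the VM_* flags are generic kernel names, quoted here as an assumption rather than part of this patch):

/* Sketch: index the protection table with the low vm_flags bits. */
pgprot_t prot = protection_map[vm_flags &
		(VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];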
513 | 102 | ||
514 | #ifndef __ASSEMBLY__ | 103 | #ifndef __ASSEMBLY__ |
515 | /* Make sure we get a link error if PMD_PAGE_SIZE is ever called on a | ||
516 | * kernel without large page PMD support */ | ||
517 | extern unsigned long bad_call_to_PMD_PAGE_SIZE(void); | ||
518 | |||
519 | /* | ||
520 | * Conversions between PTE values and page frame numbers. | ||
521 | */ | ||
522 | |||
523 | /* In some cases we want to additionally adjust where the PFN sits in the | ||
524 | * PTE to allow room for more flags. */ | ||
525 | #if defined(CONFIG_FSL_BOOKE) && defined(CONFIG_PTE_64BIT) | ||
526 | #define PFN_SHIFT_OFFSET (PAGE_SHIFT + 8) | ||
527 | #else | ||
528 | #define PFN_SHIFT_OFFSET (PAGE_SHIFT) | ||
529 | #endif | ||
530 | 104 | ||
531 | #define pte_pfn(x) (pte_val(x) >> PFN_SHIFT_OFFSET) | ||
532 | #define pte_page(x) pfn_to_page(pte_pfn(x)) | ||
533 | |||
534 | #define pfn_pte(pfn, prot) __pte(((pte_basic_t)(pfn) << PFN_SHIFT_OFFSET) |\ | ||
535 | pgprot_val(prot)) | ||
536 | #define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot) | ||
537 | #endif /* __ASSEMBLY__ */ | ||
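As a worked example of the PFN_SHIFT_OFFSET logic (assuming 4K pages, so PAGE_SHIFT == 12): on FSL Book-E with 64-bit PTEs the PFN starts at bit 20, leaving bits 12-19 free for the extra status flags; a sketch:

/* Sketch: with PFN_SHIFT_OFFSET == 20, pfn 0x12345 produces a PTE RPN
 * field of 0x1234500000, and pte_pfn() recovers 0x12345.
 */
pte_t pte = pfn_pte(0x12345, PAGE_KERNEL);
unsigned long pfn = pte_pfn(pte);	/* == 0x12345 */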
538 | |||
539 | #define pte_none(pte) ((pte_val(pte) & ~_PTE_NONE_MASK) == 0) | ||
540 | #define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT) | ||
541 | #define pte_clear(mm, addr, ptep) \ | 105 | #define pte_clear(mm, addr, ptep) \ |
542 | do { pte_update(ptep, ~_PAGE_HASHPTE, 0); } while (0) | 106 | do { pte_update(ptep, ~_PAGE_HASHPTE, 0); } while (0) |
543 | 107 | ||
@@ -546,43 +110,6 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void); | |||
546 | #define pmd_present(pmd) (pmd_val(pmd) & _PMD_PRESENT_MASK) | 110 | #define pmd_present(pmd) (pmd_val(pmd) & _PMD_PRESENT_MASK) |
547 | #define pmd_clear(pmdp) do { pmd_val(*(pmdp)) = 0; } while (0) | 111 | #define pmd_clear(pmdp) do { pmd_val(*(pmdp)) = 0; } while (0) |
548 | 112 | ||
549 | #ifndef __ASSEMBLY__ | ||
550 | /* | ||
551 | * The following only work if pte_present() is true. | ||
552 | * Undefined behaviour if not.. | ||
553 | */ | ||
554 | static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; } | ||
555 | static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } | ||
556 | static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } | ||
557 | static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; } | ||
558 | static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; } | ||
559 | |||
560 | static inline pte_t pte_wrprotect(pte_t pte) { | ||
561 | pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; } | ||
562 | static inline pte_t pte_mkclean(pte_t pte) { | ||
563 | pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; } | ||
564 | static inline pte_t pte_mkold(pte_t pte) { | ||
565 | pte_val(pte) &= ~_PAGE_ACCESSED; return pte; } | ||
566 | |||
567 | static inline pte_t pte_mkwrite(pte_t pte) { | ||
568 | pte_val(pte) |= _PAGE_RW; return pte; } | ||
569 | static inline pte_t pte_mkdirty(pte_t pte) { | ||
570 | pte_val(pte) |= _PAGE_DIRTY; return pte; } | ||
571 | static inline pte_t pte_mkyoung(pte_t pte) { | ||
572 | pte_val(pte) |= _PAGE_ACCESSED; return pte; } | ||
573 | static inline pte_t pte_mkspecial(pte_t pte) { | ||
574 | pte_val(pte) |= _PAGE_SPECIAL; return pte; } | ||
575 | static inline pgprot_t pte_pgprot(pte_t pte) | ||
576 | { | ||
577 | return __pgprot(pte_val(pte) & PAGE_PROT_BITS); | ||
578 | } | ||
579 | |||
580 | static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) | ||
581 | { | ||
582 | pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); | ||
583 | return pte; | ||
584 | } | ||
585 | |||
586 | /* | 113 | /* |
587 | * When flushing the tlb entry for a page, we also need to flush the hash | 114 | * When flushing the tlb entry for a page, we also need to flush the hash |
588 | * table entry. flush_hash_pages is assembler (for speed) in hashtable.S. | 115 | * table entry. flush_hash_pages is assembler (for speed) in hashtable.S. |
@@ -599,11 +126,19 @@ extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, | |||
599 | unsigned long address); | 126 | unsigned long address); |
600 | 127 | ||
601 | /* | 128 | /* |
602 | * Atomic PTE updates. | 129 | * PTE updates. This function is called whenever an existing |
130 | * valid PTE is updated. This does -not- include set_pte_at() | ||
131 | * which nowadays only sets a new PTE. | ||
603 | * | 132 | * |
604 | * pte_update clears and sets bit atomically, and returns | 133 | * Depending on the type of MMU, we may need to use atomic updates |
605 | * the old pte value. In the 64-bit PTE case we lock around the | 134 | * and the PTE may be either 32 or 64 bits wide. In the latter case, |
606 | * low PTE word since we expect ALL flag bits to be there | 135 | * when using atomic updates, only the low part of the PTE is |
136 | * accessed atomically. | ||
137 | * | ||
138 | * In addition, on 44x, we also maintain a global flag indicating | ||
139 | * that an executable user mapping was modified, which is needed | ||
140 | * to properly flush the virtually tagged instruction cache of | ||
141 | * those implementations. | ||
607 | */ | 142 | */ |
608 | #ifndef CONFIG_PTE_64BIT | 143 | #ifndef CONFIG_PTE_64BIT |
609 | static inline unsigned long pte_update(pte_t *p, | 144 | static inline unsigned long pte_update(pte_t *p, |
@@ -668,44 +203,6 @@ static inline unsigned long long pte_update(pte_t *p, | |||
668 | #endif /* CONFIG_PTE_64BIT */ | 203 | #endif /* CONFIG_PTE_64BIT */ |
669 | 204 | ||
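The hunk elides the new pte_update() bodies; on MMUs that do not define PTE_ATOMIC_UPDATES the update presumably degenerates to a plain read-modify-write, along these lines (the _nonatomic name is mine, for illustration only):

/* Sketch: clear then set bits in a PTE without an lwarx/stwcx. loop. */
static inline unsigned long pte_update_nonatomic(pte_t *p,
						 unsigned long clr,
						 unsigned long set)
{
	unsigned long old = pte_val(*p);

	*p = __pte((old & ~clr) | set);
	return old;
}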
670 | /* | 205 | /* |
671 | * set_pte stores a linux PTE into the linux page table. | ||
672 | * On machines which use an MMU hash table we avoid changing the | ||
673 | * _PAGE_HASHPTE bit. | ||
674 | */ | ||
675 | |||
676 | static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, | ||
677 | pte_t *ptep, pte_t pte) | ||
678 | { | ||
679 | #if (_PAGE_HASHPTE != 0) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT) | ||
680 | pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte) & ~_PAGE_HASHPTE); | ||
681 | #elif defined(CONFIG_PTE_64BIT) && defined(CONFIG_SMP) | ||
682 | #if _PAGE_HASHPTE != 0 | ||
683 | if (pte_val(*ptep) & _PAGE_HASHPTE) | ||
684 | flush_hash_entry(mm, ptep, addr); | ||
685 | #endif | ||
686 | __asm__ __volatile__("\ | ||
687 | stw%U0%X0 %2,%0\n\ | ||
688 | eieio\n\ | ||
689 | stw%U0%X0 %L2,%1" | ||
690 | : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4)) | ||
691 | : "r" (pte) : "memory"); | ||
692 | #else | ||
693 | *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE) | ||
694 | | (pte_val(pte) & ~_PAGE_HASHPTE)); | ||
695 | #endif | ||
696 | } | ||
697 | |||
698 | |||
699 | static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, | ||
700 | pte_t *ptep, pte_t pte) | ||
701 | { | ||
702 | #if defined(CONFIG_PTE_64BIT) && defined(CONFIG_SMP) && defined(CONFIG_DEBUG_VM) | ||
703 | WARN_ON(pte_present(*ptep)); | ||
704 | #endif | ||
705 | __set_pte_at(mm, addr, ptep, pte); | ||
706 | } | ||
707 | |||
708 | /* | ||
709 | * 2.6 calls this without flushing the TLB entry; this is wrong | 206 | * 2.6 calls this without flushing the TLB entry; this is wrong |
710 | * for our hash-based implementation, we fix that up here. | 207 | * for our hash-based implementation, we fix that up here. |
711 | */ | 208 | */ |
@@ -745,24 +242,14 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, | |||
745 | } | 242 | } |
746 | 243 | ||
747 | 244 | ||
748 | #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS | 245 | static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry) |
749 | static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty) | ||
750 | { | 246 | { |
751 | unsigned long bits = pte_val(entry) & | 247 | unsigned long bits = pte_val(entry) & |
752 | (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW); | 248 | (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | |
249 | _PAGE_HWEXEC | _PAGE_EXEC); | ||
753 | pte_update(ptep, 0, bits); | 250 | pte_update(ptep, 0, bits); |
754 | } | 251 | } |
755 | 252 | ||
756 | #define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \ | ||
757 | ({ \ | ||
758 | int __changed = !pte_same(*(__ptep), __entry); \ | ||
759 | if (__changed) { \ | ||
760 | __ptep_set_access_flags(__ptep, __entry, __dirty); \ | ||
761 | flush_tlb_page_nohash(__vma, __address); \ | ||
762 | } \ | ||
763 | __changed; \ | ||
764 | }) | ||
765 | |||
766 | #define __HAVE_ARCH_PTE_SAME | 253 | #define __HAVE_ARCH_PTE_SAME |
767 | #define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0) | 254 | #define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0) |
768 | 255 | ||
diff --git a/arch/powerpc/include/asm/pgtable-4k.h b/arch/powerpc/include/asm/pgtable-ppc64-4k.h index 1dbca4e7de67..6eefdcffa359 100644 --- a/arch/powerpc/include/asm/pgtable-4k.h +++ b/arch/powerpc/include/asm/pgtable-ppc64-4k.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_POWERPC_PGTABLE_4K_H | 1 | #ifndef _ASM_POWERPC_PGTABLE_PPC64_4K_H |
2 | #define _ASM_POWERPC_PGTABLE_4K_H | 2 | #define _ASM_POWERPC_PGTABLE_PPC64_4K_H |
3 | /* | 3 | /* |
4 | * Entries per page directory level. The PTE level must use a 64b record | 4 | * Entries per page directory level. The PTE level must use a 64b record |
5 | * for each page table entry. The PMD and PGD level use a 32b record for | 5 | * for each page table entry. The PMD and PGD level use a 32b record for |
@@ -40,28 +40,6 @@ | |||
40 | #define PGDIR_SIZE (1UL << PGDIR_SHIFT) | 40 | #define PGDIR_SIZE (1UL << PGDIR_SHIFT) |
41 | #define PGDIR_MASK (~(PGDIR_SIZE-1)) | 41 | #define PGDIR_MASK (~(PGDIR_SIZE-1)) |
42 | 42 | ||
43 | /* PTE bits */ | ||
44 | #define _PAGE_HASHPTE 0x0400 /* software: pte has an associated HPTE */ | ||
45 | #define _PAGE_SECONDARY 0x8000 /* software: HPTE is in secondary group */ | ||
46 | #define _PAGE_GROUP_IX 0x7000 /* software: HPTE index within group */ | ||
47 | #define _PAGE_F_SECOND _PAGE_SECONDARY | ||
48 | #define _PAGE_F_GIX _PAGE_GROUP_IX | ||
49 | #define _PAGE_SPECIAL 0x10000 /* software: special page */ | ||
50 | #define __HAVE_ARCH_PTE_SPECIAL | ||
51 | |||
52 | /* PTE flags to conserve for HPTE identification */ | ||
53 | #define _PAGE_HPTEFLAGS (_PAGE_BUSY | _PAGE_HASHPTE | \ | ||
54 | _PAGE_SECONDARY | _PAGE_GROUP_IX) | ||
55 | |||
56 | /* There is no 4K PFN hack on 4K pages */ | ||
57 | #define _PAGE_4K_PFN 0 | ||
58 | |||
59 | /* PAGE_MASK gives the right answer below, but only by accident */ | ||
60 | /* It should be preserving the high 48 bits and then specifically */ | ||
61 | /* preserving _PAGE_SECONDARY | _PAGE_GROUP_IX */ | ||
62 | #define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | \ | ||
63 | _PAGE_HPTEFLAGS | _PAGE_SPECIAL) | ||
64 | |||
65 | /* Bits to mask out from a PMD to get to the PTE page */ | 43 | /* Bits to mask out from a PMD to get to the PTE page */ |
66 | #define PMD_MASKED_BITS 0 | 44 | #define PMD_MASKED_BITS 0 |
67 | /* Bits to mask out from a PUD to get to the PMD page */ | 45 | /* Bits to mask out from a PUD to get to the PMD page */ |
@@ -69,30 +47,6 @@ | |||
69 | /* Bits to mask out from a PGD to get to the PUD page */ | 47 | /* Bits to mask out from a PGD to get to the PUD page */ |
70 | #define PGD_MASKED_BITS 0 | 48 | #define PGD_MASKED_BITS 0 |
71 | 49 | ||
72 | /* shift to put page number into pte */ | ||
73 | #define PTE_RPN_SHIFT (17) | ||
74 | |||
75 | #ifdef STRICT_MM_TYPECHECKS | ||
76 | #define __real_pte(e,p) ((real_pte_t){(e)}) | ||
77 | #define __rpte_to_pte(r) ((r).pte) | ||
78 | #else | ||
79 | #define __real_pte(e,p) (e) | ||
80 | #define __rpte_to_pte(r) (__pte(r)) | ||
81 | #endif | ||
82 | #define __rpte_to_hidx(r,index) (pte_val(__rpte_to_pte(r)) >> 12) | ||
83 | |||
84 | #define pte_iterate_hashed_subpages(rpte, psize, va, index, shift) \ | ||
85 | do { \ | ||
86 | index = 0; \ | ||
87 | shift = mmu_psize_defs[psize].shift; \ | ||
88 | |||
89 | #define pte_iterate_hashed_end() } while(0) | ||
90 | |||
91 | #ifdef CONFIG_PPC_HAS_HASH_64K | ||
92 | #define pte_pagesize_index(mm, addr, pte) get_slice_psize(mm, addr) | ||
93 | #else | ||
94 | #define pte_pagesize_index(mm, addr, pte) MMU_PAGE_4K | ||
95 | #endif | ||
96 | 50 | ||
97 | /* | 51 | /* |
98 | * 4-level page tables related bits | 52 | * 4-level page tables related bits |
@@ -112,6 +66,9 @@ | |||
112 | #define pud_ERROR(e) \ | 66 | #define pud_ERROR(e) \ |
113 | printk("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pud_val(e)) | 67 | printk("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pud_val(e)) |
114 | 68 | ||
69 | /* | ||
70 | * On all 4K setups, remap_4k_pfn() equates to remap_pfn_range() */ | ||
115 | #define remap_4k_pfn(vma, addr, pfn, prot) \ | 71 | #define remap_4k_pfn(vma, addr, pfn, prot) \ |
116 | remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, (prot)) | 72 | remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, (prot)) |
117 | #endif /* _ASM_POWERPC_PGTABLE_4K_H */ | 73 | |
74 | #endif /* _ASM_POWERPC_PGTABLE_PPC64_4K_H */ | ||
diff --git a/arch/powerpc/include/asm/pgtable-ppc64-64k.h b/arch/powerpc/include/asm/pgtable-ppc64-64k.h new file mode 100644 index 000000000000..6cc085b945a5 --- /dev/null +++ b/arch/powerpc/include/asm/pgtable-ppc64-64k.h | |||
@@ -0,0 +1,42 @@ | |||
1 | #ifndef _ASM_POWERPC_PGTABLE_PPC64_64K_H | ||
2 | #define _ASM_POWERPC_PGTABLE_PPC64_64K_H | ||
3 | |||
4 | #include <asm-generic/pgtable-nopud.h> | ||
5 | |||
6 | |||
7 | #define PTE_INDEX_SIZE 12 | ||
8 | #define PMD_INDEX_SIZE 12 | ||
9 | #define PUD_INDEX_SIZE 0 | ||
10 | #define PGD_INDEX_SIZE 4 | ||
11 | |||
12 | #ifndef __ASSEMBLY__ | ||
13 | |||
14 | #define PTE_TABLE_SIZE (sizeof(real_pte_t) << PTE_INDEX_SIZE) | ||
15 | #define PMD_TABLE_SIZE (sizeof(pmd_t) << PMD_INDEX_SIZE) | ||
16 | #define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE) | ||
17 | |||
18 | #define PTRS_PER_PTE (1 << PTE_INDEX_SIZE) | ||
19 | #define PTRS_PER_PMD (1 << PMD_INDEX_SIZE) | ||
20 | #define PTRS_PER_PGD (1 << PGD_INDEX_SIZE) | ||
21 | |||
22 | /* With 4k base page size, hugepage PTEs go at the PMD level */ | ||
23 | #define MIN_HUGEPTE_SHIFT PAGE_SHIFT | ||
24 | |||
25 | /* PMD_SHIFT determines what a second-level page table entry can map */ | ||
26 | #define PMD_SHIFT (PAGE_SHIFT + PTE_INDEX_SIZE) | ||
27 | #define PMD_SIZE (1UL << PMD_SHIFT) | ||
28 | #define PMD_MASK (~(PMD_SIZE-1)) | ||
29 | |||
30 | /* PGDIR_SHIFT determines what a third-level page table entry can map */ | ||
31 | #define PGDIR_SHIFT (PMD_SHIFT + PMD_INDEX_SIZE) | ||
32 | #define PGDIR_SIZE (1UL << PGDIR_SHIFT) | ||
33 | #define PGDIR_MASK (~(PGDIR_SIZE-1)) | ||
34 | |||
35 | #endif /* __ASSEMBLY__ */ | ||
36 | |||
37 | /* Bits to mask out from a PMD to get to the PTE page */ | ||
38 | #define PMD_MASKED_BITS 0x1ff | ||
39 | /* Bits to mask out from a PGD/PUD to get to the PMD page */ | ||
40 | #define PUD_MASKED_BITS 0x1ff | ||
41 | |||
42 | #endif /* _ASM_POWERPC_PGTABLE_PPC64_64K_H */ | ||
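A quick check of the geometry these constants define (with the 64K base page, PAGE_SHIFT == 16): PMD_SHIFT = 16 + 12 = 28 and PGDIR_SHIFT = 28 + 12 = 40, so adding PGD_INDEX_SIZE == 4 the tree resolves 40 + 4 = 44 bits of effective address, i.e. 16TB per address space.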
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h index b0f18be81d9f..c40db05f21e0 100644 --- a/arch/powerpc/include/asm/pgtable-ppc64.h +++ b/arch/powerpc/include/asm/pgtable-ppc64.h | |||
@@ -11,9 +11,9 @@ | |||
11 | #endif /* __ASSEMBLY__ */ | 11 | #endif /* __ASSEMBLY__ */ |
12 | 12 | ||
13 | #ifdef CONFIG_PPC_64K_PAGES | 13 | #ifdef CONFIG_PPC_64K_PAGES |
14 | #include <asm/pgtable-64k.h> | 14 | #include <asm/pgtable-ppc64-64k.h> |
15 | #else | 15 | #else |
16 | #include <asm/pgtable-4k.h> | 16 | #include <asm/pgtable-ppc64-4k.h> |
17 | #endif | 17 | #endif |
18 | 18 | ||
19 | #define FIRST_USER_ADDRESS 0 | 19 | #define FIRST_USER_ADDRESS 0 |
@@ -25,6 +25,8 @@ | |||
25 | PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT) | 25 | PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT) |
26 | #define PGTABLE_RANGE (ASM_CONST(1) << PGTABLE_EADDR_SIZE) | 26 | #define PGTABLE_RANGE (ASM_CONST(1) << PGTABLE_EADDR_SIZE) |
27 | 27 | ||
28 | |||
29 | /* Some sanity checking */ | ||
28 | #if TASK_SIZE_USER64 > PGTABLE_RANGE | 30 | #if TASK_SIZE_USER64 > PGTABLE_RANGE |
29 | #error TASK_SIZE_USER64 exceeds pagetable range | 31 | #error TASK_SIZE_USER64 exceeds pagetable range |
30 | #endif | 32 | #endif |
@@ -33,7 +35,6 @@ | |||
33 | #error TASK_SIZE_USER64 exceeds user VSID range | 35 | #error TASK_SIZE_USER64 exceeds user VSID range |
34 | #endif | 36 | #endif |
35 | 37 | ||
36 | |||
37 | /* | 38 | /* |
38 | * Define the address range of the vmalloc VM area. | 39 | * Define the address range of the vmalloc VM area. |
39 | */ | 40 | */ |
@@ -76,83 +77,12 @@ | |||
76 | 77 | ||
77 | 78 | ||
78 | /* | 79 | /* |
79 | * Common bits in a linux-style PTE. These match the bits in the | 80 | * Include the PTE bits definitions |
80 | * (hardware-defined) PowerPC PTE as closely as possible. Additional | ||
81 | * bits may be defined in pgtable-*.h | ||
82 | */ | 81 | */ |
83 | #define _PAGE_PRESENT 0x0001 /* software: pte contains a translation */ | 82 | #include <asm/pte-hash64.h> |
84 | #define _PAGE_USER 0x0002 /* matches one of the PP bits */ | 83 | #include <asm/pte-common.h> |
85 | #define _PAGE_FILE 0x0002 /* (!present only) software: pte holds file offset */ | ||
86 | #define _PAGE_EXEC 0x0004 /* No execute on POWER4 and newer (we invert) */ | ||
87 | #define _PAGE_GUARDED 0x0008 | ||
88 | #define _PAGE_COHERENT 0x0010 /* M: enforce memory coherence (SMP systems) */ | ||
89 | #define _PAGE_NO_CACHE 0x0020 /* I: cache inhibit */ | ||
90 | #define _PAGE_WRITETHRU 0x0040 /* W: cache write-through */ | ||
91 | #define _PAGE_DIRTY 0x0080 /* C: page changed */ | ||
92 | #define _PAGE_ACCESSED 0x0100 /* R: page referenced */ | ||
93 | #define _PAGE_RW 0x0200 /* software: user write access allowed */ | ||
94 | #define _PAGE_BUSY 0x0800 /* software: PTE & hash are busy */ | ||
95 | |||
96 | /* Strong Access Ordering */ | ||
97 | #define _PAGE_SAO (_PAGE_WRITETHRU | _PAGE_NO_CACHE | _PAGE_COHERENT) | ||
98 | |||
99 | #define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_COHERENT) | ||
100 | |||
101 | #define _PAGE_WRENABLE (_PAGE_RW | _PAGE_DIRTY) | ||
102 | |||
103 | /* __pgprot defined in arch/powerpc/include/asm/page.h */ | ||
104 | #define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED) | ||
105 | |||
106 | #define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER) | ||
107 | #define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER | _PAGE_EXEC) | ||
108 | #define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER) | ||
109 | #define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC) | ||
110 | #define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER) | ||
111 | #define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC) | ||
112 | #define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_WRENABLE) | ||
113 | #define PAGE_KERNEL_CI __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \ | ||
114 | _PAGE_WRENABLE | _PAGE_NO_CACHE | _PAGE_GUARDED) | ||
115 | #define PAGE_KERNEL_EXEC __pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_EXEC) | ||
116 | |||
117 | #define PAGE_AGP __pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_NO_CACHE) | ||
118 | #define HAVE_PAGE_AGP | ||
119 | |||
120 | #define PAGE_PROT_BITS (_PAGE_GUARDED | _PAGE_COHERENT | \ | ||
121 | _PAGE_NO_CACHE | _PAGE_WRITETHRU | \ | ||
122 | _PAGE_4K_PFN | _PAGE_RW | _PAGE_USER | \ | ||
123 | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_EXEC) | ||
124 | /* PTEIDX nibble */ | ||
125 | #define _PTEIDX_SECONDARY 0x8 | ||
126 | #define _PTEIDX_GROUP_IX 0x7 | ||
127 | 84 | ||
128 | 85 | ||
129 | /* | ||
130 | * POWER4 and newer have per page execute protection, older chips can only | ||
131 | * do this on a segment (256MB) basis. | ||
132 | * | ||
133 | * Also, write permissions imply read permissions. | ||
134 | * This is the closest we can get.. | ||
135 | * | ||
136 | * Note due to the way vm flags are laid out, the bits are XWR | ||
137 | */ | ||
138 | #define __P000 PAGE_NONE | ||
139 | #define __P001 PAGE_READONLY | ||
140 | #define __P010 PAGE_COPY | ||
141 | #define __P011 PAGE_COPY | ||
142 | #define __P100 PAGE_READONLY_X | ||
143 | #define __P101 PAGE_READONLY_X | ||
144 | #define __P110 PAGE_COPY_X | ||
145 | #define __P111 PAGE_COPY_X | ||
146 | |||
147 | #define __S000 PAGE_NONE | ||
148 | #define __S001 PAGE_READONLY | ||
149 | #define __S010 PAGE_SHARED | ||
150 | #define __S011 PAGE_SHARED | ||
151 | #define __S100 PAGE_READONLY_X | ||
152 | #define __S101 PAGE_READONLY_X | ||
153 | #define __S110 PAGE_SHARED_X | ||
154 | #define __S111 PAGE_SHARED_X | ||
155 | |||
156 | #ifdef CONFIG_PPC_MM_SLICES | 86 | #ifdef CONFIG_PPC_MM_SLICES |
157 | #define HAVE_ARCH_UNMAPPED_AREA | 87 | #define HAVE_ARCH_UNMAPPED_AREA |
158 | #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN | 88 | #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN |
@@ -161,32 +91,38 @@ | |||
161 | #ifndef __ASSEMBLY__ | 91 | #ifndef __ASSEMBLY__ |
162 | 92 | ||
163 | /* | 93 | /* |
164 | * Conversion functions: convert a page and protection to a page entry, | 94 | * This is the default implementation of various PTE accessors, it's |
165 | * and a page entry and page directory to the page they refer to. | 95 | * used in all cases except Book3S with 64K pages where we have a |
166 | * | 96 | * concept of sub-pages |
167 | * mk_pte takes a (struct page *) as input | ||
168 | */ | 97 | */ |
169 | #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) | 98 | #ifndef __real_pte |
170 | 99 | ||
171 | static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot) | 100 | #ifdef STRICT_MM_TYPECHECKS |
172 | { | 101 | #define __real_pte(e,p) ((real_pte_t){(e)}) |
173 | pte_t pte; | 102 | #define __rpte_to_pte(r) ((r).pte) |
103 | #else | ||
104 | #define __real_pte(e,p) (e) | ||
105 | #define __rpte_to_pte(r) (__pte(r)) | ||
106 | #endif | ||
107 | #define __rpte_to_hidx(r,index) (pte_val(__rpte_to_pte(r)) >> 12) | ||
174 | 108 | ||
109 | #define pte_iterate_hashed_subpages(rpte, psize, va, index, shift) \ | ||
110 | do { \ | ||
111 | index = 0; \ | ||
112 | shift = mmu_psize_defs[psize].shift; \ | ||
175 | 113 | ||
176 | pte_val(pte) = (pfn << PTE_RPN_SHIFT) | pgprot_val(pgprot); | 114 | #define pte_iterate_hashed_end() } while(0) |
177 | return pte; | ||
178 | } | ||
179 | 115 | ||
180 | #define pte_modify(_pte, newprot) \ | 116 | #ifdef CONFIG_PPC_HAS_HASH_64K |
181 | (__pte((pte_val(_pte) & _PAGE_CHG_MASK) | pgprot_val(newprot))) | 117 | #define pte_pagesize_index(mm, addr, pte) get_slice_psize(mm, addr) |
118 | #else | ||
119 | #define pte_pagesize_index(mm, addr, pte) MMU_PAGE_4K | ||
120 | #endif | ||
182 | 121 | ||
183 | #define pte_none(pte) ((pte_val(pte) & ~_PAGE_HPTEFLAGS) == 0) | 122 | #endif /* __real_pte */ |
184 | #define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT) | ||
185 | 123 | ||
186 | /* pte_clear moved to later in this file */ | ||
187 | 124 | ||
188 | #define pte_pfn(x) ((unsigned long)((pte_val(x)>>PTE_RPN_SHIFT))) | 125 | /* pte_clear moved to later in this file */ |
189 | #define pte_page(x) pfn_to_page(pte_pfn(x)) | ||
190 | 126 | ||
191 | #define PMD_BAD_BITS (PTE_TABLE_SIZE-1) | 127 | #define PMD_BAD_BITS (PTE_TABLE_SIZE-1) |
192 | #define PUD_BAD_BITS (PMD_TABLE_SIZE-1) | 128 | #define PUD_BAD_BITS (PMD_TABLE_SIZE-1) |
@@ -235,36 +171,6 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot) | |||
235 | /* This now only contains the vmalloc pages */ | 171 | /* This now only contains the vmalloc pages */ |
236 | #define pgd_offset_k(address) pgd_offset(&init_mm, address) | 172 | #define pgd_offset_k(address) pgd_offset(&init_mm, address) |
237 | 173 | ||
238 | /* | ||
239 | * The following only work if pte_present() is true. | ||
240 | * Undefined behaviour if not.. | ||
241 | */ | ||
242 | static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW;} | ||
243 | static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY;} | ||
244 | static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED;} | ||
245 | static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE;} | ||
246 | static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; } | ||
247 | |||
248 | static inline pte_t pte_wrprotect(pte_t pte) { | ||
249 | pte_val(pte) &= ~(_PAGE_RW); return pte; } | ||
250 | static inline pte_t pte_mkclean(pte_t pte) { | ||
251 | pte_val(pte) &= ~(_PAGE_DIRTY); return pte; } | ||
252 | static inline pte_t pte_mkold(pte_t pte) { | ||
253 | pte_val(pte) &= ~_PAGE_ACCESSED; return pte; } | ||
254 | static inline pte_t pte_mkwrite(pte_t pte) { | ||
255 | pte_val(pte) |= _PAGE_RW; return pte; } | ||
256 | static inline pte_t pte_mkdirty(pte_t pte) { | ||
257 | pte_val(pte) |= _PAGE_DIRTY; return pte; } | ||
258 | static inline pte_t pte_mkyoung(pte_t pte) { | ||
259 | pte_val(pte) |= _PAGE_ACCESSED; return pte; } | ||
260 | static inline pte_t pte_mkhuge(pte_t pte) { | ||
261 | return pte; } | ||
262 | static inline pte_t pte_mkspecial(pte_t pte) { | ||
263 | pte_val(pte) |= _PAGE_SPECIAL; return pte; } | ||
264 | static inline pgprot_t pte_pgprot(pte_t pte) | ||
265 | { | ||
266 | return __pgprot(pte_val(pte) & PAGE_PROT_BITS); | ||
267 | } | ||
268 | 174 | ||
269 | /* Atomic PTE updates */ | 175 | /* Atomic PTE updates */ |
270 | static inline unsigned long pte_update(struct mm_struct *mm, | 176 | static inline unsigned long pte_update(struct mm_struct *mm, |
@@ -272,6 +178,7 @@ static inline unsigned long pte_update(struct mm_struct *mm, | |||
272 | pte_t *ptep, unsigned long clr, | 178 | pte_t *ptep, unsigned long clr, |
273 | int huge) | 179 | int huge) |
274 | { | 180 | { |
181 | #ifdef PTE_ATOMIC_UPDATES | ||
275 | unsigned long old, tmp; | 182 | unsigned long old, tmp; |
276 | 183 | ||
277 | __asm__ __volatile__( | 184 | __asm__ __volatile__( |
@@ -284,6 +191,13 @@ static inline unsigned long pte_update(struct mm_struct *mm, | |||
284 | : "=&r" (old), "=&r" (tmp), "=m" (*ptep) | 191 | : "=&r" (old), "=&r" (tmp), "=m" (*ptep) |
285 | : "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY) | 192 | : "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY) |
286 | : "cc" ); | 193 | : "cc" ); |
194 | #else | ||
195 | unsigned long old = pte_val(*ptep); | ||
196 | *ptep = __pte(old & ~clr); | ||
197 | #endif | ||
198 | /* huge pages use the old page table lock */ | ||
199 | if (!huge) | ||
200 | assert_pte_locked(mm, addr); | ||
287 | 201 | ||
288 | if (old & _PAGE_HASHPTE) | 202 | if (old & _PAGE_HASHPTE) |
289 | hpte_need_flush(mm, addr, ptep, old, huge); | 203 | hpte_need_flush(mm, addr, ptep, old, huge); |
@@ -359,26 +273,17 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, | |||
359 | pte_update(mm, addr, ptep, ~0UL, 0); | 273 | pte_update(mm, addr, ptep, ~0UL, 0); |
360 | } | 274 | } |
361 | 275 | ||
362 | /* | ||
363 | * set_pte stores a linux PTE into the linux page table. | ||
364 | */ | ||
365 | static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, | ||
366 | pte_t *ptep, pte_t pte) | ||
367 | { | ||
368 | if (pte_present(*ptep)) | ||
369 | pte_clear(mm, addr, ptep); | ||
370 | pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS); | ||
371 | *ptep = pte; | ||
372 | } | ||
373 | 276 | ||
374 | /* Set the dirty and/or accessed bits atomically in a linux PTE, this | 277 | /* Set the dirty and/or accessed bits atomically in a linux PTE, this |
375 | * function doesn't need to flush the hash entry | 278 | * function doesn't need to flush the hash entry |
376 | */ | 279 | */ |
377 | #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS | 280 | static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry) |
378 | static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty) | ||
379 | { | 281 | { |
380 | unsigned long bits = pte_val(entry) & | 282 | unsigned long bits = pte_val(entry) & |
381 | (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC); | 283 | (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | |
284 | _PAGE_EXEC | _PAGE_HWEXEC); | ||
285 | |||
286 | #ifdef PTE_ATOMIC_UPDATES | ||
382 | unsigned long old, tmp; | 287 | unsigned long old, tmp; |
383 | 288 | ||
384 | __asm__ __volatile__( | 289 | __asm__ __volatile__( |
@@ -391,16 +296,11 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty) | |||
391 | :"=&r" (old), "=&r" (tmp), "=m" (*ptep) | 296 | :"=&r" (old), "=&r" (tmp), "=m" (*ptep) |
392 | :"r" (bits), "r" (ptep), "m" (*ptep), "i" (_PAGE_BUSY) | 297 | :"r" (bits), "r" (ptep), "m" (*ptep), "i" (_PAGE_BUSY) |
393 | :"cc"); | 298 | :"cc"); |
299 | #else | ||
300 | unsigned long old = pte_val(*ptep); | ||
301 | *ptep = __pte(old | bits); | ||
302 | #endif | ||
394 | } | 303 | } |
395 | #define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \ | ||
396 | ({ \ | ||
397 | int __changed = !pte_same(*(__ptep), __entry); \ | ||
398 | if (__changed) { \ | ||
399 | __ptep_set_access_flags(__ptep, __entry, __dirty); \ | ||
400 | flush_tlb_page_nohash(__vma, __address); \ | ||
401 | } \ | ||
402 | __changed; \ | ||
403 | }) | ||
404 | 304 | ||
405 | #define __HAVE_ARCH_PTE_SAME | 305 | #define __HAVE_ARCH_PTE_SAME |
406 | #define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0) | 306 | #define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0) |
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h index 07f55e601696..eb17da781128 100644 --- a/arch/powerpc/include/asm/pgtable.h +++ b/arch/powerpc/include/asm/pgtable.h | |||
@@ -6,7 +6,17 @@ | |||
6 | #include <asm/processor.h> /* For TASK_SIZE */ | 6 | #include <asm/processor.h> /* For TASK_SIZE */ |
7 | #include <asm/mmu.h> | 7 | #include <asm/mmu.h> |
8 | #include <asm/page.h> | 8 | #include <asm/page.h> |
9 | |||
9 | struct mm_struct; | 10 | struct mm_struct; |
11 | |||
12 | #ifdef CONFIG_DEBUG_VM | ||
13 | extern void assert_pte_locked(struct mm_struct *mm, unsigned long addr); | ||
14 | #else /* CONFIG_DEBUG_VM */ | ||
15 | static inline void assert_pte_locked(struct mm_struct *mm, unsigned long addr) | ||
16 | { | ||
17 | } | ||
18 | #endif /* !CONFIG_DEBUG_VM */ | ||
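The CONFIG_DEBUG_VM flavour of assert_pte_locked() is out of line; presumably it lives in arch/powerpc/mm/pgtable.c and walks down to the PTE page's lock, roughly like this sketch (written against the generic page-table walkers; treat it as an assumption, not the patch itself):

/* Sketch: verify the caller holds the PTE page's spinlock. */
void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	if (mm == &init_mm)
		return;
	pgd = mm->pgd + pgd_index(addr);
	BUG_ON(pgd_none(*pgd));
	pud = pud_offset(pgd, addr);
	BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, addr);
	BUG_ON(!pmd_present(*pmd));
	assert_spin_locked(pte_lockptr(mm, pmd));
}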
19 | |||
10 | #endif /* !__ASSEMBLY__ */ | 20 | #endif /* !__ASSEMBLY__ */ |
11 | 21 | ||
12 | #if defined(CONFIG_PPC64) | 22 | #if defined(CONFIG_PPC64) |
@@ -17,6 +27,130 @@ struct mm_struct; | |||
17 | 27 | ||
18 | #ifndef __ASSEMBLY__ | 28 | #ifndef __ASSEMBLY__ |
19 | 29 | ||
30 | /* Generic accessors to PTE bits */ | ||
31 | static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; } | ||
32 | static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } | ||
33 | static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } | ||
34 | static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; } | ||
35 | static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; } | ||
36 | static inline int pte_present(pte_t pte) { return pte_val(pte) & _PAGE_PRESENT; } | ||
37 | static inline int pte_none(pte_t pte) { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; } | ||
38 | static inline pgprot_t pte_pgprot(pte_t pte) { return __pgprot(pte_val(pte) & PAGE_PROT_BITS); } | ||
39 | |||
40 | /* Conversion functions: convert a page and protection to a page entry, | ||
41 | * and a page entry and page directory to the page they refer to. | ||
42 | * | ||
43 | * Even if PTEs can be unsigned long long, a PFN is always an unsigned | ||
44 | * long for now. | ||
45 | */ | ||
46 | static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot) { | ||
47 | return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) | | ||
48 | pgprot_val(pgprot)); } | ||
49 | static inline unsigned long pte_pfn(pte_t pte) { | ||
50 | return pte_val(pte) >> PTE_RPN_SHIFT; } | ||
51 | |||
52 | /* Keep these as macros to avoid an include dependency mess */ | ||
53 | #define pte_page(x) pfn_to_page(pte_pfn(x)) | ||
54 | #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) | ||
55 | |||
56 | /* Generic modifiers for PTE bits */ | ||
57 | static inline pte_t pte_wrprotect(pte_t pte) { | ||
58 | pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; } | ||
59 | static inline pte_t pte_mkclean(pte_t pte) { | ||
60 | pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; } | ||
61 | static inline pte_t pte_mkold(pte_t pte) { | ||
62 | pte_val(pte) &= ~_PAGE_ACCESSED; return pte; } | ||
63 | static inline pte_t pte_mkwrite(pte_t pte) { | ||
64 | pte_val(pte) |= _PAGE_RW; return pte; } | ||
65 | static inline pte_t pte_mkdirty(pte_t pte) { | ||
66 | pte_val(pte) |= _PAGE_DIRTY; return pte; } | ||
67 | static inline pte_t pte_mkyoung(pte_t pte) { | ||
68 | pte_val(pte) |= _PAGE_ACCESSED; return pte; } | ||
69 | static inline pte_t pte_mkspecial(pte_t pte) { | ||
70 | pte_val(pte) |= _PAGE_SPECIAL; return pte; } | ||
71 | static inline pte_t pte_mkhuge(pte_t pte) { | ||
72 | return pte; } | ||
73 | static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) | ||
74 | { | ||
75 | pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); | ||
76 | return pte; | ||
77 | } | ||
78 | |||
79 | |||
80 | /* Insert a PTE; the top-level function is out of line. It uses an inline | ||
81 | * low-level function in the respective pgtable-* files. | ||
82 | */ | ||
83 | extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, | ||
84 | pte_t pte); | ||
85 | |||
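The out-of-line set_pte_at() itself is not in this header diff; given the comments here, a sketch of what it presumably does (filter the hash flags, then call the inline helper below with percpu == 0):

/* Sketch: top-level PTE insertion, assumed to live in mm/pgtable.c. */
void set_pte_at(struct mm_struct *mm, unsigned long addr,
		pte_t *ptep, pte_t pte)
{
#ifdef CONFIG_DEBUG_VM
	WARN_ON(pte_present(*ptep));
#endif
	/* Strip the hash-management bits; they belong to the old entry */
	pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
	__set_pte_at(mm, addr, ptep, pte, 0);
}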
86 | /* This low level function performs the actual PTE insertion. | ||
87 | * Setting the PTE depends on the MMU type and other factors. It's | ||
88 | * a horrible mess that I'm not going to try to clean up now, but | ||
89 | * I'm keeping it in one place rather than spreading it around. | ||
90 | */ | ||
91 | static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, | ||
92 | pte_t *ptep, pte_t pte, int percpu) | ||
93 | { | ||
94 | #if defined(CONFIG_PPC_STD_MMU_32) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT) | ||
95 | /* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use the | ||
96 | * helper pte_update() which does an atomic update. We need to do that | ||
97 | * because a concurrent invalidation can clear _PAGE_HASHPTE. If it's a | ||
98 | * per-CPU PTE such as a kmap_atomic, we do a simple update preserving | ||
99 | * the hash bits instead (i.e., same as the non-SMP case). | ||
100 | */ | ||
101 | if (percpu) | ||
102 | *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE) | ||
103 | | (pte_val(pte) & ~_PAGE_HASHPTE)); | ||
104 | else | ||
105 | pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte)); | ||
106 | |||
107 | #elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT) && defined(CONFIG_SMP) | ||
108 | /* Second case is 32-bit with 64-bit PTE in SMP mode. In this case, we | ||
109 | * can just store as long as we do the two halves in the right order | ||
110 | * with a barrier in between. This is possible because we take care, | ||
111 | * in the hash code, to pre-invalidate if the PTE was already hashed, | ||
112 | * which synchronizes us with any concurrent invalidation. | ||
113 | * In the percpu case, we also fall back to the simple update preserving | ||
114 | * the hash bits. | ||
115 | */ | ||
116 | if (percpu) { | ||
117 | *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE) | ||
118 | | (pte_val(pte) & ~_PAGE_HASHPTE)); | ||
119 | return; | ||
120 | } | ||
121 | #if _PAGE_HASHPTE != 0 | ||
122 | if (pte_val(*ptep) & _PAGE_HASHPTE) | ||
123 | flush_hash_entry(mm, ptep, addr); | ||
124 | #endif | ||
125 | __asm__ __volatile__("\ | ||
126 | stw%U0%X0 %2,%0\n\ | ||
127 | eieio\n\ | ||
128 | stw%U0%X0 %L2,%1" | ||
129 | : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4)) | ||
130 | : "r" (pte) : "memory"); | ||
131 | |||
132 | #elif defined(CONFIG_PPC_STD_MMU_32) | ||
133 | /* Third case is a 32-bit hash table in UP mode. We need to preserve | ||
134 | * the _PAGE_HASHPTE bit since we may not have invalidated the previous | ||
135 | * translation in the hash yet (done in a subsequent flush_tlb_xxx()), | ||
136 | * so we need to keep track that this PTE needs invalidating. | ||
137 | */ | ||
138 | *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE) | ||
139 | | (pte_val(pte) & ~_PAGE_HASHPTE)); | ||
140 | |||
141 | #else | ||
142 | /* Anything else just stores the PTE normally. That covers all 64-bit | ||
143 | * cases, and 32-bit non-hash with 64-bit PTEs in UP mode | ||
144 | */ | ||
145 | *ptep = pte; | ||
146 | #endif | ||
147 | } | ||
148 | |||
149 | |||
150 | #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS | ||
151 | extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address, | ||
152 | pte_t *ptep, pte_t entry, int dirty); | ||
153 | |||
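ptep_set_access_flags() used to be an inline macro in the 32- and 64-bit headers (removed earlier in this diff); the out-of-line definition is presumably the same logic, sketched here from the removed macro bodies:

/* Sketch: only update and flush when the PTE actually changed. */
int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
			  pte_t *ptep, pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);

	if (changed) {
		__ptep_set_access_flags(ptep, entry);
		flush_tlb_page_nohash(vma, address);
	}
	return changed;
}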
20 | /* | 154 | /* |
21 | * Macro to mark a page protection value as "uncacheable". | 155 | * Macro to mark a page protection value as "uncacheable". |
22 | */ | 156 | */ |
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h new file mode 100644 index 000000000000..f4a4db8d5555 --- /dev/null +++ b/arch/powerpc/include/asm/ppc-opcode.h | |||
@@ -0,0 +1,73 @@ | |||
1 | /* | ||
2 | * Copyright 2009 Freescale Semiconductor, Inc. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation; either version | ||
7 | * 2 of the License, or (at your option) any later version. | ||
8 | * | ||
9 | * Provides masks and opcode images for use by code generation, emulation, | ||
10 | * and for instructions that older assemblers might not know about. | ||
11 | */ | ||
12 | #ifndef _ASM_POWERPC_PPC_OPCODE_H | ||
13 | #define _ASM_POWERPC_PPC_OPCODE_H | ||
14 | |||
15 | #include <linux/stringify.h> | ||
16 | #include <asm/asm-compat.h> | ||
17 | |||
18 | /* sorted alphabetically */ | ||
19 | #define PPC_INST_DCBA 0x7c0005ec | ||
20 | #define PPC_INST_DCBA_MASK 0xfc0007fe | ||
21 | #define PPC_INST_DCBAL 0x7c2005ec | ||
22 | #define PPC_INST_DCBZL 0x7c2007ec | ||
23 | #define PPC_INST_ISEL 0x7c00001e | ||
24 | #define PPC_INST_ISEL_MASK 0xfc00003e | ||
25 | #define PPC_INST_LSWI 0x7c0004aa | ||
26 | #define PPC_INST_LSWX 0x7c00042a | ||
27 | #define PPC_INST_LWSYNC 0x7c2004ac | ||
28 | #define PPC_INST_MCRXR 0x7c000400 | ||
29 | #define PPC_INST_MCRXR_MASK 0xfc0007fe | ||
30 | #define PPC_INST_MFSPR_PVR 0x7c1f42a6 | ||
31 | #define PPC_INST_MFSPR_PVR_MASK 0xfc1fffff | ||
32 | #define PPC_INST_MSGSND 0x7c00019c | ||
33 | #define PPC_INST_NOP 0x60000000 | ||
34 | #define PPC_INST_POPCNTB 0x7c0000f4 | ||
35 | #define PPC_INST_POPCNTB_MASK 0xfc0007fe | ||
36 | #define PPC_INST_RFCI 0x4c000066 | ||
37 | #define PPC_INST_RFDI 0x4c00004e | ||
38 | #define PPC_INST_RFMCI 0x4c00004c | ||
39 | |||
40 | #define PPC_INST_STRING 0x7c00042a | ||
41 | #define PPC_INST_STRING_MASK 0xfc0007fe | ||
42 | #define PPC_INST_STRING_GEN_MASK 0xfc00067e | ||
43 | |||
44 | #define PPC_INST_STSWI 0x7c0005aa | ||
45 | #define PPC_INST_STSWX 0x7c00052a | ||
46 | #define PPC_INST_TLBILX 0x7c000626 | ||
47 | #define PPC_INST_WAIT 0x7c00007c | ||
48 | |||
49 | /* macros to insert fields into opcodes */ | ||
50 | #define __PPC_RA(a) ((a & 0x1f) << 16) | ||
51 | #define __PPC_RB(b) ((b & 0x1f) << 11) | ||
52 | #define __PPC_T_TLB(t) ((t & 0x3) << 21) | ||
53 | #define __PPC_WC(w) ((w & 0x3) << 21) | ||
54 | |||
55 | /* Deal with instructions that older assemblers aren't aware of */ | ||
56 | #define PPC_DCBAL(a, b) stringify_in_c(.long PPC_INST_DCBAL | \ | ||
57 | __PPC_RA(a) | __PPC_RB(b)) | ||
58 | #define PPC_DCBZL(a, b) stringify_in_c(.long PPC_INST_DCBZL | \ | ||
59 | __PPC_RA(a) | __PPC_RB(b)) | ||
60 | #define PPC_MSGSND(b) stringify_in_c(.long PPC_INST_MSGSND | \ | ||
61 | __PPC_RB(b)) | ||
62 | #define PPC_RFCI stringify_in_c(.long PPC_INST_RFCI) | ||
63 | #define PPC_RFDI stringify_in_c(.long PPC_INST_RFDI) | ||
64 | #define PPC_RFMCI stringify_in_c(.long PPC_INST_RFMCI) | ||
65 | #define PPC_TLBILX(t, a, b) stringify_in_c(.long PPC_INST_TLBILX | \ | ||
66 | __PPC_T_TLB(t) | __PPC_RA(a) | __PPC_RB(b)) | ||
67 | #define PPC_TLBILX_ALL(a, b) PPC_TLBILX(0, a, b) | ||
68 | #define PPC_TLBILX_PID(a, b) PPC_TLBILX(1, a, b) | ||
69 | #define PPC_TLBILX_VA(a, b) PPC_TLBILX(3, a, b) | ||
70 | #define PPC_WAIT(w) stringify_in_c(.long PPC_INST_WAIT | \ | ||
71 | __PPC_WC(w)) | ||
72 | |||
73 | #endif /* _ASM_POWERPC_PPC_OPCODE_H */ | ||
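These stringified opcode macros are meant to be dropped straight into inline or .S assembly; a usage sketch (the doorbell wrapper name is mine, for illustration):

/* Sketch: emit msgsnd even if the assembler predates it. */
static inline void doorbell_msgsnd(unsigned long tag)
{
	__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (tag));
}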
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index 1a0d628eb114..f59a66684aed 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h | |||
@@ -7,6 +7,7 @@ | |||
7 | #include <linux/stringify.h> | 7 | #include <linux/stringify.h> |
8 | #include <asm/asm-compat.h> | 8 | #include <asm/asm-compat.h> |
9 | #include <asm/processor.h> | 9 | #include <asm/processor.h> |
10 | #include <asm/ppc-opcode.h> | ||
10 | 11 | ||
11 | #ifndef __ASSEMBLY__ | 12 | #ifndef __ASSEMBLY__ |
12 | #error __FILE__ should only be used in assembler files | 13 | #error __FILE__ should only be used in assembler files |
@@ -167,11 +168,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_PURR); \ | |||
167 | #define HMT_MEDIUM_HIGH or 5,5,5 # medium high priority | 168 | #define HMT_MEDIUM_HIGH or 5,5,5 # medium high priority |
168 | #define HMT_HIGH or 3,3,3 | 169 | #define HMT_HIGH or 3,3,3 |
169 | 170 | ||
170 | /* handle instructions that older assemblers may not know */ | ||
171 | #define RFCI .long 0x4c000066 /* rfci instruction */ | ||
172 | #define RFDI .long 0x4c00004e /* rfdi instruction */ | ||
173 | #define RFMCI .long 0x4c00004c /* rfmci instruction */ | ||
174 | |||
175 | #ifdef __KERNEL__ | 171 | #ifdef __KERNEL__ |
176 | #ifdef CONFIG_PPC64 | 172 | #ifdef CONFIG_PPC64 |
177 | 173 | ||
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index d3466490104a..9eed29eee604 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h | |||
@@ -313,6 +313,25 @@ static inline void prefetchw(const void *x) | |||
313 | #define HAVE_ARCH_PICK_MMAP_LAYOUT | 313 | #define HAVE_ARCH_PICK_MMAP_LAYOUT |
314 | #endif | 314 | #endif |
315 | 315 | ||
316 | #ifdef CONFIG_PPC64 | ||
317 | static inline unsigned long get_clean_sp(struct pt_regs *regs, int is_32) | ||
318 | { | ||
319 | unsigned long sp; | ||
320 | |||
321 | if (is_32) | ||
322 | sp = regs->gpr[1] & 0x0ffffffffUL; | ||
323 | else | ||
324 | sp = regs->gpr[1]; | ||
325 | |||
326 | return sp; | ||
327 | } | ||
328 | #else | ||
329 | static inline unsigned long get_clean_sp(struct pt_regs *regs, int is_32) | ||
330 | { | ||
331 | return regs->gpr[1]; | ||
332 | } | ||
333 | #endif | ||
334 | |||
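get_clean_sp() presumably exists for the signal code, which must mask the user stack pointer to 32 bits for compat tasks before carving out a signal frame; a hedged usage sketch:

/* Sketch: pick an aligned signal frame below the (clean) user SP. */
unsigned long sp = get_clean_sp(regs, is_32);
void __user *frame = (void __user *)((sp - frame_size) & ~15UL);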
316 | #endif /* __KERNEL__ */ | 335 | #endif /* __KERNEL__ */ |
317 | #endif /* __ASSEMBLY__ */ | 336 | #endif /* __ASSEMBLY__ */ |
318 | #endif /* _ASM_POWERPC_PROCESSOR_H */ | 337 | #endif /* _ASM_POWERPC_PROCESSOR_H */ |
diff --git a/arch/powerpc/include/asm/ps3.h b/arch/powerpc/include/asm/ps3.h index 67f1812698d2..cdb6fd814de8 100644 --- a/arch/powerpc/include/asm/ps3.h +++ b/arch/powerpc/include/asm/ps3.h | |||
@@ -50,6 +50,9 @@ enum ps3_param_av_multi_out { | |||
50 | 50 | ||
51 | enum ps3_param_av_multi_out ps3_os_area_get_av_multi_out(void); | 51 | enum ps3_param_av_multi_out ps3_os_area_get_av_multi_out(void); |
52 | 52 | ||
53 | extern u64 ps3_os_area_get_rtc_diff(void); | ||
54 | extern void ps3_os_area_set_rtc_diff(u64 rtc_diff); | ||
55 | |||
53 | /* dma routines */ | 56 | /* dma routines */ |
54 | 57 | ||
55 | enum ps3_dma_page_size { | 58 | enum ps3_dma_page_size { |
diff --git a/arch/powerpc/include/asm/ps3av.h b/arch/powerpc/include/asm/ps3av.h index cd24ac16660a..0427b0b53d2d 100644 --- a/arch/powerpc/include/asm/ps3av.h +++ b/arch/powerpc/include/asm/ps3av.h | |||
@@ -730,7 +730,7 @@ extern int ps3av_cmd_av_get_hw_conf(struct ps3av_pkt_av_get_hw_conf *); | |||
730 | extern int ps3av_cmd_video_get_monitor_info(struct ps3av_pkt_av_get_monitor_info *, | 730 | extern int ps3av_cmd_video_get_monitor_info(struct ps3av_pkt_av_get_monitor_info *, |
731 | u32); | 731 | u32); |
732 | 732 | ||
733 | extern int ps3av_set_video_mode(u32); | 733 | extern int ps3av_set_video_mode(int); |
734 | extern int ps3av_set_audio_mode(u32, u32, u32, u32, u32); | 734 | extern int ps3av_set_audio_mode(u32, u32, u32, u32, u32); |
735 | extern int ps3av_get_auto_mode(void); | 735 | extern int ps3av_get_auto_mode(void); |
736 | extern int ps3av_get_mode(void); | 736 | extern int ps3av_get_mode(void); |
diff --git a/arch/powerpc/include/asm/ps3fb.h b/arch/powerpc/include/asm/ps3fb.h index e7233a849680..90dbefb8cfc4 100644 --- a/arch/powerpc/include/asm/ps3fb.h +++ b/arch/powerpc/include/asm/ps3fb.h | |||
@@ -21,6 +21,7 @@ | |||
21 | 21 | ||
22 | #include <linux/types.h> | 22 | #include <linux/types.h> |
23 | #include <linux/ioctl.h> | 23 | #include <linux/ioctl.h> |
24 | #include <linux/types.h> | ||
24 | 25 | ||
25 | /* ioctl */ | 26 | /* ioctl */ |
26 | #define PS3FB_IOCTL_SETMODE _IOW('r', 1, int) /* set video mode */ | 27 | #define PS3FB_IOCTL_SETMODE _IOW('r', 1, int) /* set video mode */ |
diff --git a/arch/powerpc/include/asm/pte-40x.h b/arch/powerpc/include/asm/pte-40x.h new file mode 100644 index 000000000000..07630faae029 --- /dev/null +++ b/arch/powerpc/include/asm/pte-40x.h | |||
@@ -0,0 +1,64 @@ | |||
1 | #ifndef _ASM_POWERPC_PTE_40x_H | ||
2 | #define _ASM_POWERPC_PTE_40x_H | ||
3 | #ifdef __KERNEL__ | ||
4 | |||
5 | /* | ||
6 | * At present, all PowerPC 400-class processors share a similar TLB | ||
7 | * architecture. The instruction and data sides share a unified, | ||
8 | * 64-entry, fully-associative TLB which is maintained totally under | ||
9 | * software control. In addition, the instruction side has a | ||
10 | * hardware-managed, 4-entry, fully-associative TLB which serves as a | ||
11 | * first level to the shared TLB. These two TLBs are known as the UTLB | ||
12 | * and ITLB, respectively (see "mmu.h" for definitions). | ||
13 | * | ||
14 | * There are several potential gotchas here. The 40x hardware TLBLO | ||
15 | * field looks like this: | ||
16 | * | ||
17 | * 0 1 2 3 4 ... 18 19 20 21 22 23 24 25 26 27 28 29 30 31 | ||
18 | * RPN..................... 0 0 EX WR ZSEL....... W I M G | ||
19 | * | ||
20 | * Where possible we make the Linux PTE bits match up with this | ||
21 | * | ||
22 | * - bits 20 and 21 must be cleared, because we use 4k pages (40x can | ||
23 | * support down to 1k pages), this is done in the TLBMiss exception | ||
24 | * handler. | ||
25 | * - We use only zones 0 (for kernel pages) and 1 (for user pages) | ||
26 | * of the 16 available. Bit 24-26 of the TLB are cleared in the TLB | ||
27 | * miss handler. Bit 27 is PAGE_USER, thus selecting the correct | ||
28 | * zone. | ||
29 | * - PRESENT *must* be in the bottom two bits because swap cache | ||
30 | * entries use the top 30 bits. Because 40x doesn't support SMP | ||
31 | * anyway, M is irrelevant so we borrow it for PAGE_PRESENT. Bit 30 | ||
32 | * is cleared in the TLB miss handler before the TLB entry is loaded. | ||
33 | * - All other bits of the PTE are loaded into TLBLO without | ||
34 | * modification, leaving us only the bits 20, 21, 24, 25, 26, 30 for | ||
35 | * software PTE bits. We actually use bits 21, 24, 25, and | ||
36 | * 30 respectively for the software bits: ACCESSED, DIRTY, RW, and | ||
37 | * PRESENT. | ||
38 | */ | ||
39 | |||
40 | #define _PAGE_GUARDED 0x001 /* G: page is guarded from prefetch */ | ||
41 | #define _PAGE_FILE 0x001 /* when !present: nonlinear file mapping */ | ||
42 | #define _PAGE_PRESENT 0x002 /* software: PTE contains a translation */ | ||
43 | #define _PAGE_NO_CACHE 0x004 /* I: caching is inhibited */ | ||
44 | #define _PAGE_WRITETHRU 0x008 /* W: caching is write-through */ | ||
45 | #define _PAGE_USER 0x010 /* matches one of the zone permission bits */ | ||
46 | #define _PAGE_RW 0x040 /* software: Writes permitted */ | ||
47 | #define _PAGE_DIRTY 0x080 /* software: dirty page */ | ||
48 | #define _PAGE_HWWRITE 0x100 /* hardware: Dirty & RW, set in exception */ | ||
49 | #define _PAGE_HWEXEC 0x200 /* hardware: EX permission */ | ||
50 | #define _PAGE_ACCESSED 0x400 /* software: R: page referenced */ | ||
51 | |||
52 | #define _PMD_PRESENT 0x400 /* PMD points to page of PTEs */ | ||
53 | #define _PMD_BAD 0x802 | ||
54 | #define _PMD_SIZE 0x0e0 /* size field, != 0 for large-page PMD entry */ | ||
55 | #define _PMD_SIZE_4M 0x0c0 | ||
56 | #define _PMD_SIZE_16M 0x0e0 | ||
57 | |||
58 | #define PMD_PAGE_SIZE(pmdval) (1024 << (((pmdval) & _PMD_SIZE) >> 4)) | ||
59 | |||
60 | /* Until my rework is finished, 40x still needs atomic PTE updates */ | ||
61 | #define PTE_ATOMIC_UPDATES 1 | ||
62 | |||
63 | #endif /* __KERNEL__ */ | ||
64 | #endif /* _ASM_POWERPC_PTE_40x_H */ | ||
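The PMD_PAGE_SIZE() macro above decodes the large-page size straight from the PMD's size field. A minimal user-space sketch (ours, not part of the patch) that checks the arithmetic for the two encodings defined in this header:

/* Sketch: verify 40x PMD_PAGE_SIZE() decoding in user space.
 * The _PMD_SIZE* values are copied from pte-40x.h; main() is ours. */
#include <assert.h>
#include <stdio.h>

#define _PMD_SIZE     0x0e0
#define _PMD_SIZE_4M  0x0c0
#define _PMD_SIZE_16M 0x0e0
#define PMD_PAGE_SIZE(pmdval) (1024 << (((pmdval) & _PMD_SIZE) >> 4))

int main(void)
{
        /* 0x0c0 >> 4 == 12, so 1024 << 12 == 4MB */
        assert(PMD_PAGE_SIZE(_PMD_SIZE_4M) == (4 << 20));
        /* 0x0e0 >> 4 == 14, so 1024 << 14 == 16MB */
        assert(PMD_PAGE_SIZE(_PMD_SIZE_16M) == (16 << 20));
        printf("4M entry decodes to %d bytes, 16M to %d bytes\n",
               PMD_PAGE_SIZE(_PMD_SIZE_4M), PMD_PAGE_SIZE(_PMD_SIZE_16M));
        return 0;
}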
diff --git a/arch/powerpc/include/asm/pte-44x.h b/arch/powerpc/include/asm/pte-44x.h new file mode 100644 index 000000000000..37e98bcf83e0 --- /dev/null +++ b/arch/powerpc/include/asm/pte-44x.h | |||
@@ -0,0 +1,102 @@ | |||
1 | #ifndef _ASM_POWERPC_PTE_44x_H | ||
2 | #define _ASM_POWERPC_PTE_44x_H | ||
3 | #ifdef __KERNEL__ | ||
4 | |||
5 | /* | ||
6 | * Definitions for PPC440 | ||
7 | * | ||
8 | * Because of the 3 word TLB entries to support 36-bit addressing, | ||
9 | * the attributes are difficult to map in such a fashion that they | ||
10 | * are easily loaded during exception processing. I decided to | ||
11 | * organize the entry so the ERPN is the only portion in the | ||
12 | * upper word of the PTE and the attribute bits below are packed | ||
13 | * in as sensibly as they can be in the area below a 4KB page size | ||
14 | * oriented RPN. This at least makes it easy to load the RPN and | ||
15 | * ERPN fields in the TLB. -Matt | ||
16 | * | ||
17 | * This isn't entirely true anymore; at least some bits are now | ||
18 | * easier to move into the TLB from the PTE. -BenH. | ||
19 | * | ||
20 | * Note that these bits preclude future use of a page size | ||
21 | * less than 4KB. | ||
22 | * | ||
23 | * | ||
24 | * The PPC 440 core has the following TLB attribute fields: | ||
25 | * | ||
26 | * TLB1: | ||
27 | * 0 1 2 3 4 ... 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 | ||
28 | * RPN................................. - - - - - - ERPN....... | ||
29 | * | ||
30 | * TLB2: | ||
31 | * 0 1 2 3 4 ... 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 | ||
32 | * - - - - - - U0 U1 U2 U3 W I M G E - UX UW UR SX SW SR | ||
33 | * | ||
34 | * Newer 440 cores (440x6 as used on AMCC 460EX/460GT) have additional | ||
35 | * TLB2 storage attribute fields. Those are: | ||
36 | * | ||
37 | * TLB2: | ||
38 | * 0...10 11 12 13 14 15 16...31 | ||
39 | * no change WL1 IL1I IL1D IL2I IL2D no change | ||
40 | * | ||
41 | * There are some constraints and options in deciding how to map the | ||
42 | * software bits into a TLB entry. | ||
43 | * | ||
44 | * - PRESENT *must* be in the bottom three bits because swap cache | ||
45 | * entries use the top 29 bits for TLB2. | ||
46 | * | ||
47 | * - FILE *must* be in the bottom three bits because swap cache | ||
48 | * entries use the top 29 bits for TLB2. | ||
49 | * | ||
50 | * - CACHE COHERENT bit (M) has no effect on original PPC440 cores, | ||
51 | * because they don't support SMP. However, some later 460 variants | ||
52 | * have -some- form of SMP support and so I keep the bit there for | ||
53 | * future use. | ||
54 | * | ||
55 | * With the PPC 44x Linux implementation, the 0-11th LSBs of the PTE are used | ||
56 | * for memory protection related functions (see PTE structure in | ||
57 | * include/asm-ppc/mmu.h). The _PAGE_XXX definitions in this file map to the | ||
58 | * above bits. Note that the bit values are CPU specific, not architecture | ||
59 | * specific. | ||
60 | * | ||
61 | * The kernel PTE entry holds an arch-dependent swp_entry structure in | ||
62 | * certain situations. In other words, in such situations some portion of | ||
63 | * the PTE bits is used as a swp_entry. In the PPC implementation, the | ||
64 | * 3rd-24th LSBs are shared with the swp_entry, while the 0-2nd LSBs still | ||
65 | * hold protection values. That means the three protection bits are | ||
66 | * reserved for both the PTE and the swap entry in the three low-order | ||
67 | * LSBs. | ||
68 | * | ||
69 | * There are three protection bits available for SWAP entry: | ||
70 | * _PAGE_PRESENT | ||
71 | * _PAGE_FILE | ||
72 | * _PAGE_HASHPTE (if the HW has it) | ||
73 | * | ||
74 | * So those three bits have to be within the 0-2nd LSBs of the PTE. | ||
75 | * | ||
76 | */ | ||
77 | |||
78 | #define _PAGE_PRESENT 0x00000001 /* S: PTE valid */ | ||
79 | #define _PAGE_RW 0x00000002 /* S: Write permission */ | ||
80 | #define _PAGE_FILE 0x00000004 /* S: nonlinear file mapping */ | ||
81 | #define _PAGE_HWEXEC 0x00000004 /* H: Execute permission */ | ||
82 | #define _PAGE_ACCESSED 0x00000008 /* S: Page referenced */ | ||
83 | #define _PAGE_DIRTY 0x00000010 /* S: Page dirty */ | ||
84 | #define _PAGE_SPECIAL 0x00000020 /* S: Special page */ | ||
85 | #define _PAGE_USER 0x00000040 /* S: User page */ | ||
86 | #define _PAGE_ENDIAN 0x00000080 /* H: E bit */ | ||
87 | #define _PAGE_GUARDED 0x00000100 /* H: G bit */ | ||
88 | #define _PAGE_COHERENT 0x00000200 /* H: M bit */ | ||
89 | #define _PAGE_NO_CACHE 0x00000400 /* H: I bit */ | ||
90 | #define _PAGE_WRITETHRU 0x00000800 /* H: W bit */ | ||
91 | |||
92 | /* TODO: Add large page lowmem mapping support */ | ||
93 | #define _PMD_PRESENT 0 | ||
94 | #define _PMD_PRESENT_MASK (PAGE_MASK) | ||
95 | #define _PMD_BAD (~PAGE_MASK) | ||
96 | |||
97 | /* ERPN in a PTE never gets cleared, ignore it */ | ||
98 | #define _PTE_NONE_MASK 0xffffffff00000000ULL | ||
99 | |||
100 | |||
101 | #endif /* __KERNEL__ */ | ||
102 | #endif /* _ASM_POWERPC_PTE_44x_H */ | ||
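The constraint spelled out above -- _PAGE_PRESENT and _PAGE_FILE in the bottom three bits, swap data in the bits above them -- can be illustrated with a small user-space sketch. The encoding below is purely illustrative (the real one lives in the generic swp_entry macros); only the bit positions come from this header:

/* Sketch: a swapped-out 44x PTE keeps its low three bits clear,
 * so _PAGE_PRESENT/_PAGE_FILE tests still work on it.
 * swp_encode() is a made-up stand-in for the kernel's swp_entry code. */
#include <assert.h>

#define _PAGE_PRESENT 0x00000001
#define _PAGE_FILE    0x00000004
#define SWP_SHIFT     3 /* swap type+offset live above the low 3 bits */

static unsigned long swp_encode(unsigned long type, unsigned long offset)
{
        return ((offset << 5) | type) << SWP_SHIFT;
}

int main(void)
{
        unsigned long pte = swp_encode(2, 12345);
        assert(!(pte & _PAGE_PRESENT)); /* not a live translation */
        assert(!(pte & _PAGE_FILE));    /* not a nonlinear file PTE */
        return 0;
}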
diff --git a/arch/powerpc/include/asm/pte-8xx.h b/arch/powerpc/include/asm/pte-8xx.h new file mode 100644 index 000000000000..8c6e31251034 --- /dev/null +++ b/arch/powerpc/include/asm/pte-8xx.h | |||
@@ -0,0 +1,67 @@ | |||
1 | #ifndef _ASM_POWERPC_PTE_8xx_H | ||
2 | #define _ASM_POWERPC_PTE_8xx_H | ||
3 | #ifdef __KERNEL__ | ||
4 | |||
5 | /* | ||
6 | * The PowerPC MPC8xx uses a TLB with hardware-assisted software tablewalk. | ||
7 | * We also use the two level tables, but we can put the real bits in them | ||
8 | * needed for the TLB and tablewalk. These definitions require Mx_CTR.PPM = 0, | ||
9 | * Mx_CTR.PPCS = 0, and MD_CTR.TWAM = 1. The level 2 descriptor has | ||
10 | * additional page protection (when Mx_CTR.PPCS = 1) that allows TLB hit | ||
11 | * based upon user/super access. The TLB does not have accessed nor write | ||
12 | * protect. We assume that if the TLB gets loaded with an entry it is | ||
13 | * accessed, and overload the changed bit for write protect. We use | ||
14 | * two bits in the software pte that are supposed to be set to zero in | ||
15 | * the TLB entry (24 and 25) for these indicators. Although the level 1 | ||
16 | * descriptor contains the guarded and writethrough/copyback bits, we can | ||
17 | * set these at the page level since they get copied from the Mx_TWC | ||
18 | * register when the TLB entry is loaded. We will use bit 27 for guard, since | ||
19 | * that is where it exists in the MD_TWC, and bit 26 for writethrough. | ||
20 | * These will get masked from the level 2 descriptor at TLB load time, and | ||
21 | * copied to the MD_TWC before it gets loaded. | ||
22 | * Large page sizes have been added. We currently support two sizes, 4K and 8M. | ||
23 | * This also allows a TLB handler optimization because we can directly | ||
24 | * load the PMD into MD_TWC. The 8M pages are only used for kernel | ||
25 | * mapping of well known areas. The PMD (PGD) entries contain control | ||
26 | * flags in addition to the address, so care must be taken that the | ||
27 | * software no longer assumes these are only pointers. | ||
28 | */ | ||
29 | |||
30 | /* Definitions for 8xx embedded chips. */ | ||
31 | #define _PAGE_PRESENT 0x0001 /* Page is valid */ | ||
32 | #define _PAGE_FILE 0x0002 /* when !present: nonlinear file mapping */ | ||
33 | #define _PAGE_NO_CACHE 0x0002 /* I: cache inhibit */ | ||
34 | #define _PAGE_SHARED 0x0004 /* No ASID (context) compare */ | ||
35 | |||
36 | /* These five software bits must be masked out when the entry is loaded | ||
37 | * into the TLB. | ||
38 | */ | ||
39 | #define _PAGE_EXEC 0x0008 /* software: i-cache coherency required */ | ||
40 | #define _PAGE_GUARDED 0x0010 /* software: guarded access */ | ||
41 | #define _PAGE_DIRTY 0x0020 /* software: page changed */ | ||
42 | #define _PAGE_RW 0x0040 /* software: user write access allowed */ | ||
43 | #define _PAGE_ACCESSED 0x0080 /* software: page referenced */ | ||
44 | |||
45 | /* Setting any bits in the nibble with the following two controls will | ||
46 | * require a TLB exception handler change. It is assumed unused bits | ||
47 | * are always zero. | ||
48 | */ | ||
49 | #define _PAGE_HWWRITE 0x0100 /* h/w write enable: never set in Linux PTE */ | ||
50 | #define _PAGE_USER 0x0800 /* One of the PP bits, the other is USER&~RW */ | ||
51 | |||
52 | #define _PMD_PRESENT 0x0001 | ||
53 | #define _PMD_BAD 0x0ff0 | ||
54 | #define _PMD_PAGE_MASK 0x000c | ||
55 | #define _PMD_PAGE_8M 0x000c | ||
56 | |||
57 | #define _PTE_NONE_MASK _PAGE_ACCESSED | ||
58 | |||
59 | /* Until my rework is finished, 8xx still needs atomic PTE updates */ | ||
60 | #define PTE_ATOMIC_UPDATES 1 | ||
61 | |||
62 | /* We need to add _PAGE_SHARED to kernel pages */ | ||
63 | #define _PAGE_KERNEL_RO (_PAGE_SHARED) | ||
64 | #define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW | _PAGE_HWWRITE) | ||
65 | |||
66 | #endif /* __KERNEL__ */ | ||
67 | #endif /* _ASM_POWERPC_PTE_8xx_H */ | ||
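The five software bits called out above happen to form one contiguous 0x00f8 run, so stripping them before a TLB load is a single mask. A hedged sketch of that masking (the _PAGE_SW_BITS name and the C helpers are ours; the real work is done in the 8xx TLB miss assembly):

/* Sketch: strip the software-only PTE bits before a TLB load.
 * Bit values copied from pte-8xx.h; _PAGE_SW_BITS is a made-up name. */
#include <assert.h>

#define _PAGE_EXEC     0x0008
#define _PAGE_GUARDED  0x0010
#define _PAGE_DIRTY    0x0020
#define _PAGE_RW       0x0040
#define _PAGE_ACCESSED 0x0080
#define _PAGE_SW_BITS  (_PAGE_EXEC | _PAGE_GUARDED | _PAGE_DIRTY | \
                        _PAGE_RW | _PAGE_ACCESSED)      /* == 0x00f8 */

/* a C rendering of the masking the miss handler would do */
static inline unsigned int tlb_load_value(unsigned int pte)
{
        return pte & ~_PAGE_SW_BITS;
}

int main(void)
{
        /* all five software bits stripped, everything else preserved */
        assert(tlb_load_value(0x0ff9) == 0x0f01);
        return 0;
}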
diff --git a/arch/powerpc/include/asm/pte-common.h b/arch/powerpc/include/asm/pte-common.h new file mode 100644 index 000000000000..d9740e886801 --- /dev/null +++ b/arch/powerpc/include/asm/pte-common.h | |||
@@ -0,0 +1,180 @@ | |||
1 | /* Included from asm/pgtable-*.h only! */ | ||
2 | |||
3 | /* | ||
4 | * Some bits are only used on some cpu families... Make sure that all | ||
5 | * the undefined gets a sensible default | ||
6 | */ | ||
7 | #ifndef _PAGE_HASHPTE | ||
8 | #define _PAGE_HASHPTE 0 | ||
9 | #endif | ||
10 | #ifndef _PAGE_SHARED | ||
11 | #define _PAGE_SHARED 0 | ||
12 | #endif | ||
13 | #ifndef _PAGE_HWWRITE | ||
14 | #define _PAGE_HWWRITE 0 | ||
15 | #endif | ||
16 | #ifndef _PAGE_HWEXEC | ||
17 | #define _PAGE_HWEXEC 0 | ||
18 | #endif | ||
19 | #ifndef _PAGE_EXEC | ||
20 | #define _PAGE_EXEC 0 | ||
21 | #endif | ||
22 | #ifndef _PAGE_ENDIAN | ||
23 | #define _PAGE_ENDIAN 0 | ||
24 | #endif | ||
25 | #ifndef _PAGE_COHERENT | ||
26 | #define _PAGE_COHERENT 0 | ||
27 | #endif | ||
28 | #ifndef _PAGE_WRITETHRU | ||
29 | #define _PAGE_WRITETHRU 0 | ||
30 | #endif | ||
31 | #ifndef _PAGE_SPECIAL | ||
32 | #define _PAGE_SPECIAL 0 | ||
33 | #endif | ||
34 | #ifndef _PAGE_4K_PFN | ||
35 | #define _PAGE_4K_PFN 0 | ||
36 | #endif | ||
37 | #ifndef _PAGE_PSIZE | ||
38 | #define _PAGE_PSIZE 0 | ||
39 | #endif | ||
40 | #ifndef _PMD_PRESENT_MASK | ||
41 | #define _PMD_PRESENT_MASK _PMD_PRESENT | ||
42 | #endif | ||
43 | #ifndef _PMD_SIZE | ||
44 | #define _PMD_SIZE 0 | ||
45 | #define PMD_PAGE_SIZE(pmd) bad_call_to_PMD_PAGE_SIZE() | ||
46 | #endif | ||
47 | #ifndef _PAGE_KERNEL_RO | ||
48 | #define _PAGE_KERNEL_RO 0 | ||
49 | #endif | ||
50 | #ifndef _PAGE_KERNEL_RW | ||
51 | #define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW | _PAGE_HWWRITE) | ||
52 | #endif | ||
53 | #ifndef _PAGE_HPTEFLAGS | ||
54 | #define _PAGE_HPTEFLAGS _PAGE_HASHPTE | ||
55 | #endif | ||
56 | #ifndef _PTE_NONE_MASK | ||
57 | #define _PTE_NONE_MASK _PAGE_HPTEFLAGS | ||
58 | #endif | ||
59 | |||
60 | /* Make sure we get a link error if PMD_PAGE_SIZE is ever called on a | ||
61 | * kernel without large page PMD support | ||
62 | */ | ||
63 | #ifndef __ASSEMBLY__ | ||
64 | extern unsigned long bad_call_to_PMD_PAGE_SIZE(void); | ||
65 | #endif /* __ASSEMBLY__ */ | ||
66 | |||
67 | /* Location of the PFN in the PTE. Most 32-bit platforms use the same | ||
68 | * value as PAGE_SHIFT here (ie, naturally aligned). | ||
69 | * Platforms that differ simply pre-define the value, so we don't override it here. | ||
70 | */ | ||
71 | #ifndef PTE_RPN_SHIFT | ||
72 | #define PTE_RPN_SHIFT (PAGE_SHIFT) | ||
73 | #endif | ||
74 | |||
75 | /* The mask covered by the RPN must be a ULL on 32-bit platforms with | ||
76 | * 64-bit PTEs | ||
77 | */ | ||
78 | #if defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT) | ||
79 | #define PTE_RPN_MAX (1ULL << (64 - PTE_RPN_SHIFT)) | ||
80 | #define PTE_RPN_MASK (~((1ULL<<PTE_RPN_SHIFT)-1)) | ||
81 | #else | ||
82 | #define PTE_RPN_MAX (1UL << (32 - PTE_RPN_SHIFT)) | ||
83 | #define PTE_RPN_MASK (~((1UL<<PTE_RPN_SHIFT)-1)) | ||
84 | #endif | ||
85 | |||
86 | /* _PAGE_CHG_MASK masks the bits that are to be preserved across | ||
87 | * pgprot changes | ||
88 | */ | ||
89 | #define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \ | ||
90 | _PAGE_ACCESSED | _PAGE_SPECIAL) | ||
91 | |||
92 | /* Mask of bits returned by pte_pgprot() */ | ||
93 | #define PAGE_PROT_BITS (_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \ | ||
94 | _PAGE_WRITETHRU | _PAGE_ENDIAN | _PAGE_4K_PFN | \ | ||
95 | _PAGE_USER | _PAGE_ACCESSED | \ | ||
96 | _PAGE_RW | _PAGE_HWWRITE | _PAGE_DIRTY | \ | ||
97 | _PAGE_EXEC | _PAGE_HWEXEC) | ||
98 | |||
99 | /* | ||
100 | * We define 2 sets of base prot bits, one for basic pages (ie, | ||
101 | * cacheable kernel and user pages) and one for non cacheable | ||
102 | * pages. We always set _PAGE_COHERENT when SMP is enabled or | ||
103 | * the processor might need it for DMA coherency. | ||
104 | */ | ||
105 | #define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_PSIZE) | ||
106 | #if defined(CONFIG_SMP) || defined(CONFIG_PPC_STD_MMU) | ||
107 | #define _PAGE_BASE (_PAGE_BASE_NC | _PAGE_COHERENT) | ||
108 | #else | ||
109 | #define _PAGE_BASE (_PAGE_BASE_NC) | ||
110 | #endif | ||
111 | |||
112 | /* Permission masks used to generate the __P and __S table, | ||
113 | * | ||
114 | * Note:__pgprot is defined in arch/powerpc/include/asm/page.h | ||
115 | * | ||
116 | * Write permissions imply read permissions for now (we could make write-only | ||
117 | * pages on BookE but we don't bother for now). Execute permission control is | ||
118 | * possible on platforms that define _PAGE_EXEC | ||
119 | * | ||
120 | * Note that, due to the way vm flags are laid out, the bits are XWR | ||
121 | */ | ||
122 | #define PAGE_NONE __pgprot(_PAGE_BASE) | ||
123 | #define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW) | ||
124 | #define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC) | ||
125 | #define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER) | ||
126 | #define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC) | ||
127 | #define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER) | ||
128 | #define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC) | ||
129 | |||
130 | #define __P000 PAGE_NONE | ||
131 | #define __P001 PAGE_READONLY | ||
132 | #define __P010 PAGE_COPY | ||
133 | #define __P011 PAGE_COPY | ||
134 | #define __P100 PAGE_READONLY_X | ||
135 | #define __P101 PAGE_READONLY_X | ||
136 | #define __P110 PAGE_COPY_X | ||
137 | #define __P111 PAGE_COPY_X | ||
138 | |||
139 | #define __S000 PAGE_NONE | ||
140 | #define __S001 PAGE_READONLY | ||
141 | #define __S010 PAGE_SHARED | ||
142 | #define __S011 PAGE_SHARED | ||
143 | #define __S100 PAGE_READONLY_X | ||
144 | #define __S101 PAGE_READONLY_X | ||
145 | #define __S110 PAGE_SHARED_X | ||
146 | #define __S111 PAGE_SHARED_X | ||
147 | |||
148 | /* Permission masks used for kernel mappings */ | ||
149 | #define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW) | ||
150 | #define PAGE_KERNEL_NC __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \ | ||
151 | _PAGE_NO_CACHE) | ||
152 | #define PAGE_KERNEL_NCG __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \ | ||
153 | _PAGE_NO_CACHE | _PAGE_GUARDED) | ||
154 | #define PAGE_KERNEL_X __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW | _PAGE_EXEC) | ||
155 | #define PAGE_KERNEL_RO __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO) | ||
156 | #define PAGE_KERNEL_ROX __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO | _PAGE_EXEC) | ||
157 | |||
158 | /* Protection used for kernel text. We want the debuggers to be able to | ||
159 | * set breakpoints anywhere, so don't write protect the kernel text | ||
160 | * on platforms where such control is possible. | ||
161 | */ | ||
162 | #if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\ | ||
163 | defined(CONFIG_KPROBES) | ||
164 | #define PAGE_KERNEL_TEXT PAGE_KERNEL_X | ||
165 | #else | ||
166 | #define PAGE_KERNEL_TEXT PAGE_KERNEL_ROX | ||
167 | #endif | ||
168 | |||
169 | /* Make module code happy. We don't set RO yet */ | ||
170 | #define PAGE_KERNEL_EXEC PAGE_KERNEL_X | ||
171 | |||
172 | /* Advertise special mapping type for AGP */ | ||
173 | #define PAGE_AGP (PAGE_KERNEL_NC) | ||
174 | #define HAVE_PAGE_AGP | ||
175 | |||
176 | /* Advertise support for _PAGE_SPECIAL */ | ||
177 | #ifdef _PAGE_SPECIAL | ||
178 | #define __HAVE_ARCH_PTE_SPECIAL | ||
179 | #endif | ||
180 | |||
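The __P/__S tables above are what generic mm code indexes with the low XWR bits of a VMA's flags. A user-space sketch of that lookup, with strings standing in for the pgprot values (the VM_* values and the "& 0xf" indexing mirror the generic kernel convention, but the table here is illustrative):

/* Sketch: selecting a protection entry from the XWR + shared bits. */
#include <stdio.h>

#define VM_READ   0x1
#define VM_WRITE  0x2
#define VM_EXEC   0x4
#define VM_SHARED 0x8

static const char *protection_map[16] = {
        /* private: __P000 .. __P111 */
        "NONE", "READONLY", "COPY", "COPY",
        "READONLY_X", "READONLY_X", "COPY_X", "COPY_X",
        /* shared: __S000 .. __S111 */
        "NONE", "READONLY", "SHARED", "SHARED",
        "READONLY_X", "READONLY_X", "SHARED_X", "SHARED_X",
};

int main(void)
{
        unsigned long vm_flags = VM_READ | VM_WRITE | VM_SHARED;
        printf("%s\n", protection_map[vm_flags & 0xf]); /* prints SHARED */
        return 0;
}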
diff --git a/arch/powerpc/include/asm/pte-fsl-booke.h b/arch/powerpc/include/asm/pte-fsl-booke.h new file mode 100644 index 000000000000..10820f58acf5 --- /dev/null +++ b/arch/powerpc/include/asm/pte-fsl-booke.h | |||
@@ -0,0 +1,48 @@ | |||
1 | #ifndef _ASM_POWERPC_PTE_FSL_BOOKE_H | ||
2 | #define _ASM_POWERPC_PTE_FSL_BOOKE_H | ||
3 | #ifdef __KERNEL__ | ||
4 | |||
5 | /* PTE bit definitions for processors based on the Freescale BookE | ||
6 | * software-loaded TLB MMU | ||
7 | * | ||
8 | MMU Assist Register 3: | ||
9 | |||
10 | 32 33 34 35 36 ... 50 51 52 53 54 55 56 57 58 59 60 61 62 63 | ||
11 | RPN...................... 0 0 U0 U1 U2 U3 UX SX UW SW UR SR | ||
12 | |||
13 | - PRESENT *must* be in the bottom three bits because swap cache | ||
14 | entries use the top 29 bits. | ||
15 | |||
16 | - FILE *must* be in the bottom three bits because swap cache | ||
17 | entries use the top 29 bits. | ||
18 | */ | ||
19 | |||
20 | /* Definitions for FSL Book-E Cores */ | ||
21 | #define _PAGE_PRESENT 0x00001 /* S: PTE contains a translation */ | ||
22 | #define _PAGE_USER 0x00002 /* S: User page (maps to UR) */ | ||
23 | #define _PAGE_FILE 0x00002 /* S: when !present: nonlinear file mapping */ | ||
24 | #define _PAGE_RW 0x00004 /* S: Write permission (SW) */ | ||
25 | #define _PAGE_DIRTY 0x00008 /* S: Page dirty */ | ||
26 | #define _PAGE_HWEXEC 0x00010 /* H: SX permission */ | ||
27 | #define _PAGE_ACCESSED 0x00020 /* S: Page referenced */ | ||
28 | |||
29 | #define _PAGE_ENDIAN 0x00040 /* H: E bit */ | ||
30 | #define _PAGE_GUARDED 0x00080 /* H: G bit */ | ||
31 | #define _PAGE_COHERENT 0x00100 /* H: M bit */ | ||
32 | #define _PAGE_NO_CACHE 0x00200 /* H: I bit */ | ||
33 | #define _PAGE_WRITETHRU 0x00400 /* H: W bit */ | ||
34 | #define _PAGE_SPECIAL 0x00800 /* S: Special page */ | ||
35 | |||
36 | #ifdef CONFIG_PTE_64BIT | ||
37 | /* ERPN in a PTE never gets cleared, ignore it */ | ||
38 | #define _PTE_NONE_MASK 0xffffffffffff0000ULL | ||
39 | /* We extend the size of the PTE flags area when using 64-bit PTEs */ | ||
40 | #define PTE_RPN_SHIFT (PAGE_SHIFT + 8) | ||
41 | #endif | ||
42 | |||
43 | #define _PMD_PRESENT 0 | ||
44 | #define _PMD_PRESENT_MASK (PAGE_MASK) | ||
45 | #define _PMD_BAD (~PAGE_MASK) | ||
46 | |||
47 | #endif /* __KERNEL__ */ | ||
48 | #endif /* _ASM_POWERPC_PTE_FSL_BOOKE_H */ | ||
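With 64-bit PTEs, the header widens the flags area by bumping PTE_RPN_SHIFT to PAGE_SHIFT + 8. A small sketch of the resulting PFN extraction, assuming 4K pages (the pte_pfn-style helper is ours):

/* Sketch: PFN extraction under CONFIG_PTE_64BIT on FSL BookE. */
#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT    12                        /* assumption: 4K pages */
#define PTE_RPN_SHIFT (PAGE_SHIFT + 8)          /* low 20 bits hold flags */

static inline uint64_t pte_pfn(uint64_t pte)
{
        return pte >> PTE_RPN_SHIFT;
}

int main(void)
{
        uint64_t pte = ((uint64_t)0x123456 << PTE_RPN_SHIFT) | 0x00001;
        assert(pte_pfn(pte) == 0x123456);       /* flags don't leak into PFN */
        return 0;
}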
diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h new file mode 100644 index 000000000000..16e571c7f9ef --- /dev/null +++ b/arch/powerpc/include/asm/pte-hash32.h | |||
@@ -0,0 +1,48 @@ | |||
1 | #ifndef _ASM_POWERPC_PTE_HASH32_H | ||
2 | #define _ASM_POWERPC_PTE_HASH32_H | ||
3 | #ifdef __KERNEL__ | ||
4 | |||
5 | /* | ||
6 | * The "classic" 32-bit implementation of the PowerPC MMU uses a hash | ||
7 | * table containing PTEs, together with a set of 16 segment registers, | ||
8 | * to define the virtual to physical address mapping. | ||
9 | * | ||
10 | * We use the hash table as an extended TLB, i.e. a cache of currently | ||
11 | * active mappings. We maintain a two-level page table tree, much | ||
12 | * like that used by the i386, for the sake of the Linux memory | ||
13 | * management code. Low-level assembler code in hash_low_32.S | ||
14 | * (procedure hash_page) is responsible for extracting ptes from the | ||
15 | * tree and putting them into the hash table when necessary, and | ||
16 | * updating the accessed and modified bits in the page table tree. | ||
17 | */ | ||
18 | |||
19 | #define _PAGE_PRESENT 0x001 /* software: pte contains a translation */ | ||
20 | #define _PAGE_HASHPTE 0x002 /* hash_page has made an HPTE for this pte */ | ||
21 | #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */ | ||
22 | #define _PAGE_USER 0x004 /* usermode access allowed */ | ||
23 | #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */ | ||
24 | #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */ | ||
25 | #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */ | ||
26 | #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */ | ||
27 | #define _PAGE_DIRTY 0x080 /* C: page changed */ | ||
28 | #define _PAGE_ACCESSED 0x100 /* R: page referenced */ | ||
29 | #define _PAGE_EXEC 0x200 /* software: i-cache coherency required */ | ||
30 | #define _PAGE_RW 0x400 /* software: user write access allowed */ | ||
31 | #define _PAGE_SPECIAL 0x800 /* software: Special page */ | ||
32 | |||
33 | #ifdef CONFIG_PTE_64BIT | ||
34 | /* We never clear the high word of the pte */ | ||
35 | #define _PTE_NONE_MASK (0xffffffff00000000ULL | _PAGE_HASHPTE) | ||
36 | #else | ||
37 | #define _PTE_NONE_MASK _PAGE_HASHPTE | ||
38 | #endif | ||
39 | |||
40 | #define _PMD_PRESENT 0 | ||
41 | #define _PMD_PRESENT_MASK (PAGE_MASK) | ||
42 | #define _PMD_BAD (~PAGE_MASK) | ||
43 | |||
44 | /* Hash table based platforms need atomic updates of the linux PTE */ | ||
45 | #define PTE_ATOMIC_UPDATES 1 | ||
46 | |||
47 | #endif /* __KERNEL__ */ | ||
48 | #endif /* _ASM_POWERPC_PTE_HASH32_H */ | ||
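Because hash_page leaves _PAGE_HASHPTE set even after a translation is torn down, an "empty" hash32 PTE is not necessarily all-zero; that is what _PTE_NONE_MASK expresses. The pte_none() test below follows the pattern the 32-bit pgtable code uses (the standalone rendering is ours):

/* Sketch: pte_none() must ignore the bits in _PTE_NONE_MASK. */
#include <assert.h>

#define _PAGE_HASHPTE  0x002
#define _PTE_NONE_MASK _PAGE_HASHPTE    /* 32-bit PTE case */

static inline int pte_none(unsigned long pte_val)
{
        return (pte_val & ~_PTE_NONE_MASK) == 0;
}

int main(void)
{
        assert(pte_none(_PAGE_HASHPTE));        /* stale hash flag only */
        assert(!pte_none(_PAGE_HASHPTE | 0x1)); /* a real translation */
        return 0;
}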
diff --git a/arch/powerpc/include/asm/pte-hash64-4k.h b/arch/powerpc/include/asm/pte-hash64-4k.h new file mode 100644 index 000000000000..c134e809aac3 --- /dev/null +++ b/arch/powerpc/include/asm/pte-hash64-4k.h | |||
@@ -0,0 +1,17 @@ | |||
1 | /* To be included by pgtable-hash64.h only */ | ||
2 | |||
3 | /* PTE bits */ | ||
4 | #define _PAGE_HASHPTE 0x0400 /* software: pte has an associated HPTE */ | ||
5 | #define _PAGE_SECONDARY 0x8000 /* software: HPTE is in secondary group */ | ||
6 | #define _PAGE_GROUP_IX 0x7000 /* software: HPTE index within group */ | ||
7 | #define _PAGE_F_SECOND _PAGE_SECONDARY | ||
8 | #define _PAGE_F_GIX _PAGE_GROUP_IX | ||
9 | #define _PAGE_SPECIAL 0x10000 /* software: special page */ | ||
10 | |||
11 | /* PTE flags to conserve for HPTE identification */ | ||
12 | #define _PAGE_HPTEFLAGS (_PAGE_BUSY | _PAGE_HASHPTE | \ | ||
13 | _PAGE_SECONDARY | _PAGE_GROUP_IX) | ||
14 | |||
15 | /* shift to put page number into pte */ | ||
16 | #define PTE_RPN_SHIFT (17) | ||
17 | |||
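The _PAGE_F_SECOND/_PAGE_F_GIX aliases exist so common code can recover where in the hash table an HPTE was placed. A sketch of that decoding (the helper names are ours; the >> 12 shift follows from the 0x7000 mask above):

/* Sketch: recover the HPTE slot hint from a 4K hash64 PTE. */
#include <assert.h>

#define _PAGE_SECONDARY 0x8000
#define _PAGE_GROUP_IX  0x7000

static inline unsigned int pte_hidx(unsigned long pte)
{
        return (pte & _PAGE_GROUP_IX) >> 12;    /* 3-bit index in group */
}

static inline int pte_in_secondary(unsigned long pte)
{
        return (pte & _PAGE_SECONDARY) != 0;    /* primary or secondary */
}

int main(void)
{
        unsigned long pte = _PAGE_SECONDARY | 0x5000;   /* slot 5, secondary */
        assert(pte_hidx(pte) == 5 && pte_in_secondary(pte));
        return 0;
}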
diff --git a/arch/powerpc/include/asm/pgtable-64k.h b/arch/powerpc/include/asm/pte-hash64-64k.h index 7389003349a6..e05d26fa372f 100644 --- a/arch/powerpc/include/asm/pgtable-64k.h +++ b/arch/powerpc/include/asm/pte-hash64-64k.h | |||
@@ -1,76 +1,6 @@ | |||
1 | #ifndef _ASM_POWERPC_PGTABLE_64K_H | 1 | /* To be included by pgtable-hash64.h only */ |
2 | #define _ASM_POWERPC_PGTABLE_64K_H | ||
3 | |||
4 | #include <asm-generic/pgtable-nopud.h> | ||
5 | |||
6 | |||
7 | #define PTE_INDEX_SIZE 12 | ||
8 | #define PMD_INDEX_SIZE 12 | ||
9 | #define PUD_INDEX_SIZE 0 | ||
10 | #define PGD_INDEX_SIZE 4 | ||
11 | |||
12 | #ifndef __ASSEMBLY__ | ||
13 | #define PTE_TABLE_SIZE (sizeof(real_pte_t) << PTE_INDEX_SIZE) | ||
14 | #define PMD_TABLE_SIZE (sizeof(pmd_t) << PMD_INDEX_SIZE) | ||
15 | #define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE) | ||
16 | |||
17 | #define PTRS_PER_PTE (1 << PTE_INDEX_SIZE) | ||
18 | #define PTRS_PER_PMD (1 << PMD_INDEX_SIZE) | ||
19 | #define PTRS_PER_PGD (1 << PGD_INDEX_SIZE) | ||
20 | |||
21 | #ifdef CONFIG_PPC_SUBPAGE_PROT | ||
22 | /* | ||
23 | * For the sub-page protection option, we extend the PGD with one of | ||
24 | * these. Basically we have a 3-level tree, with the top level being | ||
25 | * the protptrs array. To optimize speed and memory consumption when | ||
26 | * only addresses < 4GB are being protected, pointers to the first | ||
27 | * four pages of sub-page protection words are stored in the low_prot | ||
28 | * array. | ||
29 | * Each page of sub-page protection words protects 1GB (4 bytes | ||
30 | * protects 64k). For the 3-level tree, each page of pointers then | ||
31 | * protects 8TB. | ||
32 | */ | ||
33 | struct subpage_prot_table { | ||
34 | unsigned long maxaddr; /* only addresses < this are protected */ | ||
35 | unsigned int **protptrs[2]; | ||
36 | unsigned int *low_prot[4]; | ||
37 | }; | ||
38 | |||
39 | #undef PGD_TABLE_SIZE | ||
40 | #define PGD_TABLE_SIZE ((sizeof(pgd_t) << PGD_INDEX_SIZE) + \ | ||
41 | sizeof(struct subpage_prot_table)) | ||
42 | |||
43 | #define SBP_L1_BITS (PAGE_SHIFT - 2) | ||
44 | #define SBP_L2_BITS (PAGE_SHIFT - 3) | ||
45 | #define SBP_L1_COUNT (1 << SBP_L1_BITS) | ||
46 | #define SBP_L2_COUNT (1 << SBP_L2_BITS) | ||
47 | #define SBP_L2_SHIFT (PAGE_SHIFT + SBP_L1_BITS) | ||
48 | #define SBP_L3_SHIFT (SBP_L2_SHIFT + SBP_L2_BITS) | ||
49 | |||
50 | extern void subpage_prot_free(pgd_t *pgd); | ||
51 | |||
52 | static inline struct subpage_prot_table *pgd_subpage_prot(pgd_t *pgd) | ||
53 | { | ||
54 | return (struct subpage_prot_table *)(pgd + PTRS_PER_PGD); | ||
55 | } | ||
56 | #endif /* CONFIG_PPC_SUBPAGE_PROT */ | ||
57 | #endif /* __ASSEMBLY__ */ | ||
58 | |||
59 | /* With 4k base page size, hugepage PTEs go at the PMD level */ | ||
60 | #define MIN_HUGEPTE_SHIFT PAGE_SHIFT | ||
61 | |||
62 | /* PMD_SHIFT determines what a second-level page table entry can map */ | ||
63 | #define PMD_SHIFT (PAGE_SHIFT + PTE_INDEX_SIZE) | ||
64 | #define PMD_SIZE (1UL << PMD_SHIFT) | ||
65 | #define PMD_MASK (~(PMD_SIZE-1)) | ||
66 | |||
67 | /* PGDIR_SHIFT determines what a third-level page table entry can map */ | ||
68 | #define PGDIR_SHIFT (PMD_SHIFT + PMD_INDEX_SIZE) | ||
69 | #define PGDIR_SIZE (1UL << PGDIR_SHIFT) | ||
70 | #define PGDIR_MASK (~(PGDIR_SIZE-1)) | ||
71 | 2 | ||
72 | /* Additional PTE bits (don't change without checking asm in hash_low.S) */ | 3 | /* Additional PTE bits (don't change without checking asm in hash_low.S) */ |
73 | #define __HAVE_ARCH_PTE_SPECIAL | ||
74 | #define _PAGE_SPECIAL 0x00000400 /* software: special page */ | 4 | #define _PAGE_SPECIAL 0x00000400 /* software: special page */ |
75 | #define _PAGE_HPTE_SUB 0x0ffff000 /* combo only: sub pages HPTE bits */ | 5 | #define _PAGE_HPTE_SUB 0x0ffff000 /* combo only: sub pages HPTE bits */ |
76 | #define _PAGE_HPTE_SUB0 0x08000000 /* combo only: first sub page */ | 6 | #define _PAGE_HPTE_SUB0 0x08000000 /* combo only: first sub page */ |
@@ -107,21 +37,15 @@ static inline struct subpage_prot_table *pgd_subpage_prot(pgd_t *pgd) | |||
107 | * of addressable physical space, or 46 bits for the special 4k PFNs. | 37 | * of addressable physical space, or 46 bits for the special 4k PFNs. |
108 | */ | 38 | */ |
109 | #define PTE_RPN_SHIFT (30) | 39 | #define PTE_RPN_SHIFT (30) |
110 | #define PTE_RPN_MAX (1UL << (64 - PTE_RPN_SHIFT)) | ||
111 | #define PTE_RPN_MASK (~((1UL<<PTE_RPN_SHIFT)-1)) | ||
112 | |||
113 | /* _PAGE_CHG_MASK masks of bits that are to be preserved accross | ||
114 | * pgprot changes | ||
115 | */ | ||
116 | #define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \ | ||
117 | _PAGE_ACCESSED | _PAGE_SPECIAL) | ||
118 | 40 | ||
119 | /* Bits to mask out from a PMD to get to the PTE page */ | 41 | #ifndef __ASSEMBLY__ |
120 | #define PMD_MASKED_BITS 0x1ff | ||
121 | /* Bits to mask out from a PGD/PUD to get to the PMD page */ | ||
122 | #define PUD_MASKED_BITS 0x1ff | ||
123 | 42 | ||
124 | /* Manipulate "rpte" values */ | 43 | /* |
44 | * With 64K pages on hash table, we have a special PTE format that | ||
45 | * uses a second "half" of the page table to encode sub-page information | ||
46 | * in order to deal with 64K made of 4K HW pages. Thus we override the | ||
47 | * generic accessors and iterators here | ||
48 | */ | ||
125 | #define __real_pte(e,p) ((real_pte_t) { \ | 49 | #define __real_pte(e,p) ((real_pte_t) { \ |
126 | (e), pte_val(*((p) + PTRS_PER_PTE)) }) | 50 | (e), pte_val(*((p) + PTRS_PER_PTE)) }) |
127 | #define __rpte_to_hidx(r,index) ((pte_val((r).pte) & _PAGE_COMBO) ? \ | 51 | #define __rpte_to_hidx(r,index) ((pte_val((r).pte) & _PAGE_COMBO) ? \ |
@@ -130,7 +54,6 @@ static inline struct subpage_prot_table *pgd_subpage_prot(pgd_t *pgd) | |||
130 | #define __rpte_sub_valid(rpte, index) \ | 54 | #define __rpte_sub_valid(rpte, index) \ |
131 | (pte_val(rpte.pte) & (_PAGE_HPTE_SUB0 >> (index))) | 55 | (pte_val(rpte.pte) & (_PAGE_HPTE_SUB0 >> (index))) |
132 | 56 | ||
133 | |||
134 | /* Trick: we set __end to va + 64k, which happens to work for | 57 |
135 | * a 16M page as well, as we want only one iteration | 58 |
136 | */ | 59 | */ |
@@ -152,4 +75,41 @@ static inline struct subpage_prot_table *pgd_subpage_prot(pgd_t *pgd) | |||
152 | remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, \ | 75 | remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, \ |
153 | __pgprot(pgprot_val((prot)) | _PAGE_4K_PFN)) | 76 | __pgprot(pgprot_val((prot)) | _PAGE_4K_PFN)) |
154 | 77 | ||
155 | #endif /* _ASM_POWERPC_PGTABLE_64K_H */ | 78 | |
79 | #ifdef CONFIG_PPC_SUBPAGE_PROT | ||
80 | /* | ||
81 | * For the sub-page protection option, we extend the PGD with one of | ||
82 | * these. Basically we have a 3-level tree, with the top level being | ||
83 | * the protptrs array. To optimize speed and memory consumption when | ||
84 | * only addresses < 4GB are being protected, pointers to the first | ||
85 | * four pages of sub-page protection words are stored in the low_prot | ||
86 | * array. | ||
87 | * Each page of sub-page protection words protects 1GB (4 bytes | ||
88 | * protects 64k). For the 3-level tree, each page of pointers then | ||
89 | * protects 8TB. | ||
90 | */ | ||
91 | struct subpage_prot_table { | ||
92 | unsigned long maxaddr; /* only addresses < this are protected */ | ||
93 | unsigned int **protptrs[2]; | ||
94 | unsigned int *low_prot[4]; | ||
95 | }; | ||
96 | |||
97 | #undef PGD_TABLE_SIZE | ||
98 | #define PGD_TABLE_SIZE ((sizeof(pgd_t) << PGD_INDEX_SIZE) + \ | ||
99 | sizeof(struct subpage_prot_table)) | ||
100 | |||
101 | #define SBP_L1_BITS (PAGE_SHIFT - 2) | ||
102 | #define SBP_L2_BITS (PAGE_SHIFT - 3) | ||
103 | #define SBP_L1_COUNT (1 << SBP_L1_BITS) | ||
104 | #define SBP_L2_COUNT (1 << SBP_L2_BITS) | ||
105 | #define SBP_L2_SHIFT (PAGE_SHIFT + SBP_L1_BITS) | ||
106 | #define SBP_L3_SHIFT (SBP_L2_SHIFT + SBP_L2_BITS) | ||
107 | |||
108 | extern void subpage_prot_free(pgd_t *pgd); | ||
109 | |||
110 | static inline struct subpage_prot_table *pgd_subpage_prot(pgd_t *pgd) | ||
111 | { | ||
112 | return (struct subpage_prot_table *)(pgd + PTRS_PER_PGD); | ||
113 | } | ||
114 | #endif /* CONFIG_PPC_SUBPAGE_PROT */ | ||
115 | #endif /* __ASSEMBLY__ */ | ||
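The 1GB-per-page and 8TB-per-pointer-page figures in the sub-page protection comment follow directly from 64K pages and 8-byte pointers. A quick user-space check of the arithmetic (illustrative only; assumes a 64-bit long):

/* Sketch: verify the subpage_prot coverage claims for 64K pages. */
#include <assert.h>

#define PAGE_SHIFT 16
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
        /* 4 bytes of protection words cover 64K, so one page of words
         * covers (64K / 4) * 64K == 1GB */
        assert((PAGE_SIZE / 4) * (64UL << 10) == (1UL << 30));

        /* one page of 8-byte pointers to such pages covers
         * (64K / 8) * 1GB == 8TB */
        assert((PAGE_SIZE / 8) * (1UL << 30) == (8UL << 40));
        return 0;
}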
diff --git a/arch/powerpc/include/asm/pte-hash64.h b/arch/powerpc/include/asm/pte-hash64.h new file mode 100644 index 000000000000..0419eeb53274 --- /dev/null +++ b/arch/powerpc/include/asm/pte-hash64.h | |||
@@ -0,0 +1,54 @@ | |||
1 | #ifndef _ASM_POWERPC_PTE_HASH64_H | ||
2 | #define _ASM_POWERPC_PTE_HASH64_H | ||
3 | #ifdef __KERNEL__ | ||
4 | |||
5 | /* | ||
6 | * Common bits between 4K and 64K pages in a linux-style PTE. | ||
7 | * These match the bits in the (hardware-defined) PowerPC PTE as closely | ||
8 | * as possible. Additional bits may be defined in pgtable-hash64-*.h | ||
9 | * | ||
10 | * Note: We only support user read/write permissions. The supervisor always | ||
11 | * has full read/write access to pages above PAGE_OFFSET (pages below that | ||
12 | * always use the user access permissions). | ||
13 | * | ||
14 | * We could create a separate kernel read-only mode if we used the 3-PP-bit | ||
15 | * combinations that newer processors provide, but we currently don't. | ||
16 | */ | ||
17 | #define _PAGE_PRESENT 0x0001 /* software: pte contains a translation */ | ||
18 | #define _PAGE_USER 0x0002 /* matches one of the PP bits */ | ||
19 | #define _PAGE_FILE 0x0002 /* (!present only) software: pte holds file offset */ | ||
20 | #define _PAGE_EXEC 0x0004 /* No execute on POWER4 and newer (we invert) */ | ||
21 | #define _PAGE_GUARDED 0x0008 | ||
22 | #define _PAGE_COHERENT 0x0010 /* M: enforce memory coherence (SMP systems) */ | ||
23 | #define _PAGE_NO_CACHE 0x0020 /* I: cache inhibit */ | ||
24 | #define _PAGE_WRITETHRU 0x0040 /* W: cache write-through */ | ||
25 | #define _PAGE_DIRTY 0x0080 /* C: page changed */ | ||
26 | #define _PAGE_ACCESSED 0x0100 /* R: page referenced */ | ||
27 | #define _PAGE_RW 0x0200 /* software: user write access allowed */ | ||
28 | #define _PAGE_BUSY 0x0800 /* software: PTE & hash are busy */ | ||
29 | |||
30 | /* No separate kernel read-only */ | ||
31 | #define _PAGE_KERNEL_RW (_PAGE_RW | _PAGE_DIRTY) /* user access blocked by key */ | ||
32 | #define _PAGE_KERNEL_RO _PAGE_KERNEL_RW | ||
33 | |||
34 | /* Strong Access Ordering */ | ||
35 | #define _PAGE_SAO (_PAGE_WRITETHRU | _PAGE_NO_CACHE | _PAGE_COHERENT) | ||
36 | |||
37 | /* No page size encoding in the linux PTE */ | ||
38 | #define _PAGE_PSIZE 0 | ||
39 | |||
40 | /* PTEIDX nibble */ | ||
41 | #define _PTEIDX_SECONDARY 0x8 | ||
42 | #define _PTEIDX_GROUP_IX 0x7 | ||
43 | |||
44 | /* Hash table based platforms need atomic updates of the linux PTE */ | ||
45 | #define PTE_ATOMIC_UPDATES 1 | ||
46 | |||
47 | #ifdef CONFIG_PPC_64K_PAGES | ||
48 | #include <asm/pte-hash64-64k.h> | ||
49 | #else | ||
50 | #include <asm/pte-hash64-4k.h> | ||
51 | #endif | ||
52 | |||
53 | #endif /* __KERNEL__ */ | ||
54 | #endif /* _ASM_POWERPC_PTE_HASH64_H */ | ||
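_PAGE_SAO deserves a note: write-through and cache-inhibited are mutually exclusive as genuine cache attributes, so the W|I|M combination is otherwise invalid and can be reused to request Strong Access Ordering on CPUs that support it. A trivial check of the encoding (the 0x0070 expectation is just the OR of the three values above):

/* Sketch: the SAO encoding is the (otherwise impossible) W|I|M combo. */
#include <assert.h>

#define _PAGE_COHERENT  0x0010  /* M */
#define _PAGE_NO_CACHE  0x0020  /* I */
#define _PAGE_WRITETHRU 0x0040  /* W */
#define _PAGE_SAO (_PAGE_WRITETHRU | _PAGE_NO_CACHE | _PAGE_COHERENT)

int main(void)
{
        assert(_PAGE_SAO == 0x0070);    /* W, I and M all set at once */
        return 0;
}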
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index f484a343efba..c9ff1ec97479 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h | |||
@@ -155,6 +155,8 @@ | |||
155 | #define CTRL_RUNLATCH 0x1 | 155 | #define CTRL_RUNLATCH 0x1 |
156 | #define SPRN_DABR 0x3F5 /* Data Address Breakpoint Register */ | 156 | #define SPRN_DABR 0x3F5 /* Data Address Breakpoint Register */ |
157 | #define DABR_TRANSLATION (1UL << 2) | 157 | #define DABR_TRANSLATION (1UL << 2) |
158 | #define DABR_DATA_WRITE (1UL << 1) | ||
159 | #define DABR_DATA_READ (1UL << 0) | ||
158 | #define SPRN_DABR2 0x13D /* e300 */ | 160 | #define SPRN_DABR2 0x13D /* e300 */ |
159 | #define SPRN_DABRX 0x3F7 /* Data Address Breakpoint Register Extension */ | 161 | #define SPRN_DABRX 0x3F7 /* Data Address Breakpoint Register Extension */ |
160 | #define DABRX_USER (1UL << 0) | 162 | #define DABRX_USER (1UL << 0) |
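The two new DABR bits sit alongside DABR_TRANSLATION in the register's low three bits, with the watched doubleword address above them. A hedged sketch of composing a value for a write watchpoint (the helper is ours; in practice the value reaches the register via ptrace or the kernel's set_dabr path):

/* Sketch: build a DABR value that traps translated stores to addr. */
#include <assert.h>

#define DABR_TRANSLATION (1UL << 2)
#define DABR_DATA_WRITE  (1UL << 1)
#define DABR_DATA_READ   (1UL << 0)

static inline unsigned long dabr_write_watch(unsigned long addr)
{
        /* the address is doubleword-aligned; low bits select the mode */
        return (addr & ~7UL) | DABR_TRANSLATION | DABR_DATA_WRITE;
}

int main(void)
{
        unsigned long dabr = dabr_write_watch(0xdeadbee8UL);
        assert(dabr == (0xdeadbee8UL | DABR_TRANSLATION | DABR_DATA_WRITE));
        return 0;
}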
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h index 67453766bff1..a56f4d61aa72 100644 --- a/arch/powerpc/include/asm/reg_booke.h +++ b/arch/powerpc/include/asm/reg_booke.h | |||
@@ -10,6 +10,7 @@ | |||
10 | #define __ASM_POWERPC_REG_BOOKE_H__ | 10 | #define __ASM_POWERPC_REG_BOOKE_H__ |
11 | 11 | ||
12 | /* Machine State Register (MSR) Fields */ | 12 | /* Machine State Register (MSR) Fields */ |
13 | #define MSR_GS (1<<28) /* Guest state */ | ||
13 | #define MSR_UCLE (1<<26) /* User-mode cache lock enable */ | 14 | #define MSR_UCLE (1<<26) /* User-mode cache lock enable */ |
14 | #define MSR_SPE (1<<25) /* Enable SPE */ | 15 | #define MSR_SPE (1<<25) /* Enable SPE */ |
15 | #define MSR_DWE (1<<10) /* Debug Wait Enable */ | 16 | #define MSR_DWE (1<<10) /* Debug Wait Enable */ |
@@ -110,6 +111,7 @@ | |||
110 | #define SPRN_L1CSR0 0x3F2 /* L1 Cache Control and Status Register 0 */ | 111 | #define SPRN_L1CSR0 0x3F2 /* L1 Cache Control and Status Register 0 */ |
111 | #define SPRN_L1CSR1 0x3F3 /* L1 Cache Control and Status Register 1 */ | 112 | #define SPRN_L1CSR1 0x3F3 /* L1 Cache Control and Status Register 1 */ |
112 | #define SPRN_MMUCSR0 0x3F4 /* MMU Control and Status Register 0 */ | 113 | #define SPRN_MMUCSR0 0x3F4 /* MMU Control and Status Register 0 */ |
114 | #define SPRN_MMUCFG 0x3F7 /* MMU Configuration Register */ | ||
113 | #define SPRN_PIT 0x3DB /* Programmable Interval Timer */ | 115 | #define SPRN_PIT 0x3DB /* Programmable Interval Timer */ |
114 | #define SPRN_BUCSR 0x3F5 /* Branch Unit Control and Status */ | 116 | #define SPRN_BUCSR 0x3F5 /* Branch Unit Control and Status */ |
115 | #define SPRN_L2CSR0 0x3F9 /* L2 Data Cache Control and Status Register 0 */ | 117 | #define SPRN_L2CSR0 0x3F9 /* L2 Data Cache Control and Status Register 0 */ |
diff --git a/arch/powerpc/include/asm/socket.h b/arch/powerpc/include/asm/socket.h index f5a4e168e498..1e5cfad0e3f7 100644 --- a/arch/powerpc/include/asm/socket.h +++ b/arch/powerpc/include/asm/socket.h | |||
@@ -61,4 +61,7 @@ | |||
61 | 61 | ||
62 | #define SO_MARK 36 | 62 | #define SO_MARK 36 |
63 | 63 | ||
64 | #define SO_TIMESTAMPING 37 | ||
65 | #define SCM_TIMESTAMPING SO_TIMESTAMPING | ||
66 | |||
64 | #endif /* _ASM_POWERPC_SOCKET_H */ | 67 | #endif /* _ASM_POWERPC_SOCKET_H */ |
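SO_TIMESTAMPING is the user-visible half of the new timestamping API; the flags it takes come from <linux/net_tstamp.h>. A hedged usage sketch requesting software receive timestamps (the SOF_* values shown are assumptions copied from that header, not from this patch):

/* Sketch: enable software RX timestamps on a socket. */
#include <sys/socket.h>

#ifndef SO_TIMESTAMPING
#define SO_TIMESTAMPING 37      /* matches the powerpc value above */
#endif
/* assumed values, from linux/net_tstamp.h */
#define SOF_TIMESTAMPING_RX_SOFTWARE (1 << 3)
#define SOF_TIMESTAMPING_SOFTWARE    (1 << 4)

int enable_rx_timestamps(int fd)
{
        int val = SOF_TIMESTAMPING_RX_SOFTWARE | SOF_TIMESTAMPING_SOFTWARE;
        return setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &val, sizeof(val));
}

int main(void)
{
        int fd = socket(AF_INET, SOCK_DGRAM, 0);
        return fd >= 0 ? enable_rx_timestamps(fd) : 1;
}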
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h index 36864364e601..c3b193121f81 100644 --- a/arch/powerpc/include/asm/spinlock.h +++ b/arch/powerpc/include/asm/spinlock.h | |||
@@ -287,6 +287,9 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw) | |||
287 | rw->lock = 0; | 287 | rw->lock = 0; |
288 | } | 288 | } |
289 | 289 | ||
290 | #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) | ||
291 | #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) | ||
292 | |||
290 | #define _raw_spin_relax(lock) __spin_yield(lock) | 293 | #define _raw_spin_relax(lock) __spin_yield(lock) |
291 | #define _raw_read_relax(lock) __rw_yield(lock) | 294 | #define _raw_read_relax(lock) __rw_yield(lock) |
292 | #define _raw_write_relax(lock) __rw_yield(lock) | 295 | #define _raw_write_relax(lock) __rw_yield(lock) |
diff --git a/arch/powerpc/include/asm/suspend.h b/arch/powerpc/include/asm/suspend.h index cbf2c9404c37..c6efc3466aa6 100644 --- a/arch/powerpc/include/asm/suspend.h +++ b/arch/powerpc/include/asm/suspend.h | |||
@@ -3,7 +3,4 @@ | |||
3 | 3 | ||
4 | static inline int arch_prepare_suspend(void) { return 0; } | 4 | static inline int arch_prepare_suspend(void) { return 0; } |
5 | 5 | ||
6 | void save_processor_state(void); | ||
7 | void restore_processor_state(void); | ||
8 | |||
9 | #endif /* __ASM_POWERPC_SUSPEND_H */ | 6 | #endif /* __ASM_POWERPC_SUSPEND_H */ |
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h index d312eec8abb9..affa8caed7eb 100644 --- a/arch/powerpc/include/asm/systbl.h +++ b/arch/powerpc/include/asm/systbl.h | |||
@@ -65,7 +65,7 @@ SYSCALL(ni_syscall) | |||
65 | SYSX(sys_ni_syscall,sys_olduname, sys_olduname) | 65 | SYSX(sys_ni_syscall,sys_olduname, sys_olduname) |
66 | COMPAT_SYS_SPU(umask) | 66 | COMPAT_SYS_SPU(umask) |
67 | SYSCALL_SPU(chroot) | 67 | SYSCALL_SPU(chroot) |
68 | SYSCALL(ustat) | 68 | COMPAT_SYS(ustat) |
69 | SYSCALL_SPU(dup2) | 69 | SYSCALL_SPU(dup2) |
70 | SYSCALL_SPU(getppid) | 70 | SYSCALL_SPU(getppid) |
71 | SYSCALL_SPU(getpgrp) | 71 | SYSCALL_SPU(getpgrp) |
diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h index 2a4be19a92c4..f612798e1c93 100644 --- a/arch/powerpc/include/asm/system.h +++ b/arch/powerpc/include/asm/system.h | |||
@@ -531,7 +531,7 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new, | |||
531 | #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) | 531 | #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) |
532 | #endif | 532 | #endif |
533 | 533 | ||
534 | #define arch_align_stack(x) (x) | 534 | extern unsigned long arch_align_stack(unsigned long sp); |
535 | 535 | ||
536 | /* Used in very early kernel initialization. */ | 536 | /* Used in very early kernel initialization. */ |
537 | extern unsigned long reloc_offset(void); | 537 | extern unsigned long reloc_offset(void); |
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h index 9665a26a253a..9aba5a38a7c4 100644 --- a/arch/powerpc/include/asm/thread_info.h +++ b/arch/powerpc/include/asm/thread_info.h | |||
@@ -12,8 +12,10 @@ | |||
12 | 12 | ||
13 | /* We have 8k stacks on ppc32 and 16k on ppc64 */ | 13 | /* We have 8k stacks on ppc32 and 16k on ppc64 */ |
14 | 14 | ||
15 | #ifdef CONFIG_PPC64 | 15 | #if defined(CONFIG_PPC64) |
16 | #define THREAD_SHIFT 14 | 16 | #define THREAD_SHIFT 14 |
17 | #elif defined(CONFIG_PPC_256K_PAGES) | ||
18 | #define THREAD_SHIFT 15 | ||
17 | #else | 19 | #else |
18 | #define THREAD_SHIFT 13 | 20 | #define THREAD_SHIFT 13 |
19 | #endif | 21 | #endif |
@@ -154,6 +156,13 @@ static inline void set_restore_sigmask(void) | |||
154 | ti->local_flags |= _TLF_RESTORE_SIGMASK; | 156 | ti->local_flags |= _TLF_RESTORE_SIGMASK; |
155 | set_bit(TIF_SIGPENDING, &ti->flags); | 157 | set_bit(TIF_SIGPENDING, &ti->flags); |
156 | } | 158 | } |
159 | |||
160 | #ifdef CONFIG_PPC64 | ||
161 | #define is_32bit_task() (test_thread_flag(TIF_32BIT)) | ||
162 | #else | ||
163 | #define is_32bit_task() (1) | ||
164 | #endif | ||
165 | |||
157 | #endif /* !__ASSEMBLY__ */ | 166 | #endif /* !__ASSEMBLY__ */ |
158 | 167 | ||
159 | #endif /* __KERNEL__ */ | 168 | #endif /* __KERNEL__ */ |
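THREAD_SHIFT is a power-of-two exponent, so the hunk above gives 16K stacks on ppc64, 32K stacks on ppc32 with 256K pages, and 8K otherwise. A one-liner sketch making the derivation explicit:

/* Sketch: the stack sizes implied by the THREAD_SHIFT cases above. */
#include <stdio.h>

#define THREAD_SIZE(shift) (1UL << (shift))

int main(void)
{
        printf("ppc64:             %lu KB\n", THREAD_SIZE(14) >> 10); /* 16 */
        printf("ppc32, 256K pages: %lu KB\n", THREAD_SIZE(15) >> 10); /* 32 */
        printf("other ppc32:       %lu KB\n", THREAD_SIZE(13) >> 10); /*  8 */
        return 0;
}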
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h index 375258559ae6..054a16d68082 100644 --- a/arch/powerpc/include/asm/topology.h +++ b/arch/powerpc/include/asm/topology.h | |||
@@ -24,11 +24,6 @@ static inline cpumask_t node_to_cpumask(int node) | |||
24 | 24 | ||
25 | #define cpumask_of_node(node) (&numa_cpumask_lookup_table[node]) | 25 | #define cpumask_of_node(node) (&numa_cpumask_lookup_table[node]) |
26 | 26 | ||
27 | static inline int node_to_first_cpu(int node) | ||
28 | { | ||
29 | return cpumask_first(cpumask_of_node(node)); | ||
30 | } | ||
31 | |||
32 | int of_node_to_nid(struct device_node *device); | 27 | int of_node_to_nid(struct device_node *device); |
33 | 28 | ||
34 | struct pci_bus; | 29 | struct pci_bus; |
diff --git a/arch/powerpc/include/asm/udbg.h b/arch/powerpc/include/asm/udbg.h index 6418ceea44b7..cd21e5e6b04f 100644 --- a/arch/powerpc/include/asm/udbg.h +++ b/arch/powerpc/include/asm/udbg.h | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/init.h> | 15 | #include <linux/init.h> |
16 | 16 | ||
17 | extern void (*udbg_putc)(char c); | 17 | extern void (*udbg_putc)(char c); |
18 | extern void (*udbg_flush)(void); | ||
18 | extern int (*udbg_getc)(void); | 19 | extern int (*udbg_getc)(void); |
19 | extern int (*udbg_getc_poll)(void); | 20 | extern int (*udbg_getc_poll)(void); |
20 | 21 | ||