Diffstat (limited to 'arch/powerpc/mm')

 arch/powerpc/mm/Makefile             |  7 ++++---
 arch/powerpc/mm/hash_native_64.c     | 13 +++++++++++--
 arch/powerpc/mm/init_64.c            |  2 ++
 arch/powerpc/mm/mmu_context_nohash.c | 19 ++++++++++++-------
 arch/powerpc/mm/numa.c               |  2 ++
 5 files changed, 31 insertions(+), 12 deletions(-)
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index b746f4ca4209..c4bcf072cb3c 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -11,10 +11,11 @@ obj-y				:= fault.o mem.o pgtable.o gup.o \
 				   pgtable_$(CONFIG_WORD_SIZE).o
 obj-$(CONFIG_PPC_MMU_NOHASH)	+= mmu_context_nohash.o tlb_nohash.o \
 				   tlb_nohash_low.o
-hash-$(CONFIG_PPC_NATIVE)	:= hash_native_64.o
-obj-$(CONFIG_PPC64)		+= hash_utils_64.o \
+obj-$(CONFIG_PPC64)		+= mmap_64.o
+hash64-$(CONFIG_PPC_NATIVE)	:= hash_native_64.o
+obj-$(CONFIG_PPC_STD_MMU_64)	+= hash_utils_64.o \
 				   slb_low.o slb.o stab.o \
-				   mmap_64.o $(hash-y)
+				   mmap_64.o $(hash64-y)
 obj-$(CONFIG_PPC_STD_MMU_32)	+= ppc_mmu_32.o
 obj-$(CONFIG_PPC_STD_MMU)	+= hash_low_$(CONFIG_WORD_SIZE).o \
 				   tlb_hash$(CONFIG_WORD_SIZE).o \
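Note: the hash-MMU objects move from CONFIG_PPC64 to CONFIG_PPC_STD_MMU_64, so 64-bit builds using a non-hash MMU no longer pull them in, and the helper variable is renamed hash-y to hash64-y to match. A minimal sketch of the same Kbuild idiom, with invented names: the `$(CONFIG_...)` suffix expands to `y` only when the option is enabled, so `$(foo-y)` contributes the object exactly in that case.

	# Invented names; same idiom as hash64-$(CONFIG_PPC_NATIVE) above.
	# With CONFIG_FEATURE=y this assigns foo-y; otherwise it assigns the
	# unused variable "foo-" (or "foo-m"), leaving $(foo-y) empty.
	foo-$(CONFIG_FEATURE)	:= extra.o
	obj-$(CONFIG_SUBSYS)	+= core.o $(foo-y)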
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 34e5c0b219b9..056d23a1b105 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -27,6 +27,7 @@
 #include <asm/cputable.h>
 #include <asm/udbg.h>
 #include <asm/kexec.h>
+#include <asm/ppc-opcode.h>
 
 #ifdef DEBUG_LOW
 #define DBG_LOW(fmt...) udbg_printf(fmt)
@@ -49,14 +50,21 @@ static inline void __tlbie(unsigned long va, int psize, int ssize)
 	case MMU_PAGE_4K:
 		va &= ~0xffful;
 		va |= ssize << 8;
-		asm volatile("tlbie %0,0" : : "r" (va) : "memory");
+		asm volatile(ASM_MMU_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0),
+					       %2)
+			     : : "r" (va), "r"(0), "i" (MMU_FTR_TLBIE_206)
+			     : "memory");
 		break;
 	default:
 		penc = mmu_psize_defs[psize].penc;
 		va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
 		va |= penc << 12;
 		va |= ssize << 8;
-		asm volatile("tlbie %0,1" : : "r" (va) : "memory");
+		va |= 1; /* L */
+		asm volatile(ASM_MMU_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0),
+					       %2)
+			     : : "r" (va), "r"(0), "i" (MMU_FTR_TLBIE_206)
+			     : "memory");
 		break;
 	}
 }
@@ -80,6 +88,7 @@ static inline void __tlbiel(unsigned long va, int psize, int ssize)
 		va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
 		va |= penc << 12;
 		va |= ssize << 8;
+		va |= 1; /* L */
 		asm volatile(".long 0x7c000224 | (%0 << 11) | (1 << 21)"
 			     : : "r"(va) : "memory");
 		break;
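Note: ASM_MMU_FTR_IFCLR selects its first instruction sequence when the named MMU feature bit is clear and its second when it is set, with the choice applied in place by the kernel's boot-time feature-fixup pass rather than by a runtime branch. So CPUs without MMU_FTR_TLBIE_206 keep the classic "tlbie" form, while PowerISA 2.06 CPUs get the encoding emitted by PPC_TLBIE (VA in RB, an RS operand of zero here), with the large-page "L" bit folded into the low bit of the VA (va |= 1) instead of the old instruction's immediate field. A stand-alone sketch of the selection idea, using a function pointer as a portable analogy for in-place instruction patching (all names invented):

	#include <stdio.h>

	#define FTR_TLBIE_206 0x1  /* invented stand-in for MMU_FTR_TLBIE_206 */

	static void tlbie_old(unsigned long va) { printf("old tlbie %#lx\n", va); }
	static void tlbie_206(unsigned long va) { printf("2.06 tlbie %#lx\n", va); }

	static void (*tlbie_impl)(unsigned long);

	/* Done once, early, from the CPU/MMU feature word -- the analog of
	 * the kernel patching the alternative sequence in at boot. */
	static void select_tlbie(unsigned long features)
	{
		/* IFCLR: the old form runs only when the feature bit is clear. */
		tlbie_impl = (features & FTR_TLBIE_206) ? tlbie_206 : tlbie_old;
	}

	int main(void)
	{
		select_tlbie(FTR_TLBIE_206);
		tlbie_impl(0x1000);	/* prints "2.06 tlbie 0x1000" */
		return 0;
	}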
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 3e6a6543f53a..68a821add28d 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -66,6 +66,7 @@
 
 #include "mmu_decl.h"
 
+#ifdef CONFIG_PPC_STD_MMU_64
 #if PGTABLE_RANGE > USER_VSID_RANGE
 #warning Limited user VSID range means pagetable space is wasted
 #endif
@@ -73,6 +74,7 @@
 #if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
 #warning TASK_SIZE is smaller than it needs to be.
 #endif
+#endif /* CONFIG_PPC_STD_MMU_64 */
 
 phys_addr_t memstart_addr = ~0;
 phys_addr_t kernstart_addr;
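Note: the new guard keeps these hash-MMU sanity checks from being evaluated on 64-bit non-hash builds, where constants like USER_VSID_RANGE come from headers that are not included. This matters because the preprocessor treats undefined identifiers in #if as 0, so an unguarded check could fire (or stay silent) for the wrong reason. A stand-alone demonstration of that preprocessor rule (macros invented):

	#include <stdio.h>

	#define PGTABLE_RANGE_DEMO (1UL << 46)
	/* USER_VSID_RANGE_DEMO deliberately left undefined */

	/* Undefined identifiers in #if evaluate to 0, so this reads
	 * as "PGTABLE_RANGE_DEMO > 0" and is trivially true. */
	#if PGTABLE_RANGE_DEMO > USER_VSID_RANGE_DEMO
	#define CHECK_FIRED 1
	#else
	#define CHECK_FIRED 0
	#endif

	int main(void)
	{
		printf("check fired: %d\n", CHECK_FIRED);	/* prints 1 */
		return 0;
	}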
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
index 030d0005b4d2..8343986809c0 100644
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -46,7 +46,7 @@ static unsigned int next_context, nr_free_contexts;
 static unsigned long *context_map;
 static unsigned long *stale_map[NR_CPUS];
 static struct mm_struct **context_mm;
-static spinlock_t context_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(context_lock);
 
 #define CTX_MAP_SIZE	\
 	(sizeof(unsigned long) * (last_context / BITS_PER_LONG + 1))
@@ -73,7 +73,6 @@ static unsigned int steal_context_smp(unsigned int id)
 	struct mm_struct *mm;
 	unsigned int cpu, max;
 
- again:
 	max = last_context - first_context;
 
 	/* Attempt to free next_context first and then loop until we manage */
@@ -108,7 +107,9 @@ static unsigned int steal_context_smp(unsigned int id)
 	spin_unlock(&context_lock);
 	cpu_relax();
 	spin_lock(&context_lock);
-	goto again;
+
+	/* This will cause the caller to try again */
+	return MMU_NO_CONTEXT;
 }
 #endif /* CONFIG_SMP */
 
@@ -194,6 +195,8 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
 		WARN_ON(prev->context.active < 1);
 		prev->context.active--;
 	}
+
+ again:
 #endif /* CONFIG_SMP */
 
 	/* If we already have a valid assigned context, skip all that */
@@ -212,7 +215,8 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
 #ifdef CONFIG_SMP
 		if (num_online_cpus() > 1) {
 			id = steal_context_smp(id);
-			goto stolen;
+			if (id == MMU_NO_CONTEXT)
+				goto again;
 		}
 #endif /* CONFIG_SMP */
 		id = steal_context_up(id);
@@ -272,6 +276,7 @@ int init_new_context(struct task_struct *t, struct mm_struct *mm)
  */
 void destroy_context(struct mm_struct *mm)
 {
+	unsigned long flags;
 	unsigned int id;
 
 	if (mm->context.id == MMU_NO_CONTEXT)
@@ -279,18 +284,18 @@ void destroy_context(struct mm_struct *mm)
 
 	WARN_ON(mm->context.active != 0);
 
-	spin_lock(&context_lock);
+	spin_lock_irqsave(&context_lock, flags);
 	id = mm->context.id;
 	if (id != MMU_NO_CONTEXT) {
 		__clear_bit(id, context_map);
 		mm->context.id = MMU_NO_CONTEXT;
 #ifdef DEBUG_MAP_CONSISTENCY
 		mm->context.active = 0;
-		context_mm[id] = NULL;
 #endif
+		context_mm[id] = NULL;
 		nr_free_contexts++;
 	}
-	spin_unlock(&context_lock);
+	spin_unlock_irqrestore(&context_lock, flags);
 }
 
 #ifdef CONFIG_SMP
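Note: three separate fixes land here. The long-deprecated SPIN_LOCK_UNLOCKED static initializer (which predates lockdep) becomes DEFINE_SPINLOCK(). steal_context_smp() used to "goto again" within its own body after dropping and retaking context_lock, even though everything it had computed before dropping the lock could be stale; it now returns MMU_NO_CONTEXT and the caller, switch_mmu_context(), restarts the whole allocation from its own "again:" label under the re-acquired lock. And destroy_context() moves to the irqsave lock variants so it remains safe if the context lock is ever taken with interrupts disabled. A stand-alone pthread sketch of the retry restructuring (all names invented):

	#include <pthread.h>

	#define NO_ID (~0u)

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static unsigned int free_ids;
	static unsigned int steal_attempts;

	/* A helper that must drop and retake the lock cannot safely jump
	 * back into its own body: whatever it observed earlier may be
	 * stale. It returns a sentinel instead and lets the caller retry. */
	static unsigned int try_steal_id(void)
	{
		pthread_mutex_unlock(&lock);	/* e.g. let others make progress */
		pthread_mutex_lock(&lock);
		if (++steal_attempts < 3)
			return NO_ID;		/* not yet; caller must retry */
		return 42;			/* pretend we reclaimed id 42 */
	}

	static unsigned int get_id(void)
	{
		unsigned int id;

		pthread_mutex_lock(&lock);
	again:
		if (free_ids)
			id = --free_ids;	/* fast path: an id is free */
		else if ((id = try_steal_id()) == NO_ID)
			goto again;		/* state may have changed; redo */
		pthread_mutex_unlock(&lock);
		return id;
	}

	int main(void)
	{
		return (int)get_id();	/* first call steals after a few retries */
	}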
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 9047145095aa..b037d95eeadc 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -981,6 +981,8 @@ void __init do_init_bootmem(void)
 		mark_reserved_regions_for_nid(nid);
 		sparse_memory_present_with_active_regions(nid);
 	}
+
+	init_bootmem_done = 1;
 }
 
 void __init paging_init(void)
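Note: this sets the init_bootmem_done marker at the end of the NUMA do_init_bootmem(), presumably mirroring the non-NUMA variant, so code running later can tell that the bootmem allocator is up. A generic stand-alone sketch of why such a one-shot flag is useful (invented names, not the kernel's actual helpers): an allocation helper that may run on either side of the transition consults the flag to pick a path.

	#include <stddef.h>
	#include <stdlib.h>

	static int bootmem_ready;		/* set once, like init_bootmem_done */

	static char early_pool[4096];		/* tiny static pool for early boot */
	static size_t early_used;

	/* Very-early allocator: carve from the static pool. */
	static void *early_alloc(size_t size)
	{
		if (early_used + size > sizeof(early_pool))
			return NULL;
		early_used += size;
		return &early_pool[early_used - size];
	}

	/* Dual-path helper: real allocator once it is up, pool before. */
	static void *alloc_maybe_early(size_t size)
	{
		return bootmem_ready ? malloc(size) : early_alloc(size);
	}

	int main(void)
	{
		void *a = alloc_maybe_early(64);	/* from the static pool */
		bootmem_ready = 1;			/* "the allocator is up" */
		void *b = alloc_maybe_early(64);	/* now from malloc() */
		free(b);				/* a stays in the pool */
		return a && b ? 0 : 1;
	}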