Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--  arch/powerpc/mm/Makefile              |   4
-rw-r--r--  arch/powerpc/mm/dma-noncoherent.c     |   1
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c       |  26
-rw-r--r--  arch/powerpc/mm/init_64.c             | 132
-rw-r--r--  arch/powerpc/mm/mem.c                 |   3
-rw-r--r--  arch/powerpc/mm/mmu_context_hash32.c  |   2
-rw-r--r--  arch/powerpc/mm/numa.c                |   2
-rw-r--r--  arch/powerpc/mm/pgtable_32.c          |   2
-rw-r--r--  arch/powerpc/mm/pgtable_64.c          |   2
-rw-r--r--  arch/powerpc/mm/ppc_mmu_32.c          |   2
-rw-r--r--  arch/powerpc/mm/stab.c                | 286
-rw-r--r--  arch/powerpc/mm/tlb_low_64e.S         |  69
12 files changed, 194 insertions, 337 deletions
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index 51230ee6a407..d0130fff20e5 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -13,9 +13,7 @@ obj-$(CONFIG_PPC_MMU_NOHASH) += mmu_context_nohash.o tlb_nohash.o \
 				   tlb_nohash_low.o
 obj-$(CONFIG_PPC_BOOK3E)	+= tlb_low_$(CONFIG_WORD_SIZE)e.o
 hash64-$(CONFIG_PPC_NATIVE)	:= hash_native_64.o
-obj-$(CONFIG_PPC_STD_MMU_64)	+= hash_utils_64.o \
-				   slb_low.o slb.o stab.o \
-				   $(hash64-y)
+obj-$(CONFIG_PPC_STD_MMU_64)	+= hash_utils_64.o slb_low.o slb.o $(hash64-y)
 obj-$(CONFIG_PPC_STD_MMU_32)	+= ppc_mmu_32.o
 obj-$(CONFIG_PPC_STD_MMU)	+= hash_low_$(CONFIG_WORD_SIZE).o \
 				   tlb_hash$(CONFIG_WORD_SIZE).o \
diff --git a/arch/powerpc/mm/dma-noncoherent.c b/arch/powerpc/mm/dma-noncoherent.c
index 7b6c10750179..d85e86aac7fb 100644
--- a/arch/powerpc/mm/dma-noncoherent.c
+++ b/arch/powerpc/mm/dma-noncoherent.c
@@ -33,6 +33,7 @@
 #include <linux/export.h>
 
 #include <asm/tlbflush.h>
+#include <asm/dma.h>
 
 #include "mmu_decl.h"
 
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 88fdd9d25077..daee7f4e5a14 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -243,7 +243,7 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG
-static int htab_remove_mapping(unsigned long vstart, unsigned long vend,
+int htab_remove_mapping(unsigned long vstart, unsigned long vend,
 			int psize, int ssize)
 {
 	unsigned long vaddr;
@@ -821,21 +821,14 @@ static void __init htab_initialize(void)
 
 void __init early_init_mmu(void)
 {
-	/* Setup initial STAB address in the PACA */
-	get_paca()->stab_real = __pa((u64)&initial_stab);
-	get_paca()->stab_addr = (u64)&initial_stab;
-
 	/* Initialize the MMU Hash table and create the linear mapping
-	 * of memory. Has to be done before stab/slb initialization as
-	 * this is currently where the page size encoding is obtained
+	 * of memory. Has to be done before SLB initialization as this is
+	 * currently where the page size encoding is obtained.
 	 */
 	htab_initialize();
 
-	/* Initialize stab / SLB management */
-	if (mmu_has_feature(MMU_FTR_SLB))
-		slb_initialize();
-	else
-		stab_initialize(get_paca()->stab_real);
+	/* Initialize SLB management */
+	slb_initialize();
 }
 
 #ifdef CONFIG_SMP
@@ -845,13 +838,8 @@ void early_init_mmu_secondary(void)
 	if (!firmware_has_feature(FW_FEATURE_LPAR))
 		mtspr(SPRN_SDR1, _SDR1);
 
-	/* Initialize STAB/SLB. We use a virtual address as it works
-	 * in real mode on pSeries.
-	 */
-	if (mmu_has_feature(MMU_FTR_SLB))
-		slb_initialize();
-	else
-		stab_initialize(get_paca()->stab_addr);
+	/* Initialize SLB */
+	slb_initialize();
 }
 #endif /* CONFIG_SMP */
 
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index e3734edffa69..253b4b971c8a 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -175,9 +175,10 @@ static unsigned long __meminit vmemmap_section_start(unsigned long page)
 static int __meminit vmemmap_populated(unsigned long start, int page_size)
 {
 	unsigned long end = start + page_size;
+	start = (unsigned long)(pfn_to_page(vmemmap_section_start(start)));
 
 	for (; start < end; start += (PAGES_PER_SECTION * sizeof(struct page)))
-		if (pfn_valid(vmemmap_section_start(start)))
+		if (pfn_valid(page_to_pfn((struct page *)start)))
 			return 1;
 
 	return 0;
@@ -212,6 +213,13 @@ static void __meminit vmemmap_create_mapping(unsigned long start,
 	for (i = 0; i < page_size; i += PAGE_SIZE)
 		BUG_ON(map_kernel_page(start + i, phys, flags));
 }
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+static void vmemmap_remove_mapping(unsigned long start,
+				   unsigned long page_size)
+{
+}
+#endif
 #else /* CONFIG_PPC_BOOK3E */
 static void __meminit vmemmap_create_mapping(unsigned long start,
 					     unsigned long page_size,
@@ -223,17 +231,42 @@ static void __meminit vmemmap_create_mapping(unsigned long start,
 						  mmu_kernel_ssize);
 	BUG_ON(mapped < 0);
 }
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+extern int htab_remove_mapping(unsigned long vstart, unsigned long vend,
+			       int psize, int ssize);
+
+static void vmemmap_remove_mapping(unsigned long start,
+				   unsigned long page_size)
+{
+	int mapped = htab_remove_mapping(start, start + page_size,
+					 mmu_vmemmap_psize,
+					 mmu_kernel_ssize);
+	BUG_ON(mapped < 0);
+}
+#endif
+
 #endif /* CONFIG_PPC_BOOK3E */
 
 struct vmemmap_backing *vmemmap_list;
+static struct vmemmap_backing *next;
+static int num_left;
+static int num_freed;
 
 static __meminit struct vmemmap_backing * vmemmap_list_alloc(int node)
 {
-	static struct vmemmap_backing *next;
-	static int num_left;
+	struct vmemmap_backing *vmem_back;
+	/* get from freed entries first */
+	if (num_freed) {
+		num_freed--;
+		vmem_back = next;
+		next = next->list;
+
+		return vmem_back;
+	}
 
 	/* allocate a page when required and hand out chunks */
-	if (!next || !num_left) {
+	if (!num_left) {
 		next = vmemmap_alloc_block(PAGE_SIZE, node);
 		if (unlikely(!next)) {
 			WARN_ON(1);
@@ -296,10 +329,85 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
 	return 0;
 }
 
-void vmemmap_free(unsigned long start, unsigned long end)
+#ifdef CONFIG_MEMORY_HOTPLUG
+static unsigned long vmemmap_list_free(unsigned long start)
 {
+	struct vmemmap_backing *vmem_back, *vmem_back_prev;
+
+	vmem_back_prev = vmem_back = vmemmap_list;
+
+	/* look for it with prev pointer recorded */
+	for (; vmem_back; vmem_back = vmem_back->list) {
+		if (vmem_back->virt_addr == start)
+			break;
+		vmem_back_prev = vmem_back;
+	}
+
+	if (unlikely(!vmem_back)) {
+		WARN_ON(1);
+		return 0;
+	}
+
+	/* remove it from vmemmap_list */
+	if (vmem_back == vmemmap_list) /* remove head */
+		vmemmap_list = vmem_back->list;
+	else
+		vmem_back_prev->list = vmem_back->list;
+
+	/* make next point to this freed entry */
+	vmem_back->list = next;
+	next = vmem_back;
+	num_freed++;
+
+	return vmem_back->phys;
 }
 
+void __ref vmemmap_free(unsigned long start, unsigned long end)
+{
+	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
+
+	start = _ALIGN_DOWN(start, page_size);
+
+	pr_debug("vmemmap_free %lx...%lx\n", start, end);
+
+	for (; start < end; start += page_size) {
+		unsigned long addr;
+
+		/*
+		 * the section has already been marked as invalid, so
+		 * vmemmap_populated() true means some other sections are
+		 * still in this page, so skip it.
+		 */
+		if (vmemmap_populated(start, page_size))
+			continue;
+
+		addr = vmemmap_list_free(start);
+		if (addr) {
+			struct page *page = pfn_to_page(addr >> PAGE_SHIFT);
+
+			if (PageReserved(page)) {
+				/* allocated from bootmem */
+				if (page_size < PAGE_SIZE) {
+					/*
+					 * this shouldn't happen, but if it is
+					 * the case, leave the memory there
+					 */
+					WARN_ON_ONCE(1);
+				} else {
+					unsigned int nr_pages =
+						1 << get_order(page_size);
+					while (nr_pages--)
+						free_reserved_page(page++);
+				}
+			} else
+				free_pages((unsigned long)(__va(addr)),
+							get_order(page_size));
+
+			vmemmap_remove_mapping(start, page_size);
+		}
+	}
+}
+#endif
 void register_page_bootmem_memmap(unsigned long section_nr,
 				  struct page *start_page, unsigned long size)
 {
@@ -331,16 +439,16 @@ struct page *realmode_pfn_to_page(unsigned long pfn)
 		if (pg_va < vmem_back->virt_addr)
 			continue;
 
-		/* Check that page struct is not split between real pages */
-		if ((pg_va + sizeof(struct page)) >
-				(vmem_back->virt_addr + page_size))
-			return NULL;
-
-		page = (struct page *) (vmem_back->phys + pg_va -
+		/* Now that vmemmap_list entries can be freed, check them all */
+		if ((pg_va + sizeof(struct page)) <=
+				(vmem_back->virt_addr + page_size)) {
+			page = (struct page *) (vmem_back->phys + pg_va -
 				vmem_back->virt_addr);
 			return page;
+		}
 	}
 
+	/* The page struct is probably split between real pages */
 	return NULL;
 }
 EXPORT_SYMBOL_GPL(realmode_pfn_to_page);
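
The init_64.c hunks above turn the vmemmap_list bookkeeping into a small freelist allocator: vmemmap_list_free() unlinks an entry and pushes it onto `next`, and vmemmap_list_alloc() pops that freelist before carving fresh entries out of a newly allocated block. Below is a minimal self-contained C sketch of the same scheme; the struct fields and counters mirror the kernel names, while ENTRIES_PER_BLOCK, the calloc-backed block source, and the main() harness are invented for illustration.

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for struct vmemmap_backing. */
struct vmemmap_backing {
	struct vmemmap_backing *list;	/* block chaining / freelist link */
	unsigned long phys;
	unsigned long virt_addr;
};

static struct vmemmap_backing *next;	/* fresh-block cursor, doubles as freelist head */
static int num_left;			/* fresh entries left in the current block */
static int num_freed;			/* recycled entries on the freelist */

#define ENTRIES_PER_BLOCK 8	/* kernel: PAGE_SIZE / sizeof(struct vmemmap_backing) */

static struct vmemmap_backing *list_alloc(void)
{
	struct vmemmap_backing *vmem_back;

	/* get from freed entries first, exactly as the patched allocator does */
	if (num_freed) {
		num_freed--;
		vmem_back = next;
		next = next->list;	/* pop; the tail restores the fresh-block cursor */
		return vmem_back;
	}

	/* allocate a block when required and hand out chunks */
	if (!num_left) {
		next = calloc(ENTRIES_PER_BLOCK, sizeof(*next));
		if (!next)
			return NULL;
		num_left = ENTRIES_PER_BLOCK;
	}
	num_left--;
	return next++;
}

static void list_free(struct vmemmap_backing *vmem_back)
{
	/* push onto the freelist; the old "next" is saved in the link field */
	vmem_back->list = next;
	next = vmem_back;
	num_freed++;
}

int main(void)
{
	struct vmemmap_backing *a = list_alloc();
	struct vmemmap_backing *b = list_alloc();

	list_free(a);
	/* the very next allocation reuses "a" instead of a fresh slot */
	printf("reused a: %s\n", list_alloc() == a ? "yes" : "no");
	(void)b;
	return 0;
}

Note how `next` serving as both the fresh-block cursor and the freelist head is safe: each freed entry's link field records the previous `next`, so once the freelist drains, allocation resumes from the untouched fresh block.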
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 2c8e90f5789e..e0f7a189c48e 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -128,7 +128,8 @@ int arch_add_memory(int nid, u64 start, u64 size)
 		return -EINVAL;
 
 	/* this should work for most non-highmem platforms */
-	zone = pgdata->node_zones;
+	zone = pgdata->node_zones +
+		zone_for_memory(nid, start, size, 0);
 
 	return __add_pages(nid, zone, start_pfn, nr_pages);
 }
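
The mem.c fix stops hard-coding the first zone: the old code always took pgdata->node_zones (index 0), while the new code offsets by whatever index zone_for_memory() picks for the hotplugged range. A toy C sketch of that pointer arithmetic follows; the enum, the struct layout, and the always-ZONE_MOVABLE policy are stand-ins, not the kernel's actual definitions.

#include <stdio.h>

/* Stand-ins for the kernel's zone machinery; layouts are illustrative only. */
enum zone_type { ZONE_DMA, ZONE_NORMAL, ZONE_MOVABLE, MAX_NR_ZONES };

struct zone { const char *name; };

struct pglist_data { struct zone node_zones[MAX_NR_ZONES]; };

/* Toy policy: pretend every hotplugged range belongs in ZONE_MOVABLE. */
static int zone_for_memory(int nid, unsigned long start, unsigned long size,
			   int zone_default)
{
	(void)nid; (void)start; (void)size; (void)zone_default;
	return ZONE_MOVABLE;
}

int main(void)
{
	struct pglist_data pgdata = {
		.node_zones = { { "DMA" }, { "Normal" }, { "Movable" } },
	};

	/* old: always the first zone of the node */
	struct zone *zone_old = pgdata.node_zones;

	/* new: offset by the zone index chosen for this range */
	struct zone *zone_new = pgdata.node_zones +
		zone_for_memory(0, 0x10000000UL, 0x1000000UL, 0);

	printf("old picked %s, new picked %s\n", zone_old->name, zone_new->name);
	return 0;
}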
diff --git a/arch/powerpc/mm/mmu_context_hash32.c b/arch/powerpc/mm/mmu_context_hash32.c
index 78fef6726e10..aa5a7fd89461 100644
--- a/arch/powerpc/mm/mmu_context_hash32.c
+++ b/arch/powerpc/mm/mmu_context_hash32.c
@@ -2,7 +2,7 @@
  * This file contains the routines for handling the MMU on those
  * PowerPC implementations where the MMU substantially follows the
  * architecture specification. This includes the 6xx, 7xx, 7xxx,
- * 8260, and POWER3 implementations but excludes the 8xx and 4xx.
+ * and 8260 implementations but excludes the 8xx and 4xx.
  * -- paulus
  *
  * Derived from arch/ppc/mm/init.c:
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 3b181b22cd46..d3e9a78eaed3 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -611,8 +611,8 @@ static int cpu_numa_callback(struct notifier_block *nfb, unsigned long action,
 	case CPU_UP_CANCELED:
 	case CPU_UP_CANCELED_FROZEN:
 		unmap_cpu_from_node(lcpu);
-		break;
 		ret = NOTIFY_OK;
+		break;
 #endif
 	}
 	return ret;
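
The numa.c change is a statement-ordering bugfix: with `break` before the assignment, `ret = NOTIFY_OK` was dead code and the CPU_UP_CANCELED path fell through returning NOTIFY_DONE. A compilable miniature of the corrected callback (the constants are stand-ins for the kernel's notifier values):

#include <stdio.h>

#define NOTIFY_DONE 0	/* stand-ins for the kernel's notifier return codes */
#define NOTIFY_OK   1

enum { CPU_UP_CANCELED = 1 };

/* Corrected shape: the assignment must precede the break, otherwise
 * "ret = NOTIFY_OK" is unreachable and the callback reports NOTIFY_DONE. */
static int cpu_numa_callback(unsigned long action)
{
	int ret = NOTIFY_DONE;

	switch (action) {
	case CPU_UP_CANCELED:
		/* unmap_cpu_from_node(lcpu); */
		ret = NOTIFY_OK;
		break;
	}
	return ret;
}

int main(void)
{
	printf("CPU_UP_CANCELED -> %d (expect %d)\n",
	       cpu_numa_callback(CPU_UP_CANCELED), NOTIFY_OK);
	return 0;
}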
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 343a87fa78b5..cf11342bf519 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -41,7 +41,7 @@ unsigned long ioremap_base;
 unsigned long ioremap_bot;
 EXPORT_SYMBOL(ioremap_bot);	/* aka VMALLOC_END */
 
-#if defined(CONFIG_6xx) || defined(CONFIG_POWER3)
+#ifdef CONFIG_6xx
 #define HAVE_BATS	1
 #endif
 
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index f6ce1f111f5b..3b3c4d34c7a0 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -68,7 +68,7 @@
 unsigned long ioremap_bot = IOREMAP_BASE;
 
 #ifdef CONFIG_PPC_MMU_NOHASH
-static void *early_alloc_pgtable(unsigned long size)
+static __ref void *early_alloc_pgtable(unsigned long size)
 {
 	void *pt;
 
diff --git a/arch/powerpc/mm/ppc_mmu_32.c b/arch/powerpc/mm/ppc_mmu_32.c
index 11571e118831..5029dc19b517 100644
--- a/arch/powerpc/mm/ppc_mmu_32.c
+++ b/arch/powerpc/mm/ppc_mmu_32.c
@@ -2,7 +2,7 @@
  * This file contains the routines for handling the MMU on those
  * PowerPC implementations where the MMU substantially follows the
  * architecture specification. This includes the 6xx, 7xx, 7xxx,
- * 8260, and POWER3 implementations but excludes the 8xx and 4xx.
+ * and 8260 implementations but excludes the 8xx and 4xx.
  * -- paulus
  *
  * Derived from arch/ppc/mm/init.c:
diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c
deleted file mode 100644
index 3f8efa6f2997..000000000000
--- a/arch/powerpc/mm/stab.c
+++ /dev/null
@@ -1,286 +0,0 @@
-/*
- * PowerPC64 Segment Translation Support.
- *
- * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
- * Copyright (c) 2001 Dave Engebretsen
- *
- * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/memblock.h>
-
-#include <asm/pgtable.h>
-#include <asm/mmu.h>
-#include <asm/mmu_context.h>
-#include <asm/paca.h>
-#include <asm/cputable.h>
-#include <asm/prom.h>
-
-struct stab_entry {
-	unsigned long esid_data;
-	unsigned long vsid_data;
-};
-
-#define NR_STAB_CACHE_ENTRIES 8
-static DEFINE_PER_CPU(long, stab_cache_ptr);
-static DEFINE_PER_CPU(long [NR_STAB_CACHE_ENTRIES], stab_cache);
-
-/*
- * Create a segment table entry for the given esid/vsid pair.
- */
-static int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid)
-{
-	unsigned long esid_data, vsid_data;
-	unsigned long entry, group, old_esid, castout_entry, i;
-	unsigned int global_entry;
-	struct stab_entry *ste, *castout_ste;
-	unsigned long kernel_segment = (esid << SID_SHIFT) >= PAGE_OFFSET;
-
-	vsid_data = vsid << STE_VSID_SHIFT;
-	esid_data = esid << SID_SHIFT | STE_ESID_KP | STE_ESID_V;
-	if (! kernel_segment)
-		esid_data |= STE_ESID_KS;
-
-	/* Search the primary group first. */
-	global_entry = (esid & 0x1f) << 3;
-	ste = (struct stab_entry *)(stab | ((esid & 0x1f) << 7));
-
-	/* Find an empty entry, if one exists. */
-	for (group = 0; group < 2; group++) {
-		for (entry = 0; entry < 8; entry++, ste++) {
-			if (!(ste->esid_data & STE_ESID_V)) {
-				ste->vsid_data = vsid_data;
-				eieio();
-				ste->esid_data = esid_data;
-				return (global_entry | entry);
-			}
-		}
-		/* Now search the secondary group. */
-		global_entry = ((~esid) & 0x1f) << 3;
-		ste = (struct stab_entry *)(stab | (((~esid) & 0x1f) << 7));
-	}
-
-	/*
-	 * Could not find empty entry, pick one with a round robin selection.
-	 * Search all entries in the two groups.
-	 */
-	castout_entry = get_paca()->stab_rr;
-	for (i = 0; i < 16; i++) {
-		if (castout_entry < 8) {
-			global_entry = (esid & 0x1f) << 3;
-			ste = (struct stab_entry *)(stab | ((esid & 0x1f) << 7));
-			castout_ste = ste + castout_entry;
-		} else {
-			global_entry = ((~esid) & 0x1f) << 3;
-			ste = (struct stab_entry *)(stab | (((~esid) & 0x1f) << 7));
-			castout_ste = ste + (castout_entry - 8);
-		}
-
-		/* Dont cast out the first kernel segment */
-		if ((castout_ste->esid_data & ESID_MASK) != PAGE_OFFSET)
-			break;
-
-		castout_entry = (castout_entry + 1) & 0xf;
-	}
-
-	get_paca()->stab_rr = (castout_entry + 1) & 0xf;
-
-	/* Modify the old entry to the new value. */
-
-	/* Force previous translations to complete. DRENG */
-	asm volatile("isync" : : : "memory");
-
-	old_esid = castout_ste->esid_data >> SID_SHIFT;
-	castout_ste->esid_data = 0;		/* Invalidate old entry */
-
-	asm volatile("sync" : : : "memory");	/* Order update */
-
-	castout_ste->vsid_data = vsid_data;
-	eieio();				/* Order update */
-	castout_ste->esid_data = esid_data;
-
-	asm volatile("slbie  %0" : : "r" (old_esid << SID_SHIFT));
-	/* Ensure completion of slbie */
-	asm volatile("sync" : : : "memory");
-
-	return (global_entry | (castout_entry & 0x7));
-}
-
-/*
- * Allocate a segment table entry for the given ea and mm
- */
-static int __ste_allocate(unsigned long ea, struct mm_struct *mm)
-{
-	unsigned long vsid;
-	unsigned char stab_entry;
-	unsigned long offset;
-
-	/* Kernel or user address? */
-	if (is_kernel_addr(ea)) {
-		vsid = get_kernel_vsid(ea, MMU_SEGSIZE_256M);
-	} else {
-		if ((ea >= TASK_SIZE_USER64) || (! mm))
-			return 1;
-
-		vsid = get_vsid(mm->context.id, ea, MMU_SEGSIZE_256M);
-	}
-
-	stab_entry = make_ste(get_paca()->stab_addr, GET_ESID(ea), vsid);
-
-	if (!is_kernel_addr(ea)) {
-		offset = __get_cpu_var(stab_cache_ptr);
-		if (offset < NR_STAB_CACHE_ENTRIES)
-			__get_cpu_var(stab_cache[offset++]) = stab_entry;
-		else
-			offset = NR_STAB_CACHE_ENTRIES+1;
-		__get_cpu_var(stab_cache_ptr) = offset;
-
-		/* Order update */
-		asm volatile("sync":::"memory");
-	}
-
-	return 0;
-}
-
-int ste_allocate(unsigned long ea)
-{
-	return __ste_allocate(ea, current->mm);
-}
-
-/*
- * Do the segment table work for a context switch: flush all user
- * entries from the table, then preload some probably useful entries
- * for the new task
- */
-void switch_stab(struct task_struct *tsk, struct mm_struct *mm)
-{
-	struct stab_entry *stab = (struct stab_entry *) get_paca()->stab_addr;
-	struct stab_entry *ste;
-	unsigned long offset;
-	unsigned long pc = KSTK_EIP(tsk);
-	unsigned long stack = KSTK_ESP(tsk);
-	unsigned long unmapped_base;
-
-	/* Force previous translations to complete. DRENG */
-	asm volatile("isync" : : : "memory");
-
-	/*
-	 * We need interrupts hard-disabled here, not just soft-disabled,
-	 * so that a PMU interrupt can't occur, which might try to access
-	 * user memory (to get a stack trace) and possible cause an STAB miss
-	 * which would update the stab_cache/stab_cache_ptr per-cpu variables.
-	 */
-	hard_irq_disable();
-
-	offset = __get_cpu_var(stab_cache_ptr);
-	if (offset <= NR_STAB_CACHE_ENTRIES) {
-		int i;
-
-		for (i = 0; i < offset; i++) {
-			ste = stab + __get_cpu_var(stab_cache[i]);
-			ste->esid_data = 0; /* invalidate entry */
-		}
-	} else {
-		unsigned long entry;
-
-		/* Invalidate all entries. */
-		ste = stab;
-
-		/* Never flush the first entry. */
-		ste += 1;
-		for (entry = 1;
-		     entry < (HW_PAGE_SIZE / sizeof(struct stab_entry));
-		     entry++, ste++) {
-			unsigned long ea;
-			ea = ste->esid_data & ESID_MASK;
-			if (!is_kernel_addr(ea)) {
-				ste->esid_data = 0;
-			}
-		}
-	}
-
-	asm volatile("sync; slbia; sync":::"memory");
-
-	__get_cpu_var(stab_cache_ptr) = 0;
-
-	/* Now preload some entries for the new task */
-	if (test_tsk_thread_flag(tsk, TIF_32BIT))
-		unmapped_base = TASK_UNMAPPED_BASE_USER32;
-	else
-		unmapped_base = TASK_UNMAPPED_BASE_USER64;
-
-	__ste_allocate(pc, mm);
-
-	if (GET_ESID(pc) == GET_ESID(stack))
-		return;
-
-	__ste_allocate(stack, mm);
-
-	if ((GET_ESID(pc) == GET_ESID(unmapped_base))
-	    || (GET_ESID(stack) == GET_ESID(unmapped_base)))
-		return;
-
-	__ste_allocate(unmapped_base, mm);
-
-	/* Order update */
-	asm volatile("sync" : : : "memory");
-}
-
-/*
- * Allocate segment tables for secondary CPUs. These must all go in
- * the first (bolted) segment, so that do_stab_bolted won't get a
- * recursive segment miss on the segment table itself.
- */
-void __init stabs_alloc(void)
-{
-	int cpu;
-
-	if (mmu_has_feature(MMU_FTR_SLB))
-		return;
-
-	for_each_possible_cpu(cpu) {
-		unsigned long newstab;
-
-		if (cpu == 0)
-			continue; /* stab for CPU 0 is statically allocated */
-
-		newstab = memblock_alloc_base(HW_PAGE_SIZE, HW_PAGE_SIZE,
-					      1<<SID_SHIFT);
-		newstab = (unsigned long)__va(newstab);
-
-		memset((void *)newstab, 0, HW_PAGE_SIZE);
-
-		paca[cpu].stab_addr = newstab;
-		paca[cpu].stab_real = __pa(newstab);
-		printk(KERN_INFO "Segment table for CPU %d at 0x%llx "
-		       "virtual, 0x%llx absolute\n",
-		       cpu, paca[cpu].stab_addr, paca[cpu].stab_real);
-	}
-}
-
-/*
- * Build an entry for the base kernel segment and put it into
- * the segment table or SLB. All other segment table or SLB
- * entries are faulted in.
- */
-void stab_initialize(unsigned long stab)
-{
-	unsigned long vsid = get_kernel_vsid(PAGE_OFFSET, MMU_SEGSIZE_256M);
-	unsigned long stabreal;
-
-	asm volatile("isync; slbia; isync":::"memory");
-	make_ste(stab, GET_ESID(PAGE_OFFSET), vsid);
-
-	/* Order update */
-	asm volatile("sync":::"memory");
-
-	/* Set ASR */
-	stabreal = get_paca()->stab_real | 0x1ul;
-
-	mtspr(SPRN_ASR, stabreal);
-}
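
For reference, the placement policy the deleted make_ste() implemented is a two-group hash with round-robin castout: each ESID maps to a primary group of 8 slots and a secondary group ((~esid) & 0x1f) of 8 more, and when all 16 are full a per-CPU cursor evicts a victim, never the bolted kernel segment. The following is a hedged C model of just that policy; the masks mirror the deleted code, but the flat array, the esid == 0 stand-in for the kernel segment, and main() are invented, and the VSID data and memory barriers of the real code are omitted.

#include <stdio.h>

#define GROUPS		32	/* (esid & 0x1f) selects one of 32 groups */
#define GROUP_SLOTS	8

struct ste {
	unsigned long esid;
	int valid;
};

static struct ste stab[GROUPS * GROUP_SLOTS];
static unsigned int stab_rr;		/* round-robin castout cursor, 0..15 */

static int place(unsigned long esid)
{
	unsigned int g1 = (esid & 0x1f) * GROUP_SLOTS;	/* primary group */
	unsigned int g2 = (~esid & 0x1f) * GROUP_SLOTS;	/* secondary group */
	unsigned int i, idx;

	/* First free slot in the primary group, then the secondary. */
	for (i = 0; i < GROUP_SLOTS; i++)
		if (!stab[g1 + i].valid) { idx = g1 + i; goto found; }
	for (i = 0; i < GROUP_SLOTS; i++)
		if (!stab[g2 + i].valid) { idx = g2 + i; goto found; }

	/* Both groups full: cast out round-robin over the 16 candidates,
	 * never evicting the kernel segment (esid == 0 in this model). */
	for (i = 0; i < 16; i++) {
		idx = stab_rr < 8 ? g1 + stab_rr : g2 + (stab_rr - 8);
		stab_rr = (stab_rr + 1) & 0xf;
		if (stab[idx].esid != 0)
			goto found;
	}
	return -1;

found:
	stab[idx] = (struct ste){ .esid = esid, .valid = 1 };
	return (int)idx;
}

int main(void)
{
	unsigned long esid;

	/* All multiples of 0x20 hash to the same primary/secondary pair,
	 * so the 17th placement forces a castout. */
	for (esid = 0x20; esid <= 0x220; esid += 0x20)
		printf("esid %#lx -> slot %d\n", esid, place(esid));
	return 0;
}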
diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S
index 356e8b41fb09..89bf95bd63b1 100644
--- a/arch/powerpc/mm/tlb_low_64e.S
+++ b/arch/powerpc/mm/tlb_low_64e.S
@@ -296,9 +296,12 @@ itlb_miss_fault_bolted:
  * r14 = page table base
  * r13 = PACA
  * r11 = tlb_per_core ptr
- * r10 = cpu number
+ * r10 = crap (free to use)
  */
 tlb_miss_common_e6500:
+	crmove	cr2*4+2,cr0*4+2		/* cr2.eq != 0 if kernel address */
+
+BEGIN_FTR_SECTION		/* CPU_FTR_SMT */
 	/*
 	 * Search if we already have an indirect entry for that virtual
 	 * address, and if we do, bail out.
@@ -309,6 +312,7 @@ tlb_miss_common_e6500:
 	lhz	r10,PACAPACAINDEX(r13)
 	cmpdi	r15,0
 	cmpdi	cr1,r15,1	/* set cr1.eq = 0 for non-recursive */
+	addi	r10,r10,1
 	bne	2f
 	stbcx.	r10,0,r11
 	bne	1b
@@ -322,18 +326,62 @@ tlb_miss_common_e6500:
 	b	1b
 	.previous
 
+	/*
+	 * Erratum A-008139 says that we can't use tlbwe to change
+	 * an indirect entry in any way (including replacing or
+	 * invalidating) if the other thread could be in the process
+	 * of a lookup.  The workaround is to invalidate the entry
+	 * with tlbilx before overwriting.
+	 */
+
+	lbz	r15,TCD_ESEL_NEXT(r11)
+	rlwinm	r10,r15,16,0xff0000
+	oris	r10,r10,MAS0_TLBSEL(1)@h
+	mtspr	SPRN_MAS0,r10
+	isync
+	tlbre
+	mfspr	r15,SPRN_MAS1
+	andis.	r15,r15,MAS1_VALID@h
+	beq	5f
+
+BEGIN_FTR_SECTION_NESTED(532)
+	mfspr	r10,SPRN_MAS8
+	rlwinm	r10,r10,0,0x80000fff	/* tgs,tlpid -> sgs,slpid */
+	mtspr	SPRN_MAS5,r10
+END_FTR_SECTION_NESTED(CPU_FTR_EMB_HV,CPU_FTR_EMB_HV,532)
+
+	mfspr	r10,SPRN_MAS1
+	rlwinm	r15,r10,0,0x3fff0000	/* tid -> spid */
+	rlwimi	r15,r10,20,0x00000003	/* ind,ts -> sind,sas */
+	mfspr	r10,SPRN_MAS6
+	mtspr	SPRN_MAS6,r15
+
 	mfspr	r15,SPRN_MAS2
+	isync
+	tlbilxva 0,r15
+	isync
+
+	mtspr	SPRN_MAS6,r10
+
+5:
+BEGIN_FTR_SECTION_NESTED(532)
+	li	r10,0
+	mtspr	SPRN_MAS8,r10
+	mtspr	SPRN_MAS5,r10
+END_FTR_SECTION_NESTED(CPU_FTR_EMB_HV,CPU_FTR_EMB_HV,532)
 
 	tlbsx	0,r16
 	mfspr	r10,SPRN_MAS1
-	andis.	r10,r10,MAS1_VALID@h
+	andis.	r15,r10,MAS1_VALID@h
 	bne	tlb_miss_done_e6500
-
-	/* Undo MAS-damage from the tlbsx */
+FTR_SECTION_ELSE
 	mfspr	r10,SPRN_MAS1
+ALT_FTR_SECTION_END_IFSET(CPU_FTR_SMT)
+
 	oris	r10,r10,MAS1_VALID@h
-	mtspr	SPRN_MAS1,r10
-	mtspr	SPRN_MAS2,r15
+	beq	cr2,4f
+	rlwinm	r10,r10,0,16,1	/* Clear TID */
+4:	mtspr	SPRN_MAS1,r10
 
 	/* Now, we need to walk the page tables. First check if we are in
 	 * range.
@@ -394,11 +442,13 @@ tlb_miss_common_e6500:
 
 tlb_miss_done_e6500:
 	.macro	tlb_unlock_e6500
+BEGIN_FTR_SECTION
 	beq	cr1,1f		/* no unlock if lock was recursively grabbed */
 	li	r15,0
 	isync
 	stb	r15,0(r11)
 1:
+END_FTR_SECTION_IFSET(CPU_FTR_SMT)
 	.endm
 
 	tlb_unlock_e6500
@@ -407,12 +457,9 @@ tlb_miss_done_e6500:
 	rfi
 
 tlb_miss_kernel_e6500:
-	mfspr	r10,SPRN_MAS1
 	ld	r14,PACA_KERNELPGD(r13)
-	cmpldi	cr0,r15,8		/* Check for vmalloc region */
-	rlwinm	r10,r10,0,16,1		/* Clear TID */
-	mtspr	SPRN_MAS1,r10
-	beq+	tlb_miss_common_e6500
+	cmpldi	cr1,r15,8		/* Check for vmalloc region */
+	beq+	cr1,tlb_miss_common_e6500
 
 tlb_miss_fault_e6500:
 	tlb_unlock_e6500
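
The erratum A-008139 comment in the hunk above boils down to an invalidate-before-overwrite protocol for indirect TLB entries. Below is a rough C model of that ordering only; the real workaround is the MAS-register assembly shown in the diff, and the types and helpers here are invented for illustration.

#include <stdbool.h>
#include <stdio.h>

/* Toy model of one indirect TLB entry and the two operations the
 * erratum constrains. Hardware details (MAS registers, tlbilx, tlbwe)
 * are reduced to plain functions; only the ordering is illustrated. */
struct tlb_entry {
	bool valid;
	unsigned long va;
	unsigned long data;
};

static void tlb_invalidate(struct tlb_entry *e)
{
	/* models tlbilx: the other thread sees the entry disappear whole */
	e->valid = false;
}

static void tlb_write(struct tlb_entry *e, unsigned long va, unsigned long data)
{
	/* models tlbwe: a multi-field update, unsafe on a live entry */
	e->va = va;
	e->data = data;
	e->valid = true;
}

/* A-008139 workaround shape: never overwrite a valid indirect entry
 * directly; invalidate it first so a concurrent lookup on the other
 * thread cannot observe a half-replaced entry. */
static void replace_entry(struct tlb_entry *victim, unsigned long va,
			  unsigned long data)
{
	if (victim->valid)
		tlb_invalidate(victim);	/* the added tlbilx step */
	tlb_write(victim, va, data);	/* now the write is safe */
}

int main(void)
{
	struct tlb_entry e = { .valid = true, .va = 0x1000, .data = 0xAA };

	replace_entry(&e, 0x2000, 0xBB);
	printf("va=%#lx data=%#lx valid=%d\n", e.va, e.data, e.valid);
	return 0;
}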