Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--   arch/powerpc/mm/Makefile            1
-rw-r--r--   arch/powerpc/mm/dma-noncoherent.c   2
-rw-r--r--   arch/powerpc/mm/fsl_booke_mmu.c     2
-rw-r--r--   arch/powerpc/mm/hugepage-hash64.c   2
-rw-r--r--   arch/powerpc/mm/hugetlbpage.c       4
-rw-r--r--   arch/powerpc/mm/init_64.c           1
-rw-r--r--   arch/powerpc/mm/mem.c               3
-rw-r--r--   arch/powerpc/mm/mmu_decl.h          2
-rw-r--r--   arch/powerpc/mm/numa.c             62
-rw-r--r--   arch/powerpc/mm/pgtable_32.c       18
-rw-r--r--   arch/powerpc/mm/pgtable_64.c        6
-rw-r--r--   arch/powerpc/mm/ppc_mmu_32.c        5
-rw-r--r--   arch/powerpc/mm/tlb_hash64.c        2
-rw-r--r--   arch/powerpc/mm/vphn.c             70
-rw-r--r--   arch/powerpc/mm/vphn.h             16
15 files changed, 118 insertions, 78 deletions
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index 438dcd3fd0d1..9c8770b5f96f 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_40x) += 40x_mmu.o
 obj-$(CONFIG_44x) += 44x_mmu.o
 obj-$(CONFIG_PPC_FSL_BOOK3E) += fsl_booke_mmu.o
 obj-$(CONFIG_NEED_MULTIPLE_NODES) += numa.o
+obj-$(CONFIG_PPC_SPLPAR) += vphn.o
 obj-$(CONFIG_PPC_MM_SLICES) += slice.o
 obj-y += hugetlbpage.o
 ifeq ($(CONFIG_HUGETLB_PAGE),y)
diff --git a/arch/powerpc/mm/dma-noncoherent.c b/arch/powerpc/mm/dma-noncoherent.c
index d85e86aac7fb..169aba446a74 100644
--- a/arch/powerpc/mm/dma-noncoherent.c
+++ b/arch/powerpc/mm/dma-noncoherent.c
@@ -228,7 +228,7 @@ __dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t
 	do {
 		SetPageReserved(page);
 		map_page(vaddr, page_to_phys(page),
-			 pgprot_noncached(PAGE_KERNEL));
+			 pgprot_val(pgprot_noncached(PAGE_KERNEL)));
 		page++;
 		vaddr += PAGE_SIZE;
 	} while (size -= PAGE_SIZE);
diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
index b46912fee7cd..9c90e66cffb6 100644
--- a/arch/powerpc/mm/fsl_booke_mmu.c
+++ b/arch/powerpc/mm/fsl_booke_mmu.c
@@ -181,7 +181,7 @@ static unsigned long map_mem_in_cams_addr(phys_addr_t phys, unsigned long virt,
 		unsigned long cam_sz;
 
 		cam_sz = calc_cam_sz(ram, virt, phys);
-		settlbcam(i, virt, phys, cam_sz, PAGE_KERNEL_X, 0);
+		settlbcam(i, virt, phys, cam_sz, pgprot_val(PAGE_KERNEL_X), 0);
 
 		ram -= cam_sz;
 		amount_mapped += cam_sz;
diff --git a/arch/powerpc/mm/hugepage-hash64.c b/arch/powerpc/mm/hugepage-hash64.c
index 86686514ae13..43dafb9d6a46 100644
--- a/arch/powerpc/mm/hugepage-hash64.c
+++ b/arch/powerpc/mm/hugepage-hash64.c
@@ -33,7 +33,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
 	 * atomically mark the linux large page PMD busy and dirty
 	 */
 	do {
-		pmd_t pmd = ACCESS_ONCE(*pmdp);
+		pmd_t pmd = READ_ONCE(*pmdp);
 
 		old_pmd = pmd_val(pmd);
 		/* If PMD busy, retry the access */
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 7e408bfc7948..fa9d5c238d22 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -964,7 +964,7 @@ pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift
 		*shift = 0;
 
 	pgdp = pgdir + pgd_index(ea);
-	pgd = ACCESS_ONCE(*pgdp);
+	pgd = READ_ONCE(*pgdp);
 	/*
 	 * Always operate on the local stack value. This make sure the
 	 * value don't get updated by a parallel THP split/collapse,
@@ -1045,7 +1045,7 @@ int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
 	if (pte_end < end)
 		end = pte_end;
 
-	pte = ACCESS_ONCE(*ptep);
+	pte = READ_ONCE(*ptep);
 	mask = _PAGE_PRESENT | _PAGE_USER;
 	if (write)
 		mask |= _PAGE_RW;
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 10471f9bb63f..d747dd7bc90b 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -132,6 +132,7 @@ void pgtable_cache_add(unsigned shift, void (*ctor)(void *))
 	align = max_t(unsigned long, align, minalign);
 	name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift);
 	new = kmem_cache_create(name, table_size, align, 0, ctor);
+	kfree(name);
 	pgtable_cache[shift - 1] = new;
 	pr_debug("Allocated pgtable cache for order %d\n", shift);
 }
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index b7285a5870f8..45fda71feb27 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -61,7 +61,6 @@
 #define CPU_FTR_NOEXECUTE	0
 #endif
 
-int mem_init_done;
 unsigned long long memory_limit;
 
 #ifdef CONFIG_HIGHMEM
@@ -377,8 +376,6 @@ void __init mem_init(void)
 	pr_info(" * 0x%08lx..0x%08lx : vmalloc & ioremap\n",
 		VMALLOC_START, VMALLOC_END);
 #endif /* CONFIG_PPC32 */
-
-	mem_init_done = 1;
 }
 
 void free_initmem(void)
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index 78c45f392f5b..085b66b10891 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -96,7 +96,7 @@ extern void _tlbia(void);
 extern void mapin_ram(void);
 extern int map_page(unsigned long va, phys_addr_t pa, int flags);
 extern void setbat(int index, unsigned long virt, phys_addr_t phys,
-		    unsigned int size, int flags);
+		    unsigned int size, pgprot_t prot);
 
 extern int __map_without_bats;
 extern int __allow_ioremap_reserved;
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 0257a7d659ef..5e80621d9324 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -958,6 +958,13 @@ void __init initmem_init(void)
 
 	memblock_dump_all();
 
+	/*
+	 * Reduce the possible NUMA nodes to the online NUMA nodes,
+	 * since we do not support node hotplug. This ensures that we
+	 * lower the maximum NUMA node ID to what is actually present.
+	 */
+	nodes_and(node_possible_map, node_possible_map, node_online_map);
+
 	for_each_online_node(nid) {
 		unsigned long start_pfn, end_pfn;
 
@@ -1177,6 +1184,9 @@ u64 memory_hotplug_max(void)
 
 /* Virtual Processor Home Node (VPHN) support */
 #ifdef CONFIG_PPC_SPLPAR
+
+#include "vphn.h"
+
 struct topology_update_data {
 	struct topology_update_data *next;
 	unsigned int cpu;
@@ -1248,55 +1258,6 @@ static int update_cpu_associativity_changes_mask(void)
 }
 
 /*
- * 6 64-bit registers unpacked into 12 32-bit associativity values. To form
- * the complete property we have to add the length in the first cell.
- */
-#define VPHN_ASSOC_BUFSIZE (6*sizeof(u64)/sizeof(u32) + 1)
-
-/*
- * Convert the associativity domain numbers returned from the hypervisor
- * to the sequence they would appear in the ibm,associativity property.
- */
-static int vphn_unpack_associativity(const long *packed, __be32 *unpacked)
-{
-	int i, nr_assoc_doms = 0;
-	const __be16 *field = (const __be16 *) packed;
-
-#define VPHN_FIELD_UNUSED	(0xffff)
-#define VPHN_FIELD_MSB		(0x8000)
-#define VPHN_FIELD_MASK		(~VPHN_FIELD_MSB)
-
-	for (i = 1; i < VPHN_ASSOC_BUFSIZE; i++) {
-		if (be16_to_cpup(field) == VPHN_FIELD_UNUSED) {
-			/* All significant fields processed, and remaining
-			 * fields contain the reserved value of all 1's.
-			 * Just store them.
-			 */
-			unpacked[i] = *((__be32 *)field);
-			field += 2;
-		} else if (be16_to_cpup(field) & VPHN_FIELD_MSB) {
-			/* Data is in the lower 15 bits of this field */
-			unpacked[i] = cpu_to_be32(
-				be16_to_cpup(field) & VPHN_FIELD_MASK);
-			field++;
-			nr_assoc_doms++;
-		} else {
-			/* Data is in the lower 15 bits of this field
-			 * concatenated with the next 16 bit field
-			 */
-			unpacked[i] = *((__be32 *)field);
-			field += 2;
-			nr_assoc_doms++;
-		}
-	}
-
-	/* The first cell contains the length of the property */
-	unpacked[0] = cpu_to_be32(nr_assoc_doms);
-
-	return nr_assoc_doms;
-}
-
-/*
  * Retrieve the new associativity information for a virtual processor's
  * home node.
  */
@@ -1306,11 +1267,8 @@ static long hcall_vphn(unsigned long cpu, __be32 *associativity)
 	long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
 	u64 flags = 1;
 	int hwcpu = get_hard_smp_processor_id(cpu);
-	int i;
 
 	rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, retbuf, flags, hwcpu);
-	for (i = 0; i < 6; i++)
-		retbuf[i] = cpu_to_be64(retbuf[i]);
 	vphn_unpack_associativity(retbuf, associativity);
 
 	return rc;
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 03b1a3b0fbd5..7692d1bb1bc6 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -54,9 +54,6 @@ extern char etext[], _stext[];
 #ifdef HAVE_BATS
 extern phys_addr_t v_mapped_by_bats(unsigned long va);
 extern unsigned long p_mapped_by_bats(phys_addr_t pa);
-void setbat(int index, unsigned long virt, phys_addr_t phys,
-	    unsigned int size, int flags);
-
 #else /* !HAVE_BATS */
 #define v_mapped_by_bats(x)	(0UL)
 #define p_mapped_by_bats(x)	(0UL)
@@ -110,9 +107,8 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 __init_refok pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
 	pte_t *pte;
-	extern int mem_init_done;
 
-	if (mem_init_done) {
+	if (slab_is_available()) {
 		pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
 	} else {
 		pte = __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
@@ -192,7 +188,7 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
 
 	/* Make sure we have the base flags */
 	if ((flags & _PAGE_PRESENT) == 0)
-		flags |= PAGE_KERNEL;
+		flags |= pgprot_val(PAGE_KERNEL);
 
 	/* Non-cacheable page cannot be coherent */
 	if (flags & _PAGE_NO_CACHE)
@@ -219,9 +215,9 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
 	 * Don't allow anybody to remap normal RAM that we're using.
 	 * mem_init() sets high_memory so only do the check after that.
 	 */
-	if (mem_init_done && (p < virt_to_phys(high_memory)) &&
+	if (slab_is_available() && (p < virt_to_phys(high_memory)) &&
 	    !(__allow_ioremap_reserved && memblock_is_region_reserved(p, size))) {
-		printk("__ioremap(): phys addr 0x%llx is RAM lr %pf\n",
+		printk("__ioremap(): phys addr 0x%llx is RAM lr %ps\n",
 		       (unsigned long long)p, __builtin_return_address(0));
 		return NULL;
 	}
@@ -247,7 +243,7 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
 	if ((v = p_mapped_by_tlbcam(p)))
 		goto out;
 
-	if (mem_init_done) {
+	if (slab_is_available()) {
 		struct vm_struct *area;
 		area = get_vm_area_caller(size, VM_IOREMAP, caller);
 		if (area == 0)
@@ -266,7 +262,7 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
 	for (i = 0; i < size && err == 0; i += PAGE_SIZE)
 		err = map_page(v+i, p+i, flags);
 	if (err) {
-		if (mem_init_done)
+		if (slab_is_available())
 			vunmap((void *)v);
 		return NULL;
 	}
@@ -327,7 +323,7 @@ void __init __mapin_ram_chunk(unsigned long offset, unsigned long top)
 	p = memstart_addr + s;
 	for (; s < top; s += PAGE_SIZE) {
 		ktext = ((char *) v >= _stext && (char *) v < etext);
-		f = ktext ? PAGE_KERNEL_TEXT : PAGE_KERNEL;
+		f = ktext ? pgprot_val(PAGE_KERNEL_TEXT) : pgprot_val(PAGE_KERNEL);
 		map_page(v, p, f);
 #ifdef CONFIG_PPC_STD_MMU_32
 		if (ktext)
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 6957cc1ca0a7..59daa5eeec25 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -231,7 +231,7 @@ void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size,
 	if ((size == 0) || (paligned == 0))
 		return NULL;
 
-	if (mem_init_done) {
+	if (slab_is_available()) {
 		struct vm_struct *area;
 
 		area = __get_vm_area_caller(size, VM_IOREMAP,
@@ -315,7 +315,7 @@ void __iounmap(volatile void __iomem *token)
 {
 	void *addr;
 
-	if (!mem_init_done)
+	if (!slab_is_available())
 		return;
 
 	addr = (void *) ((unsigned long __force)
@@ -723,7 +723,7 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 	assert_spin_locked(&mm->page_table_lock);
 	WARN_ON(!pmd_trans_huge(pmd));
 #endif
-	trace_hugepage_set_pmd(addr, pmd);
+	trace_hugepage_set_pmd(addr, pmd_val(pmd));
 	return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
 }
 
diff --git a/arch/powerpc/mm/ppc_mmu_32.c b/arch/powerpc/mm/ppc_mmu_32.c
index 5029dc19b517..6b2f3e457171 100644
--- a/arch/powerpc/mm/ppc_mmu_32.c
+++ b/arch/powerpc/mm/ppc_mmu_32.c
@@ -113,11 +113,12 @@ unsigned long __init mmu_mapin_ram(unsigned long top)
  * of 2 between 128k and 256M.
  */
 void __init setbat(int index, unsigned long virt, phys_addr_t phys,
-		   unsigned int size, int flags)
+		   unsigned int size, pgprot_t prot)
 {
 	unsigned int bl;
 	int wimgxpp;
 	struct ppc_bat *bat = BATS[index];
+	unsigned long flags = pgprot_val(prot);
 
 	if ((flags & _PAGE_NO_CACHE) ||
 	    (cpu_has_feature(CPU_FTR_NEED_COHERENT) == 0))
@@ -224,7 +225,7 @@ void __init MMU_init_hw(void)
 	 */
 	if ( ppc_md.progress ) ppc_md.progress("hash:find piece", 0x322);
 	Hash = __va(memblock_alloc(Hash_size, Hash_size));
-	cacheable_memzero(Hash, Hash_size);
+	memset(Hash, 0, Hash_size);
 	_SDR1 = __pa(Hash) | SDR1_LOW_BITS;
 
 	Hash_end = (struct hash_pte *) ((unsigned long)Hash + Hash_size);
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index d2a94b85dbc2..c522969f012d 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -216,7 +216,7 @@ void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
 			continue;
 		pte = pte_val(*ptep);
 		if (hugepage_shift)
-			trace_hugepage_invalidate(start, pte_val(pte));
+			trace_hugepage_invalidate(start, pte);
 		if (!(pte & _PAGE_HASHPTE))
 			continue;
 		if (unlikely(hugepage_shift && pmd_trans_huge(*(pmd_t *)pte)))
diff --git a/arch/powerpc/mm/vphn.c b/arch/powerpc/mm/vphn.c
new file mode 100644
index 000000000000..5f8ef50e5c66
--- /dev/null
+++ b/arch/powerpc/mm/vphn.c
@@ -0,0 +1,70 @@
+#include <asm/byteorder.h>
+#include "vphn.h"
+
+/*
+ * The associativity domain numbers are returned from the hypervisor as a
+ * stream of mixed 16-bit and 32-bit fields. The stream is terminated by the
+ * special value of "all ones" (aka. 0xffff) and its size may not exceed 48
+ * bytes.
+ *
+ *    --- 16-bit fields -->
+ *  _________________________
+ *  |  0  |  1  |  2  |  3  |   be_packed[0]
+ *  ------+-----+-----+------
+ *  _________________________
+ *  |  4  |  5  |  6  |  7  |   be_packed[1]
+ *  -------------------------
+ *            ...
+ *  _________________________
+ *  | 20  | 21  | 22  | 23  |   be_packed[5]
+ *  -------------------------
+ *
+ * Convert to the sequence they would appear in the ibm,associativity property.
+ */
+int vphn_unpack_associativity(const long *packed, __be32 *unpacked)
+{
+	__be64 be_packed[VPHN_REGISTER_COUNT];
+	int i, nr_assoc_doms = 0;
+	const __be16 *field = (const __be16 *) be_packed;
+	u16 last = 0;
+	bool is_32bit = false;
+
+#define VPHN_FIELD_UNUSED	(0xffff)
+#define VPHN_FIELD_MSB		(0x8000)
+#define VPHN_FIELD_MASK		(~VPHN_FIELD_MSB)
+
+	/* Let's fix the values returned by plpar_hcall9() */
+	for (i = 0; i < VPHN_REGISTER_COUNT; i++)
+		be_packed[i] = cpu_to_be64(packed[i]);
+
+	for (i = 1; i < VPHN_ASSOC_BUFSIZE; i++) {
+		u16 new = be16_to_cpup(field++);
+
+		if (is_32bit) {
+			/* Let's concatenate the 16 bits of this field to the
+			 * 15 lower bits of the previous field
+			 */
+			unpacked[++nr_assoc_doms] =
+				cpu_to_be32(last << 16 | new);
+			is_32bit = false;
+		} else if (new == VPHN_FIELD_UNUSED)
+			/* This is the list terminator */
+			break;
+		else if (new & VPHN_FIELD_MSB) {
+			/* Data is in the lower 15 bits of this field */
+			unpacked[++nr_assoc_doms] =
+				cpu_to_be32(new & VPHN_FIELD_MASK);
+		} else {
+			/* Data is in the lower 15 bits of this field
+			 * concatenated with the next 16 bit field
+			 */
+			last = new;
+			is_32bit = true;
+		}
+	}
+
+	/* The first cell contains the length of the property */
+	unpacked[0] = cpu_to_be32(nr_assoc_doms);
+
+	return nr_assoc_doms;
+}
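
Aside (not part of the patch): the field layout that vphn_unpack_associativity() above parses can be exercised in isolation. The following is a minimal userspace sketch assuming only what the comment in vphn.c states: 16-bit fields with the MSB set carry a 15-bit domain number, MSB-clear fields start a 32-bit value spanning two fields, and 0xffff terminates the stream. The FIELD_* names and sample values are invented for the illustration and are not kernel symbols.

#include <stdint.h>
#include <stdio.h>

#define FIELD_UNUSED 0xffff	/* stream terminator */
#define FIELD_MSB    0x8000	/* flags a 16-bit domain field */
#define FIELD_MASK   0x7fff	/* payload bits of a 16-bit field */

int main(void)
{
	/* One 15-bit domain (0x8002 -> 2), one 32-bit domain
	 * (0x0001 0x0005 -> 0x10005), then the terminator. */
	uint16_t fields[] = { 0x8002, 0x0001, 0x0005, FIELD_UNUSED };
	uint32_t unpacked[8] = { 0 };
	unsigned int i, nr = 0;

	for (i = 0; i < sizeof(fields) / sizeof(fields[0]); i++) {
		if (fields[i] == FIELD_UNUSED)
			break;				/* end of list */
		if (fields[i] & FIELD_MSB) {
			/* 16-bit field: keep the lower 15 bits */
			unpacked[++nr] = fields[i] & FIELD_MASK;
		} else {
			/* 32-bit field: concatenate with the next field */
			unpacked[++nr] = (uint32_t)fields[i] << 16 | fields[i + 1];
			i++;
		}
	}
	unpacked[0] = nr;				/* first cell is the length */

	for (i = 0; i <= nr; i++)
		printf("cell %u: 0x%x\n", i, unpacked[i]);
	return 0;
}

Run on the sample stream it prints a length cell of 2 followed by 0x2 and 0x10005, mirroring the ibm,associativity ordering the kernel routine produces.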
diff --git a/arch/powerpc/mm/vphn.h b/arch/powerpc/mm/vphn.h
new file mode 100644
index 000000000000..fe8b7805b78f
--- /dev/null
+++ b/arch/powerpc/mm/vphn.h
@@ -0,0 +1,16 @@
+#ifndef _ARCH_POWERPC_MM_VPHN_H_
+#define _ARCH_POWERPC_MM_VPHN_H_
+
+/* The H_HOME_NODE_ASSOCIATIVITY h_call returns 6 64-bit registers.
+ */
+#define VPHN_REGISTER_COUNT 6
+
+/*
+ * 6 64-bit registers unpacked into up to 24 be32 associativity values. To
+ * form the complete property we have to add the length in the first cell.
+ */
+#define VPHN_ASSOC_BUFSIZE (VPHN_REGISTER_COUNT*sizeof(u64)/sizeof(u16) + 1)
+
+extern int vphn_unpack_associativity(const long *packed, __be32 *unpacked);
+
+#endif
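
For reference, the buffer size above works out as VPHN_ASSOC_BUFSIZE = 6 * sizeof(u64) / sizeof(u16) + 1 = 6 * 8 / 2 + 1 = 25 cells: one leading length cell plus at most 24 associativity domain values, which is why the unpack loop in vphn.c iterates i from 1 to 24.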