author		Linus Torvalds <torvalds@linux-foundation.org>	2018-12-27 13:43:24 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-12-27 13:43:24 -0500
commit		8d6973327ee84c2f40dd9efd8928d4a1186c96e2 (patch)
tree		1c6accd71b6e9c4e05d5aaae766b958ad440d320 /arch/powerpc/mm/mem.c
parent		6d101ba6be2a26a3e1f513b5e293f0fd2b79ec5c (diff)
parent		12526b0d6c580df860b31e59d68e5696e16c6e5b (diff)
Merge tag 'powerpc-4.21-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux
Pull powerpc updates from Michael Ellerman:
 "Notable changes:

   - Mitigations for Spectre v2 on some Freescale (NXP) CPUs.

   - A large series adding support for pass-through of Nvidia V100 GPUs
     to guests on Power9.

   - Another large series to enable hardware assistance for TLB table
     walk on MPC8xx CPUs.

   - Some preparatory changes to our DMA code, to make way for further
     cleanups from Christoph.

   - Several fixes for our Transactional Memory handling discovered by
     fuzzing the signal return path.

   - Support for generating our system call table(s) from a text file
     like other architectures.

   - A fix to our page fault handler so that instead of generating a
     WARN_ON_ONCE, user accesses of kernel addresses instead print a
     ratelimited and appropriately scary warning.

   - A cosmetic change to make our unhandled page fault messages more
     similar to other arches and also more compact and informative.

   - Freescale updates from Scott: "Highlights include elimination of
     legacy clock bindings use from dts files, an 83xx watchdog handler,
     fixes to old dts interrupt errors, and some minor cleanup."

  And many clean-ups, reworks and minor fixes etc.

  Thanks to: Alexandre Belloni, Alexey Kardashevskiy, Andrew Donnellan,
  Aneesh Kumar K.V, Arnd Bergmann, Benjamin Herrenschmidt, Breno Leitao,
  Christian Lamparter, Christophe Leroy, Christoph Hellwig, Daniel Axtens,
  Darren Stevens, David Gibson, Diana Craciun, Dmitry V. Levin, Firoz Khan,
  Geert Uytterhoeven, Greg Kurz, Gustavo Romero, Hari Bathini, Joel Stanley,
  Kees Cook, Madhavan Srinivasan, Mahesh Salgaonkar, Markus Elfring,
  Mathieu Malaterre, Michal Suchánek, Naveen N. Rao, Nick Desaulniers,
  Oliver O'Halloran, Paul Mackerras, Ram Pai, Ravi Bangoria, Rob Herring,
  Russell Currey, Sabyasachi Gupta, Sam Bobroff, Satheesh Rajendran,
  Scott Wood, Segher Boessenkool, Stephen Rothwell, Tang Yuantian,
  Thiago Jung Bauermann, Yangtao Li, Yuantian Tang, Yue Haibing"

* tag 'powerpc-4.21-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux: (201 commits)
  Revert "powerpc/fsl_pci: simplify fsl_pci_dma_set_mask"
  powerpc/zImage: Also check for stdout-path
  powerpc: Fix HMIs on big-endian with CONFIG_RELOCATABLE=y
  macintosh: Use of_node_name_{eq, prefix} for node name comparisons
  ide: Use of_node_name_eq for node name comparisons
  powerpc: Use of_node_name_eq for node name comparisons
  powerpc/pseries/pmem: Convert to %pOFn instead of device_node.name
  powerpc/mm: Remove very old comment in hash-4k.h
  powerpc/pseries: Fix node leak in update_lmb_associativity_index()
  powerpc/configs/85xx: Enable CONFIG_DEBUG_KERNEL
  powerpc/dts/fsl: Fix dtc-flagged interrupt errors
  clk: qoriq: add more compatibles strings
  powerpc/fsl: Use new clockgen binding
  powerpc/83xx: handle machine check caused by watchdog timer
  powerpc/fsl-rio: fix spelling mistake "reserverd" -> "reserved"
  powerpc/fsl_pci: simplify fsl_pci_dma_set_mask
  arch/powerpc/fsl_rmu: Use dma_zalloc_coherent
  vfio_pci: Add NVIDIA GV100GL [Tesla V100 SXM2] subdriver
  vfio_pci: Allow regions to add own capabilities
  vfio_pci: Allow mapping extra regions
  ...
Diffstat (limited to 'arch/powerpc/mm/mem.c')
-rw-r--r--	arch/powerpc/mm/mem.c	51
1 file changed, 19 insertions(+), 32 deletions(-)
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 0a64fffabee1..20394e52fe27 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -246,35 +246,19 @@ static int __init mark_nonram_nosave(void)
 }
 #endif
 
-static bool zone_limits_final;
-
 /*
- * The memory zones past TOP_ZONE are managed by generic mm code.
- * These should be set to zero since that's what every other
- * architecture does.
+ * Zones usage:
+ *
+ * We setup ZONE_DMA to be 31-bits on all platforms and ZONE_NORMAL to be
+ * everything else. GFP_DMA32 page allocations automatically fall back to
+ * ZONE_DMA.
+ *
+ * By using 31-bit unconditionally, we can exploit ARCH_ZONE_DMA_BITS to
+ * inform the generic DMA mapping code. 32-bit only devices (if not handled
+ * by an IOMMU anyway) will take a first dip into ZONE_NORMAL and get
+ * otherwise served by ZONE_DMA.
  */
-static unsigned long max_zone_pfns[MAX_NR_ZONES] = {
-	[0            ... TOP_ZONE        ] = ~0UL,
-	[TOP_ZONE + 1 ... MAX_NR_ZONES - 1] = 0
-};
-
-/*
- * Restrict the specified zone and all more restrictive zones
- * to be below the specified pfn.  May not be called after
- * paging_init().
- */
-void __init limit_zone_pfn(enum zone_type zone, unsigned long pfn_limit)
-{
-	int i;
-
-	if (WARN_ON(zone_limits_final))
-		return;
-
-	for (i = zone; i >= 0; i--) {
-		if (max_zone_pfns[i] > pfn_limit)
-			max_zone_pfns[i] = pfn_limit;
-	}
-}
+static unsigned long max_zone_pfns[MAX_NR_ZONES];
 
 /*
  * Find the least restrictive zone that is entirely below the
@@ -324,11 +308,14 @@ void __init paging_init(void)
 	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
 	       (long int)((top_of_ram - total_ram) >> 20));
 
+#ifdef CONFIG_ZONE_DMA
+	max_zone_pfns[ZONE_DMA] = min(max_low_pfn, 0x7fffffffUL >> PAGE_SHIFT);
+#endif
+	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
 #ifdef CONFIG_HIGHMEM
-	limit_zone_pfn(ZONE_NORMAL, lowmem_end_addr >> PAGE_SHIFT);
+	max_zone_pfns[ZONE_HIGHMEM] = max_pfn;
 #endif
-	limit_zone_pfn(TOP_ZONE, top_of_ram >> PAGE_SHIFT);
-	zone_limits_final = true;
+
 	free_area_init_nodes(max_zone_pfns);
 
 	mark_nonram_nosave();
@@ -503,7 +490,7 @@ EXPORT_SYMBOL(flush_icache_user_range);
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 		      pte_t *ptep)
 {
-#ifdef CONFIG_PPC_STD_MMU
+#ifdef CONFIG_PPC_BOOK3S
 	/*
 	 * We don't need to worry about _PAGE_PRESENT here because we are
 	 * called with either mm->page_table_lock held or ptl lock held
@@ -541,7 +528,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 	}
 
 	hash_preload(vma->vm_mm, address, is_exec, trap);
-#endif /* CONFIG_PPC_STD_MMU */
+#endif /* CONFIG_PPC_BOOK3S */
 #if (defined(CONFIG_PPC_BOOK3E_64) || defined(CONFIG_PPC_FSL_BOOK3E)) \
 	&& defined(CONFIG_HUGETLB_PAGE)
 	if (is_vm_hugetlb_page(vma))
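As context for the zone setup changed above (this is not part of the commit): the new
paging_init() code caps ZONE_DMA at the 31-bit physical address limit expressed as a page
frame number, clamped to the amount of low memory actually present, while ZONE_NORMAL ends
at max_low_pfn. A minimal standalone userspace sketch of that arithmetic follows; the
PAGE_SHIFT and max_low_pfn values and the dma_limit_pfn/zone_dma_pfn names are hypothetical,
chosen only to illustrate the calculation.

/*
 * Standalone illustration (not kernel code) of the max_zone_pfns arithmetic
 * used by the new paging_init() above.  All values below are made up.
 */
#include <stdio.h>

#define PAGE_SHIFT 16UL			/* assume 64K pages, common on ppc64 */

int main(void)
{
	/* hypothetical machine: 64GB of RAM, all below the low-memory limit */
	unsigned long max_low_pfn = 0x100000UL;

	/* pfn of the 31-bit (~2GB) physical address boundary */
	unsigned long dma_limit_pfn = 0x7fffffffUL >> PAGE_SHIFT;

	/* ZONE_DMA ends at whichever limit comes first, mirroring the min() above */
	unsigned long zone_dma_pfn =
		dma_limit_pfn < max_low_pfn ? dma_limit_pfn : max_low_pfn;

	printf("31-bit limit     : pfn 0x%lx (~%lu MB)\n",
	       dma_limit_pfn, (dma_limit_pfn << PAGE_SHIFT) >> 20);
	printf("ZONE_DMA ends at : pfn 0x%lx\n", zone_dma_pfn);
	printf("ZONE_NORMAL ends : pfn 0x%lx\n", max_low_pfn);
	return 0;
}

With 64K pages the 31-bit boundary works out to pfn 0x7fff (roughly 2GB), so on any machine
with more low memory than that, ZONE_DMA ends there and ZONE_NORMAL covers the remainder, as
described in the comment added by the patch.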