path: root/arch/powerpc/mm
author    Christophe Leroy <christophe.leroy@c-s.fr>  2016-05-17 03:02:45 -0400
committer Scott Wood <oss@buserror.net>               2016-07-09 03:02:48 -0400
commit    4badd43ae44109c88438cc6421d208f513cf537f (patch)
tree      b228a4e7dfa771df94a052bafd7b06c87ece9eae /arch/powerpc/mm
parent    f86ef74ed9193c52411277eeac2eec69af553392 (diff)
powerpc/8xx: Map IMMR area with 512k page at a fixed address
Once the linear memory space has been mapped with 8Mb pages, as seen in the related commit, we get 11 million DTLB misses during the reference 600s period. 77% of the misses are on user addresses and 23% are on kernel addresses (one fourth for the linear address space and three fourths for the virtual address space).

Traditionally, each driver manages one computer board which has its own components with its own memory maps. But on embedded chips like the MPC8xx, the SoC has all registers located in the same IO area.

When looking at the ioremaps done during startup, we see that many drivers re-map small parts of the IMMR for their own use, and all those small pieces get their own 4k page, amplifying the number of TLB misses: on our system 0xff000000 is mapped 31 times and 0xff003000 is mapped 9 times. Even if each part of the IMMR were mapped only once with 4k pages, that would still leave several small mappings alongside the linear area.

This patch maps the IMMR with a single 512k page.

With this patch applied, the number of DTLB misses during the 10 min period is reduced to 11.8 million for a duration of 5.8s, which represents 2% of the non-idle time, hence yet another 10% reduction.

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Scott Wood <oss@buserror.net>
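The win relies on the fact that the 32-bit powerpc ioremap path already reuses an existing block mapping (BATs on 6xx, TLBCAM on FSL BookE) instead of creating new 4k pages; the mmu_decl.h hunk below extends the p_block_mapped()/v_block_mapped() declarations to the 8xx so the IMMR block gets the same treatment. What follows is only a minimal sketch of that reuse check, not the kernel's __ioremap_caller(); the function name immr_aware_ioremap() and the plain ioremap() fallback are illustrative.

#include <linux/io.h>

#include "mmu_decl.h"                           /* p_block_mapped() is mm-internal */

/* Sketch: reuse the 512k IMMR block mapping when it already covers @pa. */
void __iomem *immr_aware_ioremap(phys_addr_t pa, unsigned long size)
{
        unsigned long va = p_block_mapped(pa);  /* 0 when pa is not block-mapped */

        if (va)
                return (void __iomem *)va;      /* no new 4k page, no extra DTLB entry */

        return ioremap(pa, size);               /* normal small-page mapping */
}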
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--  arch/powerpc/mm/8xx_mmu.c   | 56
-rw-r--r--  arch/powerpc/mm/mmu_decl.h  |  3
2 files changed, 57 insertions(+), 2 deletions(-)
diff --git a/arch/powerpc/mm/8xx_mmu.c b/arch/powerpc/mm/8xx_mmu.c
index 949100577db5..220772579113 100644
--- a/arch/powerpc/mm/8xx_mmu.c
+++ b/arch/powerpc/mm/8xx_mmu.c
@@ -13,10 +13,43 @@
  */
 
 #include <linux/memblock.h>
+#include <asm/fixmap.h>
+#include <asm/code-patching.h>
 
 #include "mmu_decl.h"
 
+#define IMMR_SIZE (FIX_IMMR_SIZE << PAGE_SHIFT)
+
 extern int __map_without_ltlbs;
+
+/*
+ * Return PA for this VA if it is in IMMR area, or 0
+ */
+phys_addr_t v_block_mapped(unsigned long va)
+{
+        unsigned long p = PHYS_IMMR_BASE;
+
+        if (__map_without_ltlbs)
+                return 0;
+        if (va >= VIRT_IMMR_BASE && va < VIRT_IMMR_BASE + IMMR_SIZE)
+                return p + va - VIRT_IMMR_BASE;
+        return 0;
+}
+
+/*
+ * Return VA for a given PA or 0 if not mapped
+ */
+unsigned long p_block_mapped(phys_addr_t pa)
+{
+        unsigned long p = PHYS_IMMR_BASE;
+
+        if (__map_without_ltlbs)
+                return 0;
+        if (pa >= p && pa < p + IMMR_SIZE)
+                return VIRT_IMMR_BASE + pa - p;
+        return 0;
+}
+
 /*
  * MMU_init_hw does the chip-specific initialization of the MMU hardware.
  */
@@ -29,6 +62,22 @@ void __init MMU_init_hw(void)
 #define LARGE_PAGE_SIZE_8M (1<<23)
 #define LARGE_PAGE_SIZE_64M (1<<26)
 
+static void mmu_mapin_immr(void)
+{
+        unsigned long p = PHYS_IMMR_BASE;
+        unsigned long v = VIRT_IMMR_BASE;
+        unsigned long f = pgprot_val(PAGE_KERNEL_NCG);
+        int offset;
+
+        for (offset = 0; offset < IMMR_SIZE; offset += PAGE_SIZE)
+                map_page(v + offset, p + offset, f);
+}
+
+/* Address of instructions to patch */
+#ifndef CONFIG_PIN_TLB
+extern unsigned int DTLBMiss_jmp;
+#endif
+
 unsigned long __init mmu_mapin_ram(unsigned long top)
 {
         unsigned long v, s, mapped;
@@ -38,8 +87,13 @@ unsigned long __init mmu_mapin_ram(unsigned long top)
         p = 0;
         s = top;
 
-        if (__map_without_ltlbs)
+        if (__map_without_ltlbs) {
+                mmu_mapin_immr();
+#ifndef CONFIG_PIN_TLB
+                patch_instruction(&DTLBMiss_jmp, PPC_INST_NOP);
+#endif
                 return 0;
+        }
 
 #ifdef CONFIG_PPC_4K_PAGES
         while (s >= LARGE_PAGE_SIZE_8M) {
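For reference, the address arithmetic performed by the two new lookup helpers can be checked in isolation. The userspace snippet below uses stand-in constants: PHYS_IMMR_BASE matches the 0xff000000 figure in the commit message, while the virtual base really comes from the fixmap (FIX_IMMR_BASE) and the value used here is purely hypothetical.

#include <stdio.h>

/* Stand-in values for illustration only. */
#define PHYS_IMMR_BASE  0xff000000UL    /* physical IMMR base quoted in the commit message */
#define VIRT_IMMR_BASE  0xfef00000UL    /* hypothetical 512k-aligned fixmap address */
#define IMMR_SIZE       (512 * 1024UL)  /* the single 512k page covers the whole IMMR */

/* Same arithmetic as v_block_mapped(): VA inside the block -> PA, else 0. */
static unsigned long va_to_pa(unsigned long va)
{
        if (va >= VIRT_IMMR_BASE && va < VIRT_IMMR_BASE + IMMR_SIZE)
                return PHYS_IMMR_BASE + (va - VIRT_IMMR_BASE);
        return 0;
}

int main(void)
{
        /* 0xff003000 was remapped 9 times before; now it is just an offset. */
        unsigned long va = VIRT_IMMR_BASE + 0x3000;

        printf("VA 0x%lx -> PA 0x%lx\n", va, va_to_pa(va));
        return 0;
}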
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index 6af65327c993..f988db655e5b 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -154,9 +154,10 @@ struct tlbcam {
 };
 #endif
 
-#if defined(CONFIG_6xx) || defined(CONFIG_FSL_BOOKE)
+#if defined(CONFIG_6xx) || defined(CONFIG_FSL_BOOKE) || defined(CONFIG_PPC_8xx)
 /* 6xx have BATS */
 /* FSL_BOOKE have TLBCAM */
+/* 8xx have LTLB */
 phys_addr_t v_block_mapped(unsigned long va);
 unsigned long p_block_mapped(phys_addr_t pa);
 #else
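Adding CONFIG_PPC_8xx to this #if lets the shared 32-bit code paths that already know how to leave block mappings alone recognise the IMMR block as well. As a sketch only, assuming iounmap() keeps its existing early-return for block-mapped addresses (as it does for BATs/TLBCAM), an address inside the 512k IMMR region would never be handed to vunmap():

#include <linux/io.h>
#include <linux/vmalloc.h>

#include "mmu_decl.h"

/* Sketch of the early-return, not the kernel's iounmap() itself. */
void iounmap_sketch(volatile void __iomem *addr)
{
        if (v_block_mapped((unsigned long)addr))
                return;         /* part of the fixed IMMR block: nothing to undo */

        vunmap((void *)(PAGE_MASK & (unsigned long)addr));
}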