author     Michael Ellerman <mpe@ellerman.id.au>    2016-07-29 23:43:19 -0400
committer  Michael Ellerman <mpe@ellerman.id.au>    2016-07-29 23:43:19 -0400
commit     719dbb2df78fc9a40e28392b07cd715bfc5a665c (patch)
tree       0bbc3b84b74178d18164f5522ee3715623f94bbe /arch/powerpc/mm
parent     fbef66f0adcddf4475e19f3d09df22fb34e633f6 (diff)
parent     9f595fd8b54809fed13fc30906ef1e90a3fcfbc9 (diff)
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/scottwood/linux into next
Freescale updates from Scott:
"Highlights include more 8xx optimizations, device tree updates,
and MVME7100 support."
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--   arch/powerpc/mm/8xx_mmu.c    131
-rw-r--r--   arch/powerpc/mm/mmu_decl.h     3
2 files changed, 92 insertions(+), 42 deletions(-)
diff --git a/arch/powerpc/mm/8xx_mmu.c b/arch/powerpc/mm/8xx_mmu.c
index 949100577db5..6c5025e81236 100644
--- a/arch/powerpc/mm/8xx_mmu.c
+++ b/arch/powerpc/mm/8xx_mmu.c
@@ -13,62 +13,115 @@
  */
 
 #include <linux/memblock.h>
+#include <asm/fixmap.h>
+#include <asm/code-patching.h>
 
 #include "mmu_decl.h"
 
+#define IMMR_SIZE (FIX_IMMR_SIZE << PAGE_SHIFT)
+
 extern int __map_without_ltlbs;
+
 /*
- * MMU_init_hw does the chip-specific initialization of the MMU hardware.
+ * Return PA for this VA if it is in IMMR area, or 0
  */
-void __init MMU_init_hw(void)
+phys_addr_t v_block_mapped(unsigned long va)
 {
-	/* Nothing to do for the time being but keep it similar to other PPC */
+	unsigned long p = PHYS_IMMR_BASE;
+
+	if (__map_without_ltlbs)
+		return 0;
+	if (va >= VIRT_IMMR_BASE && va < VIRT_IMMR_BASE + IMMR_SIZE)
+		return p + va - VIRT_IMMR_BASE;
+	return 0;
+}
+
+/*
+ * Return VA for a given PA or 0 if not mapped
+ */
+unsigned long p_block_mapped(phys_addr_t pa)
+{
+	unsigned long p = PHYS_IMMR_BASE;
+
+	if (__map_without_ltlbs)
+		return 0;
+	if (pa >= p && pa < p + IMMR_SIZE)
+		return VIRT_IMMR_BASE + pa - p;
+	return 0;
 }
 
-#define LARGE_PAGE_SIZE_4M	(1<<22)
 #define LARGE_PAGE_SIZE_8M	(1<<23)
-#define LARGE_PAGE_SIZE_64M	(1<<26)
 
-unsigned long __init mmu_mapin_ram(unsigned long top)
+/*
+ * MMU_init_hw does the chip-specific initialization of the MMU hardware.
+ */
+void __init MMU_init_hw(void)
 {
-	unsigned long v, s, mapped;
-	phys_addr_t p;
+	/* PIN up to the 3 first 8Mb after IMMR in DTLB table */
+#ifdef CONFIG_PIN_TLB
+	unsigned long ctr = mfspr(SPRN_MD_CTR) & 0xfe000000;
+	unsigned long flags = 0xf0 | MD_SPS16K | _PAGE_SHARED | _PAGE_DIRTY;
+#ifdef CONFIG_PIN_TLB_IMMR
+	int i = 29;
+#else
+	int i = 28;
+#endif
+	unsigned long addr = 0;
+	unsigned long mem = total_lowmem;
+
+	for (; i < 32 && mem >= LARGE_PAGE_SIZE_8M; i++) {
+		mtspr(SPRN_MD_CTR, ctr | (i << 8));
+		mtspr(SPRN_MD_EPN, (unsigned long)__va(addr) | MD_EVALID);
+		mtspr(SPRN_MD_TWC, MD_PS8MEG | MD_SVALID);
+		mtspr(SPRN_MD_RPN, addr | flags | _PAGE_PRESENT);
+		addr += LARGE_PAGE_SIZE_8M;
+		mem -= LARGE_PAGE_SIZE_8M;
+	}
+#endif
+}
 
-	v = KERNELBASE;
-	p = 0;
-	s = top;
+static void mmu_mapin_immr(void)
+{
+	unsigned long p = PHYS_IMMR_BASE;
+	unsigned long v = VIRT_IMMR_BASE;
+	unsigned long f = pgprot_val(PAGE_KERNEL_NCG);
+	int offset;
 
-	if (__map_without_ltlbs)
-		return 0;
+	for (offset = 0; offset < IMMR_SIZE; offset += PAGE_SIZE)
+		map_page(v + offset, p + offset, f);
+}
 
-#ifdef CONFIG_PPC_4K_PAGES
-	while (s >= LARGE_PAGE_SIZE_8M) {
-		pmd_t *pmdp;
-		unsigned long val = p | MD_PS8MEG;
+/* Address of instructions to patch */
+#ifndef CONFIG_PIN_TLB_IMMR
+extern unsigned int DTLBMiss_jmp;
+#endif
+extern unsigned int DTLBMiss_cmp, FixupDAR_cmp;
 
-		pmdp = pmd_offset(pud_offset(pgd_offset_k(v), v), v);
-		*pmdp++ = __pmd(val);
-		*pmdp++ = __pmd(val + LARGE_PAGE_SIZE_4M);
+void mmu_patch_cmp_limit(unsigned int *addr, unsigned long mapped)
+{
+	unsigned int instr = *addr;
 
-		v += LARGE_PAGE_SIZE_8M;
-		p += LARGE_PAGE_SIZE_8M;
-		s -= LARGE_PAGE_SIZE_8M;
-	}
-#else /* CONFIG_PPC_16K_PAGES */
-	while (s >= LARGE_PAGE_SIZE_64M) {
-		pmd_t *pmdp;
-		unsigned long val = p | MD_PS8MEG;
+	instr &= 0xffff0000;
+	instr |= (unsigned long)__va(mapped) >> 16;
+	patch_instruction(addr, instr);
+}
 
-		pmdp = pmd_offset(pud_offset(pgd_offset_k(v), v), v);
-		*pmdp++ = __pmd(val);
+unsigned long __init mmu_mapin_ram(unsigned long top)
+{
+	unsigned long mapped;
 
-		v += LARGE_PAGE_SIZE_64M;
-		p += LARGE_PAGE_SIZE_64M;
-		s -= LARGE_PAGE_SIZE_64M;
-	}
+	if (__map_without_ltlbs) {
+		mapped = 0;
+		mmu_mapin_immr();
+#ifndef CONFIG_PIN_TLB_IMMR
+		patch_instruction(&DTLBMiss_jmp, PPC_INST_NOP);
 #endif
+	} else {
+		mapped = top & ~(LARGE_PAGE_SIZE_8M - 1);
+	}
 
-	mapped = top - s;
+	mmu_patch_cmp_limit(&DTLBMiss_cmp, mapped);
+	mmu_patch_cmp_limit(&FixupDAR_cmp, mapped);
 
 	/* If the size of RAM is not an exact power of two, we may not
 	 * have covered RAM in its entirety with 8 MiB
@@ -77,7 +130,8 @@ unsigned long __init mmu_mapin_ram(unsigned long top)
 	 * coverage with normal-sized pages (or other reasons) do not
 	 * attempt to allocate outside the allowed range.
 	 */
-	memblock_set_current_limit(mapped);
+	if (mapped)
+		memblock_set_current_limit(mapped);
 
 	return mapped;
 }
@@ -90,13 +144,8 @@
 	 */
 	BUG_ON(first_memblock_base != 0);
 
-#ifdef CONFIG_PIN_TLB
 	/* 8xx can only access 24MB at the moment */
 	memblock_set_current_limit(min_t(u64, first_memblock_size, 0x01800000));
-#else
-	/* 8xx can only access 8MB at the moment */
-	memblock_set_current_limit(min_t(u64, first_memblock_size, 0x00800000));
-#endif
 }
 
 /*
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index 6af65327c993..f988db655e5b 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -154,9 +154,10 @@ struct tlbcam {
 };
 #endif
 
-#if defined(CONFIG_6xx) || defined(CONFIG_FSL_BOOKE)
+#if defined(CONFIG_6xx) || defined(CONFIG_FSL_BOOKE) || defined(CONFIG_PPC_8xx)
 /* 6xx have BATS */
 /* FSL_BOOKE have TLBCAM */
+/* 8xx have LTLB */
 phys_addr_t v_block_mapped(unsigned long va);
 unsigned long p_block_mapped(phys_addr_t pa);
 #else
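
Editor's note: the part of this merge touching arch/powerpc/mm is the 8xx rework shown above. Linear RAM is no longer mapped through page tables in mmu_mapin_ram(), the IMMR area gets its own fixed mapping via mmu_mapin_immr(), and the new v_block_mapped()/p_block_mapped() helpers translate addresses that fall inside that block-mapped IMMR window, returning 0 for anything outside it. The standalone sketch below restates just that translation logic outside the kernel; the three SKETCH_* constants are hypothetical stand-ins chosen for illustration, not the values the kernel derives from PHYS_IMMR_BASE, VIRT_IMMR_BASE and FIX_IMMR_SIZE << PAGE_SHIFT.

/* Standalone sketch of the IMMR block-mapping lookup introduced above.
 * All SKETCH_* values are assumed placeholders, not real 8xx addresses. */
#include <stdio.h>

#define SKETCH_PHYS_IMMR_BASE	0xff000000UL	/* assumed IMMR physical base */
#define SKETCH_VIRT_IMMR_BASE	0xfff80000UL	/* assumed fixmap virtual base */
#define SKETCH_IMMR_SIZE	0x00080000UL	/* assumed size of the IMMR window */

static int map_without_ltlbs;	/* mirrors the kernel's __map_without_ltlbs flag */

/* Return the PA for va if it lies in the IMMR window, or 0 (like v_block_mapped). */
static unsigned long sketch_v_block_mapped(unsigned long va)
{
	if (map_without_ltlbs)
		return 0;
	if (va >= SKETCH_VIRT_IMMR_BASE &&
	    va < SKETCH_VIRT_IMMR_BASE + SKETCH_IMMR_SIZE)
		return SKETCH_PHYS_IMMR_BASE + (va - SKETCH_VIRT_IMMR_BASE);
	return 0;
}

/* Return the VA for pa if it lies in the IMMR window, or 0 (like p_block_mapped). */
static unsigned long sketch_p_block_mapped(unsigned long pa)
{
	if (map_without_ltlbs)
		return 0;
	if (pa >= SKETCH_PHYS_IMMR_BASE &&
	    pa < SKETCH_PHYS_IMMR_BASE + SKETCH_IMMR_SIZE)
		return SKETCH_VIRT_IMMR_BASE + (pa - SKETCH_PHYS_IMMR_BASE);
	return 0;
}

int main(void)
{
	unsigned long va = SKETCH_VIRT_IMMR_BASE + 0x1000;
	unsigned long pa = SKETCH_PHYS_IMMR_BASE + 0x1000;

	printf("va %#lx -> pa %#lx\n", va, sketch_v_block_mapped(va));
	printf("pa %#lx -> va %#lx\n", pa, sketch_p_block_mapped(pa));
	printf("va 0x1000 (outside window) -> %#lx\n", sketch_v_block_mapped(0x1000));
	return 0;
}

As in the kernel comments above ("Return VA for a given PA or 0 if not mapped"), a zero return simply means the address is not block mapped, so callers fall back to the regular page-table lookup.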