Diffstat (limited to 'arch/sh/mm/init.c')
-rw-r--r--	arch/sh/mm/init.c	166
1 file changed, 93 insertions(+), 73 deletions(-)
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 432acd07e76a..68028e8f26ce 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -21,25 +21,13 @@
 #include <asm/cacheflush.h>
 #include <asm/sections.h>
 #include <asm/cache.h>
+#include <asm/sizes.h>
 
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 pgd_t swapper_pg_dir[PTRS_PER_PGD];
 
-#ifdef CONFIG_SUPERH32
-/*
- * Handle trivial transitions between cached and uncached
- * segments, making use of the 1:1 mapping relationship in
- * 512MB lowmem.
- *
- * This is the offset of the uncached section from its cached alias.
- * Default value only valid in 29 bit mode, in 32bit mode will be
- * overridden in pmb_init.
- */
-unsigned long cached_to_uncached = P2SEG - P1SEG;
-#endif
-
 #ifdef CONFIG_MMU
-static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
+static pte_t *__get_pte_phys(unsigned long addr)
 {
 	pgd_t *pgd;
 	pud_t *pud;
@@ -49,22 +37,30 @@ static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
 	pgd = pgd_offset_k(addr);
 	if (pgd_none(*pgd)) {
 		pgd_ERROR(*pgd);
-		return;
+		return NULL;
 	}
 
 	pud = pud_alloc(NULL, pgd, addr);
 	if (unlikely(!pud)) {
 		pud_ERROR(*pud);
-		return;
+		return NULL;
 	}
 
 	pmd = pmd_alloc(NULL, pud, addr);
 	if (unlikely(!pmd)) {
 		pmd_ERROR(*pmd);
-		return;
+		return NULL;
 	}
 
 	pte = pte_offset_kernel(pmd, addr);
+	return pte;
+}
+
+static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
+{
+	pte_t *pte;
+
+	pte = __get_pte_phys(addr);
 	if (!pte_none(*pte)) {
 		pte_ERROR(*pte);
 		return;
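The page-table walk is factored out of set_pte_phys() into __get_pte_phys(), which reports a failed walk by returning NULL rather than silently bailing out. A minimal caller sketch (illustrative only; everything except __get_pte_phys() and the helpers it already uses is hypothetical):

	/* Hypothetical caller: check the walk result before touching the PTE. */
	static void example_map_page(unsigned long vaddr, unsigned long paddr)
	{
		pte_t *pte = __get_pte_phys(vaddr);

		if (!pte)
			return;	/* walk failed; pgd/pud/pmd_ERROR() already fired */

		set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL));
		local_flush_tlb_one(get_asid(), vaddr);
	}

Note that set_pte_phys() itself dereferences the result without a NULL check, preserving the old assumption that the kernel page tables covering the fixmap range are already populated.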
@@ -72,23 +68,24 @@ static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
 
 	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
 	local_flush_tlb_one(get_asid(), addr);
+
+	if (pgprot_val(prot) & _PAGE_WIRED)
+		tlb_wire_entry(NULL, addr, *pte);
+}
+
+static void clear_pte_phys(unsigned long addr, pgprot_t prot)
+{
+	pte_t *pte;
+
+	pte = __get_pte_phys(addr);
+
+	if (pgprot_val(prot) & _PAGE_WIRED)
+		tlb_unwire_entry();
+
+	set_pte(pte, pfn_pte(0, __pgprot(0)));
+	local_flush_tlb_one(get_asid(), addr);
 }
 
-/*
- * As a performance optimization, other platforms preserve the fixmap mapping
- * across a context switch, we don't presently do this, but this could be done
- * in a similar fashion as to the wired TLB interface that sh64 uses (by way
- * of the memory mapped UTLB configuration) -- this unfortunately forces us to
- * give up a TLB entry for each mapping we want to preserve. While this may be
- * viable for a small number of fixmaps, it's not particularly useful for
- * everything and needs to be carefully evaluated. (ie, we may want this for
- * the vsyscall page).
- *
- * XXX: Perhaps add a _PAGE_WIRED flag or something similar that we can pass
- * in at __set_fixmap() time to determine the appropriate behavior to follow.
- *
- * -- PFM.
- */
 void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
 {
 	unsigned long address = __fix_to_virt(idx);
@@ -101,6 +98,18 @@ void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
 	set_pte_phys(address, phys, prot);
 }
 
+void __clear_fixmap(enum fixed_addresses idx, pgprot_t prot)
+{
+	unsigned long address = __fix_to_virt(idx);
+
+	if (idx >= __end_of_fixed_addresses) {
+		BUG();
+		return;
+	}
+
+	clear_pte_phys(address, prot);
+}
+
 void __init page_table_range_init(unsigned long start, unsigned long end,
 				  pgd_t *pgd_base)
 {
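Taken together, the last two hunks implement the _PAGE_WIRED idea sketched in the removed XXX comment: a protection value carrying _PAGE_WIRED makes set_pte_phys() pin the translation with tlb_wire_entry(), and the new clear_pte_phys()/__clear_fixmap() path releases it with tlb_unwire_entry() before the PTE is cleared and flushed. A sketch of the intended pairing (FIX_EXAMPLE and the prot construction are assumptions for illustration, not from this diff):

	/* Wire a fixmap for its whole lifetime, then release it. */
	pgprot_t prot = __pgprot(pgprot_val(PAGE_KERNEL_NOCACHE) | _PAGE_WIRED);

	__set_fixmap(FIX_EXAMPLE, phys, prot);	/* set_pte_phys() wires a TLB entry */
	/* ... use the mapping ... */
	__clear_fixmap(FIX_EXAMPLE, prot);	/* tlb_unwire_entry(), then PTE cleared */

Since tlb_unwire_entry() takes no argument, the interface is presumably stack-like, so wired mappings should be torn down in reverse order of creation.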
@@ -120,7 +129,13 @@ void __init page_table_range_init(unsigned long start, unsigned long end,
 	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
 		pud = (pud_t *)pgd;
 		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
+#ifdef __PAGETABLE_PMD_FOLDED
 			pmd = (pmd_t *)pud;
+#else
+			pmd = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
+			pud_populate(&init_mm, pud, pmd);
+			pmd += k;
+#endif
 			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
 				if (pmd_none(*pmd)) {
 					pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
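The new conditional distinguishes the two page-table layouts: with a folded PMD (the common two-level sh configuration) the pmd_t slot really is the pud re-typed, so the old assignment stands, while a real three-level layout needs a backing page allocated and hooked in via pud_populate(). The "pmd += k" then resumes iteration at the slot covering vaddr rather than at slot 0; this relies on k having been initialized near the top of the function to the PMD index of the starting address, along the lines of (a paraphrase of the surrounding setup, not new code in this hunk):

	i = pgd_index(vaddr);	/* outermost table slot */
	j = pud_index(vaddr);
	k = pmd_index(vaddr);	/* slot within the (possibly fresh) pmd page */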
@@ -182,9 +197,6 @@ void __init paging_init(void)
 	}
 
 	free_area_init_nodes(max_zone_pfns);
-
-	/* Set up the uncached fixmap */
-	set_fixmap_nocache(FIX_UNCACHED, __pa(&__uncached_start));
 }
 
 /*
@@ -195,6 +207,8 @@ static void __init iommu_init(void)
 	no_iommu_init();
 }
 
+unsigned int mem_init_done = 0;
+
 void __init mem_init(void)
 {
 	int codesize, datasize, initsize;
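The new mem_init_done flag gives code that can run both early and late in boot a cheap test for whether the core allocators are available yet. An illustrative consumer (every name here except mem_init_done is an assumption):

	/* Pick an allocator based on how far boot has progressed. */
	static void *example_alloc(unsigned long size)
	{
		if (mem_init_done)
			return kmalloc(size, GFP_KERNEL);	/* slab is up */
		return alloc_bootmem(size);			/* still pre-mem_init() */
	}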
@@ -231,6 +245,8 @@ void __init mem_init(void)
 	memset(empty_zero_page, 0, PAGE_SIZE);
 	__flush_wback_region(empty_zero_page, PAGE_SIZE);
 
+	vsyscall_init();
+
 	codesize = (unsigned long) &_etext - (unsigned long) &_text;
 	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
 	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
@@ -243,8 +259,48 @@ void __init mem_init(void)
 		datasize >> 10,
 		initsize >> 10);
 
-	/* Initialize the vDSO */
-	vsyscall_init();
+	printk(KERN_INFO "virtual kernel memory layout:\n"
+		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
+#ifdef CONFIG_HIGHMEM
+		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
+#endif
+		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
+		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB) (cached)\n"
+#ifdef CONFIG_UNCACHED_MAPPING
+		"            : 0x%08lx - 0x%08lx   (%4ld MB) (uncached)\n"
+#endif
+		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
+		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
+		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
+		FIXADDR_START, FIXADDR_TOP,
+		(FIXADDR_TOP - FIXADDR_START) >> 10,
+
+#ifdef CONFIG_HIGHMEM
+		PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
+		(LAST_PKMAP*PAGE_SIZE) >> 10,
+#endif
+
+		(unsigned long)VMALLOC_START, VMALLOC_END,
+		(VMALLOC_END - VMALLOC_START) >> 20,
+
+		(unsigned long)memory_start, (unsigned long)high_memory,
+		((unsigned long)high_memory - (unsigned long)memory_start) >> 20,
+
+#ifdef CONFIG_UNCACHED_MAPPING
+		uncached_start, uncached_end, uncached_size >> 20,
+#endif
+
+		(unsigned long)&__init_begin, (unsigned long)&__init_end,
+		((unsigned long)&__init_end -
+		 (unsigned long)&__init_begin) >> 10,
+
+		(unsigned long)&_etext, (unsigned long)&_edata,
+		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
+
+		(unsigned long)&_text, (unsigned long)&_etext,
+		((unsigned long)&_etext - (unsigned long)&_text) >> 10);
+
+	mem_init_done = 1;
 }
 
 void free_initmem(void)
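The replacement printk mirrors the "virtual kernel memory layout" banner printed by other architectures, with >> 10 and >> 20 converting byte spans to kB and MB. Because both the format string and the argument list are spliced together under preprocessor conditionals, every config option has to extend the two halves in lockstep; a hypothetical extra region (CONFIG_EXAMPLE_REGION, example_start/example_end are assumptions) would look like:

	printk(KERN_INFO "virtual kernel memory layout:\n"
	#ifdef CONFIG_EXAMPLE_REGION
	       "    exampl  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
	#endif
	       "    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n",
	#ifdef CONFIG_EXAMPLE_REGION
	       example_start, example_end,
	       (example_end - example_start) >> 10,
	#endif
	       start, end, (end - start) >> 20);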
@@ -277,35 +333,6 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 }
 #endif
 
-#if THREAD_SHIFT < PAGE_SHIFT
-static struct kmem_cache *thread_info_cache;
-
-struct thread_info *alloc_thread_info(struct task_struct *tsk)
-{
-	struct thread_info *ti;
-
-	ti = kmem_cache_alloc(thread_info_cache, GFP_KERNEL);
-	if (unlikely(ti == NULL))
-		return NULL;
-#ifdef CONFIG_DEBUG_STACK_USAGE
-	memset(ti, 0, THREAD_SIZE);
-#endif
-	return ti;
-}
-
-void free_thread_info(struct thread_info *ti)
-{
-	kmem_cache_free(thread_info_cache, ti);
-}
-
-void thread_info_cache_init(void)
-{
-	thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
-					      THREAD_SIZE, 0, NULL);
-	BUG_ON(thread_info_cache == NULL);
-}
-#endif /* THREAD_SHIFT < PAGE_SHIFT */
-
 #ifdef CONFIG_MEMORY_HOTPLUG
 int arch_add_memory(int nid, u64 start, u64 size)
 {
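The THREAD_SHIFT < PAGE_SHIFT thread_info allocator leaves this file (this diff alone only shows the removal; presumably it was relocated in the same series rather than dropped). The guard exists because stacks smaller than a page would waste memory if taken from the page allocator, so a THREAD_SIZE-sized, THREAD_SIZE-aligned kmem cache is used instead, which also keeps the usual mask trick for locating thread_info from a stack pointer valid:

	/* THREAD_SIZE alignment means the containing thread_info is found by
	 * masking any stack address, even for sub-page stacks:
	 */
	struct thread_info *ti = (struct thread_info *)(sp & ~(THREAD_SIZE - 1));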
@@ -336,10 +363,3 @@ EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
 #endif
 
 #endif /* CONFIG_MEMORY_HOTPLUG */
-
-#ifdef CONFIG_PMB
-int __in_29bit_mode(void)
-{
-	return !(ctrl_inl(PMB_PASCR) & PASCR_SE);
-}
-#endif /* CONFIG_PMB */
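__in_29bit_mode() also moves out of init.c (again, presumably to live with the rest of the PMB code). It tested the SE bit in PASCR to tell whether the CPU is running in legacy 29-bit physical mode or in 32-bit extended mode, and callers use it as a predicate along the lines of (illustrative):

	if (__in_29bit_mode())
		return;	/* 29-bit physical mode: no PMB remapping to set up */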