author | Andrea Bastoni <bastoni@cs.unc.edu> | 2010-05-30 19:16:45 -0400
committer | Andrea Bastoni <bastoni@cs.unc.edu> | 2010-05-30 19:16:45 -0400
commit | ada47b5fe13d89735805b566185f4885f5a3f750 (patch)
tree | 644b88f8a71896307d71438e9b3af49126ffb22b /arch/sh/mm/init.c
parent | 43e98717ad40a4ae64545b5ba047c7b86aa44f4f (diff)
parent | 3280f21d43ee541f97f8cda5792150d2dbec20d5 (diff)
Merge branch 'wip-2.6.34' into old-private-master (archived-private-master)
Diffstat (limited to 'arch/sh/mm/init.c')
-rw-r--r-- | arch/sh/mm/init.c | 170
1 files changed, 105 insertions, 65 deletions
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 8173e38afd38..c505de61a5ca 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -10,35 +10,25 @@
 #include <linux/mm.h>
 #include <linux/swap.h>
 #include <linux/init.h>
+#include <linux/gfp.h>
 #include <linux/bootmem.h>
 #include <linux/proc_fs.h>
 #include <linux/pagemap.h>
 #include <linux/percpu.h>
 #include <linux/io.h>
+#include <linux/dma-mapping.h>
 #include <asm/mmu_context.h>
 #include <asm/tlb.h>
 #include <asm/cacheflush.h>
 #include <asm/sections.h>
 #include <asm/cache.h>
+#include <asm/sizes.h>
 
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 pgd_t swapper_pg_dir[PTRS_PER_PGD];
 
-#ifdef CONFIG_SUPERH32
-/*
- * Handle trivial transitions between cached and uncached
- * segments, making use of the 1:1 mapping relationship in
- * 512MB lowmem.
- *
- * This is the offset of the uncached section from its cached alias.
- * Default value only valid in 29 bit mode, in 32bit mode will be
- * overridden in pmb_init.
- */
-unsigned long cached_to_uncached = P2SEG - P1SEG;
-#endif
-
 #ifdef CONFIG_MMU
-static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
+static pte_t *__get_pte_phys(unsigned long addr)
 {
 	pgd_t *pgd;
 	pud_t *pud;
@@ -48,22 +38,30 @@ static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
 	pgd = pgd_offset_k(addr);
 	if (pgd_none(*pgd)) {
 		pgd_ERROR(*pgd);
-		return;
+		return NULL;
 	}
 
 	pud = pud_alloc(NULL, pgd, addr);
 	if (unlikely(!pud)) {
 		pud_ERROR(*pud);
-		return;
+		return NULL;
 	}
 
 	pmd = pmd_alloc(NULL, pud, addr);
 	if (unlikely(!pmd)) {
 		pmd_ERROR(*pmd);
-		return;
+		return NULL;
 	}
 
 	pte = pte_offset_kernel(pmd, addr);
+	return pte;
+}
+
+static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
+{
+	pte_t *pte;
+
+	pte = __get_pte_phys(addr);
 	if (!pte_none(*pte)) {
 		pte_ERROR(*pte);
 		return;
@@ -71,23 +69,24 @@ static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
 
 	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
 	local_flush_tlb_one(get_asid(), addr);
+
+	if (pgprot_val(prot) & _PAGE_WIRED)
+		tlb_wire_entry(NULL, addr, *pte);
+}
+
+static void clear_pte_phys(unsigned long addr, pgprot_t prot)
+{
+	pte_t *pte;
+
+	pte = __get_pte_phys(addr);
+
+	if (pgprot_val(prot) & _PAGE_WIRED)
+		tlb_unwire_entry();
+
+	set_pte(pte, pfn_pte(0, __pgprot(0)));
+	local_flush_tlb_one(get_asid(), addr);
 }
 
-/*
- * As a performance optimization, other platforms preserve the fixmap mapping
- * across a context switch, we don't presently do this, but this could be done
- * in a similar fashion as to the wired TLB interface that sh64 uses (by way
- * of the memory mapped UTLB configuration) -- this unfortunately forces us to
- * give up a TLB entry for each mapping we want to preserve. While this may be
- * viable for a small number of fixmaps, it's not particularly useful for
- * everything and needs to be carefully evaluated. (ie, we may want this for
- * the vsyscall page).
- *
- * XXX: Perhaps add a _PAGE_WIRED flag or something similar that we can pass
- * in at __set_fixmap() time to determine the appropriate behavior to follow.
- *
- * -- PFM.
- */
 void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
 {
 	unsigned long address = __fix_to_virt(idx);
@@ -100,6 +99,18 @@ void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
 	set_pte_phys(address, phys, prot);
 }
 
+void __clear_fixmap(enum fixed_addresses idx, pgprot_t prot)
+{
+	unsigned long address = __fix_to_virt(idx);
+
+	if (idx >= __end_of_fixed_addresses) {
+		BUG();
+		return;
+	}
+
+	clear_pte_phys(address, prot);
+}
+
 void __init page_table_range_init(unsigned long start, unsigned long end,
 					 pgd_t *pgd_base)
 {
@@ -119,7 +130,13 @@ void __init page_table_range_init(unsigned long start, unsigned long end,
 	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
 		pud = (pud_t *)pgd;
 		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
+#ifdef __PAGETABLE_PMD_FOLDED
 			pmd = (pmd_t *)pud;
+#else
+			pmd = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
+			pud_populate(&init_mm, pud, pmd);
+			pmd += k;
+#endif
 			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
 				if (pmd_none(*pmd)) {
 					pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
@@ -181,16 +198,25 @@ void __init paging_init(void)
 	}
 
 	free_area_init_nodes(max_zone_pfns);
+}
 
-	/* Set up the uncached fixmap */
-	set_fixmap_nocache(FIX_UNCACHED, __pa(&__uncached_start));
+/*
+ * Early initialization for any I/O MMUs we might have.
+ */
+static void __init iommu_init(void)
+{
+	no_iommu_init();
 }
 
+unsigned int mem_init_done = 0;
+
 void __init mem_init(void)
 {
 	int codesize, datasize, initsize;
 	int nid;
 
+	iommu_init();
+
 	num_physpages = 0;
 	high_memory = NULL;
 
@@ -220,6 +246,8 @@ void __init mem_init(void)
 	memset(empty_zero_page, 0, PAGE_SIZE);
 	__flush_wback_region(empty_zero_page, PAGE_SIZE);
 
+	vsyscall_init();
+
 	codesize = (unsigned long) &_etext - (unsigned long) &_text;
 	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
 	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
@@ -232,8 +260,48 @@ void __init mem_init(void)
 		datasize >> 10,
 		initsize >> 10);
 
-	/* Initialize the vDSO */
-	vsyscall_init();
+	printk(KERN_INFO "virtual kernel memory layout:\n"
+		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
+#ifdef CONFIG_HIGHMEM
+		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
+#endif
+		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
+		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB) (cached)\n"
+#ifdef CONFIG_UNCACHED_MAPPING
+		"            : 0x%08lx - 0x%08lx   (%4ld MB) (uncached)\n"
+#endif
+		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
+		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
+		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
+		FIXADDR_START, FIXADDR_TOP,
+		(FIXADDR_TOP - FIXADDR_START) >> 10,
+
+#ifdef CONFIG_HIGHMEM
+		PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
+		(LAST_PKMAP*PAGE_SIZE) >> 10,
+#endif
+
+		(unsigned long)VMALLOC_START, VMALLOC_END,
+		(VMALLOC_END - VMALLOC_START) >> 20,
+
+		(unsigned long)memory_start, (unsigned long)high_memory,
+		((unsigned long)high_memory - (unsigned long)memory_start) >> 20,
+
+#ifdef CONFIG_UNCACHED_MAPPING
+		uncached_start, uncached_end, uncached_size >> 20,
+#endif
+
+		(unsigned long)&__init_begin, (unsigned long)&__init_end,
+		((unsigned long)&__init_end -
+		 (unsigned long)&__init_begin) >> 10,
+
+		(unsigned long)&_etext, (unsigned long)&_edata,
+		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
+
+		(unsigned long)&_text, (unsigned long)&_etext,
+		((unsigned long)&_etext - (unsigned long)&_text) >> 10);
+
+	mem_init_done = 1;
 }
 
 void free_initmem(void)
@@ -266,35 +334,6 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 }
 #endif
 
-#if THREAD_SHIFT < PAGE_SHIFT
-static struct kmem_cache *thread_info_cache;
-
-struct thread_info *alloc_thread_info(struct task_struct *tsk)
-{
-	struct thread_info *ti;
-
-	ti = kmem_cache_alloc(thread_info_cache, GFP_KERNEL);
-	if (unlikely(ti == NULL))
-		return NULL;
-#ifdef CONFIG_DEBUG_STACK_USAGE
-	memset(ti, 0, THREAD_SIZE);
-#endif
-	return ti;
-}
-
-void free_thread_info(struct thread_info *ti)
-{
-	kmem_cache_free(thread_info_cache, ti);
-}
-
-void thread_info_cache_init(void)
-{
-	thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
-					      THREAD_SIZE, 0, NULL);
-	BUG_ON(thread_info_cache == NULL);
-}
-#endif /* THREAD_SHIFT < PAGE_SHIFT */
-
 #ifdef CONFIG_MEMORY_HOTPLUG
 int arch_add_memory(int nid, u64 start, u64 size)
 {
@@ -323,4 +362,5 @@ int memory_add_physaddr_to_nid(u64 addr)
 }
 EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
 #endif
+
 #endif /* CONFIG_MEMORY_HOTPLUG */
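
For orientation only, not part of the commit: the hunks above introduce `__get_pte_phys()`, `clear_pte_phys()`, `__clear_fixmap()` and `_PAGE_WIRED` handling, so a fixmap entry can be backed by a wired TLB slot and later torn down. Below is a minimal sketch of how arch code might drive those helpers; `FIX_EXAMPLE`, `example_wired_fixmap()` and the `PAGE_KERNEL | _PAGE_WIRED` protection are illustrative assumptions, not taken from this diff.

```c
#include <asm/fixmap.h>		/* __set_fixmap(), __clear_fixmap(), fix_to_virt() -- per this diff */
#include <asm/pgtable.h>	/* pgprot_t, PAGE_KERNEL, _PAGE_WIRED */

/*
 * Hypothetical helper: map one physical page at a fixmap slot and pin its
 * translation in a wired TLB entry, then release both again.
 *
 * FIX_EXAMPLE is a placeholder index; a real caller would add its own entry
 * to enum fixed_addresses in <asm/fixmap.h>.
 */
static void example_wired_fixmap(unsigned long phys)
{
	pgprot_t prot = __pgprot(pgprot_val(PAGE_KERNEL) | _PAGE_WIRED);
	void *vaddr;

	/* set_pte_phys() sees _PAGE_WIRED and also calls tlb_wire_entry() */
	__set_fixmap(FIX_EXAMPLE, phys, prot);
	vaddr = (void *)fix_to_virt(FIX_EXAMPLE);

	/* ... access the page through vaddr; the wired entry survives TLB flushes ... */

	/* clear_pte_phys() checks the same flag and calls tlb_unwire_entry() first */
	__clear_fixmap(FIX_EXAMPLE, prot);
}
```

The design point the diff encodes is that the pgprot passed to `__clear_fixmap()` must carry the same `_PAGE_WIRED` bit used at map time, since `clear_pte_phys()` uses it to decide whether a wired TLB entry has to be released before the PTE is cleared.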