Diffstat (limited to 'arch/tile/mm/init.c')
-rw-r--r--	arch/tile/mm/init.c	68
1 file changed, 3 insertions, 65 deletions
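The change below strips chip-feature preprocessor guards that can no longer vary: with TILE64 support gone, tests such as CHIP_HAS_CBOX_HOME_MAP() and CHIP_HAS_COHERENT_LOCAL_CACHE() evaluate the same way on every remaining chip, so the #if/#endif scaffolding is dead. A minimal sketch of the idiom being removed, assuming <arch/chip.h>-style definitions (the constant shown is illustrative, not the verbatim header):

/*
 * Assumed form of the feature-test macros: <arch/chip.h> expands them
 * to per-chip constants, so the preprocessor resolves each guard at
 * build time.  On the remaining tilepro/tilegx targets the guard below
 * is constant-true, making the #if/#endif pair pure noise.
 */
#define CHIP_HAS_CBOX_HOME_MAP()	1

#if CHIP_HAS_CBOX_HOME_MAP()		/* always taken: guard is dead */
	/* ... hash-for-home configuration ... */
#endif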
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
index c8f58c12866d..22e41cf5a2a9 100644
--- a/arch/tile/mm/init.c
+++ b/arch/tile/mm/init.c
@@ -106,10 +106,8 @@ pte_t *get_prealloc_pte(unsigned long pfn)
  */
 static int initial_heap_home(void)
 {
-#if CHIP_HAS_CBOX_HOME_MAP()
 	if (hash_default)
 		return PAGE_HOME_HASH;
-#endif
 	return smp_processor_id();
 }
 
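For reference, initial_heap_home() as it reads after this hunk, reconstructed from the new-side context lines (a sketch of the post-patch state, not the verbatim file):

/* Pick the home cache for initial heap pages. */
static int initial_heap_home(void)
{
	if (hash_default)
		return PAGE_HOME_HASH;	/* hash-for-home is enabled */
	return smp_processor_id();	/* otherwise home on this cpu */
}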
@@ -190,14 +188,11 @@ static void __init page_table_range_init(unsigned long start,
 }
 
 
-#if CHIP_HAS_CBOX_HOME_MAP()
-
 static int __initdata ktext_hash = 1;	/* .text pages */
 static int __initdata kdata_hash = 1;	/* .data and .bss pages */
 int __write_once hash_default = 1;	/* kernel allocator pages */
 EXPORT_SYMBOL(hash_default);
 int __write_once kstack_hash = 1;	/* if no homecaching, use h4h */
-#endif /* CHIP_HAS_CBOX_HOME_MAP */
 
 /*
  * CPUs to use to for striping the pages of kernel data.  If hash-for-home
@@ -215,14 +210,12 @@ int __write_once kdata_huge;	/* if no homecaching, small pages */
 static pgprot_t __init construct_pgprot(pgprot_t prot, int home)
 {
 	prot = pte_set_home(prot, home);
-#if CHIP_HAS_CBOX_HOME_MAP()
 	if (home == PAGE_HOME_IMMUTABLE) {
 		if (ktext_hash)
 			prot = hv_pte_set_mode(prot, HV_PTE_MODE_CACHE_HASH_L3);
 		else
 			prot = hv_pte_set_mode(prot, HV_PTE_MODE_CACHE_NO_L3);
 	}
-#endif
 	return prot;
 }
 
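Likewise, construct_pgprot() after the guard is dropped, reconstructed from the new-side lines; the PAGE_HOME_IMMUTABLE branch is now compiled unconditionally and only the ktext_hash runtime flag selects the cache mode:

/* Apply the home and, for immutable pages, the L3 cache mode. */
static pgprot_t __init construct_pgprot(pgprot_t prot, int home)
{
	prot = pte_set_home(prot, home);
	if (home == PAGE_HOME_IMMUTABLE) {
		if (ktext_hash)
			prot = hv_pte_set_mode(prot, HV_PTE_MODE_CACHE_HASH_L3);
		else
			prot = hv_pte_set_mode(prot, HV_PTE_MODE_CACHE_NO_L3);
	}
	return prot;
}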
@@ -236,20 +229,15 @@ static pgprot_t __init init_pgprot(ulong address)
 	unsigned long page;
 	enum { CODE_DELTA = MEM_SV_START - PAGE_OFFSET };
 
-#if CHIP_HAS_CBOX_HOME_MAP()
 	/* For kdata=huge, everything is just hash-for-home. */
 	if (kdata_huge)
 		return construct_pgprot(PAGE_KERNEL, PAGE_HOME_HASH);
-#endif
 
 	/* We map the aliased pages of permanent text inaccessible. */
 	if (address < (ulong) _sinittext - CODE_DELTA)
 		return PAGE_NONE;
 
-	/*
-	 * We map read-only data non-coherent for performance.  We could
-	 * use neighborhood caching on TILE64, but it's not clear it's a win.
-	 */
+	/* We map read-only data non-coherent for performance. */
 	if ((address >= (ulong) __start_rodata &&
 	     address < (ulong) __end_rodata) ||
 	    address == (ulong) empty_zero_page) {
@@ -257,12 +245,10 @@ static pgprot_t __init init_pgprot(ulong address)
 	}
 
 #ifndef __tilegx__
-#if !ATOMIC_LOCKS_FOUND_VIA_TABLE()
 	/* Force the atomic_locks[] array page to be hash-for-home. */
 	if (address == (ulong) atomic_locks)
 		return construct_pgprot(PAGE_KERNEL, PAGE_HOME_HASH);
 #endif
-#endif
 
 	/*
 	 * Everything else that isn't data or bss is heap, so mark it
@@ -280,11 +266,9 @@ static pgprot_t __init init_pgprot(ulong address)
 	if (address >= (ulong) _end || address < (ulong) _einitdata)
 		return construct_pgprot(PAGE_KERNEL, initial_heap_home());
 
-#if CHIP_HAS_CBOX_HOME_MAP()
 	/* Use hash-for-home if requested for data/bss. */
 	if (kdata_hash)
 		return construct_pgprot(PAGE_KERNEL, PAGE_HOME_HASH);
-#endif
 
 	/*
 	 * Make the w1data homed like heap to start with, to avoid
@@ -311,11 +295,9 @@ static pgprot_t __init init_pgprot(ulong address)
 		if (page == (ulong)empty_zero_page)
 			continue;
 #ifndef __tilegx__
-#if !ATOMIC_LOCKS_FOUND_VIA_TABLE()
 		if (page == (ulong)atomic_locks)
 			continue;
 #endif
-#endif
 		cpu = cpumask_next(cpu, &kdata_mask);
 		if (cpu == NR_CPUS)
 			cpu = cpumask_first(&kdata_mask);
@@ -358,7 +340,7 @@ static int __init setup_ktext(char *str)
 
 	ktext_arg_seen = 1;
 
-	/* Default setting on Tile64: use a huge page */
+	/* Default setting: use a huge page */
 	if (strcmp(str, "huge") == 0)
 		pr_info("ktext: using one huge locally cached page\n");
 
@@ -404,10 +386,8 @@ static inline pgprot_t ktext_set_nocache(pgprot_t prot)
 {
 	if (!ktext_nocache)
 		prot = hv_pte_set_nc(prot);
-#if CHIP_HAS_NC_AND_NOALLOC_BITS()
 	else
 		prot = hv_pte_set_no_alloc_l2(prot);
-#endif
 	return prot;
 }
 
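And ktext_set_nocache() after this hunk, again reconstructed from the new-side lines; the else branch, formerly guarded by CHIP_HAS_NC_AND_NOALLOC_BITS(), is now always compiled in:

/* Adjust kernel-text protections per the ktext_nocache boot flag. */
static inline pgprot_t ktext_set_nocache(pgprot_t prot)
{
	if (!ktext_nocache)
		prot = hv_pte_set_nc(prot);
	else
		prot = hv_pte_set_no_alloc_l2(prot);
	return prot;
}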
@@ -440,7 +420,6 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
 	struct cpumask kstripe_mask;
 	int rc, i;
 
-#if CHIP_HAS_CBOX_HOME_MAP()
 	if (ktext_arg_seen && ktext_hash) {
 		pr_warning("warning: \"ktext\" boot argument ignored"
 			   " if \"kcache_hash\" sets up text hash-for-home\n");
@@ -457,7 +436,6 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
 			   " kcache_hash=all or =allbutstack\n");
 		kdata_huge = 0;
 	}
-#endif
 
 	/*
 	 * Set up a mask for cpus to use for kernel striping.
@@ -585,13 +563,11 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
 		} else {
 			pte_t pteval = pfn_pte(0, PAGE_KERNEL_EXEC);
 			pteval = pte_mkhuge(pteval);
-#if CHIP_HAS_CBOX_HOME_MAP()
 			if (ktext_hash) {
 				pteval = hv_pte_set_mode(pteval,
 					HV_PTE_MODE_CACHE_HASH_L3);
 				pteval = ktext_set_nocache(pteval);
 			} else
-#endif /* CHIP_HAS_CBOX_HOME_MAP() */
 			if (cpumask_weight(&ktext_mask) == 1) {
 				pteval = set_remote_cache_cpu(pteval,
 					cpumask_first(&ktext_mask));
@@ -938,26 +914,6 @@ void __init pgtable_cache_init(void)
 		panic("pgtable_cache_init(): Cannot create pgd cache");
 }
 
-#if !CHIP_HAS_COHERENT_LOCAL_CACHE()
-/*
- * The __w1data area holds data that is only written during initialization,
- * and is read-only and thus freely cacheable thereafter.  Fix the page
- * table entries that cover that region accordingly.
- */
-static void mark_w1data_ro(void)
-{
-	/* Loop over page table entries */
-	unsigned long addr = (unsigned long)__w1data_begin;
-	BUG_ON((addr & (PAGE_SIZE-1)) != 0);
-	for (; addr <= (unsigned long)__w1data_end - 1; addr += PAGE_SIZE) {
-		unsigned long pfn = kaddr_to_pfn((void *)addr);
-		pte_t *ptep = virt_to_kpte(addr);
-		BUG_ON(pte_huge(*ptep));	/* not relevant for kdata_huge */
-		set_pte_at(&init_mm, addr, ptep, pfn_pte(pfn, PAGE_KERNEL_RO));
-	}
-}
-#endif
-
 #ifdef CONFIG_DEBUG_PAGEALLOC
 static long __write_once initfree;
 #else
@@ -1026,10 +982,7 @@ void free_initmem(void)
 	/*
 	 * Evict the dirty initdata on the boot cpu, evict the w1data
 	 * wherever it's homed, and evict all the init code everywhere.
-	 * We are guaranteed that no one will touch the init pages any
-	 * more, and although other cpus may be touching the w1data,
-	 * we only actually change the caching on tile64, which won't
-	 * be keeping local copies in the other tiles' caches anyway.
+	 * We are guaranteed that no one will touch the init pages any more.
 	 */
 	homecache_evict(&cpu_cacheable_map);
 
@@ -1045,21 +998,6 @@ void free_initmem(void)
 	free_init_pages("unused kernel text",
 			(unsigned long)_sinittext - text_delta,
 			(unsigned long)_einittext - text_delta);
-
-#if !CHIP_HAS_COHERENT_LOCAL_CACHE()
-	/*
-	 * Upgrade the .w1data section to globally cached.
-	 * We don't do this on tilepro, since the cache architecture
-	 * pretty much makes it irrelevant, and in any case we end
-	 * up having racing issues with other tiles that may touch
-	 * the data after we flush the cache but before we update
-	 * the PTEs and flush the TLBs, causing sharer shootdowns
-	 * later.  Even though this is to clean data, it seems like
-	 * an unnecessary complication.
-	 */
-	mark_w1data_ro();
-#endif
-
 	/* Do a global TLB flush so everyone sees the changes. */
 	flush_tlb_all();
 }