author		Chris Metcalf <cmetcalf@tilera.com>	2013-08-15 16:29:02 -0400
committer	Chris Metcalf <cmetcalf@tilera.com>	2013-09-03 14:53:32 -0400
commit		ce61cdc270a5e0dd18057bbf29bd3471abccbda8 (patch)
tree		354d229299b6b02d98f40e5c650e6ddbc3411b7b /arch/tile/mm
parent		d7c9661115fd23b4dabb710b3080dd9919dfa891 (diff)
tile: make __write_once a synonym for __read_mostly
This was really only useful for TILE64 when we mapped the kernel data with small pages. Now we use a huge page and we really don't want to map different parts of the kernel data in different ways.

We retain the __write_once name in case we want to bring it back to life at some point in the future.

Note that this change uncovered a latent bug where the "smp_topology" variable happened to always be aligned mod 8 so we could store two "int" values at once, but when we eliminated __write_once it ended up only aligned mod 4. Fix with an explicit annotation.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
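The shape of the change described above, sketched in kernel C for clarity. This is an illustration, not the verbatim commit: the old ".w1data" section name is inferred from the __w1data_begin/__w1data_end symbols in the diff below, and the exact form of the alignment annotation is an assumption.

	/* Before: __write_once gave data its own section so it could be
	 * homed and remapped separately, which was worthwhile only on
	 * TILE64, where kernel data was mapped with small pages
	 * (assumed old definition): */
	#define __write_once __attribute__((__section__(".w1data")))

	/* After: write-once data is treated the same as read-mostly data: */
	#define __write_once __read_mostly

	/* The latent bug: smp_topology was 8-byte aligned only by accident
	 * of its old section placement, and callers stored two ints into
	 * it with a single 8-byte access.  An explicit annotation makes
	 * the required alignment part of the declaration (assumed form): */
	HV_Topology smp_topology __write_once __aligned(8);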
Diffstat (limited to 'arch/tile/mm')
-rw-r--r--	arch/tile/mm/init.c	13
1 file changed, 2 insertions(+), 11 deletions(-)
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
index 22e41cf5a2a9..4e316deb92fd 100644
--- a/arch/tile/mm/init.c
+++ b/arch/tile/mm/init.c
@@ -271,21 +271,13 @@ static pgprot_t __init init_pgprot(ulong address)
 		return construct_pgprot(PAGE_KERNEL, PAGE_HOME_HASH);
 
 	/*
-	 * Make the w1data homed like heap to start with, to avoid
-	 * making it part of the page-striped data area when we're just
-	 * going to convert it to read-only soon anyway.
-	 */
-	if (address >= (ulong)__w1data_begin && address < (ulong)__w1data_end)
-		return construct_pgprot(PAGE_KERNEL, initial_heap_home());
-
-	/*
 	 * Otherwise we just hand out consecutive cpus.  To avoid
 	 * requiring this function to hold state, we just walk forward from
 	 * _sdata by PAGE_SIZE, skipping the readonly and init data, to reach
 	 * the requested address, while walking cpu home around kdata_mask.
 	 * This is typically no more than a dozen or so iterations.
 	 */
-	page = (((ulong)__w1data_end) + PAGE_SIZE - 1) & PAGE_MASK;
+	page = (((ulong)__end_rodata) + PAGE_SIZE - 1) & PAGE_MASK;
 	BUG_ON(address < page || address >= (ulong)_end);
 	cpu = cpumask_first(&kdata_mask);
 	for (; page < address; page += PAGE_SIZE) {
@@ -980,8 +972,7 @@ void free_initmem(void)
 	const unsigned long text_delta = MEM_SV_START - PAGE_OFFSET;
 
 	/*
-	 * Evict the dirty initdata on the boot cpu, evict the w1data
-	 * wherever it's homed, and evict all the init code everywhere.
+	 * Evict the cache on all cores to avoid incoherence.
 	 * We are guaranteed that no one will touch the init pages any more.
 	 */
 	homecache_evict(&cpu_cacheable_map);
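For readers following the first hunk: the page-striping walk it modifies continues past the point where the hunk is truncated, roughly as below. This is a sketch of the behavior the in-diff comment describes (handing out consecutive cpu homes around kdata_mask), not the verbatim remainder of init_pgprot(); the wrap-around test against NR_CPUS is an assumption.

	cpu = cpumask_first(&kdata_mask);
	for (; page < address; page += PAGE_SIZE) {
		/* Give each successive page the next cpu in kdata_mask... */
		cpu = cpumask_next(cpu, &kdata_mask);
		/* ...wrapping back to the first cpu once the mask is exhausted. */
		if (cpu >= NR_CPUS)
			cpu = cpumask_first(&kdata_mask);
	}
	/* The cpu reached at the requested address becomes that page's home. */
	return construct_pgprot(PAGE_KERNEL, cpu);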