author	Jeff Garzik <jeff@garzik.org>	2007-02-17 15:11:43 -0500
committer	Jeff Garzik <jeff@garzik.org>	2007-02-17 15:11:43 -0500
commit	f630fe2817601314b2eb7ca5ddc23c7834646731 (patch)
tree	3bfb4939b7bbc3859575ca8b58fa3f929b015941 /arch/sh/mm
parent	48c871c1f6a7c7044dd76774fb469e65c7e2e4e8 (diff)
parent	8a03d9a498eaf02c8a118752050a5154852c13bf (diff)
Merge branch 'master' into upstream
Diffstat (limited to 'arch/sh/mm')
 arch/sh/mm/Kconfig         |   5
 arch/sh/mm/cache-debugfs.c |   4
 arch/sh/mm/cache-sh3.c     |   8
 arch/sh/mm/cache-sh4.c     |  77
 arch/sh/mm/cache-sh7705.c  |  29
 arch/sh/mm/fault.c         |  87
 arch/sh/mm/init.c          |   7
 arch/sh/mm/ioremap.c       |   6
 arch/sh/mm/pg-sh4.c        |  28
 arch/sh/mm/pg-sh7705.c     |  37
 arch/sh/mm/tlb-flush.c     | 101
 arch/sh/mm/tlb-nommu.c     |  19
 arch/sh/mm/tlb-sh3.c       |  67
 arch/sh/mm/tlb-sh4.c       |  70
 14 files changed, 257 insertions(+), 288 deletions(-)
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 29f4ee35c6dc..6b0d28ac9241 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -20,7 +20,7 @@ config CPU_SH4
 	bool
 	select CPU_HAS_INTEVT
 	select CPU_HAS_SR_RB
-	select CPU_HAS_PTEA if !CPU_SUBTYPE_ST40
+	select CPU_HAS_PTEA if (!CPU_SUBTYPE_ST40 && !CPU_SH4A) || CPU_SHX2
 
 config CPU_SH4A
 	bool
@@ -72,6 +72,7 @@ config CPU_SUBTYPE_SH7705
 config CPU_SUBTYPE_SH7706
 	bool "Support SH7706 processor"
 	select CPU_SH3
+	select CPU_HAS_IPR_IRQ
 	help
 	  Select SH7706 if you have a 133 Mhz SH-3 HD6417706 CPU.
 
@@ -92,6 +93,7 @@ config CPU_SUBTYPE_SH7708
 config CPU_SUBTYPE_SH7709
 	bool "Support SH7709 processor"
 	select CPU_SH3
+	select CPU_HAS_IPR_IRQ
 	select CPU_HAS_PINT_IRQ
 	help
 	  Select SH7709 if you have a 80 Mhz SH-3 HD6417709 CPU.
@@ -149,6 +151,7 @@ config CPU_SUBTYPE_SH7760
 	bool "Support SH7760 processor"
 	select CPU_SH4
 	select CPU_HAS_INTC2_IRQ
+	select CPU_HAS_IPR_IRQ
 
 config CPU_SUBTYPE_SH4_202
 	bool "Support SH4-202 processor"
diff --git a/arch/sh/mm/cache-debugfs.c b/arch/sh/mm/cache-debugfs.c
index 909dcfa8c8c6..de6d2c9aa477 100644
--- a/arch/sh/mm/cache-debugfs.c
+++ b/arch/sh/mm/cache-debugfs.c
@@ -46,10 +46,10 @@ static int cache_seq_show(struct seq_file *file, void *iter)
 
 	if (cache_type == CACHE_TYPE_DCACHE) {
 		base = CACHE_OC_ADDRESS_ARRAY;
-		cache = &cpu_data->dcache;
+		cache = &current_cpu_data.dcache;
 	} else {
 		base = CACHE_IC_ADDRESS_ARRAY;
-		cache = &cpu_data->icache;
+		cache = &current_cpu_data.icache;
 	}
 
 	/*
diff --git a/arch/sh/mm/cache-sh3.c b/arch/sh/mm/cache-sh3.c
index 838731fc608d..6d1dbec08ad4 100644
--- a/arch/sh/mm/cache-sh3.c
+++ b/arch/sh/mm/cache-sh3.c
@@ -44,11 +44,11 @@ void __flush_wback_region(void *start, int size)
 
 	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
 		unsigned long addrstart = CACHE_OC_ADDRESS_ARRAY;
-		for (j = 0; j < cpu_data->dcache.ways; j++) {
+		for (j = 0; j < current_cpu_data.dcache.ways; j++) {
 			unsigned long data, addr, p;
 
 			p = __pa(v);
-			addr = addrstart | (v & cpu_data->dcache.entry_mask);
+			addr = addrstart | (v & current_cpu_data.dcache.entry_mask);
 			local_irq_save(flags);
 			data = ctrl_inl(addr);
 
@@ -60,7 +60,7 @@ void __flush_wback_region(void *start, int size)
 				break;
 			}
 			local_irq_restore(flags);
-			addrstart += cpu_data->dcache.way_incr;
+			addrstart += current_cpu_data.dcache.way_incr;
 		}
 	}
 }
@@ -85,7 +85,7 @@ void __flush_purge_region(void *start, int size)
 
 		data = (v & 0xfffffc00); /* _Virtual_ address, ~U, ~V */
 		addr = CACHE_OC_ADDRESS_ARRAY |
-			(v & cpu_data->dcache.entry_mask) | SH_CACHE_ASSOC;
+			(v & current_cpu_data.dcache.entry_mask) | SH_CACHE_ASSOC;
 		ctrl_outl(data, addr);
 	}
 }
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index c6955157c989..e0cd4b7f4aeb 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -54,21 +54,21 @@ static void __init emit_cache_params(void)
 	       ctrl_inl(CCN_CVR),
 	       ctrl_inl(CCN_PRR));
 	printk("I-cache : n_ways=%d n_sets=%d way_incr=%d\n",
-	       cpu_data->icache.ways,
-	       cpu_data->icache.sets,
-	       cpu_data->icache.way_incr);
+	       current_cpu_data.icache.ways,
+	       current_cpu_data.icache.sets,
+	       current_cpu_data.icache.way_incr);
 	printk("I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
-	       cpu_data->icache.entry_mask,
-	       cpu_data->icache.alias_mask,
-	       cpu_data->icache.n_aliases);
+	       current_cpu_data.icache.entry_mask,
+	       current_cpu_data.icache.alias_mask,
+	       current_cpu_data.icache.n_aliases);
 	printk("D-cache : n_ways=%d n_sets=%d way_incr=%d\n",
-	       cpu_data->dcache.ways,
-	       cpu_data->dcache.sets,
-	       cpu_data->dcache.way_incr);
+	       current_cpu_data.dcache.ways,
+	       current_cpu_data.dcache.sets,
+	       current_cpu_data.dcache.way_incr);
 	printk("D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
-	       cpu_data->dcache.entry_mask,
-	       cpu_data->dcache.alias_mask,
-	       cpu_data->dcache.n_aliases);
+	       current_cpu_data.dcache.entry_mask,
+	       current_cpu_data.dcache.alias_mask,
+	       current_cpu_data.dcache.n_aliases);
 
 	if (!__flush_dcache_segment_fn)
 		panic("unknown number of cache ways\n");
@@ -87,10 +87,10 @@ void __init p3_cache_init(void)
 {
 	int i;
 
-	compute_alias(&cpu_data->icache);
-	compute_alias(&cpu_data->dcache);
+	compute_alias(&current_cpu_data.icache);
+	compute_alias(&current_cpu_data.dcache);
 
-	switch (cpu_data->dcache.ways) {
+	switch (current_cpu_data.dcache.ways) {
 	case 1:
 		__flush_dcache_segment_fn = __flush_dcache_segment_1way;
 		break;
@@ -110,7 +110,7 @@ void __init p3_cache_init(void)
 	if (ioremap_page_range(P3SEG, P3SEG + (PAGE_SIZE * 4), 0, PAGE_KERNEL))
 		panic("%s failed.", __FUNCTION__);
 
-	for (i = 0; i < cpu_data->dcache.n_aliases; i++)
+	for (i = 0; i < current_cpu_data.dcache.n_aliases; i++)
 		mutex_init(&p3map_mutex[i]);
 }
 
@@ -200,13 +200,14 @@ void flush_cache_sigtramp(unsigned long addr)
 		      : /* no output */
 		      : "m" (__m(v)));
 
-	index = CACHE_IC_ADDRESS_ARRAY | (v & cpu_data->icache.entry_mask);
+	index = CACHE_IC_ADDRESS_ARRAY |
+			(v & current_cpu_data.icache.entry_mask);
 
 	local_irq_save(flags);
 	jump_to_P2();
 
-	for (i = 0; i < cpu_data->icache.ways;
-	     i++, index += cpu_data->icache.way_incr)
+	for (i = 0; i < current_cpu_data.icache.ways;
+	     i++, index += current_cpu_data.icache.way_incr)
 		ctrl_outl(0, index);	/* Clear out Valid-bit */
 
 	back_to_P1();
@@ -223,7 +224,7 @@ static inline void flush_cache_4096(unsigned long start,
 	 * All types of SH-4 require PC to be in P2 to operate on the I-cache.
 	 * Some types of SH-4 require PC to be in P2 to operate on the D-cache.
 	 */
-	if ((cpu_data->flags & CPU_HAS_P2_FLUSH_BUG) ||
+	if ((current_cpu_data.flags & CPU_HAS_P2_FLUSH_BUG) ||
 	    (start < CACHE_OC_ADDRESS_ARRAY))
 		exec_offset = 0x20000000;
 
@@ -236,16 +237,26 @@ static inline void flush_cache_4096(unsigned long start,
 /*
  * Write back & invalidate the D-cache of the page.
  * (To avoid "alias" issues)
+ *
+ * This uses a lazy write-back on UP, which is explicitly
+ * disabled on SMP.
  */
 void flush_dcache_page(struct page *page)
 {
-	if (test_bit(PG_mapped, &page->flags)) {
+#ifndef CONFIG_SMP
+	struct address_space *mapping = page_mapping(page);
+
+	if (mapping && !mapping_mapped(mapping))
+		set_bit(PG_dcache_dirty, &page->flags);
+	else
+#endif
+	{
 		unsigned long phys = PHYSADDR(page_address(page));
 		unsigned long addr = CACHE_OC_ADDRESS_ARRAY;
 		int i, n;
 
 		/* Loop all the D-cache */
-		n = cpu_data->dcache.n_aliases;
+		n = current_cpu_data.dcache.n_aliases;
 		for (i = 0; i < n; i++, addr += 4096)
 			flush_cache_4096(addr, phys);
 	}
@@ -277,7 +288,7 @@ static inline void flush_icache_all(void)
 
 void flush_dcache_all(void)
 {
-	(*__flush_dcache_segment_fn)(0UL, cpu_data->dcache.way_size);
+	(*__flush_dcache_segment_fn)(0UL, current_cpu_data.dcache.way_size);
 	wmb();
 }
 
@@ -291,8 +302,8 @@ static void __flush_cache_mm(struct mm_struct *mm, unsigned long start,
 			     unsigned long end)
 {
 	unsigned long d = 0, p = start & PAGE_MASK;
-	unsigned long alias_mask = cpu_data->dcache.alias_mask;
-	unsigned long n_aliases = cpu_data->dcache.n_aliases;
+	unsigned long alias_mask = current_cpu_data.dcache.alias_mask;
+	unsigned long n_aliases = current_cpu_data.dcache.n_aliases;
 	unsigned long select_bit;
 	unsigned long all_aliases_mask;
 	unsigned long addr_offset;
@@ -379,7 +390,7 @@ void flush_cache_mm(struct mm_struct *mm)
 	 * If cache is only 4k-per-way, there are never any 'aliases'. Since
 	 * the cache is physically tagged, the data can just be left in there.
 	 */
-	if (cpu_data->dcache.n_aliases == 0)
+	if (current_cpu_data.dcache.n_aliases == 0)
 		return;
 
 	/*
@@ -416,7 +427,7 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long address,
 	unsigned long phys = pfn << PAGE_SHIFT;
 	unsigned int alias_mask;
 
-	alias_mask = cpu_data->dcache.alias_mask;
+	alias_mask = current_cpu_data.dcache.alias_mask;
 
 	/* We only need to flush D-cache when we have alias */
 	if ((address^phys) & alias_mask) {
@@ -430,7 +441,7 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long address,
 				 phys);
 	}
 
-	alias_mask = cpu_data->icache.alias_mask;
+	alias_mask = current_cpu_data.icache.alias_mask;
 	if (vma->vm_flags & VM_EXEC) {
 		/*
 		 * Evict entries from the portion of the cache from which code
@@ -462,7 +473,7 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
 	 * If cache is only 4k-per-way, there are never any 'aliases'. Since
 	 * the cache is physically tagged, the data can just be left in there.
 	 */
-	if (cpu_data->dcache.n_aliases == 0)
+	if (current_cpu_data.dcache.n_aliases == 0)
 		return;
 
 	/*
@@ -523,7 +534,7 @@ static void __flush_cache_4096(unsigned long addr, unsigned long phys,
 	unsigned long a, ea, p;
 	unsigned long temp_pc;
 
-	dcache = &cpu_data->dcache;
+	dcache = &current_cpu_data.dcache;
 	/* Write this way for better assembly. */
 	way_count = dcache->ways;
 	way_incr = dcache->way_incr;
@@ -598,7 +609,7 @@ static void __flush_dcache_segment_1way(unsigned long start,
 	base_addr = ((base_addr >> 16) << 16);
 	base_addr |= start;
 
-	dcache = &cpu_data->dcache;
+	dcache = &current_cpu_data.dcache;
 	linesz = dcache->linesz;
 	way_incr = dcache->way_incr;
 	way_size = dcache->way_size;
@@ -640,7 +651,7 @@ static void __flush_dcache_segment_2way(unsigned long start,
 	base_addr = ((base_addr >> 16) << 16);
 	base_addr |= start;
 
-	dcache = &cpu_data->dcache;
+	dcache = &current_cpu_data.dcache;
 	linesz = dcache->linesz;
 	way_incr = dcache->way_incr;
 	way_size = dcache->way_size;
@@ -699,7 +710,7 @@ static void __flush_dcache_segment_4way(unsigned long start,
 	base_addr = ((base_addr >> 16) << 16);
 	base_addr |= start;
 
-	dcache = &cpu_data->dcache;
+	dcache = &current_cpu_data.dcache;
 	linesz = dcache->linesz;
 	way_incr = dcache->way_incr;
 	way_size = dcache->way_size;
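The flush_dcache_page() rewrite above is one half of a deferred-flush scheme: pagecache pages that are not mapped into user space are merely marked PG_dcache_dirty, and the writeback is paid later, when update_mmu_cache() (added to tlb-flush.c below) first maps the page into a user address space. A condensed sketch of the handshake; mark_or_flush, on_tlb_refill, and __flush_dcache_page_now are hypothetical names, not part of the patch:

/* Hypothetical stand-in for the real alias-safe writeback. */
static void __flush_dcache_page_now(struct page *page);

/* flush_dcache_page() side: defer when no user mapping can alias. */
static void mark_or_flush(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (mapping && !mapping_mapped(mapping))
		set_bit(PG_dcache_dirty, &page->flags);	/* defer */
	else
		__flush_dcache_page_now(page);		/* alias danger: flush now */
}

/* update_mmu_cache() side: settle the debt on first user mapping. */
static void on_tlb_refill(struct page *page, unsigned long phys)
{
	if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
		__flush_wback_region((void *)P1SEGADDR(phys), PAGE_SIZE);
}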
diff --git a/arch/sh/mm/cache-sh7705.c b/arch/sh/mm/cache-sh7705.c
index 045abdf078f5..31f8deb7a158 100644
--- a/arch/sh/mm/cache-sh7705.c
+++ b/arch/sh/mm/cache-sh7705.c
@@ -3,11 +3,11 @@
  *
  * Copyright (C) 1999, 2000 Niibe Yutaka
  * Copyright (C) 2004 Alex Song
+ * Copyright (C) 2006 Paul Mundt
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
- *
  */
 #include <linux/init.h>
 #include <linux/mman.h>
@@ -32,9 +32,9 @@ static inline void cache_wback_all(void)
 {
 	unsigned long ways, waysize, addrstart;
 
-	ways = cpu_data->dcache.ways;
-	waysize = cpu_data->dcache.sets;
-	waysize <<= cpu_data->dcache.entry_shift;
+	ways = current_cpu_data.dcache.ways;
+	waysize = current_cpu_data.dcache.sets;
+	waysize <<= current_cpu_data.dcache.entry_shift;
 
 	addrstart = CACHE_OC_ADDRESS_ARRAY;
 
@@ -43,7 +43,7 @@ static inline void cache_wback_all(void)
 
 		for (addr = addrstart;
 		     addr < addrstart + waysize;
-		     addr += cpu_data->dcache.linesz) {
+		     addr += current_cpu_data.dcache.linesz) {
 			unsigned long data;
 			int v = SH_CACHE_UPDATED | SH_CACHE_VALID;
 
@@ -51,10 +51,9 @@ static inline void cache_wback_all(void)
 
 			if ((data & v) == v)
 				ctrl_outl(data & ~v, addr);
-
 		}
 
-		addrstart += cpu_data->dcache.way_incr;
+		addrstart += current_cpu_data.dcache.way_incr;
 	} while (--ways);
 }
 
@@ -94,9 +93,9 @@ static void __flush_dcache_page(unsigned long phys)
 	local_irq_save(flags);
 	jump_to_P2();
 
-	ways = cpu_data->dcache.ways;
-	waysize = cpu_data->dcache.sets;
-	waysize <<= cpu_data->dcache.entry_shift;
+	ways = current_cpu_data.dcache.ways;
+	waysize = current_cpu_data.dcache.sets;
+	waysize <<= current_cpu_data.dcache.entry_shift;
 
 	addrstart = CACHE_OC_ADDRESS_ARRAY;
 
@@ -105,7 +104,7 @@ static void __flush_dcache_page(unsigned long phys)
 
 		for (addr = addrstart;
 		     addr < addrstart + waysize;
-		     addr += cpu_data->dcache.linesz) {
+		     addr += current_cpu_data.dcache.linesz) {
 			unsigned long data;
 
 			data = ctrl_inl(addr) & (0x1ffffC00 | SH_CACHE_VALID);
@@ -115,7 +114,7 @@ static void __flush_dcache_page(unsigned long phys)
 			}
 		}
 
-		addrstart += cpu_data->dcache.way_incr;
+		addrstart += current_cpu_data.dcache.way_incr;
 	} while (--ways);
 
 	back_to_P1();
@@ -128,7 +127,11 @@ static void __flush_dcache_page(unsigned long phys)
  */
 void flush_dcache_page(struct page *page)
 {
-	if (test_bit(PG_mapped, &page->flags))
+	struct address_space *mapping = page_mapping(page);
+
+	if (mapping && !mapping_mapped(mapping))
+		set_bit(PG_dcache_dirty, &page->flags);
+	else
 		__flush_dcache_page(PHYSADDR(page_address(page)));
 }
 
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index 716ebf568af2..fa5d7f0b9f18 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -17,6 +17,7 @@
 #include <linux/kprobes.h>
 #include <asm/system.h>
 #include <asm/mmu_context.h>
+#include <asm/tlbflush.h>
 #include <asm/kgdb.h>
 
 extern void die(const char *,struct pt_regs *,long);
@@ -224,3 +225,89 @@ do_sigbus:
 	if (!user_mode(regs))
 		goto no_context;
 }
+
+#ifdef CONFIG_SH_STORE_QUEUES
+/*
+ * This is a special case for the SH-4 store queues, as pages for this
+ * space still need to be faulted in before it's possible to flush the
+ * store queue cache for writeout to the remapped region.
+ */
+#define P3_ADDR_MAX		(P4SEG_STORE_QUE + 0x04000000)
+#else
+#define P3_ADDR_MAX		P4SEG
+#endif
+
+/*
+ * Called with interrupts disabled.
+ */
+asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
+					 unsigned long writeaccess,
+					 unsigned long address)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *pte;
+	pte_t entry;
+	struct mm_struct *mm = current->mm;
+	spinlock_t *ptl;
+	int ret = 1;
+
+#ifdef CONFIG_SH_KGDB
+	if (kgdb_nofault && kgdb_bus_err_hook)
+		kgdb_bus_err_hook();
+#endif
+
+	/*
+	 * We don't take page faults for P1, P2, and parts of P4, these
+	 * are always mapped, whether it be due to legacy behaviour in
+	 * 29-bit mode, or due to PMB configuration in 32-bit mode.
+	 */
+	if (address >= P3SEG && address < P3_ADDR_MAX) {
+		pgd = pgd_offset_k(address);
+		mm = NULL;
+	} else {
+		if (unlikely(address >= TASK_SIZE || !mm))
+			return 1;
+
+		pgd = pgd_offset(mm, address);
+	}
+
+	pud = pud_offset(pgd, address);
+	if (pud_none_or_clear_bad(pud))
+		return 1;
+	pmd = pmd_offset(pud, address);
+	if (pmd_none_or_clear_bad(pmd))
+		return 1;
+
+	if (mm)
+		pte = pte_offset_map_lock(mm, pmd, address, &ptl);
+	else
+		pte = pte_offset_kernel(pmd, address);
+
+	entry = *pte;
+	if (unlikely(pte_none(entry) || pte_not_present(entry)))
+		goto unlock;
+	if (unlikely(writeaccess && !pte_write(entry)))
+		goto unlock;
+
+	if (writeaccess)
+		entry = pte_mkdirty(entry);
+	entry = pte_mkyoung(entry);
+
+#ifdef CONFIG_CPU_SH4
+	/*
+	 * ITLB is not affected by "ldtlb" instruction.
+	 * So, we need to flush the entry by ourselves.
+	 */
+	local_flush_tlb_one(get_asid(), address & PAGE_MASK);
+#endif
+
+	set_pte(pte, entry);
+	update_mmu_cache(NULL, address, entry);
+	ret = 0;
+unlock:
+	if (mm)
+		pte_unmap_unlock(pte, ptl);
+	return ret;
+}
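The __do_page_fault() added above is a fast-path TLB refill: it walks the page tables with interrupts disabled and returns 0 when a valid PTE could simply be loaded into the TLB, or 1 when the caller must fall back to the full fault handler. On SH the actual caller is the low-level exception entry code in assembly; the C rendering below is only a sketch of that contract, with tlb_miss_entry as a hypothetical name:

/* Illustrative dispatch, assuming the entry code were written in C. */
static void tlb_miss_entry(struct pt_regs *regs, unsigned long writeaccess,
			   unsigned long address)
{
	/* Fast path: refill the TLB from an already-valid PTE. */
	if (__do_page_fault(regs, writeaccess, address) == 0)
		return;

	/* Slow path: take mmap_sem, possibly allocate, maybe SIGSEGV. */
	do_page_fault(regs, writeaccess, address);
}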
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index bf0c263cb6fd..ae957a932375 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -39,11 +39,6 @@
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 pgd_t swapper_pg_dir[PTRS_PER_PGD];
 
-/*
- * Cache of MMU context last used.
- */
-unsigned long mmu_context_cache = NO_CONTEXT;
-
 #ifdef CONFIG_MMU
 /* It'd be good if these lines were in the standard header file. */
 #define START_PFN	(NODE_DATA(0)->bdata->node_boot_start >> PAGE_SHIFT)
@@ -111,7 +106,7 @@ static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
 
 	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
 
-	__flush_tlb_page(get_asid(), addr);
+	flush_tlb_one(get_asid(), addr);
 }
 
 /*
diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c
index 90b494a0cf45..be03d74e99cb 100644
--- a/arch/sh/mm/ioremap.c
+++ b/arch/sh/mm/ioremap.c
@@ -45,12 +45,6 @@ void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
 		return NULL;
 
 	/*
-	 * Don't remap the low PCI/ISA area, it's always mapped..
-	 */
-	if (phys_addr >= 0xA0000 && last_addr < 0x100000)
-		return (void __iomem *)phys_to_virt(phys_addr);
-
-	/*
 	 * If we're on an SH7751 or SH7780 PCI controller, PCI memory is
 	 * mapped at the end of the address space (typically 0xfd000000)
 	 * in a non-translatable area, so mapping through page tables for
diff --git a/arch/sh/mm/pg-sh4.c b/arch/sh/mm/pg-sh4.c
index 3f98d2a4f936..969efeceb928 100644
--- a/arch/sh/mm/pg-sh4.c
+++ b/arch/sh/mm/pg-sh4.c
@@ -13,7 +13,7 @@
 
 extern struct mutex p3map_mutex[];
 
-#define CACHE_ALIAS (cpu_data->dcache.alias_mask)
+#define CACHE_ALIAS (current_cpu_data.dcache.alias_mask)
 
 /*
  * clear_user_page
@@ -23,7 +23,6 @@ extern struct mutex p3map_mutex[];
  */
 void clear_user_page(void *to, unsigned long address, struct page *page)
 {
-	__set_bit(PG_mapped, &page->flags);
 	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
 		clear_page(to);
 	else {
@@ -40,7 +39,7 @@ void clear_user_page(void *to, unsigned long address, struct page *page)
 		mutex_lock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
 		set_pte(pte, entry);
 		local_irq_save(flags);
-		__flush_tlb_page(get_asid(), p3_addr);
+		flush_tlb_one(get_asid(), p3_addr);
 		local_irq_restore(flags);
 		update_mmu_cache(NULL, p3_addr, entry);
 		__clear_user_page((void *)p3_addr, to);
@@ -59,7 +58,6 @@ void clear_user_page(void *to, unsigned long address, struct page *page)
 void copy_user_page(void *to, void *from, unsigned long address,
 		    struct page *page)
 {
-	__set_bit(PG_mapped, &page->flags);
 	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
 		copy_page(to, from);
 	else {
@@ -76,7 +74,7 @@ void copy_user_page(void *to, void *from, unsigned long address,
 		mutex_lock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
 		set_pte(pte, entry);
 		local_irq_save(flags);
-		__flush_tlb_page(get_asid(), p3_addr);
+		flush_tlb_one(get_asid(), p3_addr);
 		local_irq_restore(flags);
 		update_mmu_cache(NULL, p3_addr, entry);
 		__copy_user_page((void *)p3_addr, from, to);
@@ -84,23 +82,3 @@ void copy_user_page(void *to, void *from, unsigned long address,
 		mutex_unlock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
 	}
 }
-
-/*
- * For SH-4, we have our own implementation for ptep_get_and_clear
- */
-inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
-{
-	pte_t pte = *ptep;
-
-	pte_clear(mm, addr, ptep);
-	if (!pte_not_present(pte)) {
-		unsigned long pfn = pte_pfn(pte);
-		if (pfn_valid(pfn)) {
-			struct page *page = pfn_to_page(pfn);
-			struct address_space *mapping = page_mapping(page);
-			if (!mapping || !mapping_writably_mapped(mapping))
-				__clear_bit(PG_mapped, &page->flags);
-		}
-	}
-	return pte;
-}
diff --git a/arch/sh/mm/pg-sh7705.c b/arch/sh/mm/pg-sh7705.c
index ff9ece986cbc..887ab9d18ccd 100644
--- a/arch/sh/mm/pg-sh7705.c
+++ b/arch/sh/mm/pg-sh7705.c
@@ -7,9 +7,7 @@
  * This file is subject to the terms and conditions of the GNU General Public
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
- *
  */
-
 #include <linux/init.h>
 #include <linux/mman.h>
 #include <linux/mm.h>
@@ -45,13 +43,13 @@ static inline void __flush_purge_virtual_region(void *p1, void *virt, int size)
 
 		p = __pa(p1_begin);
 
-		ways = cpu_data->dcache.ways;
+		ways = current_cpu_data.dcache.ways;
 		addr = CACHE_OC_ADDRESS_ARRAY;
 
 		do {
 			unsigned long data;
 
-			addr |= (v & cpu_data->dcache.entry_mask);
+			addr |= (v & current_cpu_data.dcache.entry_mask);
 
 			data = ctrl_inl(addr);
 			if ((data & CACHE_PHYSADDR_MASK) ==
@@ -60,7 +58,7 @@ static inline void __flush_purge_virtual_region(void *p1, void *virt, int size)
 				ctrl_outl(data, addr);
 			}
 
-			addr += cpu_data->dcache.way_incr;
+			addr += current_cpu_data.dcache.way_incr;
 		} while (--ways);
 
 		p1_begin += L1_CACHE_BYTES;
@@ -76,7 +74,6 @@ void clear_user_page(void *to, unsigned long address, struct page *pg)
 {
 	struct page *page = virt_to_page(to);
 
-	__set_bit(PG_mapped, &page->flags);
 	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0) {
 		clear_page(to);
 		__flush_wback_region(to, PAGE_SIZE);
@@ -95,12 +92,11 @@ void clear_user_page(void *to, unsigned long address, struct page *pg)
  * @from: P1 address
  * @address: U0 address to be mapped
  */
-void copy_user_page(void *to, void *from, unsigned long address, struct page *pg)
+void copy_user_page(void *to, void *from, unsigned long address,
+		    struct page *pg)
 {
 	struct page *page = virt_to_page(to);
 
-
-	__set_bit(PG_mapped, &page->flags);
 	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0) {
 		copy_page(to, from);
 		__flush_wback_region(to, PAGE_SIZE);
@@ -112,26 +108,3 @@ void copy_user_page(void *to, void *from, unsigned long address, struct page *pg
 		__flush_wback_region(to, PAGE_SIZE);
 	}
 }
-
-/*
- * For SH7705, we have our own implementation for ptep_get_and_clear
- * Copied from pg-sh4.c
- */
-inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
-{
-	pte_t pte = *ptep;
-
-	pte_clear(mm, addr, ptep);
-	if (!pte_not_present(pte)) {
-		unsigned long pfn = pte_pfn(pte);
-		if (pfn_valid(pfn)) {
-			struct page *page = pfn_to_page(pfn);
-			struct address_space *mapping = page_mapping(page);
-			if (!mapping || !mapping_writably_mapped(mapping))
-				__clear_bit(PG_mapped, &page->flags);
-		}
-	}
-
-	return pte;
-}
-
diff --git a/arch/sh/mm/tlb-flush.c b/arch/sh/mm/tlb-flush.c
index 73ec7f6084fa..d2f7b4a2eb05 100644
--- a/arch/sh/mm/tlb-flush.c
+++ b/arch/sh/mm/tlb-flush.c
@@ -2,24 +2,28 @@
  * TLB flushing operations for SH with an MMU.
  *
  * Copyright (C) 1999 Niibe Yutaka
- * Copyright (C) 2003 Paul Mundt
+ * Copyright (C) 2003 - 2006 Paul Mundt
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
  */
 #include <linux/mm.h>
+#include <linux/io.h>
 #include <asm/mmu_context.h>
 #include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
 
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 {
-	if (vma->vm_mm && vma->vm_mm->context.id != NO_CONTEXT) {
+	unsigned int cpu = smp_processor_id();
+
+	if (vma->vm_mm && cpu_context(cpu, vma->vm_mm) != NO_CONTEXT) {
 		unsigned long flags;
 		unsigned long asid;
 		unsigned long saved_asid = MMU_NO_ASID;
 
-		asid = vma->vm_mm->context.id & MMU_CONTEXT_ASID_MASK;
+		asid = cpu_asid(cpu, vma->vm_mm);
 		page &= PAGE_MASK;
 
 		local_irq_save(flags);
@@ -27,33 +31,34 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 			saved_asid = get_asid();
 			set_asid(asid);
 		}
-		__flush_tlb_page(asid, page);
+		local_flush_tlb_one(asid, page);
 		if (saved_asid != MMU_NO_ASID)
 			set_asid(saved_asid);
 		local_irq_restore(flags);
 	}
 }
 
-void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 		     unsigned long end)
 {
 	struct mm_struct *mm = vma->vm_mm;
+	unsigned int cpu = smp_processor_id();
 
-	if (mm->context.id != NO_CONTEXT) {
+	if (cpu_context(cpu, mm) != NO_CONTEXT) {
 		unsigned long flags;
 		int size;
 
 		local_irq_save(flags);
 		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
 		if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
-			mm->context.id = NO_CONTEXT;
+			cpu_context(cpu, mm) = NO_CONTEXT;
 			if (mm == current->mm)
-				activate_context(mm);
+				activate_context(mm, cpu);
 		} else {
 			unsigned long asid;
 			unsigned long saved_asid = MMU_NO_ASID;
 
-			asid = mm->context.id & MMU_CONTEXT_ASID_MASK;
+			asid = cpu_asid(cpu, mm);
 			start &= PAGE_MASK;
 			end += (PAGE_SIZE - 1);
 			end &= PAGE_MASK;
@@ -62,7 +67,7 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 				set_asid(asid);
 			}
 			while (start < end) {
-				__flush_tlb_page(asid, start);
+				local_flush_tlb_one(asid, start);
 				start += PAGE_SIZE;
 			}
 			if (saved_asid != MMU_NO_ASID)
@@ -72,26 +77,27 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 	}
 }
 
-void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
+	unsigned int cpu = smp_processor_id();
 	unsigned long flags;
 	int size;
 
 	local_irq_save(flags);
 	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
 	if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
-		flush_tlb_all();
+		local_flush_tlb_all();
 	} else {
 		unsigned long asid;
 		unsigned long saved_asid = get_asid();
 
-		asid = init_mm.context.id & MMU_CONTEXT_ASID_MASK;
+		asid = cpu_asid(cpu, &init_mm);
 		start &= PAGE_MASK;
 		end += (PAGE_SIZE - 1);
 		end &= PAGE_MASK;
 		set_asid(asid);
 		while (start < end) {
-			__flush_tlb_page(asid, start);
+			local_flush_tlb_one(asid, start);
 			start += PAGE_SIZE;
 		}
 		set_asid(saved_asid);
@@ -99,22 +105,24 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 	local_irq_restore(flags);
 }
 
-void flush_tlb_mm(struct mm_struct *mm)
+void local_flush_tlb_mm(struct mm_struct *mm)
 {
+	unsigned int cpu = smp_processor_id();
+
 	/* Invalidate all TLB of this process. */
 	/* Instead of invalidating each TLB, we get new MMU context. */
-	if (mm->context.id != NO_CONTEXT) {
+	if (cpu_context(cpu, mm) != NO_CONTEXT) {
 		unsigned long flags;
 
 		local_irq_save(flags);
-		mm->context.id = NO_CONTEXT;
+		cpu_context(cpu, mm) = NO_CONTEXT;
 		if (mm == current->mm)
-			activate_context(mm);
+			activate_context(mm, cpu);
 		local_irq_restore(flags);
 	}
 }
 
-void flush_tlb_all(void)
+void local_flush_tlb_all(void)
 {
 	unsigned long flags, status;
 
@@ -132,3 +140,54 @@
 	ctrl_barrier();
 	local_irq_restore(flags);
 }
+
+void update_mmu_cache(struct vm_area_struct *vma,
+		      unsigned long address, pte_t pte)
+{
+	unsigned long flags;
+	unsigned long pteval;
+	unsigned long vpn;
+	struct page *page;
+	unsigned long pfn = pte_pfn(pte);
+	struct address_space *mapping;
+
+	if (!pfn_valid(pfn))
+		return;
+
+	page = pfn_to_page(pfn);
+	mapping = page_mapping(page);
+	if (mapping) {
+		unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;
+		int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
+
+		if (dirty)
+			__flush_wback_region((void *)P1SEGADDR(phys),
+					     PAGE_SIZE);
+	}
+
+	local_irq_save(flags);
+
+	/* Set PTEH register */
+	vpn = (address & MMU_VPN_MASK) | get_asid();
+	ctrl_outl(vpn, MMU_PTEH);
+
+	pteval = pte_val(pte);
+
+#ifdef CONFIG_CPU_HAS_PTEA
+	/* Set PTEA register */
+	/* TODO: make this look less hacky */
+	ctrl_outl(((pteval >> 28) & 0xe) | (pteval & 0x1), MMU_PTEA);
+#endif
+
+	/* Set PTEL register */
+	pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
+#if defined(CONFIG_SH_WRITETHROUGH) && defined(CONFIG_CPU_SH4)
+	pteval |= _PAGE_WT;
+#endif
+	/* conveniently, we want all the software flags to be 0 anyway */
+	ctrl_outl(pteval, MMU_PTEL);
+
+	/* Load the TLB */
+	asm volatile("ldtlb": /* no output */ : /* no input */ : "memory");
+	local_irq_restore(flags);
+}
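Renaming the flush_tlb_* entry points to local_flush_tlb_* above makes explicit that these routines only touch the calling CPU's TLB; it prepares for SMP, where the generic names can alias the local versions on UP builds or broadcast to other CPUs. The sketch below shows how such a split is typically consumed; the UP #define mapping and the SMP glue are assumptions about the matching header change, which is not part of this hunk, and flush_args/ipi_flush_tlb_page are hypothetical names:

#ifndef CONFIG_SMP
/* UP: the generic entry points collapse to the local flushes. */
#define flush_tlb_page(vma, page)	local_flush_tlb_page(vma, page)
#define flush_tlb_all()			local_flush_tlb_all()
#else
struct flush_args {
	struct vm_area_struct *vma;
	unsigned long page;
};

static void ipi_flush_tlb_page(void *info)
{
	struct flush_args *args = info;

	local_flush_tlb_page(args->vma, args->page);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	struct flush_args args = { .vma = vma, .page = page };

	/* Run the local flush on every CPU, including this one. */
	on_each_cpu(ipi_flush_tlb_page, &args, 1, 1);
}
#endif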
diff --git a/arch/sh/mm/tlb-nommu.c b/arch/sh/mm/tlb-nommu.c
index e55cfea01092..1ccca7c0532e 100644
--- a/arch/sh/mm/tlb-nommu.c
+++ b/arch/sh/mm/tlb-nommu.c
@@ -13,39 +13,33 @@
 /*
  * Nothing too terribly exciting here ..
  */
-
-void flush_tlb(void)
-{
-	BUG();
-}
-
-void flush_tlb_all(void)
+void local_flush_tlb_all(void)
 {
 	BUG();
 }
 
-void flush_tlb_mm(struct mm_struct *mm)
+void local_flush_tlb_mm(struct mm_struct *mm)
 {
 	BUG();
 }
 
-void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 		     unsigned long end)
 {
 	BUG();
 }
 
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 {
 	BUG();
 }
 
-void __flush_tlb_page(unsigned long asid, unsigned long page)
+void local_flush_tlb_one(unsigned long asid, unsigned long page)
 {
 	BUG();
 }
 
-void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
 	BUG();
 }
@@ -55,4 +49,3 @@ void update_mmu_cache(struct vm_area_struct * vma,
 {
 	BUG();
 }
-
diff --git a/arch/sh/mm/tlb-sh3.c b/arch/sh/mm/tlb-sh3.c
index 46b09e26e082..e5e76eb7ee09 100644
--- a/arch/sh/mm/tlb-sh3.c
+++ b/arch/sh/mm/tlb-sh3.c
@@ -8,71 +8,11 @@
  *
  * Released under the terms of the GNU GPL v2.0.
  */
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/ptrace.h>
-#include <linux/mman.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
-#include <linux/interrupt.h>
-
+#include <linux/io.h>
 #include <asm/system.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
-#include <asm/pgalloc.h>
 #include <asm/mmu_context.h>
-#include <asm/cacheflush.h>
 
-void update_mmu_cache(struct vm_area_struct * vma,
-		      unsigned long address, pte_t pte)
-{
-	unsigned long flags;
-	unsigned long pteval;
-	unsigned long vpn;
-
-	/* Ptrace may call this routine. */
-	if (vma && current->active_mm != vma->vm_mm)
-		return;
-
-#if defined(CONFIG_SH7705_CACHE_32KB)
-	{
-		struct page *page = pte_page(pte);
-		unsigned long pfn = pte_pfn(pte);
-
-		if (pfn_valid(pfn) && !test_bit(PG_mapped, &page->flags)) {
-			unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;
-
-			__flush_wback_region((void *)P1SEGADDR(phys),
-					     PAGE_SIZE);
-			__set_bit(PG_mapped, &page->flags);
-		}
-	}
-#endif
-
-	local_irq_save(flags);
-
-	/* Set PTEH register */
-	vpn = (address & MMU_VPN_MASK) | get_asid();
-	ctrl_outl(vpn, MMU_PTEH);
-
-	pteval = pte_val(pte);
-
-	/* Set PTEL register */
-	pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
-	/* conveniently, we want all the software flags to be 0 anyway */
-	ctrl_outl(pteval, MMU_PTEL);
-
-	/* Load the TLB */
-	asm volatile("ldtlb": /* no output */ : /* no input */ : "memory");
-	local_irq_restore(flags);
-}
-
-void __flush_tlb_page(unsigned long asid, unsigned long page)
+void local_flush_tlb_one(unsigned long asid, unsigned long page)
 {
 	unsigned long addr, data;
 	int i, ways = MMU_NTLB_WAYS;
@@ -86,7 +26,7 @@ void __flush_tlb_page(unsigned long asid, unsigned long page)
 	addr = MMU_TLB_ADDRESS_ARRAY | (page & 0x1F000);
 	data = (page & 0xfffe0000) | asid; /* VALID bit is off */
 
-	if ((cpu_data->flags & CPU_HAS_MMU_PAGE_ASSOC)) {
+	if ((current_cpu_data.flags & CPU_HAS_MMU_PAGE_ASSOC)) {
 		addr |= MMU_PAGE_ASSOC_BIT;
 		ways = 1;	/* we already know the way .. */
 	}
@@ -94,4 +34,3 @@ void __flush_tlb_page(unsigned long asid, unsigned long page)
 	for (i = 0; i < ways; i++)
 		ctrl_outl(data, addr + (i << 8));
 }
-
diff --git a/arch/sh/mm/tlb-sh4.c b/arch/sh/mm/tlb-sh4.c
index 812b2d567de2..221e7095473d 100644
--- a/arch/sh/mm/tlb-sh4.c
+++ b/arch/sh/mm/tlb-sh4.c
@@ -8,76 +8,11 @@
  *
  * Released under the terms of the GNU GPL v2.0.
  */
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/ptrace.h>
-#include <linux/mman.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
-#include <linux/interrupt.h>
-
+#include <linux/io.h>
 #include <asm/system.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
-#include <asm/pgalloc.h>
 #include <asm/mmu_context.h>
-#include <asm/cacheflush.h>
 
-void update_mmu_cache(struct vm_area_struct * vma,
-		      unsigned long address, pte_t pte)
-{
-	unsigned long flags;
-	unsigned long pteval;
-	unsigned long vpn;
-	struct page *page;
-	unsigned long pfn;
-
-	/* Ptrace may call this routine. */
-	if (vma && current->active_mm != vma->vm_mm)
-		return;
-
-	pfn = pte_pfn(pte);
-	if (pfn_valid(pfn)) {
-		page = pfn_to_page(pfn);
-		if (!test_bit(PG_mapped, &page->flags)) {
-			unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;
-			__flush_wback_region((void *)P1SEGADDR(phys), PAGE_SIZE);
-			__set_bit(PG_mapped, &page->flags);
-		}
-	}
-
-	local_irq_save(flags);
-
-	/* Set PTEH register */
-	vpn = (address & MMU_VPN_MASK) | get_asid();
-	ctrl_outl(vpn, MMU_PTEH);
-
-	pteval = pte_val(pte);
-
-	/* Set PTEA register */
-	if (cpu_data->flags & CPU_HAS_PTEA)
-		/* TODO: make this look less hacky */
-		ctrl_outl(((pteval >> 28) & 0xe) | (pteval & 0x1), MMU_PTEA);
-
-	/* Set PTEL register */
-	pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
-#ifdef CONFIG_SH_WRITETHROUGH
-	pteval |= _PAGE_WT;
-#endif
-	/* conveniently, we want all the software flags to be 0 anyway */
-	ctrl_outl(pteval, MMU_PTEL);
-
-	/* Load the TLB */
-	asm volatile("ldtlb": /* no output */ : /* no input */ : "memory");
-	local_irq_restore(flags);
-}
-
-void __flush_tlb_page(unsigned long asid, unsigned long page)
+void local_flush_tlb_one(unsigned long asid, unsigned long page)
 {
 	unsigned long addr, data;
 
@@ -93,4 +28,3 @@ void __flush_tlb_page(unsigned long asid, unsigned long page)
 	ctrl_outl(data, addr);
 	back_to_P1();
 }
-