Diffstat (limited to 'arch/tile/mm')
-rw-r--r--  arch/tile/mm/fault.c     | 12
-rw-r--r--  arch/tile/mm/highmem.c   | 86
-rw-r--r--  arch/tile/mm/homecache.c | 11
-rw-r--r--  arch/tile/mm/init.c      |  2
4 files changed, 34 insertions(+), 77 deletions(-)
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c
index 704f3e8a4385..f295b4ac941d 100644
--- a/arch/tile/mm/fault.c
+++ b/arch/tile/mm/fault.c
@@ -66,10 +66,10 @@ static noinline void force_sig_info_fault(int si_signo, int si_code,
 #ifndef __tilegx__
 /*
  * Synthesize the fault a PL0 process would get by doing a word-load of
- * an unaligned address or a high kernel address.  Called indirectly
- * from sys_cmpxchg() in kernel/intvec.S.
+ * an unaligned address or a high kernel address.
  */
-int _sys_cmpxchg_badaddr(unsigned long address, struct pt_regs *regs)
+SYSCALL_DEFINE2(cmpxchg_badaddr, unsigned long, address,
+		struct pt_regs *, regs)
 {
 	if (address >= PAGE_OFFSET)
 		force_sig_info_fault(SIGSEGV, SEGV_MAPERR, address,
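
The switch from a bare _sys_cmpxchg_badaddr() to SYSCALL_DEFINE2() routes this entry point through the kernel's standard syscall-definition machinery. As a rough model of the technique only (a simplified stand-in, not the real macro from <linux/syscalls.h>, which also emits asmlinkage linkage and tracing metadata):

#include <stdio.h>

/* Hypothetical, simplified stand-in for the kernel's SYSCALL_DEFINE2:
 * it splices the (type, name) pairs into an ordinary function definition. */
#define MY_SYSCALL_DEFINE2(name, t1, a1, t2, a2) \
	long sys_##name(t1 a1, t2 a2)

MY_SYSCALL_DEFINE2(cmpxchg_badaddr, unsigned long, address, void *, regs)
{
	/* Demo body only; the kernel version raises SIGSEGV or SIGBUS. */
	printf("synthesizing fault for address %#lx\n", address);
	(void)regs;
	return 0;
}

int main(void)
{
	return (int)sys_cmpxchg_badaddr(0x1003, 0);
}
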
@@ -563,10 +563,10 @@ do_sigbus:
 /*
  * When we take an ITLB or DTLB fault or access violation in the
  * supervisor while the critical section bit is set, the hypervisor is
- * reluctant to write new values into the EX_CONTEXT_1_x registers,
+ * reluctant to write new values into the EX_CONTEXT_K_x registers,
  * since that might indicate we have not yet squirreled the SPR
  * contents away and can thus safely take a recursive interrupt.
- * Accordingly, the hypervisor passes us the PC via SYSTEM_SAVE_1_2.
+ * Accordingly, the hypervisor passes us the PC via SYSTEM_SAVE_K_2.
  *
  * Note that this routine is called before homecache_tlb_defer_enter(),
  * which means that we can properly unlock any atomics that might
@@ -610,7 +610,7 @@ struct intvec_state do_page_fault_ics(struct pt_regs *regs, int fault_num,
 	 * fault.  We didn't set up a kernel stack on initial entry to
 	 * sys_cmpxchg, but instead had one set up by the fault, which
 	 * (because sys_cmpxchg never releases ICS) came to us via the
-	 * SYSTEM_SAVE_1_2 mechanism, and thus EX_CONTEXT_1_[01] are
+	 * SYSTEM_SAVE_K_2 mechanism, and thus EX_CONTEXT_K_[01] are
 	 * still referencing the original user code.  We release the
 	 * atomic lock and rewrite pt_regs so that it appears that we
 	 * came from user-space directly, and after we finish the
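
The EX_CONTEXT_1_x / SYSTEM_SAVE_1_2 to EX_CONTEXT_K_x / SYSTEM_SAVE_K_2 renames replace a hard-coded protection level 1 with "K", the level the kernel is configured to run at. Presumably the _K names are compile-time aliases selected from the configured PL; a hypothetical sketch of that idea (the macro names and mechanism here are assumptions, not quoted from the tile headers):

/* Hypothetical sketch: pick the kernel-PL register aliases at build time. */
#define CONFIG_KERNEL_PL 1			/* assumed config knob */

#if CONFIG_KERNEL_PL == 1
#define SPR_EX_CONTEXT_K_0	SPR_EX_CONTEXT_1_0
#define SPR_EX_CONTEXT_K_1	SPR_EX_CONTEXT_1_1
#define SPR_SYSTEM_SAVE_K_2	SPR_SYSTEM_SAVE_1_2
#elif CONFIG_KERNEL_PL == 2
#define SPR_EX_CONTEXT_K_0	SPR_EX_CONTEXT_2_0
#define SPR_EX_CONTEXT_K_1	SPR_EX_CONTEXT_2_1
#define SPR_SYSTEM_SAVE_K_2	SPR_SYSTEM_SAVE_2_2
#endif
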
diff --git a/arch/tile/mm/highmem.c b/arch/tile/mm/highmem.c
index 12ab137e7d4f..abb57331cf6e 100644
--- a/arch/tile/mm/highmem.c
+++ b/arch/tile/mm/highmem.c
@@ -56,50 +56,6 @@ void kunmap(struct page *page)
 }
 EXPORT_SYMBOL(kunmap);
 
-static void debug_kmap_atomic_prot(enum km_type type)
-{
-#ifdef CONFIG_DEBUG_HIGHMEM
-	static unsigned warn_count = 10;
-
-	if (unlikely(warn_count == 0))
-		return;
-
-	if (unlikely(in_interrupt())) {
-		if (in_irq()) {
-			if (type != KM_IRQ0 && type != KM_IRQ1 &&
-			    type != KM_BIO_SRC_IRQ &&
-			    /* type != KM_BIO_DST_IRQ && */
-			    type != KM_BOUNCE_READ) {
-				WARN_ON(1);
-				warn_count--;
-			}
-		} else if (!irqs_disabled()) {	/* softirq */
-			if (type != KM_IRQ0 && type != KM_IRQ1 &&
-			    type != KM_SOFTIRQ0 && type != KM_SOFTIRQ1 &&
-			    type != KM_SKB_SUNRPC_DATA &&
-			    type != KM_SKB_DATA_SOFTIRQ &&
-			    type != KM_BOUNCE_READ) {
-				WARN_ON(1);
-				warn_count--;
-			}
-		}
-	}
-
-	if (type == KM_IRQ0 || type == KM_IRQ1 || type == KM_BOUNCE_READ ||
-	    type == KM_BIO_SRC_IRQ /* || type == KM_BIO_DST_IRQ */) {
-		if (!irqs_disabled()) {
-			WARN_ON(1);
-			warn_count--;
-		}
-	} else if (type == KM_SOFTIRQ0 || type == KM_SOFTIRQ1) {
-		if (irq_count() == 0 && !irqs_disabled()) {
-			WARN_ON(1);
-			warn_count--;
-		}
-	}
-#endif
-}
-
 /*
  * Describe a single atomic mapping of a page on a given cpu at a
  * given address, and allow it to be linked into a list.
@@ -240,10 +196,10 @@ void kmap_atomic_fix_kpte(struct page *page, int finished)
  * When holding an atomic kmap it is not legal to sleep, so atomic
  * kmaps are appropriate for short, tight code paths only.
  */
-void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
+void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 {
-	enum fixed_addresses idx;
 	unsigned long vaddr;
+	int idx, type;
 	pte_t *pte;
 
 	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
@@ -255,8 +211,7 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
 	if (!PageHighMem(page))
 		return page_address(page);
 
-	debug_kmap_atomic_prot(type);
-
+	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 	pte = kmap_get_pte(vaddr);
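
kmap_atomic_idx_push() and friends come from the generic stack-based kmap_atomic rework: rather than the caller naming a KM_* slot, each cpu keeps a counter of live atomic kmaps and hands out fixmap slots in LIFO order (safe because kmap_atomic runs with pagefaults, and hence preemption, disabled). A sketch of the generic helpers, close to what include/linux/highmem.h provides after that rework, with the CONFIG_DEBUG_HIGHMEM sanity checks elided:

DECLARE_PER_CPU(int, __kmap_atomic_idx);

static inline int kmap_atomic_idx_push(void)
{
	/* Claim the next free per-cpu fixmap slot. */
	return __this_cpu_inc_return(__kmap_atomic_idx) - 1;
}

static inline int kmap_atomic_idx(void)
{
	/* Index of the most recently pushed slot. */
	return __this_cpu_read(__kmap_atomic_idx) - 1;
}

static inline void kmap_atomic_idx_pop(void)
{
	/* Release the most recent slot; unmaps must be LIFO. */
	__this_cpu_dec(__kmap_atomic_idx);
}

This also explains why the hand-rolled debug_kmap_atomic_prot() above could be deleted: its per-KM_*-type interrupt-context checks have no meaning once slots are allocated dynamically.
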
@@ -269,28 +224,35 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
 }
 EXPORT_SYMBOL(kmap_atomic_prot);
 
-void *kmap_atomic(struct page *page, enum km_type type)
+void *__kmap_atomic(struct page *page)
 {
 	/* PAGE_NONE is a magic value that tells us to check immutability. */
-	return kmap_atomic_prot(page, type, PAGE_NONE);
+	return kmap_atomic_prot(page, PAGE_NONE);
 }
-EXPORT_SYMBOL(kmap_atomic);
+EXPORT_SYMBOL(__kmap_atomic);
 
-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+void __kunmap_atomic(void *kvaddr)
 {
 	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
 
-	/*
-	 * Force other mappings to Oops if they try to access this pte without
-	 * first remapping it.  Keeping stale mappings around is a bad idea.
-	 */
-	if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx)) {
+	if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
+	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
 		pte_t *pte = kmap_get_pte(vaddr);
 		pte_t pteval = *pte;
+		int idx, type;
+
+		type = kmap_atomic_idx();
+		idx = type + KM_TYPE_NR*smp_processor_id();
+
+		/*
+		 * Force other mappings to Oops if they try to access this pte
+		 * without first remapping it.  Keeping stale mappings around
+		 * is a bad idea.
+		 */
 		BUG_ON(!pte_present(pteval) && !pte_migrating(pteval));
 		kmap_atomic_unregister(pte_page(pteval), vaddr);
 		kpte_clear_flush(pte, vaddr);
+		kmap_atomic_idx_pop();
 	} else {
 		/* Must be a lowmem page */
 		BUG_ON(vaddr < PAGE_OFFSET);
@@ -300,19 +262,19 @@ void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
 	arch_flush_lazy_mmu_mode();
 	pagefault_enable();
 }
-EXPORT_SYMBOL(kunmap_atomic_notypecheck);
+EXPORT_SYMBOL(__kunmap_atomic);
 
 /*
  * This API is supposed to allow us to map memory without a "struct page".
  * Currently we don't support this, though this may change in the future.
  */
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
+void *kmap_atomic_pfn(unsigned long pfn)
 {
-	return kmap_atomic(pfn_to_page(pfn), type);
+	return kmap_atomic(pfn_to_page(pfn));
 }
-void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
+void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
 {
-	return kmap_atomic_prot(pfn_to_page(pfn), type, prot);
+	return kmap_atomic_prot(pfn_to_page(pfn), prot);
 }
 
 struct page *kmap_atomic_to_page(void *ptr)
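
From the caller's side, the type argument simply disappears, and nesting still works as long as unmaps mirror maps in reverse order. A hedged usage sketch (copy_between_pages_sketch is an invented example, not code from this patch):

#include <linux/highmem.h>
#include <linux/string.h>

/* Hypothetical caller illustrating LIFO nesting under the new API. */
static void copy_between_pages_sketch(struct page *dst, struct page *src)
{
	void *vsrc = kmap_atomic(src);	/* pushes this cpu's next slot */
	void *vdst = kmap_atomic(dst);	/* pushes the slot above it */

	memcpy(vdst, vsrc, PAGE_SIZE);

	kunmap_atomic(vdst);		/* pop in reverse order of the maps */
	kunmap_atomic(vsrc);
}
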
diff --git a/arch/tile/mm/homecache.c b/arch/tile/mm/homecache.c
index fb3b4a55cec4..d78df3a6ee15 100644
--- a/arch/tile/mm/homecache.c
+++ b/arch/tile/mm/homecache.c
@@ -37,6 +37,8 @@
 #include <asm/pgalloc.h>
 #include <asm/homecache.h>
 
+#include <arch/sim.h>
+
 #include "migrate.h"
 
 
@@ -217,13 +219,6 @@ static unsigned long cache_flush_length(unsigned long length)
 	return (length >= CHIP_L2_CACHE_SIZE()) ? HV_FLUSH_EVICT_L2 : length;
 }
 
-/* On the simulator, confirm lines have been evicted everywhere. */
-static void validate_lines_evicted(unsigned long pfn, size_t length)
-{
-	sim_syscall(SIM_SYSCALL_VALIDATE_LINES_EVICTED,
-		    (HV_PhysAddr)pfn << PAGE_SHIFT, length);
-}
-
 /* Flush a page out of whatever cache(s) it is in. */
 void homecache_flush_cache(struct page *page, int order)
 {
@@ -234,7 +229,7 @@ void homecache_flush_cache(struct page *page, int order)
 
 	homecache_mask(page, pages, &home_mask);
 	flush_remote(pfn, length, &home_mask, 0, 0, 0, NULL, NULL, 0);
-	validate_lines_evicted(pfn, pages * PAGE_SIZE);
+	sim_validate_lines_evicted(PFN_PHYS(pfn), pages * PAGE_SIZE);
 }
 
 
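
Two things change at this call site: the open-coded pfn-to-physical shift becomes the standard PFN_PHYS() macro, and the file-local helper is replaced by sim_validate_lines_evicted() from the newly included arch/sim.h, presumably a wrapper around the same simulator syscall. A sketch of the equivalence (the wrapper body shown is an assumption, not quoted from the header):

/* PFN_PHYS() as defined in <linux/pfn.h>: */
#define PFN_PHYS(x)	((phys_addr_t)(x) << PAGE_SHIFT)

/*
 * Assumed shape of the <arch/sim.h> helper; the deleted local helper
 * did exactly this via sim_syscall().
 */
static inline void sim_validate_lines_evicted(unsigned long long pa,
					      unsigned long length)
{
	sim_syscall(SIM_SYSCALL_VALIDATE_LINES_EVICTED, pa, length);
}
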
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
index d89c9eacd162..78e1982cb6c9 100644
--- a/arch/tile/mm/init.c
+++ b/arch/tile/mm/init.c
@@ -1060,7 +1060,7 @@ void free_initmem(void)
 
 	/*
 	 * Free the pages mapped from 0xc0000000 that correspond to code
-	 * pages from 0xfd000000 that we won't use again after init.
+	 * pages from MEM_SV_INTRPT that we won't use again after init.
 	 */
 	free_init_pages("unused kernel text",
 			(unsigned long)_sinittext - text_delta,