author		Linus Torvalds <torvalds@linux-foundation.org>	2013-05-10 10:24:14 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-05-10 10:24:14 -0400
commit		6019958d146a4f127dae727a930f902c92531e6e (patch)
tree		126cfbdafb7008ce9701dd2ccc27c54cab2d36f4 /arch/arc/mm
parent		977b58e1dd6fc2a751fb427798bfb8256476c93e (diff)
parent		e7d5bab5efb94a82d80cab3ad68c357ce73ea596 (diff)
Merge tag 'arc-v3.10-rc1-part2' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc

Pull second set of arc arch updates from Vineet Gupta:
 "Aliasing VIPT dcache support for ARC

  I'm satisfied with testing, specially with fuse which has historically
  given grief to VIPT arches (ARM/PARISC...)"

* tag 'arc-v3.10-rc1-part2' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc:
  ARC: [TB10x] Remove GENERIC_GPIO
  ARC: [mm] Aliasing VIPT dcache support 4/4
  ARC: [mm] Aliasing VIPT dcache support 3/4
  ARC: [mm] Aliasing VIPT dcache support 2/4
  ARC: [mm] Aliasing VIPT dcache support 1/4
  ARC: [mm] refactor the core (i|d)cache line ops loops
  ARC: [mm] serious bug in vaddr based icache flush
Diffstat (limited to 'arch/arc/mm')
-rw-r--r--	arch/arc/mm/Makefile		  2
-rw-r--r--	arch/arc/mm/cache_arc700.c	221
-rw-r--r--	arch/arc/mm/mmap.c		 78
-rw-r--r--	arch/arc/mm/tlb.c		 29
4 files changed, 279 insertions, 51 deletions
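
A quick note on what "aliasing VIPT dcache" means here: a virtually-indexed, physically-tagged cache aliases when one cache way is larger than a page, so the set index depends on virtual-address bits above the page offset. The new arc_cache_init() check in the diff below encodes exactly that condition. A minimal user-space sketch of the same test, with hypothetical cache geometry (the 32K/2-way figures are illustrative; the 8K page size is only inferred from the patch's own "bits 17:13 of vaddr" comment):

#include <stdio.h>

int main(void)
{
        /* hypothetical example values, for illustration only */
        unsigned int dcache_sz   = 32 * 1024;  /* total D$ size */
        unsigned int dcache_ways = 2;          /* associativity */
        unsigned int page_sz     = 8 * 1024;   /* 8K page, as suggested by the
                                                * patch's vaddr-bits comment */

        /* same condition the patch computes into dcache_does_alias */
        int aliases = (dcache_sz / dcache_ways) > page_sz;

        /* 32K/2 = 16K per way > 8K page: two different pages fall into one
         * way, so two virtual mappings of the same physical page can sit in
         * different cache lines - the cache aliases.
         */
        printf("per-way = %u, page = %u, aliases = %d\n",
               dcache_sz / dcache_ways, page_sz, aliases);
        return 0;
}
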
diff --git a/arch/arc/mm/Makefile b/arch/arc/mm/Makefile
index 168dc146a8f6..ac95cc239c1e 100644
--- a/arch/arc/mm/Makefile
+++ b/arch/arc/mm/Makefile
@@ -7,4 +7,4 @@
 #
 
 obj-y	:= extable.o ioremap.o dma.o fault.o init.o
-obj-y	+= tlb.o tlbex.o cache_arc700.o
+obj-y	+= tlb.o tlbex.o cache_arc700.o mmap.o
diff --git a/arch/arc/mm/cache_arc700.c b/arch/arc/mm/cache_arc700.c
index c854cf95f706..2f12bca8aef3 100644
--- a/arch/arc/mm/cache_arc700.c
+++ b/arch/arc/mm/cache_arc700.c
@@ -68,6 +68,7 @@
 #include <linux/mmu_context.h>
 #include <linux/syscalls.h>
 #include <linux/uaccess.h>
+#include <linux/pagemap.h>
 #include <asm/cacheflush.h>
 #include <asm/cachectl.h>
 #include <asm/setup.h>
@@ -138,6 +139,7 @@ void __cpuinit arc_cache_init(void)
 	struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;
 	struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;
 	int way_pg_ratio = way_pg_ratio;
+	int dcache_does_alias;
 	char str[256];
 
 	printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
@@ -184,9 +186,13 @@ chk_dc:
 		panic("Cache H/W doesn't match kernel Config");
 	}
 
+	dcache_does_alias = (dc->sz / ARC_DCACHE_WAYS) > PAGE_SIZE;
+
 	/* check for D-Cache aliasing */
-	if ((dc->sz / ARC_DCACHE_WAYS) > PAGE_SIZE)
-		panic("D$ aliasing not handled right now\n");
+	if (dcache_does_alias && !cache_is_vipt_aliasing())
+		panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
+	else if (!dcache_does_alias && cache_is_vipt_aliasing())
+		panic("Don't need CONFIG_ARC_CACHE_VIPT_ALIASING\n");
 #endif
 
 	/* Set the default Invalidate Mode to "simpy discard dirty lines"
@@ -269,47 +275,57 @@ static inline void __dc_entire_op(const int cacheop)
  * Per Line Operation on D-Cache
  * Doesn't deal with type-of-op/IRQ-disabling/waiting-for-flush-to-complete
  * It's sole purpose is to help gcc generate ZOL
+ * (aliasing VIPT dcache flushing needs both vaddr and paddr)
  */
-static inline void __dc_line_loop(unsigned long start, unsigned long sz,
-				  int aux_reg)
+static inline void __dc_line_loop(unsigned long paddr, unsigned long vaddr,
+				  unsigned long sz, const int aux_reg)
 {
-	int num_lines, slack;
+	int num_lines;
 
 	/* Ensure we properly floor/ceil the non-line aligned/sized requests
-	 * and have @start - aligned to cache line and integral @num_lines.
+	 * and have @paddr - aligned to cache line and integral @num_lines.
 	 * This however can be avoided for page sized since:
-	 * -@start will be cache-line aligned already (being page aligned)
+	 * -@paddr will be cache-line aligned already (being page aligned)
 	 * -@sz will be integral multiple of line size (being page sized).
 	 */
 	if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) {
-		slack = start & ~DCACHE_LINE_MASK;
-		sz += slack;
-		start -= slack;
+		sz += paddr & ~DCACHE_LINE_MASK;
+		paddr &= DCACHE_LINE_MASK;
+		vaddr &= DCACHE_LINE_MASK;
 	}
 
 	num_lines = DIV_ROUND_UP(sz, ARC_DCACHE_LINE_LEN);
 
+#if (CONFIG_ARC_MMU_VER <= 2)
+	paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;
+#endif
+
 	while (num_lines-- > 0) {
 #if (CONFIG_ARC_MMU_VER > 2)
 		/*
 		 * Just as for I$, in MMU v3, D$ ops also require
 		 * "tag" bits in DC_PTAG, "index" bits in FLDL,IVDL ops
-		 * But we pass phy addr for both. This works since Linux
-		 * doesn't support aliasing configs for D$, yet.
-		 * Thus paddr is enough to provide both tag and index.
 		 */
-		write_aux_reg(ARC_REG_DC_PTAG, start);
+		write_aux_reg(ARC_REG_DC_PTAG, paddr);
+
+		write_aux_reg(aux_reg, vaddr);
+		vaddr += ARC_DCACHE_LINE_LEN;
+#else
+		/* paddr contains stuffed vaddrs bits */
+		write_aux_reg(aux_reg, paddr);
 #endif
-		write_aux_reg(aux_reg, start);
-		start += ARC_DCACHE_LINE_LEN;
+		paddr += ARC_DCACHE_LINE_LEN;
 	}
 }
 
+/* For kernel mappings cache operation: index is same as paddr */
+#define __dc_line_op_k(p, sz, op)	__dc_line_op(p, p, sz, op)
+
 /*
  * D-Cache : Per Line INV (discard or wback+discard) or FLUSH (wback)
  */
-static inline void __dc_line_op(unsigned long start, unsigned long sz,
-				const int cacheop)
+static inline void __dc_line_op(unsigned long paddr, unsigned long vaddr,
+				unsigned long sz, const int cacheop)
 {
 	unsigned long flags, tmp = tmp;
 	int aux;
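
The new #if (CONFIG_ARC_MMU_VER <= 2) block above folds the alias-resolving index bits of the virtual address into the value handed to the cache-op register, since pre-v3 MMUs take a single "stuffed" address rather than a separate PTAG write. A standalone sketch of that packing, assuming the 8K-page layout implied by the "bits 17:13 of vaddr go as bits 4:0 of paddr" comment; the concrete addresses are made up for illustration:

#include <stdio.h>

int main(void)
{
        /* assumed from the patch: 8K pages => PAGE_SHIFT == 13 */
        const unsigned int page_shift = 13;

        /* hypothetical kernel/user mappings of the same physical page */
        unsigned long paddr = 0x80a4e000UL;   /* page-aligned physical addr */
        unsigned long vaddr = 0x2002a000UL;   /* page-aligned user vaddr    */

        /* same packing as the MMU v1/v2 path: vaddr bits 17:13 become
         * bits 4:0 of the value written to the cache-op aux register,
         * supplying the alias-resolving index bits alongside the tag.
         */
        unsigned long op_addr = paddr | ((vaddr >> page_shift) & 0x1F);

        printf("paddr=%#lx vaddr=%#lx -> op_addr=%#lx (index bits %#lx)\n",
               paddr, vaddr, op_addr, (vaddr >> page_shift) & 0x1FUL);
        return 0;
}
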
@@ -332,7 +348,7 @@ static inline void __dc_line_op(unsigned long start, unsigned long sz,
 	else
 		aux = ARC_REG_DC_FLDL;
 
-	__dc_line_loop(start, sz, aux);
+	__dc_line_loop(paddr, vaddr, sz, aux);
 
 	if (cacheop & OP_FLUSH)	/* flush / flush-n-inv both wait */
 		wait_for_flush();
@@ -347,7 +363,8 @@ static inline void __dc_line_op(unsigned long start, unsigned long sz,
 #else
 
 #define __dc_entire_op(cacheop)
-#define __dc_line_op(start, sz, cacheop)
+#define __dc_line_op(paddr, vaddr, sz, cacheop)
+#define __dc_line_op_k(paddr, sz, cacheop)
 
 #endif /* CONFIG_ARC_HAS_DCACHE */
 
@@ -399,49 +416,45 @@ static inline void __dc_line_op(unsigned long start, unsigned long sz,
 /***********************************************************
  * Machine specific helper for per line I-Cache invalidate.
  */
-static void __ic_line_inv_vaddr(unsigned long phy_start, unsigned long vaddr,
+static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
 				unsigned long sz)
 {
 	unsigned long flags;
-	int num_lines, slack;
-	unsigned int addr;
+	int num_lines;
 
 	/*
 	 * Ensure we properly floor/ceil the non-line aligned/sized requests:
 	 * However page sized flushes can be compile time optimised.
-	 * -@phy_start will be cache-line aligned already (being page aligned)
+	 * -@paddr will be cache-line aligned already (being page aligned)
 	 * -@sz will be integral multiple of line size (being page sized).
 	 */
 	if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) {
-		slack = phy_start & ~ICACHE_LINE_MASK;
-		sz += slack;
-		phy_start -= slack;
+		sz += paddr & ~ICACHE_LINE_MASK;
+		paddr &= ICACHE_LINE_MASK;
+		vaddr &= ICACHE_LINE_MASK;
 	}
 
 	num_lines = DIV_ROUND_UP(sz, ARC_ICACHE_LINE_LEN);
 
-#if (CONFIG_ARC_MMU_VER > 2)
-	vaddr &= ~ICACHE_LINE_MASK;
-	addr = phy_start;
-#else
+#if (CONFIG_ARC_MMU_VER <= 2)
 	/* bits 17:13 of vaddr go as bits 4:0 of paddr */
-	addr = phy_start | ((vaddr >> 13) & 0x1F);
+	paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;
 #endif
 
 	local_irq_save(flags);
 	while (num_lines-- > 0) {
 #if (CONFIG_ARC_MMU_VER > 2)
 		/* tag comes from phy addr */
-		write_aux_reg(ARC_REG_IC_PTAG, addr);
+		write_aux_reg(ARC_REG_IC_PTAG, paddr);
 
 		/* index bits come from vaddr */
 		write_aux_reg(ARC_REG_IC_IVIL, vaddr);
 		vaddr += ARC_ICACHE_LINE_LEN;
 #else
 		/* paddr contains stuffed vaddrs bits */
-		write_aux_reg(ARC_REG_IC_IVIL, addr);
+		write_aux_reg(ARC_REG_IC_IVIL, paddr);
 #endif
-		addr += ARC_ICACHE_LINE_LEN;
+		paddr += ARC_ICACHE_LINE_LEN;
 	}
 	local_irq_restore(flags);
 }
@@ -457,29 +470,66 @@ static void __ic_line_inv_vaddr(unsigned long phy_start, unsigned long vaddr,
  * Exported APIs
  */
 
+/*
+ * Handle cache congruency of kernel and userspace mappings of page when kernel
+ * writes-to/reads-from
+ *
+ * The idea is to defer flushing of kernel mapping after a WRITE, possible if:
+ * -dcache is NOT aliasing, hence any U/K-mappings of page are congruent
+ * -U-mapping doesn't exist yet for page (finalised in update_mmu_cache)
+ * -In SMP, if hardware caches are coherent
+ *
+ * There's a corollary case, where kernel READs from a userspace mapped page.
+ * If the U-mapping is not congruent to to K-mapping, former needs flushing.
+ */
 void flush_dcache_page(struct page *page)
 {
-	/* Make a note that dcache is not yet flushed for this page */
-	set_bit(PG_arch_1, &page->flags);
+	struct address_space *mapping;
+
+	if (!cache_is_vipt_aliasing()) {
+		set_bit(PG_arch_1, &page->flags);
+		return;
+	}
+
+	/* don't handle anon pages here */
+	mapping = page_mapping(page);
+	if (!mapping)
+		return;
+
+	/*
+	 * pagecache page, file not yet mapped to userspace
+	 * Make a note that K-mapping is dirty
+	 */
+	if (!mapping_mapped(mapping)) {
+		set_bit(PG_arch_1, &page->flags);
+	} else if (page_mapped(page)) {
+
+		/* kernel reading from page with U-mapping */
+		void *paddr = page_address(page);
+		unsigned long vaddr = page->index << PAGE_CACHE_SHIFT;
+
+		if (addr_not_cache_congruent(paddr, vaddr))
+			__flush_dcache_page(paddr, vaddr);
+	}
 }
 EXPORT_SYMBOL(flush_dcache_page);
 
 
 void dma_cache_wback_inv(unsigned long start, unsigned long sz)
 {
-	__dc_line_op(start, sz, OP_FLUSH_N_INV);
+	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
 }
 EXPORT_SYMBOL(dma_cache_wback_inv);
 
 void dma_cache_inv(unsigned long start, unsigned long sz)
 {
-	__dc_line_op(start, sz, OP_INV);
+	__dc_line_op_k(start, sz, OP_INV);
 }
 EXPORT_SYMBOL(dma_cache_inv);
 
 void dma_cache_wback(unsigned long start, unsigned long sz)
 {
-	__dc_line_op(start, sz, OP_FLUSH);
+	__dc_line_op_k(start, sz, OP_FLUSH);
 }
 EXPORT_SYMBOL(dma_cache_wback);
 
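
The new flush_dcache_page() above keys off addr_not_cache_congruent(), which is defined elsewhere in the ARC headers. The sketch below only illustrates the idea it relies on: two mappings of the same physical page are congruent when they select the same cache colour, i.e. the same index bits above the page offset. The colour count, mask, helper names and addresses here are illustrative assumptions, not the kernel's actual definitions:

#include <stdio.h>

/* Illustrative only: assume 8K pages and a 2-colour aliasing dcache,
 * so the cache colour of a mapping is bit 13 of the address.
 */
#define PAGE_SHIFT_EX   13
#define CACHE_COLOURS_EX 2

static unsigned long cache_colour(unsigned long addr)
{
        return (addr >> PAGE_SHIFT_EX) & (CACHE_COLOURS_EX - 1);
}

static int not_congruent(unsigned long kaddr, unsigned long uaddr)
{
        /* different colours => the two mappings index different cache sets,
         * so data written via one mapping can stay invisible through the
         * other until the kernel mapping is written back and invalidated.
         */
        return cache_colour(kaddr) != cache_colour(uaddr);
}

int main(void)
{
        unsigned long kaddr = 0x90252000UL;   /* hypothetical K-mapping */
        unsigned long uaddr = 0x20008000UL;   /* hypothetical U-mapping */

        printf("colours: K=%lu U=%lu, flush needed: %s\n",
               cache_colour(kaddr), cache_colour(uaddr),
               not_congruent(kaddr, uaddr) ? "yes" : "no");
        return 0;
}
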
@@ -560,7 +610,7 @@ void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len)
 
 	local_irq_save(flags);
 	__ic_line_inv_vaddr(paddr, vaddr, len);
-	__dc_line_op(paddr, len, OP_FLUSH);
+	__dc_line_op(paddr, vaddr, len, OP_FLUSH);
 	local_irq_restore(flags);
 }
 
@@ -570,9 +620,13 @@ void __inv_icache_page(unsigned long paddr, unsigned long vaddr)
 	__ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE);
 }
 
-void __flush_dcache_page(unsigned long paddr)
+/*
+ * wrapper to clearout kernel or userspace mappings of a page
+ * For kernel mappings @vaddr == @paddr
+ */
+void ___flush_dcache_page(unsigned long paddr, unsigned long vaddr)
 {
-	__dc_line_op(paddr, PAGE_SIZE, OP_FLUSH_N_INV);
+	__dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV);
 }
 
 void flush_icache_all(void)
@@ -601,6 +655,87 @@ noinline void flush_cache_all(void)
 
 }
 
+#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING
+
+void flush_cache_mm(struct mm_struct *mm)
+{
+	flush_cache_all();
+}
+
+void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
+		      unsigned long pfn)
+{
+	unsigned int paddr = pfn << PAGE_SHIFT;
+
+	__sync_icache_dcache(paddr, u_vaddr, PAGE_SIZE);
+}
+
+void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
+		       unsigned long end)
+{
+	flush_cache_all();
+}
+
+void copy_user_highpage(struct page *to, struct page *from,
+			unsigned long u_vaddr, struct vm_area_struct *vma)
+{
+	void *kfrom = page_address(from);
+	void *kto = page_address(to);
+	int clean_src_k_mappings = 0;
+
+	/*
+	 * If SRC page was already mapped in userspace AND it's U-mapping is
+	 * not congruent with K-mapping, sync former to physical page so that
+	 * K-mapping in memcpy below, sees the right data
+	 *
+	 * Note that while @u_vaddr refers to DST page's userspace vaddr, it is
+	 * equally valid for SRC page as well
+	 */
+	if (page_mapped(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
+		__flush_dcache_page(kfrom, u_vaddr);
+		clean_src_k_mappings = 1;
+	}
+
+	copy_page(kto, kfrom);
+
+	/*
+	 * Mark DST page K-mapping as dirty for a later finalization by
+	 * update_mmu_cache(). Although the finalization could have been done
+	 * here as well (given that both vaddr/paddr are available).
+	 * But update_mmu_cache() already has code to do that for other
+	 * non copied user pages (e.g. read faults which wire in pagecache page
+	 * directly).
+	 */
+	set_bit(PG_arch_1, &to->flags);
+
+	/*
+	 * if SRC was already usermapped and non-congruent to kernel mapping
+	 * sync the kernel mapping back to physical page
+	 */
+	if (clean_src_k_mappings) {
+		__flush_dcache_page(kfrom, kfrom);
+	} else {
+		set_bit(PG_arch_1, &from->flags);
+	}
+}
+
+void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
+{
+	clear_page(to);
+	set_bit(PG_arch_1, &page->flags);
+}
+
+void flush_anon_page(struct vm_area_struct *vma, struct page *page,
+		     unsigned long u_vaddr)
+{
+	/* TBD: do we really need to clear the kernel mapping */
+	__flush_dcache_page(page_address(page), u_vaddr);
+	__flush_dcache_page(page_address(page), page_address(page));
+
+}
+
+#endif
+
 /**********************************************************************
  * Explicit Cache flush request from user space via syscall
  * Needed for JITs which generate code on the fly
diff --git a/arch/arc/mm/mmap.c b/arch/arc/mm/mmap.c
new file mode 100644
index 000000000000..2e06d56e987b
--- /dev/null
+++ b/arch/arc/mm/mmap.c
@@ -0,0 +1,78 @@
+/*
+ * ARC700 mmap
+ *
+ * (started from arm version - for VIPT alias handling)
+ *
+ * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/sched.h>
+#include <asm/cacheflush.h>
+
+#define COLOUR_ALIGN(addr, pgoff)			\
+	((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) +	\
+	 (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))
+
+/*
+ * Ensure that shared mappings are correctly aligned to
+ * avoid aliasing issues with VIPT caches.
+ * We need to ensure that
+ * a specific page of an object is always mapped at a multiple of
+ * SHMLBA bytes.
+ */
+unsigned long
+arch_get_unmapped_area(struct file *filp, unsigned long addr,
+		unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma;
+	int do_align = 0;
+	int aliasing = cache_is_vipt_aliasing();
+	struct vm_unmapped_area_info info;
+
+	/*
+	 * We only need to do colour alignment if D cache aliases.
+	 */
+	if (aliasing)
+		do_align = filp || (flags & MAP_SHARED);
+
+	/*
+	 * We enforce the MAP_FIXED case.
+	 */
+	if (flags & MAP_FIXED) {
+		if (aliasing && flags & MAP_SHARED &&
+		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
+			return -EINVAL;
+		return addr;
+	}
+
+	if (len > TASK_SIZE)
+		return -ENOMEM;
+
+	if (addr) {
+		if (do_align)
+			addr = COLOUR_ALIGN(addr, pgoff);
+		else
+			addr = PAGE_ALIGN(addr);
+
+		vma = find_vma(mm, addr);
+		if (TASK_SIZE - len >= addr &&
+		    (!vma || addr + len <= vma->vm_start))
+			return addr;
+	}
+
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = mm->mmap_base;
+	info.high_limit = TASK_SIZE;
+	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
+	info.align_offset = pgoff << PAGE_SHIFT;
+	return vm_unmapped_area(&info);
+}
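
COLOUR_ALIGN() above first rounds the requested address up to an SHMLBA boundary and then adds the file offset's colour within SHMLBA, so a given page of a shared object always lands at the same dcache colour in every mapping. A worked example of that arithmetic, using a hypothetical 8K-page / two-colour (16K SHMLBA) configuration rather than ARC's real header values:

#include <stdio.h>

/* Illustrative stand-ins: 8K pages and an SHMLBA spanning 2 cache colours
 * (16K). ARC's real SHMLBA/PAGE_SHIFT come from its headers.
 */
#define PAGE_SHIFT_EX   13
#define SHMLBA_EX       (2UL << PAGE_SHIFT_EX)

/* same shape as the COLOUR_ALIGN() macro in mmap.c above */
#define COLOUR_ALIGN_EX(addr, pgoff)                            \
        ((((addr) + SHMLBA_EX - 1) & ~(SHMLBA_EX - 1)) +        \
         (((pgoff) << PAGE_SHIFT_EX) & (SHMLBA_EX - 1)))

int main(void)
{
        unsigned long hint  = 0x20005000UL;   /* unaligned mmap() hint */
        unsigned long pgoff = 3;              /* page 3 of the file    */

        /* Round the hint up to a SHMLBA boundary (0x20008000), then add the
         * colour of file-page 3 (odd page => colour 1 => offset 0x2000), so
         * every such mapping puts file-page 3 at the same dcache colour.
         */
        unsigned long addr = COLOUR_ALIGN_EX(hint, pgoff);

        printf("hint=%#lx pgoff=%lu -> addr=%#lx\n", hint, pgoff, addr);
        return 0;
}
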
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index 003d69ac6ffa..066145b5f348 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -421,25 +421,40 @@ void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 /*
  * Called at the end of pagefault, for a userspace mapped page
  * -pre-install the corresponding TLB entry into MMU
- * -Finalize the delayed D-cache flush (wback+inv kernel mapping)
+ * -Finalize the delayed D-cache flush of kernel mapping of page due to
+ *  flush_dcache_page(), copy_user_page()
+ *
+ * Note that flush (when done) involves both WBACK - so physical page is
+ * in sync as well as INV - so any non-congruent aliases don't remain
  */
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
 		      pte_t *ptep)
 {
 	unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
+	unsigned long paddr = pte_val(*ptep) & PAGE_MASK;
 
 	create_tlb(vma, vaddr, ptep);
 
-	/* icache doesn't snoop dcache, thus needs to be made coherent here */
-	if (vma->vm_flags & VM_EXEC) {
+	/*
+	 * Exec page : Independent of aliasing/page-color considerations,
+	 *             since icache doesn't snoop dcache on ARC, any dirty
+	 *             K-mapping of a code page needs to be wback+inv so that
+	 *             icache fetch by userspace sees code correctly.
+	 * !EXEC page: If K-mapping is NOT congruent to U-mapping, flush it
+	 *             so userspace sees the right data.
+	 *             (Avoids the flush for Non-exec + congruent mapping case)
+	 */
+	if (vma->vm_flags & VM_EXEC || addr_not_cache_congruent(paddr, vaddr)) {
 		struct page *page = pfn_to_page(pte_pfn(*ptep));
 
-		/* if page was dcache dirty, flush now */
 		int dirty = test_and_clear_bit(PG_arch_1, &page->flags);
 		if (dirty) {
-			unsigned long paddr = pte_val(*ptep) & PAGE_MASK;
-			__flush_dcache_page(paddr);
-			__inv_icache_page(paddr, vaddr);
+			/* wback + inv dcache lines */
+			__flush_dcache_page(paddr, paddr);
+
+			/* invalidate any existing icache lines */
+			if (vma->vm_flags & VM_EXEC)
+				__inv_icache_page(paddr, vaddr);
 		}
 	}
 }
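
The rewritten update_mmu_cache() above finalizes the flush deferred by flush_dcache_page()/copy_user_highpage(), and its comment spells out when that work can be skipped. As a sanity check, here is a purely illustrative restatement of that decision table, with no kernel APIs, just the same conditions (the helper name and wording are mine, not the kernel's):

#include <stdio.h>
#include <stdbool.h>

/* Mirrors the policy in the hunk above for a freshly wired user page. */
static const char *finalize(bool exec, bool congruent, bool k_map_dirty)
{
        if (!exec && congruent)
                return "nothing (deferred flush stays deferred)";
        if (!k_map_dirty)
                return "nothing (K-mapping already clean)";
        return exec ? "dcache wback+inv, then icache inv"
                    : "dcache wback+inv only";
}

int main(void)
{
        for (int exec = 0; exec <= 1; exec++)
                for (int cong = 0; cong <= 1; cong++)
                        printf("exec=%d congruent=%d dirty=1 -> %s\n",
                               exec, cong, finalize(exec, cong, true));
        return 0;
}
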