-rw-r--r--  arch/arc/include/asm/pgtable.h |  9 +--------
-rw-r--r--  arch/arc/mm/cache.c            | 16 +++++++++++-----
-rw-r--r--  arch/arc/mm/fault.c            | 13 ++++++++++---
3 files changed, 22 insertions(+), 16 deletions(-)
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index f7c7273cd537..bd771351a1d1 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -270,13 +270,7 @@ static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
 				(unsigned long)(((pte_val(x) - CONFIG_LINUX_LINK_BASE) >> \
 						PAGE_SHIFT)))
 
-#define mk_pte(page, pgprot)						\
-({									\
-	pte_t pte;							\
-	pte_val(pte) = __pa(page_address(page)) + pgprot_val(pgprot);	\
-	pte;								\
-})
-
+#define mk_pte(page, prot)	pfn_pte(page_to_pfn(page), prot)
 #define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
 #define pfn_pte(pfn, prot)	(__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))
 #define __pte_index(addr)	(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
@@ -360,7 +354,6 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 #define pgd_offset_fast(mm, addr)	pgd_offset(mm, addr)
 #endif
 
-extern void paging_init(void);
 extern pgd_t swapper_pg_dir[] __aligned(PAGE_SIZE);
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 		      pte_t *ptep);
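
Review note, not part of the commit: the old mk_pte derived the PTE from the page's kernel virtual address via __pa(), which presumes a permanent kernel mapping and therefore cannot work for highmem pages. Composing page_to_pfn() with pfn_pte() goes through the page frame number and avoids the virtual address entirely. A standalone, userspace-runnable sketch of the pfn/prot packing (the constants are made up for illustration, not ARC's real values):

#include <stdio.h>

#define PAGE_SHIFT	12		/* illustrative: 4 KiB pages */
#define PROT_BITS	0x0003UL	/* illustrative "present|cacheable" bits */

/* pfn_pte(): pack a page frame number plus protection bits into a pte word */
static unsigned long pfn_pte(unsigned long pfn, unsigned long prot)
{
	return (pfn << PAGE_SHIFT) | prot;
}

/* pte_pfn(): the inverse, recover the frame number from the pte word */
static unsigned long pte_pfn(unsigned long pte)
{
	return pte >> PAGE_SHIFT;
}

int main(void)
{
	unsigned long pfn = 0x8123;	/* any frame, lowmem or highmem */
	unsigned long pte = pfn_pte(pfn, PROT_BITS);

	/* round-trips regardless of whether the frame has a kernel vaddr */
	printf("pte=0x%lx -> pfn=0x%lx\n", pte, pte_pfn(pte));
	return 0;
}
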
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index ae3b772ecc4d..521fb2bf90bd 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -806,8 +806,8 @@ void flush_anon_page(struct vm_area_struct *vma, struct page *page,
 void copy_user_highpage(struct page *to, struct page *from,
 			unsigned long u_vaddr, struct vm_area_struct *vma)
 {
-	unsigned long kfrom = (unsigned long)page_address(from);
-	unsigned long kto = (unsigned long)page_address(to);
+	void *kfrom = kmap_atomic(from);
+	void *kto = kmap_atomic(to);
 	int clean_src_k_mappings = 0;
 
 	/*
@@ -817,13 +817,16 @@ void copy_user_highpage(struct page *to, struct page *from,
 	 *
 	 * Note that while @u_vaddr refers to DST page's userspace vaddr, it is
 	 * equally valid for SRC page as well
+	 *
+	 * For !VIPT cache, all of this gets compiled out as
+	 * addr_not_cache_congruent() is 0
 	 */
 	if (page_mapped(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
-		__flush_dcache_page(kfrom, u_vaddr);
+		__flush_dcache_page((unsigned long)kfrom, u_vaddr);
 		clean_src_k_mappings = 1;
 	}
 
-	copy_page((void *)kto, (void *)kfrom);
+	copy_page(kto, kfrom);
 
 	/*
 	 * Mark DST page K-mapping as dirty for a later finalization by
@@ -840,11 +843,14 @@ void copy_user_highpage(struct page *to, struct page *from,
 	 * sync the kernel mapping back to physical page
 	 */
 	if (clean_src_k_mappings) {
-		__flush_dcache_page(kfrom, kfrom);
+		__flush_dcache_page((unsigned long)kfrom, (unsigned long)kfrom);
 		set_bit(PG_dc_clean, &from->flags);
 	} else {
 		clear_bit(PG_dc_clean, &from->flags);
 	}
+
+	kunmap_atomic(kto);
+	kunmap_atomic(kfrom);
 }
 
 void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
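
Review note, not part of the commit: the cache.c hunks swap page_address() for kmap_atomic() so the copy keeps working once the source or destination page may live in highmem, where page_address() yields no mapping. A minimal sketch of the pairing the patch adopts; the helper name is illustrative:

#include <linux/highmem.h>
#include <linux/mm.h>

/*
 * Illustrative helper (not from the patch): kmap_atomic() sets up a
 * short-lived kernel mapping for a page that may lack a permanent one
 * (highmem). Unmaps are issued in reverse (LIFO) order of the maps,
 * exactly as copy_user_highpage() now does with kto before kfrom.
 */
static void copy_one_page(struct page *dst, struct page *src)
{
	void *kto = kmap_atomic(dst);
	void *kfrom = kmap_atomic(src);

	copy_page(kto, kfrom);

	kunmap_atomic(kfrom);	/* mapped last, unmapped first */
	kunmap_atomic(kto);
}
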
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index d948e4e9d89c..af63f4a13e60 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -18,7 +18,14 @@
 #include <asm/pgalloc.h>
 #include <asm/mmu.h>
 
-static int handle_vmalloc_fault(unsigned long address)
+/*
+ * kernel virtual address is required to implement vmalloc/pkmap/fixmap
+ * Refer to asm/processor.h for System Memory Map
+ *
+ * It simply copies the PMD entry (pointer to 2nd level page table or hugepage)
+ * from swapper pgdir to task pgdir. The 2nd level table/page is thus shared
+ */
+noinline static int handle_kernel_vaddr_fault(unsigned long address)
 {
 	/*
 	 * Synchronize this task's top level page-table
@@ -72,8 +79,8 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
 	 * only copy the information from the master page table,
 	 * nothing more.
 	 */
-	if (address >= VMALLOC_START && address <= VMALLOC_END) {
-		ret = handle_vmalloc_fault(address);
+	if (address >= VMALLOC_START) {
+		ret = handle_kernel_vaddr_fault(address);
 		if (unlikely(ret))
 			goto bad_area_nosemaphore;
 		else
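
Review note, not part of the commit: the rename reflects that pkmap and fixmap ranges also sit above VMALLOC_START, so the single >= test now routes every kernel-vaddr fault through the handler. A simplified sketch of the sync the new comment describes, written against the generic page-table accessors; the real ARC handler walks down to the pmd level, and the name sync_kernel_vaddr_entry is illustrative:

#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/pgtable.h>

/*
 * Simplified sketch, assuming 2-level tables: kernel mappings are
 * authoritative only in the master table (swapper_pg_dir). On a fault
 * in kernel virtual space, copy the relevant top-level entry from the
 * master table into the faulting task's pgd, so the 2nd level table
 * (or hugepage entry) is shared from then on.
 */
static noinline int sync_kernel_vaddr_entry(unsigned long address)
{
	pgd_t *pgd = pgd_offset(current->active_mm, address);
	pgd_t *pgd_k = pgd_offset_k(address);	/* same slot in swapper_pg_dir */

	if (!pgd_present(*pgd_k))
		return 1;	/* absent from master table too: genuine bad fault */

	set_pgd(pgd, *pgd_k);	/* share the 2nd level table */
	return 0;
}
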