 arch/x86/mm/pageattr.c       | 31 ++++++++++++++++++++++++++++-------
 include/asm-x86/cacheflush.h |  2 +-
 2 files changed, 25 insertions(+), 8 deletions(-)
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index bbfc8e2466ab..97ec9e7d29d9 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -26,7 +26,6 @@ within(unsigned long addr, unsigned long start, unsigned long end)
  * Flushing functions
  */
 
-
 /**
  * clflush_cache_range - flush a cache range with clflush
  * @addr: virtual start address
@@ -35,13 +34,19 @@ within(unsigned long addr, unsigned long start, unsigned long end)
  * clflush is an unordered instruction which needs fencing with mfence
  * to avoid ordering issues.
  */
-void clflush_cache_range(void *addr, int size)
+void clflush_cache_range(void *vaddr, unsigned int size)
 {
-	int i;
+	void *vend = vaddr + size - 1;
 
 	mb();
-	for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size)
-		clflush(addr+i);
+
+	for (; vaddr < vend; vaddr += boot_cpu_data.x86_clflush_size)
+		clflush(vaddr);
+	/*
+	 * Flush any possible final partial cacheline:
+	 */
+	clflush(vend);
+
 	mb();
 }
 
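The old loop only issued clflush at clflush_size strides from addr, so when addr is not cacheline aligned the final partial cacheline of the range could be missed; the rewritten loop plus the trailing clflush(vend) covers it. A minimal user-space sketch of just that arithmetic (the fixed 64-byte line size and all names here are illustrative, not part of the patch):

/* flush-coverage.c: compare which cachelines the old and new loops touch. */
#include <stdio.h>
#include <stdint.h>

#define CLFLUSH_SIZE 64UL	/* illustrative; the kernel uses boot_cpu_data.x86_clflush_size */

static uintptr_t line_of(uintptr_t p) { return p & ~(CLFLUSH_SIZE - 1); }

int main(void)
{
	uintptr_t addr = 0x1010;	/* deliberately not cacheline aligned */
	unsigned int size = 64;
	uintptr_t p, vend = addr + size - 1;

	/* Old loop: strides from addr, can stop short of the last line. */
	printf("old:");
	for (unsigned int i = 0; i < size; i += CLFLUSH_SIZE)
		printf(" %#lx", (unsigned long)line_of(addr + i));

	/* New loop: same stride, then one extra flush at vend. */
	printf("\nnew:");
	for (p = addr; p < vend; p += CLFLUSH_SIZE)
		printf(" %#lx", (unsigned long)line_of(p));
	printf(" %#lx\n", (unsigned long)line_of(vend));	/* final partial line */
	return 0;
}

For addr = 0x1010 and size = 64, the range [0x1010, 0x104f] spans the cachelines at 0x1000 and 0x1040; the old loop flushes only the first.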
@@ -74,9 +79,13 @@ static void __cpa_flush_range(void *arg)
 	__flush_tlb_all();
 }
 
-static void cpa_flush_range(unsigned long addr, int numpages)
+static void cpa_flush_range(unsigned long start, int numpages)
 {
+	unsigned int i, level;
+	unsigned long addr;
+
 	BUG_ON(irqs_disabled());
+	WARN_ON(PAGE_ALIGN(start) != start);
 
 	on_each_cpu(__cpa_flush_range, NULL, 1, 1);
 
@@ -86,7 +95,15 @@ static void cpa_flush_range(unsigned long addr, int numpages)
 	 * will cause all other CPUs to flush the same
 	 * cachelines:
 	 */
-	clflush_cache_range((void *) addr, numpages * PAGE_SIZE);
+	for (i = 0, addr = start; i < numpages; i++, addr += PAGE_SIZE) {
+		pte_t *pte = lookup_address(addr, &level);
+
+		/*
+		 * Only flush present addresses:
+		 */
+		if (pte && pte_present(*pte))
+			clflush_cache_range((void *) addr, PAGE_SIZE);
+	}
 }
 
 /*
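cpa_flush_range() now walks the range one page at a time and flushes only pages whose PTE is present, rather than blindly clflushing the whole range. A rough user-space analogue of that pattern, using mincore(2) as a stand-in for lookup_address()/pte_present() and SSE2 intrinsics in place of the kernel's clflush()/mb() (an illustration under those assumptions only; residency in the mincore sense is not the same thing as PTE presence):

/* flush-present.c: flush only the resident pages of a range (analogue only). */
#include <unistd.h>
#include <sys/mman.h>
#include <emmintrin.h>		/* _mm_clflush(), _mm_mfence() */

static void clflush_range(void *vaddr, unsigned int size)
{
	char *p, *vend = (char *)vaddr + size - 1;

	_mm_mfence();
	for (p = vaddr; p < vend; p += 64)	/* 64: illustrative line size */
		_mm_clflush(p);
	_mm_clflush(vend);			/* final partial cacheline */
	_mm_mfence();
}

int main(void)
{
	long psize = sysconf(_SC_PAGESIZE);
	int numpages = 4;
	char *start = mmap(NULL, numpages * psize, PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	unsigned char vec[4];

	if (start == MAP_FAILED)
		return 1;
	start[0] = 1;				/* fault in only the first page */
	if (mincore(start, numpages * psize, vec))
		return 1;
	for (int i = 0; i < numpages; i++)	/* flush resident pages only */
		if (vec[i] & 1)
			clflush_range(start + i * psize, psize);
	return 0;
}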
diff --git a/include/asm-x86/cacheflush.h b/include/asm-x86/cacheflush.h
index 3e74aff90809..8dd8c5e3cc7f 100644
--- a/include/asm-x86/cacheflush.h
+++ b/include/asm-x86/cacheflush.h
@@ -42,7 +42,7 @@ int set_memory_ro(unsigned long addr, int numpages);
 int set_memory_rw(unsigned long addr, int numpages);
 int set_memory_np(unsigned long addr, int numpages);
 
-void clflush_cache_range(void *addr, int size);
+void clflush_cache_range(void *addr, unsigned int size);
 
 #ifdef CONFIG_DEBUG_RODATA
 void mark_rodata_ro(void);
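The stride used by clflush_cache_range() comes from boot_cpu_data.x86_clflush_size, which the kernel derives at boot from CPUID leaf 1: bits 15:8 of EBX give the CLFLUSH line size in 8-byte units, valid when the CLFSH feature bit (EDX bit 19) is set. A small user-space sketch of the same derivation via the compiler's <cpuid.h> helper:

/* clflush-size.c: derive the CLFLUSH line size the way the kernel does. */
#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 1;
	if (!(edx & (1u << 19)))	/* CLFSH feature bit */
		return 1;
	/* CPUID.01H:EBX[15:8] is the CLFLUSH line size in 8-byte units. */
	printf("clflush size: %u bytes\n", ((ebx >> 8) & 0xff) * 8);
	return 0;
}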