aboutsummaryrefslogtreecommitdiffstats
path: root/arch
diff options
context:
space:
mode:
authorThomas Gleixner <tglx@linutronix.de>2008-01-30 07:34:08 -0500
committerIngo Molnar <mingo@elte.hu>2008-01-30 07:34:08 -0500
commit3b233e52f70bf102078b2c0c3f7f86a441689056 (patch)
treef9c65948016a3bffd3a3b0c8d327d28fc34a7ea8 /arch
parentcd8ddf1a2800026dd58433333cce7a65cbc6c6d2 (diff)
x86: optimize clflush
clflush is sufficient to be issued on one CPU. The invalidation is
broadcast throughout the coherence domain.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch')
-rw-r--r--arch/x86/mm/pageattr.c22
1 files changed, 8 insertions, 14 deletions
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 90b658ac39c2..bbfc8e2466ab 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -64,35 +64,29 @@ static void cpa_flush_all(void)
 	on_each_cpu(__cpa_flush_all, NULL, 1, 1);
 }
 
-struct clflush_data {
-	unsigned long addr;
-	int numpages;
-};
-
 static void __cpa_flush_range(void *arg)
 {
-	struct clflush_data *cld = arg;
-
 	/*
 	 * We could optimize that further and do individual per page
 	 * tlb invalidates for a low number of pages. Caveat: we must
 	 * flush the high aliases on 64bit as well.
 	 */
 	__flush_tlb_all();
-
-	clflush_cache_range((void *) cld->addr, cld->numpages * PAGE_SIZE);
 }
 
 static void cpa_flush_range(unsigned long addr, int numpages)
 {
-	struct clflush_data cld;
-
 	BUG_ON(irqs_disabled());
 
-	cld.addr = addr;
-	cld.numpages = numpages;
-
-	on_each_cpu(__cpa_flush_range, &cld, 1, 1);
+	on_each_cpu(__cpa_flush_range, NULL, 1, 1);
+
+	/*
+	 * We only need to flush on one CPU,
+	 * clflush is a MESI-coherent instruction that
+	 * will cause all other CPUs to flush the same
+	 * cachelines:
+	 */
+	clflush_cache_range((void *) addr, numpages * PAGE_SIZE);
 }
97 91
98/* 92/*