author		Linus Torvalds <torvalds@linux-foundation.org>	2012-07-26 16:17:17 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-07-26 16:17:17 -0400
commit		4cb38750d49010ae72e718d46605ac9ba5a851b4
tree		8c991a900fd176288f4acbc340512b90d604374d /arch/x86/platform/uv
parent		0a2fe19ccc4bc552a8083a595a3aa737b8bea727
parent		7efa1c87963d23cc57ba40c07316d3e28cc75a3a
Merge branch 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86/mm changes from Peter Anvin:
"The big change here is the patchset by Alex Shi to use INVLPG to flush
only the affected pages when we only need to flush a small page range.
It also removes the special INVALIDATE_TLB_VECTOR interrupts (32
vectors!) and replaces them with an ordinary IPI function call."
Fix up trivial conflicts in arch/x86/include/asm/apic.h (added code next
to changed line)
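
The idea behind the range flush is a simple size cutoff, sketched below in
kernel-style C. This is a minimal illustration, not the kernel's actual
implementation: flush_tlb_all()/flush_tlb_single() are stubs, and
FLUSHALL_SHIFT is a hypothetical stand-in for the per-CPU tlb_flushall_shift
tunable added by the series.

	#define PAGE_SHIFT	12
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)
	#define FLUSHALL_SHIFT	5	/* hypothetical stand-in for tlb_flushall_shift */

	/* Stubs: the kernel reloads CR3 for a full flush and uses INVLPG per page. */
	static void flush_tlb_all(void) { }
	static void flush_tlb_single(unsigned long addr) { (void)addr; }

	/* Flush only [start, end) when the range is small enough, else flush everything. */
	static void flush_tlb_range_sketch(unsigned long start, unsigned long end)
	{
		unsigned long addr;

		if (((end - start) >> PAGE_SHIFT) > (1UL << FLUSHALL_SHIFT)) {
			flush_tlb_all();	/* large range: one global flush is cheaper */
			return;
		}
		for (addr = start; addr < end; addr += PAGE_SIZE)
			flush_tlb_single(addr);	/* small range: INVLPG page by page */
	}

Raising the shift makes per-page invalidation win for larger ranges; the
debugfs knob in the series exists precisely because the break-even point
varies by CPU.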
* 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/tlb: Fix build warning and crash when building for !SMP
x86/tlb: do flush_tlb_kernel_range by 'invlpg'
x86/tlb: replace INVALIDATE_TLB_VECTOR by CALL_FUNCTION_VECTOR
x86/tlb: enable tlb flush range support for x86
mm/mmu_gather: enable tlb flush range in generic mmu_gather
x86/tlb: add tlb_flushall_shift knob into debugfs
x86/tlb: add tlb_flushall_shift for specific CPU
x86/tlb: fall back to flush all when meet a THP large page
x86/flush_tlb: try flush_tlb_single one by one in flush_tlb_range
x86/tlb_info: get last level TLB entry number of CPU
x86: Add read_mostly declaration/definition to variables from smp.h
x86: Define early read-mostly per-cpu macros
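
The INVALIDATE_TLB_VECTOR removal in the list above amounts to carrying the
flush request through the generic function-call IPI instead of 32 dedicated
vectors. A rough sketch of that shape, assuming a hypothetical
flush_tlb_info_sketch payload (the series uses a similar structure together
with the kernel's smp_call_function_many()):

	#include <linux/smp.h>
	#include <linux/cpumask.h>
	#include <linux/mm_types.h>

	/* Hypothetical payload; the series carries the mm and the address range. */
	struct flush_tlb_info_sketch {
		struct mm_struct *mm;
		unsigned long start;
		unsigned long end;
	};

	/* Runs on each target CPU out of the generic function-call IPI. */
	static void flush_tlb_func_sketch(void *data)
	{
		struct flush_tlb_info_sketch *f = data;

		/* ... invalidate f->start..f->end for f->mm on this CPU ... */
		(void)f;
	}

	static void flush_tlb_others_sketch(const struct cpumask *mask,
					    struct flush_tlb_info_sketch *info)
	{
		/* One ordinary IPI replaces the 32 INVALIDATE_TLB_VECTOR slots. */
		smp_call_function_many(mask, flush_tlb_func_sketch, info, 1 /* wait */);
	}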
Diffstat (limited to 'arch/x86/platform/uv')
 arch/x86/platform/uv/tlb_uv.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index 71b5d5a07d7b..b8b3a37c80cd 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -1055,8 +1055,8 @@ static int set_distrib_bits(struct cpumask *flush_mask, struct bau_control *bcp,
  * done. The returned pointer is valid till preemption is re-enabled.
  */
 const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
-				struct mm_struct *mm, unsigned long va,
-				unsigned int cpu)
+				struct mm_struct *mm, unsigned long start,
+				unsigned end, unsigned int cpu)
 {
 	int locals = 0;
 	int remotes = 0;
@@ -1113,7 +1113,7 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
 
 	record_send_statistics(stat, locals, hubs, remotes, bau_desc);
 
-	bau_desc->payload.address = va;
+	bau_desc->payload.address = start;
 	bau_desc->payload.sending_cpu = cpu;
 	/*
 	 * uv_flush_send_and_wait returns 0 if all cpu's were messaged,