author	Martin Schwidefsky <schwidefsky@de.ibm.com>	2013-08-16 07:31:40 -0400
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2013-08-22 06:20:09 -0400
commit	5c474a1e2265c5156e6c63f87a7e99053039b8b9 (patch)
tree	a50c365553dbaf2c3d97c8a0dbca8c94cec34e9f /arch/s390
parent	b6bed093f489ef0a858e63eebcf5f2fb71ed3222 (diff)
s390/mm: introduce ptep_flush_lazy helper
Isolate the logic of IDTE vs. IPTE flushing of ptes into two functions,
ptep_flush_lazy and __tlb_flush_mm_lazy.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
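
For orientation before the hunks below: the patch replaces the open-coded
mm_exclusive() checks with ptep_flush_lazy() and renames __tlb_flush_mm_cond()
to __tlb_flush_mm_lazy(). A condensed sketch of the two helpers, assembled
from the hunks (the reset of flush_mm in __tlb_flush_mm_lazy is not visible in
the truncated tlbflush.h hunk and is inferred from the pre-existing
__tlb_flush_mm_cond body; the comments are editorial):

static inline void ptep_flush_lazy(struct mm_struct *mm,
				   unsigned long address, pte_t *ptep)
{
	/* The mm counts as "active" on this CPU only if it is the
	 * current address space; any additional attached CPU forces
	 * a direct flush. */
	int active = (mm == current->active_mm) ? 1 : 0;

	if (atomic_read(&mm->context.attach_count) > active)
		__ptep_ipte(address, ptep);	/* flush this pte now (IPTE) */
	else
		mm->context.flush_mm = 1;	/* defer: flush the mm later */
}

static inline void __tlb_flush_mm_lazy(struct mm_struct * mm)
{
	/* Perform the deferred flush of the whole address space
	 * (IDTE, or a full flush, inside __tlb_flush_mm) only if
	 * a lazy flush is actually pending. */
	if (mm->context.flush_mm) {
		__tlb_flush_mm(mm);
		mm->context.flush_mm = 0;
	}
}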
Diffstat (limited to 'arch/s390')
-rw-r--r--	arch/s390/include/asm/mmu_context.h	3
-rw-r--r--	arch/s390/include/asm/pgtable.h	31
-rw-r--r--	arch/s390/include/asm/tlb.h	3
-rw-r--r--	arch/s390/include/asm/tlbflush.h	6
-rw-r--r--	arch/s390/mm/pgtable.c	6
5 files changed, 24 insertions, 25 deletions
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 084e7755ed9b..7b7fce4e8469 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -77,8 +77,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	WARN_ON(atomic_read(&prev->context.attach_count) < 0);
 	atomic_inc(&next->context.attach_count);
 	/* Check for TLBs not flushed yet */
-	if (next->context.flush_mm)
-		__tlb_flush_mm(next);
+	__tlb_flush_mm_lazy(next);
 }
 
 #define enter_lazy_tlb(mm,tsk) do { } while (0)
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 26ee897aa1d6..125e37909998 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -414,12 +414,6 @@ extern unsigned long MODULES_END;
 #define SEGMENT_READ	__pgprot(_SEGMENT_ENTRY_PROTECT)
 #define SEGMENT_WRITE	__pgprot(0)
 
-static inline int mm_exclusive(struct mm_struct *mm)
-{
-	return likely(mm == current->active_mm &&
-		      atomic_read(&mm->context.attach_count) <= 1);
-}
-
 static inline int mm_has_pgste(struct mm_struct *mm)
 {
 #ifdef CONFIG_PGSTE
@@ -1037,6 +1031,17 @@ static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
 	}
 }
 
+static inline void ptep_flush_lazy(struct mm_struct *mm,
+				   unsigned long address, pte_t *ptep)
+{
+	int active = (mm == current->active_mm) ? 1 : 0;
+
+	if (atomic_read(&mm->context.attach_count) > active)
+		__ptep_ipte(address, ptep);
+	else
+		mm->context.flush_mm = 1;
+}
+
 /*
  * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
  * both clear the TLB for the unmapped pte. The reason is that
@@ -1057,15 +1062,13 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
 	pgste_t pgste;
 	pte_t pte;
 
-	mm->context.flush_mm = 1;
 	if (mm_has_pgste(mm)) {
 		pgste = pgste_get_lock(ptep);
 		pgste = pgste_ipte_notify(mm, address, ptep, pgste);
 	}
 
 	pte = *ptep;
-	if (!mm_exclusive(mm))
-		__ptep_ipte(address, ptep);
+	ptep_flush_lazy(mm, address, ptep);
 	pte_val(*ptep) = _PAGE_INVALID;
 
 	if (mm_has_pgste(mm)) {
@@ -1083,15 +1086,13 @@ static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
 	pgste_t pgste;
 	pte_t pte;
 
-	mm->context.flush_mm = 1;
 	if (mm_has_pgste(mm)) {
 		pgste = pgste_get_lock(ptep);
 		pgste_ipte_notify(mm, address, ptep, pgste);
 	}
 
 	pte = *ptep;
-	if (!mm_exclusive(mm))
-		__ptep_ipte(address, ptep);
+	ptep_flush_lazy(mm, address, ptep);
 
 	if (mm_has_pgste(mm)) {
 		pgste = pgste_update_all(&pte, pgste);
@@ -1160,7 +1161,7 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
 
 	pte = *ptep;
 	if (!full)
-		__ptep_ipte(address, ptep);
+		ptep_flush_lazy(mm, address, ptep);
 	pte_val(*ptep) = _PAGE_INVALID;
 
 	if (!full && mm_has_pgste(mm)) {
@@ -1178,14 +1179,12 @@ static inline pte_t ptep_set_wrprotect(struct mm_struct *mm,
 	pte_t pte = *ptep;
 
 	if (pte_write(pte)) {
-		mm->context.flush_mm = 1;
 		if (mm_has_pgste(mm)) {
 			pgste = pgste_get_lock(ptep);
 			pgste = pgste_ipte_notify(mm, address, ptep, pgste);
 		}
 
-		if (!mm_exclusive(mm))
-			__ptep_ipte(address, ptep);
+		ptep_flush_lazy(mm, address, ptep);
 		pte = pte_wrprotect(pte);
 
 		if (mm_has_pgste(mm)) {
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index 6d6d92b4ea11..2cb846c4b37f 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -63,13 +63,14 @@ static inline void tlb_gather_mmu(struct mmu_gather *tlb,
 
 static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 {
+	__tlb_flush_mm_lazy(tlb->mm);
 	tlb_table_flush(tlb);
 }
 
 static inline void tlb_finish_mmu(struct mmu_gather *tlb,
 				  unsigned long start, unsigned long end)
 {
-	tlb_table_flush(tlb);
+	tlb_flush_mmu(tlb);
 }
 
 /*
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
index 6b32af30878c..f9fef0425fee 100644
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -86,7 +86,7 @@ static inline void __tlb_flush_mm(struct mm_struct * mm)
 		__tlb_flush_full(mm);
 }
 
-static inline void __tlb_flush_mm_cond(struct mm_struct * mm)
+static inline void __tlb_flush_mm_lazy(struct mm_struct * mm)
 {
 	if (mm->context.flush_mm) {
 		__tlb_flush_mm(mm);
@@ -118,13 +118,13 @@ static inline void __tlb_flush_mm_cond(struct mm_struct * mm)
 
 static inline void flush_tlb_mm(struct mm_struct *mm)
 {
-	__tlb_flush_mm_cond(mm);
+	__tlb_flush_mm_lazy(mm);
 }
 
 static inline void flush_tlb_range(struct vm_area_struct *vma,
 				   unsigned long start, unsigned long end)
 {
-	__tlb_flush_mm_cond(vma->vm_mm);
+	__tlb_flush_mm_lazy(vma->vm_mm);
 }
 
 static inline void flush_tlb_kernel_range(unsigned long start,
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index b9d35d63934e..befaea7003f7 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -1008,7 +1008,6 @@ void tlb_table_flush(struct mmu_gather *tlb)
 	struct mmu_table_batch **batch = &tlb->batch;
 
 	if (*batch) {
-		__tlb_flush_mm(tlb->mm);
 		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
 		*batch = NULL;
 	}
@@ -1018,11 +1017,12 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
 {
 	struct mmu_table_batch **batch = &tlb->batch;
 
+	tlb->mm->context.flush_mm = 1;
 	if (*batch == NULL) {
 		*batch = (struct mmu_table_batch *)
 			__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
 		if (*batch == NULL) {
-			__tlb_flush_mm(tlb->mm);
+			__tlb_flush_mm_lazy(tlb->mm);
 			tlb_remove_table_one(table);
 			return;
 		}
@@ -1030,7 +1030,7 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
 	}
 	(*batch)->tables[(*batch)->nr++] = table;
 	if ((*batch)->nr == MAX_TABLE_BATCH)
-		tlb_table_flush(tlb);
+		tlb_flush_mmu(tlb);
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE