diff options
author | Ingo Molnar <mingo@kernel.org> | 2017-02-03 05:03:31 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@kernel.org> | 2017-03-02 02:42:25 -0500 |
commit | 314ff7851fc8ea66cbf48eaa93d8ebfb5ca084a9 (patch) | |
tree | 16567343faf3f9e94a56bbf431c1e54414cdbb6d | |
parent | 780de9dd2720debc14c501dab4dc80d1f75ad50e (diff) |
mm/vmacache, sched/headers: Introduce 'struct vmacache' and move it from <linux/sched.h> to <linux/mm_types.h>
The <linux/sched.h> header includes various vmacache related defines,
which are arguably misplaced.
Move them to mm_types.h and minimize the sched.h impact by putting
all task vmacache state into a new 'struct vmacache' structure.
No change in functionality.
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r-- | include/linux/mm_types.h | 12 | ||||
-rw-r--r-- | include/linux/sched.h | 11 | ||||
-rw-r--r-- | include/linux/vmacache.h | 2 | ||||
-rw-r--r-- | kernel/debug/debug_core.c | 4 | ||||
-rw-r--r-- | mm/nommu.c | 2 | ||||
-rw-r--r-- | mm/vmacache.c | 10 |
6 files changed, 25 insertions, 16 deletions
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 4f6d440ad785..137797cd7b50 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h | |||
@@ -360,6 +360,18 @@ struct vm_area_struct { | |||
360 | struct vm_userfaultfd_ctx vm_userfaultfd_ctx; | 360 | struct vm_userfaultfd_ctx vm_userfaultfd_ctx; |
361 | }; | 361 | }; |
362 | 362 | ||
363 | /* | ||
364 | * The per task VMA cache array: | ||
365 | */ | ||
366 | #define VMACACHE_BITS 2 | ||
367 | #define VMACACHE_SIZE (1U << VMACACHE_BITS) | ||
368 | #define VMACACHE_MASK (VMACACHE_SIZE - 1) | ||
369 | |||
370 | struct vmacache { | ||
371 | u32 seqnum; | ||
372 | struct vm_area_struct *vmas[VMACACHE_SIZE]; | ||
373 | }; | ||
374 | |||
363 | struct core_thread { | 375 | struct core_thread { |
364 | struct task_struct *task; | 376 | struct task_struct *task; |
365 | struct core_thread *next; | 377 | struct core_thread *next; |
diff --git a/include/linux/sched.h b/include/linux/sched.h index 3f61baac928b..e87c97e1a947 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -134,10 +134,6 @@ struct blk_plug; | |||
134 | struct filename; | 134 | struct filename; |
135 | struct nameidata; | 135 | struct nameidata; |
136 | 136 | ||
137 | #define VMACACHE_BITS 2 | ||
138 | #define VMACACHE_SIZE (1U << VMACACHE_BITS) | ||
139 | #define VMACACHE_MASK (VMACACHE_SIZE - 1) | ||
140 | |||
141 | /* | 137 | /* |
142 | * These are the constant used to fake the fixed-point load-average | 138 | * These are the constant used to fake the fixed-point load-average |
143 | * counting. Some notes: | 139 | * counting. Some notes: |
@@ -1550,9 +1546,10 @@ struct task_struct { | |||
1550 | #endif | 1546 | #endif |
1551 | 1547 | ||
1552 | struct mm_struct *mm, *active_mm; | 1548 | struct mm_struct *mm, *active_mm; |
1553 | /* per-thread vma caching */ | 1549 | |
1554 | u32 vmacache_seqnum; | 1550 | /* Per-thread vma caching: */ |
1555 | struct vm_area_struct *vmacache[VMACACHE_SIZE]; | 1551 | struct vmacache vmacache; |
1552 | |||
1556 | #if defined(SPLIT_RSS_COUNTING) | 1553 | #if defined(SPLIT_RSS_COUNTING) |
1557 | struct task_rss_stat rss_stat; | 1554 | struct task_rss_stat rss_stat; |
1558 | #endif | 1555 | #endif |
diff --git a/include/linux/vmacache.h b/include/linux/vmacache.h index c3fa0fd43949..1081db987391 100644 --- a/include/linux/vmacache.h +++ b/include/linux/vmacache.h | |||
@@ -12,7 +12,7 @@ | |||
12 | 12 | ||
13 | static inline void vmacache_flush(struct task_struct *tsk) | 13 | static inline void vmacache_flush(struct task_struct *tsk) |
14 | { | 14 | { |
15 | memset(tsk->vmacache, 0, sizeof(tsk->vmacache)); | 15 | memset(tsk->vmacache.vmas, 0, sizeof(tsk->vmacache.vmas)); |
16 | } | 16 | } |
17 | 17 | ||
18 | extern void vmacache_flush_all(struct mm_struct *mm); | 18 | extern void vmacache_flush_all(struct mm_struct *mm); |
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c index 79517e5549f1..a603ef28f70c 100644 --- a/kernel/debug/debug_core.c +++ b/kernel/debug/debug_core.c | |||
@@ -232,9 +232,9 @@ static void kgdb_flush_swbreak_addr(unsigned long addr) | |||
232 | int i; | 232 | int i; |
233 | 233 | ||
234 | for (i = 0; i < VMACACHE_SIZE; i++) { | 234 | for (i = 0; i < VMACACHE_SIZE; i++) { |
235 | if (!current->vmacache[i]) | 235 | if (!current->vmacache.vmas[i]) |
236 | continue; | 236 | continue; |
237 | flush_cache_range(current->vmacache[i], | 237 | flush_cache_range(current->vmacache.vmas[i], |
238 | addr, addr + BREAK_INSTR_SIZE); | 238 | addr, addr + BREAK_INSTR_SIZE); |
239 | } | 239 | } |
240 | } | 240 | } |
diff --git a/mm/nommu.c b/mm/nommu.c index fe9f4fa4a7a7..aae06e854552 100644 --- a/mm/nommu.c +++ b/mm/nommu.c | |||
@@ -757,7 +757,7 @@ static void delete_vma_from_mm(struct vm_area_struct *vma) | |||
757 | mm->map_count--; | 757 | mm->map_count--; |
758 | for (i = 0; i < VMACACHE_SIZE; i++) { | 758 | for (i = 0; i < VMACACHE_SIZE; i++) { |
759 | /* if the vma is cached, invalidate the entire cache */ | 759 | /* if the vma is cached, invalidate the entire cache */ |
760 | if (curr->vmacache[i] == vma) { | 760 | if (curr->vmacache.vmas[i] == vma) { |
761 | vmacache_invalidate(mm); | 761 | vmacache_invalidate(mm); |
762 | break; | 762 | break; |
763 | } | 763 | } |
diff --git a/mm/vmacache.c b/mm/vmacache.c index 035fdeb35b43..7c233f8e20ee 100644 --- a/mm/vmacache.c +++ b/mm/vmacache.c | |||
@@ -60,7 +60,7 @@ static inline bool vmacache_valid_mm(struct mm_struct *mm) | |||
60 | void vmacache_update(unsigned long addr, struct vm_area_struct *newvma) | 60 | void vmacache_update(unsigned long addr, struct vm_area_struct *newvma) |
61 | { | 61 | { |
62 | if (vmacache_valid_mm(newvma->vm_mm)) | 62 | if (vmacache_valid_mm(newvma->vm_mm)) |
63 | current->vmacache[VMACACHE_HASH(addr)] = newvma; | 63 | current->vmacache.vmas[VMACACHE_HASH(addr)] = newvma; |
64 | } | 64 | } |
65 | 65 | ||
66 | static bool vmacache_valid(struct mm_struct *mm) | 66 | static bool vmacache_valid(struct mm_struct *mm) |
@@ -71,12 +71,12 @@ static bool vmacache_valid(struct mm_struct *mm) | |||
71 | return false; | 71 | return false; |
72 | 72 | ||
73 | curr = current; | 73 | curr = current; |
74 | if (mm->vmacache_seqnum != curr->vmacache_seqnum) { | 74 | if (mm->vmacache_seqnum != curr->vmacache.seqnum) { |
75 | /* | 75 | /* |
76 | * First attempt will always be invalid, initialize | 76 | * First attempt will always be invalid, initialize |
77 | * the new cache for this task here. | 77 | * the new cache for this task here. |
78 | */ | 78 | */ |
79 | curr->vmacache_seqnum = mm->vmacache_seqnum; | 79 | curr->vmacache.seqnum = mm->vmacache_seqnum; |
80 | vmacache_flush(curr); | 80 | vmacache_flush(curr); |
81 | return false; | 81 | return false; |
82 | } | 82 | } |
@@ -93,7 +93,7 @@ struct vm_area_struct *vmacache_find(struct mm_struct *mm, unsigned long addr) | |||
93 | return NULL; | 93 | return NULL; |
94 | 94 | ||
95 | for (i = 0; i < VMACACHE_SIZE; i++) { | 95 | for (i = 0; i < VMACACHE_SIZE; i++) { |
96 | struct vm_area_struct *vma = current->vmacache[i]; | 96 | struct vm_area_struct *vma = current->vmacache.vmas[i]; |
97 | 97 | ||
98 | if (!vma) | 98 | if (!vma) |
99 | continue; | 99 | continue; |
@@ -121,7 +121,7 @@ struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm, | |||
121 | return NULL; | 121 | return NULL; |
122 | 122 | ||
123 | for (i = 0; i < VMACACHE_SIZE; i++) { | 123 | for (i = 0; i < VMACACHE_SIZE; i++) { |
124 | struct vm_area_struct *vma = current->vmacache[i]; | 124 | struct vm_area_struct *vma = current->vmacache.vmas[i]; |
125 | 125 | ||
126 | if (vma && vma->vm_start == start && vma->vm_end == end) { | 126 | if (vma && vma->vm_start == start && vma->vm_end == end) { |
127 | count_vm_vmacache_event(VMACACHE_FIND_HITS); | 127 | count_vm_vmacache_event(VMACACHE_FIND_HITS); |