Diffstat (limited to 'mm')
 -rw-r--r--  mm/page-writeback.c | 2
 -rw-r--r--  mm/page_alloc.c     | 2
 -rw-r--r--  mm/percpu.c         | 2
 -rw-r--r--  mm/rmap.c           | 2
 -rw-r--r--  mm/sparse-vmemmap.c | 2
 5 files changed, 5 insertions, 5 deletions
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index b4edfe7ce06c..b5d8a1f820a0 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -404,7 +404,7 @@ unsigned long determine_dirtyable_memory(void)
  * - vm.dirty_background_ratio or vm.dirty_background_bytes
  * - vm.dirty_ratio or vm.dirty_bytes
  * The dirty limits will be lifted by 1/4 for PF_LESS_THROTTLE (ie. nfsd) and
- * runtime tasks.
+ * real-time tasks.
  */
 void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
 {
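For readers coming to this hunk cold: the two limits the comment documents are derived roughly as below. This is a simplified sketch of the shape of global_dirty_limits(), not its verbatim body; the bytes-over-ratio precedence and the rounding are assumptions, and locking and clamping are omitted.

    /* Sketch: derive both thresholds, then apply the 1/4 lift the
     * comment above describes. Kernel context assumed. */
    static void dirty_limits_sketch(unsigned long available_memory,
                                    unsigned long *pbackground,
                                    unsigned long *pdirty)
    {
            unsigned long background, dirty;

            /* the *_bytes knobs, when set, override the *_ratio knobs */
            if (vm_dirty_bytes)
                    dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE);
            else
                    dirty = vm_dirty_ratio * available_memory / 100;

            if (dirty_background_bytes)
                    background = DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE);
            else
                    background = dirty_background_ratio * available_memory / 100;

            /* the 1/4 lift for PF_LESS_THROTTLE (e.g. nfsd) and rt tasks */
            if (current->flags & PF_LESS_THROTTLE || rt_task(current)) {
                    background += background / 4;
                    dirty += dirty / 4;
            }

            *pbackground = background;
            *pdirty = dirty;
    }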
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ff7e15872398..826ba6922e84 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4014,7 +4014,7 @@ static void __init setup_usemap(struct pglist_data *pgdat,
 	zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
 }
 #else
-static void inline setup_usemap(struct pglist_data *pgdat,
+static inline void setup_usemap(struct pglist_data *pgdat,
 				struct zone *zone, unsigned long zonesize) {}
 #endif /* CONFIG_SPARSEMEM */
 
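The fix here is stylistic but worth naming: `inline` belongs between the storage class and the return type (`static inline void`), which is what kernel coding style expects. The surrounding pattern is the usual config-stub idiom: a real implementation under one config branch and an empty static inline stub under the other, so callers need no #ifdefs of their own. A minimal sketch of that idiom, paraphrasing this file rather than quoting it:

    #ifndef CONFIG_SPARSEMEM
    static void __init setup_usemap(struct pglist_data *pgdat,
                                    struct zone *zone, unsigned long zonesize)
    {
            /* ... compute usemapsize, allocate zone->pageblock_flags ... */
    }
    #else
    /* with SPARSEMEM the usemap lives in the mem_section, so: no-op */
    static inline void setup_usemap(struct pglist_data *pgdat,
                                    struct zone *zone, unsigned long zonesize) {}
    #endif /* CONFIG_SPARSEMEM */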
diff --git a/mm/percpu.c b/mm/percpu.c
index 3dd4984bdef8..3f930018aa60 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -258,7 +258,7 @@ static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk,
 
 /*
  * (Un)populated page region iterators. Iterate over (un)populated
- * page regions betwen @start and @end in @chunk. @rs and @re should
+ * page regions between @start and @end in @chunk. @rs and @re should
  * be integer variables and will be set to start and end page index of
  * the current region.
  */
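As a usage note for the iterators this comment documents: percpu.c defines them as for-loop macros (the pcpu_for_each_pop_region/pcpu_for_each_unpop_region pair) built on pcpu_next_pop()/pcpu_next_unpop(). Typical use looks like the following sketch; the loop body and its helper are hypothetical:

    int rs, re;

    /* visit every populated page region of @chunk within [start, end) */
    pcpu_for_each_pop_region(chunk, rs, re, start, end) {
            /* pages [rs, re) of @chunk are populated when control gets here */
            do_something_with_region(chunk, rs, re);  /* hypothetical helper */
    }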
diff --git a/mm/rmap.c b/mm/rmap.c
@@ -94,7 +94,7 @@ static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
  * anonymous pages mapped into it with that anon_vma.
  *
  * The common case will be that we already have one, but if
- * if not we either need to find an adjacent mapping that we
+ * not we either need to find an adjacent mapping that we
  * can re-use the anon_vma from (very common when the only
  * reason for splitting a vma has been mprotect()), or we
  * allocate a new one.
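The comment narrates the strategy of anon_vma_prepare(). As a reading aid, here is a hedged sketch of that control flow; the real function takes locks and handles races with concurrent faults, all of which this omits:

    int anon_vma_prepare_sketch(struct vm_area_struct *vma)
    {
            struct anon_vma *anon_vma = vma->anon_vma;

            if (anon_vma)                           /* the common case */
                    return 0;

            /* try to re-use an adjacent mapping's anon_vma ... */
            anon_vma = find_mergeable_anon_vma(vma);
            if (!anon_vma)                          /* ... or allocate anew */
                    anon_vma = anon_vma_alloc();
            if (!anon_vma)
                    return -ENOMEM;

            vma->anon_vma = anon_vma;
            return 0;
    }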
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index 29d6cbffb283..64b984091edb 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -9,7 +9,7 @@
  *
  * However, virtual mappings need a page table and TLBs. Many Linux
  * architectures already map their physical space using 1-1 mappings
- * via TLBs. For those arches the virtual memmory map is essentially
+ * via TLBs. For those arches the virtual memory map is essentially
  * for free if we use the same page size as the 1-1 mappings. In that
  * case the overhead consists of a few additional pages that are
  * allocated to create a view of memory for vmemmap.
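The payoff the comment alludes to is that pfn/page conversion becomes plain pointer arithmetic against the vmemmap base. Under CONFIG_SPARSEMEM_VMEMMAP the generic definitions boil down to the following (per include/asm-generic/memory_model.h, reproduced from memory, so treat it as a sketch):

    /* struct page *vmemmap is an arch-chosen virtual base address */
    #define __pfn_to_page(pfn)      (vmemmap + (pfn))
    #define __page_to_pfn(page)     (unsigned long)((page) - vmemmap)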