author    Linus Torvalds <torvalds@linux-foundation.org>  2016-07-28 19:36:48 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2016-07-28 19:36:48 -0400
commit    1c88e19b0f6a8471ee50d5062721ba30b8fd4ba9 (patch)
tree      6d227487ca2cf391589c73af1c40ec7b7126feec /drivers
parent    6039b80eb50a893476fea7d56e86ed2d19290054 (diff)
parent    c3486f5376696034d0fcbef8ba70c70cfcb26f51 (diff)
Merge branch 'akpm' (patches from Andrew)
Merge more updates from Andrew Morton:
 "The rest of MM"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (101 commits)
  mm, compaction: simplify contended compaction handling
  mm, compaction: introduce direct compaction priority
  mm, thp: remove __GFP_NORETRY from khugepaged and madvised allocations
  mm, page_alloc: make THP-specific decisions more generic
  mm, page_alloc: restructure direct compaction handling in slowpath
  mm, page_alloc: don't retry initial attempt in slowpath
  mm, page_alloc: set alloc_flags only once in slowpath
  lib/stackdepot.c: use __GFP_NOWARN for stack allocations
  mm, kasan: switch SLUB to stackdepot, enable memory quarantine for SLUB
  mm, kasan: account for object redzone in SLUB's nearest_obj()
  mm: fix use-after-free if memory allocation failed in vma_adjust()
  zsmalloc: Delete an unnecessary check before the function call "iput"
  mm/memblock.c: fix index adjustment error in __next_mem_range_rev()
  mem-hotplug: alloc new page from a nearest neighbor node when mem-offline
  mm: optimize copy_page_to/from_iter_iovec
  mm: add cond_resched() to generic_swapfile_activate()
  Revert "mm, mempool: only set __GFP_NOMEMALLOC if there are free elements"
  mm, compaction: don't isolate PageWriteback pages in MIGRATE_SYNC_LIGHT mode
  mm: hwpoison: remove incorrect comments
  make __section_nr() more efficient
  ...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/base/node.c                             78
-rw-r--r--  drivers/staging/android/lowmemorykiller.c       12
-rw-r--r--  drivers/staging/lustre/lustre/osc/osc_cache.c    6

3 files changed, 52 insertions(+), 44 deletions(-)
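The node.c changes that follow track this cycle's mm rework, which splits the vmstat counters into node-scoped items (read through a struct pglist_data) and zone-scoped items (summed over a node's zones). A minimal sketch of the two accessor styles used throughout the hunks below; the helpers are the ones visible in the diff, while the wrapper functions and includes are illustrative additions, not part of the patch:

#include <linux/mm.h>		/* NODE_DATA(), struct pglist_data */
#include <linux/vmstat.h>	/* node_page_state(), sum_zone_node_page_state() */

/* Illustrative wrapper, not from the patch: a node-scoped counter,
 * read via the node's pgdat. */
static unsigned long example_node_dirty(int nid)
{
	struct pglist_data *pgdat = NODE_DATA(nid);

	return node_page_state(pgdat, NR_FILE_DIRTY);
}

/* Illustrative wrapper, not from the patch: a zone-scoped counter,
 * summed over the node's zones. */
static unsigned long example_node_mlocked(int nid)
{
	return sum_zone_node_page_state(nid, NR_MLOCK);
}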
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 51c7db2c4ee2..29cd96661b30 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -56,6 +56,7 @@ static ssize_t node_read_meminfo(struct device *dev,
 {
 	int n;
 	int nid = dev->id;
+	struct pglist_data *pgdat = NODE_DATA(nid);
 	struct sysinfo i;
 
 	si_meminfo_node(&i, nid);
@@ -74,16 +75,16 @@ static ssize_t node_read_meminfo(struct device *dev,
 		nid, K(i.totalram),
 		nid, K(i.freeram),
 		nid, K(i.totalram - i.freeram),
-		nid, K(node_page_state(nid, NR_ACTIVE_ANON) +
-			node_page_state(nid, NR_ACTIVE_FILE)),
-		nid, K(node_page_state(nid, NR_INACTIVE_ANON) +
-			node_page_state(nid, NR_INACTIVE_FILE)),
-		nid, K(node_page_state(nid, NR_ACTIVE_ANON)),
-		nid, K(node_page_state(nid, NR_INACTIVE_ANON)),
-		nid, K(node_page_state(nid, NR_ACTIVE_FILE)),
-		nid, K(node_page_state(nid, NR_INACTIVE_FILE)),
-		nid, K(node_page_state(nid, NR_UNEVICTABLE)),
-		nid, K(node_page_state(nid, NR_MLOCK)));
+		nid, K(node_page_state(pgdat, NR_ACTIVE_ANON) +
+			node_page_state(pgdat, NR_ACTIVE_FILE)),
+		nid, K(node_page_state(pgdat, NR_INACTIVE_ANON) +
+			node_page_state(pgdat, NR_INACTIVE_FILE)),
+		nid, K(node_page_state(pgdat, NR_ACTIVE_ANON)),
+		nid, K(node_page_state(pgdat, NR_INACTIVE_ANON)),
+		nid, K(node_page_state(pgdat, NR_ACTIVE_FILE)),
+		nid, K(node_page_state(pgdat, NR_INACTIVE_FILE)),
+		nid, K(node_page_state(pgdat, NR_UNEVICTABLE)),
+		nid, K(sum_zone_node_page_state(nid, NR_MLOCK)));
 
 #ifdef CONFIG_HIGHMEM
 	n += sprintf(buf + n,
@@ -117,31 +118,30 @@ static ssize_t node_read_meminfo(struct device *dev,
 		"Node %d ShmemPmdMapped: %8lu kB\n"
 #endif
 		,
-		nid, K(node_page_state(nid, NR_FILE_DIRTY)),
-		nid, K(node_page_state(nid, NR_WRITEBACK)),
-		nid, K(node_page_state(nid, NR_FILE_PAGES)),
-		nid, K(node_page_state(nid, NR_FILE_MAPPED)),
-		nid, K(node_page_state(nid, NR_ANON_PAGES)),
+		nid, K(node_page_state(pgdat, NR_FILE_DIRTY)),
+		nid, K(node_page_state(pgdat, NR_WRITEBACK)),
+		nid, K(node_page_state(pgdat, NR_FILE_PAGES)),
+		nid, K(node_page_state(pgdat, NR_FILE_MAPPED)),
+		nid, K(node_page_state(pgdat, NR_ANON_MAPPED)),
 		nid, K(i.sharedram),
-		nid, node_page_state(nid, NR_KERNEL_STACK) *
-			THREAD_SIZE / 1024,
-		nid, K(node_page_state(nid, NR_PAGETABLE)),
-		nid, K(node_page_state(nid, NR_UNSTABLE_NFS)),
-		nid, K(node_page_state(nid, NR_BOUNCE)),
-		nid, K(node_page_state(nid, NR_WRITEBACK_TEMP)),
-		nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE) +
-			node_page_state(nid, NR_SLAB_UNRECLAIMABLE)),
-		nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE)),
+		nid, sum_zone_node_page_state(nid, NR_KERNEL_STACK_KB),
+		nid, K(sum_zone_node_page_state(nid, NR_PAGETABLE)),
+		nid, K(node_page_state(pgdat, NR_UNSTABLE_NFS)),
+		nid, K(sum_zone_node_page_state(nid, NR_BOUNCE)),
+		nid, K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
+		nid, K(sum_zone_node_page_state(nid, NR_SLAB_RECLAIMABLE) +
+			sum_zone_node_page_state(nid, NR_SLAB_UNRECLAIMABLE)),
+		nid, K(sum_zone_node_page_state(nid, NR_SLAB_RECLAIMABLE)),
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-		nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE)),
-		nid, K(node_page_state(nid, NR_ANON_THPS) *
+		nid, K(sum_zone_node_page_state(nid, NR_SLAB_UNRECLAIMABLE)),
+		nid, K(node_page_state(pgdat, NR_ANON_THPS) *
 			HPAGE_PMD_NR),
-		nid, K(node_page_state(nid, NR_SHMEM_THPS) *
+		nid, K(node_page_state(pgdat, NR_SHMEM_THPS) *
 			HPAGE_PMD_NR),
-		nid, K(node_page_state(nid, NR_SHMEM_PMDMAPPED) *
+		nid, K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED) *
 			HPAGE_PMD_NR));
 #else
-		nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE)));
+		nid, K(sum_zone_node_page_state(nid, NR_SLAB_UNRECLAIMABLE)));
 #endif
 	n += hugetlb_report_node_meminfo(nid, buf + n);
 	return n;
@@ -160,12 +160,12 @@ static ssize_t node_read_numastat(struct device *dev,
 		"interleave_hit %lu\n"
 		"local_node %lu\n"
 		"other_node %lu\n",
-		node_page_state(dev->id, NUMA_HIT),
-		node_page_state(dev->id, NUMA_MISS),
-		node_page_state(dev->id, NUMA_FOREIGN),
-		node_page_state(dev->id, NUMA_INTERLEAVE_HIT),
-		node_page_state(dev->id, NUMA_LOCAL),
-		node_page_state(dev->id, NUMA_OTHER));
+		sum_zone_node_page_state(dev->id, NUMA_HIT),
+		sum_zone_node_page_state(dev->id, NUMA_MISS),
+		sum_zone_node_page_state(dev->id, NUMA_FOREIGN),
+		sum_zone_node_page_state(dev->id, NUMA_INTERLEAVE_HIT),
+		sum_zone_node_page_state(dev->id, NUMA_LOCAL),
+		sum_zone_node_page_state(dev->id, NUMA_OTHER));
 }
 static DEVICE_ATTR(numastat, S_IRUGO, node_read_numastat, NULL);
 
@@ -173,12 +173,18 @@ static ssize_t node_read_vmstat(struct device *dev,
 				struct device_attribute *attr, char *buf)
 {
 	int nid = dev->id;
+	struct pglist_data *pgdat = NODE_DATA(nid);
 	int i;
 	int n = 0;
 
 	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
 		n += sprintf(buf+n, "%s %lu\n", vmstat_text[i],
-			     node_page_state(nid, i));
+			     sum_zone_node_page_state(nid, i));
+
+	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
+		n += sprintf(buf+n, "%s %lu\n",
+			     vmstat_text[i + NR_VM_ZONE_STAT_ITEMS],
+			     node_page_state(pgdat, i));
 
 	return n;
 }
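With the node_read_vmstat() hunk above, the per-node vmstat attribute prints the zone-summed items first and the per-node items after them, reusing the shared vmstat_text[] name table with the node items offset by NR_VM_ZONE_STAT_ITEMS. A rough sketch of that indexing, with pr_info() standing in for the sysfs buffer handling; the function name is illustrative, not from the patch:

#include <linux/mm.h>
#include <linux/vmstat.h>

/* Illustrative, not from the patch: log one node's counters. */
static void example_dump_node_vmstat(int nid)
{
	struct pglist_data *pgdat = NODE_DATA(nid);
	int i;

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		pr_info("%s %lu\n", vmstat_text[i],
			sum_zone_node_page_state(nid, i));

	/* node item names follow the zone item names in vmstat_text[] */
	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
		pr_info("%s %lu\n",
			vmstat_text[i + NR_VM_ZONE_STAT_ITEMS],
			node_page_state(pgdat, i));
}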
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
index 24d2745e9437..45a1b4ec4ca3 100644
--- a/drivers/staging/android/lowmemorykiller.c
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -72,10 +72,10 @@ static unsigned long lowmem_deathpending_timeout;
 static unsigned long lowmem_count(struct shrinker *s,
 				  struct shrink_control *sc)
 {
-	return global_page_state(NR_ACTIVE_ANON) +
-		global_page_state(NR_ACTIVE_FILE) +
-		global_page_state(NR_INACTIVE_ANON) +
-		global_page_state(NR_INACTIVE_FILE);
+	return global_node_page_state(NR_ACTIVE_ANON) +
+		global_node_page_state(NR_ACTIVE_FILE) +
+		global_node_page_state(NR_INACTIVE_ANON) +
+		global_node_page_state(NR_INACTIVE_FILE);
 }
 
 static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
@@ -91,8 +91,8 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
 	short selected_oom_score_adj;
 	int array_size = ARRAY_SIZE(lowmem_adj);
 	int other_free = global_page_state(NR_FREE_PAGES) - totalreserve_pages;
-	int other_file = global_page_state(NR_FILE_PAGES) -
-				global_page_state(NR_SHMEM) -
+	int other_file = global_node_page_state(NR_FILE_PAGES) -
+				global_node_page_state(NR_SHMEM) -
 				total_swapcache_pages();
 
 	if (lowmem_adj_size < array_size)
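In the lowmemorykiller hunks above, only the LRU and file-cache counters move to global_node_page_state(); NR_FREE_PAGES remains zone-scoped and keeps using global_page_state(). A minimal sketch of that split; the helper name is illustrative, not from the patch:

#include <linux/vmstat.h>

/* Illustrative, not from the patch: rough "free plus reclaimable file" count. */
static unsigned long example_easily_freed_pages(void)
{
	unsigned long free = global_page_state(NR_FREE_PAGES);	/* zone-scoped */
	unsigned long file = global_node_page_state(NR_FILE_PAGES) -	/* node-scoped */
			     global_node_page_state(NR_SHMEM);

	return free + file;
}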
diff --git a/drivers/staging/lustre/lustre/osc/osc_cache.c b/drivers/staging/lustre/lustre/osc/osc_cache.c
index d1a7d6beee60..d011135802d5 100644
--- a/drivers/staging/lustre/lustre/osc/osc_cache.c
+++ b/drivers/staging/lustre/lustre/osc/osc_cache.c
@@ -1864,7 +1864,8 @@ void osc_dec_unstable_pages(struct ptlrpc_request *req)
 	LASSERT(page_count >= 0);
 
 	for (i = 0; i < page_count; i++)
-		dec_zone_page_state(desc->bd_iov[i].kiov_page, NR_UNSTABLE_NFS);
+		dec_node_page_state(desc->bd_iov[i].kiov_page,
+				    NR_UNSTABLE_NFS);
 
 	atomic_sub(page_count, &cli->cl_cache->ccc_unstable_nr);
 	LASSERT(atomic_read(&cli->cl_cache->ccc_unstable_nr) >= 0);
@@ -1898,7 +1899,8 @@ void osc_inc_unstable_pages(struct ptlrpc_request *req)
 	LASSERT(page_count >= 0);
 
 	for (i = 0; i < page_count; i++)
-		inc_zone_page_state(desc->bd_iov[i].kiov_page, NR_UNSTABLE_NFS);
+		inc_node_page_state(desc->bd_iov[i].kiov_page,
+				    NR_UNSTABLE_NFS);
 
 	LASSERT(atomic_read(&cli->cl_cache->ccc_unstable_nr) >= 0);
 	atomic_add(page_count, &cli->cl_cache->ccc_unstable_nr);
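The osc_cache.c hunks switch the per-page unstable accounting from the zone helpers to the node-scoped inc_node_page_state()/dec_node_page_state(), which take the struct page itself. A minimal sketch of the paired usage; the wrapper is illustrative, not part of the patch:

#include <linux/mm.h>		/* struct page */
#include <linux/vmstat.h>	/* inc_node_page_state(), dec_node_page_state() */

/* Illustrative, not from the patch: toggle unstable accounting for one page. */
static void example_account_unstable(struct page *page, bool mark)
{
	if (mark)
		inc_node_page_state(page, NR_UNSTABLE_NFS);
	else
		dec_node_page_state(page, NR_UNSTABLE_NFS);
}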