author	Linus Torvalds <torvalds@linux-foundation.org>	2016-07-26 22:55:54 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-07-26 22:55:54 -0400
commit	0e06f5c0deeef0332a5da2ecb8f1fcf3e024d958 (patch)
tree	e0f0af4aadf10c713c5cf1b65356844b3c9b3215 /drivers/base
parent	f7816ad0f878dacd5f0120476f9b836ccf8699ea (diff)
parent	8f19b0c058d93a678a99dd6fec03af2e769943f2 (diff)
Merge branch 'akpm' (patches from Andrew)
Merge updates from Andrew Morton:

 - a few misc bits

 - ocfs2

 - most(?) of MM

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (125 commits)
  thp: fix comments of __pmd_trans_huge_lock()
  cgroup: remove unnecessary 0 check from css_from_id()
  cgroup: fix idr leak for the first cgroup root
  mm: memcontrol: fix documentation for compound parameter
  mm: memcontrol: remove BUG_ON in uncharge_list
  mm: fix build warnings in <linux/compaction.h>
  mm, thp: convert from optimistic swapin collapsing to conservative
  mm, thp: fix comment inconsistency for swapin readahead functions
  thp: update Documentation/{vm/transhuge,filesystems/proc}.txt
  shmem: split huge pages beyond i_size under memory pressure
  thp: introduce CONFIG_TRANSPARENT_HUGE_PAGECACHE
  khugepaged: add support of collapse for tmpfs/shmem pages
  shmem: make shmem_inode_info::lock irq-safe
  khugepaged: move up_read(mmap_sem) out of khugepaged_alloc_page()
  thp: extract khugepaged from mm/huge_memory.c
  shmem, thp: respect MADV_{NO,}HUGEPAGE for file mappings
  shmem: add huge pages support
  shmem: get_unmapped_area align huge page
  shmem: prepare huge= mount option and sysfs knob
  mm, rmap: account shmem thp pages
  ...
Diffstat (limited to 'drivers/base')
-rw-r--r--	drivers/base/memory.c	28
-rw-r--r--	drivers/base/node.c	13
2 files changed, 26 insertions(+), 15 deletions(-)
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index f46dba8b7092..dc75de9059cd 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -391,6 +391,7 @@ static ssize_t show_valid_zones(struct device *dev,
 	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
 	struct page *first_page;
 	struct zone *zone;
+	int zone_shift = 0;
 
 	start_pfn = section_nr_to_pfn(mem->start_section_nr);
 	end_pfn = start_pfn + nr_pages;
@@ -402,21 +403,26 @@ static ssize_t show_valid_zones(struct device *dev,
 
 	zone = page_zone(first_page);
 
-	if (zone_idx(zone) == ZONE_MOVABLE - 1) {
-		/*The mem block is the last memoryblock of this zone.*/
-		if (end_pfn == zone_end_pfn(zone))
-			return sprintf(buf, "%s %s\n",
-					zone->name, (zone + 1)->name);
+	/* MMOP_ONLINE_KEEP */
+	sprintf(buf, "%s", zone->name);
+
+	/* MMOP_ONLINE_KERNEL */
+	zone_shift = zone_can_shift(start_pfn, nr_pages, ZONE_NORMAL);
+	if (zone_shift) {
+		strcat(buf, " ");
+		strcat(buf, (zone + zone_shift)->name);
 	}
 
-	if (zone_idx(zone) == ZONE_MOVABLE) {
-		/*The mem block is the first memoryblock of ZONE_MOVABLE.*/
-		if (start_pfn == zone->zone_start_pfn)
-			return sprintf(buf, "%s %s\n",
-					zone->name, (zone - 1)->name);
+	/* MMOP_ONLINE_MOVABLE */
+	zone_shift = zone_can_shift(start_pfn, nr_pages, ZONE_MOVABLE);
+	if (zone_shift) {
+		strcat(buf, " ");
+		strcat(buf, (zone + zone_shift)->name);
 	}
 
-	return sprintf(buf, "%s\n", zone->name);
+	strcat(buf, "\n");
+
+	return strlen(buf);
 }
 static DEVICE_ATTR(valid_zones, 0444, show_valid_zones, NULL);
 #endif
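
The rewritten show_valid_zones() above backs the per-block sysfs attribute /sys/devices/system/memory/memoryN/valid_zones, which after this change lists every zone the block could be onlined into instead of at most two. A minimal userspace sketch of reading that attribute (not part of this commit; the block number memory32 is an arbitrary example and depends on the machine's section layout):

/* Hypothetical reader for a memory block's valid_zones attribute. */
#include <stdio.h>

int main(void)
{
	char line[128];
	FILE *f = fopen("/sys/devices/system/memory/memory32/valid_zones", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* Single line; with this patch it may list several zones, e.g. "Normal Movable". */
	if (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}
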
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 560751bad294..51c7db2c4ee2 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -113,6 +113,8 @@ static ssize_t node_read_meminfo(struct device *dev,
113 "Node %d SUnreclaim: %8lu kB\n" 113 "Node %d SUnreclaim: %8lu kB\n"
114#ifdef CONFIG_TRANSPARENT_HUGEPAGE 114#ifdef CONFIG_TRANSPARENT_HUGEPAGE
115 "Node %d AnonHugePages: %8lu kB\n" 115 "Node %d AnonHugePages: %8lu kB\n"
116 "Node %d ShmemHugePages: %8lu kB\n"
117 "Node %d ShmemPmdMapped: %8lu kB\n"
116#endif 118#endif
117 , 119 ,
118 nid, K(node_page_state(nid, NR_FILE_DIRTY)), 120 nid, K(node_page_state(nid, NR_FILE_DIRTY)),
@@ -131,10 +133,13 @@ static ssize_t node_read_meminfo(struct device *dev,
 		       node_page_state(nid, NR_SLAB_UNRECLAIMABLE)),
 		       nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE)),
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-		       nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE))
-			, nid,
-			K(node_page_state(nid, NR_ANON_TRANSPARENT_HUGEPAGES) *
-			HPAGE_PMD_NR));
+		       nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE)),
+		       nid, K(node_page_state(nid, NR_ANON_THPS) *
+			      HPAGE_PMD_NR),
+		       nid, K(node_page_state(nid, NR_SHMEM_THPS) *
+			      HPAGE_PMD_NR),
+		       nid, K(node_page_state(nid, NR_SHMEM_PMDMAPPED) *
+			      HPAGE_PMD_NR));
 #else
 		       nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE)));
 #endif
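
The node_read_meminfo() change above adds "ShmemHugePages" and "ShmemPmdMapped" lines to /sys/devices/system/node/nodeN/meminfo. A minimal userspace sketch that picks out the new counters (not part of this commit; node0 is assumed to be an online NUMA node):

/* Hypothetical filter for the two shmem THP counters in a node's meminfo. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/sys/devices/system/node/node0/meminfo", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		/* e.g. "Node 0 ShmemHugePages: 2048 kB" */
		if (strstr(line, "ShmemHugePages:") || strstr(line, "ShmemPmdMapped:"))
			fputs(line, stdout);
	}
	fclose(f);
	return 0;
}
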