Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c  74
1 file changed, 32 insertions(+), 42 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index c11935bf37cb..0f92e04b58db 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3492,14 +3492,12 @@ should_reclaim_retry(gfp_t gfp_mask, unsigned order,
 		 * prevent from pre mature OOM
 		 */
 		if (!did_some_progress) {
-			unsigned long writeback;
-			unsigned long dirty;
 
-			writeback = zone_page_state_snapshot(zone,
-							     NR_WRITEBACK);
-			dirty = zone_page_state_snapshot(zone, NR_FILE_DIRTY);
+			unsigned long write_pending;
 
-			if (2*(writeback + dirty) > reclaimable) {
+			write_pending = zone_page_state_snapshot(zone,
+						   NR_ZONE_WRITE_PENDING);
+
+			if (2 * write_pending > reclaimable) {
 				congestion_wait(BLK_RW_ASYNC, HZ/10);
 				return true;
 			}
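
The hunk above collapses the separate dirty and writeback snapshots into the single per-zone NR_ZONE_WRITE_PENDING counter when deciding whether to stall rather than retry immediately: if more than half of the zone's reclaimable pages are still waiting to be written back, the allocator waits for IO instead of pressing on towards OOM. A minimal standalone model of that condition (not kernel code; the helper name and the sample numbers are purely illustrative):

#include <stdbool.h>
#include <stdio.h>

/*
 * Models the check in the hunk above: stall if more than half of the
 * zone's reclaimable pages are still write-pending, i.e.
 * 2 * write_pending > reclaimable.
 */
static bool should_stall(unsigned long write_pending, unsigned long reclaimable)
{
	return 2 * write_pending > reclaimable;
}

int main(void)
{
	/* illustrative snapshots only */
	printf("%d\n", should_stall(300, 500));	/* 1: wait for writeback to finish */
	printf("%d\n", should_stall(100, 500));	/* 0: keep retrying reclaim */
	return 0;
}
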
@@ -4175,7 +4173,7 @@ EXPORT_SYMBOL_GPL(si_mem_available);
 void si_meminfo(struct sysinfo *val)
 {
 	val->totalram = totalram_pages;
-	val->sharedram = global_page_state(NR_SHMEM);
+	val->sharedram = global_node_page_state(NR_SHMEM);
 	val->freeram = global_page_state(NR_FREE_PAGES);
 	val->bufferram = nr_blockdev_pages();
 	val->totalhigh = totalhigh_pages;
@@ -4197,7 +4195,7 @@ void si_meminfo_node(struct sysinfo *val, int nid)
 	for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
 		managed_pages += pgdat->node_zones[zone_type].managed_pages;
 	val->totalram = managed_pages;
-	val->sharedram = sum_zone_node_page_state(nid, NR_SHMEM);
+	val->sharedram = node_page_state(pgdat, NR_SHMEM);
 	val->freeram = sum_zone_node_page_state(nid, NR_FREE_PAGES);
 #ifdef CONFIG_HIGHMEM
 	for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
@@ -4296,9 +4294,6 @@ void show_free_areas(unsigned int filter)
4296 " unevictable:%lu dirty:%lu writeback:%lu unstable:%lu\n" 4294 " unevictable:%lu dirty:%lu writeback:%lu unstable:%lu\n"
4297 " slab_reclaimable:%lu slab_unreclaimable:%lu\n" 4295 " slab_reclaimable:%lu slab_unreclaimable:%lu\n"
4298 " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n" 4296 " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
4299#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4300 " anon_thp: %lu shmem_thp: %lu shmem_pmdmapped: %lu\n"
4301#endif
4302 " free:%lu free_pcp:%lu free_cma:%lu\n", 4297 " free:%lu free_pcp:%lu free_cma:%lu\n",
4303 global_node_page_state(NR_ACTIVE_ANON), 4298 global_node_page_state(NR_ACTIVE_ANON),
4304 global_node_page_state(NR_INACTIVE_ANON), 4299 global_node_page_state(NR_INACTIVE_ANON),
@@ -4307,20 +4302,15 @@ void show_free_areas(unsigned int filter)
 		global_node_page_state(NR_INACTIVE_FILE),
 		global_node_page_state(NR_ISOLATED_FILE),
 		global_node_page_state(NR_UNEVICTABLE),
-		global_page_state(NR_FILE_DIRTY),
-		global_page_state(NR_WRITEBACK),
-		global_page_state(NR_UNSTABLE_NFS),
+		global_node_page_state(NR_FILE_DIRTY),
+		global_node_page_state(NR_WRITEBACK),
+		global_node_page_state(NR_UNSTABLE_NFS),
 		global_page_state(NR_SLAB_RECLAIMABLE),
 		global_page_state(NR_SLAB_UNRECLAIMABLE),
 		global_node_page_state(NR_FILE_MAPPED),
-		global_page_state(NR_SHMEM),
+		global_node_page_state(NR_SHMEM),
 		global_page_state(NR_PAGETABLE),
 		global_page_state(NR_BOUNCE),
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-		global_page_state(NR_ANON_THPS) * HPAGE_PMD_NR,
-		global_page_state(NR_SHMEM_THPS) * HPAGE_PMD_NR,
-		global_page_state(NR_SHMEM_PMDMAPPED) * HPAGE_PMD_NR,
-#endif
 		global_page_state(NR_FREE_PAGES),
 		free_pcp,
 		global_page_state(NR_FREE_CMA_PAGES));
@@ -4335,6 +4325,16 @@ void show_free_areas(unsigned int filter)
4335 " isolated(anon):%lukB" 4325 " isolated(anon):%lukB"
4336 " isolated(file):%lukB" 4326 " isolated(file):%lukB"
4337 " mapped:%lukB" 4327 " mapped:%lukB"
4328 " dirty:%lukB"
4329 " writeback:%lukB"
4330 " shmem:%lukB"
4331#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4332 " shmem_thp: %lukB"
4333 " shmem_pmdmapped: %lukB"
4334 " anon_thp: %lukB"
4335#endif
4336 " writeback_tmp:%lukB"
4337 " unstable:%lukB"
4338 " all_unreclaimable? %s" 4338 " all_unreclaimable? %s"
4339 "\n", 4339 "\n",
4340 pgdat->node_id, 4340 pgdat->node_id,
@@ -4346,6 +4346,17 @@ void show_free_areas(unsigned int filter)
 			K(node_page_state(pgdat, NR_ISOLATED_ANON)),
 			K(node_page_state(pgdat, NR_ISOLATED_FILE)),
 			K(node_page_state(pgdat, NR_FILE_MAPPED)),
+			K(node_page_state(pgdat, NR_FILE_DIRTY)),
+			K(node_page_state(pgdat, NR_WRITEBACK)),
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+			K(node_page_state(pgdat, NR_SHMEM_THPS) * HPAGE_PMD_NR),
+			K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)
+					* HPAGE_PMD_NR),
+			K(node_page_state(pgdat, NR_ANON_THPS) * HPAGE_PMD_NR),
+#endif
+			K(node_page_state(pgdat, NR_SHMEM)),
+			K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
+			K(node_page_state(pgdat, NR_UNSTABLE_NFS)),
 			!pgdat_reclaimable(pgdat) ? "yes" : "no");
 	}
 
@@ -4368,24 +4379,14 @@ void show_free_areas(unsigned int filter)
4368 " present:%lukB" 4379 " present:%lukB"
4369 " managed:%lukB" 4380 " managed:%lukB"
4370 " mlocked:%lukB" 4381 " mlocked:%lukB"
4371 " dirty:%lukB"
4372 " writeback:%lukB"
4373 " shmem:%lukB"
4374#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4375 " shmem_thp: %lukB"
4376 " shmem_pmdmapped: %lukB"
4377 " anon_thp: %lukB"
4378#endif
4379 " slab_reclaimable:%lukB" 4382 " slab_reclaimable:%lukB"
4380 " slab_unreclaimable:%lukB" 4383 " slab_unreclaimable:%lukB"
4381 " kernel_stack:%lukB" 4384 " kernel_stack:%lukB"
4382 " pagetables:%lukB" 4385 " pagetables:%lukB"
4383 " unstable:%lukB"
4384 " bounce:%lukB" 4386 " bounce:%lukB"
4385 " free_pcp:%lukB" 4387 " free_pcp:%lukB"
4386 " local_pcp:%ukB" 4388 " local_pcp:%ukB"
4387 " free_cma:%lukB" 4389 " free_cma:%lukB"
4388 " writeback_tmp:%lukB"
4389 " node_pages_scanned:%lu" 4390 " node_pages_scanned:%lu"
4390 "\n", 4391 "\n",
4391 zone->name, 4392 zone->name,
@@ -4396,26 +4397,15 @@ void show_free_areas(unsigned int filter)
 			K(zone->present_pages),
 			K(zone->managed_pages),
 			K(zone_page_state(zone, NR_MLOCK)),
-			K(zone_page_state(zone, NR_FILE_DIRTY)),
-			K(zone_page_state(zone, NR_WRITEBACK)),
-			K(zone_page_state(zone, NR_SHMEM)),
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-			K(zone_page_state(zone, NR_SHMEM_THPS) * HPAGE_PMD_NR),
-			K(zone_page_state(zone, NR_SHMEM_PMDMAPPED)
-					* HPAGE_PMD_NR),
-			K(zone_page_state(zone, NR_ANON_THPS) * HPAGE_PMD_NR),
-#endif
 			K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)),
 			K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)),
 			zone_page_state(zone, NR_KERNEL_STACK) *
 				THREAD_SIZE / 1024,
 			K(zone_page_state(zone, NR_PAGETABLE)),
-			K(zone_page_state(zone, NR_UNSTABLE_NFS)),
 			K(zone_page_state(zone, NR_BOUNCE)),
 			K(free_pcp),
 			K(this_cpu_read(zone->pageset->pcp.count)),
 			K(zone_page_state(zone, NR_FREE_CMA_PAGES)),
-			K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
 			K(node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED)));
 		printk("lowmem_reserve[]:");
 		for (i = 0; i < MAX_NR_ZONES; i++)
@@ -4458,7 +4448,7 @@ void show_free_areas(unsigned int filter)
 
 	hugetlb_show_meminfo();
 
-	printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES));
+	printk("%ld total pagecache pages\n", global_node_page_state(NR_FILE_PAGES));
 
 	show_swap_cache_info();
 }
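
Taken together, the later hunks move the file-backed counters (NR_FILE_DIRTY, NR_WRITEBACK, NR_UNSTABLE_NFS, NR_SHMEM, the THP shmem counters, NR_WRITEBACK_TEMP, NR_FILE_PAGES) from per-zone to per-node accounting, so readers switch from global_page_state()/zone_page_state() to global_node_page_state()/node_page_state(pgdat, ...). A loose standalone sketch of that data-structure shift, using toy types and made-up values rather than the kernel's real vmstat implementation:

#include <stdio.h>

/* toy per-node stat items mirroring a few names from the diff */
enum node_stat_item { NR_FILE_DIRTY, NR_WRITEBACK, NR_SHMEM, NR_NODE_STAT_ITEMS };

#define MAX_NUMNODES 2

struct pglist_data {
	unsigned long vm_stat[NR_NODE_STAT_ITEMS];	/* counters kept per node, not per zone */
};

static struct pglist_data node_data[MAX_NUMNODES];

/* per-node reader, analogous in spirit to node_page_state(pgdat, item) */
static unsigned long node_page_state(struct pglist_data *pgdat,
				     enum node_stat_item item)
{
	return pgdat->vm_stat[item];
}

/* global reader, analogous in spirit to global_node_page_state(item) */
static unsigned long global_node_page_state(enum node_stat_item item)
{
	unsigned long sum = 0;

	for (int nid = 0; nid < MAX_NUMNODES; nid++)
		sum += node_page_state(&node_data[nid], item);
	return sum;
}

int main(void)
{
	/* made-up values for illustration only */
	node_data[0].vm_stat[NR_FILE_DIRTY] = 128;
	node_data[1].vm_stat[NR_FILE_DIRTY] = 64;
	printf("dirty pages: %lu\n", global_node_page_state(NR_FILE_DIRTY));
	return 0;
}
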