about summary refs log tree commit diff stats
path: root/include/linux/mmzone.h
diff options
context:
space:
mode:
Diffstat (limited to 'include/linux/mmzone.h')
-rw-r--r--  include/linux/mmzone.h  64
1 file changed, 38 insertions(+), 26 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 9f22090df7dd..93a849f742db 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -38,7 +38,7 @@ struct pglist_data;
38#if defined(CONFIG_SMP) 38#if defined(CONFIG_SMP)
39struct zone_padding { 39struct zone_padding {
40 char x[0]; 40 char x[0];
41} ____cacheline_maxaligned_in_smp; 41} ____cacheline_internodealigned_in_smp;
42#define ZONE_PADDING(name) struct zone_padding name; 42#define ZONE_PADDING(name) struct zone_padding name;
43#else 43#else
44#define ZONE_PADDING(name) 44#define ZONE_PADDING(name)
@@ -46,7 +46,6 @@ struct zone_padding {
46 46
47struct per_cpu_pages { 47struct per_cpu_pages {
48 int count; /* number of pages in the list */ 48 int count; /* number of pages in the list */
49 int low; /* low watermark, refill needed */
50 int high; /* high watermark, emptying needed */ 49 int high; /* high watermark, emptying needed */
51 int batch; /* chunk size for buddy add/remove */ 50 int batch; /* chunk size for buddy add/remove */
52 struct list_head list; /* the list of pages */ 51 struct list_head list; /* the list of pages */
@@ -99,7 +98,7 @@ struct per_cpu_pageset {
99 98
100/* 99/*
101 * On machines where it is needed (eg PCs) we divide physical memory 100 * On machines where it is needed (eg PCs) we divide physical memory
102 * into multiple physical zones. On a PC we have 4 zones: 101 * into multiple physical zones. On a 32bit PC we have 4 zones:
103 * 102 *
104 * ZONE_DMA < 16 MB ISA DMA capable memory 103 * ZONE_DMA < 16 MB ISA DMA capable memory
105 * ZONE_DMA32 0 MB Empty 104 * ZONE_DMA32 0 MB Empty
@@ -150,15 +149,17 @@ struct zone {
150 unsigned long pages_scanned; /* since last reclaim */ 149 unsigned long pages_scanned; /* since last reclaim */
151 int all_unreclaimable; /* All pages pinned */ 150 int all_unreclaimable; /* All pages pinned */
152 151
153 /*
154 * Does the allocator try to reclaim pages from the zone as soon
155 * as it fails a watermark_ok() in __alloc_pages?
156 */
157 int reclaim_pages;
158 /* A count of how many reclaimers are scanning this zone */ 152 /* A count of how many reclaimers are scanning this zone */
159 atomic_t reclaim_in_progress; 153 atomic_t reclaim_in_progress;
160 154
161 /* 155 /*
156 * timestamp (in jiffies) of the last zone reclaim that did not
157 * result in freeing of pages. This is used to avoid repeated scans
158 * if all memory in the zone is in use.
159 */
160 unsigned long last_unsuccessful_zone_reclaim;
161
162 /*
162 * prev_priority holds the scanning priority for this zone. It is 163 * prev_priority holds the scanning priority for this zone. It is
163 * defined as the scanning priority at which we achieved our reclaim 164 * defined as the scanning priority at which we achieved our reclaim
164 * target at the previous try_to_free_pages() or balance_pgdat() 165 * target at the previous try_to_free_pages() or balance_pgdat()
@@ -234,7 +235,7 @@ struct zone {
234 * rarely used fields: 235 * rarely used fields:
235 */ 236 */
236 char *name; 237 char *name;
237} ____cacheline_maxaligned_in_smp; 238} ____cacheline_internodealigned_in_smp;
238 239
239 240
240/* 241/*
@@ -389,6 +390,11 @@ static inline struct zone *next_zone(struct zone *zone)
389#define for_each_zone(zone) \ 390#define for_each_zone(zone) \
390 for (zone = pgdat_list->node_zones; zone; zone = next_zone(zone)) 391 for (zone = pgdat_list->node_zones; zone; zone = next_zone(zone))
391 392
393static inline int populated_zone(struct zone *zone)
394{
395 return (!!zone->present_pages);
396}
397
392static inline int is_highmem_idx(int idx) 398static inline int is_highmem_idx(int idx)
393{ 399{
394 return (idx == ZONE_HIGHMEM); 400 return (idx == ZONE_HIGHMEM);
@@ -398,6 +404,7 @@ static inline int is_normal_idx(int idx)
398{ 404{
399 return (idx == ZONE_NORMAL); 405 return (idx == ZONE_NORMAL);
400} 406}
407
401/** 408/**
402 * is_highmem - helper function to quickly check if a struct zone is a 409 * is_highmem - helper function to quickly check if a struct zone is a
403 * highmem zone or not. This is an attempt to keep references 410 * highmem zone or not. This is an attempt to keep references
@@ -414,6 +421,16 @@ static inline int is_normal(struct zone *zone)
414 return zone == zone->zone_pgdat->node_zones + ZONE_NORMAL; 421 return zone == zone->zone_pgdat->node_zones + ZONE_NORMAL;
415} 422}
416 423
424static inline int is_dma32(struct zone *zone)
425{
426 return zone == zone->zone_pgdat->node_zones + ZONE_DMA32;
427}
428
429static inline int is_dma(struct zone *zone)
430{
431 return zone == zone->zone_pgdat->node_zones + ZONE_DMA;
432}
433
417/* These two functions are used to setup the per zone pages min values */ 434/* These two functions are used to setup the per zone pages min values */
418struct ctl_table; 435struct ctl_table;
419struct file; 436struct file;
@@ -422,6 +439,8 @@ int min_free_kbytes_sysctl_handler(struct ctl_table *, int, struct file *,
422extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1]; 439extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1];
423int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, struct file *, 440int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, struct file *,
424 void __user *, size_t *, loff_t *); 441 void __user *, size_t *, loff_t *);
442int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int, struct file *,
443 void __user *, size_t *, loff_t *);
425 444
426#include <linux/topology.h> 445#include <linux/topology.h>
427/* Returns the number of the current Node. */ 446/* Returns the number of the current Node. */
@@ -435,7 +454,6 @@ extern struct pglist_data contig_page_data;
435#define NODE_DATA(nid) (&contig_page_data) 454#define NODE_DATA(nid) (&contig_page_data)
436#define NODE_MEM_MAP(nid) mem_map 455#define NODE_MEM_MAP(nid) mem_map
437#define MAX_NODES_SHIFT 1 456#define MAX_NODES_SHIFT 1
438#define pfn_to_nid(pfn) (0)
439 457
440#else /* CONFIG_NEED_MULTIPLE_NODES */ 458#else /* CONFIG_NEED_MULTIPLE_NODES */
441 459
@@ -470,6 +488,10 @@ extern struct pglist_data contig_page_data;
470#define early_pfn_to_nid(nid) (0UL) 488#define early_pfn_to_nid(nid) (0UL)
471#endif 489#endif
472 490
491#ifdef CONFIG_FLATMEM
492#define pfn_to_nid(pfn) (0)
493#endif
494
473#define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT) 495#define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT)
474#define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT) 496#define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT)
475 497
@@ -564,11 +586,6 @@ static inline int valid_section_nr(unsigned long nr)
564 return valid_section(__nr_to_section(nr)); 586 return valid_section(__nr_to_section(nr));
565} 587}
566 588
567/*
568 * Given a kernel address, find the home node of the underlying memory.
569 */
570#define kvaddr_to_nid(kaddr) pfn_to_nid(__pa(kaddr) >> PAGE_SHIFT)
571
572static inline struct mem_section *__pfn_to_section(unsigned long pfn) 589static inline struct mem_section *__pfn_to_section(unsigned long pfn)
573{ 590{
574 return __nr_to_section(pfn_to_section_nr(pfn)); 591 return __nr_to_section(pfn_to_section_nr(pfn));
@@ -598,13 +615,14 @@ static inline int pfn_valid(unsigned long pfn)
598 * this restriction. 615 * this restriction.
599 */ 616 */
600#ifdef CONFIG_NUMA 617#ifdef CONFIG_NUMA
601#define pfn_to_nid early_pfn_to_nid 618#define pfn_to_nid(pfn) \
602#endif
603
604#define pfn_to_pgdat(pfn) \
605({ \ 619({ \
606 NODE_DATA(pfn_to_nid(pfn)); \ 620 unsigned long __pfn_to_nid_pfn = (pfn); \
621 page_to_nid(pfn_to_page(__pfn_to_nid_pfn)); \
607}) 622})
623#else
624#define pfn_to_nid(pfn) (0)
625#endif
608 626
609#define early_pfn_valid(pfn) pfn_valid(pfn) 627#define early_pfn_valid(pfn) pfn_valid(pfn)
610void sparse_init(void); 628void sparse_init(void);
@@ -613,12 +631,6 @@ void sparse_init(void);
613#define sparse_index_init(_sec, _nid) do {} while (0) 631#define sparse_index_init(_sec, _nid) do {} while (0)
614#endif /* CONFIG_SPARSEMEM */ 632#endif /* CONFIG_SPARSEMEM */
615 633
616#ifdef CONFIG_NODES_SPAN_OTHER_NODES
617#define early_pfn_in_nid(pfn, nid) (early_pfn_to_nid(pfn) == (nid))
618#else
619#define early_pfn_in_nid(pfn, nid) (1)
620#endif
621
622#ifndef early_pfn_valid 634#ifndef early_pfn_valid
623#define early_pfn_valid(pfn) (1) 635#define early_pfn_valid(pfn) (1)
624#endif 636#endif