Diffstat (limited to 'include/linux/mm.h')
-rw-r--r-- | include/linux/mm.h | 73
1 file changed, 41 insertions(+), 32 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 4baadd18f4ad..17b27cd269c4 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1253,41 +1253,34 @@ static inline void pgtable_page_dtor(struct page *page)
 extern void free_area_init(unsigned long * zones_size);
 extern void free_area_init_node(int nid, unsigned long * zones_size,
 		unsigned long zone_start_pfn, unsigned long *zholes_size);
-#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
 /*
- * With CONFIG_ARCH_POPULATES_NODE_MAP set, an architecture may initialise its
+ * With CONFIG_HAVE_MEMBLOCK_NODE_MAP set, an architecture may initialise its
  * zones, allocate the backing mem_map and account for memory holes in a more
  * architecture independent manner. This is a substitute for creating the
  * zone_sizes[] and zholes_size[] arrays and passing them to
  * free_area_init_node()
  *
  * An architecture is expected to register range of page frames backed by
- * physical memory with add_active_range() before calling
+ * physical memory with memblock_add[_node]() before calling
  * free_area_init_nodes() passing in the PFN each zone ends at. At a basic
  * usage, an architecture is expected to do something like
  *
  * unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn,
  * 							 max_highmem_pfn};
  * for_each_valid_physical_page_range()
- * 	add_active_range(node_id, start_pfn, end_pfn)
+ * 	memblock_add_node(base, size, nid)
  * free_area_init_nodes(max_zone_pfns);
  *
- * If the architecture guarantees that there are no holes in the ranges
- * registered with add_active_range(), free_bootmem_active_regions()
- * will call free_bootmem_node() for each registered physical page range.
- * Similarly sparse_memory_present_with_active_regions() calls
- * memory_present() for each range when SPARSEMEM is enabled.
+ * free_bootmem_with_active_regions() calls free_bootmem_node() for each
+ * registered physical page range.  Similarly
+ * sparse_memory_present_with_active_regions() calls memory_present() for
+ * each range when SPARSEMEM is enabled.
  *
  * See mm/page_alloc.c for more information on each function exposed by
- * CONFIG_ARCH_POPULATES_NODE_MAP
+ * CONFIG_HAVE_MEMBLOCK_NODE_MAP.
  */
 extern void free_area_init_nodes(unsigned long *max_zone_pfn);
-extern void add_active_range(unsigned int nid, unsigned long start_pfn,
-					unsigned long end_pfn);
-extern void remove_active_range(unsigned int nid, unsigned long start_pfn,
-					unsigned long end_pfn);
-extern void remove_all_active_ranges(void);
-void sort_node_map(void);
 unsigned long node_map_pfn_alignment(void);
 unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
 						unsigned long end_pfn);
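Note: the registration sequence the comment above describes, sketched for a hypothetical architecture. memblock_add_node(), free_area_init_nodes(), MAX_NR_ZONES and the zone indices are the interfaces this hunk names; hypothetical_paging_init(), mem[], nr_mem_ranges, max_dma_pfn and max_low_pfn are placeholders for whatever the platform derives from firmware.

#include <linux/memblock.h>
#include <linux/mm.h>

/* Placeholders: real platform code gets these from firmware/bootloader. */
extern struct mem_range { phys_addr_t base, size; int nid; } mem[];
extern int nr_mem_ranges;
extern unsigned long max_dma_pfn, max_low_pfn;

void __init hypothetical_paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES] = {
		[ZONE_DMA]	= max_dma_pfn,
		[ZONE_NORMAL]	= max_low_pfn,
	};
	int i;

	/* Register each RAM range, tagged with its node id, with memblock. */
	for (i = 0; i < nr_mem_ranges; i++)
		memblock_add_node(mem[i].base, mem[i].size, mem[i].nid);

	/* Core mm then derives zone sizes and holes from the node map. */
	free_area_init_nodes(max_zone_pfns);
}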
@@ -1300,14 +1293,11 @@ extern void free_bootmem_with_active_regions(int nid,
 						unsigned long max_low_pfn);
 int add_from_early_node_map(struct range *range, int az,
 				   int nr_range, int nid);
-u64 __init find_memory_core_early(int nid, u64 size, u64 align,
-					u64 goal, u64 limit);
-typedef int (*work_fn_t)(unsigned long, unsigned long, void *);
-extern void work_with_active_regions(int nid, work_fn_t work_fn, void *data);
 extern void sparse_memory_present_with_active_regions(int nid);
-#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
 
-#if !defined(CONFIG_ARCH_POPULATES_NODE_MAP) && \
+#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
+
+#if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \
 	!defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID)
 static inline int __early_pfn_to_nid(unsigned long pfn)
 {
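For reference, the hunk cuts off at the opening brace; in this fallback configuration (no memblock node map and no arch-specific hook) the stub's known body simply attributes every early PFN to node 0:

static inline int __early_pfn_to_nid(unsigned long pfn)
{
	return 0;	/* no node map, no arch hook: all early memory is node 0 */
}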
@@ -1492,6 +1482,18 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma)
 	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
 }
 
+/* Look up the first VMA which exactly match the interval vm_start ... vm_end */
+static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
+				unsigned long vm_start, unsigned long vm_end)
+{
+	struct vm_area_struct *vma = find_vma(mm, vm_start);
+
+	if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end))
+		vma = NULL;
+
+	return vma;
+}
+
 #ifdef CONFIG_MMU
 pgprot_t vm_get_page_prot(unsigned long vm_flags);
 #else
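A usage sketch for the new find_exact_vma() helper: a hypothetical caller that proceeds only when [vm_start, vm_end) is exactly one VMA. touch_exact_mapping() is illustrative; the find_vma() lookup inside the helper requires mmap_sem to be held.

#include <linux/mm.h>
#include <linux/errno.h>

static int touch_exact_mapping(struct mm_struct *mm,
			       unsigned long start, unsigned long end)
{
	struct vm_area_struct *vma;
	int ret = -ENOENT;

	down_read(&mm->mmap_sem);		/* lookup needs mmap_sem */
	vma = find_exact_vma(mm, start, end);
	if (vma)
		ret = 0;			/* interval is exactly one VMA */
	up_read(&mm->mmap_sem);

	return ret;
}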
@@ -1538,23 +1540,13 @@ static inline void vm_stat_account(struct mm_struct *mm,
 #endif /* CONFIG_PROC_FS */
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
-extern int debug_pagealloc_enabled;
-
 extern void kernel_map_pages(struct page *page, int numpages, int enable);
-
-static inline void enable_debug_pagealloc(void)
-{
-	debug_pagealloc_enabled = 1;
-}
 #ifdef CONFIG_HIBERNATION
 extern bool kernel_page_present(struct page *page);
 #endif /* CONFIG_HIBERNATION */
 #else
 static inline void
 kernel_map_pages(struct page *page, int numpages, int enable) {}
-static inline void enable_debug_pagealloc(void)
-{
-}
 #ifdef CONFIG_HIBERNATION
 static inline bool kernel_page_present(struct page *page) { return true; }
 #endif /* CONFIG_HIBERNATION */
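For context on the interface that survives this cleanup: with CONFIG_DEBUG_PAGEALLOC the page allocator calls kernel_map_pages() to unmap pages from the kernel linear mapping when they are freed and to map them back on allocation, so a use-after-free faults immediately. A sketch with hypothetical hook names (the real call sites are in mm/page_alloc.c):

#include <linux/mm.h>

static void debug_free_hook(struct page *page, unsigned int order)
{
	kernel_map_pages(page, 1 << order, 0);	/* enable == 0: unmap */
}

static void debug_alloc_hook(struct page *page, unsigned int order)
{
	kernel_map_pages(page, 1 << order, 1);	/* enable == 1: map back */
}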
@@ -1628,5 +1620,22 @@ extern void copy_user_huge_page(struct page *dst, struct page *src,
 				unsigned int pages_per_huge_page);
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
 
+#ifdef CONFIG_DEBUG_PAGEALLOC
+extern unsigned int _debug_guardpage_minorder;
+
+static inline unsigned int debug_guardpage_minorder(void)
+{
+	return _debug_guardpage_minorder;
+}
+
+static inline bool page_is_guard(struct page *page)
+{
+	return test_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
+}
+#else
+static inline unsigned int debug_guardpage_minorder(void) { return 0; }
+static inline bool page_is_guard(struct page *page) { return false; }
+#endif /* CONFIG_DEBUG_PAGEALLOC */
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_MM_H */
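The new debug_guardpage_minorder()/page_is_guard() pair backs guard pages in the buddy allocator: when a high-order block is split, sub-blocks below the boot-configured order can be withheld as guards rather than returned to the free lists, catching stray writes into free memory. A decision sketch with a hypothetical helper (the real logic belongs in mm/page_alloc.c):

#include <linux/mm.h>

static bool should_become_guard(struct page *page, unsigned int order)
{
	/* Only sub-blocks below the configured threshold become guards;
	 * a page already marked as a guard is left alone. */
	return order < debug_guardpage_minorder() && !page_is_guard(page);
}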