path: root/include/linux/mm.h
author    Linus Torvalds <torvalds@linux-foundation.org>    2014-01-21 22:05:45 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>    2014-01-21 22:05:45 -0500
commit    df32e43a54d04eda35d2859beaf90e3864d53288 (patch)
tree      7a61cf658b2949bd426285eb9902be7758ced1ba /include/linux/mm.h
parent    fbd918a2026d0464ce9c23f57b7de4bcfccdc2e6 (diff)
parent    78d5506e82b21a1a1de68c24182db2c2fe521422 (diff)
Merge branch 'akpm' (incoming from Andrew)
Merge first patch-bomb from Andrew Morton:

 - a couple of misc things
 - inotify/fsnotify work from Jan
 - ocfs2 updates (partial)
 - about half of MM

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (117 commits)
  mm/migrate: remove unused function, fail_migrate_page()
  mm/migrate: remove putback_lru_pages, fix comment on putback_movable_pages
  mm/migrate: correct failure handling if !hugepage_migration_support()
  mm/migrate: add comment about permanent failure path
  mm, page_alloc: warn for non-blockable __GFP_NOFAIL allocation failure
  mm: compaction: reset scanner positions immediately when they meet
  mm: compaction: do not mark unmovable pageblocks as skipped in async compaction
  mm: compaction: detect when scanners meet in isolate_freepages
  mm: compaction: reset cached scanner pfn's before reading them
  mm: compaction: encapsulate defer reset logic
  mm: compaction: trace compaction begin and end
  memcg, oom: lock mem_cgroup_print_oom_info
  sched: add tracepoints related to NUMA task migration
  mm: numa: do not automatically migrate KSM pages
  mm: numa: trace tasks that fail migration due to rate limiting
  mm: numa: limit scope of lock for NUMA migrate rate limiting
  mm: numa: make NUMA-migrate related functions static
  lib/show_mem.c: show num_poisoned_pages when oom
  mm/hwpoison: add '#' to hwpoison_inject
  mm/memblock: use WARN_ONCE when MAX_NUMNODES passed as input parameter
  ...
Diffstat (limited to 'include/linux/mm.h')
-rw-r--r--  include/linux/mm.h | 70
1 file changed, 61 insertions(+), 9 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 35527173cf50..a512dd836931 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -57,6 +57,15 @@ extern int sysctl_legacy_va_layout;
 extern unsigned long sysctl_user_reserve_kbytes;
 extern unsigned long sysctl_admin_reserve_kbytes;
 
+extern int sysctl_overcommit_memory;
+extern int sysctl_overcommit_ratio;
+extern unsigned long sysctl_overcommit_kbytes;
+
+extern int overcommit_ratio_handler(struct ctl_table *, int, void __user *,
+				    size_t *, loff_t *);
+extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *,
+				     size_t *, loff_t *);
+
 #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
 
 /* to align the pointer to the (next) page boundary */
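The three overcommit knobs and their two proc handlers are made visible here so that the vm sysctl table can reference them directly. A minimal sketch of the kind of table entry this enables (the entry layout follows struct ctl_table; the exact placement in kernel/sysctl.c is an assumption, not part of this diff):

	#include <linux/sysctl.h>

	static struct ctl_table overcommit_table_sketch[] = {
		{
			.procname	= "overcommit_kbytes",
			.data		= &sysctl_overcommit_kbytes,
			.maxlen		= sizeof(sysctl_overcommit_kbytes),
			.mode		= 0644,
			/* assumed handler semantics: writing one of
			 * overcommit_kbytes/overcommit_ratio clears the other */
			.proc_handler	= overcommit_kbytes_handler,
		},
		{ }
	};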
@@ -414,15 +423,44 @@ static inline int page_count(struct page *page)
 	return atomic_read(&compound_head(page)->_count);
 }
 
+#ifdef CONFIG_HUGETLB_PAGE
+extern int PageHeadHuge(struct page *page_head);
+#else /* CONFIG_HUGETLB_PAGE */
+static inline int PageHeadHuge(struct page *page_head)
+{
+	return 0;
+}
+#endif /* CONFIG_HUGETLB_PAGE */
+
+static inline bool __compound_tail_refcounted(struct page *page)
+{
+	return !PageSlab(page) && !PageHeadHuge(page);
+}
+
+/*
+ * This takes a head page as parameter and tells if the
+ * tail page reference counting can be skipped.
+ *
+ * For this to be safe, PageSlab and PageHeadHuge must remain true on
+ * any given page where they return true here, until all tail pins
+ * have been released.
+ */
+static inline bool compound_tail_refcounted(struct page *page)
+{
+	VM_BUG_ON(!PageHead(page));
+	return __compound_tail_refcounted(page);
+}
+
 static inline void get_huge_page_tail(struct page *page)
 {
 	/*
-	 * __split_huge_page_refcount() cannot run
-	 * from under us.
+	 * __split_huge_page_refcount() cannot run from under us.
 	 */
+	VM_BUG_ON(!PageTail(page));
 	VM_BUG_ON(page_mapcount(page) < 0);
 	VM_BUG_ON(atomic_read(&page->_count) != 0);
-	atomic_inc(&page->_mapcount);
+	if (compound_tail_refcounted(page->first_page))
+		atomic_inc(&page->_mapcount);
 }
 
 extern bool __get_page_tail(struct page *page);
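The comment block above compound_tail_refcounted() carries the invariant that makes this optimization safe: PageSlab and PageHeadHuge must stay true until every tail pin is released, so those tails can skip per-tail _mapcount accounting. A hedged sketch of a get-page fast path consulting it (simplified; the real __get_page_tail() must additionally serialize against __split_huge_page_refcount() with the compound lock, which is elided here):

	/* Sketch only, not the in-tree implementation. */
	static bool get_tail_sketch(struct page *page)
	{
		struct page *head = compound_head(page);

		if (!__compound_tail_refcounted(head)) {
			/* slab or hugetlbfs tail: pinning the head is enough */
			if (likely(PageTail(page))) {
				atomic_inc(&head->_count);
				return true;
			}
			return false;
		}

		/* THP tail: pin the head and account the tail pin as well */
		atomic_inc(&head->_count);
		get_huge_page_tail(page);
		return true;
	}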
@@ -846,11 +884,14 @@ static __always_inline void *lowmem_page_address(const struct page *page)
 #endif
 
 #if defined(WANT_PAGE_VIRTUAL)
-#define page_address(page) ((page)->virtual)
-#define set_page_address(page, address)			\
-	do {						\
-		(page)->virtual = (address);		\
-	} while(0)
+static inline void *page_address(const struct page *page)
+{
+	return page->virtual;
+}
+static inline void set_page_address(struct page *page, void *address)
+{
+	page->virtual = address;
+}
 #define page_address_init()  do { } while(0)
 #endif
 
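Rewriting the WANT_PAGE_VIRTUAL accessors as static inlines costs no generated code but buys type checking: the old macros accepted any expression with a ->virtual member, while the inlines require a struct page pointer (and page_address() now takes it const). A contrived illustration, using a hypothetical struct that only the macro form would have accepted:

	struct has_virtual { void *virtual; };	/* hypothetical, illustration only */

	static void example(struct has_virtual *v, const struct page *page)
	{
		void *a = page_address(page);	/* fine with either form */

		/*
		 * page_address(v) used to compile as a macro because
		 * v->virtual exists; as a static inline it now fails with
		 * an incompatible-pointer-type error.
		 */
		(void)v;
		(void)a;
	}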
@@ -984,7 +1025,6 @@ extern void pagefault_out_of_memory(void);
  * various contexts.
  */
 #define SHOW_MEM_FILTER_NODES		(0x0001u)	/* disallowed nodes */
-#define SHOW_MEM_FILTER_PAGE_COUNT	(0x0002u)	/* page type count */
 
 extern void show_free_areas(unsigned int flags);
 extern bool skip_free_areas_node(unsigned int flags, int nid);
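With SHOW_MEM_FILTER_PAGE_COUNT gone, SHOW_MEM_FILTER_NODES is the only remaining filter, consumed via skip_free_areas_node(). A sketch of the expected caller pattern (loop shape assumed, not quoted from mm/page_alloc.c):

	#include <linux/nodemask.h>

	void show_mem_sketch(unsigned int flags)
	{
		int nid;

		for_each_online_node(nid) {
			/* honour SHOW_MEM_FILTER_NODES: skip nodes the
			 * current task's cpuset disallows */
			if (skip_free_areas_node(flags, nid))
				continue;
			/* ... dump this node's free-area statistics ... */
		}
	}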
@@ -1318,6 +1358,7 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
 
 #if USE_SPLIT_PTE_PTLOCKS
 #if ALLOC_SPLIT_PTLOCKS
+void __init ptlock_cache_init(void);
 extern bool ptlock_alloc(struct page *page);
 extern void ptlock_free(struct page *page);
 
@@ -1326,6 +1367,10 @@ static inline spinlock_t *ptlock_ptr(struct page *page)
 	return page->ptl;
 }
 #else /* ALLOC_SPLIT_PTLOCKS */
+static inline void ptlock_cache_init(void)
+{
+}
+
 static inline bool ptlock_alloc(struct page *page)
 {
 	return true;
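Under ALLOC_SPLIT_PTLOCKS the page-table lock does not fit in struct page and is allocated separately; ptlock_cache_init() exists so those allocations come from a dedicated slab. A sketch of the out-of-line definitions this declaration implies (cache name and SLAB_PANIC are assumptions modeled on the split-ptlock series; the real code lives in mm/memory.c):

	static struct kmem_cache *page_ptl_cachep;

	void __init ptlock_cache_init(void)
	{
		/* one slab shared by all separately allocated PTE locks */
		page_ptl_cachep = kmem_cache_create("page->ptl",
						    sizeof(spinlock_t), 0,
						    SLAB_PANIC, NULL);
	}

	bool ptlock_alloc(struct page *page)
	{
		spinlock_t *ptl;

		ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL);
		if (!ptl)
			return false;
		page->ptl = ptl;
		return true;
	}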
@@ -1378,10 +1423,17 @@ static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
 {
 	return &mm->page_table_lock;
 }
+static inline void ptlock_cache_init(void) {}
 static inline bool ptlock_init(struct page *page) { return true; }
 static inline void pte_lock_deinit(struct page *page) {}
 #endif /* USE_SPLIT_PTE_PTLOCKS */
 
+static inline void pgtable_init(void)
+{
+	ptlock_cache_init();
+	pgtable_cache_init();
+}
+
 static inline bool pgtable_page_ctor(struct page *page)
 {
 	inc_zone_page_state(page, NR_PAGETABLE);
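pgtable_init() bundles the new ptlock cache setup with the existing arch-provided pgtable_cache_init() hook, so early boot needs a single call. A sketch of the expected call site in init/main.c's mm_init() (neighboring calls abbreviated; exact ordering is an assumption):

	static void __init mm_init(void)
	{
		/* ... */
		mem_init();
		kmem_cache_init();	/* slab must be up before ptlock_cache_init() */
		pgtable_init();		/* replaces the bare pgtable_cache_init() call */
		vmalloc_init();
	}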