path: root/include/linux/mm.h
author		Tejun Heo <tj@kernel.org>	2011-11-28 12:46:22 -0500
committer	Tejun Heo <tj@kernel.org>	2011-11-28 12:46:22 -0500
commit		d4bbf7e7759afc172e2bfbc5c416324590049cdd (patch)
tree		7eab5ee5481cd3dcf1162329fec827177640018a /include/linux/mm.h
parent		a150439c4a97db379f0ed6faa46fbbb6e7bf3cb2 (diff)
parent		401d0069cb344f401bc9d264c31db55876ff78c0 (diff)
Merge branch 'master' into x86/memblock
Conflicts & resolutions:

* arch/x86/xen/setup.c
	dc91c728fd "xen: allow extra memory to be in multiple regions"
	24aa07882b "memblock, x86: Replace memblock_x86_reserve/free..."
	conflicted on xen_add_extra_mem() updates. The resolution is
	trivial as the latter just wants to replace
	memblock_x86_reserve_range() with memblock_reserve().

* drivers/pci/intel-iommu.c
	166e9278a3f "x86/ia64: intel-iommu: move to drivers/iommu/"
	5dfe8660a3d "bootmem: Replace work_with_active_regions() with..."
	conflicted as the former moved the file under drivers/iommu/.
	Resolved by applying the changes from the latter to the moved
	file.

* mm/Kconfig
	6661672053a "memblock: add NO_BOOTMEM config symbol"
	c378ddd53f9 "memblock, x86: Make ARCH_DISCARD_MEMBLOCK a config option"
	conflicted trivially. Both added config options. Just letting
	both add their own options resolves the conflict.

* mm/memblock.c
	d1f0ece6cdc "mm/memblock.c: small function definition fixes"
	ed7b56a799c "memblock: Remove memblock_memory_can_coalesce()"
	conflicted. The former updates a function removed by the
	latter. Resolution is trivial.

Signed-off-by: Tejun Heo <tj@kernel.org>
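[Editor's note] For context on the xen/setup.c resolution, a hedged sketch of the shape of the replacement the message describes; the variable names (extra_start, size) and the label string are illustrative, not quoted from the conflicting commits:

	/* Before: old x86-specific API taking a start/end range and a label. */
	memblock_x86_reserve_range(extra_start, extra_start + size, "XEN EXTRA");

	/* After: generic memblock API taking a base and a size, no label. */
	memblock_reserve(extra_start, size);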
Diffstat (limited to 'include/linux/mm.h')
-rw-r--r--	include/linux/mm.h	139
1 file changed, 57 insertions(+), 82 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index ceb1e4a1a736..6b365aee8396 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -15,6 +15,7 @@
 #include <linux/range.h>
 #include <linux/pfn.h>
 #include <linux/bit_spinlock.h>
+#include <linux/shrinker.h>
 
 struct mempolicy;
 struct anon_vma;
@@ -355,36 +356,50 @@ static inline struct page *compound_head(struct page *page)
355 return page; 356 return page;
356} 357}
357 358
359/*
360 * The atomic page->_mapcount, starts from -1: so that transitions
361 * both from it and to it can be tracked, using atomic_inc_and_test
362 * and atomic_add_negative(-1).
363 */
364static inline void reset_page_mapcount(struct page *page)
365{
366 atomic_set(&(page)->_mapcount, -1);
367}
368
369static inline int page_mapcount(struct page *page)
370{
371 return atomic_read(&(page)->_mapcount) + 1;
372}
373
358static inline int page_count(struct page *page) 374static inline int page_count(struct page *page)
359{ 375{
360 return atomic_read(&compound_head(page)->_count); 376 return atomic_read(&compound_head(page)->_count);
361} 377}
362 378
379static inline void get_huge_page_tail(struct page *page)
380{
381 /*
382 * __split_huge_page_refcount() cannot run
383 * from under us.
384 */
385 VM_BUG_ON(page_mapcount(page) < 0);
386 VM_BUG_ON(atomic_read(&page->_count) != 0);
387 atomic_inc(&page->_mapcount);
388}
389
390extern bool __get_page_tail(struct page *page);
391
363static inline void get_page(struct page *page) 392static inline void get_page(struct page *page)
364{ 393{
394 if (unlikely(PageTail(page)))
395 if (likely(__get_page_tail(page)))
396 return;
365 /* 397 /*
366 * Getting a normal page or the head of a compound page 398 * Getting a normal page or the head of a compound page
367 * requires to already have an elevated page->_count. Only if 399 * requires to already have an elevated page->_count.
368 * we're getting a tail page, the elevated page->_count is
369 * required only in the head page, so for tail pages the
370 * bugcheck only verifies that the page->_count isn't
371 * negative.
372 */ 400 */
373 VM_BUG_ON(atomic_read(&page->_count) < !PageTail(page)); 401 VM_BUG_ON(atomic_read(&page->_count) <= 0);
374 atomic_inc(&page->_count); 402 atomic_inc(&page->_count);
375 /*
376 * Getting a tail page will elevate both the head and tail
377 * page->_count(s).
378 */
379 if (unlikely(PageTail(page))) {
380 /*
381 * This is safe only because
382 * __split_huge_page_refcount can't run under
383 * get_page().
384 */
385 VM_BUG_ON(atomic_read(&page->first_page->_count) <= 0);
386 atomic_inc(&page->first_page->_count);
387 }
388} 403}
389 404
390static inline struct page *virt_to_head_page(const void *x) 405static inline struct page *virt_to_head_page(const void *x)
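[Editor's note] The -1 base convention in the hunk above is what makes the first-map and last-unmap transitions cheap to detect. A minimal illustration, not kernel code; first_map_action() and last_unmap_action() are hypothetical hooks:

	atomic_t mapcount = ATOMIC_INIT(-1);	/* as reset_page_mapcount() sets it */

	if (atomic_inc_and_test(&mapcount))	/* -1 -> 0: true only on the first map */
		first_map_action();		/* hypothetical hook */

	if (atomic_add_negative(-1, &mapcount))	/* 0 -> -1: true only on the last unmap */
		last_unmap_action();		/* hypothetical hook */

	/* page_mapcount() returns atomic_read() + 1, i.e. the number of mappings. */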
@@ -636,7 +651,7 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
 #define SECTIONS_MASK		((1UL << SECTIONS_WIDTH) - 1)
 #define ZONEID_MASK		((1UL << ZONEID_SHIFT) - 1)
 
-static inline enum zone_type page_zonenum(struct page *page)
+static inline enum zone_type page_zonenum(const struct page *page)
 {
 	return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
 }
@@ -664,15 +679,15 @@ static inline int zone_to_nid(struct zone *zone)
664} 679}
665 680
666#ifdef NODE_NOT_IN_PAGE_FLAGS 681#ifdef NODE_NOT_IN_PAGE_FLAGS
667extern int page_to_nid(struct page *page); 682extern int page_to_nid(const struct page *page);
668#else 683#else
669static inline int page_to_nid(struct page *page) 684static inline int page_to_nid(const struct page *page)
670{ 685{
671 return (page->flags >> NODES_PGSHIFT) & NODES_MASK; 686 return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
672} 687}
673#endif 688#endif
674 689
675static inline struct zone *page_zone(struct page *page) 690static inline struct zone *page_zone(const struct page *page)
676{ 691{
677 return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)]; 692 return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
678} 693}
@@ -684,7 +699,7 @@ static inline void set_page_section(struct page *page, unsigned long section)
 	page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
 }
 
-static inline unsigned long page_to_section(struct page *page)
+static inline unsigned long page_to_section(const struct page *page)
 {
 	return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
 }
@@ -717,7 +732,7 @@ static inline void set_page_links(struct page *page, enum zone_type zone,
  */
 #include <linux/vmstat.h>
 
-static __always_inline void *lowmem_page_address(struct page *page)
+static __always_inline void *lowmem_page_address(const struct page *page)
 {
 	return __va(PFN_PHYS(page_to_pfn(page)));
 }
@@ -736,7 +751,7 @@ static __always_inline void *lowmem_page_address(struct page *page)
736#endif 751#endif
737 752
738#if defined(HASHED_PAGE_VIRTUAL) 753#if defined(HASHED_PAGE_VIRTUAL)
739void *page_address(struct page *page); 754void *page_address(const struct page *page);
740void set_page_address(struct page *page, void *virtual); 755void set_page_address(struct page *page, void *virtual);
741void page_address_init(void); 756void page_address_init(void);
742#endif 757#endif
@@ -803,21 +818,6 @@ static inline pgoff_t page_index(struct page *page)
 }
 
 /*
- * The atomic page->_mapcount, like _count, starts from -1:
- * so that transitions both from it and to it can be tracked,
- * using atomic_inc_and_test and atomic_add_negative(-1).
- */
-static inline void reset_page_mapcount(struct page *page)
-{
-	atomic_set(&(page)->_mapcount, -1);
-}
-
-static inline int page_mapcount(struct page *page)
-{
-	return atomic_read(&(page)->_mapcount) + 1;
-}
-
-/*
  * Return true if this page is mapped into pagetables.
  */
 static inline int page_mapped(struct page *page)
@@ -910,6 +910,8 @@ unsigned long unmap_vmas(struct mmu_gather *tlb,
  * @pte_entry: if set, called for each non-empty PTE (4th-level) entry
  * @pte_hole: if set, called for each hole at all levels
  * @hugetlb_entry: if set, called for each hugetlb entry
+ *		   *Caution*: The caller must hold mmap_sem() if @hugetlb_entry
+ *			     is used.
  *
  * (see walk_page_range for more details)
  */
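[Editor's note] A hedged sketch of the locking rule the new kernel-doc line documents, assuming the 3.2-era mm_walk/walk_page_range() API; my_hugetlb_entry() and walk_hugetlb_mappings() are hypothetical names:

	static int my_hugetlb_entry(pte_t *pte, unsigned long hmask,
				    unsigned long addr, unsigned long next,
				    struct mm_walk *walk)
	{
		return 0;	/* 0 continues the walk */
	}

	static void walk_hugetlb_mappings(struct mm_struct *mm,
					  unsigned long start, unsigned long end)
	{
		struct mm_walk walk = {
			.hugetlb_entry	= my_hugetlb_entry,
			.mm		= mm,
		};

		down_read(&mm->mmap_sem);	/* required when hugetlb_entry is set */
		walk_page_range(start, end, &walk);
		up_read(&mm->mmap_sem);
	}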
@@ -959,6 +961,8 @@ int invalidate_inode_page(struct page *page);
959#ifdef CONFIG_MMU 961#ifdef CONFIG_MMU
960extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, 962extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
961 unsigned long address, unsigned int flags); 963 unsigned long address, unsigned int flags);
964extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
965 unsigned long address, unsigned int fault_flags);
962#else 966#else
963static inline int handle_mm_fault(struct mm_struct *mm, 967static inline int handle_mm_fault(struct mm_struct *mm,
964 struct vm_area_struct *vma, unsigned long address, 968 struct vm_area_struct *vma, unsigned long address,
@@ -968,6 +972,14 @@ static inline int handle_mm_fault(struct mm_struct *mm,
 	BUG();
 	return VM_FAULT_SIGBUS;
 }
+static inline int fixup_user_fault(struct task_struct *tsk,
+		struct mm_struct *mm, unsigned long address,
+		unsigned int fault_flags)
+{
+	/* should never happen if there's no MMU */
+	BUG();
+	return -EFAULT;
+}
 #endif
 
 extern int make_pages_present(unsigned long addr, unsigned long end);
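[Editor's note] fixup_user_fault() is aimed at callers, such as the futex code, that faulted on a user address in a context where handle_mm_fault() could not be called directly. A hedged usage sketch, assuming the caller takes mmap_sem as the implementation of this era expects; fault_in_and_retry() is a hypothetical name:

	static int fault_in_and_retry(u32 __user *uaddr)
	{
		struct mm_struct *mm = current->mm;
		int ret;

		down_read(&mm->mmap_sem);
		ret = fixup_user_fault(current, mm, (unsigned long)uaddr,
				       FAULT_FLAG_WRITE);
		up_read(&mm->mmap_sem);

		return ret;	/* 0 on success, negative error (e.g. -EFAULT) otherwise */
	}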
@@ -1121,44 +1133,6 @@ static inline void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
 }
 #endif
 
-/*
- * This struct is used to pass information from page reclaim to the shrinkers.
- * We consolidate the values for easier extention later.
- */
-struct shrink_control {
-	gfp_t gfp_mask;
-
-	/* How many slab objects shrinker() should scan and try to reclaim */
-	unsigned long nr_to_scan;
-};
-
-/*
- * A callback you can register to apply pressure to ageable caches.
- *
- * 'sc' is passed shrink_control which includes a count 'nr_to_scan'
- * and a 'gfpmask'.  It should look through the least-recently-used
- * 'nr_to_scan' entries and attempt to free them up.  It should return
- * the number of objects which remain in the cache.  If it returns -1, it means
- * it cannot do any scanning at this time (eg. there is a risk of deadlock).
- *
- * The 'gfpmask' refers to the allocation we are currently trying to
- * fulfil.
- *
- * Note that 'shrink' will be passed nr_to_scan == 0 when the VM is
- * querying the cache size, so a fastpath for that case is appropriate.
- */
-struct shrinker {
-	int (*shrink)(struct shrinker *, struct shrink_control *sc);
-	int seeks;	/* seeks to recreate an obj */
-
-	/* These are for internal use */
-	struct list_head list;
-	long nr;	/* objs pending delete */
-};
-#define DEFAULT_SEEKS 2 /* A good number if you don't know better. */
-extern void register_shrinker(struct shrinker *);
-extern void unregister_shrinker(struct shrinker *);
-
 int vma_wants_writenotify(struct vm_area_struct *vma);
 
 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
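[Editor's note] The definitions removed above are not gone; they moved to <linux/shrinker.h>, which the first hunk of this diff now includes. A minimal sketch of that era's callback contract; my_cache_count() and my_cache_evict() are hypothetical helpers:

	#include <linux/shrinker.h>

	/* Hypothetical accounting helpers for some cache. */
	static unsigned long my_cache_count(void);
	static unsigned long my_cache_evict(unsigned long nr, gfp_t gfp);

	static int my_cache_shrink(struct shrinker *s, struct shrink_control *sc)
	{
		if (sc->nr_to_scan == 0)
			return my_cache_count();	/* size query: keep this path fast */
		my_cache_evict(sc->nr_to_scan, sc->gfp_mask);
		return my_cache_count();		/* objects remaining in the cache */
	}

	static struct shrinker my_shrinker = {
		.shrink	= my_cache_shrink,
		.seeks	= DEFAULT_SEEKS,
	};

	/* register_shrinker(&my_shrinker) at init, unregister_shrinker() at exit. */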
@@ -1377,7 +1351,8 @@ extern void si_meminfo(struct sysinfo * val);
1377extern void si_meminfo_node(struct sysinfo *val, int nid); 1351extern void si_meminfo_node(struct sysinfo *val, int nid);
1378extern int after_bootmem; 1352extern int after_bootmem;
1379 1353
1380extern void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...); 1354extern __printf(3, 4)
1355void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...);
1381 1356
1382extern void setup_per_cpu_pageset(void); 1357extern void setup_per_cpu_pageset(void);
1383 1358
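[Editor's note] __printf(a, b) is the kernel's wrapper in <linux/compiler.h> around gcc's format attribute: __printf(3, 4) expands to __attribute__((format(printf, 3, 4))), marking argument 3 as the format string with variadic checking starting at argument 4. An illustrative call (the order variable is a stand-in local):

	warn_alloc_failed(GFP_KERNEL, order, "order %d allocation failed\n", order);
	/* Passing, say, a pointer where %d expects an int now triggers -Wformat. */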
@@ -1464,8 +1439,7 @@ extern int do_munmap(struct mm_struct *, unsigned long, size_t);
1464 1439
1465extern unsigned long do_brk(unsigned long, unsigned long); 1440extern unsigned long do_brk(unsigned long, unsigned long);
1466 1441
1467/* filemap.c */ 1442/* truncate.c */
1468extern unsigned long page_unuse(struct page *);
1469extern void truncate_inode_pages(struct address_space *, loff_t); 1443extern void truncate_inode_pages(struct address_space *, loff_t);
1470extern void truncate_inode_pages_range(struct address_space *, 1444extern void truncate_inode_pages_range(struct address_space *,
1471 loff_t lstart, loff_t lend); 1445 loff_t lstart, loff_t lend);
@@ -1652,6 +1626,7 @@ enum mf_flags {
1652}; 1626};
1653extern void memory_failure(unsigned long pfn, int trapno); 1627extern void memory_failure(unsigned long pfn, int trapno);
1654extern int __memory_failure(unsigned long pfn, int trapno, int flags); 1628extern int __memory_failure(unsigned long pfn, int trapno, int flags);
1629extern void memory_failure_queue(unsigned long pfn, int trapno, int flags);
1655extern int unpoison_memory(unsigned long pfn); 1630extern int unpoison_memory(unsigned long pfn);
1656extern int sysctl_memory_failure_early_kill; 1631extern int sysctl_memory_failure_early_kill;
1657extern int sysctl_memory_failure_recovery; 1632extern int sysctl_memory_failure_recovery;
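[Editor's note] memory_failure_queue() exists so memory-failure reports can be raised from contexts, such as machine-check handling, where memory_failure() itself cannot safely run; the pfn is queued and processed later in process context. A hedged call-site sketch (the flags value 0 is illustrative):

	/* Queue the poisoned pfn; the actual handling is deferred. */
	memory_failure_queue(pfn, trapno, 0);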