Diffstat (limited to 'include/linux/mm.h')
 include/linux/mm.h | 70 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 70 insertions(+), 0 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index bcaab4e6fe91..7f4f906190bd 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -693,6 +693,36 @@ static inline int page_to_nid(const struct page *page)
 }
 #endif
 
+#ifdef CONFIG_NUMA_BALANCING
+static inline int page_xchg_last_nid(struct page *page, int nid)
+{
+	return xchg(&page->_last_nid, nid);
+}
+
+static inline int page_last_nid(struct page *page)
+{
+	return page->_last_nid;
+}
+static inline void reset_page_last_nid(struct page *page)
+{
+	page->_last_nid = -1;
+}
+#else
+static inline int page_xchg_last_nid(struct page *page, int nid)
+{
+	return page_to_nid(page);
+}
+
+static inline int page_last_nid(struct page *page)
+{
+	return page_to_nid(page);
+}
+
+static inline void reset_page_last_nid(struct page *page)
+{
+}
+#endif
+
 static inline struct zone *page_zone(const struct page *page)
 {
 	return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
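The hunk above records, per struct page, the id of the NUMA node that last touched the page. The atomic exchange is the point: the hinting-fault path can read the previous node and store the new one in a single step, with -1 ("no node recorded yet") as the value reset_page_last_nid() restores. The fragment below is a minimal userspace sketch of that pattern only, using GCC/Clang __atomic builtins in place of the kernel's xchg(); the fake_page type and the driver in main() are invented for the demo and are not kernel code.

/* Userspace sketch only: mimics the xchg-based last-nid tracking above. */
#include <stdio.h>

struct fake_page {			/* stand-in for struct page */
	int _last_nid;			/* last node that faulted on the page */
};

/* Atomically record the new node id and return the previous one. */
static int fake_page_xchg_last_nid(struct fake_page *page, int nid)
{
	return __atomic_exchange_n(&page->_last_nid, nid, __ATOMIC_SEQ_CST);
}

int main(void)
{
	struct fake_page page = { ._last_nid = -1 };	/* "reset" state */

	/* Two simulated NUMA hinting faults, from node 0 then node 1. */
	printf("previous nid: %d\n", fake_page_xchg_last_nid(&page, 0));	/* prints -1 */
	printf("previous nid: %d\n", fake_page_xchg_last_nid(&page, 1));	/* prints  0 */
	return 0;
}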
@@ -1078,6 +1108,9 @@ extern unsigned long move_page_tables(struct vm_area_struct *vma,
 extern unsigned long do_mremap(unsigned long addr,
 			unsigned long old_len, unsigned long new_len,
 			unsigned long flags, unsigned long new_addr);
+extern unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
+			unsigned long end, pgprot_t newprot,
+			int dirty_accountable, int prot_numa);
 extern int mprotect_fixup(struct vm_area_struct *vma,
 			struct vm_area_struct **pprev, unsigned long start,
 			unsigned long end, unsigned long newflags);
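change_protection() is exported here so that callers other than mprotect_fixup() can rewrite the protections of an address range; the new prot_numa argument lets the NUMA balancing code request a pass that leaves the range inaccessible so the next touch takes a hinting fault rather than applying an ordinary protection change. The sketch below illustrates only the general protection-driven-fault idea in userspace, with mprotect(PROT_NONE) and a SIGSEGV handler; it is not the kernel mechanism, and every name in it is invented for the demo.

/* Userspace illustration of a protection-driven "hinting fault". */
#define _GNU_SOURCE
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

static char *region;
static size_t region_len;

/* On the fault, note the access and make the range accessible again. */
static void hinting_fault(int sig, siginfo_t *si, void *ctx)
{
	static const char msg[] = "hinting fault taken\n";

	(void)sig; (void)si; (void)ctx;
	write(STDERR_FILENO, msg, sizeof(msg) - 1);
	/* mprotect() is a plain syscall; calling it from the handler is fine here. */
	mprotect(region, region_len, PROT_READ | PROT_WRITE);
}

int main(void)
{
	struct sigaction sa;

	region_len = (size_t)sysconf(_SC_PAGESIZE);
	region = mmap(NULL, region_len, PROT_READ | PROT_WRITE,
		      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (region == MAP_FAILED)
		return 1;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = hinting_fault;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGSEGV, &sa, NULL);

	region[0] = 1;					/* normal access */
	mprotect(region, region_len, PROT_NONE);	/* "prot_numa"-style pass */
	region[0] = 2;					/* faults; handler restores access */
	printf("value after fault: %d\n", region[0]);

	munmap(region, region_len);
	return 0;
}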
@@ -1456,6 +1489,37 @@ extern unsigned long vm_mmap(struct file *, unsigned long,
 			unsigned long, unsigned long,
 			unsigned long, unsigned long);
 
+struct vm_unmapped_area_info {
+#define VM_UNMAPPED_AREA_TOPDOWN 1
+	unsigned long flags;
+	unsigned long length;
+	unsigned long low_limit;
+	unsigned long high_limit;
+	unsigned long align_mask;
+	unsigned long align_offset;
+};
+
+extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
+extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
+
+/*
+ * Search for an unmapped address range.
+ *
+ * We are looking for a range that:
+ * - does not intersect with any VMA;
+ * - is contained within the [low_limit, high_limit) interval;
+ * - is at least the desired size.
+ * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
+ */
+static inline unsigned long
+vm_unmapped_area(struct vm_unmapped_area_info *info)
+{
+	if (!(info->flags & VM_UNMAPPED_AREA_TOPDOWN))
+		return unmapped_area(info);
+	else
+		return unmapped_area_topdown(info);
+}
+
 /* truncate.c */
 extern void truncate_inode_pages(struct address_space *, loff_t);
 extern void truncate_inode_pages_range(struct address_space *,
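The comment block above fully specifies vm_unmapped_area(): find a gap that avoids every VMA, fits within [low_limit, high_limit), is at least length bytes, and starts at an address whose low bits match align_offset under align_mask. The standalone sketch below exercises only the alignment arithmetic implied by that last constraint, for both search directions; the gap addresses are invented, and the real unmapped_area()/unmapped_area_topdown() perform this as part of a VMA-tree walk rather than on hard-coded values.

/*
 * Userspace sketch of the address-selection arithmetic implied by the
 * comment above: pick an address inside a free gap that satisfies
 * (addr & align_mask) == (align_offset & align_mask).
 */
#include <assert.h>
#include <stdio.h>

static unsigned long pick_bottomup(unsigned long gap_start, unsigned long length,
				   unsigned long align_mask, unsigned long align_offset)
{
	/* Round gap_start up to the next address with the requested phase. */
	gap_start += (align_offset - gap_start) & align_mask;
	(void)length;	/* caller already checked the gap is big enough */
	return gap_start;
}

static unsigned long pick_topdown(unsigned long gap_end, unsigned long length,
				  unsigned long align_mask, unsigned long align_offset)
{
	/* Place the range as high as possible, then round down to the phase. */
	unsigned long addr = gap_end - length;

	addr -= (addr - align_offset) & align_mask;
	return addr;
}

int main(void)
{
	unsigned long mask = 0x1fffff;		/* e.g. 2MB alignment for THP */
	unsigned long offset = 0x1000;		/* desired phase within that alignment */
	unsigned long lo, hi;

	lo = pick_bottomup(0x10000000UL, 0x200000UL, mask, offset);
	hi = pick_topdown(0x7f0000000000UL, 0x200000UL, mask, offset);

	assert((lo & mask) == (offset & mask));
	assert((hi & mask) == (offset & mask));
	printf("bottom-up candidate: %#lx\ntop-down candidate:  %#lx\n", lo, hi);
	return 0;
}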
@@ -1548,6 +1612,11 @@ static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
 }
 #endif
 
+#ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE
+unsigned long change_prot_numa(struct vm_area_struct *vma,
+			unsigned long start, unsigned long end);
+#endif
+
 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
 			unsigned long pfn, unsigned long size, pgprot_t);
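For architectures that build NUMA hinting PTEs on top of PROT_NONE, change_prot_numa() is expected to be little more than a wrapper that runs change_protection() over the range with prot_numa set. The compilable mock below shows only that wrapper shape, reusing the change_protection() signature declared earlier in this patch; the stand-in types, the stub body, and the choice of vma->vm_page_prot as the "new" protection are assumptions made for the demo, not the actual mm/mempolicy.c code.

/*
 * Standalone sketch: a mock change_protection() so the expected wrapper
 * shape can be shown and compiled outside the kernel.  Types are fake.
 */
#include <stdio.h>

struct vm_area_struct { unsigned long vm_page_prot; };	/* stand-in */
typedef unsigned long pgprot_t;				/* stand-in */

/* Mock of the interface declared above: returns "pages updated". */
static unsigned long change_protection(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	printf("change_protection(%#lx-%#lx, prot_numa=%d)\n",
	       start, end, prot_numa);
	(void)vma; (void)newprot; (void)dirty_accountable;
	return (end - start) >> 12;	/* pretend every 4K page was updated */
}

/* The wrapper is expected to be little more than this. */
static unsigned long change_prot_numa(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	return change_protection(vma, start, end, vma->vm_page_prot,
				 0 /* dirty_accountable */, 1 /* prot_numa */);
}

int main(void)
{
	struct vm_area_struct vma = { .vm_page_prot = 0 };

	printf("updated %lu pages\n",
	       change_prot_numa(&vma, 0x400000, 0x600000));
	return 0;
}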
@@ -1569,6 +1638,7 @@ struct page *follow_page(struct vm_area_struct *, unsigned long address,
 #define FOLL_MLOCK	0x40	/* mark page as mlocked */
 #define FOLL_SPLIT	0x80	/* don't return transhuge pages, split them */
 #define FOLL_HWPOISON	0x100	/* check page is hwpoisoned */
+#define FOLL_NUMA	0x200	/* force NUMA hinting page fault */
 
 typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
 			void *data);