author     Linus Torvalds <torvalds@linux-foundation.org>   2015-02-10 19:45:56 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2015-02-10 19:45:56 -0500
commit     992de5a8eca7cbd3215e3eb2c439b2c11582a58b (patch)
tree       863988f84c1dd57a02fa337ecbce49263a3b9511 /include/linux
parent     b2718bffb4088faf13092db30c1ebf088ddee52e (diff)
parent     d5b3cf7139b8770af4ed8bb36a1ab9d290ac39e9 (diff)
Merge branch 'akpm' (patches from Andrew)
Merge misc updates from Andrew Morton:
 "Bite-sized chunks this time, to avoid the MTA ratelimiting woes.

  - fs/notify updates

  - ocfs2

  - some of MM"

That laconic "some MM" is mainly the removal of remap_file_pages(), which
is a big simplification of the VM, and which gets rid of a *lot* of random
cruft and special cases because we no longer support the non-linear
mappings that it used.

From a user interface perspective, nothing has changed, because the
remap_file_pages() syscall still exists, it's just done by emulating the
old behavior by creating a lot of individual small mappings instead of one
non-linear one.

The emulation is slower than the old "native" non-linear mappings, but
nobody really uses or cares about remap_file_pages(), and simplifying the
VM is a big advantage.

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (78 commits)
  memcg: zap memcg_slab_caches and memcg_slab_mutex
  memcg: zap memcg_name argument of memcg_create_kmem_cache
  memcg: zap __memcg_{charge,uncharge}_slab
  mm/page_alloc.c: place zone_id check before VM_BUG_ON_PAGE check
  mm: hugetlb: fix type of hugetlb_treat_as_movable variable
  mm, hugetlb: remove unnecessary lower bound on sysctl handlers"?
  mm: memory: merge shared-writable dirtying branches in do_wp_page()
  mm: memory: remove ->vm_file check on shared writable vmas
  xtensa: drop _PAGE_FILE and pte_file()-related helpers
  x86: drop _PAGE_FILE and pte_file()-related helpers
  unicore32: drop pte_file()-related helpers
  um: drop _PAGE_FILE and pte_file()-related helpers
  tile: drop pte_file()-related helpers
  sparc: drop pte_file()-related helpers
  sh: drop _PAGE_FILE and pte_file()-related helpers
  score: drop _PAGE_FILE and pte_file()-related helpers
  s390: drop pte_file()-related helpers
  parisc: drop _PAGE_FILE and pte_file()-related helpers
  openrisc: drop _PAGE_FILE and pte_file()-related helpers
  nios2: drop _PAGE_FILE and pte_file()-related helpers
  ...
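For context on the interface whose in-kernel implementation this merge removes:
remap_file_pages() rearranges which pages of a file back which pages of an
existing MAP_SHARED mapping. Below is a minimal userspace sketch (illustration
only, not part of this merge; the file name "testfile" and its size are
assumptions) of a call that the kernel now services by emulation with ordinary
per-page mappings instead of a single VM_NONLINEAR vma; the userspace-visible
behaviour is the same either way.

/*
 * Sketch only: assumes a file "testfile" of at least two pages exists.
 * Map two pages of the file, then ask the kernel to make page 0 of the
 * mapping show page 1 of the file.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        long psz = sysconf(_SC_PAGESIZE);
        int fd = open("testfile", O_RDWR);
        if (fd < 0)
                return 1;

        char *map = mmap(NULL, 2 * psz, PROT_READ | PROT_WRITE,
                         MAP_SHARED, fd, 0);
        if (map == MAP_FAILED)
                return 1;

        /* prot must be 0; pgoff = 1 selects the file's second page */
        if (remap_file_pages(map, psz, 0, 1, 0) != 0) {
                perror("remap_file_pages");
                return 1;
        }

        /* map[0] now reads the byte at file offset psz */
        printf("byte at file offset %ld: %c\n", psz, map[0]);
        return 0;
}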
Diffstat (limited to 'include/linux')
-rw-r--r--   include/linux/fs.h            6
-rw-r--r--   include/linux/fsnotify.h      6
-rw-r--r--   include/linux/hugetlb.h       2
-rw-r--r--   include/linux/memcontrol.h    7
-rw-r--r--   include/linux/mm.h           55
-rw-r--r--   include/linux/mm_types.h     12
-rw-r--r--   include/linux/rmap.h          2
-rw-r--r--   include/linux/slab.h          7
-rw-r--r--   include/linux/swapops.h       4
9 files changed, 49 insertions, 52 deletions
diff --git a/include/linux/fs.h b/include/linux/fs.h
index ddd2fa7cefd3..f125b88443bd 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -401,7 +401,6 @@ struct address_space {
         spinlock_t tree_lock;            /* and lock protecting it */
         atomic_t i_mmap_writable;        /* count VM_SHARED mappings */
         struct rb_root i_mmap;           /* tree of private and shared mappings */
-        struct list_head i_mmap_nonlinear; /*list VM_NONLINEAR mappings */
         struct rw_semaphore i_mmap_rwsem; /* protect tree, count, list */
         /* Protected by tree_lock together with the radix tree */
         unsigned long nrpages;           /* number of total pages */
@@ -493,8 +492,7 @@ static inline void i_mmap_unlock_read(struct address_space *mapping)
  */
 static inline int mapping_mapped(struct address_space *mapping)
 {
-        return !RB_EMPTY_ROOT(&mapping->i_mmap) ||
-                !list_empty(&mapping->i_mmap_nonlinear);
+        return !RB_EMPTY_ROOT(&mapping->i_mmap);
 }

 /*
@@ -2501,8 +2499,6 @@ extern int sb_min_blocksize(struct super_block *, int);

 extern int generic_file_mmap(struct file *, struct vm_area_struct *);
 extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *);
-extern int generic_file_remap_pages(struct vm_area_struct *, unsigned long addr,
-                unsigned long size, pgoff_t pgoff);
 int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk);
 extern ssize_t generic_file_read_iter(struct kiocb *, struct iov_iter *);
 extern ssize_t __generic_file_write_iter(struct kiocb *, struct iov_iter *);
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
index 1c804b057fb1..7ee1774edee5 100644
--- a/include/linux/fsnotify.h
+++ b/include/linux/fsnotify.h
@@ -101,8 +101,10 @@ static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir,
                 new_dir_mask |= FS_ISDIR;
         }

-        fsnotify(old_dir, old_dir_mask, old_dir, FSNOTIFY_EVENT_INODE, old_name, fs_cookie);
-        fsnotify(new_dir, new_dir_mask, new_dir, FSNOTIFY_EVENT_INODE, new_name, fs_cookie);
+        fsnotify(old_dir, old_dir_mask, source, FSNOTIFY_EVENT_INODE, old_name,
+                 fs_cookie);
+        fsnotify(new_dir, new_dir_mask, source, FSNOTIFY_EVENT_INODE, new_name,
+                 fs_cookie);

         if (target)
                 fsnotify_link_count(target);
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 431b7fc605c9..7d7856359920 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -86,7 +86,7 @@ void free_huge_page(struct page *page);
 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
 #endif

-extern unsigned long hugepages_treat_as_movable;
+extern int hugepages_treat_as_movable;
 extern int sysctl_hugetlb_shm_group;
 extern struct list_head huge_boot_pages;

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 7c95af8d552c..fb212e1d700d 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -403,10 +403,9 @@ void memcg_update_array_size(int num_groups);
 struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep);
 void __memcg_kmem_put_cache(struct kmem_cache *cachep);

-int __memcg_charge_slab(struct kmem_cache *cachep, gfp_t gfp, int order);
-void __memcg_uncharge_slab(struct kmem_cache *cachep, int order);
-
-int __memcg_cleanup_cache_params(struct kmem_cache *s);
+int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp,
+                      unsigned long nr_pages);
+void memcg_uncharge_kmem(struct mem_cgroup *memcg, unsigned long nr_pages);

 /**
  * memcg_kmem_newpage_charge: verify if a new kmem allocation is allowed.
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 237b3ba29225..65db4aee738a 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -138,7 +138,6 @@ extern unsigned int kobjsize(const void *objp);
 #define VM_ACCOUNT     0x00100000      /* Is a VM accounted object */
 #define VM_NORESERVE   0x00200000      /* should the VM suppress accounting */
 #define VM_HUGETLB     0x00400000      /* Huge TLB Page VM */
-#define VM_NONLINEAR   0x00800000      /* Is non-linear (remap_file_pages) */
 #define VM_ARCH_1      0x01000000      /* Architecture-specific flag */
 #define VM_ARCH_2      0x02000000
 #define VM_DONTDUMP    0x04000000      /* Do not include in the core dump */
@@ -206,21 +205,19 @@ extern unsigned int kobjsize(const void *objp);
 extern pgprot_t protection_map[16];

 #define FAULT_FLAG_WRITE        0x01   /* Fault was a write access */
-#define FAULT_FLAG_NONLINEAR    0x02   /* Fault was via a nonlinear mapping */
-#define FAULT_FLAG_MKWRITE      0x04   /* Fault was mkwrite of existing pte */
-#define FAULT_FLAG_ALLOW_RETRY  0x08   /* Retry fault if blocking */
-#define FAULT_FLAG_RETRY_NOWAIT 0x10   /* Don't drop mmap_sem and wait when retrying */
-#define FAULT_FLAG_KILLABLE     0x20   /* The fault task is in SIGKILL killable region */
-#define FAULT_FLAG_TRIED        0x40   /* second try */
-#define FAULT_FLAG_USER         0x80   /* The fault originated in userspace */
+#define FAULT_FLAG_MKWRITE      0x02   /* Fault was mkwrite of existing pte */
+#define FAULT_FLAG_ALLOW_RETRY  0x04   /* Retry fault if blocking */
+#define FAULT_FLAG_RETRY_NOWAIT 0x08   /* Don't drop mmap_sem and wait when retrying */
+#define FAULT_FLAG_KILLABLE     0x10   /* The fault task is in SIGKILL killable region */
+#define FAULT_FLAG_TRIED        0x20   /* Second try */
+#define FAULT_FLAG_USER         0x40   /* The fault originated in userspace */

 /*
  * vm_fault is filled by the the pagefault handler and passed to the vma's
  * ->fault function. The vma's ->fault is responsible for returning a bitmask
  * of VM_FAULT_xxx flags that give details about how the fault was handled.
  *
- * pgoff should be used in favour of virtual_address, if possible. If pgoff
- * is used, one may implement ->remap_pages to get nonlinear mapping support.
+ * pgoff should be used in favour of virtual_address, if possible.
  */
 struct vm_fault {
         unsigned int flags;            /* FAULT_FLAG_xxx flags */
@@ -287,10 +284,6 @@ struct vm_operations_struct {
         struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
                                         unsigned long addr);
 #endif
-        /* called by sys_remap_file_pages() to populate non-linear mapping */
-        int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
-                           unsigned long size, pgoff_t pgoff);
-
         /*
          * Called by vm_normal_page() for special PTEs to find the
          * page for @addr. This is useful if the default behavior
@@ -454,6 +447,12 @@ static inline struct page *compound_head_by_tail(struct page *tail)
         return tail;
 }

+/*
+ * Since either compound page could be dismantled asynchronously in THP
+ * or we access asynchronously arbitrary positioned struct page, there
+ * would be tail flag race. To handle this race, we should call
+ * smp_rmb() before checking tail flag. compound_head_by_tail() did it.
+ */
 static inline struct page *compound_head(struct page *page)
 {
         if (unlikely(PageTail(page)))
@@ -462,6 +461,18 @@ static inline struct page *compound_head(struct page *page)
 }

 /*
+ * If we access compound page synchronously such as access to
+ * allocated page, there is no need to handle tail flag race, so we can
+ * check tail flag directly without any synchronization primitive.
+ */
+static inline struct page *compound_head_fast(struct page *page)
+{
+        if (unlikely(PageTail(page)))
+                return page->first_page;
+        return page;
+}
+
+/*
  * The atomic page->_mapcount, starts from -1: so that transitions
  * both from it and to it can be tracked, using atomic_inc_and_test
  * and atomic_add_negative(-1).
@@ -539,7 +550,14 @@ static inline void get_page(struct page *page)
 static inline struct page *virt_to_head_page(const void *x)
 {
         struct page *page = virt_to_page(x);
-        return compound_head(page);
+
+        /*
+         * We don't need to worry about synchronization of tail flag
+         * when we call virt_to_head_page() since it is only called for
+         * already allocated page and this page won't be freed until
+         * this virt_to_head_page() is finished. So use _fast variant.
+         */
+        return compound_head_fast(page);
 }

 /*
@@ -1129,7 +1147,6 @@ extern void user_shm_unlock(size_t, struct user_struct *);
  * Parameter block passed down to zap_pte_range in exceptional cases.
  */
 struct zap_details {
-        struct vm_area_struct *nonlinear_vma; /* Check page->index if set */
         struct address_space *check_mapping;  /* Check page->mapping if set */
         pgoff_t first_index;                  /* Lowest page->index to unmap */
         pgoff_t last_index;                   /* Highest page->index to unmap */
@@ -1785,12 +1802,6 @@ struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node,
         for (vma = vma_interval_tree_iter_first(root, start, last);    \
              vma; vma = vma_interval_tree_iter_next(vma, start, last))

-static inline void vma_nonlinear_insert(struct vm_area_struct *vma,
-                                        struct list_head *list)
-{
-        list_add_tail(&vma->shared.nonlinear, list);
-}
-
 void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
                                    struct rb_root *root);
 void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 6d34aa266a8c..07c8bd3f7b48 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -273,15 +273,11 @@ struct vm_area_struct {

         /*
          * For areas with an address space and backing store,
-         * linkage into the address_space->i_mmap interval tree, or
-         * linkage of vma in the address_space->i_mmap_nonlinear list.
+         * linkage into the address_space->i_mmap interval tree.
          */
-        union {
-                struct {
-                        struct rb_node rb;
-                        unsigned long rb_subtree_last;
-                } linear;
-                struct list_head nonlinear;
+        struct {
+                struct rb_node rb;
+                unsigned long rb_subtree_last;
         } shared;

         /*
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index d9d7e7e56352..b38f559130d5 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -246,7 +246,6 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
  * arg: passed to rmap_one() and invalid_vma()
  * rmap_one: executed on each vma where page is mapped
  * done: for checking traversing termination condition
- * file_nonlinear: for handling file nonlinear mapping
  * anon_lock: for getting anon_lock by optimized way rather than default
  * invalid_vma: for skipping uninterested vma
  */
@@ -255,7 +254,6 @@ struct rmap_walk_control {
         int (*rmap_one)(struct page *page, struct vm_area_struct *vma,
                         unsigned long addr, void *arg);
         int (*done)(struct page *page);
-        int (*file_nonlinear)(struct page *, struct address_space *, void *arg);
         struct anon_vma *(*anon_lock)(struct page *page);
         bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
 };
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 9a139b637069..2e3b448cfa2d 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -116,9 +116,8 @@ struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
                         unsigned long,
                         void (*)(void *));
 #ifdef CONFIG_MEMCG_KMEM
-struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *,
-                                           struct kmem_cache *,
-                                           const char *);
+void memcg_create_kmem_cache(struct mem_cgroup *, struct kmem_cache *);
+void memcg_destroy_kmem_caches(struct mem_cgroup *);
 #endif
 void kmem_cache_destroy(struct kmem_cache *);
 int kmem_cache_shrink(struct kmem_cache *);
@@ -491,7 +490,6 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
  * Child caches will hold extra metadata needed for its operation. Fields are:
  *
  * @memcg: pointer to the memcg this cache belongs to
- * @list: list_head for the list of all caches in this memcg
  * @root_cache: pointer to the global, root cache, this cache was derived from
  */
 struct memcg_cache_params {
@@ -503,7 +501,6 @@ struct memcg_cache_params {
         };
         struct {
                 struct mem_cgroup *memcg;
-                struct list_head list;
                 struct kmem_cache *root_cache;
         };
 };
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index 6adfb7bfbf44..50cbc876be56 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -54,7 +54,7 @@ static inline pgoff_t swp_offset(swp_entry_t entry)
 /* check whether a pte points to a swap entry */
 static inline int is_swap_pte(pte_t pte)
 {
-        return !pte_none(pte) && !pte_present_nonuma(pte) && !pte_file(pte);
+        return !pte_none(pte) && !pte_present_nonuma(pte);
 }
 #endif

@@ -66,7 +66,6 @@ static inline swp_entry_t pte_to_swp_entry(pte_t pte)
 {
         swp_entry_t arch_entry;

-        BUG_ON(pte_file(pte));
         if (pte_swp_soft_dirty(pte))
                 pte = pte_swp_clear_soft_dirty(pte);
         arch_entry = __pte_to_swp_entry(pte);
@@ -82,7 +81,6 @@ static inline pte_t swp_entry_to_pte(swp_entry_t entry)
         swp_entry_t arch_entry;

         arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
-        BUG_ON(pte_file(__swp_entry_to_pte(arch_entry)));
         return __swp_entry_to_pte(arch_entry);
 }
