Diffstat (limited to 'mm')
-rw-r--r--  mm/shmem.c    | 43
-rw-r--r--  mm/swapfile.c |  4
-rw-r--r--  mm/util.c     | 20
-rw-r--r--  mm/vmalloc.c  | 12
-rw-r--r--  mm/vmscan.c   | 28
5 files changed, 66 insertions(+), 41 deletions(-)
diff --git a/mm/shmem.c b/mm/shmem.c
index 19d566ccdeea..4103a239ce84 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -169,13 +169,13 @@ static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
  */
 static inline int shmem_acct_size(unsigned long flags, loff_t size)
 {
-	return (flags & VM_ACCOUNT) ?
-		security_vm_enough_memory_kern(VM_ACCT(size)) : 0;
+	return (flags & VM_NORESERVE) ?
+		0 : security_vm_enough_memory_kern(VM_ACCT(size));
 }
 
 static inline void shmem_unacct_size(unsigned long flags, loff_t size)
 {
-	if (flags & VM_ACCOUNT)
+	if (!(flags & VM_NORESERVE))
 		vm_unacct_memory(VM_ACCT(size));
 }
 
@@ -187,13 +187,13 @@ static inline void shmem_unacct_size(unsigned long flags, loff_t size)
  */
 static inline int shmem_acct_block(unsigned long flags)
 {
-	return (flags & VM_ACCOUNT) ?
-		0 : security_vm_enough_memory_kern(VM_ACCT(PAGE_CACHE_SIZE));
+	return (flags & VM_NORESERVE) ?
+		security_vm_enough_memory_kern(VM_ACCT(PAGE_CACHE_SIZE)) : 0;
 }
 
 static inline void shmem_unacct_blocks(unsigned long flags, long pages)
 {
-	if (!(flags & VM_ACCOUNT))
+	if (flags & VM_NORESERVE)
 		vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
 }
 
@@ -1515,8 +1515,8 @@ static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
 	return 0;
 }
 
-static struct inode *
-shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
+static struct inode *shmem_get_inode(struct super_block *sb, int mode,
+				     dev_t dev, unsigned long flags)
 {
 	struct inode *inode;
 	struct shmem_inode_info *info;
@@ -1537,6 +1537,7 @@ shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
 		info = SHMEM_I(inode);
 		memset(info, 0, (char *)inode - (char *)info);
 		spin_lock_init(&info->lock);
+		info->flags = flags & VM_NORESERVE;
 		INIT_LIST_HEAD(&info->swaplist);
 
 		switch (mode & S_IFMT) {
@@ -1779,9 +1780,10 @@ static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
 static int
 shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
 {
-	struct inode *inode = shmem_get_inode(dir->i_sb, mode, dev);
+	struct inode *inode;
 	int error = -ENOSPC;
 
+	inode = shmem_get_inode(dir->i_sb, mode, dev, VM_NORESERVE);
 	if (inode) {
 		error = security_inode_init_security(inode, dir, NULL, NULL,
 						     NULL);
@@ -1920,7 +1922,7 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
 	if (len > PAGE_CACHE_SIZE)
 		return -ENAMETOOLONG;
 
-	inode = shmem_get_inode(dir->i_sb, S_IFLNK|S_IRWXUGO, 0);
+	inode = shmem_get_inode(dir->i_sb, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE);
 	if (!inode)
 		return -ENOSPC;
 
@@ -2332,7 +2334,7 @@ static int shmem_fill_super(struct super_block *sb,
 	sb->s_flags |= MS_POSIXACL;
 #endif
 
-	inode = shmem_get_inode(sb, S_IFDIR | sbinfo->mode, 0);
+	inode = shmem_get_inode(sb, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
 	if (!inode)
 		goto failed;
 	inode->i_uid = sbinfo->uid;
@@ -2574,12 +2576,12 @@ int shmem_unuse(swp_entry_t entry, struct page *page)
 	return 0;
 }
 
-#define shmem_file_operations ramfs_file_operations
-#define shmem_vm_ops generic_file_vm_ops
-#define shmem_get_inode ramfs_get_inode
-#define shmem_acct_size(a, b) 0
-#define shmem_unacct_size(a, b) do {} while (0)
+#define shmem_vm_ops generic_file_vm_ops
+#define shmem_file_operations ramfs_file_operations
+#define shmem_get_inode(sb, mode, dev, flags) ramfs_get_inode(sb, mode, dev)
+#define shmem_acct_size(flags, size) 0
+#define shmem_unacct_size(flags, size) do {} while (0)
 #define SHMEM_MAX_BYTES LLONG_MAX
 
 #endif /* CONFIG_SHMEM */
 
@@ -2589,7 +2591,7 @@ int shmem_unuse(swp_entry_t entry, struct page *page)
2589 * shmem_file_setup - get an unlinked file living in tmpfs 2591 * shmem_file_setup - get an unlinked file living in tmpfs
2590 * @name: name for dentry (to be seen in /proc/<pid>/maps 2592 * @name: name for dentry (to be seen in /proc/<pid>/maps
2591 * @size: size to be set for the file 2593 * @size: size to be set for the file
2592 * @flags: vm_flags 2594 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
2593 */ 2595 */
2594struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags) 2596struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
2595{ 2597{
@@ -2623,13 +2625,10 @@ struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
 		goto put_dentry;
 
 	error = -ENOSPC;
-	inode = shmem_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0);
+	inode = shmem_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0, flags);
 	if (!inode)
 		goto close_file;
 
-#ifdef CONFIG_SHMEM
-	SHMEM_I(inode)->flags = (flags & VM_NORESERVE) ? 0 : VM_ACCOUNT;
-#endif
 	d_instantiate(dentry, inode);
 	inode->i_size = size;
 	inode->i_nlink = 0;	/* It is unlinked */
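
Taken together, the shmem.c hunks make full-size pre-accounting the default and skip it only when VM_NORESERVE is set, with the flag now stored on the inode by shmem_get_inode() instead of being patched in afterwards. A minimal sketch of how a hypothetical caller would exercise the two accounting modes, using only helpers that appear in the hunks above (the wrapper itself is illustrative, not part of the commit):

	/* Sketch: size-accounted (default) vs. lazily accounted (VM_NORESERVE)
	 * tmpfs inode creation, assuming the post-patch helper semantics. */
	static struct inode *sketch_create(struct super_block *sb, loff_t size,
					   unsigned long flags)
	{
		struct inode *inode;

		/* Charges VM_ACCT(size) against the commit limit unless
		 * VM_NORESERVE is set, in which case it is a no-op. */
		if (shmem_acct_size(flags, size))
			return NULL;

		inode = shmem_get_inode(sb, S_IFREG | S_IRWXUGO, 0, flags);
		if (!inode)
			shmem_unacct_size(flags, size);	/* undo the charge */
		return inode;
	}
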
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 7e6304dfafab..312fafe0ab6e 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -635,7 +635,7 @@ int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p)
 
 		if (!bdev) {
 			if (bdev_p)
-				*bdev_p = sis->bdev;
+				*bdev_p = bdget(sis->bdev->bd_dev);
 
 			spin_unlock(&swap_lock);
 			return i;
@@ -647,7 +647,7 @@ int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p)
 					struct swap_extent, list);
 			if (se->start_block == offset) {
 				if (bdev_p)
-					*bdev_p = sis->bdev;
+					*bdev_p = bdget(sis->bdev->bd_dev);
 
 				spin_unlock(&swap_lock);
 				bdput(bdev);
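
The swapfile.c change matters for reference counting: bdget() takes a reference on the swap device's block_device, so a caller of swap_type_of() that passes a non-NULL bdev_p now owns that reference and is expected to drop it with bdput() when done. A hedged sketch of the caller side (the surrounding code is illustrative, not from this commit):

	/* Sketch: consuming the reference returned via bdev_p. */
	struct block_device *bdev;
	int type = swap_type_of(device, offset, &bdev);

	if (type >= 0) {
		/* ... use bdev, e.g. for hibernation I/O ... */
		bdput(bdev);	/* drop the reference taken by bdget() */
	}
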
diff --git a/mm/util.c b/mm/util.c
index cb00b748ce47..37eaccdf3054 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -129,6 +129,26 @@ void *krealloc(const void *p, size_t new_size, gfp_t flags)
 }
 EXPORT_SYMBOL(krealloc);
 
+/**
+ * kzfree - like kfree but zero memory
+ * @p: object to free memory of
+ *
+ * The memory of the object @p points to is zeroed before freed.
+ * If @p is %NULL, kzfree() does nothing.
+ */
+void kzfree(const void *p)
+{
+	size_t ks;
+	void *mem = (void *)p;
+
+	if (unlikely(ZERO_OR_NULL_PTR(mem)))
+		return;
+	ks = ksize(mem);
+	memset(mem, 0, ks);
+	kfree(mem);
+}
+EXPORT_SYMBOL(kzfree);
+
 /*
  * strndup_user - duplicate an existing string from user space
  * @s: The string to duplicate
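
The new kzfree() clears the whole underlying allocation (ksize() bytes, which can be larger than the size originally requested) before handing it to kfree(), which is what callers holding key material or other sensitive data want. A brief usage sketch (the buffer and its contents are hypothetical):

	/* Sketch: releasing a buffer that held sensitive data. */
	u8 *key = kmalloc(32, GFP_KERNEL);
	if (!key)
		return -ENOMEM;

	/* ... derive and use the key ... */

	kzfree(key);	/* zeroes the full object, then frees it */
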
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index f83a70167b99..11a929872ebd 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -323,6 +323,7 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
 	unsigned long addr;
 	int purged = 0;
 
+	BUG_ON(!size);
 	BUG_ON(size & ~PAGE_MASK);
 
 	va = kmalloc_node(sizeof(struct vmap_area),
@@ -334,6 +335,9 @@ retry:
 	addr = ALIGN(vstart, align);
 
 	spin_lock(&vmap_area_lock);
+	if (addr + size - 1 < addr)
+		goto overflow;
+
 	/* XXX: could have a last_hole cache */
 	n = vmap_area_root.rb_node;
 	if (n) {
@@ -365,6 +369,8 @@ retry:
 
 		while (addr + size > first->va_start && addr + size <= vend) {
 			addr = ALIGN(first->va_end + PAGE_SIZE, align);
+			if (addr + size - 1 < addr)
+				goto overflow;
 
 			n = rb_next(&first->rb_node);
 			if (n)
@@ -375,6 +381,7 @@ retry:
 	}
 found:
 	if (addr + size > vend) {
+overflow:
 		spin_unlock(&vmap_area_lock);
 		if (!purged) {
 			purge_vmap_area_lazy();
@@ -498,6 +505,7 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
 	static DEFINE_SPINLOCK(purge_lock);
 	LIST_HEAD(valist);
 	struct vmap_area *va;
+	struct vmap_area *n_va;
 	int nr = 0;
 
 	/*
@@ -537,7 +545,7 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
 
 	if (nr) {
 		spin_lock(&vmap_area_lock);
-		list_for_each_entry(va, &valist, purge_list)
+		list_for_each_entry_safe(va, n_va, &valist, purge_list)
 			__free_vmap_area(va);
 		spin_unlock(&vmap_area_lock);
 	}
@@ -1012,6 +1020,8 @@ void __init vmalloc_init(void)
 void unmap_kernel_range(unsigned long addr, unsigned long size)
 {
 	unsigned long end = addr + size;
+
+	flush_cache_vunmap(addr, end);
 	vunmap_page_range(addr, end);
 	flush_tlb_kernel_range(addr, end);
 }
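
The two new `addr + size - 1 < addr` checks in alloc_vmap_area() reject candidate ranges whose end wraps around the top of the address space, which the search loop could otherwise walk past; the `overflow:` label funnels both cases into the existing retry-after-purge path. A small worked example of the wraparound test (values chosen for a 32-bit address space, purely illustrative):

	/* addr = 0xfffff000, size = 0x2000 (32-bit arithmetic):
	 *   addr + size     = 0x00001000  (wrapped past 4 GiB)
	 *   addr + size - 1 = 0x00000fff  < addr, so the range is rejected */
	if (addr + size - 1 < addr)
		goto overflow;
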
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 9a27c44aa327..6177e3bcd66b 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2057,31 +2057,31 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
 				      int pass, struct scan_control *sc)
 {
 	struct zone *zone;
-	unsigned long nr_to_scan, ret = 0;
-	enum lru_list l;
+	unsigned long ret = 0;
 
 	for_each_zone(zone) {
+		enum lru_list l;
 
 		if (!populated_zone(zone))
 			continue;
-
 		if (zone_is_all_unreclaimable(zone) && prio != DEF_PRIORITY)
 			continue;
 
 		for_each_evictable_lru(l) {
+			enum zone_stat_item ls = NR_LRU_BASE + l;
+			unsigned long lru_pages = zone_page_state(zone, ls);
+
 			/* For pass = 0, we don't shrink the active list */
-			if (pass == 0 &&
-			    (l == LRU_ACTIVE || l == LRU_ACTIVE_FILE))
+			if (pass == 0 && (l == LRU_ACTIVE_ANON ||
+						l == LRU_ACTIVE_FILE))
 				continue;
 
-			zone->lru[l].nr_scan +=
-				(zone_page_state(zone, NR_LRU_BASE + l)
-								>> prio) + 1;
+			zone->lru[l].nr_scan += (lru_pages >> prio) + 1;
 			if (zone->lru[l].nr_scan >= nr_pages || pass > 3) {
+				unsigned long nr_to_scan;
+
 				zone->lru[l].nr_scan = 0;
-				nr_to_scan = min(nr_pages,
-					zone_page_state(zone,
-							NR_LRU_BASE + l));
+				nr_to_scan = min(nr_pages, lru_pages);
 				ret += shrink_list(l, nr_to_scan, zone,
 								sc, prio);
 				if (ret >= nr_pages)
@@ -2089,7 +2089,6 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
 			}
 		}
 	}
-
 	return ret;
 }
 
@@ -2112,7 +2111,6 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
 		.may_swap = 0,
 		.swap_cluster_max = nr_pages,
 		.may_writepage = 1,
-		.swappiness = vm_swappiness,
 		.isolate_pages = isolate_pages_global,
 	};
 
@@ -2146,10 +2144,8 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
 		int prio;
 
 		/* Force reclaiming mapped pages in the passes #3 and #4 */
-		if (pass > 2) {
+		if (pass > 2)
 			sc.may_swap = 1;
-			sc.swappiness = 100;
-		}
 
 		for (prio = DEF_PRIORITY; prio >= 0; prio--) {
 			unsigned long nr_to_scan = nr_pages - ret;
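
In the reworked shrink_all_zones(), each pass adds a priority-scaled share of an LRU list's size to its nr_scan counter, and the list is only shrunk once that counter reaches nr_pages (or unconditionally once pass > 3). A rough worked example of the accumulation, with made-up numbers:

	/* Example: lru_pages = 12000, prio = 3, so each call adds
	 *   (12000 >> 3) + 1 = 1501
	 * to zone->lru[l].nr_scan; once nr_scan >= nr_pages the counter is
	 * reset and min(nr_pages, lru_pages) pages go to shrink_list(). */
	zone->lru[l].nr_scan += (lru_pages >> prio) + 1;
	if (zone->lru[l].nr_scan >= nr_pages || pass > 3) {
		zone->lru[l].nr_scan = 0;
		nr_to_scan = min(nr_pages, lru_pages);
	}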