Diffstat (limited to 'mm')
-rw-r--r--   mm/filemap.c    15
-rw-r--r--   mm/memory.c      4
-rw-r--r--   mm/mmap.c        2
-rw-r--r--   mm/mprotect.c    2
-rw-r--r--   mm/mremap.c      2
-rw-r--r--   mm/msync.c       2
-rw-r--r--   mm/nommu.c       2
-rw-r--r--   mm/pdflush.c    16
-rw-r--r--   mm/slab.c        2
-rw-r--r--   mm/slub.c       20
-rw-r--r--   mm/vmalloc.c     5
-rw-r--r--   mm/vmscan.c      4
-rw-r--r--   mm/vmstat.c      4
13 files changed, 49 insertions, 31 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index f3e5f8944d17..f5769b4dc075 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1766,7 +1766,7 @@ int should_remove_suid(struct dentry *dentry)
 	if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
 		kill |= ATTR_KILL_SGID;
 
-	if (unlikely(kill && !capable(CAP_FSETID)))
+	if (unlikely(kill && !capable(CAP_FSETID) && S_ISREG(mode)))
 		return kill;
 
 	return 0;
@@ -2140,19 +2140,24 @@ EXPORT_SYMBOL(generic_file_direct_write);
  * Find or create a page at the given pagecache position. Return the locked
  * page. This function is specifically for buffered writes.
  */
-struct page *__grab_cache_page(struct address_space *mapping, pgoff_t index)
+struct page *grab_cache_page_write_begin(struct address_space *mapping,
+					pgoff_t index, unsigned flags)
 {
 	int status;
 	struct page *page;
+	gfp_t gfp_notmask = 0;
+	if (flags & AOP_FLAG_NOFS)
+		gfp_notmask = __GFP_FS;
 repeat:
 	page = find_lock_page(mapping, index);
 	if (likely(page))
 		return page;
 
-	page = page_cache_alloc(mapping);
+	page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~gfp_notmask);
 	if (!page)
 		return NULL;
-	status = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
+	status = add_to_page_cache_lru(page, mapping, index,
+						GFP_KERNEL & ~gfp_notmask);
 	if (unlikely(status)) {
 		page_cache_release(page);
 		if (status == -EEXIST)
@@ -2161,7 +2166,7 @@ repeat:
 	}
 	return page;
 }
-EXPORT_SYMBOL(__grab_cache_page);
+EXPORT_SYMBOL(grab_cache_page_write_begin);
 
 static ssize_t generic_perform_write(struct file *file,
 				struct iov_iter *i, loff_t pos)
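
The renamed grab_cache_page_write_begin() threads an allocation-context flag down from ->write_begin: a filesystem that must not re-enter itself (for example while holding its own locks) passes AOP_FLAG_NOFS, and the helper masks __GFP_FS out of the page-cache allocations, as the hunk above shows. A minimal sketch of such a caller against the 2.6.28-era ->write_begin signature (simplefs_write_begin is a hypothetical name):

/*
 * Hypothetical ->write_begin: runs where page-cache allocations must
 * not recurse into the filesystem, so it forwards AOP_FLAG_NOFS and
 * lets the helper clear __GFP_FS.
 */
static int simplefs_write_begin(struct file *file,
		struct address_space *mapping, loff_t pos,
		unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	struct page *page;

	page = grab_cache_page_write_begin(mapping, index,
					   flags | AOP_FLAG_NOFS);
	if (!page)
		return -ENOMEM;
	*pagep = page;
	return 0;
}
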
diff --git a/mm/memory.c b/mm/memory.c
index 0a2010a9518c..7b9db658aca2 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2266,7 +2266,7 @@ int vmtruncate(struct inode * inode, loff_t offset)
 		unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
 	}
 
-	if (inode->i_op && inode->i_op->truncate)
+	if (inode->i_op->truncate)
 		inode->i_op->truncate(inode);
 	return 0;
 
@@ -2286,7 +2286,7 @@ int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
 	 * a way to truncate a range of blocks (punch a hole) -
 	 * we should return failure right now.
 	 */
-	if (!inode->i_op || !inode->i_op->truncate_range)
+	if (!inode->i_op->truncate_range)
 		return -ENOSYS;
 
 	mutex_lock(&inode->i_mutex);
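
Dropping the inode->i_op NULL checks here (and in nommu.c below) leans on a VFS invariant from the same series: inode initialization always installs an operations table, even an empty one, so only the individual method pointers need testing. A rough sketch of that invariant (helper name hypothetical; cf. the empty_iops table in fs/inode.c):

static const struct inode_operations empty_iops;

/* Sketch: every inode leaves initialization with a non-NULL i_op. */
static void example_inode_init(struct inode *inode)
{
	inode->i_op = &empty_iops;
}
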
diff --git a/mm/mmap.c b/mm/mmap.c
index d4855a682ab6..2c778fcfd9bd 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -3,7 +3,7 @@
  *
  * Written by obz.
  *
- * Address space accounting code	<alan@redhat.com>
+ * Address space accounting code	<alan@lxorguk.ukuu.org.uk>
  */
 
 #include <linux/slab.h>
diff --git a/mm/mprotect.c b/mm/mprotect.c
index fded06f923f4..cfb4c4852062 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -4,7 +4,7 @@
  * (C) Copyright 1994 Linus Torvalds
  * (C) Copyright 2002 Christoph Hellwig
  *
- * Address space accounting code	<alan@redhat.com>
+ * Address space accounting code	<alan@lxorguk.ukuu.org.uk>
  * (C) Copyright 2002 Red Hat Inc, All Rights Reserved
  */
 
diff --git a/mm/mremap.c b/mm/mremap.c
index 58a2908f42f5..646de959aa58 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -3,7 +3,7 @@
  *
  * (C) Copyright 1996 Linus Torvalds
  *
- * Address space accounting code	<alan@redhat.com>
+ * Address space accounting code	<alan@lxorguk.ukuu.org.uk>
  * (C) Copyright 2002 Red Hat Inc, All Rights Reserved
  */
 
diff --git a/mm/msync.c b/mm/msync.c
index 144a7570535d..07dae08cf31c 100644
--- a/mm/msync.c
+++ b/mm/msync.c
@@ -82,7 +82,7 @@ asmlinkage long sys_msync(unsigned long start, size_t len, int flags)
 				(vma->vm_flags & VM_SHARED)) {
 			get_file(file);
 			up_read(&mm->mmap_sem);
-			error = do_fsync(file, 0);
+			error = vfs_fsync(file, file->f_path.dentry, 0);
 			fput(file);
 			if (error || start >= end)
 				goto out;
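
do_fsync() is replaced here by the exported vfs_fsync() helper, which at this point takes the file, its dentry, and a datasync flag. A sketch of the calling convention, using the same arguments as the hunk above (example_flush is a hypothetical wrapper):

#include <linux/fs.h>

/* Sketch: flush data and metadata, as fsync(2) would (datasync=0). */
static int example_flush(struct file *file)
{
	return vfs_fsync(file, file->f_path.dentry, 0);
}
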
diff --git a/mm/nommu.c b/mm/nommu.c
index 7695dc850785..1c28ea3a4e9c 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -86,7 +86,7 @@ do_expand:
 	i_size_write(inode, offset);
 
 out_truncate:
-	if (inode->i_op && inode->i_op->truncate)
+	if (inode->i_op->truncate)
 		inode->i_op->truncate(inode);
 	return 0;
 out_sig:
diff --git a/mm/pdflush.c b/mm/pdflush.c
index a0a14c4d5072..15de509b68fd 100644
--- a/mm/pdflush.c
+++ b/mm/pdflush.c
@@ -172,7 +172,16 @@ static int __pdflush(struct pdflush_work *my_work)
 static int pdflush(void *dummy)
 {
 	struct pdflush_work my_work;
-	cpumask_t cpus_allowed;
+	cpumask_var_t cpus_allowed;
+
+	/*
+	 * Since the caller doesn't even check kthread_run() worked, let's not
+	 * freak out too much if this fails.
+	 */
+	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
+		printk(KERN_WARNING "pdflush failed to allocate cpumask\n");
+		return 0;
+	}
 
 	/*
 	 * pdflush can spend a lot of time doing encryption via dm-crypt.  We
@@ -187,8 +196,9 @@ static int pdflush(void *dummy)
 	 * This is needed as pdflush's are dynamically created and destroyed.
 	 * The boottime pdflush's are easily placed w/o these 2 lines.
 	 */
-	cpuset_cpus_allowed(current, &cpus_allowed);
-	set_cpus_allowed_ptr(current, &cpus_allowed);
+	cpuset_cpus_allowed(current, cpus_allowed);
+	set_cpus_allowed_ptr(current, cpus_allowed);
+	free_cpumask_var(cpus_allowed);
 
 	return __pdflush(&my_work);
 }
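
The cpumask_var_t conversion takes the mask off the kernel stack: with CONFIG_CPUMASK_OFFSTACK=y it becomes a pointer that must be explicitly allocated and freed, which is why the hunks above add alloc_cpumask_var()/free_cpumask_var(). The general pattern, as a self-contained sketch (example_use_mask is hypothetical):

#include <linux/cpumask.h>
#include <linux/gfp.h>

/* Sketch: the off-stack cpumask pattern adopted above. */
static int example_use_mask(void)
{
	cpumask_var_t mask;	/* pointer-sized when OFFSTACK=y */

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(mask, cpu_online_mask);	/* use like a cpumask ptr */

	free_cpumask_var(mask);
	return 0;
}
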
diff --git a/mm/slab.c b/mm/slab.c
index c65c52dc78d4..dae716b32915 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2166,7 +2166,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 
 	/*
 	 * We use cache_chain_mutex to ensure a consistent view of
-	 * cpu_online_map as well.  Please see cpuup_callback
+	 * cpu_online_mask as well.  Please see cpuup_callback
 	 */
 	get_online_cpus();
 	mutex_lock(&cache_chain_mutex);
diff --git a/mm/slub.c b/mm/slub.c
index 4fac7bbb029a..509e96f411fc 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2001,7 +2001,7 @@ static DEFINE_PER_CPU(struct kmem_cache_cpu,
 			      kmem_cache_cpu)[NR_KMEM_CACHE_CPU];
 
 static DEFINE_PER_CPU(struct kmem_cache_cpu *, kmem_cache_cpu_free);
-static cpumask_t kmem_cach_cpu_free_init_once = CPU_MASK_NONE;
+static DECLARE_BITMAP(kmem_cach_cpu_free_init_once, CONFIG_NR_CPUS);
 
 static struct kmem_cache_cpu *alloc_kmem_cache_cpu(struct kmem_cache *s,
 							int cpu, gfp_t flags)
@@ -2076,13 +2076,13 @@ static void init_alloc_cpu_cpu(int cpu)
 {
 	int i;
 
-	if (cpu_isset(cpu, kmem_cach_cpu_free_init_once))
+	if (cpumask_test_cpu(cpu, to_cpumask(kmem_cach_cpu_free_init_once)))
 		return;
 
 	for (i = NR_KMEM_CACHE_CPU - 1; i >= 0; i--)
 		free_kmem_cache_cpu(&per_cpu(kmem_cache_cpu, cpu)[i], cpu);
 
-	cpu_set(cpu, kmem_cach_cpu_free_init_once);
+	cpumask_set_cpu(cpu, to_cpumask(kmem_cach_cpu_free_init_once));
 }
 
 static void __init init_alloc_cpu(void)
@@ -3518,7 +3518,7 @@ struct location {
 	long max_time;
 	long min_pid;
 	long max_pid;
-	cpumask_t cpus;
+	DECLARE_BITMAP(cpus, NR_CPUS);
 	nodemask_t nodes;
 };
 
@@ -3593,7 +3593,8 @@ static int add_location(struct loc_track *t, struct kmem_cache *s,
 		if (track->pid > l->max_pid)
 			l->max_pid = track->pid;
 
-		cpu_set(track->cpu, l->cpus);
+		cpumask_set_cpu(track->cpu,
+				to_cpumask(l->cpus));
 	}
 	node_set(page_to_nid(virt_to_page(track)), l->nodes);
 	return 1;
@@ -3623,8 +3624,8 @@ static int add_location(struct loc_track *t, struct kmem_cache *s,
 		l->max_time = age;
 		l->min_pid = track->pid;
 		l->max_pid = track->pid;
-		cpus_clear(l->cpus);
-		cpu_set(track->cpu, l->cpus);
+		cpumask_clear(to_cpumask(l->cpus));
+		cpumask_set_cpu(track->cpu, to_cpumask(l->cpus));
 		nodes_clear(l->nodes);
 		node_set(page_to_nid(virt_to_page(track)), l->nodes);
 		return 1;
@@ -3705,11 +3706,12 @@ static int list_locations(struct kmem_cache *s, char *buf,
 			len += sprintf(buf + len, " pid=%ld",
 				l->min_pid);
 
-		if (num_online_cpus() > 1 && !cpus_empty(l->cpus) &&
+		if (num_online_cpus() > 1 &&
+				!cpumask_empty(to_cpumask(l->cpus)) &&
 				len < PAGE_SIZE - 60) {
 			len += sprintf(buf + len, " cpus=");
 			len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50,
-					l->cpus);
+						 to_cpumask(l->cpus));
 		}
 
 		if (num_online_nodes() > 1 && !nodes_empty(l->nodes) &&
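
The slub conversion keeps fixed-size storage (a plain bitmap sized by NR_CPUS) but accesses it through the struct cpumask API via to_cpumask(), replacing the deprecated cpu_set()/cpus_empty() operations. The idiom in isolation (example_mask and example_mark_cpu are hypothetical):

#include <linux/cpumask.h>

static DECLARE_BITMAP(example_mask, NR_CPUS);

/* Sketch: record a CPU once, using the new cpumask accessors. */
static void example_mark_cpu(int cpu)
{
	if (cpumask_test_cpu(cpu, to_cpumask(example_mask)))
		return;		/* already recorded */
	cpumask_set_cpu(cpu, to_cpumask(example_mask));
}
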
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 1ddb77ba3995..7465f22fec0c 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -151,11 +151,12 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
  *
  * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
  */
-static int vmap_page_range(unsigned long addr, unsigned long end,
+static int vmap_page_range(unsigned long start, unsigned long end,
 				pgprot_t prot, struct page **pages)
 {
 	pgd_t *pgd;
 	unsigned long next;
+	unsigned long addr = start;
 	int err = 0;
 	int nr = 0;
 
@@ -167,7 +168,7 @@ static int vmap_page_range(unsigned long addr, unsigned long end,
 		if (err)
 			break;
 	} while (pgd++, addr = next, addr != end);
-	flush_cache_vmap(addr, end);
+	flush_cache_vmap(start, end);
 
 	if (unlikely(err))
 		return err;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 62e7f62fb559..d196f46c8808 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1902,7 +1902,7 @@ static int kswapd(void *p)
 	};
 	node_to_cpumask_ptr(cpumask, pgdat->node_id);
 
-	if (!cpus_empty(*cpumask))
+	if (!cpumask_empty(cpumask))
 		set_cpus_allowed_ptr(tsk, cpumask);
 	current->reclaim_state = &reclaim_state;
 
@@ -2141,7 +2141,7 @@ static int __devinit cpu_callback(struct notifier_block *nfb,
 			pg_data_t *pgdat = NODE_DATA(nid);
 			node_to_cpumask_ptr(mask, pgdat->node_id);
 
-			if (any_online_cpu(*mask) < nr_cpu_ids)
+			if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
 				/* One of our CPUs online: restore mask */
 				set_cpus_allowed_ptr(pgdat->kswapd, mask);
 		}
diff --git a/mm/vmstat.c b/mm/vmstat.c
index c3ccfda23adc..91149746bb8d 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -20,7 +20,7 @@
 DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
 EXPORT_PER_CPU_SYMBOL(vm_event_states);
 
-static void sum_vm_events(unsigned long *ret, cpumask_t *cpumask)
+static void sum_vm_events(unsigned long *ret, const struct cpumask *cpumask)
 {
 	int cpu;
 	int i;
@@ -43,7 +43,7 @@ static void sum_vm_events(unsigned long *ret, cpumask_t *cpumask)
 void all_vm_events(unsigned long *ret)
 {
 	get_online_cpus();
-	sum_vm_events(ret, &cpu_online_map);
+	sum_vm_events(ret, cpu_online_mask);
 	put_online_cpus();
 }
 EXPORT_SYMBOL_GPL(all_vm_events);
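
The vmstat change shows the other half of the migration: mask-consuming functions now take const struct cpumask * and callers pass cpu_online_mask instead of taking the address of the old cpu_online_map global. A sketch of a consumer in that style (example_count_cpus is hypothetical):

#include <linux/cpumask.h>

/* Sketch: walk only the set bits of a mask passed by const pointer. */
static unsigned int example_count_cpus(const struct cpumask *mask)
{
	unsigned int n = 0;
	int cpu;

	for_each_cpu(cpu, mask)
		n++;
	return n;
}

/* e.g. example_count_cpus(cpu_online_mask); */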