author		David Woodhouse <David.Woodhouse@intel.com>	2009-01-05 04:50:33 -0500
committer	David Woodhouse <David.Woodhouse@intel.com>	2009-01-05 04:50:33 -0500
commit		353816f43d1fb340ff2d9a911dd5d0799c09f6a5 (patch)
tree		517290fd884d286fe2971137ac89f89e3567785a /mm
parent		160bbab3000dafccbe43688e48208cecf4deb879 (diff)
parent		fe0bdec68b77020281dc814805edfe594ae89e0f (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Conflicts:
	arch/arm/mach-pxa/corgi.c
	arch/arm/mach-pxa/poodle.c
	arch/arm/mach-pxa/spitz.c
Diffstat (limited to 'mm')
-rw-r--r--	mm/Makefile	1
-rw-r--r--	mm/backing-dev.c	2
-rw-r--r--	mm/bounce.c	14
-rw-r--r--	mm/failslab.c	59
-rw-r--r--	mm/filemap.c	13
-rw-r--r--	mm/memory.c	85
-rw-r--r--	mm/mempolicy.c	9
-rw-r--r--	mm/migrate.c	68
-rw-r--r--	mm/mlock.c	45
-rw-r--r--	mm/oom_kill.c	12
-rw-r--r--	mm/page_cgroup.c	3
-rw-r--r--	mm/pdflush.c	16
-rw-r--r--	mm/shmem.c	8
-rw-r--r--	mm/slab.c	91
-rw-r--r--	mm/slob.c	2
-rw-r--r--	mm/slub.c	120
-rw-r--r--	mm/swap.c	13
-rw-r--r--	mm/swapfile.c	9
-rw-r--r--	mm/vmalloc.c	7
-rw-r--r--	mm/vmscan.c	4
-rw-r--r--	mm/vmstat.c	4
21 files changed, 376 insertions(+), 209 deletions(-)
diff --git a/mm/Makefile b/mm/Makefile
index c06b45a1ff5f..51c27709cc7c 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -28,6 +28,7 @@ obj-$(CONFIG_SLOB) += slob.o
 obj-$(CONFIG_MMU_NOTIFIER) += mmu_notifier.o
 obj-$(CONFIG_SLAB) += slab.o
 obj-$(CONFIG_SLUB) += slub.o
+obj-$(CONFIG_FAILSLAB) += failslab.o
 obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o
 obj-$(CONFIG_FS_XIP) += filemap_xip.o
 obj-$(CONFIG_MIGRATION) += migrate.o
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 2a56124dbc28..801c08b046e6 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -176,7 +176,7 @@ int bdi_register(struct backing_dev_info *bdi, struct device *parent,
 	int ret = 0;
 	struct device *dev;
 
-	if (WARN_ON(bdi->dev))
+	if (bdi->dev)	/* The driver needs to use separate queues per device */
 		goto exit;
 
 	va_start(args, fmt);
diff --git a/mm/bounce.c b/mm/bounce.c
index 06722c403058..e590272fe7a8 100644
--- a/mm/bounce.c
+++ b/mm/bounce.c
@@ -14,6 +14,7 @@
 #include <linux/hash.h>
 #include <linux/highmem.h>
 #include <linux/blktrace_api.h>
+#include <trace/block.h>
 #include <asm/tlbflush.h>
 
 #define POOL_SIZE	64
@@ -21,6 +22,8 @@
 
 static mempool_t *page_pool, *isa_page_pool;
 
+DEFINE_TRACE(block_bio_bounce);
+
 #ifdef CONFIG_HIGHMEM
 static __init int init_emergency_pool(void)
 {
@@ -195,8 +198,13 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
 		/*
 		 * irk, bounce it
 		 */
-		if (!bio)
-			bio = bio_alloc(GFP_NOIO, (*bio_orig)->bi_vcnt);
+		if (!bio) {
+			unsigned int cnt = (*bio_orig)->bi_vcnt;
+
+			bio = bio_alloc(GFP_NOIO, cnt);
+			memset(bio->bi_io_vec, 0, cnt * sizeof(struct bio_vec));
+		}
+
 
 		to = bio->bi_io_vec + i;
 
@@ -222,7 +230,7 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
 	if (!bio)
 		return;
 
-	blk_add_trace_bio(q, *bio_orig, BLK_TA_BOUNCE);
+	trace_block_bio_bounce(q, *bio_orig);
 
 	/*
 	 * at least one page was bounced, fill in possible non-highmem
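
Note on the tracing change above: blk_add_trace_bio() was blktrace-specific, while block_bio_bounce is a generic tracepoint any kernel code can probe. A minimal sketch of a consumer, assuming the probe signature from trace/block.h of this era (my_bounce_probe is a made-up name, not part of this merge):

    #include <trace/block.h>

    /* Hypothetical probe; illustrative only. */
    static void my_bounce_probe(struct request_queue *q, struct bio *bio)
    {
    	printk(KERN_DEBUG "bounced bio at sector %llu\n",
    	       (unsigned long long)bio->bi_sector);
    }

    /* DEFINE_TRACE()/DECLARE_TRACE() generate the registration helper:
     *	register_trace_block_bio_bounce(my_bounce_probe);
     */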
diff --git a/mm/failslab.c b/mm/failslab.c
new file mode 100644
index 000000000000..7c6ea6493f80
--- /dev/null
+++ b/mm/failslab.c
@@ -0,0 +1,59 @@
+#include <linux/fault-inject.h>
+
+static struct {
+	struct fault_attr attr;
+	u32 ignore_gfp_wait;
+#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
+	struct dentry *ignore_gfp_wait_file;
+#endif
+} failslab = {
+	.attr = FAULT_ATTR_INITIALIZER,
+	.ignore_gfp_wait = 1,
+};
+
+bool should_failslab(size_t size, gfp_t gfpflags)
+{
+	if (gfpflags & __GFP_NOFAIL)
+		return false;
+
+	if (failslab.ignore_gfp_wait && (gfpflags & __GFP_WAIT))
+		return false;
+
+	return should_fail(&failslab.attr, size);
+}
+
+static int __init setup_failslab(char *str)
+{
+	return setup_fault_attr(&failslab.attr, str);
+}
+__setup("failslab=", setup_failslab);
+
+#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
+
+static int __init failslab_debugfs_init(void)
+{
+	mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
+	struct dentry *dir;
+	int err;
+
+	err = init_fault_attr_dentries(&failslab.attr, "failslab");
+	if (err)
+		return err;
+	dir = failslab.attr.dentries.dir;
+
+	failslab.ignore_gfp_wait_file =
+		debugfs_create_bool("ignore-gfp-wait", mode, dir,
+				      &failslab.ignore_gfp_wait);
+
+	if (!failslab.ignore_gfp_wait_file) {
+		err = -ENOMEM;
+		debugfs_remove(failslab.ignore_gfp_wait_file);
+		cleanup_fault_attr_dentries(&failslab.attr);
+	}
+
+	return err;
+}
+
+late_initcall(failslab_debugfs_init);
+
+#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
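
Note: this new file factors the fault-injection policy out of slab.c so SLAB and SLUB share one failslab instance, configured via the failslab= boot parameter (parsed by setup_fault_attr(); conventionally interval,probability,space,times) or under debugfs. A sketch of the call pattern an allocator is expected to use (checked_alloc is an illustrative wrapper, not from this patch; the real hooks sit inside the allocators):

    #include <linux/fault-inject.h>
    #include <linux/slab.h>

    static inline void *checked_alloc(struct kmem_cache *s, size_t size,
    				      gfp_t flags)
    {
    	if (should_failslab(size, flags))	/* injected failure */
    		return NULL;
    	return kmem_cache_alloc(s, flags);
    }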
diff --git a/mm/filemap.c b/mm/filemap.c
index f3e5f8944d17..f8c69273c37f 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2140,19 +2140,24 @@ EXPORT_SYMBOL(generic_file_direct_write);
  * Find or create a page at the given pagecache position. Return the locked
  * page. This function is specifically for buffered writes.
  */
-struct page *__grab_cache_page(struct address_space *mapping, pgoff_t index)
+struct page *grab_cache_page_write_begin(struct address_space *mapping,
+					pgoff_t index, unsigned flags)
 {
 	int status;
 	struct page *page;
+	gfp_t gfp_notmask = 0;
+	if (flags & AOP_FLAG_NOFS)
+		gfp_notmask = __GFP_FS;
 repeat:
 	page = find_lock_page(mapping, index);
 	if (likely(page))
 		return page;
 
-	page = page_cache_alloc(mapping);
+	page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~gfp_notmask);
 	if (!page)
 		return NULL;
-	status = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
+	status = add_to_page_cache_lru(page, mapping, index,
+						GFP_KERNEL & ~gfp_notmask);
 	if (unlikely(status)) {
 		page_cache_release(page);
 		if (status == -EEXIST)
@@ -2161,7 +2166,7 @@ repeat:
 	}
 	return page;
 }
-EXPORT_SYMBOL(__grab_cache_page);
+EXPORT_SYMBOL(grab_cache_page_write_begin);
 
 static ssize_t generic_perform_write(struct file *file,
 				struct iov_iter *i, loff_t pos)
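
Note: for context, a sketch of how a filesystem's ->write_begin would use the renamed helper; "myfs" is hypothetical, and the aops signature is the one current at this point in history:

    static int myfs_write_begin(struct file *file, struct address_space *mapping,
    			    loff_t pos, unsigned len, unsigned flags,
    			    struct page **pagep, void **fsdata)
    {
    	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
    	struct page *page;

    	/* AOP_FLAG_NOFS strips __GFP_FS so the allocation cannot recurse
    	 * into the filesystem while its locks are held. */
    	page = grab_cache_page_write_begin(mapping, index,
    					   flags | AOP_FLAG_NOFS);
    	if (!page)
    		return -ENOMEM;
    	*pagep = page;
    	return 0;
    }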
diff --git a/mm/memory.c b/mm/memory.c
index 164951c47305..0a2010a9518c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -669,6 +669,16 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	if (is_vm_hugetlb_page(vma))
 		return copy_hugetlb_page_range(dst_mm, src_mm, vma);
 
+	if (unlikely(is_pfn_mapping(vma))) {
+		/*
+		 * We do not free on error cases below as remove_vma
+		 * gets called on error from higher level routine
+		 */
+		ret = track_pfn_vma_copy(vma);
+		if (ret)
+			return ret;
+	}
+
 	/*
 	 * We need to invalidate the secondary MMU mappings only when
 	 * there could be a permission downgrade on the ptes of the
@@ -915,6 +925,9 @@ unsigned long unmap_vmas(struct mmu_gather **tlbp,
 	if (vma->vm_flags & VM_ACCOUNT)
 		*nr_accounted += (end - start) >> PAGE_SHIFT;
 
+	if (unlikely(is_pfn_mapping(vma)))
+		untrack_pfn_vma(vma, 0, 0);
+
 	while (start != end) {
 		if (!tlb_start_valid) {
 			tlb_start = start;
@@ -1430,6 +1443,7 @@ out:
 int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 			unsigned long pfn)
 {
+	int ret;
 	/*
 	 * Technically, architectures with pte_special can avoid all these
 	 * restrictions (same for remap_pfn_range). However we would like
@@ -1444,7 +1458,15 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 
 	if (addr < vma->vm_start || addr >= vma->vm_end)
 		return -EFAULT;
-	return insert_pfn(vma, addr, pfn, vma->vm_page_prot);
+	if (track_pfn_vma_new(vma, vma->vm_page_prot, pfn, PAGE_SIZE))
+		return -EINVAL;
+
+	ret = insert_pfn(vma, addr, pfn, vma->vm_page_prot);
+
+	if (ret)
+		untrack_pfn_vma(vma, pfn, PAGE_SIZE);
+
+	return ret;
 }
 EXPORT_SYMBOL(vm_insert_pfn);
 
@@ -1575,14 +1597,17 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
 	 * behaviour that some programs depend on. We mark the "original"
 	 * un-COW'ed pages by matching them up with "vma->vm_pgoff".
 	 */
-	if (is_cow_mapping(vma->vm_flags)) {
-		if (addr != vma->vm_start || end != vma->vm_end)
-			return -EINVAL;
+	if (addr == vma->vm_start && end == vma->vm_end)
 		vma->vm_pgoff = pfn;
-	}
+	else if (is_cow_mapping(vma->vm_flags))
+		return -EINVAL;
 
 	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
 
+	err = track_pfn_vma_new(vma, prot, pfn, PAGE_ALIGN(size));
+	if (err)
+		return -EINVAL;
+
 	BUG_ON(addr >= end);
 	pfn -= addr >> PAGE_SHIFT;
 	pgd = pgd_offset(mm, addr);
@@ -1594,6 +1619,10 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
 		if (err)
 			break;
 	} while (pgd++, addr = next, addr != end);
+
+	if (err)
+		untrack_pfn_vma(vma, pfn, PAGE_ALIGN(size));
+
 	return err;
 }
 EXPORT_SYMBOL(remap_pfn_range);
@@ -2865,9 +2894,9 @@ int in_gate_area_no_task(unsigned long addr)
 #endif	/* __HAVE_ARCH_GATE_AREA */
 
 #ifdef CONFIG_HAVE_IOREMAP_PROT
-static resource_size_t follow_phys(struct vm_area_struct *vma,
-			unsigned long address, unsigned int flags,
-			unsigned long *prot)
+int follow_phys(struct vm_area_struct *vma,
+		unsigned long address, unsigned int flags,
+		unsigned long *prot, resource_size_t *phys)
 {
 	pgd_t *pgd;
 	pud_t *pud;
@@ -2876,24 +2905,26 @@ static resource_size_t follow_phys(struct vm_area_struct *vma,
 	spinlock_t *ptl;
 	resource_size_t phys_addr = 0;
 	struct mm_struct *mm = vma->vm_mm;
+	int ret = -EINVAL;
 
-	VM_BUG_ON(!(vma->vm_flags & (VM_IO | VM_PFNMAP)));
+	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
+		goto out;
 
 	pgd = pgd_offset(mm, address);
 	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
-		goto no_page_table;
+		goto out;
 
 	pud = pud_offset(pgd, address);
 	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
-		goto no_page_table;
+		goto out;
 
 	pmd = pmd_offset(pud, address);
 	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
-		goto no_page_table;
+		goto out;
 
 	/* We cannot handle huge page PFN maps. Luckily they don't exist. */
 	if (pmd_huge(*pmd))
-		goto no_page_table;
+		goto out;
 
 	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
 	if (!ptep)
@@ -2908,13 +2939,13 @@ static resource_size_t follow_phys(struct vm_area_struct *vma,
 	phys_addr <<= PAGE_SHIFT; /* Shift here to avoid overflow on PAE */
 
 	*prot = pgprot_val(pte_pgprot(pte));
+	*phys = phys_addr;
+	ret = 0;
 
 unlock:
 	pte_unmap_unlock(ptep, ptl);
 out:
-	return phys_addr;
-no_page_table:
-	return 0;
+	return ret;
 }
 
 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
@@ -2925,12 +2956,7 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
 	void *maddr;
 	int offset = addr & (PAGE_SIZE-1);
 
-	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
-		return -EINVAL;
-
-	phys_addr = follow_phys(vma, addr, write, &prot);
-
-	if (!phys_addr)
+	if (follow_phys(vma, addr, write, &prot, &phys_addr))
 		return -EINVAL;
 
 	maddr = ioremap_prot(phys_addr, PAGE_SIZE, prot);
@@ -3049,3 +3075,18 @@ void print_vma_addr(char *prefix, unsigned long ip)
 	}
 	up_read(&current->mm->mmap_sem);
 }
+
+#ifdef CONFIG_PROVE_LOCKING
+void might_fault(void)
+{
+	might_sleep();
+	/*
+	 * it would be nicer only to annotate paths which are not under
+	 * pagefault_disable, however that requires a larger audit and
+	 * providing helpers like get_user_atomic.
+	 */
+	if (!in_atomic() && current->mm)
+		might_lock_read(&current->mm->mmap_sem);
+}
+EXPORT_SYMBOL(might_fault);
+#endif
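
Note: the new might_fault() annotation is meant to be called from uaccess paths, so that with CONFIG_PROVE_LOCKING lockdep sees a potential mmap_sem read acquisition at every such call site. A sketch of a caller (assumed usage; the wrapper name is invented):

    static inline unsigned long
    my_copy_from_user(void *to, const void __user *from, unsigned long n)
    {
    	might_fault();		/* lockdep: may take mmap_sem for reading */
    	return copy_from_user(to, from, n);
    }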
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index e9493b1c1117..e412ffa8e52e 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1114,6 +1114,7 @@ asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
 				     const unsigned long __user *old_nodes,
 				     const unsigned long __user *new_nodes)
 {
+	const struct cred *cred = current_cred(), *tcred;
 	struct mm_struct *mm;
 	struct task_struct *task;
 	nodemask_t old;
@@ -1148,12 +1149,16 @@ asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
 	 * capabilities, superuser privileges or the same
 	 * userid as the target process.
 	 */
-	if ((current->euid != task->suid) && (current->euid != task->uid) &&
-	    (current->uid != task->suid) && (current->uid != task->uid) &&
+	rcu_read_lock();
+	tcred = __task_cred(task);
+	if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
+	    cred->uid != tcred->suid && cred->uid != tcred->uid &&
 	    !capable(CAP_SYS_NICE)) {
+		rcu_read_unlock();
 		err = -EPERM;
 		goto out;
 	}
+	rcu_read_unlock();
 
 	task_nodes = cpuset_mems_allowed(task);
 	/* Is the user allowed to access the target nodes? */
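
Note: the credential check above (repeated in sys_move_pages below) follows the new cred API idiom: another task's credentials may only be dereferenced under rcu_read_lock(). Reduced to a sketch (check_peer() is a made-up helper):

    static bool check_peer(struct task_struct *task)
    {
    	const struct cred *cred = current_cred(), *tcred;
    	bool match;

    	rcu_read_lock();		/* required for __task_cred() */
    	tcred = __task_cred(task);
    	match = cred->euid == tcred->uid || cred->euid == tcred->suid;
    	rcu_read_unlock();		/* tcred must not be used past here */
    	return match;
    }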
diff --git a/mm/migrate.c b/mm/migrate.c
index 1e0d6b237f44..21631ab8c08b 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -987,25 +987,18 @@ out:
 /*
  * Determine the nodes of an array of pages and store it in an array of status.
  */
-static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
-			 const void __user * __user *pages,
-			 int __user *status)
+static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
+				const void __user **pages, int *status)
 {
 	unsigned long i;
-	int err;
 
 	down_read(&mm->mmap_sem);
 
 	for (i = 0; i < nr_pages; i++) {
-		const void __user *p;
-		unsigned long addr;
+		unsigned long addr = (unsigned long)(*pages);
 		struct vm_area_struct *vma;
 		struct page *page;
-
-		err = -EFAULT;
-		if (get_user(p, pages+i))
-			goto out;
-		addr = (unsigned long) p;
+		int err = -EFAULT;
 
 		vma = find_vma(mm, addr);
 		if (!vma)
@@ -1024,12 +1017,52 @@ static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
 
 		err = page_to_nid(page);
 set_status:
-		put_user(err, status+i);
+		*status = err;
+
+		pages++;
+		status++;
+	}
+
+	up_read(&mm->mmap_sem);
+}
+
+/*
+ * Determine the nodes of a user array of pages and store it in
+ * a user array of status.
+ */
+static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
+			 const void __user * __user *pages,
+			 int __user *status)
+{
+#define DO_PAGES_STAT_CHUNK_NR 16
+	const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
+	int chunk_status[DO_PAGES_STAT_CHUNK_NR];
+	unsigned long i, chunk_nr = DO_PAGES_STAT_CHUNK_NR;
+	int err;
+
+	for (i = 0; i < nr_pages; i += chunk_nr) {
+		if (chunk_nr + i > nr_pages)
+			chunk_nr = nr_pages - i;
+
+		err = copy_from_user(chunk_pages, &pages[i],
+				     chunk_nr * sizeof(*chunk_pages));
+		if (err) {
+			err = -EFAULT;
+			goto out;
+		}
+
+		do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
+
+		err = copy_to_user(&status[i], chunk_status,
+				   chunk_nr * sizeof(*chunk_status));
+		if (err) {
+			err = -EFAULT;
+			goto out;
+		}
 	}
 	err = 0;
 
 out:
-	up_read(&mm->mmap_sem);
 	return err;
 }
 
@@ -1042,6 +1075,7 @@ asmlinkage long sys_move_pages(pid_t pid, unsigned long nr_pages,
 				const int __user *nodes,
 				int __user *status, int flags)
 {
+	const struct cred *cred = current_cred(), *tcred;
 	struct task_struct *task;
 	struct mm_struct *mm;
 	int err;
@@ -1072,12 +1106,16 @@ asmlinkage long sys_move_pages(pid_t pid, unsigned long nr_pages,
 	 * capabilities, superuser privileges or the same
 	 * userid as the target process.
 	 */
-	if ((current->euid != task->suid) && (current->euid != task->uid) &&
-	    (current->uid != task->suid) && (current->uid != task->uid) &&
+	rcu_read_lock();
+	tcred = __task_cred(task);
+	if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
+	    cred->uid != tcred->suid && cred->uid != tcred->uid &&
 	    !capable(CAP_SYS_NICE)) {
+		rcu_read_unlock();
 		err = -EPERM;
 		goto out;
 	}
+	rcu_read_unlock();
 
 	err = security_task_movememory(task);
 	if (err)
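
Note: the chunked do_pages_stat() above bounds stack usage to 16 pointers per pass instead of one get_user()/put_user() round trip per page. The same loop shape, reduced to plain user-space C for illustration (process_chunk() is a stand-in for the per-page node lookup):

    #include <stddef.h>
    #include <string.h>

    #define CHUNK_NR 16

    static void process_chunk(const void **pages, int *status, size_t n)
    {
    	for (size_t j = 0; j < n; j++)
    		status[j] = pages[j] ? 0 : -1;	/* stand-in for page_to_nid() */
    }

    void pages_stat(const void **pages, int *status, size_t nr_pages)
    {
    	const void *chunk_pages[CHUNK_NR];
    	int chunk_status[CHUNK_NR];
    	size_t chunk_nr = CHUNK_NR;

    	for (size_t i = 0; i < nr_pages; i += chunk_nr) {
    		if (chunk_nr + i > nr_pages)
    			chunk_nr = nr_pages - i;	/* final partial chunk */

    		memcpy(chunk_pages, &pages[i],
    		       chunk_nr * sizeof(*chunk_pages));
    		process_chunk(chunk_pages, chunk_status, chunk_nr);
    		memcpy(&status[i], chunk_status,
    		       chunk_nr * sizeof(*chunk_status));
    	}
    }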
diff --git a/mm/mlock.c b/mm/mlock.c
index 1ada366570cb..3035a56e7616 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -667,3 +667,48 @@ void user_shm_unlock(size_t size, struct user_struct *user)
 	spin_unlock(&shmlock_user_lock);
 	free_uid(user);
 }
+
+void *alloc_locked_buffer(size_t size)
+{
+	unsigned long rlim, vm, pgsz;
+	void *buffer = NULL;
+
+	pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;
+
+	down_write(&current->mm->mmap_sem);
+
+	rlim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
+	vm = current->mm->total_vm + pgsz;
+	if (rlim < vm)
+		goto out;
+
+	rlim = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
+	vm = current->mm->locked_vm + pgsz;
+	if (rlim < vm)
+		goto out;
+
+	buffer = kzalloc(size, GFP_KERNEL);
+	if (!buffer)
+		goto out;
+
+	current->mm->total_vm += pgsz;
+	current->mm->locked_vm += pgsz;
+
+ out:
+	up_write(&current->mm->mmap_sem);
+	return buffer;
+}
+
+void free_locked_buffer(void *buffer, size_t size)
+{
+	unsigned long pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;
+
+	down_write(&current->mm->mmap_sem);
+
+	current->mm->total_vm -= pgsz;
+	current->mm->locked_vm -= pgsz;
+
+	up_write(&current->mm->mmap_sem);
+
+	kfree(buffer);
+}
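
Usage note (assumption; no caller appears in this merge): the pair must be invoked with matching sizes, since free_locked_buffer() re-derives the page count from size:

    void *buf = alloc_locked_buffer(4096);	/* charged to RLIMIT_MEMLOCK */
    if (buf) {
    	/* ... use buf ... */
    	free_locked_buffer(buf, 4096);	/* same size, or accounting skews */
    }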
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index a0a01902f551..558f9afe6e4e 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -128,8 +128,8 @@ unsigned long badness(struct task_struct *p, unsigned long uptime)
 	 * Superuser processes are usually more important, so we make it
 	 * less likely that we kill those.
 	 */
-	if (has_capability(p, CAP_SYS_ADMIN) ||
-	    has_capability(p, CAP_SYS_RESOURCE))
+	if (has_capability_noaudit(p, CAP_SYS_ADMIN) ||
+	    has_capability_noaudit(p, CAP_SYS_RESOURCE))
 		points /= 4;
 
 	/*
@@ -138,7 +138,7 @@ unsigned long badness(struct task_struct *p, unsigned long uptime)
 	 * tend to only have this flag set on applications they think
 	 * of as important.
 	 */
-	if (has_capability(p, CAP_SYS_RAWIO))
+	if (has_capability_noaudit(p, CAP_SYS_RAWIO))
 		points /= 4;
 
 	/*
@@ -299,9 +299,9 @@ static void dump_tasks(const struct mem_cgroup *mem)
 
 		task_lock(p);
 		printk(KERN_INFO "[%5d] %5d %5d %8lu %8lu %3d %3d %s\n",
-		       p->pid, p->uid, p->tgid, p->mm->total_vm,
-		       get_mm_rss(p->mm), (int)task_cpu(p), p->oomkilladj,
-		       p->comm);
+		       p->pid, __task_cred(p)->uid, p->tgid,
+		       p->mm->total_vm, get_mm_rss(p->mm), (int)task_cpu(p),
+		       p->oomkilladj, p->comm);
 		task_unlock(p);
 	} while_each_thread(g, p);
 }
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index 0b3cbf090a67..ab27ff750519 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -49,6 +49,9 @@ static int __init alloc_node_page_cgroup(int nid)
 	start_pfn = NODE_DATA(nid)->node_start_pfn;
 	nr_pages = NODE_DATA(nid)->node_spanned_pages;
 
+	if (!nr_pages)
+		return 0;
+
 	table_size = sizeof(struct page_cgroup) * nr_pages;
 
 	base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
diff --git a/mm/pdflush.c b/mm/pdflush.c
index a0a14c4d5072..15de509b68fd 100644
--- a/mm/pdflush.c
+++ b/mm/pdflush.c
@@ -172,7 +172,16 @@ static int __pdflush(struct pdflush_work *my_work)
 static int pdflush(void *dummy)
 {
 	struct pdflush_work my_work;
-	cpumask_t cpus_allowed;
+	cpumask_var_t cpus_allowed;
+
+	/*
+	 * Since the caller doesn't even check kthread_run() worked, let's not
+	 * freak out too much if this fails.
+	 */
+	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
+		printk(KERN_WARNING "pdflush failed to allocate cpumask\n");
+		return 0;
+	}
 
 	/*
 	 * pdflush can spend a lot of time doing encryption via dm-crypt.  We
@@ -187,8 +196,9 @@ static int pdflush(void *dummy)
 	 * This is needed as pdflush's are dynamically created and destroyed.
 	 * The boottime pdflush's are easily placed w/o these 2 lines.
 	 */
-	cpuset_cpus_allowed(current, &cpus_allowed);
-	set_cpus_allowed_ptr(current, &cpus_allowed);
+	cpuset_cpus_allowed(current, cpus_allowed);
+	set_cpus_allowed_ptr(current, cpus_allowed);
+	free_cpumask_var(cpus_allowed);
 
 	return __pdflush(&my_work);
 }
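
Note: the cpumask_var_t conversion follows the standard alloc/use/free lifecycle for off-stack cpumasks. A generic sketch (not from this file; run_on_online_cpus() is invented):

    static int run_on_online_cpus(void)
    {
    	cpumask_var_t mask;

    	/* heap-allocated when CONFIG_CPUMASK_OFFSTACK=y, on-stack otherwise */
    	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
    		return -ENOMEM;

    	cpumask_copy(mask, cpu_online_mask);
    	set_cpus_allowed_ptr(current, mask);

    	free_cpumask_var(mask);	/* the scheduler keeps its own copy */
    	return 0;
    }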
diff --git a/mm/shmem.c b/mm/shmem.c
index 0ed075215e5f..f1b0d4871f3a 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1513,8 +1513,8 @@ shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
 	inode = new_inode(sb);
 	if (inode) {
 		inode->i_mode = mode;
-		inode->i_uid = current->fsuid;
-		inode->i_gid = current->fsgid;
+		inode->i_uid = current_fsuid();
+		inode->i_gid = current_fsgid();
 		inode->i_blocks = 0;
 		inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
 		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
@@ -2278,8 +2278,8 @@ static int shmem_fill_super(struct super_block *sb,
 	sbinfo->max_blocks = 0;
 	sbinfo->max_inodes = 0;
 	sbinfo->mode = S_IRWXUGO | S_ISVTX;
-	sbinfo->uid = current->fsuid;
-	sbinfo->gid = current->fsgid;
+	sbinfo->uid = current_fsuid();
+	sbinfo->gid = current_fsgid();
 	sbinfo->mpol = NULL;
 	sb->s_fs_info = sbinfo;
 
diff --git a/mm/slab.c b/mm/slab.c
index 09187517f9dc..ddc41f337d58 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2123,6 +2123,8 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep)
  *
  * @name must be valid until the cache is destroyed. This implies that
  * the module calling this has to destroy the cache before getting unloaded.
+ * Note that kmem_cache_name() is not guaranteed to return the same pointer,
+ * therefore applications must manage it themselves.
  *
  * The flags are
  *
@@ -2155,7 +2157,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 
 	/*
 	 * We use cache_chain_mutex to ensure a consistent view of
-	 * cpu_online_map as well.  Please see cpuup_callback
+	 * cpu_online_mask as well.  Please see cpuup_callback
 	 */
 	get_online_cpus();
 	mutex_lock(&cache_chain_mutex);
@@ -2609,7 +2611,7 @@ static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
 	if (OFF_SLAB(cachep)) {
 		/* Slab management obj is off-slab. */
 		slabp = kmem_cache_alloc_node(cachep->slabp_cache,
-					      local_flags & ~GFP_THISNODE, nodeid);
+					      local_flags, nodeid);
 		if (!slabp)
 			return NULL;
 	} else {
@@ -2997,7 +2999,7 @@ retry:
 		 * there must be at least one object available for
 		 * allocation.
 		 */
-		BUG_ON(slabp->inuse < 0 || slabp->inuse >= cachep->num);
+		BUG_ON(slabp->inuse >= cachep->num);
 
 		while (slabp->inuse < cachep->num && batchcount--) {
 			STATS_INC_ALLOCED(cachep);
@@ -3106,79 +3108,14 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 #define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
 #endif
 
-#ifdef CONFIG_FAILSLAB
-
-static struct failslab_attr {
-
-	struct fault_attr attr;
-
-	u32 ignore_gfp_wait;
-#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
-	struct dentry *ignore_gfp_wait_file;
-#endif
-
-} failslab = {
-	.attr = FAULT_ATTR_INITIALIZER,
-	.ignore_gfp_wait = 1,
-};
-
-static int __init setup_failslab(char *str)
-{
-	return setup_fault_attr(&failslab.attr, str);
-}
-__setup("failslab=", setup_failslab);
-
-static int should_failslab(struct kmem_cache *cachep, gfp_t flags)
+static bool slab_should_failslab(struct kmem_cache *cachep, gfp_t flags)
 {
 	if (cachep == &cache_cache)
-		return 0;
-	if (flags & __GFP_NOFAIL)
-		return 0;
-	if (failslab.ignore_gfp_wait && (flags & __GFP_WAIT))
-		return 0;
+		return false;
 
-	return should_fail(&failslab.attr, obj_size(cachep));
+	return should_failslab(obj_size(cachep), flags);
 }
 
-#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
-
-static int __init failslab_debugfs(void)
-{
-	mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
-	struct dentry *dir;
-	int err;
-
-	err = init_fault_attr_dentries(&failslab.attr, "failslab");
-	if (err)
-		return err;
-	dir = failslab.attr.dentries.dir;
-
-	failslab.ignore_gfp_wait_file =
-		debugfs_create_bool("ignore-gfp-wait", mode, dir,
-				      &failslab.ignore_gfp_wait);
-
-	if (!failslab.ignore_gfp_wait_file) {
-		err = -ENOMEM;
-		debugfs_remove(failslab.ignore_gfp_wait_file);
-		cleanup_fault_attr_dentries(&failslab.attr);
-	}
-
-	return err;
-}
-
-late_initcall(failslab_debugfs);
-
-#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
-
-#else /* CONFIG_FAILSLAB */
-
-static inline int should_failslab(struct kmem_cache *cachep, gfp_t flags)
-{
-	return 0;
-}
-
-#endif /* CONFIG_FAILSLAB */
-
 static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
 	void *objp;
@@ -3381,7 +3318,7 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
 	unsigned long save_flags;
 	void *ptr;
 
-	if (should_failslab(cachep, flags))
+	if (slab_should_failslab(cachep, flags))
 		return NULL;
 
 	cache_alloc_debugcheck_before(cachep, flags);
@@ -3457,7 +3394,7 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
 	unsigned long save_flags;
 	void *objp;
 
-	if (should_failslab(cachep, flags))
+	if (slab_should_failslab(cachep, flags))
 		return NULL;
 
 	cache_alloc_debugcheck_before(cachep, flags);
@@ -3686,9 +3623,9 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 EXPORT_SYMBOL(__kmalloc_node);
 
 void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
-		int node, void *caller)
+		int node, unsigned long caller)
 {
-	return __do_kmalloc_node(size, flags, node, caller);
+	return __do_kmalloc_node(size, flags, node, (void *)caller);
 }
 EXPORT_SYMBOL(__kmalloc_node_track_caller);
 #else
@@ -3730,9 +3667,9 @@ void *__kmalloc(size_t size, gfp_t flags)
 }
 EXPORT_SYMBOL(__kmalloc);
 
-void *__kmalloc_track_caller(size_t size, gfp_t flags, void *caller)
+void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
 {
-	return __do_kmalloc(size, flags, caller);
+	return __do_kmalloc(size, flags, (void *)caller);
 }
 EXPORT_SYMBOL(__kmalloc_track_caller);
 
diff --git a/mm/slob.c b/mm/slob.c
index cb675d126791..bf7e8fc3aed8 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -535,7 +535,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 	struct kmem_cache *c;
 
 	c = slob_alloc(sizeof(struct kmem_cache),
-		flags, ARCH_KMALLOC_MINALIGN, -1);
+		GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
 
 	if (c) {
 		c->name = name;
diff --git a/mm/slub.c b/mm/slub.c
index 749588a50a5a..f0e2892fe403 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -24,6 +24,7 @@
 #include <linux/kallsyms.h>
 #include <linux/memory.h>
 #include <linux/math64.h>
+#include <linux/fault-inject.h>
 
 /*
  * Lock order:
@@ -153,6 +154,10 @@
 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
 #endif
 
+#define OO_SHIFT	16
+#define OO_MASK		((1 << OO_SHIFT) - 1)
+#define MAX_OBJS_PER_PAGE	65535 /* since page.objects is u16 */
+
 /* Internal SLUB flags */
 #define __OBJECT_POISON		0x80000000 /* Poison object */
 #define __SYSFS_ADD_DEFERRED	0x40000000 /* Not yet visible via sysfs */
@@ -178,7 +183,7 @@ static LIST_HEAD(slab_caches);
  * Tracking user of a slab.
  */
 struct track {
-	void *addr;		/* Called from address */
+	unsigned long addr;	/* Called from address */
 	int cpu;		/* Was running on cpu */
 	int pid;		/* Pid context */
 	unsigned long when;	/* When did the operation occur */
@@ -290,7 +295,7 @@ static inline struct kmem_cache_order_objects oo_make(int order,
 						unsigned long size)
 {
 	struct kmem_cache_order_objects x = {
-		(order << 16) + (PAGE_SIZE << order) / size
+		(order << OO_SHIFT) + (PAGE_SIZE << order) / size
 	};
 
 	return x;
@@ -298,12 +303,12 @@ static inline struct kmem_cache_order_objects oo_make(int order,
 
 static inline int oo_order(struct kmem_cache_order_objects x)
 {
-	return x.x >> 16;
+	return x.x >> OO_SHIFT;
 }
 
 static inline int oo_objects(struct kmem_cache_order_objects x)
 {
-	return x.x & ((1 << 16) - 1);
+	return x.x & OO_MASK;
 }
 
 #ifdef CONFIG_SLUB_DEBUG
@@ -367,7 +372,7 @@ static struct track *get_track(struct kmem_cache *s, void *object,
 }
 
 static void set_track(struct kmem_cache *s, void *object,
-			enum track_item alloc, void *addr)
+			enum track_item alloc, unsigned long addr)
 {
 	struct track *p;
 
@@ -391,8 +396,8 @@ static void init_tracking(struct kmem_cache *s, void *object)
 	if (!(s->flags & SLAB_STORE_USER))
 		return;
 
-	set_track(s, object, TRACK_FREE, NULL);
-	set_track(s, object, TRACK_ALLOC, NULL);
+	set_track(s, object, TRACK_FREE, 0UL);
+	set_track(s, object, TRACK_ALLOC, 0UL);
 }
 
398static void print_track(const char *s, struct track *t) 403static void print_track(const char *s, struct track *t)
@@ -401,7 +406,7 @@ static void print_track(const char *s, struct track *t)
401 return; 406 return;
402 407
403 printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n", 408 printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
404 s, t->addr, jiffies - t->when, t->cpu, t->pid); 409 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
405} 410}
406 411
407static void print_tracking(struct kmem_cache *s, void *object) 412static void print_tracking(struct kmem_cache *s, void *object)
@@ -692,7 +697,7 @@ static int check_object(struct kmem_cache *s, struct page *page,
692 if (!check_valid_pointer(s, page, get_freepointer(s, p))) { 697 if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
693 object_err(s, page, p, "Freepointer corrupt"); 698 object_err(s, page, p, "Freepointer corrupt");
694 /* 699 /*
695 * No choice but to zap it and thus loose the remainder 700 * No choice but to zap it and thus lose the remainder
696 * of the free objects in this slab. May cause 701 * of the free objects in this slab. May cause
697 * another error because the object count is now wrong. 702 * another error because the object count is now wrong.
698 */ 703 */
@@ -764,8 +769,8 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
 	}
 
 	max_objects = (PAGE_SIZE << compound_order(page)) / s->size;
-	if (max_objects > 65535)
-		max_objects = 65535;
+	if (max_objects > MAX_OBJS_PER_PAGE)
+		max_objects = MAX_OBJS_PER_PAGE;
 
 	if (page->objects != max_objects) {
 		slab_err(s, page, "Wrong number of objects. Found %d but "
@@ -866,7 +871,7 @@ static void setup_object_debug(struct kmem_cache *s, struct page *page,
 }
 
 static int alloc_debug_processing(struct kmem_cache *s, struct page *page,
-					void *object, void *addr)
+					void *object, unsigned long addr)
 {
 	if (!check_slab(s, page))
 		goto bad;
@@ -906,7 +911,7 @@ bad:
 }
 
 static int free_debug_processing(struct kmem_cache *s, struct page *page,
-					void *object, void *addr)
+					void *object, unsigned long addr)
 {
 	if (!check_slab(s, page))
 		goto fail;
@@ -1029,10 +1034,10 @@ static inline void setup_object_debug(struct kmem_cache *s,
 			struct page *page, void *object) {}
 
 static inline int alloc_debug_processing(struct kmem_cache *s,
-	struct page *page, void *object, void *addr) { return 0; }
+	struct page *page, void *object, unsigned long addr) { return 0; }
 
 static inline int free_debug_processing(struct kmem_cache *s,
-	struct page *page, void *object, void *addr) { return 0; }
+	struct page *page, void *object, unsigned long addr) { return 0; }
 
 static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
 			{ return 1; }
@@ -1499,8 +1504,8 @@ static inline int node_match(struct kmem_cache_cpu *c, int node)
  * we need to allocate a new slab. This is the slowest path since it involves
  * a call to the page allocator and the setup of a new slab.
  */
-static void *__slab_alloc(struct kmem_cache *s,
-		gfp_t gfpflags, int node, void *addr, struct kmem_cache_cpu *c)
+static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
+			  unsigned long addr, struct kmem_cache_cpu *c)
 {
 	void **object;
 	struct page *new;
@@ -1584,13 +1589,18 @@ debug:
  * Otherwise we can simply pick the next object from the lockless free list.
  */
 static __always_inline void *slab_alloc(struct kmem_cache *s,
-		gfp_t gfpflags, int node, void *addr)
+		gfp_t gfpflags, int node, unsigned long addr)
 {
 	void **object;
 	struct kmem_cache_cpu *c;
 	unsigned long flags;
 	unsigned int objsize;
 
+	might_sleep_if(gfpflags & __GFP_WAIT);
+
+	if (should_failslab(s->objsize, gfpflags))
+		return NULL;
+
 	local_irq_save(flags);
 	c = get_cpu_slab(s, smp_processor_id());
 	objsize = c->objsize;
@@ -1613,14 +1623,14 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 
 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
-	return slab_alloc(s, gfpflags, -1, __builtin_return_address(0));
+	return slab_alloc(s, gfpflags, -1, _RET_IP_);
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
-	return slab_alloc(s, gfpflags, node, __builtin_return_address(0));
+	return slab_alloc(s, gfpflags, node, _RET_IP_);
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 #endif
@@ -1634,7 +1644,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
  * handling required then we can return immediately.
  */
 static void __slab_free(struct kmem_cache *s, struct page *page,
-				void *x, void *addr, unsigned int offset)
+			void *x, unsigned long addr, unsigned int offset)
 {
 	void *prior;
 	void **object = (void *)x;
@@ -1704,7 +1714,7 @@ debug:
  * with all sorts of special processing.
  */
 static __always_inline void slab_free(struct kmem_cache *s,
-			struct page *page, void *x, void *addr)
+			struct page *page, void *x, unsigned long addr)
 {
 	void **object = (void *)x;
 	struct kmem_cache_cpu *c;
@@ -1731,11 +1741,11 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 
 	page = virt_to_head_page(x);
 
-	slab_free(s, page, x, __builtin_return_address(0));
+	slab_free(s, page, x, _RET_IP_);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
-/* Figure out on which slab object the object resides */
+/* Figure out on which slab page the object resides */
 static struct page *get_object_page(const void *x)
 {
 	struct page *page = virt_to_head_page(x);
@@ -1807,8 +1817,8 @@ static inline int slab_order(int size, int min_objects,
 	int rem;
 	int min_order = slub_min_order;
 
-	if ((PAGE_SIZE << min_order) / size > 65535)
-		return get_order(size * 65535) - 1;
+	if ((PAGE_SIZE << min_order) / size > MAX_OBJS_PER_PAGE)
+		return get_order(size * MAX_OBJS_PER_PAGE) - 1;
 
 	for (order = max(min_order,
 				fls(min_objects * size - 1) - PAGE_SHIFT);
@@ -1960,7 +1970,7 @@ static DEFINE_PER_CPU(struct kmem_cache_cpu,
 				kmem_cache_cpu)[NR_KMEM_CACHE_CPU];
 
 static DEFINE_PER_CPU(struct kmem_cache_cpu *, kmem_cache_cpu_free);
-static cpumask_t kmem_cach_cpu_free_init_once = CPU_MASK_NONE;
+static DECLARE_BITMAP(kmem_cach_cpu_free_init_once, CONFIG_NR_CPUS);
 
 static struct kmem_cache_cpu *alloc_kmem_cache_cpu(struct kmem_cache *s,
 							int cpu, gfp_t flags)
@@ -2035,13 +2045,13 @@ static void init_alloc_cpu_cpu(int cpu)
 {
 	int i;
 
-	if (cpu_isset(cpu, kmem_cach_cpu_free_init_once))
+	if (cpumask_test_cpu(cpu, to_cpumask(kmem_cach_cpu_free_init_once)))
 		return;
 
 	for (i = NR_KMEM_CACHE_CPU - 1; i >= 0; i--)
 		free_kmem_cache_cpu(&per_cpu(kmem_cache_cpu, cpu)[i], cpu);
 
-	cpu_set(cpu, kmem_cach_cpu_free_init_once);
+	cpumask_set_cpu(cpu, to_cpumask(kmem_cach_cpu_free_init_once));
 }
 
 static void __init init_alloc_cpu(void)
@@ -2073,8 +2083,7 @@ static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
  * when allocating for the kmalloc_node_cache. This is used for bootstrapping
  * memory on a fresh node that has no slab structures yet.
  */
-static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags,
-							   int node)
+static void early_kmem_cache_node_alloc(gfp_t gfpflags, int node)
 {
 	struct page *page;
 	struct kmem_cache_node *n;
@@ -2112,7 +2121,6 @@ static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags,
 	local_irq_save(flags);
 	add_partial(n, page, 0);
 	local_irq_restore(flags);
-	return n;
 }
 
 static void free_kmem_cache_nodes(struct kmem_cache *s)
@@ -2144,8 +2152,7 @@ static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
 		n = &s->local_node;
 	else {
 		if (slab_state == DOWN) {
-			n = early_kmem_cache_node_alloc(gfpflags,
-							node);
+			early_kmem_cache_node_alloc(gfpflags, node);
 			continue;
 		}
 		n = kmem_cache_alloc_node(kmalloc_caches,
@@ -2659,7 +2666,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	return slab_alloc(s, flags, -1, __builtin_return_address(0));
+	return slab_alloc(s, flags, -1, _RET_IP_);
 }
 EXPORT_SYMBOL(__kmalloc);
 
@@ -2687,7 +2694,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	return slab_alloc(s, flags, node, __builtin_return_address(0));
+	return slab_alloc(s, flags, node, _RET_IP_);
 }
 EXPORT_SYMBOL(__kmalloc_node);
 #endif
@@ -2744,7 +2751,7 @@ void kfree(const void *x)
 		put_page(page);
 		return;
 	}
-	slab_free(page->slab, page, object, __builtin_return_address(0));
+	slab_free(page->slab, page, object, _RET_IP_);
 }
 EXPORT_SYMBOL(kfree);
 
@@ -3123,8 +3130,12 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 		s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
 		up_write(&slub_lock);
 
-		if (sysfs_slab_alias(s, name))
+		if (sysfs_slab_alias(s, name)) {
+			down_write(&slub_lock);
+			s->refcount--;
+			up_write(&slub_lock);
 			goto err;
+		}
 		return s;
 	}
 
@@ -3134,8 +3145,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 			size, align, flags, ctor)) {
 		list_add(&s->list, &slab_caches);
 		up_write(&slub_lock);
-		if (sysfs_slab_add(s))
+		if (sysfs_slab_add(s)) {
+			down_write(&slub_lock);
+			list_del(&s->list);
+			up_write(&slub_lock);
+			kfree(s);
 			goto err;
+		}
 		return s;
 	}
 	kfree(s);
@@ -3202,7 +3218,7 @@ static struct notifier_block __cpuinitdata slab_notifier = {
 
 #endif
 
-void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
+void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 {
 	struct kmem_cache *s;
 
@@ -3218,7 +3234,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
 }
 
 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
-					int node, void *caller)
+					int node, unsigned long caller)
 {
 	struct kmem_cache *s;
 
@@ -3429,13 +3445,13 @@ static void resiliency_test(void) {};
 
 struct location {
 	unsigned long count;
-	void *addr;
+	unsigned long addr;
 	long long sum_time;
 	long min_time;
 	long max_time;
 	long min_pid;
 	long max_pid;
-	cpumask_t cpus;
+	DECLARE_BITMAP(cpus, NR_CPUS);
 	nodemask_t nodes;
 };
 
@@ -3477,7 +3493,7 @@ static int add_location(struct loc_track *t, struct kmem_cache *s,
 {
 	long start, end, pos;
 	struct location *l;
-	void *caddr;
+	unsigned long caddr;
 	unsigned long age = jiffies - track->when;
 
 	start = -1;
@@ -3510,7 +3526,8 @@ static int add_location(struct loc_track *t, struct kmem_cache *s,
 				if (track->pid > l->max_pid)
 					l->max_pid = track->pid;
 
-				cpu_set(track->cpu, l->cpus);
+				cpumask_set_cpu(track->cpu,
+						to_cpumask(l->cpus));
 			}
 			node_set(page_to_nid(virt_to_page(track)), l->nodes);
 			return 1;
@@ -3540,8 +3557,8 @@ static int add_location(struct loc_track *t, struct kmem_cache *s,
 	l->max_time = age;
 	l->min_pid = track->pid;
 	l->max_pid = track->pid;
-	cpus_clear(l->cpus);
-	cpu_set(track->cpu, l->cpus);
+	cpumask_clear(to_cpumask(l->cpus));
+	cpumask_set_cpu(track->cpu, to_cpumask(l->cpus));
 	nodes_clear(l->nodes);
 	node_set(page_to_nid(virt_to_page(track)), l->nodes);
 	return 1;
@@ -3597,7 +3614,7 @@ static int list_locations(struct kmem_cache *s, char *buf,
 	for (i = 0; i < t.count; i++) {
 		struct location *l = &t.loc[i];
 
-		if (len > PAGE_SIZE - 100)
+		if (len > PAGE_SIZE - KSYM_SYMBOL_LEN - 100)
 			break;
 		len += sprintf(buf + len, "%7ld ", l->count);
 
@@ -3622,11 +3639,12 @@ static int list_locations(struct kmem_cache *s, char *buf,
 			len += sprintf(buf + len, " pid=%ld",
 				l->min_pid);
 
-		if (num_online_cpus() > 1 && !cpus_empty(l->cpus) &&
+		if (num_online_cpus() > 1 &&
+				!cpumask_empty(to_cpumask(l->cpus)) &&
 				len < PAGE_SIZE - 60) {
 			len += sprintf(buf + len, " cpus=");
 			len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50,
-					l->cpus);
+						 to_cpumask(l->cpus));
 		}
 
 		if (num_online_nodes() > 1 && !nodes_empty(l->nodes) &&
@@ -4345,7 +4363,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
 
 /*
  * Need to buffer aliases during bootup until sysfs becomes
- * available lest we loose that information.
+ * available lest we lose that information.
  */
 struct saved_alias {
 	struct kmem_cache *s;
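
Note: the OO_SHIFT/OO_MASK change above only names the existing packing of (order, objects) into one word; the arithmetic is unchanged. A stand-alone demonstration of the same math in plain user-space C (4 KiB pages assumed for the demo):

    #include <assert.h>

    #define OO_SHIFT  16
    #define OO_MASK   ((1 << OO_SHIFT) - 1)
    #define PAGE_SZ   4096UL	/* assumption for the demo */

    static unsigned long oo_make(int order, unsigned long size)
    {
    	return ((unsigned long)order << OO_SHIFT) + (PAGE_SZ << order) / size;
    }

    int main(void)
    {
    	unsigned long x = oo_make(3, 256);	/* order-3 slab, 256-byte objects */

    	assert((x >> OO_SHIFT) == 3);			/* oo_order()   */
    	assert((x & OO_MASK) == (PAGE_SZ << 3) / 256);	/* oo_objects() == 128 */
    	return 0;
    }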
diff --git a/mm/swap.c b/mm/swap.c
index 2881987603eb..b135ec90cdeb 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -299,7 +299,6 @@ void lru_add_drain(void)
 	put_cpu();
 }
 
-#if defined(CONFIG_NUMA) || defined(CONFIG_UNEVICTABLE_LRU)
 static void lru_add_drain_per_cpu(struct work_struct *dummy)
 {
 	lru_add_drain();
@@ -313,18 +312,6 @@ int lru_add_drain_all(void)
 	return schedule_on_each_cpu(lru_add_drain_per_cpu);
 }
 
-#else
-
-/*
- * Returns 0 for success
- */
-int lru_add_drain_all(void)
-{
-	lru_add_drain();
-	return 0;
-}
-#endif
-
 /*
  * Batched page_cache_release(). Decrement the reference count on all the
  * passed pages.  If it fell to zero then remove the page from the LRU and
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 90cb67a5417c..54a9f87e5162 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1462,6 +1462,15 @@ static int __init procswaps_init(void)
 __initcall(procswaps_init);
 #endif /* CONFIG_PROC_FS */
 
+#ifdef MAX_SWAPFILES_CHECK
+static int __init max_swapfiles_check(void)
+{
+	MAX_SWAPFILES_CHECK();
+	return 0;
+}
+late_initcall(max_swapfiles_check);
+#endif
+
 /*
  * Written 01/25/92 by Simmule Turner, heavily changed by Linus.
  *
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index f3f6e0758562..7465f22fec0c 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -151,11 +151,12 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
  *
  * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
  */
-static int vmap_page_range(unsigned long addr, unsigned long end,
+static int vmap_page_range(unsigned long start, unsigned long end,
 				pgprot_t prot, struct page **pages)
 {
 	pgd_t *pgd;
 	unsigned long next;
+	unsigned long addr = start;
 	int err = 0;
 	int nr = 0;
 
@@ -167,7 +168,7 @@ static int vmap_page_range(unsigned long addr, unsigned long end,
 		if (err)
 			break;
 	} while (pgd++, addr = next, addr != end);
-	flush_cache_vmap(addr, end);
+	flush_cache_vmap(start, end);
 
 	if (unlikely(err))
 		return err;
@@ -1717,7 +1718,7 @@ static int s_show(struct seq_file *m, void *p)
 			v->addr, v->addr + v->size, v->size);
 
 	if (v->caller) {
-		char buff[2 * KSYM_NAME_LEN];
+		char buff[KSYM_SYMBOL_LEN];
 
 		seq_putc(m, ' ');
 		sprint_symbol(buff, (unsigned long)v->caller);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 62e7f62fb559..d196f46c8808 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1902,7 +1902,7 @@ static int kswapd(void *p)
 	};
 	node_to_cpumask_ptr(cpumask, pgdat->node_id);
 
-	if (!cpus_empty(*cpumask))
+	if (!cpumask_empty(cpumask))
 		set_cpus_allowed_ptr(tsk, cpumask);
 	current->reclaim_state = &reclaim_state;
 
@@ -2141,7 +2141,7 @@ static int __devinit cpu_callback(struct notifier_block *nfb,
 			pg_data_t *pgdat = NODE_DATA(nid);
 			node_to_cpumask_ptr(mask, pgdat->node_id);
 
-			if (any_online_cpu(*mask) < nr_cpu_ids)
+			if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
 				/* One of our CPUs online: restore mask */
 				set_cpus_allowed_ptr(pgdat->kswapd, mask);
 		}
diff --git a/mm/vmstat.c b/mm/vmstat.c
index c3ccfda23adc..91149746bb8d 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -20,7 +20,7 @@
 DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
 EXPORT_PER_CPU_SYMBOL(vm_event_states);
 
-static void sum_vm_events(unsigned long *ret, cpumask_t *cpumask)
+static void sum_vm_events(unsigned long *ret, const struct cpumask *cpumask)
 {
 	int cpu;
 	int i;
@@ -43,7 +43,7 @@ static void sum_vm_events(unsigned long *ret, cpumask_t *cpumask)
 void all_vm_events(unsigned long *ret)
 {
 	get_online_cpus();
-	sum_vm_events(ret, &cpu_online_map);
+	sum_vm_events(ret, cpu_online_mask);
 	put_online_cpus();
 }
 EXPORT_SYMBOL_GPL(all_vm_events);