author     Rusty Russell <rusty@rustcorp.com.au>   2008-12-29 16:32:35 -0500
committer  Rusty Russell <rusty@rustcorp.com.au>   2008-12-29 16:32:35 -0500
commit     33edcf133ba93ecba2e4b6472e97b689895d805c
tree       327d7a20acef64005e7c5ccbfa1265be28aeb6ac /mm
parent     be4d638c1597580ed2294d899d9f1a2cd10e462c
parent     3c92ec8ae91ecf59d88c798301833d7cf83f2179
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'mm')
-rw-r--r--  mm/bounce.c    |  5
-rw-r--r--  mm/memory.c    | 70
-rw-r--r--  mm/mempolicy.c |  9
-rw-r--r--  mm/migrate.c   | 11
-rw-r--r--  mm/mlock.c     | 45
-rw-r--r--  mm/oom_kill.c  | 12
-rw-r--r--  mm/shmem.c     |  8
-rw-r--r--  mm/slob.c      |  2
-rw-r--r--  mm/swapfile.c  |  9
9 files changed, 132 insertions, 39 deletions
diff --git a/mm/bounce.c b/mm/bounce.c
index 06722c403058..bf0cf7c8387b 100644
--- a/mm/bounce.c
+++ b/mm/bounce.c
@@ -14,6 +14,7 @@
 #include <linux/hash.h>
 #include <linux/highmem.h>
 #include <linux/blktrace_api.h>
+#include <trace/block.h>
 #include <asm/tlbflush.h>
 
 #define POOL_SIZE	64
@@ -21,6 +22,8 @@
 
 static mempool_t *page_pool, *isa_page_pool;
 
+DEFINE_TRACE(block_bio_bounce);
+
 #ifdef CONFIG_HIGHMEM
 static __init int init_emergency_pool(void)
 {
@@ -222,7 +225,7 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
 	if (!bio)
 		return;
 
-	blk_add_trace_bio(q, *bio_orig, BLK_TA_BOUNCE);
+	trace_block_bio_bounce(q, *bio_orig);
 
 	/*
 	 * at least one page was bounced, fill in possible non-highmem
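
The mm/bounce.c hunk swaps the hard-wired blktrace call for the new block_bio_bounce tracepoint, so any probe can hook bounce events. Below is a minimal sketch of such a probe module, assuming the register_trace_block_bio_bounce()/unregister_trace_block_bio_bounce() helpers that DECLARE_TRACE generates for this tracepoint in <trace/block.h>; the probe body and module name are invented for illustration.

/*
 * Sketch only: a probe module for the block_bio_bounce tracepoint.
 * Assumes the register_/unregister_ helpers generated by DECLARE_TRACE
 * in <trace/block.h>; the probe body and module name are invented.
 */
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <trace/block.h>

/* Probe signature must match the tracepoint arguments: (q, bio). */
static void probe_block_bio_bounce(struct request_queue *q, struct bio *bio)
{
	pr_info("bounce: %u bytes at sector %llu\n",
		bio->bi_size, (unsigned long long)bio->bi_sector);
}

static int __init bounce_probe_init(void)
{
	return register_trace_block_bio_bounce(probe_block_bio_bounce);
}

static void __exit bounce_probe_exit(void)
{
	unregister_trace_block_bio_bounce(probe_block_bio_bounce);
	tracepoint_synchronize_unregister();	/* wait for in-flight probes */
}

module_init(bounce_probe_init);
module_exit(bounce_probe_exit);
MODULE_LICENSE("GPL");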
diff --git a/mm/memory.c b/mm/memory.c
index 164951c47305..f01b7eed6e16 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -669,6 +669,16 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	if (is_vm_hugetlb_page(vma))
 		return copy_hugetlb_page_range(dst_mm, src_mm, vma);
 
+	if (unlikely(is_pfn_mapping(vma))) {
+		/*
+		 * We do not free on error cases below as remove_vma
+		 * gets called on error from higher level routine
+		 */
+		ret = track_pfn_vma_copy(vma);
+		if (ret)
+			return ret;
+	}
+
 	/*
 	 * We need to invalidate the secondary MMU mappings only when
 	 * there could be a permission downgrade on the ptes of the
@@ -915,6 +925,9 @@ unsigned long unmap_vmas(struct mmu_gather **tlbp,
 		if (vma->vm_flags & VM_ACCOUNT)
 			*nr_accounted += (end - start) >> PAGE_SHIFT;
 
+		if (unlikely(is_pfn_mapping(vma)))
+			untrack_pfn_vma(vma, 0, 0);
+
 		while (start != end) {
 			if (!tlb_start_valid) {
 				tlb_start = start;
@@ -1430,6 +1443,7 @@ out:
 int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 			unsigned long pfn)
 {
+	int ret;
 	/*
 	 * Technically, architectures with pte_special can avoid all these
 	 * restrictions (same for remap_pfn_range). However we would like
@@ -1444,7 +1458,15 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 
 	if (addr < vma->vm_start || addr >= vma->vm_end)
 		return -EFAULT;
-	return insert_pfn(vma, addr, pfn, vma->vm_page_prot);
+	if (track_pfn_vma_new(vma, vma->vm_page_prot, pfn, PAGE_SIZE))
+		return -EINVAL;
+
+	ret = insert_pfn(vma, addr, pfn, vma->vm_page_prot);
+
+	if (ret)
+		untrack_pfn_vma(vma, pfn, PAGE_SIZE);
+
+	return ret;
 }
 EXPORT_SYMBOL(vm_insert_pfn);
 
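With this change vm_insert_pfn() registers the pfn with the new track_pfn_vma_new() hook and rolls the reservation back via untrack_pfn_vma() if the insert fails, so callers need no extra bookkeeping. For context, here is a hedged sketch of the usual caller, a driver ->fault handler on a VM_PFNMAP vma; struct mydev, its phys_base field and the offset math are invented for the example.

/*
 * Illustration only: a driver ->fault handler that maps device memory
 * one pfn at a time with vm_insert_pfn().  struct mydev and phys_base
 * are hypothetical; the mmap method is assumed to have set
 * VM_PFNMAP | VM_IO | VM_RESERVED on the vma and stashed the device
 * in vm_private_data.
 */
#include <linux/mm.h>

struct mydev {				/* hypothetical driver state */
	unsigned long phys_base;	/* physical base of device memory */
};

static int mydev_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct mydev *dev = vma->vm_private_data;
	unsigned long addr = (unsigned long)vmf->virtual_address;
	unsigned long pfn = (dev->phys_base >> PAGE_SHIFT) +
			    ((addr - vma->vm_start) >> PAGE_SHIFT);

	/* vm_insert_pfn() does the track/untrack_pfn_vma() dance itself */
	if (vm_insert_pfn(vma, addr, pfn))
		return VM_FAULT_SIGBUS;
	return VM_FAULT_NOPAGE;		/* pte is already installed */
}

static struct vm_operations_struct mydev_vm_ops = {
	.fault = mydev_vm_fault,
};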
@@ -1575,14 +1597,17 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
 	 * behaviour that some programs depend on. We mark the "original"
 	 * un-COW'ed pages by matching them up with "vma->vm_pgoff".
 	 */
-	if (is_cow_mapping(vma->vm_flags)) {
-		if (addr != vma->vm_start || end != vma->vm_end)
-			return -EINVAL;
+	if (addr == vma->vm_start && end == vma->vm_end)
 		vma->vm_pgoff = pfn;
-	}
+	else if (is_cow_mapping(vma->vm_flags))
+		return -EINVAL;
 
 	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
 
+	err = track_pfn_vma_new(vma, prot, pfn, PAGE_ALIGN(size));
+	if (err)
+		return -EINVAL;
+
 	BUG_ON(addr >= end);
 	pfn -= addr >> PAGE_SHIFT;
 	pgd = pgd_offset(mm, addr);
@@ -1594,6 +1619,10 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
 		if (err)
 			break;
 	} while (pgd++, addr = next, addr != end);
+
+	if (err)
+		untrack_pfn_vma(vma, pfn, PAGE_ALIGN(size));
+
 	return err;
 }
 EXPORT_SYMBOL(remap_pfn_range);
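remap_pfn_range() now reserves the whole range via track_pfn_vma_new() and untracks it again if the page-table walk fails; it also takes over vm_pgoff for any mapping that covers the full vma, not only COW mappings. A typical caller looks like the sketch below: a character-device mmap method mapping a device's MMIO window in one call. The mmio_dev structure and its mmio_phys/mmio_len fields are hypothetical.

/*
 * Illustration only: the classic remap_pfn_range() caller, a chardev
 * mmap method that maps a device MMIO window in one call.  struct
 * mmio_dev and its fields are hypothetical; the pfn tracking added
 * above happens inside remap_pfn_range() itself.
 */
#include <linux/fs.h>
#include <linux/mm.h>

struct mmio_dev {			/* hypothetical driver state */
	resource_size_t mmio_phys;	/* physical base of the MMIO window */
	resource_size_t mmio_len;	/* its length */
};

static int mmio_dev_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct mmio_dev *dev = file->private_data;
	unsigned long size = vma->vm_end - vma->vm_start;

	if (size > dev->mmio_len)
		return -EINVAL;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	/* covers the whole vma, so vm_pgoff is taken over as shown above */
	return remap_pfn_range(vma, vma->vm_start,
			       dev->mmio_phys >> PAGE_SHIFT,
			       size, vma->vm_page_prot);
}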
@@ -2865,9 +2894,9 @@ int in_gate_area_no_task(unsigned long addr)
 #endif	/* __HAVE_ARCH_GATE_AREA */
 
 #ifdef CONFIG_HAVE_IOREMAP_PROT
-static resource_size_t follow_phys(struct vm_area_struct *vma,
-			unsigned long address, unsigned int flags,
-			unsigned long *prot)
+int follow_phys(struct vm_area_struct *vma,
+		unsigned long address, unsigned int flags,
+		unsigned long *prot, resource_size_t *phys)
 {
 	pgd_t *pgd;
 	pud_t *pud;
@@ -2876,24 +2905,26 @@ static resource_size_t follow_phys(struct vm_area_struct *vma,
 	spinlock_t *ptl;
 	resource_size_t phys_addr = 0;
 	struct mm_struct *mm = vma->vm_mm;
+	int ret = -EINVAL;
 
-	VM_BUG_ON(!(vma->vm_flags & (VM_IO | VM_PFNMAP)));
+	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
+		goto out;
 
 	pgd = pgd_offset(mm, address);
 	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
-		goto no_page_table;
+		goto out;
 
 	pud = pud_offset(pgd, address);
 	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
-		goto no_page_table;
+		goto out;
 
 	pmd = pmd_offset(pud, address);
 	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
-		goto no_page_table;
+		goto out;
 
 	/* We cannot handle huge page PFN maps. Luckily they don't exist. */
 	if (pmd_huge(*pmd))
-		goto no_page_table;
+		goto out;
 
 	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
 	if (!ptep)
@@ -2908,13 +2939,13 @@ static resource_size_t follow_phys(struct vm_area_struct *vma,
 	phys_addr <<= PAGE_SHIFT; /* Shift here to avoid overflow on PAE */
 
 	*prot = pgprot_val(pte_pgprot(pte));
+	*phys = phys_addr;
+	ret = 0;
 
 unlock:
 	pte_unmap_unlock(ptep, ptl);
 out:
-	return phys_addr;
-no_page_table:
-	return 0;
+	return ret;
 }
 
 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
@@ -2925,12 +2956,7 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
 	void *maddr;
 	int offset = addr & (PAGE_SIZE-1);
 
-	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
-		return -EINVAL;
-
-	phys_addr = follow_phys(vma, addr, write, &prot);
-
-	if (!phys_addr)
+	if (follow_phys(vma, addr, write, &prot, &phys_addr))
 		return -EINVAL;
 
 	maddr = ioremap_prot(phys_addr, PAGE_SIZE, prot);
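follow_phys() now returns 0 or -EINVAL and hands the physical address back through *phys, which lets generic_access_phys() collapse its separate VM_IO/VM_PFNMAP and "phys_addr == 0" checks into a single call. generic_access_phys() is the helper a driver can plug into vm_ops->access so that ptrace and /proc/<pid>/mem can reach an ioremapped mapping; the sketch below shows that wiring under the same CONFIG_HAVE_IOREMAP_PROT guard. The surrounding mmap method and device are invented.

/*
 * Illustration only: making an ioremapped mapping reachable from
 * ptrace and /proc/<pid>/mem by wiring generic_access_phys() into
 * vm_ops->access.  The mmap method is hypothetical;
 * generic_access_phys() only exists when the architecture selects
 * CONFIG_HAVE_IOREMAP_PROT.
 */
#include <linux/fs.h>
#include <linux/mm.h>

static struct vm_operations_struct mmio_vm_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
	.access = generic_access_phys,
#endif
};

static int mmio_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &mmio_vm_ops;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
				  vma->vm_end - vma->vm_start,
				  vma->vm_page_prot);
}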
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index e9493b1c1117..e412ffa8e52e 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1114,6 +1114,7 @@ asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
 		const unsigned long __user *old_nodes,
 		const unsigned long __user *new_nodes)
 {
+	const struct cred *cred = current_cred(), *tcred;
 	struct mm_struct *mm;
 	struct task_struct *task;
 	nodemask_t old;
@@ -1148,12 +1149,16 @@ asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
 	 * capabilities, superuser privileges or the same
 	 * userid as the target process.
 	 */
-	if ((current->euid != task->suid) && (current->euid != task->uid) &&
-	    (current->uid != task->suid) && (current->uid != task->uid) &&
+	rcu_read_lock();
+	tcred = __task_cred(task);
+	if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
+	    cred->uid  != tcred->suid && cred->uid  != tcred->uid &&
 	    !capable(CAP_SYS_NICE)) {
+		rcu_read_unlock();
 		err = -EPERM;
 		goto out;
 	}
+	rcu_read_unlock();
 
 	task_nodes = cpuset_mems_allowed(task);
 	/* Is the user allowed to access the target nodes? */
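The permission check in sys_migrate_pages() now goes through the credentials API: the target's struct cred is fetched with __task_cred() and may only be dereferenced under rcu_read_lock(), instead of reading the task's uid/euid fields directly. The identical conversion is applied to sys_move_pages() in mm/migrate.c below. Purely as an illustration, the same test could be factored into a helper like the sketch that follows; the helper name is made up and the kernel keeps the check open-coded.

/*
 * Illustration only: the uid/euid-or-CAP_SYS_NICE test from the hunk
 * above, factored into a hypothetical helper.  The point is that
 * tcred is only valid while rcu_read_lock() is held.
 */
#include <linux/capability.h>
#include <linux/cred.h>
#include <linux/sched.h>

static bool may_manipulate_task_memory(struct task_struct *task)
{
	const struct cred *cred = current_cred(), *tcred;
	bool ok;

	rcu_read_lock();
	tcred = __task_cred(task);
	ok = cred->euid == tcred->suid || cred->euid == tcred->uid ||
	     cred->uid  == tcred->suid || cred->uid  == tcred->uid ||
	     capable(CAP_SYS_NICE);
	rcu_read_unlock();

	return ok;
}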
diff --git a/mm/migrate.c b/mm/migrate.c
index d8f07667fc80..21631ab8c08b 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -998,7 +998,7 @@ static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
 		unsigned long addr = (unsigned long)(*pages);
 		struct vm_area_struct *vma;
 		struct page *page;
-		int err;
+		int err = -EFAULT;
 
 		vma = find_vma(mm, addr);
 		if (!vma)
@@ -1075,6 +1075,7 @@ asmlinkage long sys_move_pages(pid_t pid, unsigned long nr_pages,
 		const int __user *nodes,
 		int __user *status, int flags)
 {
+	const struct cred *cred = current_cred(), *tcred;
 	struct task_struct *task;
 	struct mm_struct *mm;
 	int err;
@@ -1105,12 +1106,16 @@ asmlinkage long sys_move_pages(pid_t pid, unsigned long nr_pages,
 	 * capabilities, superuser privileges or the same
 	 * userid as the target process.
 	 */
-	if ((current->euid != task->suid) && (current->euid != task->uid) &&
-	    (current->uid != task->suid) && (current->uid != task->uid) &&
+	rcu_read_lock();
+	tcred = __task_cred(task);
+	if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
+	    cred->uid  != tcred->suid && cred->uid  != tcred->uid &&
 	    !capable(CAP_SYS_NICE)) {
+		rcu_read_unlock();
 		err = -EPERM;
 		goto out;
 	}
+	rcu_read_unlock();
 
 	err = security_task_movememory(task);
 	if (err)
diff --git a/mm/mlock.c b/mm/mlock.c
index 1ada366570cb..3035a56e7616 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -667,3 +667,48 @@ void user_shm_unlock(size_t size, struct user_struct *user)
 	spin_unlock(&shmlock_user_lock);
 	free_uid(user);
 }
+
+void *alloc_locked_buffer(size_t size)
+{
+	unsigned long rlim, vm, pgsz;
+	void *buffer = NULL;
+
+	pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;
+
+	down_write(&current->mm->mmap_sem);
+
+	rlim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
+	vm = current->mm->total_vm + pgsz;
+	if (rlim < vm)
+		goto out;
+
+	rlim = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
+	vm = current->mm->locked_vm + pgsz;
+	if (rlim < vm)
+		goto out;
+
+	buffer = kzalloc(size, GFP_KERNEL);
+	if (!buffer)
+		goto out;
+
+	current->mm->total_vm  += pgsz;
+	current->mm->locked_vm += pgsz;
+
+ out:
+	up_write(&current->mm->mmap_sem);
+	return buffer;
+}
+
+void free_locked_buffer(void *buffer, size_t size)
+{
+	unsigned long pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;
+
+	down_write(&current->mm->mmap_sem);
+
+	current->mm->total_vm  -= pgsz;
+	current->mm->locked_vm -= pgsz;
+
+	up_write(&current->mm->mmap_sem);
+
+	kfree(buffer);
+}
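The two new mlock.c helpers allocate a kzalloc'ed buffer while charging it against the caller's RLIMIT_AS and RLIMIT_MEMLOCK accounting (total_vm/locked_vm) under mmap_sem, and undo that accounting on free. A hedged usage sketch follows; everything except alloc_locked_buffer()/free_locked_buffer() is invented, and note that free_locked_buffer() must be passed the same size that was allocated, since that is what gets charged and later removed.

/*
 * Illustration only: a caller of the two new helpers.  The wrapper
 * and its consumer are invented; only alloc_locked_buffer() and
 * free_locked_buffer() come from the diff above.
 */
#include <linux/errno.h>
#include <linux/string.h>

void *alloc_locked_buffer(size_t size);		/* from mm/mlock.c above */
void free_locked_buffer(void *buffer, size_t size);

static int use_scratch(void *buf, size_t len)	/* stand-in consumer */
{
	memset(buf, 0, len);
	return 0;
}

static int with_locked_scratch(size_t len)
{
	void *buf;
	int err;

	buf = alloc_locked_buffer(len);
	if (!buf)	/* over RLIMIT_AS/RLIMIT_MEMLOCK, or kzalloc failed */
		return -ENOMEM;

	err = use_scratch(buf, len);

	free_locked_buffer(buf, len);	/* same size as the allocation */
	return err;
}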
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index a0a01902f551..558f9afe6e4e 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -128,8 +128,8 @@ unsigned long badness(struct task_struct *p, unsigned long uptime)
 	 * Superuser processes are usually more important, so we make it
 	 * less likely that we kill those.
 	 */
-	if (has_capability(p, CAP_SYS_ADMIN) ||
-	    has_capability(p, CAP_SYS_RESOURCE))
+	if (has_capability_noaudit(p, CAP_SYS_ADMIN) ||
+	    has_capability_noaudit(p, CAP_SYS_RESOURCE))
 		points /= 4;
 
 	/*
@@ -138,7 +138,7 @@ unsigned long badness(struct task_struct *p, unsigned long uptime)
 	 * tend to only have this flag set on applications they think
 	 * of as important.
 	 */
-	if (has_capability(p, CAP_SYS_RAWIO))
+	if (has_capability_noaudit(p, CAP_SYS_RAWIO))
 		points /= 4;
 
 	/*
@@ -299,9 +299,9 @@ static void dump_tasks(const struct mem_cgroup *mem)
 
 		task_lock(p);
 		printk(KERN_INFO "[%5d] %5d %5d %8lu %8lu %3d %3d %s\n",
-		       p->pid, p->uid, p->tgid, p->mm->total_vm,
-		       get_mm_rss(p->mm), (int)task_cpu(p), p->oomkilladj,
-		       p->comm);
+		       p->pid, __task_cred(p)->uid, p->tgid,
+		       p->mm->total_vm, get_mm_rss(p->mm), (int)task_cpu(p),
+		       p->oomkilladj, p->comm);
 		task_unlock(p);
 	} while_each_thread(g, p);
 }
diff --git a/mm/shmem.c b/mm/shmem.c
index 0ed075215e5f..f1b0d4871f3a 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1513,8 +1513,8 @@ shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
 	inode = new_inode(sb);
 	if (inode) {
 		inode->i_mode = mode;
-		inode->i_uid = current->fsuid;
-		inode->i_gid = current->fsgid;
+		inode->i_uid = current_fsuid();
+		inode->i_gid = current_fsgid();
 		inode->i_blocks = 0;
 		inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
 		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
@@ -2278,8 +2278,8 @@ static int shmem_fill_super(struct super_block *sb,
 	sbinfo->max_blocks = 0;
 	sbinfo->max_inodes = 0;
 	sbinfo->mode = S_IRWXUGO | S_ISVTX;
-	sbinfo->uid = current->fsuid;
-	sbinfo->gid = current->fsgid;
+	sbinfo->uid = current_fsuid();
+	sbinfo->gid = current_fsgid();
 	sbinfo->mpol = NULL;
 	sb->s_fs_info = sbinfo;
 
diff --git a/mm/slob.c b/mm/slob.c
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -535,7 +535,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 	struct kmem_cache *c;
 
 	c = slob_alloc(sizeof(struct kmem_cache),
-		flags, ARCH_KMALLOC_MINALIGN, -1);
+		GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
 
 	if (c) {
 		c->name = name;
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 90cb67a5417c..54a9f87e5162 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1462,6 +1462,15 @@ static int __init procswaps_init(void)
 __initcall(procswaps_init);
 #endif /* CONFIG_PROC_FS */
 
+#ifdef MAX_SWAPFILES_CHECK
+static int __init max_swapfiles_check(void)
+{
+	MAX_SWAPFILES_CHECK();
+	return 0;
+}
+late_initcall(max_swapfiles_check);
+#endif
+
 /*
  * Written 01/25/92 by Simmule Turner, heavily changed by Linus.
  *
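
The new initcall exists mainly so that MAX_SWAPFILES_CHECK() is expanded in some translation unit; presumably it is a compile-time assertion, so max_swapfiles_check() does nothing at run time. The general pattern looks like the sketch below, with a made-up macro standing in for MAX_SWAPFILES_CHECK(), whose real definition is not part of this diff.

/*
 * Illustration only: the "compile-time check wrapped in an initcall"
 * pattern.  MY_LIMIT_CHECK() is a made-up stand-in for a macro like
 * MAX_SWAPFILES_CHECK(); BUILD_BUG_ON() fails the build, not the
 * boot, if the condition is violated.
 */
#include <linux/init.h>
#include <linux/kernel.h>

#define MY_INDEX_BITS	5
#define MY_MAX_OBJECTS	30
#define MY_LIMIT_CHECK()	BUILD_BUG_ON(MY_MAX_OBJECTS > (1 << MY_INDEX_BITS))

static int __init my_limit_check(void)
{
	MY_LIMIT_CHECK();	/* compiles away; nothing happens at run time */
	return 0;
}
late_initcall(my_limit_check);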