Diffstat (limited to 'mm')
 mm/Kconfig          |  1 
 mm/filemap.c        |  1 
 mm/fremap.c         |  2 
 mm/hugetlb.c        |  6 
 mm/memory.c         | 23 
 mm/mempolicy.c      | 84 
 mm/migrate.c        | 11 
 mm/page-writeback.c |  4 
 mm/page_alloc.c     |  2 
 mm/slub.c           | 46 
 10 files changed, 137 insertions(+), 43 deletions(-)
diff --git a/mm/Kconfig b/mm/Kconfig
index e24d348083c3..a7609cbcb00d 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -137,6 +137,7 @@ config SPLIT_PTLOCK_CPUS
 	int
 	default "4096" if ARM && !CPU_CACHE_VIPT
 	default "4096" if PARISC && !PA20
+	default "4096" if XEN
 	default "4"
 
 #
diff --git a/mm/filemap.c b/mm/filemap.c
index 90b657b50f81..15c8413ee929 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1388,6 +1388,7 @@ retry_find:
 	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
 	if (unlikely(vmf->pgoff >= size)) {
 		unlock_page(page);
+		page_cache_release(page);
 		goto outside_data_content;
 	}
 
diff --git a/mm/fremap.c b/mm/fremap.c
index c395b1abf082..95bcb5641c72 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -160,7 +160,7 @@ asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size,
 	if (vma->vm_private_data && !(vma->vm_flags & VM_NONLINEAR))
 		goto out;
 
-	if (!vma->vm_flags & VM_CAN_NONLINEAR)
+	if (!(vma->vm_flags & VM_CAN_NONLINEAR))
 		goto out;
 
 	if (end <= start || start < vma->vm_start || end > vma->vm_end)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index de4cf458d6e1..eab8c428cc93 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -42,7 +42,7 @@ static void clear_huge_page(struct page *page, unsigned long addr)
 	might_sleep();
 	for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); i++) {
 		cond_resched();
-		clear_user_highpage(page + i, addr);
+		clear_user_highpage(page + i, addr + i * PAGE_SIZE);
 	}
 }
 
@@ -71,8 +71,9 @@ static struct page *dequeue_huge_page(struct vm_area_struct *vma,
 {
 	int nid;
 	struct page *page = NULL;
+	struct mempolicy *mpol;
 	struct zonelist *zonelist = huge_zonelist(vma, address,
-					htlb_alloc_mask);
+					htlb_alloc_mask, &mpol);
 	struct zone **z;
 
 	for (z = zonelist->zones; *z; z++) {
@@ -87,6 +88,7 @@ static struct page *dequeue_huge_page(struct vm_area_struct *vma,
 			break;
 		}
 	}
+	mpol_free(mpol);	/* unref if mpol !NULL */
 	return page;
 }
 
diff --git a/mm/memory.c b/mm/memory.c
index ca8cac11bd2c..f82b359b2745 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1639,6 +1639,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	struct page *old_page, *new_page;
 	pte_t entry;
 	int reuse = 0, ret = 0;
+	int page_mkwrite = 0;
 	struct page *dirty_page = NULL;
 
 	old_page = vm_normal_page(vma, address, orig_pte);
@@ -1687,6 +1688,8 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			page_cache_release(old_page);
 			if (!pte_same(*page_table, orig_pte))
 				goto unlock;
+
+			page_mkwrite = 1;
 		}
 		dirty_page = old_page;
 		get_page(dirty_page);
@@ -1774,7 +1777,7 @@ unlock:
 		 * do_no_page is protected similarly.
 		 */
 		wait_on_page_locked(dirty_page);
-		set_page_dirty_balance(dirty_page);
+		set_page_dirty_balance(dirty_page, page_mkwrite);
 		put_page(dirty_page);
 	}
 	return ret;
@@ -2307,13 +2310,14 @@ oom:
  * do not need to flush old virtual caches or the TLB.
  *
  * We enter with non-exclusive mmap_sem (to exclude vma changes,
- * but allow concurrent faults), and pte mapped but not yet locked.
+ * but allow concurrent faults), and pte neither mapped nor locked.
  * We return with mmap_sem still held, but pte unmapped and unlocked.
  */
 static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
-		unsigned long address, pte_t *page_table, pmd_t *pmd,
+		unsigned long address, pmd_t *pmd,
 		pgoff_t pgoff, unsigned int flags, pte_t orig_pte)
 {
+	pte_t *page_table;
 	spinlock_t *ptl;
 	struct page *page;
 	pte_t entry;
@@ -2321,13 +2325,13 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	struct page *dirty_page = NULL;
 	struct vm_fault vmf;
 	int ret;
+	int page_mkwrite = 0;
 
 	vmf.virtual_address = (void __user *)(address & PAGE_MASK);
 	vmf.pgoff = pgoff;
 	vmf.flags = flags;
 	vmf.page = NULL;
 
-	pte_unmap(page_table);
 	BUG_ON(vma->vm_flags & VM_PFNMAP);
 
 	if (likely(vma->vm_ops->fault)) {
@@ -2398,6 +2402,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 				anon = 1; /* no anon but release vmf.page */
 				goto out;
 			}
+			page_mkwrite = 1;
 		}
 	}
 
@@ -2453,7 +2458,7 @@ out_unlocked:
 	if (anon)
 		page_cache_release(vmf.page);
 	else if (dirty_page) {
-		set_page_dirty_balance(dirty_page);
+		set_page_dirty_balance(dirty_page, page_mkwrite);
 		put_page(dirty_page);
 	}
 
@@ -2468,8 +2473,8 @@ static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 			- vma->vm_start) >> PAGE_CACHE_SHIFT) + vma->vm_pgoff;
 	unsigned int flags = (write_access ? FAULT_FLAG_WRITE : 0);
 
-	return __do_fault(mm, vma, address, page_table, pmd, pgoff,
-							flags, orig_pte);
+	pte_unmap(page_table);
+	return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
 }
 
 
@@ -2552,9 +2557,7 @@ static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	}
 
 	pgoff = pte_to_pgoff(orig_pte);
-
-	return __do_fault(mm, vma, address, page_table, pmd, pgoff,
-							flags, orig_pte);
+	return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
 }
 
 /*
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 172abffeb2e3..3d6ac9505d07 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -955,6 +955,11 @@ asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
 		goto out;
 	}
 
+	if (!nodes_subset(new, node_online_map)) {
+		err = -EINVAL;
+		goto out;
+	}
+
 	err = security_task_movememory(task);
 	if (err)
 		goto out;
@@ -1072,21 +1077,37 @@ asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
 
 #endif
 
-/* Return effective policy for a VMA */
+/*
+ * get_vma_policy(@task, @vma, @addr)
+ * @task - task for fallback if vma policy == default
+ * @vma - virtual memory area whose policy is sought
+ * @addr - address in @vma for shared policy lookup
+ *
+ * Returns effective policy for a VMA at specified address.
+ * Falls back to @task or system default policy, as necessary.
+ * Returned policy has extra reference count if shared, vma,
+ * or some other task's policy [show_numa_maps() can pass
+ * @task != current]. It is the caller's responsibility to
+ * free the reference in these cases.
+ */
 static struct mempolicy * get_vma_policy(struct task_struct *task,
 		struct vm_area_struct *vma, unsigned long addr)
 {
 	struct mempolicy *pol = task->mempolicy;
+	int shared_pol = 0;
 
 	if (vma) {
-		if (vma->vm_ops && vma->vm_ops->get_policy)
+		if (vma->vm_ops && vma->vm_ops->get_policy) {
 			pol = vma->vm_ops->get_policy(vma, addr);
-		else if (vma->vm_policy &&
+			shared_pol = 1;	/* if pol non-NULL, add ref below */
+		} else if (vma->vm_policy &&
 				vma->vm_policy->policy != MPOL_DEFAULT)
 			pol = vma->vm_policy;
 	}
 	if (!pol)
 		pol = &default_policy;
+	else if (!shared_pol && pol != current->mempolicy)
+		mpol_get(pol);	/* vma or other task's policy */
 	return pol;
 }
 
@@ -1202,19 +1223,45 @@ static inline unsigned interleave_nid(struct mempolicy *pol,
 }
 
 #ifdef CONFIG_HUGETLBFS
-/* Return a zonelist suitable for a huge page allocation. */
+/*
+ * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
+ * @vma = virtual memory area whose policy is sought
+ * @addr = address in @vma for shared policy lookup and interleave policy
+ * @gfp_flags = for requested zone
+ * @mpol = pointer to mempolicy pointer for reference counted 'BIND policy
+ *
+ * Returns a zonelist suitable for a huge page allocation.
+ * If the effective policy is 'BIND, returns pointer to policy's zonelist.
+ * If it is also a policy for which get_vma_policy() returns an extra
+ * reference, we must hold that reference until after allocation.
+ * In that case, return policy via @mpol so hugetlb allocation can drop
+ * the reference. For non-'BIND referenced policies, we can/do drop the
+ * reference here, so the caller doesn't need to know about the special case
+ * for default and current task policy.
+ */
 struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
-				gfp_t gfp_flags)
+				gfp_t gfp_flags, struct mempolicy **mpol)
 {
 	struct mempolicy *pol = get_vma_policy(current, vma, addr);
+	struct zonelist *zl;
 
+	*mpol = NULL;		/* probably no unref needed */
 	if (pol->policy == MPOL_INTERLEAVE) {
 		unsigned nid;
 
 		nid = interleave_nid(pol, vma, addr, HPAGE_SHIFT);
+		__mpol_free(pol);		/* finished with pol */
 		return NODE_DATA(nid)->node_zonelists + gfp_zone(gfp_flags);
 	}
-	return zonelist_policy(GFP_HIGHUSER, pol);
+
+	zl = zonelist_policy(GFP_HIGHUSER, pol);
+	if (unlikely(pol != &default_policy && pol != current->mempolicy)) {
+		if (pol->policy != MPOL_BIND)
+			__mpol_free(pol);	/* finished with pol */
+		else
+			*mpol = pol;	/* unref needed after allocation */
+	}
+	return zl;
 }
 #endif
 
@@ -1259,6 +1306,7 @@ struct page *
 alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
 {
 	struct mempolicy *pol = get_vma_policy(current, vma, addr);
+	struct zonelist *zl;
 
 	cpuset_update_task_memory_state();
 
@@ -1268,7 +1316,19 @@ alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
 		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT);
 		return alloc_page_interleave(gfp, 0, nid);
 	}
-	return __alloc_pages(gfp, 0, zonelist_policy(gfp, pol));
+	zl = zonelist_policy(gfp, pol);
+	if (pol != &default_policy && pol != current->mempolicy) {
+		/*
+		 * slow path: ref counted policy -- shared or vma
+		 */
+		struct page *page = __alloc_pages(gfp, 0, zl);
+		__mpol_free(pol);
+		return page;
+	}
+	/*
+	 * fast path: default or task policy
+	 */
+	return __alloc_pages(gfp, 0, zl);
 }
 
 /**
@@ -1867,6 +1927,7 @@ int show_numa_map(struct seq_file *m, void *v)
 	struct numa_maps *md;
 	struct file *file = vma->vm_file;
 	struct mm_struct *mm = vma->vm_mm;
+	struct mempolicy *pol;
 	int n;
 	char buffer[50];
 
@@ -1877,8 +1938,13 @@ int show_numa_map(struct seq_file *m, void *v)
 	if (!md)
 		return 0;
 
-	mpol_to_str(buffer, sizeof(buffer),
-			get_vma_policy(priv->task, vma, vma->vm_start));
+	pol = get_vma_policy(priv->task, vma, vma->vm_start);
+	mpol_to_str(buffer, sizeof(buffer), pol);
+	/*
+	 * unref shared or other task's mempolicy
+	 */
+	if (pol != &default_policy && pol != current->mempolicy)
+		__mpol_free(pol);
 
 	seq_printf(m, "%08lx %s", vma->vm_start, buffer);
 
diff --git a/mm/migrate.c b/mm/migrate.c
index 37c73b902008..e2fdbce1874b 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -611,6 +611,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 	int rc = 0;
 	int *result = NULL;
 	struct page *newpage = get_new_page(page, private, &result);
+	int rcu_locked = 0;
 
 	if (!newpage)
 		return -ENOMEM;
@@ -636,8 +637,13 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 	 * we cannot notice that anon_vma is freed while we migrates a page.
 	 * This rcu_read_lock() delays freeing anon_vma pointer until the end
 	 * of migration. File cache pages are no problem because of page_lock()
+	 * File Caches may use write_page() or lock_page() in migration, then,
+	 * just care Anon page here.
 	 */
-	rcu_read_lock();
+	if (PageAnon(page)) {
+		rcu_read_lock();
+		rcu_locked = 1;
+	}
 	/*
 	 * This is a corner case handling.
 	 * When a new swap-cache is read into, it is linked to LRU
@@ -656,7 +662,8 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 	if (rc)
 		remove_migration_ptes(page, page);
 rcu_unlock:
-	rcu_read_unlock();
+	if (rcu_locked)
+		rcu_read_unlock();
 
 unlock:
 
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 63512a9ed57e..44720363374c 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -274,9 +274,9 @@ static void balance_dirty_pages(struct address_space *mapping)
 		pdflush_operation(background_writeout, 0);
 }
 
-void set_page_dirty_balance(struct page *page)
+void set_page_dirty_balance(struct page *page, int page_mkwrite)
 {
-	if (set_page_dirty(page)) {
+	if (set_page_dirty(page) || page_mkwrite) {
 		struct address_space *mapping = page_mapping(page);
 
 		if (mapping)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6427653023aa..1a8c59571cb7 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2345,6 +2345,8 @@ static int __cpuinit process_zones(int cpu)
 	return 0;
 bad:
 	for_each_zone(dzone) {
+		if (!populated_zone(dzone))
+			continue;
 		if (dzone == zone)
 			break;
 		kfree(zone_pcp(dzone, cpu));
diff --git a/mm/slub.c b/mm/slub.c
index 04151da399c6..addb20a6d67d 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -986,7 +986,9 @@ out:
 
 __setup("slub_debug", setup_slub_debug);
 
-static void kmem_cache_open_debug_check(struct kmem_cache *s)
+static unsigned long kmem_cache_flags(unsigned long objsize,
+	unsigned long flags, const char *name,
+	void (*ctor)(void *, struct kmem_cache *, unsigned long))
 {
 	/*
 	 * The page->offset field is only 16 bit wide. This is an offset
@@ -1000,19 +1002,21 @@ static void kmem_cache_open_debug_check(struct kmem_cache *s)
 	 * Debugging or ctor may create a need to move the free
 	 * pointer. Fail if this happens.
 	 */
-	if (s->objsize >= 65535 * sizeof(void *)) {
-		BUG_ON(s->flags & (SLAB_RED_ZONE | SLAB_POISON |
+	if (objsize >= 65535 * sizeof(void *)) {
+		BUG_ON(flags & (SLAB_RED_ZONE | SLAB_POISON |
 				SLAB_STORE_USER | SLAB_DESTROY_BY_RCU));
-		BUG_ON(s->ctor);
-	}
-	else
+		BUG_ON(ctor);
+	} else {
 		/*
 		 * Enable debugging if selected on the kernel commandline.
 		 */
 		if (slub_debug && (!slub_debug_slabs ||
-		    strncmp(slub_debug_slabs, s->name,
+		    strncmp(slub_debug_slabs, name,
 			strlen(slub_debug_slabs)) == 0))
-			s->flags |= slub_debug;
+			flags |= slub_debug;
+	}
+
+	return flags;
 }
 #else
 static inline void setup_object_debug(struct kmem_cache *s,
@@ -1029,7 +1033,12 @@ static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
 static inline int check_object(struct kmem_cache *s, struct page *page,
 			void *object, int active) { return 1; }
 static inline void add_full(struct kmem_cache_node *n, struct page *page) {}
-static inline void kmem_cache_open_debug_check(struct kmem_cache *s) {}
+static inline unsigned long kmem_cache_flags(unsigned long objsize,
+	unsigned long flags, const char *name,
+	void (*ctor)(void *, struct kmem_cache *, unsigned long))
+{
+	return flags;
+}
 #define slub_debug 0
 #endif
 /*
@@ -2088,9 +2097,8 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
 	s->name = name;
 	s->ctor = ctor;
 	s->objsize = size;
-	s->flags = flags;
 	s->align = align;
-	kmem_cache_open_debug_check(s);
+	s->flags = kmem_cache_flags(size, flags, name, ctor);
 
 	if (!calculate_sizes(s))
 		goto error;
@@ -2660,7 +2668,7 @@ static int slab_unmergeable(struct kmem_cache *s)
 }
 
 static struct kmem_cache *find_mergeable(size_t size,
-		size_t align, unsigned long flags,
+		size_t align, unsigned long flags, const char *name,
 		void (*ctor)(void *, struct kmem_cache *, unsigned long))
 {
 	struct kmem_cache *s;
@@ -2674,6 +2682,7 @@ static struct kmem_cache *find_mergeable(size_t size,
 	size = ALIGN(size, sizeof(void *));
 	align = calculate_alignment(flags, align, size);
 	size = ALIGN(size, align);
+	flags = kmem_cache_flags(size, flags, name, NULL);
 
 	list_for_each_entry(s, &slab_caches, list) {
 		if (slab_unmergeable(s))
@@ -2682,8 +2691,7 @@ static struct kmem_cache *find_mergeable(size_t size,
 		if (size > s->size)
 			continue;
 
-		if (((flags | slub_debug) & SLUB_MERGE_SAME) !=
-				(s->flags & SLUB_MERGE_SAME))
+		if ((flags & SLUB_MERGE_SAME) != (s->flags & SLUB_MERGE_SAME))
 			continue;
 		/*
 		 * Check if alignment is compatible.
@@ -2707,7 +2715,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 	struct kmem_cache *s;
 
 	down_write(&slub_lock);
-	s = find_mergeable(size, align, flags, ctor);
+	s = find_mergeable(size, align, flags, name, ctor);
 	if (s) {
 		s->refcount++;
 		/*
@@ -3813,7 +3821,9 @@ static int __init slab_sysfs_init(void)
 
 	list_for_each_entry(s, &slab_caches, list) {
 		err = sysfs_slab_add(s);
-		BUG_ON(err);
+		if (err)
+			printk(KERN_ERR "SLUB: Unable to add boot slab %s"
+						" to sysfs\n", s->name);
 	}
 
 	while (alias_list) {
@@ -3821,7 +3831,9 @@ static int __init slab_sysfs_init(void)
 
 		alias_list = alias_list->next;
 		err = sysfs_slab_alias(al->s, al->name);
-		BUG_ON(err);
+		if (err)
+			printk(KERN_ERR "SLUB: Unable to add boot slab alias"
+					" %s to sysfs\n", s->name);
 		kfree(al);
 	}
 