author    David S. Miller <davem@davemloft.net>  2008-07-18 05:39:39 -0400
committer David S. Miller <davem@davemloft.net>  2008-07-18 05:39:39 -0400
commit    49997d75152b3d23c53b0fa730599f2f74c92c65 (patch)
tree      46e93126170d02cfec9505172e545732c1b69656 /mm
parent    a0c80b80e0fb48129e4e9d6a9ede914f9ff1850d (diff)
parent    5b664cb235e97afbf34db9c4d77f08ebd725335e (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/torvalds/linux-2.6
Conflicts:
    Documentation/powerpc/booting-without-of.txt
    drivers/atm/Makefile
    drivers/net/fs_enet/fs_enet-main.c
    drivers/pci/pci-acpi.c
    net/8021q/vlan.c
    net/iucv/iucv.c
Diffstat (limited to 'mm')
-rw-r--r--  mm/Kconfig             4
-rw-r--r--  mm/allocpercpu.c       2
-rw-r--r--  mm/filemap.c           3
-rw-r--r--  mm/memory.c           16
-rw-r--r--  mm/mempolicy.c         6
-rw-r--r--  mm/migrate.c           2
-rw-r--r--  mm/mmap.c              5
-rw-r--r--  mm/mprotect.c         12
-rw-r--r--  mm/page-writeback.c   13
-rw-r--r--  mm/page_alloc.c       97
-rw-r--r--  mm/slab.c             18
-rw-r--r--  mm/slub.c             28
-rw-r--r--  mm/sparse-vmemmap.c    2
13 files changed, 145 insertions(+), 63 deletions(-)
diff --git a/mm/Kconfig b/mm/Kconfig
index 3aa819d628c1..c4de85285bb4 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -129,7 +129,7 @@ config MEMORY_HOTPLUG
     bool "Allow for memory hot-add"
     depends on SPARSEMEM || X86_64_ACPI_NUMA
     depends on HOTPLUG && !HIBERNATION && ARCH_ENABLE_MEMORY_HOTPLUG
-    depends on (IA64 || X86 || PPC64 || SUPERH)
+    depends on (IA64 || X86 || PPC64 || SUPERH || S390)
 
 comment "Memory hotplug is currently incompatible with Software Suspend"
     depends on SPARSEMEM && HOTPLUG && HIBERNATION
@@ -199,7 +199,7 @@ config BOUNCE
 config NR_QUICK
     int
     depends on QUICKLIST
-    default "2" if SUPERH
+    default "2" if SUPERH || AVR32
     default "1"
 
 config VIRT_TO_BUS
diff --git a/mm/allocpercpu.c b/mm/allocpercpu.c
index f4026bae6eed..05f2b4009ccc 100644
--- a/mm/allocpercpu.c
+++ b/mm/allocpercpu.c
@@ -1,7 +1,7 @@
 /*
  * linux/mm/allocpercpu.c
  *
- * Separated from slab.c August 11, 2006 Christoph Lameter <clameter@sgi.com>
+ * Separated from slab.c August 11, 2006 Christoph Lameter
  */
 #include <linux/mm.h>
 #include <linux/module.h>
diff --git a/mm/filemap.c b/mm/filemap.c
index 1e6a7d34874f..65d9d9e2b755 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -236,11 +236,12 @@ int filemap_fdatawrite(struct address_space *mapping)
 }
 EXPORT_SYMBOL(filemap_fdatawrite);
 
-static int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
+int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
                 loff_t end)
 {
     return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
 }
+EXPORT_SYMBOL(filemap_fdatawrite_range);
 
 /**
  * filemap_flush - mostly a non-blocking flush
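
Note on the filemap.c hunk above: filemap_fdatawrite_range() loses its static marker and gains an EXPORT_SYMBOL, so modular code can start ranged writeback directly. A minimal, hedged caller sketch follows; the flush_file_range() helper is invented for illustration, and it assumes the declaration is visible through <linux/fs.h>:

    #include <linux/fs.h>

    /* Hypothetical helper: push dirty pagecache covering
     * [pos, pos + count - 1] out to storage using the newly
     * exported ranged-writeback entry point. */
    static int flush_file_range(struct inode *inode, loff_t pos, size_t count)
    {
        return filemap_fdatawrite_range(inode->i_mapping, pos,
                        pos + count - 1);
    }
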
diff --git a/mm/memory.c b/mm/memory.c
index d14b251a25a6..2302d228fe04 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1151,7 +1151,7 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
      * be processed until returning to user space.
      */
     if (unlikely(test_tsk_thread_flag(tsk, TIF_MEMDIE)))
-        return -ENOMEM;
+        return i ? i : -ENOMEM;
 
     if (write)
         foll_flags |= FOLL_WRITE;
@@ -1697,8 +1697,19 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
     struct page *dirty_page = NULL;
 
     old_page = vm_normal_page(vma, address, orig_pte);
-    if (!old_page)
+    if (!old_page) {
+        /*
+         * VM_MIXEDMAP !pfn_valid() case
+         *
+         * We should not cow pages in a shared writeable mapping.
+         * Just mark the pages writable as we can't do any dirty
+         * accounting on raw pfn maps.
+         */
+        if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
+                     (VM_WRITE|VM_SHARED))
+            goto reuse;
         goto gotten;
+    }
 
     /*
      * Take out anonymous pages first, anonymous shared vmas are
@@ -1751,6 +1762,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
     }
 
     if (reuse) {
+reuse:
         flush_cache_page(vma, address, pte_pfn(orig_pte));
         entry = pte_mkyoung(orig_pte);
         entry = maybe_mkwrite(pte_mkdirty(entry), vma);
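
Note on the first memory.c hunk: when the caller has been picked by the OOM killer (TIF_MEMDIE), get_user_pages() now reports the pages it already pinned instead of a flat -ENOMEM. A hedged sketch of what that means for a caller, using the 2.6.26-era eight-argument signature; pin_user_range() and its policy are invented for illustration:

    #include <linux/mm.h>
    #include <linux/sched.h>

    /* Hypothetical caller: pin 'nr' user pages at 'start' for writing.
     * Because get_user_pages() may now return a partial count, any
     * partially pinned pages must be released before failing. */
    static int pin_user_range(struct task_struct *tsk, struct mm_struct *mm,
                  unsigned long start, int nr, struct page **pages)
    {
        int got;

        down_read(&mm->mmap_sem);
        got = get_user_pages(tsk, mm, start, nr, 1 /* write */,
                     0 /* force */, pages, NULL);
        up_read(&mm->mmap_sem);

        if (got == nr)
            return 0;

        while (got > 0)            /* partial pin: drop the references */
            put_page(pages[--got]);
        return -EFAULT;
    }
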
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index a37a5034f63d..c94e58b192c3 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -729,7 +729,11 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
     } else {
         *policy = pol == &default_policy ? MPOL_DEFAULT :
                         pol->mode;
-        *policy |= pol->flags;
+        /*
+         * Internal mempolicy flags must be masked off before exposing
+         * the policy to userspace.
+         */
+        *policy |= (pol->flags & MPOL_MODE_FLAGS);
     }
 
     if (vma) {
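
Note on the mempolicy.c hunk: only the user-visible mode flags (MPOL_MODE_FLAGS, i.e. MPOL_F_STATIC_NODES | MPOL_F_RELATIVE_NODES) are ORed into the value reported to userspace; internal flags stay hidden. A hedged userspace sketch of decomposing the returned value, assuming the MPOL_F_* mode-flag constants are visible alongside libnuma's <numaif.h>; show_task_policy() is invented for illustration:

    #include <numaif.h>
    #include <stdio.h>

    /* Query the calling task's policy and split the result into the
     * base mode and the user-visible mode flags. */
    int show_task_policy(void)
    {
        int mode;

        if (get_mempolicy(&mode, NULL, 0, NULL, 0) != 0)
            return -1;

        int flags = mode & (MPOL_F_STATIC_NODES | MPOL_F_RELATIVE_NODES);
        mode &= ~(MPOL_F_STATIC_NODES | MPOL_F_RELATIVE_NODES);

        printf("mode=%d flags=%#x\n", mode, flags);
        return 0;
    }
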
diff --git a/mm/migrate.c b/mm/migrate.c
index 112bcaeaa104..55bd355d170d 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -9,7 +9,7 @@
  * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
  * Hirokazu Takahashi <taka@valinux.co.jp>
  * Dave Hansen <haveblue@us.ibm.com>
- * Christoph Lameter <clameter@sgi.com>
+ * Christoph Lameter
  */
 
 #include <linux/migrate.h>
diff --git a/mm/mmap.c b/mm/mmap.c
index 3354fdd83d4b..1d102b956fd8 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -72,8 +72,9 @@ pgprot_t protection_map[16] = {
 
 pgprot_t vm_get_page_prot(unsigned long vm_flags)
 {
-    return protection_map[vm_flags &
-                (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
+    return __pgprot(pgprot_val(protection_map[vm_flags &
+                (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
+            pgprot_val(arch_vm_get_page_prot(vm_flags)));
 }
 EXPORT_SYMBOL(vm_get_page_prot);
 
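
Note on the mmap.c hunk: vm_get_page_prot() now ORs in whatever arch_vm_get_page_prot() returns, giving an architecture a hook to translate additional vma flags into pte protection bits; the generic fallback contributes nothing. A hedged sketch of the hook's shape, with VM_FOO and _PAGE_FOO as invented placeholders:

    /* Generic fallback (roughly as in include/linux/mman.h): no extra bits. */
    #ifndef arch_vm_get_page_prot
    #define arch_vm_get_page_prot(vm_flags) __pgprot(0)
    #endif

    /*
     * A hypothetical per-arch override maps an arch-specific vma flag
     * onto an arch-specific pte bit, e.g.:
     *
     *    #define arch_vm_get_page_prot(vm_flags) \
     *        __pgprot((vm_flags) & VM_FOO ? _PAGE_FOO : 0)
     */
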
diff --git a/mm/mprotect.c b/mm/mprotect.c
index a5bf31c27375..360d9cc8b38c 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -47,19 +47,17 @@ static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
         if (pte_present(oldpte)) {
             pte_t ptent;
 
-            /* Avoid an SMP race with hardware updated dirty/clean
-             * bits by wiping the pte and then setting the new pte
-             * into place.
-             */
-            ptent = ptep_get_and_clear(mm, addr, pte);
+            ptent = ptep_modify_prot_start(mm, addr, pte);
             ptent = pte_modify(ptent, newprot);
+
             /*
              * Avoid taking write faults for pages we know to be
              * dirty.
              */
             if (dirty_accountable && pte_dirty(ptent))
                 ptent = pte_mkwrite(ptent);
-            set_pte_at(mm, addr, pte, ptent);
+
+            ptep_modify_prot_commit(mm, addr, pte, ptent);
 #ifdef CONFIG_MIGRATION
         } else if (!pte_file(oldpte)) {
             swp_entry_t entry = pte_to_swp_entry(oldpte);
@@ -239,7 +237,7 @@ sys_mprotect(unsigned long start, size_t len, unsigned long prot)
     end = start + len;
     if (end <= start)
         return -ENOMEM;
-    if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM))
+    if (!arch_validate_prot(prot))
         return -EINVAL;
 
     reqprot = prot;
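
Note on the mprotect.c hunks: the open-coded wipe-then-set sequence is replaced by the ptep_modify_prot_start()/ptep_modify_prot_commit() pair (so paravirtualized guests can intercept or batch the transition), and the PROT_* sanity check moves behind arch_validate_prot() so an architecture can accept extra protection bits. A hedged sketch of what the generic fallbacks amount to; details may differ from the real asm-generic headers:

    /* Start a protection update: unhook the pte so hardware cannot race
     * with us on the dirty/accessed bits (the behaviour the old
     * open-coded sequence relied on). */
    static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
                           unsigned long addr, pte_t *ptep)
    {
        return ptep_get_and_clear(mm, addr, ptep);
    }

    /* Commit the modified pte back into the page table. */
    static inline void ptep_modify_prot_commit(struct mm_struct *mm,
                           unsigned long addr, pte_t *ptep,
                           pte_t pte)
    {
        set_pte_at(mm, addr, ptep, pte);
    }

    /* Generic arch_validate_prot(): accept only the classic PROT_* bits,
     * matching the check sys_mprotect() used to perform inline. */
    #define arch_validate_prot(prot) \
        (((prot) & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM)) == 0)
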
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 789b6adbef37..94c6d8988ab3 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -126,8 +126,6 @@ static void background_writeout(unsigned long _min_pages);
 static struct prop_descriptor vm_completions;
 static struct prop_descriptor vm_dirties;
 
-static unsigned long determine_dirtyable_memory(void);
-
 /*
  * couple the period to the dirty_ratio:
  *
@@ -347,7 +345,13 @@ static unsigned long highmem_dirtyable_memory(unsigned long total)
 #endif
 }
 
-static unsigned long determine_dirtyable_memory(void)
+/**
+ * determine_dirtyable_memory - amount of memory that may be used
+ *
+ * Returns the number of pages that can currently be freed and used
+ * by the kernel for direct mappings.
+ */
+unsigned long determine_dirtyable_memory(void)
 {
     unsigned long x;
 
@@ -956,6 +960,9 @@ retry:
     }
     if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
         mapping->writeback_index = index;
+
+    if (wbc->range_cont)
+        wbc->range_start = index << PAGE_CACHE_SHIFT;
     return ret;
 }
 EXPORT_SYMBOL(write_cache_pages);
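
Note on the last page-writeback.c hunk: when the caller sets wbc->range_cont, write_cache_pages() records where it stopped in wbc->range_start so a follow-up call can resume the same range. A hedged sketch of a caller relying on that continuation; write_in_passes(), my_writepage() and the pass size are invented for illustration:

    /* Illustrative only: write a mapping out in bounded passes, letting
     * write_cache_pages() advance wbc.range_start between passes via
     * the new range_cont behaviour. 'my_writepage' stands in for a real
     * filesystem writepage-style callback. */
    static int write_in_passes(struct address_space *mapping)
    {
        struct writeback_control wbc = {
            .sync_mode   = WB_SYNC_NONE,
            .range_start = 0,
            .range_end   = LLONG_MAX,
            .range_cont  = 1,
        };
        int ret;

        do {
            wbc.nr_to_write = 1024;        /* one pass at a time */
            ret = write_cache_pages(mapping, &wbc, my_writepage, mapping);
        } while (!ret && wbc.nr_to_write <= 0);   /* quota used: keep going */

        return ret;
    }
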
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2f552955a02f..79ac4afc908c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -918,7 +918,7 @@ void drain_local_pages(void *arg)
  */
 void drain_all_pages(void)
 {
-    on_each_cpu(drain_local_pages, NULL, 0, 1);
+    on_each_cpu(drain_local_pages, NULL, 1);
 }
 
 #ifdef CONFIG_HIBERNATION
@@ -2328,7 +2328,6 @@ static void build_zonelists(pg_data_t *pgdat)
 static void build_zonelist_cache(pg_data_t *pgdat)
 {
     pgdat->node_zonelists[0].zlcache_ptr = NULL;
-    pgdat->node_zonelists[1].zlcache_ptr = NULL;
 }
 
 #endif    /* CONFIG_NUMA */
@@ -2930,6 +2929,18 @@ void __init free_bootmem_with_active_regions(int nid,
     }
 }
 
+void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data)
+{
+    int i;
+    int ret;
+
+    for_each_active_range_index_in_nid(i, nid) {
+        ret = work_fn(early_node_map[i].start_pfn,
+                  early_node_map[i].end_pfn, data);
+        if (ret)
+            break;
+    }
+}
 /**
  * sparse_memory_present_with_active_regions - Call memory_present for each active range
  * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
@@ -3462,6 +3473,11 @@ void __paginginit free_area_init_node(int nid, struct pglist_data *pgdat,
     calculate_node_totalpages(pgdat, zones_size, zholes_size);
 
     alloc_node_mem_map(pgdat);
+#ifdef CONFIG_FLAT_NODE_MEM_MAP
+    printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
+        nid, (unsigned long)pgdat,
+        (unsigned long)pgdat->node_mem_map);
+#endif
 
     free_area_init_core(pgdat, zones_size, zholes_size);
 }
@@ -3504,7 +3520,7 @@ void __init add_active_range(unsigned int nid, unsigned long start_pfn,
 {
     int i;
 
-    printk(KERN_DEBUG "Entering add_active_range(%d, %lu, %lu) "
+    printk(KERN_DEBUG "Entering add_active_range(%d, %#lx, %#lx) "
               "%d entries of %d used\n",
               nid, start_pfn, end_pfn,
               nr_nodemap_entries, MAX_ACTIVE_REGIONS);
@@ -3548,27 +3564,68 @@ void __init add_active_range(unsigned int nid, unsigned long start_pfn,
 }
 
 /**
- * shrink_active_range - Shrink an existing registered range of PFNs
+ * remove_active_range - Shrink an existing registered range of PFNs
  * @nid: The node id the range is on that should be shrunk
- * @old_end_pfn: The old end PFN of the range
- * @new_end_pfn: The new PFN of the range
+ * @start_pfn: The new PFN of the range
+ * @end_pfn: The new PFN of the range
  *
  * i386 with NUMA use alloc_remap() to store a node_mem_map on a local node.
- * The map is kept at the end physical page range that has already been
- * registered with add_active_range(). This function allows an arch to shrink
- * an existing registered range.
+ * The map is kept near the end physical page range that has already been
+ * registered. This function allows an arch to shrink an existing registered
+ * range.
  */
-void __init shrink_active_range(unsigned int nid, unsigned long old_end_pfn,
-                        unsigned long new_end_pfn)
+void __init remove_active_range(unsigned int nid, unsigned long start_pfn,
+                        unsigned long end_pfn)
 {
-    int i;
+    int i, j;
+    int removed = 0;
+
+    printk(KERN_DEBUG "remove_active_range (%d, %lu, %lu)\n",
+        nid, start_pfn, end_pfn);
 
     /* Find the old active region end and shrink */
-    for_each_active_range_index_in_nid(i, nid)
-        if (early_node_map[i].end_pfn == old_end_pfn) {
-            early_node_map[i].end_pfn = new_end_pfn;
-            break;
+    for_each_active_range_index_in_nid(i, nid) {
+        if (early_node_map[i].start_pfn >= start_pfn &&
+            early_node_map[i].end_pfn <= end_pfn) {
+            /* clear it */
+            early_node_map[i].start_pfn = 0;
+            early_node_map[i].end_pfn = 0;
+            removed = 1;
+            continue;
+        }
+        if (early_node_map[i].start_pfn < start_pfn &&
+            early_node_map[i].end_pfn > start_pfn) {
+            unsigned long temp_end_pfn = early_node_map[i].end_pfn;
+            early_node_map[i].end_pfn = start_pfn;
+            if (temp_end_pfn > end_pfn)
+                add_active_range(nid, end_pfn, temp_end_pfn);
+            continue;
         }
+        if (early_node_map[i].start_pfn >= start_pfn &&
+            early_node_map[i].end_pfn > end_pfn &&
+            early_node_map[i].start_pfn < end_pfn) {
+            early_node_map[i].start_pfn = end_pfn;
+            continue;
+        }
+    }
+
+    if (!removed)
+        return;
+
+    /* remove the blank ones */
+    for (i = nr_nodemap_entries - 1; i > 0; i--) {
+        if (early_node_map[i].nid != nid)
+            continue;
+        if (early_node_map[i].end_pfn)
+            continue;
+        /* we found it, get rid of it */
+        for (j = i; j < nr_nodemap_entries - 1; j++)
+            memcpy(&early_node_map[j], &early_node_map[j+1],
+                sizeof(early_node_map[j]));
+        j = nr_nodemap_entries - 1;
+        memset(&early_node_map[j], 0, sizeof(early_node_map[j]));
+        nr_nodemap_entries--;
+    }
 }
 
 /**
@@ -3612,7 +3669,7 @@ static void __init sort_node_map(void)
 }
 
 /* Find the lowest pfn for a node */
-unsigned long __init find_min_pfn_for_node(unsigned long nid)
+unsigned long __init find_min_pfn_for_node(int nid)
 {
     int i;
     unsigned long min_pfn = ULONG_MAX;
@@ -3623,7 +3680,7 @@ unsigned long __init find_min_pfn_for_node(unsigned long nid)
 
     if (min_pfn == ULONG_MAX) {
         printk(KERN_WARNING
-            "Could not find start_pfn for node %lu\n", nid);
+            "Could not find start_pfn for node %d\n", nid);
         return 0;
     }
 
@@ -3879,7 +3936,7 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
     for (i = 0; i < MAX_NR_ZONES; i++) {
         if (i == ZONE_MOVABLE)
             continue;
-        printk("  %-8s %8lu -> %8lu\n",
+        printk("  %-8s %0#10lx -> %0#10lx\n",
                 zone_names[i],
                 arch_zone_lowest_possible_pfn[i],
                 arch_zone_highest_possible_pfn[i]);
@@ -3895,7 +3952,7 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
     /* Print out the early_node_map[] */
     printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries);
     for (i = 0; i < nr_nodemap_entries; i++)
-        printk("  %3d: %8lu -> %8lu\n", early_node_map[i].nid,
+        printk("  %3d: %0#10lx -> %0#10lx\n", early_node_map[i].nid,
                 early_node_map[i].start_pfn,
                 early_node_map[i].end_pfn);
 
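
Note on the new work_with_active_regions() helper added above: it walks every registered early_node_map[] range on a node and hands each (start_pfn, end_pfn) pair plus an opaque data pointer to the callback; a non-zero return stops the walk early. A hedged sketch of a callback that just totals the pages on node 0; count_pages() and pages_on_node0() are invented for illustration:

    /* Hypothetical callback: accumulate the size of each active range.
     * Returning non-zero would stop the walk early. */
    static int __init count_pages(unsigned long start_pfn,
                      unsigned long end_pfn, void *data)
    {
        unsigned long *total = data;

        *total += end_pfn - start_pfn;
        return 0;    /* keep iterating */
    }

    static unsigned long __init pages_on_node0(void)
    {
        unsigned long total = 0;

        work_with_active_regions(0, count_pages, &total);
        return total;
    }
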
diff --git a/mm/slab.c b/mm/slab.c
index 046607f05f3e..052e7d64537e 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1901,15 +1901,7 @@ static void check_poison_obj(struct kmem_cache *cachep, void *objp)
 #endif
 
 #if DEBUG
-/**
- * slab_destroy_objs - destroy a slab and its objects
- * @cachep: cache pointer being destroyed
- * @slabp: slab pointer being destroyed
- *
- * Call the registered destructor for each object in a slab that is being
- * destroyed.
- */
-static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
+static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slabp)
 {
     int i;
     for (i = 0; i < cachep->num; i++) {
@@ -1938,7 +1930,7 @@ static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
     }
 }
 #else
-static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
+static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slabp)
 {
 }
 #endif
@@ -1956,7 +1948,7 @@ static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
 {
     void *addr = slabp->s_mem - slabp->colouroff;
 
-    slab_destroy_objs(cachep, slabp);
+    slab_destroy_debugcheck(cachep, slabp);
     if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
         struct slab_rcu *slab_rcu;
 
@@ -2454,7 +2446,7 @@ static void drain_cpu_caches(struct kmem_cache *cachep)
     struct kmem_list3 *l3;
     int node;
 
-    on_each_cpu(do_drain, cachep, 1, 1);
+    on_each_cpu(do_drain, cachep, 1);
     check_irq_on();
     for_each_online_node(node) {
         l3 = cachep->nodelists[node];
@@ -3939,7 +3931,7 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
     }
     new->cachep = cachep;
 
-    on_each_cpu(do_ccupdate_local, (void *)new, 1, 1);
+    on_each_cpu(do_ccupdate_local, (void *)new, 1);
 
     check_irq_on();
     cachep->batchcount = batchcount;
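
Note on the on_each_cpu() hunks in slab.c (and the matching ones in page_alloc.c and slub.c): they track the smp_call_function()/on_each_cpu() API change that drops the old "retry" argument, leaving three parameters. For reference, the new call shape is roughly:

    /* 2.6.27-era prototype: run 'func(info)' on every online CPU;
     * 'wait' selects whether to block until all CPUs have finished. */
    int on_each_cpu(void (*func)(void *info), void *info, int wait);
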
diff --git a/mm/slub.c b/mm/slub.c
index 0987d1cd943c..35ab38a94b46 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -5,7 +5,7 @@
  * The allocator synchronizes using per slab locks and only
  * uses a centralized lock to manage a pool of partial slabs.
  *
- * (C) 2007 SGI, Christoph Lameter <clameter@sgi.com>
+ * (C) 2007 SGI, Christoph Lameter
  */
 
 #include <linux/mm.h>
@@ -411,7 +411,7 @@ static void set_track(struct kmem_cache *s, void *object,
     if (addr) {
         p->addr = addr;
         p->cpu = smp_processor_id();
-        p->pid = current ? current->pid : -1;
+        p->pid = current->pid;
         p->when = jiffies;
     } else
         memset(p, 0, sizeof(struct track));
@@ -431,9 +431,8 @@ static void print_track(const char *s, struct track *t)
     if (!t->addr)
         return;
 
-    printk(KERN_ERR "INFO: %s in ", s);
-    __print_symbol("%s", (unsigned long)t->addr);
-    printk(" age=%lu cpu=%u pid=%d\n", jiffies - t->when, t->cpu, t->pid);
+    printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
+        s, t->addr, jiffies - t->when, t->cpu, t->pid);
 }
 
 static void print_tracking(struct kmem_cache *s, void *object)
@@ -1497,7 +1496,7 @@ static void flush_cpu_slab(void *d)
 static void flush_all(struct kmem_cache *s)
 {
 #ifdef CONFIG_SMP
-    on_each_cpu(flush_cpu_slab, s, 1, 1);
+    on_each_cpu(flush_cpu_slab, s, 1);
 #else
     unsigned long flags;
 
@@ -1628,9 +1627,11 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
     void **object;
     struct kmem_cache_cpu *c;
     unsigned long flags;
+    unsigned int objsize;
 
     local_irq_save(flags);
     c = get_cpu_slab(s, smp_processor_id());
+    objsize = c->objsize;
     if (unlikely(!c->freelist || !node_match(c, node)))
 
         object = __slab_alloc(s, gfpflags, node, addr, c);
@@ -1643,7 +1644,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
     local_irq_restore(flags);
 
     if (unlikely((gfpflags & __GFP_ZERO) && object))
-        memset(object, 0, c->objsize);
+        memset(object, 0, objsize);
 
     return object;
 }
@@ -2765,6 +2766,7 @@ void kfree(const void *x)
 
     page = virt_to_head_page(x);
     if (unlikely(!PageSlab(page))) {
+        BUG_ON(!PageCompound(page));
         put_page(page);
         return;
     }
@@ -2995,8 +2997,6 @@ void __init kmem_cache_init(void)
         create_kmalloc_cache(&kmalloc_caches[1],
                 "kmalloc-96", 96, GFP_KERNEL);
         caches++;
-    }
-    if (KMALLOC_MIN_SIZE <= 128) {
         create_kmalloc_cache(&kmalloc_caches[2],
                 "kmalloc-192", 192, GFP_KERNEL);
         caches++;
@@ -3026,6 +3026,16 @@ void __init kmem_cache_init(void)
     for (i = 8; i < KMALLOC_MIN_SIZE; i += 8)
         size_index[(i - 1) / 8] = KMALLOC_SHIFT_LOW;
 
+    if (KMALLOC_MIN_SIZE == 128) {
+        /*
+         * The 192 byte sized cache is not used if the alignment
+         * is 128 byte. Redirect kmalloc to use the 256 byte cache
+         * instead.
+         */
+        for (i = 128 + 8; i <= 192; i += 8)
+            size_index[(i - 1) / 8] = 8;
+    }
+
     slab_state = UP;
 
     /* Provide the correct kmalloc names now that the caches are up */
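
Note on the final slub.c hunk: with KMALLOC_MIN_SIZE == 128 the 192-byte cache is never created, so the size_index[] table is patched to send requests of 136..192 bytes to index 8, the 256-byte (2^8) cache. A simplified sketch of the lookup this table feeds, loosely modelled on get_slab() in mm/slub.c; zero-size and DMA handling are omitted and the function name is invented:

    /* Simplified kmalloc cache selection: small sizes go through the
     * size_index[] table, larger ones pick the next power of two. */
    static struct kmem_cache *get_slab_sketch(size_t size, gfp_t flags)
    {
        int index;

        if (size <= 192)
            index = size_index[(size - 1) / 8]; /* e.g. 160 -> 8 when
                                                   KMALLOC_MIN_SIZE is 128 */
        else
            index = fls(size - 1);

        return &kmalloc_caches[index];
    }
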
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index 99c4f36eb8a3..a91b5f8fcaf6 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -1,7 +1,7 @@
 /*
  * Virtual Memory Map support
  *
- * (C) 2007 sgi. Christoph Lameter <clameter@sgi.com>.
+ * (C) 2007 sgi. Christoph Lameter.
  *
  * Virtual memory maps allow VM primitives pfn_to_page, page_to_pfn,
  * virt_to_page, page_address() to be implemented as a base offset