author     Benjamin Herrenschmidt <benh@kernel.crashing.org>  2008-07-15 21:07:59 -0400
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>  2008-07-15 21:07:59 -0400
commit     84c3d4aaec3338201b449034beac41635866bddf
tree       3412951682fb2dd4feb8a5532f8efbaf8b345933 /mm
parent     43d2548bb2ef7e6d753f91468a746784041e522d
parent     fafa3a3f16723997f039a0193997464d66dafd8f
Merge commit 'origin/master'
Manual merge of:
	arch/powerpc/Kconfig
	arch/powerpc/kernel/stacktrace.c
	arch/powerpc/mm/slice.c
	arch/ppc/kernel/smp.c
Diffstat (limited to 'mm')
-rw-r--r--  mm/filemap.c         3
-rw-r--r--  mm/page-writeback.c  3
-rw-r--r--  mm/page_alloc.c      2
-rw-r--r--  mm/slab.c           18
-rw-r--r--  mm/slub.c            5
5 files changed, 14 insertions, 17 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index 1e6a7d34874f..65d9d9e2b755 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -236,11 +236,12 @@ int filemap_fdatawrite(struct address_space *mapping)
 }
 EXPORT_SYMBOL(filemap_fdatawrite);
 
-static int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
+int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
 				loff_t end)
 {
 	return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
 }
+EXPORT_SYMBOL(filemap_fdatawrite_range);
 
 /**
  * filemap_flush - mostly a non-blocking flush
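
The filemap.c change above drops the static qualifier from filemap_fdatawrite_range() and exports it, so code outside mm/ can start writeback on a byte range directly. A minimal sketch of a caller, assuming a prototype is visible via linux/fs.h; the helper name and the 4 KiB range are illustrative, not part of the patch:

#include <linux/fs.h>
#include <linux/pagemap.h>

/* Illustrative helper: write back the first page-cache page of an inode.
 * filemap_fdatawrite_range() takes an inclusive byte range and issues
 * WB_SYNC_ALL writeback without waiting for completion. */
static int flush_first_page(struct inode *inode)
{
	return filemap_fdatawrite_range(inode->i_mapping, 0,
					PAGE_CACHE_SIZE - 1);
}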
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index b38f700825fc..94c6d8988ab3 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -960,6 +960,9 @@ retry:
 	}
 	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
 		mapping->writeback_index = index;
+
+	if (wbc->range_cont)
+		wbc->range_start = index << PAGE_CACHE_SHIFT;
 	return ret;
 }
 EXPORT_SYMBOL(write_cache_pages);
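
The page-writeback.c change makes write_cache_pages() record where a ranged writeback stopped: when wbc->range_cont is set, range_start is advanced to the next index to process. A rough sketch of how a caller might rely on that; the function name, the mapping, and the end offset are placeholders for illustration:

#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>

/* Sketch only: issue ranged writeback and let write_cache_pages() bump
 * range_start so a later pass can continue from the same spot. */
static void writeback_range_cont(struct address_space *mapping, loff_t end)
{
	struct writeback_control wbc = {
		.sync_mode	= WB_SYNC_ALL,
		.nr_to_write	= LONG_MAX,
		.range_start	= 0,
		.range_end	= end,
		.range_cont	= 1,	/* new flag consumed by the hunk above */
	};

	do_writepages(mapping, &wbc);
	/* wbc.range_start now holds (index << PAGE_CACHE_SHIFT) for the
	 * first index not yet written back */
}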
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f024b9b3a2a6..79ac4afc908c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -918,7 +918,7 @@ void drain_local_pages(void *arg)
  */
 void drain_all_pages(void)
 {
-	on_each_cpu(drain_local_pages, NULL, 0, 1);
+	on_each_cpu(drain_local_pages, NULL, 1);
 }
 
 #ifdef CONFIG_HIBERNATION
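
This page_alloc.c hunk, and the similar ones in mm/slab.c and mm/slub.c below, follow the new on_each_cpu() signature: the old retry argument is gone, leaving just the function, its argument, and a wait flag. A small sketch of the new form with a made-up callback:

#include <linux/smp.h>
#include <linux/kernel.h>

/* Illustrative callback: runs on every online CPU. */
static void say_hello(void *info)
{
	printk(KERN_DEBUG "hello from CPU %d\n", smp_processor_id());
}

static void greet_all_cpus(void)
{
	/* New form: on_each_cpu(func, info, wait); wait=1 blocks until all
	 * CPUs have run the callback, matching the converted calls above. */
	on_each_cpu(say_hello, NULL, 1);
}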
diff --git a/mm/slab.c b/mm/slab.c
index 046607f05f3e..052e7d64537e 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1901,15 +1901,7 @@ static void check_poison_obj(struct kmem_cache *cachep, void *objp)
 #endif
 
 #if DEBUG
-/**
- * slab_destroy_objs - destroy a slab and its objects
- * @cachep: cache pointer being destroyed
- * @slabp: slab pointer being destroyed
- *
- * Call the registered destructor for each object in a slab that is being
- * destroyed.
- */
-static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
+static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slabp)
 {
 	int i;
 	for (i = 0; i < cachep->num; i++) {
@@ -1938,7 +1930,7 @@ static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
 	}
 }
 #else
-static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
+static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slabp)
 {
 }
 #endif
@@ -1956,7 +1948,7 @@ static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
 {
 	void *addr = slabp->s_mem - slabp->colouroff;
 
-	slab_destroy_objs(cachep, slabp);
+	slab_destroy_debugcheck(cachep, slabp);
 	if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
 		struct slab_rcu *slab_rcu;
 
@@ -2454,7 +2446,7 @@ static void drain_cpu_caches(struct kmem_cache *cachep)
 	struct kmem_list3 *l3;
 	int node;
 
-	on_each_cpu(do_drain, cachep, 1, 1);
+	on_each_cpu(do_drain, cachep, 1);
 	check_irq_on();
 	for_each_online_node(node) {
 		l3 = cachep->nodelists[node];
@@ -3939,7 +3931,7 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
 	}
 	new->cachep = cachep;
 
-	on_each_cpu(do_ccupdate_local, (void *)new, 1, 1);
+	on_each_cpu(do_ccupdate_local, (void *)new, 1);
 
 	check_irq_on();
 	cachep->batchcount = batchcount;
diff --git a/mm/slub.c b/mm/slub.c
index 5f6e2c4a2ba7..35ab38a94b46 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -411,7 +411,7 @@ static void set_track(struct kmem_cache *s, void *object,
 	if (addr) {
 		p->addr = addr;
 		p->cpu = smp_processor_id();
-		p->pid = current ? current->pid : -1;
+		p->pid = current->pid;
 		p->when = jiffies;
 	} else
 		memset(p, 0, sizeof(struct track));
@@ -1496,7 +1496,7 @@ static void flush_cpu_slab(void *d)
 static void flush_all(struct kmem_cache *s)
 {
 #ifdef CONFIG_SMP
-	on_each_cpu(flush_cpu_slab, s, 1, 1);
+	on_each_cpu(flush_cpu_slab, s, 1);
 #else
 	unsigned long flags;
 
@@ -2766,6 +2766,7 @@ void kfree(const void *x)
 
 	page = virt_to_head_page(x);
 	if (unlikely(!PageSlab(page))) {
+		BUG_ON(!PageCompound(page));
 		put_page(page);
 		return;
 	}
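
The kfree() hunk asserts that a non-slab pointer must map to a compound page, i.e. memory handed out by SLUB's large-allocation path, which is then released with put_page(). Callers are unaffected; a sketch with an arbitrary size well above the slab caches (the helper names and the 64 KiB size are illustrative):

#include <linux/slab.h>

/* Illustration only: a sufficiently large kmalloc() is backed by compound
 * pages from the page allocator, and kfree() now BUG()s if a non-slab
 * pointer is not a compound page. */
static void *grab_big_buffer(void)
{
	return kmalloc(64 * 1024, GFP_KERNEL);
}

static void drop_big_buffer(void *buf)
{
	kfree(buf);	/* takes the !PageSlab(page) -> put_page() path above */
}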