path: root/mm
author		David Woodhouse <dwmw2@infradead.org>	2006-08-30 18:30:38 -0400
committer	David Woodhouse <dwmw2@infradead.org>	2006-08-30 18:30:38 -0400
commit		0a7d5f8ce960e74fa22986bda4af488539796e49 (patch)
tree		e29ad17808a5c3410518e22dae8dfe94801b59f3 /mm
parent		0165508c80a2b5d5268d9c5dfa9b30c534a33693 (diff)
parent		dc709bd190c130b299ac19d596594256265c042a (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'mm')
-rw-r--r--	mm/bootmem.c		4
-rw-r--r--	mm/fadvise.c		15
-rw-r--r--	mm/filemap.c		2
-rw-r--r--	mm/memory.c		8
-rw-r--r--	mm/memory_hotplug.c	44
-rw-r--r--	mm/mmzone.c		6
-rw-r--r--	mm/nommu.c		2
-rw-r--r--	mm/slab.c		78
-rw-r--r--	mm/swap.c		20
-rw-r--r--	mm/swapfile.c		3
-rw-r--r--	mm/vmalloc.c		7
-rw-r--r--	mm/vmstat.c		1
12 files changed, 119 insertions, 71 deletions
diff --git a/mm/bootmem.c b/mm/bootmem.c
index d213feded10d..50353e0dac12 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -29,9 +29,7 @@ unsigned long max_low_pfn;
 unsigned long min_low_pfn;
 unsigned long max_pfn;
 
-EXPORT_SYMBOL(max_pfn);		/* This is exported so
-				 * dma_get_required_mask(), which uses
-				 * it, can be an inline function */
+EXPORT_UNUSED_SYMBOL(max_pfn);  /* June 2006 */
 
 static LIST_HEAD(bdata_list);
 #ifdef CONFIG_CRASH_DUMP
diff --git a/mm/fadvise.c b/mm/fadvise.c
index 0a03357a1f8e..168c78a121bb 100644
--- a/mm/fadvise.c
+++ b/mm/fadvise.c
@@ -23,18 +23,6 @@
 /*
  * POSIX_FADV_WILLNEED could set PG_Referenced, and POSIX_FADV_NOREUSE could
  * deactivate the pages and clear PG_Referenced.
- *
- * LINUX_FADV_ASYNC_WRITE: start async writeout of any dirty pages between file
- * offsets `offset' and `offset+len' inclusive.  Any pages which are currently
- * under writeout are skipped, whether or not they are dirty.
- *
- * LINUX_FADV_WRITE_WAIT: wait upon writeout of any dirty pages between file
- * offsets `offset' and `offset+len'.
- *
- * By combining these two operations the application may do several things:
- *
- * LINUX_FADV_ASYNC_WRITE: push some or all of the dirty pages at the disk.
- *
  */
 asmlinkage long sys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
 {
@@ -85,7 +73,6 @@ asmlinkage long sys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
 		file->f_ra.ra_pages = bdi->ra_pages * 2;
 		break;
 	case POSIX_FADV_WILLNEED:
-	case POSIX_FADV_NOREUSE:
 		if (!mapping->a_ops->readpage) {
 			ret = -EINVAL;
 			break;
@@ -106,6 +93,8 @@ asmlinkage long sys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
 		if (ret > 0)
 			ret = 0;
 		break;
+	case POSIX_FADV_NOREUSE:
+		break;
 	case POSIX_FADV_DONTNEED:
 		if (!bdi_write_congested(mapping->backing_dev_info))
 			filemap_flush(mapping);
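
For context, a minimal userspace sketch (not part of this patch) of how these advice values reach sys_fadvise64_64() through posix_fadvise(2); after the change above, POSIX_FADV_NOREUSE is accepted as a no-op instead of triggering readahead. The file path and length below are illustrative only.

#define _POSIX_C_SOURCE 200112L
#include <fcntl.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	int fd = open("/var/log/messages", O_RDONLY);	/* example file */
	int err;

	if (fd < 0)
		return 1;
	/* Hint that the first 1 MiB will be needed soon (starts readahead). */
	err = posix_fadvise(fd, 0, 1 << 20, POSIX_FADV_WILLNEED);
	if (err)	/* posix_fadvise() returns an errno value, not -1 */
		fprintf(stderr, "posix_fadvise: %s\n", strerror(err));
	return err ? 1 : 0;
}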
diff --git a/mm/filemap.c b/mm/filemap.c
index d087fc3d3281..b9a60c43b61a 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -849,8 +849,6 @@ static void shrink_readahead_size_eio(struct file *filp,
 	return;
 
 	ra->ra_pages /= 4;
-	printk(KERN_WARNING "Reducing readahead size to %luK\n",
-			ra->ra_pages << (PAGE_CACHE_SHIFT - 10));
 }
 
 /**
diff --git a/mm/memory.c b/mm/memory.c
index c1e14c9e67e4..109e9866237e 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -47,6 +47,7 @@
 #include <linux/pagemap.h>
 #include <linux/rmap.h>
 #include <linux/module.h>
+#include <linux/delayacct.h>
 #include <linux/init.h>
 
 #include <asm/pgalloc.h>
@@ -1549,9 +1550,9 @@ gotten:
 		flush_cache_page(vma, address, pte_pfn(orig_pte));
 		entry = mk_pte(new_page, vma->vm_page_prot);
 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+		lazy_mmu_prot_update(entry);
 		ptep_establish(vma, address, page_table, entry);
 		update_mmu_cache(vma, address, entry);
-		lazy_mmu_prot_update(entry);
 		lru_cache_add_active(new_page);
 		page_add_new_anon_rmap(new_page, vma, address);
 
@@ -1853,7 +1854,7 @@ int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
 
 	return 0;
 }
-EXPORT_SYMBOL(vmtruncate_range);
+EXPORT_UNUSED_SYMBOL(vmtruncate_range);  /* June 2006 */
 
 /*
  * Primitive swap readahead code. We simply read an aligned block of
@@ -1934,6 +1935,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		migration_entry_wait(mm, pmd, address);
 		goto out;
 	}
+	delayacct_set_flag(DELAYACCT_PF_SWAPIN);
 	page = lookup_swap_cache(entry);
 	if (!page) {
 		swapin_readahead(entry, address, vma);
@@ -1946,6 +1948,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
 			if (likely(pte_same(*page_table, orig_pte)))
 				ret = VM_FAULT_OOM;
+			delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
 			goto unlock;
 		}
 
@@ -1955,6 +1958,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		grab_swap_token();
 	}
 
+	delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
 	mark_page_accessed(page);
 	lock_page(page);
 
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 01c9fb97c619..c37319542b70 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -52,6 +52,9 @@ static int __add_section(struct zone *zone, unsigned long phys_start_pfn)
 	int nr_pages = PAGES_PER_SECTION;
 	int ret;
 
+	if (pfn_valid(phys_start_pfn))
+		return -EEXIST;
+
 	ret = sparse_add_one_section(zone, phys_start_pfn, nr_pages);
 
 	if (ret < 0)
@@ -76,15 +79,22 @@ int __add_pages(struct zone *zone, unsigned long phys_start_pfn,
 {
 	unsigned long i;
 	int err = 0;
+	int start_sec, end_sec;
+	/* during initialize mem_map, align hot-added range to section */
+	start_sec = pfn_to_section_nr(phys_start_pfn);
+	end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);
 
-	for (i = 0; i < nr_pages; i += PAGES_PER_SECTION) {
-		err = __add_section(zone, phys_start_pfn + i);
+	for (i = start_sec; i <= end_sec; i++) {
+		err = __add_section(zone, i << PFN_SECTION_SHIFT);
 
-		/* We want to keep adding the rest of the
-		 * sections if the first ones already exist
+		/*
+		 * EEXIST is finally dealed with by ioresource collision
+		 * check. see add_memory() => register_memory_resource()
+		 * Warning will be printed if there is collision.
 		 */
 		if (err && (err != -EEXIST))
 			break;
+		err = 0;
 	}
 
 	return err;
@@ -156,7 +166,7 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
 	res.flags = IORESOURCE_MEM; /* we just need system ram */
 	section_end = res.end;
 
-	while (find_next_system_ram(&res) >= 0) {
+	while ((res.start < res.end) && (find_next_system_ram(&res) >= 0)) {
 		start_pfn = (unsigned long)(res.start >> PAGE_SHIFT);
 		nr_pages = (unsigned long)
 			((res.end + 1 - res.start) >> PAGE_SHIFT);
@@ -213,10 +223,9 @@ static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
 }
 
 /* add this memory to iomem resource */
-static void register_memory_resource(u64 start, u64 size)
+static struct resource *register_memory_resource(u64 start, u64 size)
 {
 	struct resource *res;
-
 	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
 	BUG_ON(!res);
 
@@ -228,7 +237,18 @@ static void register_memory_resource(u64 start, u64 size)
 		printk("System RAM resource %llx - %llx cannot be added\n",
 		(unsigned long long)res->start, (unsigned long long)res->end);
 		kfree(res);
+		res = NULL;
 	}
+	return res;
+}
+
+static void release_memory_resource(struct resource *res)
+{
+	if (!res)
+		return;
+	release_resource(res);
+	kfree(res);
+	return;
 }
 
 
@@ -237,8 +257,13 @@ int add_memory(int nid, u64 start, u64 size)
 {
 	pg_data_t *pgdat = NULL;
 	int new_pgdat = 0;
+	struct resource *res;
 	int ret;
 
+	res = register_memory_resource(start, size);
+	if (!res)
+		return -EEXIST;
+
 	if (!node_online(nid)) {
 		pgdat = hotadd_new_pgdat(nid, start);
 		if (!pgdat)
@@ -268,14 +293,13 @@ int add_memory(int nid, u64 start, u64 size)
 		BUG_ON(ret);
 	}
 
-	/* register this memory as resource */
-	register_memory_resource(start, size);
-
 	return ret;
 error:
 	/* rollback pgdat allocation and others */
 	if (new_pgdat)
 		rollback_node_hotadd(nid, pgdat);
+	if (res)
+		release_memory_resource(res);
 
 	return ret;
 }
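
To show what the reworked ordering means for a caller, here is a hypothetical in-kernel sketch (not from this patch): the node id, physical range, and the function name example_hotadd are invented for illustration.

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/memory_hotplug.h>

static int __init example_hotadd(void)
{
	int nid = 0;				/* example node */
	u64 start = 0x100000000ULL;		/* example base: 4 GiB */
	u64 size  = 128ULL << 20;		/* example size: 128 MiB */
	int ret = add_memory(nid, start, size);

	/* With this change, a collision with an already-registered System RAM
	 * resource is reported up front as -EEXIST, and the resource is
	 * released again if the hot-add fails later on. */
	if (ret == -EEXIST)
		printk(KERN_WARNING "example: range already registered\n");
	else if (ret)
		printk(KERN_WARNING "example: add_memory failed: %d\n", ret);
	return ret;
}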
diff --git a/mm/mmzone.c b/mm/mmzone.c
index 0959ee1a4795..febea1c98168 100644
--- a/mm/mmzone.c
+++ b/mm/mmzone.c
@@ -14,7 +14,7 @@ struct pglist_data *first_online_pgdat(void)
 	return NODE_DATA(first_online_node);
 }
 
-EXPORT_SYMBOL(first_online_pgdat);
+EXPORT_UNUSED_SYMBOL(first_online_pgdat);  /* June 2006 */
 
 struct pglist_data *next_online_pgdat(struct pglist_data *pgdat)
 {
@@ -24,7 +24,7 @@ struct pglist_data *next_online_pgdat(struct pglist_data *pgdat)
 		return NULL;
 	return NODE_DATA(nid);
 }
-EXPORT_SYMBOL(next_online_pgdat);
+EXPORT_UNUSED_SYMBOL(next_online_pgdat);  /* June 2006 */
 
 
 /*
@@ -45,5 +45,5 @@ struct zone *next_zone(struct zone *zone)
 	}
 	return zone;
 }
-EXPORT_SYMBOL(next_zone);
+EXPORT_UNUSED_SYMBOL(next_zone);  /* June 2006 */
 
diff --git a/mm/nommu.c b/mm/nommu.c
index 5151c44a8257..c576df71e3bb 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1070,6 +1070,7 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
 	vma->vm_start = vma->vm_pgoff << PAGE_SHIFT;
 	return 0;
 }
+EXPORT_SYMBOL(remap_pfn_range);
 
 void swap_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
 {
@@ -1090,6 +1091,7 @@ void unmap_mapping_range(struct address_space *mapping,
 			  int even_cows)
 {
 }
+EXPORT_SYMBOL(unmap_mapping_range);
 
 /*
  * Check that a process has enough memory to allocate a new virtual
diff --git a/mm/slab.c b/mm/slab.c
index 85c2e03098a7..21ba06035700 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -674,6 +674,37 @@ static struct kmem_cache cache_cache = {
 #endif
 };
 
+#ifdef CONFIG_LOCKDEP
+
+/*
+ * Slab sometimes uses the kmalloc slabs to store the slab headers
+ * for other slabs "off slab".
+ * The locking for this is tricky in that it nests within the locks
+ * of all other slabs in a few places; to deal with this special
+ * locking we put on-slab caches into a separate lock-class.
+ */
+static struct lock_class_key on_slab_key;
+
+static inline void init_lock_keys(struct cache_sizes *s)
+{
+	int q;
+
+	for (q = 0; q < MAX_NUMNODES; q++) {
+		if (!s->cs_cachep->nodelists[q] || OFF_SLAB(s->cs_cachep))
+			continue;
+		lockdep_set_class(&s->cs_cachep->nodelists[q]->list_lock,
+				  &on_slab_key);
+	}
+}
+
+#else
+static inline void init_lock_keys(struct cache_sizes *s)
+{
+}
+#endif
+
+
+
 /* Guard access to the cache-chain. */
 static DEFINE_MUTEX(cache_chain_mutex);
 static struct list_head cache_chain;
@@ -1021,8 +1052,7 @@ static void drain_alien_cache(struct kmem_cache *cachep,
 	}
 }
 
-static inline int cache_free_alien(struct kmem_cache *cachep, void *objp,
-				   int nesting)
+static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
 {
 	struct slab *slabp = virt_to_slab(objp);
 	int nodeid = slabp->nodeid;
@@ -1040,7 +1070,7 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp,
 	STATS_INC_NODEFREES(cachep);
 	if (l3->alien && l3->alien[nodeid]) {
 		alien = l3->alien[nodeid];
-		spin_lock_nested(&alien->lock, nesting);
+		spin_lock(&alien->lock);
 		if (unlikely(alien->avail == alien->limit)) {
 			STATS_INC_ACOVERFLOW(cachep);
 			__drain_alien_cache(cachep, alien, nodeid);
@@ -1069,15 +1099,14 @@ static inline void free_alien_cache(struct array_cache **ac_ptr)
 {
 }
 
-static inline int cache_free_alien(struct kmem_cache *cachep, void *objp,
-				   int nesting)
+static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
 {
 	return 0;
 }
 
 #endif
 
-static int __devinit cpuup_callback(struct notifier_block *nfb,
+static int __cpuinit cpuup_callback(struct notifier_block *nfb,
 				    unsigned long action, void *hcpu)
 {
 	long cpu = (long)hcpu;
@@ -1393,6 +1422,7 @@ void __init kmem_cache_init(void)
 					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
 					NULL, NULL);
 	}
+	init_lock_keys(sizes);
 
 	sizes->cs_dmacachep = kmem_cache_create(names->name_dma,
 					sizes->cs_size,
@@ -1760,8 +1790,6 @@ static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
 }
 #endif
 
-static void __cache_free(struct kmem_cache *cachep, void *objp, int nesting);
-
 /**
  * slab_destroy - destroy and release all objects in a slab
  * @cachep: cache pointer being destroyed
@@ -1785,17 +1813,8 @@ static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
 		call_rcu(&slab_rcu->head, kmem_rcu_free);
 	} else {
 		kmem_freepages(cachep, addr);
-		if (OFF_SLAB(cachep)) {
-			unsigned long flags;
-
-			/*
-			 * lockdep: we may nest inside an already held
-			 * ac->lock, so pass in a nesting flag:
-			 */
-			local_irq_save(flags);
-			__cache_free(cachep->slabp_cache, slabp, 1);
-			local_irq_restore(flags);
-		}
+		if (OFF_SLAB(cachep))
+			kmem_cache_free(cachep->slabp_cache, slabp);
 	}
 }
 
@@ -3100,16 +3119,7 @@ static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
 		if (slabp->inuse == 0) {
 			if (l3->free_objects > l3->free_limit) {
 				l3->free_objects -= cachep->num;
-				/*
-				 * It is safe to drop the lock. The slab is
-				 * no longer linked to the cache. cachep
-				 * cannot disappear - we are using it and
-				 * all destruction of caches must be
-				 * serialized properly by the user.
-				 */
-				spin_unlock(&l3->list_lock);
 				slab_destroy(cachep, slabp);
-				spin_lock(&l3->list_lock);
 			} else {
 				list_add(&slabp->list, &l3->slabs_free);
 			}
@@ -3135,7 +3145,7 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
 #endif
 	check_irq_off();
 	l3 = cachep->nodelists[node];
-	spin_lock_nested(&l3->list_lock, SINGLE_DEPTH_NESTING);
+	spin_lock(&l3->list_lock);
 	if (l3->shared) {
 		struct array_cache *shared_array = l3->shared;
 		int max = shared_array->limit - shared_array->avail;
@@ -3178,14 +3188,14 @@ free_done:
  * Release an obj back to its cache. If the obj has a constructed state, it must
  * be in this state _before_ it is released.  Called with disabled ints.
  */
-static void __cache_free(struct kmem_cache *cachep, void *objp, int nesting)
+static inline void __cache_free(struct kmem_cache *cachep, void *objp)
 {
 	struct array_cache *ac = cpu_cache_get(cachep);
 
 	check_irq_off();
 	objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
 
-	if (cache_free_alien(cachep, objp, nesting))
+	if (cache_free_alien(cachep, objp))
 		return;
 
 	if (likely(ac->avail < ac->limit)) {
@@ -3214,7 +3224,7 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 EXPORT_SYMBOL(kmem_cache_alloc);
 
 /**
- * kmem_cache_alloc - Allocate an object. The memory is set to zero.
+ * kmem_cache_zalloc - Allocate an object. The memory is set to zero.
  * @cache: The cache to allocate from.
  * @flags: See kmalloc().
  *
@@ -3424,7 +3434,7 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 	BUG_ON(virt_to_cache(objp) != cachep);
 
 	local_irq_save(flags);
-	__cache_free(cachep, objp, 0);
+	__cache_free(cachep, objp);
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL(kmem_cache_free);
@@ -3449,7 +3459,7 @@ void kfree(const void *objp)
 	kfree_debugcheck(objp);
 	c = virt_to_cache(objp);
 	debug_check_no_locks_freed(objp, obj_size(c));
-	__cache_free(c, (void *)objp, 0);
+	__cache_free(c, (void *)objp);
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL(kfree);
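
The on_slab_key idiom added above is the generic lockdep pattern for locks that legitimately nest inside other locks of the same type. A small illustrative sketch (not from this patch) of the same idiom in isolation; the structure example_node, the key name, and the init function are hypothetical.

#include <linux/spinlock.h>
#include <linux/lockdep.h>

static struct lock_class_key inner_lock_key;	/* hypothetical key */

struct example_node {				/* hypothetical structure */
	spinlock_t lock;
};

static void example_node_init(struct example_node *n)
{
	spin_lock_init(&n->lock);
	/* Put this lock into its own class so taking it while another
	 * example_node lock is held is not flagged as recursive locking. */
	lockdep_set_class(&n->lock, &inner_lock_key);
}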
diff --git a/mm/swap.c b/mm/swap.c
index 8fd095c4ae51..687686a61f7c 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -54,6 +54,26 @@ void put_page(struct page *page)
 }
 EXPORT_SYMBOL(put_page);
 
+/**
+ * put_pages_list(): release a list of pages
+ *
+ * Release a list of pages which are strung together on page.lru.  Currently
+ * used by read_cache_pages() and related error recovery code.
+ *
+ * @pages: list of pages threaded on page->lru
+ */
+void put_pages_list(struct list_head *pages)
+{
+	while (!list_empty(pages)) {
+		struct page *victim;
+
+		victim = list_entry(pages->prev, struct page, lru);
+		list_del(&victim->lru);
+		page_cache_release(victim);
+	}
+}
+EXPORT_SYMBOL(put_pages_list);
+
 /*
  * Writeback is about to end against a page which has been marked for immediate
  * reclaim. If it still appears to be reclaimable, move it to the tail of the
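
A minimal caller sketch (not part of the patch) of the new helper: build a short list of pages threaded on page->lru and let put_pages_list() drop the references. The function name example_release_some_pages and the count of four pages are invented for illustration.

#include <linux/list.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>

static void example_release_some_pages(void)
{
	LIST_HEAD(pages);
	int i;

	for (i = 0; i < 4; i++) {
		struct page *page = alloc_page(GFP_KERNEL);

		if (!page)
			break;
		list_add(&page->lru, &pages);	/* thread on page->lru */
	}
	put_pages_list(&pages);			/* releases every page on the list */
}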
diff --git a/mm/swapfile.c b/mm/swapfile.c
index e70d6c6d6fee..f1f5ec783781 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -442,11 +442,12 @@ int swap_type_of(dev_t device)
 
 		if (!(swap_info[i].flags & SWP_WRITEOK))
 			continue;
+
 		if (!device) {
 			spin_unlock(&swap_lock);
 			return i;
 		}
-		inode = swap_info->swap_file->f_dentry->d_inode;
+		inode = swap_info[i].swap_file->f_dentry->d_inode;
 		if (S_ISBLK(inode->i_mode) &&
 		    device == MKDEV(imajor(inode), iminor(inode))) {
 			spin_unlock(&swap_lock);
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 7b450798b458..266162d2ba28 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -340,7 +340,7 @@ void __vunmap(void *addr, int deallocate_pages)
 			__free_page(area->pages[i]);
 	}
 
-	if (area->nr_pages > PAGE_SIZE/sizeof(struct page *))
+	if (area->flags & VM_VPAGES)
 		vfree(area->pages);
 	else
 		kfree(area->pages);
@@ -427,9 +427,10 @@ void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 
 	area->nr_pages = nr_pages;
 	/* Please note that the recursion is strictly bounded. */
-	if (array_size > PAGE_SIZE)
+	if (array_size > PAGE_SIZE) {
 		pages = __vmalloc_node(array_size, gfp_mask, PAGE_KERNEL, node);
-	else
+		area->flags |= VM_VPAGES;
+	} else
 		pages = kmalloc_node(array_size, (gfp_mask & ~__GFP_HIGHMEM), node);
 	area->pages = pages;
 	if (!area->pages) {
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 73b83d67bab6..dfdf24133901 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -81,6 +81,7 @@ void all_vm_events(unsigned long *ret)
 {
 	sum_vm_events(ret, &cpu_online_map);
 }
+EXPORT_SYMBOL_GPL(all_vm_events);
 
 #ifdef CONFIG_HOTPLUG
 /*