Diffstat (limited to 'mm')
-rw-r--r--  mm/bootmem.c   4
-rw-r--r--  mm/fadvise.c  12
-rw-r--r--  mm/memory.c    8
-rw-r--r--  mm/mmzone.c    6
-rw-r--r--  mm/nommu.c     2
-rw-r--r--  mm/slab.c     74
-rw-r--r--  mm/vmalloc.c   7
-rw-r--r--  mm/vmstat.c    1
8 files changed, 59 insertions, 55 deletions
diff --git a/mm/bootmem.c b/mm/bootmem.c
index d213feded10d..50353e0dac12 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -29,9 +29,7 @@ unsigned long max_low_pfn;
 unsigned long min_low_pfn;
 unsigned long max_pfn;
 
-EXPORT_SYMBOL(max_pfn);		/* This is exported so
-				 * dma_get_required_mask(), which uses
-				 * it, can be an inline function */
+EXPORT_UNUSED_SYMBOL(max_pfn);  /* June 2006 */
 
 static LIST_HEAD(bdata_list);
 #ifdef CONFIG_CRASH_DUMP
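For context, EXPORT_UNUSED_SYMBOL() marks an export no in-tree code needs; the "June 2006" comments record when each symbol was flagged for eventual removal. A simplified sketch of the idea, assuming the CONFIG_UNUSED_SYMBOLS switch (the real definition in include/linux/module.h is more involved — it keeps these exports in a dedicated section and warns when a module uses one):

#ifdef CONFIG_UNUSED_SYMBOLS
/* simplified sketch: still visible to modules, but flagged as unused */
#define EXPORT_UNUSED_SYMBOL(sym)	EXPORT_SYMBOL(sym)
#else
/* export compiles away; only built-in code can still reach sym */
#define EXPORT_UNUSED_SYMBOL(sym)
#endif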
diff --git a/mm/fadvise.c b/mm/fadvise.c
index 0a03357a1f8e..60a5d55e51d9 100644
--- a/mm/fadvise.c
+++ b/mm/fadvise.c
@@ -23,18 +23,6 @@
 /*
  * POSIX_FADV_WILLNEED could set PG_Referenced, and POSIX_FADV_NOREUSE could
  * deactivate the pages and clear PG_Referenced.
- *
- * LINUX_FADV_ASYNC_WRITE: start async writeout of any dirty pages between file
- * offsets `offset' and `offset+len' inclusive.  Any pages which are currently
- * under writeout are skipped, whether or not they are dirty.
- *
- * LINUX_FADV_WRITE_WAIT: wait upon writeout of any dirty pages between file
- * offsets `offset' and `offset+len'.
- *
- * By combining these two operations the application may do several things:
- *
- * LINUX_FADV_ASYNC_WRITE: push some or all of the dirty pages at the disk.
- *
  */
 asmlinkage long sys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
 {
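The comment block trimmed above described LINUX_FADV_ASYNC_WRITE and LINUX_FADV_WRITE_WAIT; what remains documents the POSIX advice values handled by sys_fadvise64_64(). For reference, a minimal userspace sketch of reaching this syscall through the glibc posix_fadvise() wrapper (the file name is arbitrary):

#define _XOPEN_SOURCE 600
#include <fcntl.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	int fd = open("/var/log/messages", O_RDONLY);
	int err;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* len == 0 means "to the end of the file" */
	err = posix_fadvise(fd, 0, 0, POSIX_FADV_WILLNEED);
	if (err)	/* posix_fadvise() returns a positive errno, not -1 */
		fprintf(stderr, "posix_fadvise: %s\n", strerror(err));
	return 0;
}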
diff --git a/mm/memory.c b/mm/memory.c
index c1e14c9e67e4..109e9866237e 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -47,6 +47,7 @@
 #include <linux/pagemap.h>
 #include <linux/rmap.h>
 #include <linux/module.h>
+#include <linux/delayacct.h>
 #include <linux/init.h>
 
 #include <asm/pgalloc.h>
@@ -1549,9 +1550,9 @@ gotten:
 	flush_cache_page(vma, address, pte_pfn(orig_pte));
 	entry = mk_pte(new_page, vma->vm_page_prot);
 	entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+	lazy_mmu_prot_update(entry);
 	ptep_establish(vma, address, page_table, entry);
 	update_mmu_cache(vma, address, entry);
-	lazy_mmu_prot_update(entry);
 	lru_cache_add_active(new_page);
 	page_add_new_anon_rmap(new_page, vma, address);
 
@@ -1853,7 +1854,7 @@ int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
 
 	return 0;
 }
-EXPORT_SYMBOL(vmtruncate_range);
+EXPORT_UNUSED_SYMBOL(vmtruncate_range);  /* June 2006 */
 
 /*
  * Primitive swap readahead code. We simply read an aligned block of
@@ -1934,6 +1935,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		migration_entry_wait(mm, pmd, address);
 		goto out;
 	}
+	delayacct_set_flag(DELAYACCT_PF_SWAPIN);
 	page = lookup_swap_cache(entry);
 	if (!page) {
 		swapin_readahead(entry, address, vma);
@@ -1946,6 +1948,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
 			if (likely(pte_same(*page_table, orig_pte)))
 				ret = VM_FAULT_OOM;
+			delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
 			goto unlock;
 		}
 
@@ -1955,6 +1958,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		grab_swap_token();
 	}
 
+	delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
 	mark_page_accessed(page);
 	lock_page(page);
 
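The delayacct hunks above follow a simple bracketing pattern: set DELAYACCT_PF_SWAPIN before work that may block waiting on swap-in, and clear it on every exit path so the delay is charged correctly. A minimal sketch of that pattern (do_blocking_swapin() is a hypothetical placeholder, not a kernel function):

#include <linux/delayacct.h>

static int do_blocking_swapin(void);	/* hypothetical blocking work */

static void swapin_accounting_sketch(void)
{
	delayacct_set_flag(DELAYACCT_PF_SWAPIN);	/* start charging swap-in delay */

	if (do_blocking_swapin() < 0) {
		delayacct_clear_flag(DELAYACCT_PF_SWAPIN);	/* error path clears too */
		return;
	}

	delayacct_clear_flag(DELAYACCT_PF_SWAPIN);	/* normal path */
}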
diff --git a/mm/mmzone.c b/mm/mmzone.c
index 0959ee1a4795..febea1c98168 100644
--- a/mm/mmzone.c
+++ b/mm/mmzone.c
@@ -14,7 +14,7 @@ struct pglist_data *first_online_pgdat(void)
 	return NODE_DATA(first_online_node);
 }
 
-EXPORT_SYMBOL(first_online_pgdat);
+EXPORT_UNUSED_SYMBOL(first_online_pgdat);  /* June 2006 */
 
 struct pglist_data *next_online_pgdat(struct pglist_data *pgdat)
 {
@@ -24,7 +24,7 @@ struct pglist_data *next_online_pgdat(struct pglist_data *pgdat)
 		return NULL;
 	return NODE_DATA(nid);
 }
-EXPORT_SYMBOL(next_online_pgdat);
+EXPORT_UNUSED_SYMBOL(next_online_pgdat);  /* June 2006 */
 
 
 /*
@@ -45,5 +45,5 @@ struct zone *next_zone(struct zone *zone)
 	}
 	return zone;
 }
-EXPORT_SYMBOL(next_zone);
+EXPORT_UNUSED_SYMBOL(next_zone);  /* June 2006 */
 
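first_online_pgdat(), next_online_pgdat() and next_zone() are the helpers behind the in-tree pgdat/zone iterator macros, so built-in walkers are unaffected by switching these exports; roughly, for_each_zone() expands to a loop like the sketch below (see include/linux/mmzone.h for the real macro):

#include <linux/mmzone.h>

static void walk_all_zones_sketch(void)
{
	struct zone *zone;

	for (zone = (first_online_pgdat())->node_zones;
	     zone;
	     zone = next_zone(zone)) {
		/* inspect one zone, e.g. its free-page counters */
	}
}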
diff --git a/mm/nommu.c b/mm/nommu.c
index 5151c44a8257..c576df71e3bb 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1070,6 +1070,7 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
 	vma->vm_start = vma->vm_pgoff << PAGE_SHIFT;
 	return 0;
 }
+EXPORT_SYMBOL(remap_pfn_range);
 
 void swap_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
 {
@@ -1090,6 +1091,7 @@ void unmap_mapping_range(struct address_space *mapping,
 			  int even_cows)
 {
 }
+EXPORT_SYMBOL(unmap_mapping_range);
 
 /*
  * Check that a process has enough memory to allocate a new virtual
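The two exports added above let modular drivers keep the usual mmap pattern on nommu, where the ->mmap() handler calls remap_pfn_range() (a trivial stub here) and truncation paths call unmap_mapping_range() (an empty stub). A sketch of such a modular caller, with illustrative names:

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>

static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	/* map the pfn the caller encoded in the mmap() offset */
	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       size, vma->vm_page_prot);
}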
diff --git a/mm/slab.c b/mm/slab.c
index 85c2e03098a7..0f20843beffd 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -674,6 +674,37 @@ static struct kmem_cache cache_cache = {
 #endif
 };
 
+#ifdef CONFIG_LOCKDEP
+
+/*
+ * Slab sometimes uses the kmalloc slabs to store the slab headers
+ * for other slabs "off slab".
+ * The locking for this is tricky in that it nests within the locks
+ * of all other slabs in a few places; to deal with this special
+ * locking we put on-slab caches into a separate lock-class.
+ */
+static struct lock_class_key on_slab_key;
+
+static inline void init_lock_keys(struct cache_sizes *s)
+{
+	int q;
+
+	for (q = 0; q < MAX_NUMNODES; q++) {
+		if (!s->cs_cachep->nodelists[q] || OFF_SLAB(s->cs_cachep))
+			continue;
+		lockdep_set_class(&s->cs_cachep->nodelists[q]->list_lock,
+				  &on_slab_key);
+	}
+}
+
+#else
+static inline void init_lock_keys(struct cache_sizes *s)
+{
+}
+#endif
+
+
+
 /* Guard access to the cache-chain. */
 static DEFINE_MUTEX(cache_chain_mutex);
 static struct list_head cache_chain;
@@ -1021,8 +1052,7 @@ static void drain_alien_cache(struct kmem_cache *cachep,
 	}
 }
 
-static inline int cache_free_alien(struct kmem_cache *cachep, void *objp,
-				   int nesting)
+static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
 {
 	struct slab *slabp = virt_to_slab(objp);
 	int nodeid = slabp->nodeid;
@@ -1040,7 +1070,7 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp,
 	STATS_INC_NODEFREES(cachep);
 	if (l3->alien && l3->alien[nodeid]) {
 		alien = l3->alien[nodeid];
-		spin_lock_nested(&alien->lock, nesting);
+		spin_lock(&alien->lock);
 		if (unlikely(alien->avail == alien->limit)) {
 			STATS_INC_ACOVERFLOW(cachep);
 			__drain_alien_cache(cachep, alien, nodeid);
@@ -1069,8 +1099,7 @@ static inline void free_alien_cache(struct array_cache **ac_ptr)
 {
 }
 
-static inline int cache_free_alien(struct kmem_cache *cachep, void *objp,
-				   int nesting)
+static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
 {
 	return 0;
 }
@@ -1393,6 +1422,7 @@ void __init kmem_cache_init(void)
 					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
 					NULL, NULL);
 	}
+	init_lock_keys(sizes);
 
 	sizes->cs_dmacachep = kmem_cache_create(names->name_dma,
 					sizes->cs_size,
@@ -1760,8 +1790,6 @@ static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
 }
 #endif
 
-static void __cache_free(struct kmem_cache *cachep, void *objp, int nesting);
-
 /**
  * slab_destroy - destroy and release all objects in a slab
  * @cachep: cache pointer being destroyed
@@ -1785,17 +1813,8 @@ static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
 		call_rcu(&slab_rcu->head, kmem_rcu_free);
 	} else {
 		kmem_freepages(cachep, addr);
-		if (OFF_SLAB(cachep)) {
-			unsigned long flags;
-
-			/*
-			 * lockdep: we may nest inside an already held
-			 * ac->lock, so pass in a nesting flag:
-			 */
-			local_irq_save(flags);
-			__cache_free(cachep->slabp_cache, slabp, 1);
-			local_irq_restore(flags);
-		}
+		if (OFF_SLAB(cachep))
+			kmem_cache_free(cachep->slabp_cache, slabp);
 	}
 }
 
@@ -3100,16 +3119,7 @@ static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
 		if (slabp->inuse == 0) {
 			if (l3->free_objects > l3->free_limit) {
 				l3->free_objects -= cachep->num;
-				/*
-				 * It is safe to drop the lock. The slab is
-				 * no longer linked to the cache. cachep
-				 * cannot disappear - we are using it and
-				 * all destruction of caches must be
-				 * serialized properly by the user.
-				 */
-				spin_unlock(&l3->list_lock);
 				slab_destroy(cachep, slabp);
-				spin_lock(&l3->list_lock);
 			} else {
 				list_add(&slabp->list, &l3->slabs_free);
 			}
@@ -3135,7 +3145,7 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
 #endif
 	check_irq_off();
 	l3 = cachep->nodelists[node];
-	spin_lock_nested(&l3->list_lock, SINGLE_DEPTH_NESTING);
+	spin_lock(&l3->list_lock);
 	if (l3->shared) {
 		struct array_cache *shared_array = l3->shared;
 		int max = shared_array->limit - shared_array->avail;
@@ -3178,14 +3188,14 @@ free_done:
  * Release an obj back to its cache. If the obj has a constructed state, it must
  * be in this state _before_ it is released.  Called with disabled ints.
  */
-static void __cache_free(struct kmem_cache *cachep, void *objp, int nesting)
+static inline void __cache_free(struct kmem_cache *cachep, void *objp)
 {
 	struct array_cache *ac = cpu_cache_get(cachep);
 
 	check_irq_off();
 	objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
 
-	if (cache_free_alien(cachep, objp, nesting))
+	if (cache_free_alien(cachep, objp))
 		return;
 
 	if (likely(ac->avail < ac->limit)) {
@@ -3424,7 +3434,7 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 	BUG_ON(virt_to_cache(objp) != cachep);
 
 	local_irq_save(flags);
-	__cache_free(cachep, objp, 0);
+	__cache_free(cachep, objp);
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL(kmem_cache_free);
@@ -3449,7 +3459,7 @@ void kfree(const void *objp)
 	kfree_debugcheck(objp);
 	c = virt_to_cache(objp);
 	debug_check_no_locks_freed(objp, obj_size(c));
-	__cache_free(c, (void *)objp, 0);
+	__cache_free(c, (void *)objp);
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL(kfree);
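The new init_lock_keys() above relies on the standard lockdep idiom: locks that are initialised in one place but follow different nesting rules are split into separate classes with lockdep_set_class(), so legitimate nesting is not reported as recursive locking. A stand-alone sketch of the idiom (the structure and key names are made up):

#include <linux/lockdep.h>
#include <linux/spinlock.h>

static struct lock_class_key demo_outer_class;

struct demo_node {
	spinlock_t lock;
};

static void demo_node_init(struct demo_node *n, int is_outer)
{
	spin_lock_init(&n->lock);
	if (is_outer)
		/* give "outer" locks their own class so taking an "inner"
		 * lock while holding an "outer" one looks fine to lockdep */
		lockdep_set_class(&n->lock, &demo_outer_class);
}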
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 7b450798b458..266162d2ba28 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -340,7 +340,7 @@ void __vunmap(void *addr, int deallocate_pages)
 			__free_page(area->pages[i]);
 	}
 
-	if (area->nr_pages > PAGE_SIZE/sizeof(struct page *))
+	if (area->flags & VM_VPAGES)
 		vfree(area->pages);
 	else
 		kfree(area->pages);
@@ -427,9 +427,10 @@ void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 
 	area->nr_pages = nr_pages;
 	/* Please note that the recursion is strictly bounded. */
-	if (array_size > PAGE_SIZE)
+	if (array_size > PAGE_SIZE) {
 		pages = __vmalloc_node(array_size, gfp_mask, PAGE_KERNEL, node);
-	else
+		area->flags |= VM_VPAGES;
+	} else
 		pages = kmalloc_node(array_size, (gfp_mask & ~__GFP_HIGHMEM), node);
 	area->pages = pages;
 	if (!area->pages) {
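The VM_VPAGES change records at allocation time which allocator produced area->pages, so __vunmap() frees with the matching routine instead of re-deriving the decision from nr_pages. A generic sketch of that record-and-match pattern (the mybuf names are illustrative, not the kernel's):

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#define MYBUF_VMALLOCED	0x1

struct mybuf {
	void		*data;
	unsigned long	flags;
};

static int mybuf_alloc(struct mybuf *b, size_t size)
{
	b->flags = 0;
	if (size > PAGE_SIZE) {
		b->data = vmalloc(size);
		b->flags |= MYBUF_VMALLOCED;	/* remember the allocator */
	} else {
		b->data = kmalloc(size, GFP_KERNEL);
	}
	return b->data ? 0 : -ENOMEM;
}

static void mybuf_free(struct mybuf *b)
{
	if (b->flags & MYBUF_VMALLOCED)		/* free with the matching routine */
		vfree(b->data);
	else
		kfree(b->data);
}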
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 73b83d67bab6..dfdf24133901 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -81,6 +81,7 @@ void all_vm_events(unsigned long *ret)
 {
 	sum_vm_events(ret, &cpu_online_map);
 }
+EXPORT_SYMBOL_GPL(all_vm_events);
 
 #ifdef CONFIG_HOTPLUG
 /*