Diffstat (limited to 'mm')
-rw-r--r--  mm/Kconfig          |  2
-rw-r--r--  mm/hugetlb.c        |  1
-rw-r--r--  mm/mmap.c           |  4
-rw-r--r--  mm/nommu.c          |  4
-rw-r--r--  mm/page-writeback.c |  1
-rw-r--r--  mm/page_alloc.c     |  1
-rw-r--r--  mm/readahead.c      | 31
-rw-r--r--  mm/slab.c           | 74
-rw-r--r--  mm/swap.c           |  1
-rw-r--r--  mm/swap_state.c     |  1
-rw-r--r--  mm/swapfile.c       |  2
-rw-r--r--  mm/vmalloc.c        |  4
12 files changed, 64 insertions, 62 deletions
diff --git a/mm/Kconfig b/mm/Kconfig
index 1a4473fcb2ca..ae9ce6b73e8a 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -126,9 +126,11 @@ comment "Memory hotplug is currently incompatible with Software Suspend"
 # Default to 4 for wider testing, though 8 might be more appropriate.
 # ARM's adjust_pte (unused if VIPT) depends on mm-wide page_table_lock.
 # PA-RISC's debug spinlock_t is too large for the 32-bit struct page.
+# ARM26 and SPARC32 and PPC64 may use one page for multiple page tables.
 #
 config SPLIT_PTLOCK_CPUS
 	int
 	default "4096" if ARM && !CPU_CACHE_VIPT
 	default "4096" if PARISC && DEBUG_SPINLOCK && !64BIT
+	default "4096" if ARM26 || SPARC32 || PPC64
 	default "4"
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 9a565808da3f..728e9bda12ea 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -237,7 +237,6 @@ unsigned long hugetlb_total_pages(void)
 {
 	return nr_huge_pages * (HPAGE_SIZE / PAGE_SIZE);
 }
-EXPORT_SYMBOL(hugetlb_total_pages);
 
 /*
  * We cannot handle pagefaults against hugetlb pages at all. They cause
diff --git a/mm/mmap.c b/mm/mmap.c
index 320dda1778c3..6c997b159600 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -155,10 +155,6 @@ int __vm_enough_memory(long pages, int cap_sys_admin)
 	return -ENOMEM;
 }
 
-EXPORT_SYMBOL(sysctl_overcommit_memory);
-EXPORT_SYMBOL(sysctl_overcommit_ratio);
-EXPORT_SYMBOL(sysctl_max_map_count);
-EXPORT_SYMBOL(vm_committed_space);
 EXPORT_SYMBOL(__vm_enough_memory);
 
 /*
diff --git a/mm/nommu.c b/mm/nommu.c
index d1e076a487cb..6deb6ab3d6ad 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -44,10 +44,6 @@ int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
 int heap_stack_gap = 0;
 
 EXPORT_SYMBOL(mem_map);
-EXPORT_SYMBOL(sysctl_max_map_count);
-EXPORT_SYMBOL(sysctl_overcommit_memory);
-EXPORT_SYMBOL(sysctl_overcommit_ratio);
-EXPORT_SYMBOL(vm_committed_space);
 EXPORT_SYMBOL(__vm_enough_memory);
 
 /* list of shareable VMAs */
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 0166ea15c9ee..74138c9a22b9 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -750,7 +750,6 @@ int clear_page_dirty_for_io(struct page *page)
 	}
 	return TestClearPageDirty(page);
 }
-EXPORT_SYMBOL(clear_page_dirty_for_io);
 
 int test_clear_page_writeback(struct page *page)
 {
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2dbdd98426fd..ff81b5c65511 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -64,7 +64,6 @@ long nr_swap_pages;
 int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = { 256, 32 };
 
 EXPORT_SYMBOL(totalram_pages);
-EXPORT_SYMBOL(nr_swap_pages);
 
 /*
  * Used by page_zone() to look up the address of the struct zone whose
diff --git a/mm/readahead.c b/mm/readahead.c
index d0b50034e245..72e7adbb87c7 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -254,7 +254,7 @@ out:
  */
 static int
 __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
-			unsigned long offset, unsigned long nr_to_read)
+			pgoff_t offset, unsigned long nr_to_read)
 {
 	struct inode *inode = mapping->host;
 	struct page *page;
@@ -274,7 +274,7 @@ __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
 	 */
 	read_lock_irq(&mapping->tree_lock);
 	for (page_idx = 0; page_idx < nr_to_read; page_idx++) {
-		unsigned long page_offset = offset + page_idx;
+		pgoff_t page_offset = offset + page_idx;
 
 		if (page_offset > end_index)
 			break;
@@ -311,7 +311,7 @@ out:
  * memory at once.
  */
 int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
-		unsigned long offset, unsigned long nr_to_read)
+		pgoff_t offset, unsigned long nr_to_read)
 {
 	int ret = 0;
 
@@ -368,7 +368,7 @@ static inline int check_ra_success(struct file_ra_state *ra,
  * request queues.
  */
 int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
-			unsigned long offset, unsigned long nr_to_read)
+			pgoff_t offset, unsigned long nr_to_read)
 {
 	if (bdi_read_congested(mapping->backing_dev_info))
 		return -1;
@@ -385,7 +385,7 @@ int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
  */
 static int
 blockable_page_cache_readahead(struct address_space *mapping, struct file *filp,
-			unsigned long offset, unsigned long nr_to_read,
+			pgoff_t offset, unsigned long nr_to_read,
 			struct file_ra_state *ra, int block)
 {
 	int actual;
@@ -430,14 +430,27 @@ static int make_ahead_window(struct address_space *mapping, struct file *filp,
 	return ret;
 }
 
-/*
- * page_cache_readahead is the main function. If performs the adaptive
+/**
+ * page_cache_readahead - generic adaptive readahead
+ * @mapping: address_space which holds the pagecache and I/O vectors
+ * @ra: file_ra_state which holds the readahead state
+ * @filp: passed on to ->readpage() and ->readpages()
+ * @offset: start offset into @mapping, in PAGE_CACHE_SIZE units
+ * @req_size: hint: total size of the read which the caller is performing in
+ *   PAGE_CACHE_SIZE units
+ *
+ * page_cache_readahead() is the main function. If performs the adaptive
  * readahead window size management and submits the readahead I/O.
+ *
+ * Note that @filp is purely used for passing on to the ->readpage[s]()
+ * handler: it may refer to a different file from @mapping (so we may not use
+ * @filp->f_mapping or @filp->f_dentry->d_inode here).
+ * Also, @ra may not be equal to &@filp->f_ra.
+ *
  */
 unsigned long
 page_cache_readahead(struct address_space *mapping, struct file_ra_state *ra,
-		struct file *filp, unsigned long offset,
-		unsigned long req_size)
+		struct file *filp, pgoff_t offset, unsigned long req_size)
 {
 	unsigned long max, newsize;
 	int sequential;
diff --git a/mm/slab.c b/mm/slab.c
index 22bfb0b2ac8b..e291f5e1afbb 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -368,7 +368,7 @@ static inline void kmem_list3_init(struct kmem_list3 *parent)
  * manages a cache.
  */
 
-struct kmem_cache_s {
+struct kmem_cache {
 /* 1) per-cpu data, touched during every alloc/free */
 	struct array_cache	*array[NR_CPUS];
 	unsigned int		batchcount;
@@ -1502,6 +1502,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 {
 	size_t left_over, slab_size, ralign;
 	kmem_cache_t *cachep = NULL;
+	struct list_head *p;
 
 	/*
 	 * Sanity checks... these are all serious usage bugs.
@@ -1516,6 +1517,35 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 		BUG();
 	}
 
+	down(&cache_chain_sem);
+
+	list_for_each(p, &cache_chain) {
+		kmem_cache_t *pc = list_entry(p, kmem_cache_t, next);
+		mm_segment_t old_fs = get_fs();
+		char tmp;
+		int res;
+
+		/*
+		 * This happens when the module gets unloaded and doesn't
+		 * destroy its slab cache and no-one else reuses the vmalloc
+		 * area of the module. Print a warning.
+		 */
+		set_fs(KERNEL_DS);
+		res = __get_user(tmp, pc->name);
+		set_fs(old_fs);
+		if (res) {
+			printk("SLAB: cache with size %d has lost its name\n",
+			       pc->objsize);
+			continue;
+		}
+
+		if (!strcmp(pc->name,name)) {
+			printk("kmem_cache_create: duplicate cache %s\n", name);
+			dump_stack();
+			goto oops;
+		}
+	}
+
 #if DEBUG
 	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
 	if ((flags & SLAB_DEBUG_INITIAL) && !ctor) {
@@ -1592,7 +1622,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	/* Get cache's description obj. */
 	cachep = (kmem_cache_t *) kmem_cache_alloc(&cache_cache, SLAB_KERNEL);
 	if (!cachep)
-		goto opps;
+		goto oops;
 	memset(cachep, 0, sizeof(kmem_cache_t));
 
 #if DEBUG
@@ -1686,7 +1716,7 @@ next:
 		printk("kmem_cache_create: couldn't create cache %s.\n", name);
 		kmem_cache_free(&cache_cache, cachep);
 		cachep = NULL;
-		goto opps;
+		goto oops;
 	}
 	slab_size = ALIGN(cachep->num*sizeof(kmem_bufctl_t)
 			  + sizeof(struct slab), align);
@@ -1781,43 +1811,14 @@ next:
 		cachep->limit = BOOT_CPUCACHE_ENTRIES;
 	}
 
-	/* Need the semaphore to access the chain. */
-	down(&cache_chain_sem);
-	{
-		struct list_head *p;
-		mm_segment_t old_fs;
-
-		old_fs = get_fs();
-		set_fs(KERNEL_DS);
-		list_for_each(p, &cache_chain) {
-			kmem_cache_t *pc = list_entry(p, kmem_cache_t, next);
-			char tmp;
-			/* This happens when the module gets unloaded and doesn't
-			   destroy its slab cache and noone else reuses the vmalloc
-			   area of the module. Print a warning. */
-			if (__get_user(tmp,pc->name)) {
-				printk("SLAB: cache with size %d has lost its name\n",
-					pc->objsize);
-				continue;
-			}
-			if (!strcmp(pc->name,name)) {
-				printk("kmem_cache_create: duplicate cache %s\n",name);
-				up(&cache_chain_sem);
-				unlock_cpu_hotplug();
-				BUG();
-			}
-		}
-		set_fs(old_fs);
-	}
-
 	/* cache setup completed, link it into the list */
 	list_add(&cachep->next, &cache_chain);
-	up(&cache_chain_sem);
 	unlock_cpu_hotplug();
-opps:
+oops:
 	if (!cachep && (flags & SLAB_PANIC))
 		panic("kmem_cache_create(): failed to create slab `%s'\n",
 			name);
+	up(&cache_chain_sem);
 	return cachep;
 }
 EXPORT_SYMBOL(kmem_cache_create);
@@ -3262,6 +3263,7 @@ static void drain_array_locked(kmem_cache_t *cachep,
 
 /**
  * cache_reap - Reclaim memory from caches.
+ * @unused: unused parameter
  *
  * Called from workqueue/eventd every few seconds.
  * Purpose:
@@ -3278,7 +3280,7 @@ static void cache_reap(void *unused)
 
 	if (down_trylock(&cache_chain_sem)) {
 		/* Give up. Setup the next iteration. */
-		schedule_delayed_work(&__get_cpu_var(reap_work), REAPTIMEOUT_CPUC + smp_processor_id());
+		schedule_delayed_work(&__get_cpu_var(reap_work), REAPTIMEOUT_CPUC);
 		return;
 	}
 
@@ -3347,7 +3349,7 @@ next:
 	up(&cache_chain_sem);
 	drain_remote_pages();
 	/* Setup the next iteration */
-	schedule_delayed_work(&__get_cpu_var(reap_work), REAPTIMEOUT_CPUC + smp_processor_id());
+	schedule_delayed_work(&__get_cpu_var(reap_work), REAPTIMEOUT_CPUC);
 }
 
 #ifdef CONFIG_PROC_FS
diff --git a/mm/swap.c b/mm/swap.c
index 154ae13d8b7e..d09cf7f03e76 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -413,7 +413,6 @@ void vm_acct_memory(long pages)
 	}
 	preempt_enable();
 }
-EXPORT_SYMBOL(vm_acct_memory);
 
 #ifdef CONFIG_HOTPLUG_CPU
 static void lru_drain_cache(unsigned int cpu)
diff --git a/mm/swap_state.c b/mm/swap_state.c
index dfd9a46755b8..0df9a57b1de8 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -40,7 +40,6 @@ struct address_space swapper_space = {
 	.i_mmap_nonlinear = LIST_HEAD_INIT(swapper_space.i_mmap_nonlinear),
 	.backing_dev_info = &swap_backing_dev_info,
 };
-EXPORT_SYMBOL(swapper_space);
 
 #define INC_CACHE_INFO(x)	do { swap_cache_info.x++; } while (0)
 
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 8970c0b74194..edafeace301f 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -36,8 +36,6 @@ unsigned int nr_swapfiles;
 long total_swap_pages;
 static int swap_overflow;
 
-EXPORT_SYMBOL(total_swap_pages);
-
 static const char Bad_file[] = "Bad swap file entry ";
 static const char Unused_file[] = "Unused swap file entry ";
 static const char Bad_offset[] = "Bad swap offset entry ";
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 54a90e83cb31..729eb3eec75f 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -457,7 +457,7 @@ void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
  * @size:	allocation size
  * @gfp_mask:	flags for the page level allocator
  * @prot:	protection mask for the allocated pages
- * @node	node to use for allocation or -1
+ * @node:	node to use for allocation or -1
  *
  * Allocate enough pages to cover @size from the page level
  * allocator with @gfp_mask flags. Map them into contiguous
@@ -507,7 +507,7 @@ EXPORT_SYMBOL(vmalloc);
  * vmalloc_node - allocate memory on a specific node
  *
  * @size:	allocation size
- * @node;	numa node
+ * @node:	numa node
  *
  * Allocate enough pages to cover @size from the page level
  * allocator and map them into contiguous kernel virtual space.