author	Adrian Bunk <bunk@stusta.de>	2006-09-26 02:31:02 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-09-26 11:48:45 -0400
commit	b221385bc41d6789edde3d2fa0cb20d5045730eb (patch)
tree	93f3317247d587fd011eb9d77cd73a49670d8d5f /mm
parent	204ec841fbea3e5138168edbc3a76d46747cc987 (diff)
[PATCH] mm/: make functions static
This patch makes the following needlessly global functions static:

- slab.c: kmem_find_general_cachep()
- swap.c: __page_cache_release()
- vmalloc.c: __vmalloc_node()

Signed-off-by: Adrian Bunk <bunk@stusta.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
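For readers unfamiliar with C linkage, here is a minimal userspace sketch (not from this patch; the function name is hypothetical) of what `static` changes: the function gets internal linkage, becomes invisible to other translation units, and the compiler is free to inline or drop it.

#include <stdio.h>

/* Before: a definition like `int double_it(int x)` has external linkage,
 * so any other .c file can declare and call it.  Marking it `static`
 * makes the symbol private to this translation unit. */
static int double_it(int x)
{
	return x * 2;
}

int main(void)
{
	printf("%d\n", double_it(21));	/* prints 42 */
	return 0;
}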
Diffstat (limited to 'mm')
-rw-r--r--	mm/slab.c	3
-rw-r--r--	mm/swap.c	39
-rw-r--r--	mm/vmalloc.c	8
3 files changed, 25 insertions(+), 25 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 21ba06035700..5870bcbd33cf 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -768,11 +768,10 @@ static inline struct kmem_cache *__find_general_cachep(size_t size,
 	return csizep->cs_cachep;
 }
 
-struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
+static struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
 {
 	return __find_general_cachep(size, gfpflags);
 }
-EXPORT_SYMBOL(kmem_find_general_cachep);
 
 static size_t slab_mgmt_size(size_t nr_objs, size_t align)
 {
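The EXPORT_SYMBOL() line removed above is what made kmem_find_general_cachep() resolvable from loadable modules. A minimal sketch of that export pattern (hypothetical my_helper(), not from this patch):

#include <linux/module.h>

/* EXPORT_SYMBOL() records the symbol in the kernel's __ksymtab section so
 * loadable modules can link against it.  Dropping the export and adding
 * `static`, as this patch does, removes the symbol from the module-visible
 * namespace altogether. */
int my_helper(int x)
{
	return x + 1;
}
EXPORT_SYMBOL(my_helper);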
diff --git a/mm/swap.c b/mm/swap.c
index 600235e43704..2e0e871f542f 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -34,6 +34,25 @@
 /* How many pages do we try to swap or page in/out together? */
 int page_cluster;
 
+/*
+ * This path almost never happens for VM activity - pages are normally
+ * freed via pagevecs.  But it gets used by networking.
+ */
+static void fastcall __page_cache_release(struct page *page)
+{
+	if (PageLRU(page)) {
+		unsigned long flags;
+		struct zone *zone = page_zone(page);
+
+		spin_lock_irqsave(&zone->lru_lock, flags);
+		VM_BUG_ON(!PageLRU(page));
+		__ClearPageLRU(page);
+		del_page_from_lru(zone, page);
+		spin_unlock_irqrestore(&zone->lru_lock, flags);
+	}
+	free_hot_page(page);
+}
+
 static void put_compound_page(struct page *page)
 {
 	page = (struct page *)page_private(page);
@@ -223,26 +242,6 @@ int lru_add_drain_all(void)
 #endif
 
 /*
- * This path almost never happens for VM activity - pages are normally
- * freed via pagevecs.  But it gets used by networking.
- */
-void fastcall __page_cache_release(struct page *page)
-{
-	if (PageLRU(page)) {
-		unsigned long flags;
-		struct zone *zone = page_zone(page);
-
-		spin_lock_irqsave(&zone->lru_lock, flags);
-		VM_BUG_ON(!PageLRU(page));
-		__ClearPageLRU(page);
-		del_page_from_lru(zone, page);
-		spin_unlock_irqrestore(&zone->lru_lock, flags);
-	}
-	free_hot_page(page);
-}
-EXPORT_SYMBOL(__page_cache_release);
-
-/*
  * Batched page_cache_release().  Decrement the reference count on all the
  * passed pages.  If it fell to zero then remove the page from the LRU and
  * free it.
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 266162d2ba28..9aad8b0cc6ee 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -24,6 +24,9 @@
 DEFINE_RWLOCK(vmlist_lock);
 struct vm_struct *vmlist;
 
+static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
+			    int node);
+
 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
 {
 	pte_t *pte;
@@ -478,8 +481,8 @@ void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
  * allocator with @gfp_mask flags.  Map them into contiguous
  * kernel virtual space, using a pagetable protection of @prot.
  */
-void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
-			int node)
+static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
+			int node)
 {
 	struct vm_struct *area;
 
@@ -493,7 +496,6 @@ void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
 
 	return __vmalloc_area_node(area, gfp_mask, prot, node);
 }
-EXPORT_SYMBOL(__vmalloc_node);
 
 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
 {