author		Adrian Bunk <bunk@stusta.de>		2006-09-26 02:31:02 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-09-26 11:48:45 -0400
commit		b221385bc41d6789edde3d2fa0cb20d5045730eb
tree		93f3317247d587fd011eb9d77cd73a49670d8d5f
parent		204ec841fbea3e5138168edbc3a76d46747cc987
[PATCH] mm/: make functions static
This patch makes the following needlessly global functions static (a minimal sketch of the pattern follows the diffstat):
- slab.c: kmem_find_general_cachep()
- swap.c: __page_cache_release()
- vmalloc.c: __vmalloc_node()
Signed-off-by: Adrian Bunk <bunk@stusta.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--	include/linux/mm.h	 2
-rw-r--r--	include/linux/slab.h	 2
-rw-r--r--	include/linux/vmalloc.h	 2
-rw-r--r--	mm/slab.c		 3
-rw-r--r--	mm/swap.c		39
-rw-r--r--	mm/vmalloc.c		 8
6 files changed, 25 insertions, 31 deletions
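Making a needlessly global function static means the function keeps working exactly as before within its own file, but stops being a linker-visible symbol; the header declaration and any EXPORT_SYMBOL() must go with it. A minimal sketch of the pattern, using hypothetical foo.h/foo.c names that are not part of this patch:

/* foo.h -- before the change, this declaration advertised helper()
 * to every file including the header; it is simply removed. */
/* extern int helper(int x); */

/* foo.c -- after the change, helper() has internal linkage: only
 * code in foo.c can call it, and the compiler may inline it freely. */
static int helper(int x)
{
	return x * 2;
}

int foo_do_work(int x)
{
	return helper(x) + 1;	/* still callable within foo.c */
}

The same three-step recipe (drop the extern declaration from the header, add static to the definition, delete the EXPORT_SYMBOL) is what each hunk below performs.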
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 449841413cf1..45678b036955 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -318,8 +318,6 @@ static inline int get_page_unless_zero(struct page *page)
 	return atomic_inc_not_zero(&page->_count);
 }
 
-extern void FASTCALL(__page_cache_release(struct page *));
-
 static inline int page_count(struct page *page)
 {
 	if (unlikely(PageCompound(page)))
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 45ad55b70d1c..193c03c547ec 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -67,7 +67,6 @@ extern void *kmem_cache_zalloc(struct kmem_cache *, gfp_t);
 extern void kmem_cache_free(kmem_cache_t *, void *);
 extern unsigned int kmem_cache_size(kmem_cache_t *);
 extern const char *kmem_cache_name(kmem_cache_t *);
-extern kmem_cache_t *kmem_find_general_cachep(size_t size, gfp_t gfpflags);
 
 /* Size description struct for general caches. */
 struct cache_sizes {
@@ -223,7 +222,6 @@ extern int FASTCALL(kmem_ptr_validate(kmem_cache_t *cachep, void *ptr));
 /* SLOB allocator routines */
 
 void kmem_cache_init(void);
-struct kmem_cache *kmem_find_general_cachep(size_t, gfp_t gfpflags);
 struct kmem_cache *kmem_cache_create(const char *c, size_t, size_t,
 	unsigned long,
 	void (*)(void *, struct kmem_cache *, unsigned long),
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 71b6363caaaf..dee88c6b6fa7 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -44,8 +44,6 @@ extern void *vmalloc_32_user(unsigned long size);
 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
 extern void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask,
 			pgprot_t prot);
-extern void *__vmalloc_node(unsigned long size, gfp_t gfp_mask,
-			pgprot_t prot, int node);
 extern void vfree(void *addr);
 
 extern void *vmap(struct page **pages, unsigned int count,
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -768,11 +768,10 @@ static inline struct kmem_cache *__find_general_cachep(size_t size,
 	return csizep->cs_cachep;
 }
 
-struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
+static struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
 {
 	return __find_general_cachep(size, gfpflags);
 }
-EXPORT_SYMBOL(kmem_find_general_cachep);
 
 static size_t slab_mgmt_size(size_t nr_objs, size_t align)
 {
diff --git a/mm/swap.c b/mm/swap.c
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -34,6 +34,25 @@
 /* How many pages do we try to swap or page in/out together? */
 int page_cluster;
 
+/*
+ * This path almost never happens for VM activity - pages are normally
+ * freed via pagevecs.  But it gets used by networking.
+ */
+static void fastcall __page_cache_release(struct page *page)
+{
+	if (PageLRU(page)) {
+		unsigned long flags;
+		struct zone *zone = page_zone(page);
+
+		spin_lock_irqsave(&zone->lru_lock, flags);
+		VM_BUG_ON(!PageLRU(page));
+		__ClearPageLRU(page);
+		del_page_from_lru(zone, page);
+		spin_unlock_irqrestore(&zone->lru_lock, flags);
+	}
+	free_hot_page(page);
+}
+
 static void put_compound_page(struct page *page)
 {
 	page = (struct page *)page_private(page);
@@ -223,26 +242,6 @@ int lru_add_drain_all(void)
 #endif
 
 /*
- * This path almost never happens for VM activity - pages are normally
- * freed via pagevecs.  But it gets used by networking.
- */
-void fastcall __page_cache_release(struct page *page)
-{
-	if (PageLRU(page)) {
-		unsigned long flags;
-		struct zone *zone = page_zone(page);
-
-		spin_lock_irqsave(&zone->lru_lock, flags);
-		VM_BUG_ON(!PageLRU(page));
-		__ClearPageLRU(page);
-		del_page_from_lru(zone, page);
-		spin_unlock_irqrestore(&zone->lru_lock, flags);
-	}
-	free_hot_page(page);
-}
-EXPORT_SYMBOL(__page_cache_release);
-
-/*
  * Batched page_cache_release().  Decrement the reference count on all the
  * passed pages.  If it fell to zero then remove the page from the LRU and
  * free it.
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 266162d2ba28..9aad8b0cc6ee 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -24,6 +24,9 @@
 DEFINE_RWLOCK(vmlist_lock);
 struct vm_struct *vmlist;
 
+static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
+			    int node);
+
 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
 {
 	pte_t *pte;
@@ -478,8 +481,8 @@ void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
  * allocator with @gfp_mask flags.  Map them into contiguous
  * kernel virtual space, using a pagetable protection of @prot.
  */
-void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
-		     int node)
+static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
+			    int node)
 {
 	struct vm_struct *area;
 
@@ -493,7 +496,6 @@ void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
 
 	return __vmalloc_area_node(area, gfp_mask, prot, node);
 }
-EXPORT_SYMBOL(__vmalloc_node);
 
 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
 {
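One wrinkle worth noting: once a function is static, a caller that appears earlier in the file than the definition can no longer rely on a header declaration. The patch resolves this in two ways: mm/swap.c moves the body of __page_cache_release() up above its first user, while mm/vmalloc.c leaves __vmalloc_node() in place and adds a static forward declaration near the top of the file. A minimal sketch of the forward-declaration approach, again with hypothetical names:

/* bar.c -- helper() is called before it is defined, so a static
 * forward declaration tells the compiler its type and linkage. */
static int helper(int x);

int bar_do_work(int x)
{
	return helper(x);	/* call site precedes the definition */
}

static int helper(int x)
{
	return x + 1;
}

Without the forward declaration, the first call would either be rejected outright (C99 and later) or create an implicit extern declaration that conflicts with the later static definition (C89).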