-rw-r--r--  include/linux/mm.h       2
-rw-r--r--  include/linux/slab.h     2
-rw-r--r--  include/linux/vmalloc.h  2
-rw-r--r--  mm/slab.c                3
-rw-r--r--  mm/swap.c               39
-rw-r--r--  mm/vmalloc.c             8
6 files changed, 25 insertions(+), 31 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 449841413cf1..45678b036955 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -318,8 +318,6 @@ static inline int get_page_unless_zero(struct page *page)
 	return atomic_inc_not_zero(&page->_count);
 }
 
-extern void FASTCALL(__page_cache_release(struct page *));
-
 static inline int page_count(struct page *page)
 {
 	if (unlikely(PageCompound(page)))
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 45ad55b70d1c..193c03c547ec 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -67,7 +67,6 @@ extern void *kmem_cache_zalloc(struct kmem_cache *, gfp_t);
 extern void kmem_cache_free(kmem_cache_t *, void *);
 extern unsigned int kmem_cache_size(kmem_cache_t *);
 extern const char *kmem_cache_name(kmem_cache_t *);
-extern kmem_cache_t *kmem_find_general_cachep(size_t size, gfp_t gfpflags);
 
 /* Size description struct for general caches. */
 struct cache_sizes {
@@ -223,7 +222,6 @@ extern int FASTCALL(kmem_ptr_validate(kmem_cache_t *cachep, void *ptr));
 /* SLOB allocator routines */
 
 void kmem_cache_init(void);
-struct kmem_cache *kmem_find_general_cachep(size_t, gfp_t gfpflags);
 struct kmem_cache *kmem_cache_create(const char *c, size_t, size_t,
 			unsigned long,
 			void (*)(void *, struct kmem_cache *, unsigned long),
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 71b6363caaaf..dee88c6b6fa7 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -44,8 +44,6 @@ extern void *vmalloc_32_user(unsigned long size);
 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
 extern void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask,
 				pgprot_t prot);
-extern void *__vmalloc_node(unsigned long size, gfp_t gfp_mask,
-				pgprot_t prot, int node);
 extern void vfree(void *addr);
 
 extern void *vmap(struct page **pages, unsigned int count,
diff --git a/mm/slab.c b/mm/slab.c
index 21ba06035700..5870bcbd33cf 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -768,11 +768,10 @@ static inline struct kmem_cache *__find_general_cachep(size_t size,
 	return csizep->cs_cachep;
 }
 
-struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
+static struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
 {
 	return __find_general_cachep(size, gfpflags);
 }
-EXPORT_SYMBOL(kmem_find_general_cachep);
 
 static size_t slab_mgmt_size(size_t nr_objs, size_t align)
 {
diff --git a/mm/swap.c b/mm/swap.c
index 600235e43704..2e0e871f542f 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -34,6 +34,25 @@
 /* How many pages do we try to swap or page in/out together? */
 int page_cluster;
 
+/*
+ * This path almost never happens for VM activity - pages are normally
+ * freed via pagevecs. But it gets used by networking.
+ */
+static void fastcall __page_cache_release(struct page *page)
+{
+	if (PageLRU(page)) {
+		unsigned long flags;
+		struct zone *zone = page_zone(page);
+
+		spin_lock_irqsave(&zone->lru_lock, flags);
+		VM_BUG_ON(!PageLRU(page));
+		__ClearPageLRU(page);
+		del_page_from_lru(zone, page);
+		spin_unlock_irqrestore(&zone->lru_lock, flags);
+	}
+	free_hot_page(page);
+}
+
 static void put_compound_page(struct page *page)
 {
 	page = (struct page *)page_private(page);
@@ -223,26 +242,6 @@ int lru_add_drain_all(void)
 #endif
 
 /*
- * This path almost never happens for VM activity - pages are normally
- * freed via pagevecs. But it gets used by networking.
- */
-void fastcall __page_cache_release(struct page *page)
-{
-	if (PageLRU(page)) {
-		unsigned long flags;
-		struct zone *zone = page_zone(page);
-
-		spin_lock_irqsave(&zone->lru_lock, flags);
-		VM_BUG_ON(!PageLRU(page));
-		__ClearPageLRU(page);
-		del_page_from_lru(zone, page);
-		spin_unlock_irqrestore(&zone->lru_lock, flags);
-	}
-	free_hot_page(page);
-}
-EXPORT_SYMBOL(__page_cache_release);
-
-/*
  * Batched page_cache_release(). Decrement the reference count on all the
  * passed pages. If it fell to zero then remove the page from the LRU and
  * free it.
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 266162d2ba28..9aad8b0cc6ee 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -24,6 +24,9 @@
 DEFINE_RWLOCK(vmlist_lock);
 struct vm_struct *vmlist;
 
+static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
+			    int node);
+
 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
 {
 	pte_t *pte;
@@ -478,8 +481,8 @@ void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
  *	allocator with @gfp_mask flags. Map them into contiguous
  *	kernel virtual space, using a pagetable protection of @prot.
  */
-void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
-			int node)
+static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
+			    int node)
 {
 	struct vm_struct *area;
 
@@ -493,7 +496,6 @@ void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
 
 	return __vmalloc_area_node(area, gfp_mask, prot, node);
 }
-EXPORT_SYMBOL(__vmalloc_node);
 
 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
 {