author		Al Viro <viro@zeniv.linux.org.uk>	2005-10-21 03:18:50 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-10-28 11:16:47 -0400
commit		6daa0e28627abf362138244a620a821a9027d816
tree		5ca9cbc421cc1adf228cdd30cd627bca8be6242c
parent		af4ca457eaf2d6682059c18463eb106e2ce58198
[PATCH] gfp_t: mm/* (easy parts)
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
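For background: gfp_t itself was introduced by the parent "gfp_t: infrastructure" commit; current trees define it in include/linux/types.h as typedef unsigned int __bitwise gfp_t;. Under sparse (__CHECKER__) the __bitwise attribute makes it a restricted type, so allocation flags passed through a plain int parameter, as the prototypes below did, draw a warning; in a normal compile the annotation expands to nothing and gfp_t is an ordinary unsigned int. The sketch below shows the mechanism in isolation; it is not kernel code: the macro definitions mirror include/linux/compiler.h, but MY_GFP_KERNEL, typed_alloc, and untyped_alloc are made-up names, and 0xd0 is only an illustrative flag value.

#include <stdlib.h>

#ifdef __CHECKER__	/* defined when sparse, not the compiler, parses the file */
#define __bitwise	__attribute__((bitwise))
#define __force		__attribute__((force))
#else
#define __bitwise
#define __force
#endif

typedef unsigned int __bitwise gfp_t;

#define MY_GFP_KERNEL	((__force gfp_t)0xd0u)	/* illustrative value only */

/* Post-patch style: the prototype carries the restricted type. */
static void *typed_alloc(size_t size, gfp_t flags)
{
	(void)flags;	/* a real allocator would act on the flags */
	return malloc(size);
}

/* Pre-patch style: flags squeezed through a plain integer. */
static void *untyped_alloc(size_t size, unsigned int flags)
{
	(void)flags;
	return malloc(size);
}

int main(void)
{
	void *a = typed_alloc(64, MY_GFP_KERNEL);	/* clean under sparse */
	void *b = untyped_alloc(64, MY_GFP_KERNEL);	/* sparse warns: gfp_t vs. plain int */

	free(a);
	free(b);
	return 0;
}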
 include/linux/mm.h      | 2 +-
 include/linux/pagemap.h | 6 +++---
 include/linux/slab.h    | 2 +-
 include/linux/swap.h    | 4 ++--
 mm/filemap.c            | 8 ++++----
 mm/mempool.c            | 2 +-
 mm/shmem.c              | 4 ++--
 mm/slab.c               | 8 ++++----
 mm/vmscan.c             | 8 ++++----
 9 files changed, 22 insertions(+), 22 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 097b3a3c693d..e1649578fb0c 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -747,7 +747,7 @@ extern unsigned long do_mremap(unsigned long addr,
  * The callback will be passed nr_to_scan == 0 when the VM is querying the
  * cache size, so a fastpath for that case is appropriate.
  */
-typedef int (*shrinker_t)(int nr_to_scan, unsigned int gfp_mask);
+typedef int (*shrinker_t)(int nr_to_scan, gfp_t gfp_mask);
 
 /*
  * Add an aging callback.  The int is the number of 'seeks' it takes
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index acbf31c154f8..efbae53fb078 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -69,7 +69,7 @@ extern struct page * find_lock_page(struct address_space *mapping,
 extern struct page * find_trylock_page(struct address_space *mapping,
 				unsigned long index);
 extern struct page * find_or_create_page(struct address_space *mapping,
-				unsigned long index, unsigned int gfp_mask);
+				unsigned long index, gfp_t gfp_mask);
 unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
 			unsigned int nr_pages, struct page **pages);
 unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
@@ -92,9 +92,9 @@ extern int read_cache_pages(struct address_space *mapping,
 			struct list_head *pages, filler_t *filler, void *data);
 
 int add_to_page_cache(struct page *page, struct address_space *mapping,
-				unsigned long index, int gfp_mask);
+				unsigned long index, gfp_t gfp_mask);
 int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
-				unsigned long index, int gfp_mask);
+				unsigned long index, gfp_t gfp_mask);
 extern void remove_from_page_cache(struct page *page);
 extern void __remove_from_page_cache(struct page *page);
 
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 5fc04a16ecb0..09b9aa60063d 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -121,7 +121,7 @@ extern unsigned int ksize(const void *);
 extern void *kmem_cache_alloc_node(kmem_cache_t *, gfp_t flags, int node);
 extern void *kmalloc_node(size_t size, gfp_t flags, int node);
 #else
-static inline void *kmem_cache_alloc_node(kmem_cache_t *cachep, int flags, int node)
+static inline void *kmem_cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int node)
 {
 	return kmem_cache_alloc(cachep, flags);
 }
diff --git a/include/linux/swap.h b/include/linux/swap.h
index a7bf1a3b1496..20c975642cab 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -171,8 +171,8 @@ extern int rotate_reclaimable_page(struct page *page);
 extern void swap_setup(void);
 
 /* linux/mm/vmscan.c */
-extern int try_to_free_pages(struct zone **, unsigned int);
-extern int zone_reclaim(struct zone *, unsigned int, unsigned int);
+extern int try_to_free_pages(struct zone **, gfp_t);
+extern int zone_reclaim(struct zone *, gfp_t, unsigned int);
 extern int shrink_all_memory(int);
 extern int vm_swappiness;
 
diff --git a/mm/filemap.c b/mm/filemap.c
index b5346576e58d..1c31b2fd2ca5 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -377,7 +377,7 @@ int filemap_write_and_wait_range(struct address_space *mapping,
  * This function does not add the page to the LRU.  The caller must do that.
  */
 int add_to_page_cache(struct page *page, struct address_space *mapping,
-		pgoff_t offset, int gfp_mask)
+		pgoff_t offset, gfp_t gfp_mask)
 {
 	int error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
 
@@ -401,7 +401,7 @@ int add_to_page_cache(struct page *page, struct address_space *mapping,
 EXPORT_SYMBOL(add_to_page_cache);
 
 int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
-		pgoff_t offset, int gfp_mask)
+		pgoff_t offset, gfp_t gfp_mask)
 {
 	int ret = add_to_page_cache(page, mapping, offset, gfp_mask);
 	if (ret == 0)
@@ -591,7 +591,7 @@ EXPORT_SYMBOL(find_lock_page);
  * memory exhaustion.
  */
 struct page *find_or_create_page(struct address_space *mapping,
-		unsigned long index, unsigned int gfp_mask)
+		unsigned long index, gfp_t gfp_mask)
 {
 	struct page *page, *cached_page = NULL;
 	int err;
@@ -683,7 +683,7 @@ struct page *
 grab_cache_page_nowait(struct address_space *mapping, unsigned long index)
 {
 	struct page *page = find_get_page(mapping, index);
-	unsigned int gfp_mask;
+	gfp_t gfp_mask;
 
 	if (page) {
 		if (!TestSetPageLocked(page))
diff --git a/mm/mempool.c b/mm/mempool.c
index 9e377ea700b2..1a99b80480d3 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -205,7 +205,7 @@ void * mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
 	void *element;
 	unsigned long flags;
 	wait_queue_t wait;
-	unsigned int gfp_temp;
+	gfp_t gfp_temp;
 
 	might_sleep_if(gfp_mask & __GFP_WAIT);
 
diff --git a/mm/shmem.c b/mm/shmem.c
index ea064d89cda9..55e04a0734c1 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -85,7 +85,7 @@ enum sgp_type {
 static int shmem_getpage(struct inode *inode, unsigned long idx,
 			 struct page **pagep, enum sgp_type sgp, int *type);
 
-static inline struct page *shmem_dir_alloc(unsigned int gfp_mask)
+static inline struct page *shmem_dir_alloc(gfp_t gfp_mask)
 {
 	/*
 	 * The above definition of ENTRIES_PER_PAGE, and the use of
@@ -898,7 +898,7 @@ struct page *shmem_swapin(struct shmem_inode_info *info, swp_entry_t entry,
 }
 
 static struct page *
-shmem_alloc_page(unsigned long gfp, struct shmem_inode_info *info,
+shmem_alloc_page(gfp_t gfp, struct shmem_inode_info *info,
 		 unsigned long idx)
 {
 	struct vm_area_struct pvma;
diff --git a/mm/slab.c b/mm/slab.c
index d05c678bceb3..d30423f167a2 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -386,7 +386,7 @@ struct kmem_cache_s {
 	unsigned int gfporder;
 
 	/* force GFP flags, e.g. GFP_DMA */
-	unsigned int gfpflags;
+	gfp_t gfpflags;
 
 	size_t colour;			/* cache colouring range */
 	unsigned int colour_off;	/* colour offset */
@@ -2117,7 +2117,7 @@ static void cache_init_objs(kmem_cache_t *cachep,
 	slabp->free = 0;
 }
 
-static void kmem_flagcheck(kmem_cache_t *cachep, unsigned int flags)
+static void kmem_flagcheck(kmem_cache_t *cachep, gfp_t flags)
 {
 	if (flags & SLAB_DMA) {
 		if (!(cachep->gfpflags & GFP_DMA))
@@ -2152,7 +2152,7 @@ static int cache_grow(kmem_cache_t *cachep, gfp_t flags, int nodeid)
 	struct slab *slabp;
 	void *objp;
 	size_t offset;
-	unsigned int local_flags;
+	gfp_t local_flags;
 	unsigned long ctor_flags;
 	struct kmem_list3 *l3;
 
@@ -2546,7 +2546,7 @@ static inline void *__cache_alloc(kmem_cache_t *cachep, gfp_t flags)
 /*
  * A interface to enable slab creation on nodeid
  */
-static void *__cache_alloc_node(kmem_cache_t *cachep, int flags, int nodeid)
+static void *__cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid)
 {
 	struct list_head *entry;
 	struct slab *slabp;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 64f9570cff56..843c87d1e61f 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -70,7 +70,7 @@ struct scan_control {
 	unsigned int priority;
 
 	/* This context's GFP mask */
-	unsigned int gfp_mask;
+	gfp_t gfp_mask;
 
 	int may_writepage;
 
@@ -186,7 +186,7 @@ EXPORT_SYMBOL(remove_shrinker);
  *
  * Returns the number of slab objects which we shrunk.
  */
-static int shrink_slab(unsigned long scanned, unsigned int gfp_mask,
+static int shrink_slab(unsigned long scanned, gfp_t gfp_mask,
 			unsigned long lru_pages)
 {
 	struct shrinker *shrinker;
@@ -926,7 +926,7 @@ shrink_caches(struct zone **zones, struct scan_control *sc)
  * holds filesystem locks which prevent writeout this might not work, and the
  * allocation attempt will fail.
  */
-int try_to_free_pages(struct zone **zones, unsigned int gfp_mask)
+int try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
 {
 	int priority;
 	int ret = 0;
@@ -1338,7 +1338,7 @@ module_init(kswapd_init)
 /*
  * Try to free up some pages from this zone through reclaim.
  */
-int zone_reclaim(struct zone *zone, unsigned int gfp_mask, unsigned int order)
+int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 {
 	struct scan_control sc;
 	int nr_pages = 1 << order;
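Like the rest of the gfp_t series, this patch is a pure type substitution: each hunk swaps an integer type for gfp_t and changes no logic. The payoff is that sparse can then police the flags; kbuild exposes the checker through its C= option, so something along the lines of

	make C=2 mm/

(C=2 checks every source file it compiles, C=1 only files being rebuilt) will warn wherever a gfp mask still travels through a plain int or unsigned long.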