Diffstat (limited to 'include/linux/pagemap.h')
-rw-r--r--  include/linux/pagemap.h  246
1 files changed, 246 insertions, 0 deletions
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
new file mode 100644
index 000000000000..0422031161ba
--- /dev/null
+++ b/include/linux/pagemap.h
@@ -0,0 +1,246 @@
#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <asm/uaccess.h>
#include <linux/gfp.h>

/*
 * Bits in mapping->flags. The lower __GFP_BITS_SHIFT bits are the page
 * allocation mode flags.
 */
#define AS_EIO          (__GFP_BITS_SHIFT + 0)  /* IO error on async write */
#define AS_ENOSPC       (__GFP_BITS_SHIFT + 1)  /* ENOSPC on async write */

static inline unsigned int __nocast mapping_gfp_mask(struct address_space * mapping)
{
        return mapping->flags & __GFP_BITS_MASK;
}

/*
 * This is non-atomic. Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, int mask)
{
        m->flags = (m->flags & ~__GFP_BITS_MASK) | mask;
}
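
/*
 * Illustrative sketch, not part of the original header: how a filesystem
 * might pin down the allocation policy of a mapping at setup time, so that
 * later page cache allocations done via mapping_gfp_mask() avoid recursing
 * back into the filesystem during reclaim.  The helper name is made up for
 * the example; GFP_NOFS and inode->i_mapping come from the rest of the
 * kernel.
 */
static inline void example_mapping_init_gfp(struct inode *inode)
{
        mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
}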

/*
 * The page cache can be done in larger chunks than
 * one page, because it allows for more efficient
 * throughput (it can then be mapped into user
 * space in smaller chunks for the same flexibility).
 *
 * Or rather, it _will_ be done in larger chunks.
 */
#define PAGE_CACHE_SHIFT        PAGE_SHIFT
#define PAGE_CACHE_SIZE         PAGE_SIZE
#define PAGE_CACHE_MASK         PAGE_MASK
#define PAGE_CACHE_ALIGN(addr)  (((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)
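
/*
 * Worked example (illustrative, assuming PAGE_CACHE_SIZE == 4096):
 * PAGE_CACHE_ALIGN(5000) == (5000 + 4095) & ~4095 == 8192, i.e. the
 * value is rounded up to the next page cache chunk boundary.
 */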

#define page_cache_get(page)            get_page(page)
#define page_cache_release(page)        put_page(page)
void release_pages(struct page **pages, int nr, int cold);

static inline struct page *page_cache_alloc(struct address_space *x)
{
        return alloc_pages(mapping_gfp_mask(x), 0);
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
        return alloc_pages(mapping_gfp_mask(x)|__GFP_COLD, 0);
}

typedef int filler_t(void *, struct page *);

extern struct page * find_get_page(struct address_space *mapping,
                                unsigned long index);
extern struct page * find_lock_page(struct address_space *mapping,
                                unsigned long index);
extern struct page * find_trylock_page(struct address_space *mapping,
                                unsigned long index);
extern struct page * find_or_create_page(struct address_space *mapping,
                                unsigned long index, unsigned int gfp_mask);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
                        unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
                        int tag, unsigned int nr_pages, struct page **pages);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping, unsigned long index)
{
        return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}
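
/*
 * Illustrative sketch, not part of the original header: the usual calling
 * pattern for grab_cache_page().  The page comes back locked with an
 * elevated reference count, so the caller must unlock_page() (declared
 * further down in this header) and page_cache_release() it.  The helper
 * name and error handling are assumptions for the example.
 */
static inline int example_touch_index(struct address_space *mapping,
                                      unsigned long index)
{
        struct page *page = grab_cache_page(mapping, index);

        if (!page)
                return -ENOMEM;
        /* ... fill or modify the page contents here ... */
        unlock_page(page);
        page_cache_release(page);
        return 0;
}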

extern struct page * grab_cache_page_nowait(struct address_space *mapping,
                                unsigned long index);
extern struct page * read_cache_page(struct address_space *mapping,
                                unsigned long index, filler_t *filler,
                                void *data);
extern int read_cache_pages(struct address_space *mapping,
                                struct list_head *pages, filler_t *filler, void *data);
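
/*
 * Illustrative sketch, not part of the original header: read_cache_page()
 * looks up or creates the page at @index and runs @filler to bring it
 * uptodate, returning an ERR_PTR() on failure.  A common convention is to
 * pass the mapping's own readpage operation as the filler; the cast and
 * the NULL file argument below are assumptions for the sketch, not a
 * documented contract.
 */
static inline struct page *example_read_index(struct address_space *mapping,
                                              unsigned long index)
{
        return read_cache_page(mapping, index,
                               (filler_t *)mapping->a_ops->readpage, NULL);
}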

int add_to_page_cache(struct page *page, struct address_space *mapping,
                                unsigned long index, int gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
                                unsigned long index, int gfp_mask);
extern void remove_from_page_cache(struct page *page);
extern void __remove_from_page_cache(struct page *page);
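
/*
 * Illustrative sketch, not part of the original header: the common way a
 * freshly allocated page enters the page cache, e.g. from a readahead
 * style path.  A cache-cold page is allocated with the mapping's own gfp
 * mask and inserted at @index; if insertion fails the only reference is
 * dropped again.  The helper name is an assumption for the example.
 */
static inline struct page *example_add_new_page(struct address_space *mapping,
                                                unsigned long index)
{
        struct page *page = page_cache_alloc_cold(mapping);

        if (!page)
                return NULL;
        if (add_to_page_cache_lru(page, mapping, index,
                                  mapping_gfp_mask(mapping))) {
                page_cache_release(page);
                return NULL;
        }
        return page;
}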

extern atomic_t nr_pagecache;

#ifdef CONFIG_SMP

#define PAGECACHE_ACCT_THRESHOLD        max(16, NR_CPUS * 2)
DECLARE_PER_CPU(long, nr_pagecache_local);

/*
 * pagecache_acct implements approximate accounting for pagecache.
 * vm_enough_memory() does not need high accuracy.  Writers will keep
 * an offset in their per-cpu arena and will spill that into the
 * global count whenever the absolute value of the local count
 * exceeds the counter's threshold.
 *
 * MUST be protected from preemption.
 * Current protection is mapping->page_lock.
 */
static inline void pagecache_acct(int count)
{
        long *local;

        local = &__get_cpu_var(nr_pagecache_local);
        *local += count;
        if (*local > PAGECACHE_ACCT_THRESHOLD || *local < -PAGECACHE_ACCT_THRESHOLD) {
                atomic_add(*local, &nr_pagecache);
                *local = 0;
        }
}

#else

static inline void pagecache_acct(int count)
{
        atomic_add(count, &nr_pagecache);
}
#endif
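
/*
 * Illustrative example, not part of the original header: with NR_CPUS == 2
 * the threshold is max(16, 4) == 16, so a CPU's local delta must exceed 16
 * pages (in either direction) before it is folded into the global
 * nr_pagecache counter.  The global count can therefore be off by roughly
 * NR_CPUS * PAGECACHE_ACCT_THRESHOLD pages at any instant, which is fine
 * for the approximate use in vm_enough_memory().
 */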

static inline unsigned long get_page_cache_size(void)
{
        int ret = atomic_read(&nr_pagecache);
        if (unlikely(ret < 0))
                ret = 0;
        return ret;
}

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
        return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
}
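
/*
 * Worked example (illustrative, assuming PAGE_CACHE_SHIFT == 12):
 * a page with page->index == 3 backs file bytes 12288..16383, so
 * page_offset() returns 3 << 12 == 12288.
 */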

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
                                        unsigned long address)
{
        pgoff_t pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
        pgoff += vma->vm_pgoff;
        return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
}
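
/*
 * Worked example (illustrative, assuming PAGE_SHIFT == PAGE_CACHE_SHIFT == 12):
 * for a vma with vm_start == 0x10000000 and vm_pgoff == 5, the address
 * 0x10003000 lies 3 pages into the mapping, so linear_page_index()
 * returns (3 + 5) >> 0 == 8, the page cache index backing that address.
 */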

extern void FASTCALL(__lock_page(struct page *page));
extern void FASTCALL(unlock_page(struct page *page));

static inline void lock_page(struct page *page)
{
        might_sleep();
        if (TestSetPageLocked(page))
                __lock_page(page);
}

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback.
 * Never use this directly!
 */
extern void FASTCALL(wait_on_page_bit(struct page *page, int bit_nr));

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * i.e. with an increased "page->count" so that the page won't
 * go away during the wait.
 */
static inline void wait_on_page_locked(struct page *page)
{
        if (PageLocked(page))
                wait_on_page_bit(page, PG_locked);
}

/*
 * Wait for a page to complete writeback
 */
static inline void wait_on_page_writeback(struct page *page)
{
        if (PageWriteback(page))
                wait_on_page_bit(page, PG_writeback);
}
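
/*
 * Illustrative sketch, not part of the original header: a truncate-style
 * caller that needs a page both locked and clear of writeback before
 * touching it.  The caller is assumed to already hold a reference on the
 * page; the helper name is made up for the example.
 */
static inline void example_wait_page_idle(struct page *page)
{
        lock_page(page);
        wait_on_page_writeback(page);
        /* the page is now locked and no writeback is in flight */
}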

extern void end_page_writeback(struct page *page);

/*
 * Fault a userspace page into pagetables. Return non-zero on a fault.
 *
 * This assumes that two userspace pages are always sufficient. That's
 * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
        int ret;

        /*
         * Writing zeroes into userspace here is OK, because we know that if
         * the zero gets there, we'll be overwriting it.
         */
        ret = __put_user(0, uaddr);
        if (ret == 0) {
                char __user *end = uaddr + size - 1;

                /*
                 * If the page was already mapped, this will get a cache miss
                 * for sure, so try to avoid doing it.
                 */
                if (((unsigned long)uaddr & PAGE_MASK) !=
                                ((unsigned long)end & PAGE_MASK))
                        ret = __put_user(0, end);
        }
        return ret;
}

static inline void fault_in_pages_readable(const char __user *uaddr, int size)
{
        volatile char c;
        int ret;

        ret = __get_user(c, uaddr);
        if (ret == 0) {
                const char __user *end = uaddr + size - 1;

                if (((unsigned long)uaddr & PAGE_MASK) !=
                                ((unsigned long)end & PAGE_MASK))
                        __get_user(c, end);
        }
}
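
/*
 * Illustrative sketch, not part of the original header: the classic
 * write-path pattern.  The user buffer is faulted in up front so that the
 * later copy from user space, done while the page cache page is held, is
 * unlikely to fault.  The helper name is an assumption for the example.
 */
static inline void example_prefault_user_buffer(const char __user *buf,
                                                int bytes)
{
        fault_in_pages_readable(buf, bytes);
        /* ... then grab/lock the page cache page and copy from @buf ... */
}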

#endif /* _LINUX_PAGEMAP_H */