Diffstat (limited to 'include/linux/pagemap.h')
-rw-r--r--	include/linux/pagemap.h | 60
1 file changed, 15 insertions(+), 45 deletions(-)
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 1245df7141a..64f95092515 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -113,51 +113,6 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
 extern void remove_from_page_cache(struct page *page);
 extern void __remove_from_page_cache(struct page *page);
 
-extern atomic_t nr_pagecache;
-
-#ifdef CONFIG_SMP
-
-#define PAGECACHE_ACCT_THRESHOLD	max(16, NR_CPUS * 2)
-DECLARE_PER_CPU(long, nr_pagecache_local);
-
-/*
- * pagecache_acct implements approximate accounting for pagecache.
- * vm_enough_memory() do not need high accuracy. Writers will keep
- * an offset in their per-cpu arena and will spill that into the
- * global count whenever the absolute value of the local count
- * exceeds the counter's threshold.
- *
- * MUST be protected from preemption.
- * current protection is mapping->page_lock.
- */
-static inline void pagecache_acct(int count)
-{
-	long *local;
-
-	local = &__get_cpu_var(nr_pagecache_local);
-	*local += count;
-	if (*local > PAGECACHE_ACCT_THRESHOLD || *local < -PAGECACHE_ACCT_THRESHOLD) {
-		atomic_add(*local, &nr_pagecache);
-		*local = 0;
-	}
-}
-
-#else
-
-static inline void pagecache_acct(int count)
-{
-	atomic_add(count, &nr_pagecache);
-}
-#endif
-
-static inline unsigned long get_page_cache_size(void)
-{
-	int ret = atomic_read(&nr_pagecache);
-	if (unlikely(ret < 0))
-		ret = 0;
-	return ret;
-}
-
 /*
  * Return byte-offset into filesystem object for page.
  */
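The block removed by the hunk above is a classic approximate counter: each CPU accumulates deltas privately and folds them into the shared atomic only when the local value crosses a threshold, trading exactness for fewer cache-line bounces on nr_pagecache. Below is a minimal user-space sketch of the same batching idea, assuming per-thread variables in place of the kernel's per-CPU data; the names (nr_items, items_acct, ACCT_THRESHOLD) are illustrative, not kernel API.

#include <stdatomic.h>

#define ACCT_THRESHOLD 32	/* arbitrary; the kernel used max(16, NR_CPUS * 2) */

static atomic_long nr_items;			/* shared, approximate total */
static _Thread_local long nr_items_local;	/* this thread's unflushed delta */

/* Stand-in for pagecache_acct(): batch updates locally, spill into
 * the shared counter once the local delta grows large enough. */
static void items_acct(long count)
{
	nr_items_local += count;
	if (nr_items_local > ACCT_THRESHOLD ||
	    nr_items_local < -ACCT_THRESHOLD) {
		atomic_fetch_add(&nr_items, nr_items_local);
		nr_items_local = 0;
	}
}

/* Stand-in for get_page_cache_size(): unflushed per-thread deltas
 * can make the shared total transiently negative, so clamp it. */
static long items_read(void)
{
	long total = atomic_load(&nr_items);
	return total < 0 ? 0 : total;
}

The clamp in items_read() mirrors the unlikely(ret < 0) check in the removed get_page_cache_size(): with deltas still parked in per-CPU counters, the global atomic can briefly dip below zero even though the true count never does.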
@@ -175,14 +130,29 @@ static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
 }
 
 extern void FASTCALL(__lock_page(struct page *page));
+extern void FASTCALL(__lock_page_nosync(struct page *page));
 extern void FASTCALL(unlock_page(struct page *page));
 
+/*
+ * lock_page may only be called if we have the page's inode pinned.
+ */
 static inline void lock_page(struct page *page)
 {
 	might_sleep();
 	if (TestSetPageLocked(page))
 		__lock_page(page);
 }
+
+/*
+ * lock_page_nosync should only be used if we can't pin the page's inode.
+ * Doesn't play quite so well with block device plugging.
+ */
+static inline void lock_page_nosync(struct page *page)
+{
+	might_sleep();
+	if (TestSetPageLocked(page))
+		__lock_page_nosync(page);
+}
 
 /*
  * This is exported only for wait_on_page_locked/wait_on_page_writeback.
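Both lock helpers in the hunk above share the same shape: an inline test-and-set fast path that stays cheap when the page lock is uncontended, plus an out-of-line slow path (__lock_page or __lock_page_nosync) that sleeps under contention; per the new comments, the _nosync variant is for callers that cannot pin the page's inode. A rough user-space sketch of that fast/slow split, assuming a C11 atomic_flag in place of the PG_locked bit and a yield loop in place of the kernel's sleeping wait queue; all names here are hypothetical.

#include <stdatomic.h>
#include <sched.h>

struct fake_page {
	atomic_flag locked;	/* stands in for PG_locked */
};

/* Slow path, kept out of line: the kernel would go to sleep on a
 * wait queue here instead of spinning and yielding. */
static void slow_lock(struct fake_page *page)
{
	while (atomic_flag_test_and_set(&page->locked))
		sched_yield();
}

/* Fast path, analogous to lock_page(): one uncontended
 * test-and-set, falling back to the slow path only on contention. */
static void fake_lock_page(struct fake_page *page)
{
	if (atomic_flag_test_and_set(&page->locked))
		slow_lock(page);
}

static void fake_unlock_page(struct fake_page *page)
{
	atomic_flag_clear(&page->locked);
}

Keeping the contended path out of line is the point of the TestSetPageLocked() wrapper: the common no-contention case inlines to a single atomic bit operation, and only the rare sleeper pays for a function call.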