 mm/filemap.c | 41 +++++++++++++++++++++++++++++++++++------
 1 file changed, 35 insertions(+), 6 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index e8f58f7dd7a5..3ef20739e725 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -29,6 +29,7 @@
 #include <linux/blkdev.h>
 #include <linux/security.h>
 #include <linux/syscalls.h>
+#include <linux/cpuset.h>
 #include "filemap.h"
 #include "internal.h"
 
@@ -174,7 +175,7 @@ static int sync_page(void *word)
  * dirty pages that lie within the byte offsets <start, end>
  * @mapping:	address space structure to write
  * @start:	offset in bytes where the range starts
- * @end:	offset in bytes where the range ends
+ * @end:	offset in bytes where the range ends (inclusive)
  * @sync_mode:	enable synchronous operation
  *
  * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
@@ -182,8 +183,8 @@ static int sync_page(void *word)
  * these two operations is that if a dirty page/buffer is encountered, it must
  * be waited upon, and not just skipped over.
  */
-static int __filemap_fdatawrite_range(struct address_space *mapping,
-				loff_t start, loff_t end, int sync_mode)
+int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
+				loff_t end, int sync_mode)
 {
 	int ret;
 	struct writeback_control wbc = {
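
With __filemap_fdatawrite_range() made non-static here, other kernel code can kick off ranged writeback directly. A minimal sketch of a call site, assuming a caller that already has an inode and a byte range (inode, pos and count are illustrative, not part of this patch):

	/* Start data-integrity writeback of [pos, pos + count - 1]:
	 * WB_SYNC_ALL means dirty pages are waited upon, not skipped. */
	int err = __filemap_fdatawrite_range(inode->i_mapping, pos,
					     pos + count - 1, WB_SYNC_ALL);
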
@@ -212,8 +213,8 @@ int filemap_fdatawrite(struct address_space *mapping)
 }
 EXPORT_SYMBOL(filemap_fdatawrite);
 
-static int filemap_fdatawrite_range(struct address_space *mapping,
-				loff_t start, loff_t end)
+static int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
+				loff_t end)
 {
 	return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
 }
@@ -232,7 +233,7 @@ EXPORT_SYMBOL(filemap_flush)
  * Wait for writeback to complete against pages indexed by start->end
  * inclusive
  */
-static int wait_on_page_writeback_range(struct address_space *mapping,
+int wait_on_page_writeback_range(struct address_space *mapping,
 				pgoff_t start, pgoff_t end)
 {
 	struct pagevec pvec;
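
Note that wait_on_page_writeback_range() takes page indexes (pgoff_t), not byte offsets, so byte-based callers shift first. A hedged sketch of the conversion, assuming inclusive byte offsets lstart and lend:

	/* Inclusive byte range -> inclusive page-index range. */
	int err = wait_on_page_writeback_range(mapping,
					lstart >> PAGE_CACHE_SHIFT,
					lend >> PAGE_CACHE_SHIFT);
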
@@ -367,6 +368,12 @@ int filemap_write_and_wait(struct address_space *mapping)
 }
 EXPORT_SYMBOL(filemap_write_and_wait);
 
+/*
+ * Write out and wait upon file offsets lstart->lend, inclusive.
+ *
+ * Note that `lend' is inclusive (describes the last byte to be written) so
+ * that this function can be used to write to the very end-of-file (end = -1).
+ */
 int filemap_write_and_wait_range(struct address_space *mapping,
 				loff_t lstart, loff_t lend)
 {
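
Because `lend' names the last byte rather than one past it, a caller can cover the whole file by passing -1, as the new comment describes. An illustrative whole-file sync (not taken from this patch):

	/* lend == -1: write out and wait upon every byte up to EOF. */
	int err = filemap_write_and_wait_range(inode->i_mapping, 0, -1);
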
@@ -427,6 +434,28 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
 	return ret;
 }
 
+#ifdef CONFIG_NUMA
+struct page *page_cache_alloc(struct address_space *x)
+{
+	if (cpuset_do_page_mem_spread()) {
+		int n = cpuset_mem_spread_node();
+		return alloc_pages_node(n, mapping_gfp_mask(x), 0);
+	}
+	return alloc_pages(mapping_gfp_mask(x), 0);
+}
+EXPORT_SYMBOL(page_cache_alloc);
+
+struct page *page_cache_alloc_cold(struct address_space *x)
+{
+	if (cpuset_do_page_mem_spread()) {
+		int n = cpuset_mem_spread_node();
+		return alloc_pages_node(n, mapping_gfp_mask(x)|__GFP_COLD, 0);
+	}
+	return alloc_pages(mapping_gfp_mask(x)|__GFP_COLD, 0);
+}
+EXPORT_SYMBOL(page_cache_alloc_cold);
+#endif
+
 /*
  * In order to wait for pages to become available there must be
  * waitqueues associated with pages. By using a hash table of
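
The two NUMA allocators added above spread page cache pages across the cpuset's memory nodes when cpuset_do_page_mem_spread() says so, and otherwise fall back to an ordinary local allocation. A sketch of typical use, modelled on a readahead-style path (mapping, index and the error handling are assumptions, not part of this patch):

	/* Allocate a cache-cold page, node-spread if the cpuset asks for it,
	 * then insert it into the page cache and onto the LRU. */
	struct page *page = page_cache_alloc_cold(mapping);
	if (!page)
		return -ENOMEM;
	err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
	if (err)
		page_cache_release(page);
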