author     Matthew Wilcox <matthew@wil.cx>               2007-12-06 11:18:49 -0500
committer  Matthew Wilcox <willy@linux.intel.com>        2007-12-06 17:35:41 -0500
commit     2687a3569e40b1302f96698bcd6329aeb0ce3dd2 (patch)
tree       7bb5e1ffd807ef94b145f6829bf4326a98b8fd99
parent     f776d12dd16da1b0cd55a1240002c1b31f315d5d (diff)
Add lock_page_killable
This routine is like lock_page, but can be interrupted by a fatal signal.

Signed-off-by: Matthew Wilcox <willy@linux.intel.com>
-rw-r--r--  include/linux/pagemap.h  14
-rw-r--r--  mm/filemap.c             14
2 files changed, 28 insertions, 0 deletions
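
For context, here is a minimal sketch of how a caller might use the lock_page_killable() API added by this commit. The helper name read_locked_page() and its body are hypothetical, shown only to illustrate the 0 / -EINTR contract described in the commit message; it is not part of this patch.

/*
 * Hypothetical caller: lock the page, but give up cleanly if the task
 * receives a fatal signal while waiting for PG_locked.
 */
static int read_locked_page(struct page *page)
{
	int error;

	error = lock_page_killable(page);
	if (error)		/* -EINTR: killed while waiting for the lock */
		return error;

	/* ... work on the locked page here ... */

	unlock_page(page);
	return 0;
}

A caller converted from plain lock_page() can simply propagate the -EINTR to its own caller; because the wait is TASK_KILLABLE, only a fatal signal (not an ordinary signal) ends the wait early.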
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index db8a410ae9e1..4b62a105622b 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -157,6 +157,7 @@ static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
 }
 
 extern void FASTCALL(__lock_page(struct page *page));
+extern int FASTCALL(__lock_page_killable(struct page *page));
 extern void FASTCALL(__lock_page_nosync(struct page *page));
 extern void FASTCALL(unlock_page(struct page *page));
 
@@ -171,6 +172,19 @@ static inline void lock_page(struct page *page)
 }
 
 /*
+ * lock_page_killable is like lock_page but can be interrupted by fatal
+ * signals.  It returns 0 if it locked the page and -EINTR if it was
+ * killed while waiting.
+ */
+static inline int lock_page_killable(struct page *page)
+{
+	might_sleep();
+	if (TestSetPageLocked(page))
+		return __lock_page_killable(page);
+	return 0;
+}
+
+/*
  * lock_page_nosync should only be used if we can't pin the page's inode.
  * Doesn't play quite so well with block device plugging.
  */
diff --git a/mm/filemap.c b/mm/filemap.c
index 188cf5fd3e8d..ac8f690d2885 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -173,6 +173,12 @@ static int sync_page(void *word)
 	return 0;
 }
 
+static int sync_page_killable(void *word)
+{
+	sync_page(word);
+	return fatal_signal_pending(current) ? -EINTR : 0;
+}
+
 /**
  * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
  * @mapping:	address space structure to write
@@ -577,6 +583,14 @@ void fastcall __lock_page(struct page *page)
 }
 EXPORT_SYMBOL(__lock_page);
 
+int fastcall __lock_page_killable(struct page *page)
+{
+	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
+
+	return __wait_on_bit_lock(page_waitqueue(page), &wait,
+					sync_page_killable, TASK_KILLABLE);
+}
+
 /*
  * Variant of lock_page that does not require the caller to hold a reference
  * on the page's mapping.