author     Albert Herranz <albert_herranz@yahoo.es>  2010-06-04 17:14:56 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2010-06-04 18:21:45 -0400
commit     3f505ca45735c35576dab4ceb3e3736d528b6672 (patch)
tree       447973161e0589a39c2831c7ef3225f49ae4f85b /drivers/video
parent     1da083c9b23dafd6bcb08dcfec443e66e90efff0 (diff)
Revert "fb_defio: fix for non-dirty ptes"
This reverts commit 49bbd815fd8ba26d0354900b783b767c7f47c816 ("fb_defio:
fix for non-dirty ptes").

Although the fix provided is correct, it's been suggested to avoid the
underlying race in the same way as it is currently done in filesystems
like NFS, for maintainability.

A following patch "fb_defio: redo fix for non-dirty ptes" will provide
such an alternate fix.

Signed-off-by: Albert Herranz <albert_herranz@yahoo.es>
Cc: Jaya Kumar <jayakumar.lkml@gmail.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
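[Editor's note] The race being reverted around here: the deferred-IO
workqueue callback can fire after a ->page_mkwrite() call but before the
fault handler has marked the PTE dirty, in which case page_mkclean()
finds nothing to clean and the page is never "rearmed" (see the comment
removed in the first hunk below). The NFS-style alternative the message
alludes to is, roughly, to keep the page locked from ->page_mkwrite()
until the PTE has been dirtied, so the work function's lock_page()
serializes against the fault. The sketch below only illustrates that
direction; it is not the actual follow-up patch, and it omits the
sorted-insert/duplicate bookkeeping the real fb_deferred_io_mkwrite()
performs on the pagelist:

#include <linux/fb.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <linux/workqueue.h>

/*
 * Sketch only: an NFS-style ->page_mkwrite() that returns with the page
 * locked (VM_FAULT_LOCKED).  The fault path unlocks the page only after
 * the PTE has been made writable and dirty, so fb_deferred_io_work()'s
 * lock_page() cannot observe the page in the clean-PTE window.
 */
static int fb_deferred_io_mkwrite(struct vm_area_struct *vma,
				  struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct fb_info *info = vma->vm_private_data;
	struct fb_deferred_io *fbdefio = info->fbdefio;

	/* hold the page lock across the fault; the caller releases it
	 * once the PTE is dirty */
	lock_page(page);

	/* protect the pagelist against the workqueue (simplified: the
	 * real code keeps the list sorted and skips duplicate pages) */
	mutex_lock(&fbdefio->lock);
	list_add(&page->lru, &fbdefio->pagelist);
	mutex_unlock(&fbdefio->lock);

	/* come back after a delay to process the deferred IO */
	schedule_delayed_work(&info->deferred_work, fbdefio->delay);
	return VM_FAULT_LOCKED;
}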
Diffstat (limited to 'drivers/video')
-rw-r--r--	drivers/video/fb_defio.c	40
1 file changed, 8 insertions(+), 32 deletions(-)
diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c
index 073c9b408cf7..137100ea8ad7 100644
--- a/drivers/video/fb_defio.c
+++ b/drivers/video/fb_defio.c
@@ -155,41 +155,25 @@ static void fb_deferred_io_work(struct work_struct *work)
 {
 	struct fb_info *info = container_of(work, struct fb_info,
 						deferred_work.work);
+	struct list_head *node, *next;
+	struct page *cur;
 	struct fb_deferred_io *fbdefio = info->fbdefio;
-	struct page *page, *tmp_page;
-	struct list_head *node, *tmp_node;
-	struct list_head non_dirty;
-
-	INIT_LIST_HEAD(&non_dirty);
 
 	/* here we mkclean the pages, then do all deferred IO */
 	mutex_lock(&fbdefio->lock);
-	list_for_each_entry_safe(page, tmp_page, &fbdefio->pagelist, lru) {
-		lock_page(page);
-		/*
-		 * The workqueue callback can be triggered after a
-		 * ->page_mkwrite() call but before the PTE has been marked
-		 * dirty. In this case page_mkclean() won't "rearm" the page.
-		 *
-		 * To avoid this, remove those "non-dirty" pages from the
-		 * pagelist before calling the driver's callback, then add
-		 * them back to get processed on the next work iteration.
-		 * At that time, their PTEs will hopefully be dirty for real.
-		 */
-		if (!page_mkclean(page))
-			list_move_tail(&page->lru, &non_dirty);
-		unlock_page(page);
+	list_for_each_entry(cur, &fbdefio->pagelist, lru) {
+		lock_page(cur);
+		page_mkclean(cur);
+		unlock_page(cur);
 	}
 
 	/* driver's callback with pagelist */
 	fbdefio->deferred_io(info, &fbdefio->pagelist);
 
-	/* clear the list... */
-	list_for_each_safe(node, tmp_node, &fbdefio->pagelist) {
+	/* clear the list */
+	list_for_each_safe(node, next, &fbdefio->pagelist) {
 		list_del(node);
 	}
-	/* ... and add back the "non-dirty" pages to the list */
-	list_splice_tail(&non_dirty, &fbdefio->pagelist);
 	mutex_unlock(&fbdefio->lock);
 }
 
@@ -218,7 +202,6 @@ EXPORT_SYMBOL_GPL(fb_deferred_io_open);
 void fb_deferred_io_cleanup(struct fb_info *info)
 {
 	struct fb_deferred_io *fbdefio = info->fbdefio;
-	struct list_head *node, *tmp_node;
 	struct page *page;
 	int i;
 
@@ -226,13 +209,6 @@ void fb_deferred_io_cleanup(struct fb_info *info)
 	cancel_delayed_work(&info->deferred_work);
 	flush_scheduled_work();
 
-	/* the list may have still some non-dirty pages at this point */
-	mutex_lock(&fbdefio->lock);
-	list_for_each_safe(node, tmp_node, &fbdefio->pagelist) {
-		list_del(node);
-	}
-	mutex_unlock(&fbdefio->lock);
-
 	/* clear out the mapping that we setup */
 	for (i = 0 ; i < info->fix.smem_len; i += PAGE_SIZE) {
 		page = fb_deferred_io_page(info, i);
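
[Editor's note] For reference, after the revert fb_deferred_io_work() in
drivers/video/fb_defio.c reads as follows, reconstructed from the context
and '+' lines of the first hunk above; this is the simpler pre-fix form
that unconditionally mkcleans every page on the list:

static void fb_deferred_io_work(struct work_struct *work)
{
	struct fb_info *info = container_of(work, struct fb_info,
						deferred_work.work);
	struct list_head *node, *next;
	struct page *cur;
	struct fb_deferred_io *fbdefio = info->fbdefio;

	/* here we mkclean the pages, then do all deferred IO */
	mutex_lock(&fbdefio->lock);
	list_for_each_entry(cur, &fbdefio->pagelist, lru) {
		lock_page(cur);
		page_mkclean(cur);
		unlock_page(cur);
	}

	/* driver's callback with pagelist */
	fbdefio->deferred_io(info, &fbdefio->pagelist);

	/* clear the list */
	list_for_each_safe(node, next, &fbdefio->pagelist) {
		list_del(node);
	}
	mutex_unlock(&fbdefio->lock);
}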