author     Albert Herranz <albert_herranz@yahoo.es>          2010-05-24 17:34:08 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>    2010-05-25 11:07:09 -0400
commit     49bbd815fd8ba26d0354900b783b767c7f47c816
tree       6e118691727380045c1d22d93452dc99f1ccc04d
parent     0d5b02641faade3dfd376abdcffbd8ccb03c7a91
fb_defio: fix for non-dirty ptes
Fix a problem observed while using fb_defio with a short delay on a PowerPC platform.

It is possible that page_mkclean() is invoked in the deferred io work function _before_ a PTE has been marked dirty.  In this case, the page is removed from the defio pagelist, but page_mkclean() does not write-protect the page again.  The end result is that defio ignores all subsequent writes to the page and the corresponding portions of the framebuffer never get updated.

The fix consists in keeping track of the pages with non-dirty PTEs and re-checking them on the next deferred io work iteration.  Note that those pages are not passed to the defio callback, as they have not been written by userspace yet.

Signed-off-by: Albert Herranz <albert_herranz@yahoo.es>
Acked-by: Jaya Kumar <jayakumar.lkml@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
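For readers unfamiliar with fb_defio: a driver opts into deferred I/O by pointing info->fbdefio at a struct fb_deferred_io and calling fb_deferred_io_init().  The sketch below is a minimal, hypothetical example of that wiring (the mydrv_* names and the flush stub are illustrative, not part of this patch); the pagelist it receives is the same list that fb_deferred_io_work() processes in the diff below.

#include <linux/fb.h>
#include <linux/list.h>
#include <linux/mm.h>

/* hypothetical device-specific flush of one page-sized chunk */
static void mydrv_flush_page(struct fb_info *info, unsigned long offset)
{
	/* push info->screen_base + offset out to the hardware */
}

/* called by the delayed work with the list of pages written since
 * the last pass; page->index is the page offset into the mapping */
static void mydrv_deferred_io(struct fb_info *info,
			      struct list_head *pagelist)
{
	struct page *page;

	list_for_each_entry(page, pagelist, lru)
		mydrv_flush_page(info, page->index << PAGE_SHIFT);
}

static struct fb_deferred_io mydrv_defio = {
	.delay		= HZ / 20,	/* a short delay, like the report's */
	.deferred_io	= mydrv_deferred_io,
};

/* at probe time:
 *	info->fbdefio = &mydrv_defio;
 *	fb_deferred_io_init(info);
 * and fb_deferred_io_cleanup(info) at remove time.
 */

The shorter the delay, the more often the work function runs, and the likelier it is to run inside the race window this patch describes.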
Diffstat (limited to 'drivers/video/fb_defio.c')
 drivers/video/fb_defio.c | 40 ++++++++++++++++++++++++++++++++--------
 1 file changed, 32 insertions(+), 8 deletions(-)
diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c
index 6113c47e095a..1105a591dcc1 100644
--- a/drivers/video/fb_defio.c
+++ b/drivers/video/fb_defio.c
@@ -155,25 +155,41 @@ static void fb_deferred_io_work(struct work_struct *work)
 {
 	struct fb_info *info = container_of(work, struct fb_info,
 						deferred_work.work);
-	struct list_head *node, *next;
-	struct page *cur;
 	struct fb_deferred_io *fbdefio = info->fbdefio;
+	struct page *page, *tmp_page;
+	struct list_head *node, *tmp_node;
+	struct list_head non_dirty;
+
+	INIT_LIST_HEAD(&non_dirty);
 
 	/* here we mkclean the pages, then do all deferred IO */
 	mutex_lock(&fbdefio->lock);
-	list_for_each_entry(cur, &fbdefio->pagelist, lru) {
-		lock_page(cur);
-		page_mkclean(cur);
-		unlock_page(cur);
+	list_for_each_entry_safe(page, tmp_page, &fbdefio->pagelist, lru) {
+		lock_page(page);
+		/*
+		 * The workqueue callback can be triggered after a
+		 * ->page_mkwrite() call but before the PTE has been marked
+		 * dirty. In this case page_mkclean() won't "rearm" the page.
+		 *
+		 * To avoid this, remove those "non-dirty" pages from the
+		 * pagelist before calling the driver's callback, then add
+		 * them back to get processed on the next work iteration.
+		 * At that time, their PTEs will hopefully be dirty for real.
+		 */
+		if (!page_mkclean(page))
+			list_move_tail(&page->lru, &non_dirty);
+		unlock_page(page);
 	}
 
 	/* driver's callback with pagelist */
 	fbdefio->deferred_io(info, &fbdefio->pagelist);
 
-	/* clear the list */
-	list_for_each_safe(node, next, &fbdefio->pagelist) {
+	/* clear the list... */
+	list_for_each_safe(node, tmp_node, &fbdefio->pagelist) {
 		list_del(node);
 	}
+	/* ... and add back the "non-dirty" pages to the list */
+	list_splice_tail(&non_dirty, &fbdefio->pagelist);
 	mutex_unlock(&fbdefio->lock);
 }
 
@@ -202,6 +218,7 @@ EXPORT_SYMBOL_GPL(fb_deferred_io_open);
 void fb_deferred_io_cleanup(struct fb_info *info)
 {
 	struct fb_deferred_io *fbdefio = info->fbdefio;
+	struct list_head *node, *tmp_node;
 	struct page *page;
 	int i;
 
@@ -209,6 +226,13 @@ void fb_deferred_io_cleanup(struct fb_info *info)
 	cancel_delayed_work(&info->deferred_work);
 	flush_scheduled_work();
 
+	/* the list may have still some non-dirty pages at this point */
+	mutex_lock(&fbdefio->lock);
+	list_for_each_safe(node, tmp_node, &fbdefio->pagelist) {
+		list_del(node);
+	}
+	mutex_unlock(&fbdefio->lock);
+
 	/* clear out the mapping that we setup */
 	for (i = 0 ; i < info->fix.smem_len; i += PAGE_SIZE) {
 		page = fb_deferred_io_page(info, i);
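To see where the race window closed by this patch opens up, here is a condensed sketch of the ->page_mkwrite() producer side of fb_defio as it worked around this kernel version.  It is paraphrased, not the verbatim source; among other details, the real function keeps the pagelist sorted when inserting.

/* simplified sketch of the fault-time producer, not the verbatim source */
static int fb_deferred_io_mkwrite(struct vm_area_struct *vma,
				  struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct fb_info *info = vma->vm_private_data;
	struct fb_deferred_io *fbdefio = info->fbdefio;

	/* queue the page that userspace is about to write */
	mutex_lock(&fbdefio->lock);
	list_add_tail(&page->lru, &fbdefio->pagelist);
	mutex_unlock(&fbdefio->lock);

	/* come back after the delay to process the deferred I/O */
	schedule_delayed_work(&info->deferred_work, fbdefio->delay);

	/*
	 * The PTE only becomes writable after this handler returns, and
	 * it is marked dirty by the userspace write itself.  With a
	 * short enough delay, fb_deferred_io_work() can run inside that
	 * window, where page_mkclean() finds no dirty PTE to
	 * write-protect; that is the case the patch above now detects
	 * via page_mkclean()'s return value.
	 */
	return 0;
}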