Diffstat (limited to 'drivers/video/fb_defio.c')
 drivers/video/fb_defio.c | 52
 1 files changed, 19 insertions, 33 deletions
diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c
index 073c9b408cf7..6b93ef93cb12 100644
--- a/drivers/video/fb_defio.c
+++ b/drivers/video/fb_defio.c
@@ -100,6 +100,16 @@ static int fb_deferred_io_mkwrite(struct vm_area_struct *vma,
 	/* protect against the workqueue changing the page list */
 	mutex_lock(&fbdefio->lock);
 
+	/*
+	 * We want the page to remain locked from ->page_mkwrite until
+	 * the PTE is marked dirty to avoid page_mkclean() being called
+	 * before the PTE is updated, which would leave the page ignored
+	 * by defio.
+	 * Do this by locking the page here and informing the caller
+	 * about it with VM_FAULT_LOCKED.
+	 */
+	lock_page(page);
+
 	/* we loop through the pagelist before adding in order
 	   to keep the pagelist sorted */
 	list_for_each_entry(cur, &fbdefio->pagelist, lru) {
@@ -121,7 +131,7 @@ page_already_added:
 
 	/* come back after delay to process the deferred IO */
 	schedule_delayed_work(&info->deferred_work, fbdefio->delay);
-	return 0;
+	return VM_FAULT_LOCKED;
 }
 
 static const struct vm_operations_struct fb_deferred_io_vm_ops = {
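The two hunks above close the race by keeping the page locked from ->page_mkwrite() until the fault code has installed the dirty PTE; returning VM_FAULT_LOCKED tells the caller that the page is already locked and that it must unlock it once the PTE update is done. A rough interleaving sketch of the window being closed (illustrative only, assuming the usual write-fault sequence):

   write fault path                      fb_deferred_io_work()
   ----------------                      ---------------------
   fb_deferred_io_mkwrite()
     lock_page(page)
     add page to fbdefio->pagelist
     return VM_FAULT_LOCKED
   caller marks the PTE dirty            lock_page(cur)    <- sleeps until the
   caller unlocks the page                                    fault path unlocks
                                         page_mkclean(cur) <- sees a dirty PTE and
                                         unlock_page(cur)     write-protects it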
@@ -155,41 +165,25 @@ static void fb_deferred_io_work(struct work_struct *work)
 {
 	struct fb_info *info = container_of(work, struct fb_info,
 						deferred_work.work);
+	struct list_head *node, *next;
+	struct page *cur;
 	struct fb_deferred_io *fbdefio = info->fbdefio;
-	struct page *page, *tmp_page;
-	struct list_head *node, *tmp_node;
-	struct list_head non_dirty;
-
-	INIT_LIST_HEAD(&non_dirty);
 
 	/* here we mkclean the pages, then do all deferred IO */
 	mutex_lock(&fbdefio->lock);
-	list_for_each_entry_safe(page, tmp_page, &fbdefio->pagelist, lru) {
-		lock_page(page);
-		/*
-		 * The workqueue callback can be triggered after a
-		 * ->page_mkwrite() call but before the PTE has been marked
-		 * dirty. In this case page_mkclean() won't "rearm" the page.
-		 *
-		 * To avoid this, remove those "non-dirty" pages from the
-		 * pagelist before calling the driver's callback, then add
-		 * them back to get processed on the next work iteration.
-		 * At that time, their PTEs will hopefully be dirty for real.
-		 */
-		if (!page_mkclean(page))
-			list_move_tail(&page->lru, &non_dirty);
-		unlock_page(page);
+	list_for_each_entry(cur, &fbdefio->pagelist, lru) {
+		lock_page(cur);
+		page_mkclean(cur);
+		unlock_page(cur);
 	}
 
 	/* driver's callback with pagelist */
 	fbdefio->deferred_io(info, &fbdefio->pagelist);
 
-	/* clear the list... */
-	list_for_each_safe(node, tmp_node, &fbdefio->pagelist) {
+	/* clear the list */
+	list_for_each_safe(node, next, &fbdefio->pagelist) {
 		list_del(node);
 	}
-	/* ... and add back the "non-dirty" pages to the list */
-	list_splice_tail(&non_dirty, &fbdefio->pagelist);
 	mutex_unlock(&fbdefio->lock);
 }
 
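Everything still on the pagelist after the mkclean pass above is handed to the driver through fbdefio->deferred_io(). A minimal sketch of such a callback is shown below; the exampledrv_* names and the flush helper are assumptions made for illustration, but walking the list via the lru member and treating page->index as the page offset into the framebuffer follows the existing defio users:

#include <linux/fb.h>
#include <linux/list.h>
#include <linux/mm.h>

/* assumed hardware-specific helper, not a real kernel API */
static void exampledrv_flush_region(struct fb_info *info,
				    unsigned long offset, size_t len);

/* hypothetical driver callback invoked by fb_deferred_io_work() */
static void exampledrv_deferred_io(struct fb_info *info,
				   struct list_head *pagelist)
{
	struct page *page;

	/* pages arrive sorted; each one was written to since the last flush */
	list_for_each_entry(page, pagelist, lru) {
		unsigned long offset = page->index << PAGE_SHIFT;

		/* push PAGE_SIZE bytes at 'offset' out to the device */
		exampledrv_flush_region(info, offset, PAGE_SIZE);
	}
}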
@@ -218,7 +212,6 @@ EXPORT_SYMBOL_GPL(fb_deferred_io_open);
 void fb_deferred_io_cleanup(struct fb_info *info)
 {
 	struct fb_deferred_io *fbdefio = info->fbdefio;
-	struct list_head *node, *tmp_node;
 	struct page *page;
 	int i;
 
@@ -226,13 +219,6 @@ void fb_deferred_io_cleanup(struct fb_info *info)
 	cancel_delayed_work(&info->deferred_work);
 	flush_scheduled_work();
 
-	/* the list may have still some non-dirty pages at this point */
-	mutex_lock(&fbdefio->lock);
-	list_for_each_safe(node, tmp_node, &fbdefio->pagelist) {
-		list_del(node);
-	}
-	mutex_unlock(&fbdefio->lock);
-
 	/* clear out the mapping that we setup */
 	for (i = 0 ; i < info->fix.smem_len; i += PAGE_SIZE) {
 		page = fb_deferred_io_page(info, i);
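Finally, a rough wiring sketch (illustrative exampledrv_* names, not from this patch): a driver opts in by pointing info->fbdefio at a struct fb_deferred_io before calling fb_deferred_io_init(), and teardown pairs unregister_framebuffer() with the fb_deferred_io_cleanup() that the last two hunks simplify:

#include <linux/fb.h>
#include <linux/jiffies.h>

static struct fb_deferred_io exampledrv_defio = {
	.delay		= HZ / 10,			/* coalesce writes for ~100 ms */
	.deferred_io	= exampledrv_deferred_io,	/* callback sketched earlier */
};

static int exampledrv_register(struct fb_info *info)
{
	info->fbdefio = &exampledrv_defio;
	fb_deferred_io_init(info);	/* hooks deferred-I/O mmap into info->fbops */
	return register_framebuffer(info);
}

static void exampledrv_unregister(struct fb_info *info)
{
	unregister_framebuffer(info);
	fb_deferred_io_cleanup(info);
}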