author     Noralf Trønnes <noralf@tronnes.org>     2016-04-28 11:18:37 -0400
committer  Daniel Vetter <daniel.vetter@ffwll.ch>  2016-05-02 10:25:55 -0400
commit     e375882406d0cc24030746638592004755ed4ae0
tree       b5bb8f69db129f84ee6e4190edab829ed4dde4e8
parent     6819c3c2517604f979da3de773f2420e07dd4f4b
drm/udl: Use drm_fb_helper deferred_io support
Use the fbdev deferred io support in drm_fb_helper. The
(struct fb_ops *)->fb_{fillrect,copyarea,imageblit} functions will now
schedule a worker instead of flushing directly as they did before
(previously, damage was only recorded when called in atomic context).

This patch has only been compile tested.

Signed-off-by: Noralf Trønnes <noralf@tronnes.org>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Tested-by: Gerd Hoffmann <kraxel@redhat.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/1461856717-6476-8-git-send-email-noralf@tronnes.org
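For context, the helper path this patch switches to looks roughly like the
sketch below. This is a simplified paraphrase of the drm_fb_helper core of
this era, not the verbatim source: each sys_* wrapper draws into the shadow
framebuffer and then records the damaged clip with the helper's internal
damage accumulator (drm_fb_helper_dirty), which schedules the flush worker
rather than writing to the device inline. The copyarea and imageblit
wrappers follow the same pattern.

        /* simplified sketch; the real code lives in drm_fb_helper.c */
        void drm_fb_helper_sys_fillrect(struct fb_info *info,
                                        const struct fb_fillrect *rect)
        {
                /* draw into the shadow framebuffer via the fbdev helper */
                sys_fillrect(info, rect);

                /* record the clip and kick the deferred flush worker */
                drm_fb_helper_dirty(info, rect->dx, rect->dy,
                                    rect->width, rect->height);
        }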
-rw-r--r--  drivers/gpu/drm/udl/udl_drv.h |   2
-rw-r--r--  drivers/gpu/drm/udl/udl_fb.c  | 140
2 files changed, 6 insertions(+), 136 deletions(-)
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index 4a064efcea58..0b03d34ffdee 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -81,8 +81,6 @@ struct udl_framebuffer {
 	struct drm_framebuffer base;
 	struct udl_gem_object *obj;
 	bool active_16; /* active on the 16-bit channel */
-	int x1, y1, x2, y2; /* dirty rect */
-	spinlock_t dirty_lock;
 };
 
 #define to_udl_fb(x) container_of(x, struct udl_framebuffer, base)
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index a52de2fc6087..4a9b43217900 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -77,68 +77,6 @@ static uint16_t rgb16(uint32_t col)
 }
 #endif
 
-/*
- * NOTE: fb_defio.c is holding info->fbdefio.mutex
- *   Touching ANY framebuffer memory that triggers a page fault
- *   in fb_defio will cause a deadlock, when it also tries to
- *   grab the same mutex.
- */
-static void udlfb_dpy_deferred_io(struct fb_info *info,
-				  struct list_head *pagelist)
-{
-	struct page *cur;
-	struct fb_deferred_io *fbdefio = info->fbdefio;
-	struct udl_fbdev *ufbdev = info->par;
-	struct drm_device *dev = ufbdev->ufb.base.dev;
-	struct udl_device *udl = dev->dev_private;
-	struct urb *urb;
-	char *cmd;
-	cycles_t start_cycles, end_cycles;
-	int bytes_sent = 0;
-	int bytes_identical = 0;
-	int bytes_rendered = 0;
-
-	if (!fb_defio)
-		return;
-
-	start_cycles = get_cycles();
-
-	urb = udl_get_urb(dev);
-	if (!urb)
-		return;
-
-	cmd = urb->transfer_buffer;
-
-	/* walk the written page list and render each to device */
-	list_for_each_entry(cur, &fbdefio->pagelist, lru) {
-
-		if (udl_render_hline(dev, (ufbdev->ufb.base.bits_per_pixel / 8),
-				     &urb, (char *) info->fix.smem_start,
-				     &cmd, cur->index << PAGE_SHIFT,
-				     cur->index << PAGE_SHIFT,
-				     PAGE_SIZE, &bytes_identical, &bytes_sent))
-			goto error;
-		bytes_rendered += PAGE_SIZE;
-	}
-
-	if (cmd > (char *) urb->transfer_buffer) {
-		/* Send partial buffer remaining before exiting */
-		int len = cmd - (char *) urb->transfer_buffer;
-		udl_submit_urb(dev, urb, len);
-		bytes_sent += len;
-	} else
-		udl_urb_completion(urb);
-
-error:
-	atomic_add(bytes_sent, &udl->bytes_sent);
-	atomic_add(bytes_identical, &udl->bytes_identical);
-	atomic_add(bytes_rendered, &udl->bytes_rendered);
-	end_cycles = get_cycles();
-	atomic_add(((unsigned int) ((end_cycles - start_cycles)
-		    >> 10)), /* Kcycles */
-		   &udl->cpu_kcycles_used);
-}
-
 int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
 		      int width, int height)
 {
@@ -152,9 +90,6 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
 	struct urb *urb;
 	int aligned_x;
 	int bpp = (fb->base.bits_per_pixel / 8);
-	int x2, y2;
-	bool store_for_later = false;
-	unsigned long flags;
 
 	if (!fb->active_16)
 		return 0;
@@ -180,38 +115,6 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
 	    (y + height > fb->base.height))
 		return -EINVAL;
 
-	/* if we are in atomic just store the info
-	   can't test inside spin lock */
-	if (in_atomic())
-		store_for_later = true;
-
-	x2 = x + width - 1;
-	y2 = y + height - 1;
-
-	spin_lock_irqsave(&fb->dirty_lock, flags);
-
-	if (fb->y1 < y)
-		y = fb->y1;
-	if (fb->y2 > y2)
-		y2 = fb->y2;
-	if (fb->x1 < x)
-		x = fb->x1;
-	if (fb->x2 > x2)
-		x2 = fb->x2;
-
-	if (store_for_later) {
-		fb->x1 = x;
-		fb->x2 = x2;
-		fb->y1 = y;
-		fb->y2 = y2;
-		spin_unlock_irqrestore(&fb->dirty_lock, flags);
-		return 0;
-	}
-
-	fb->x1 = fb->y1 = INT_MAX;
-	fb->x2 = fb->y2 = 0;
-
-	spin_unlock_irqrestore(&fb->dirty_lock, flags);
 	start_cycles = get_cycles();
 
 	urb = udl_get_urb(dev);
@@ -219,14 +122,14 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
 		return 0;
 	cmd = urb->transfer_buffer;
 
-	for (i = y; i <= y2 ; i++) {
+	for (i = y; i < height ; i++) {
 		const int line_offset = fb->base.pitches[0] * i;
 		const int byte_offset = line_offset + (x * bpp);
 		const int dev_byte_offset = (fb->base.width * bpp * i) + (x * bpp);
 		if (udl_render_hline(dev, bpp, &urb,
 				     (char *) fb->obj->vmapping,
 				     &cmd, byte_offset, dev_byte_offset,
-				     (x2 - x + 1) * bpp,
+				     width * bpp,
 				     &bytes_identical, &bytes_sent))
 			goto error;
 	}
@@ -283,36 +186,6 @@ static int udl_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
 	return 0;
 }
 
-static void udl_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
-{
-	struct udl_fbdev *ufbdev = info->par;
-
-	sys_fillrect(info, rect);
-
-	udl_handle_damage(&ufbdev->ufb, rect->dx, rect->dy, rect->width,
-			  rect->height);
-}
-
-static void udl_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
-{
-	struct udl_fbdev *ufbdev = info->par;
-
-	sys_copyarea(info, region);
-
-	udl_handle_damage(&ufbdev->ufb, region->dx, region->dy, region->width,
-			  region->height);
-}
-
-static void udl_fb_imageblit(struct fb_info *info, const struct fb_image *image)
-{
-	struct udl_fbdev *ufbdev = info->par;
-
-	sys_imageblit(info, image);
-
-	udl_handle_damage(&ufbdev->ufb, image->dx, image->dy, image->width,
-			  image->height);
-}
-
 /*
  * It's common for several clients to have framebuffer open simultaneously.
  * e.g. both fbcon and X. Makes things interesting.
@@ -339,7 +212,7 @@ static int udl_fb_open(struct fb_info *info, int user)
 
 	if (fbdefio) {
 		fbdefio->delay = DL_DEFIO_WRITE_DELAY;
-		fbdefio->deferred_io = udlfb_dpy_deferred_io;
+		fbdefio->deferred_io = drm_fb_helper_deferred_io;
 	}
 
 	info->fbdefio = fbdefio;
@@ -379,9 +252,9 @@ static struct fb_ops udlfb_ops = {
 	.owner = THIS_MODULE,
 	.fb_check_var = drm_fb_helper_check_var,
 	.fb_set_par = drm_fb_helper_set_par,
-	.fb_fillrect = udl_fb_fillrect,
-	.fb_copyarea = udl_fb_copyarea,
-	.fb_imageblit = udl_fb_imageblit,
+	.fb_fillrect = drm_fb_helper_sys_fillrect,
+	.fb_copyarea = drm_fb_helper_sys_copyarea,
+	.fb_imageblit = drm_fb_helper_sys_imageblit,
 	.fb_pan_display = drm_fb_helper_pan_display,
 	.fb_blank = drm_fb_helper_blank,
 	.fb_setcmap = drm_fb_helper_setcmap,
@@ -458,7 +331,6 @@ udl_framebuffer_init(struct drm_device *dev,
 {
 	int ret;
 
-	spin_lock_init(&ufb->dirty_lock);
 	ufb->obj = obj;
 	drm_helper_mode_fill_fb_struct(&ufb->base, mode_cmd);
 	ret = drm_framebuffer_init(dev, &ufb->base, &udlfb_funcs);
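
With udlfb_dpy_deferred_io gone, the fbdefio hook above now points at
drm_fb_helper_deferred_io. As a rough sketch (simplified from the helper of
this era, with clamping and error handling trimmed, not the verbatim
source), it coalesces the faulted pages into a span of scanlines and hands
that to the same dirty accumulator; the flush worker then invokes the
framebuffer's ->dirty hook, which for udl is udl_user_framebuffer_dirty()
and thus udl_handle_damage():

        /* simplified sketch of the helper this driver now uses */
        void drm_fb_helper_deferred_io(struct fb_info *info,
                                       struct list_head *pagelist)
        {
                unsigned long start, end, min = ULONG_MAX, max = 0;
                struct page *page;
                u32 y1, y2;

                /* merge all written pages into one contiguous byte span */
                list_for_each_entry(page, pagelist, lru) {
                        start = page->index << PAGE_SHIFT;
                        end = start + PAGE_SIZE - 1;
                        min = min_t(unsigned long, min, start);
                        max = max_t(unsigned long, max, end);
                }

                if (min < max) {
                        /* convert the byte span to full-width scanlines */
                        y1 = min / info->fix.line_length;
                        y2 = min_t(u32, DIV_ROUND_UP(max, info->fix.line_length),
                                   info->var.yres);

                        /* schedule the flush worker with that clip */
                        drm_fb_helper_dirty(info, 0, y1,
                                            info->var.xres, y2 - y1);
                }
        }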