author     Dave Airlie <airlied@redhat.com>    2013-05-02 02:40:25 -0400
committer  Dave Airlie <airlied@redhat.com>    2013-05-01 22:46:47 -0400
commit     306373b645d80625335b8e684fa09b14ba460cec (patch)
tree       56b69864e7941570c697b9da778d9a2886e0b8bd /drivers/gpu
parent     641719599528d806e00de8ae8c8453361266a312 (diff)
drm/ast: deal with bo reserve fail in dirty update path
Port over the mgag200 fix to ast, as it suffers from the same issue.

During F19 testing we noticed a lot of errors in dmesg about being
unable to reserve the buffer when plymouth starts. This happens because
the buffer is in the process of migrating, so it makes sense that we
can't reserve it.

To deal with this, the dirty updates are delayed while the bo is
unreservable. In the normal console case this should never happen; it
only occurs when plymouth or X is pushing the console bo out to system
memory.
Cc: stable@vger.kernel.org
Signed-off-by: Dave Airlie <airlied@redhat.com>
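For readers unfamiliar with the deferred-damage idea the patch uses, the following is a minimal userspace sketch, not the driver code itself: the struct and function names (dirty_rect, merge_damage, flush_damage) are illustrative, and the real driver protects the rectangle with a spinlock and copies the merged span with memcpy_toio().

/*
 * Sketch of accumulating a dirty rectangle while the framebuffer BO is
 * busy, then flushing one merged copy once it is reservable again.
 */
#include <limits.h>
#include <stdio.h>

struct dirty_rect {
	int x1, y1, x2, y2;	/* inclusive bounds; empty when x1 > x2 */
};

static void reset_rect(struct dirty_rect *r)
{
	r->x1 = r->y1 = INT_MAX;
	r->x2 = r->y2 = 0;
}

/* Grow the stored rect so it also covers a newly damaged region. */
static void merge_damage(struct dirty_rect *r, int x, int y, int w, int h)
{
	if (x < r->x1)
		r->x1 = x;
	if (y < r->y1)
		r->y1 = y;
	if (x + w - 1 > r->x2)
		r->x2 = x + w - 1;
	if (y + h - 1 > r->y2)
		r->y2 = y + h - 1;
}

/* Called once the buffer can be written again: copy one merged rect. */
static void flush_damage(struct dirty_rect *r)
{
	if (r->x1 > r->x2)
		return;		/* nothing stored */
	for (int row = r->y1; row <= r->y2; row++)
		printf("copy row %d, columns %d..%d\n", row, r->x1, r->x2);
	reset_rect(r);
}

int main(void)
{
	struct dirty_rect r;

	reset_rect(&r);
	merge_damage(&r, 10, 5, 4, 2);	/* update arrives while BO is busy */
	merge_damage(&r, 12, 6, 8, 3);	/* second update, still busy */
	flush_damage(&r);		/* BO reservable again: one combined copy */
	return 0;
}

A single bounding rectangle keeps the bookkeeping O(1): overlapping or disjoint updates simply grow the rect, and the next update that can reserve the bo flushes everything in one pass.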
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.h |  2
-rw-r--r--  drivers/gpu/drm/ast/ast_fb.c  | 43
-rw-r--r--  drivers/gpu/drm/ast/ast_ttm.c |  2
3 files changed, 43 insertions, 4 deletions
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 528429252f0f..02e52d543e4b 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -241,6 +241,8 @@ struct ast_fbdev {
 	void *sysram;
 	int size;
 	struct ttm_bo_kmap_obj mapping;
+	int x1, y1, x2, y2; /* dirty rect */
+	spinlock_t dirty_lock;
 };
 
 #define to_ast_crtc(x) container_of(x, struct ast_crtc, base)
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
index 34931fe7d2c5..fbc0823cfa18 100644
--- a/drivers/gpu/drm/ast/ast_fb.c
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -53,16 +53,52 @@ static void ast_dirty_update(struct ast_fbdev *afbdev,
 	int bpp = (afbdev->afb.base.bits_per_pixel + 7)/8;
 	int ret;
 	bool unmap = false;
+	bool store_for_later = false;
+	int x2, y2;
+	unsigned long flags;
 
 	obj = afbdev->afb.obj;
 	bo = gem_to_ast_bo(obj);
 
+	/*
+	 * try and reserve the BO, if we fail with busy
+	 * then the BO is being moved and we should
+	 * store up the damage until later.
+	 */
 	ret = ast_bo_reserve(bo, true);
 	if (ret) {
-		DRM_ERROR("failed to reserve fb bo\n");
+		if (ret != -EBUSY)
+			return;
+
+		store_for_later = true;
+	}
+
+	x2 = x + width - 1;
+	y2 = y + height - 1;
+	spin_lock_irqsave(&afbdev->dirty_lock, flags);
+
+	if (afbdev->y1 < y)
+		y = afbdev->y1;
+	if (afbdev->y2 > y2)
+		y2 = afbdev->y2;
+	if (afbdev->x1 < x)
+		x = afbdev->x1;
+	if (afbdev->x2 > x2)
+		x2 = afbdev->x2;
+
+	if (store_for_later) {
+		afbdev->x1 = x;
+		afbdev->x2 = x2;
+		afbdev->y1 = y;
+		afbdev->y2 = y2;
+		spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
 		return;
 	}
 
+	afbdev->x1 = afbdev->y1 = INT_MAX;
+	afbdev->x2 = afbdev->y2 = 0;
+	spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
+
 	if (!bo->kmap.virtual) {
 		ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
 		if (ret) {
@@ -72,10 +108,10 @@ static void ast_dirty_update(struct ast_fbdev *afbdev,
 		}
 		unmap = true;
 	}
-	for (i = y; i < y + height; i++) {
+	for (i = y; i <= y2; i++) {
 		/* assume equal stride for now */
 		src_offset = dst_offset = i * afbdev->afb.base.pitches[0] + (x * bpp);
-		memcpy_toio(bo->kmap.virtual + src_offset, afbdev->sysram + src_offset, width * bpp);
+		memcpy_toio(bo->kmap.virtual + src_offset, afbdev->sysram + src_offset, (x2 - x + 1) * bpp);
 
 	}
 	if (unmap)
@@ -292,6 +328,7 @@ int ast_fbdev_init(struct drm_device *dev)
 
 	ast->fbdev = afbdev;
 	afbdev->helper.funcs = &ast_fb_helper_funcs;
+	spin_lock_init(&afbdev->dirty_lock);
 	ret = drm_fb_helper_init(dev, &afbdev->helper,
 				 1, 1);
 	if (ret) {
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 3602731a6112..09da3393c527 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -316,7 +316,7 @@ int ast_bo_reserve(struct ast_bo *bo, bool no_wait)
 
 	ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
 	if (ret) {
-		if (ret != -ERESTARTSYS)
+		if (ret != -ERESTARTSYS && ret != -EBUSY)
 			DRM_ERROR("reserve failed %p\n", bo);
 		return ret;
 	}