author     Andrzej Hajda <a.hajda@samsung.com>       2014-08-28 05:07:38 -0400
committer  Inki Dae <daeinki@gmail.com>              2014-09-19 11:56:12 -0400
commit     56442d83401f122cc5c38391bb5960bb6a52a343
tree       5034f8091160a2382640a84af545575ae0671c04 /drivers/gpu/drm/exynos/exynos_drm_fimc.c
parent     c7b3014bf90d91b0559b3b49f4c0acef7170ece6
drm/exynos/fimc: simplify buffer queuing
The patch removes redundant checks and redundant HW reads, and simplifies the code.
Signed-off-by: Andrzej Hajda <a.hajda@samsung.com>
Reviewed-by: Joonyoung Shim <jy0922.shim@samsung.com>
Signed-off-by: Inki Dae <inki.dae@samsung.com>
Diffstat (limited to 'drivers/gpu/drm/exynos/exynos_drm_fimc.c')
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimc.c  |  64
1 file changed, 15 insertions(+), 49 deletions(-)
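The core of the change, condensed for readability: the buffer-sequence bit for buf_id is set or cleared in a single read-modify-write of CIFCNTSEQ, and the queued-buffer count is then taken from the value just written (hweight32(cfg)) instead of being re-read from hardware through the removed fimc_dst_get_buf_count() helper. This is only an illustrative sketch of the new fimc_dst_set_buf_seq() body, which runs under the ctx->lock spinlock; declarations, locking and debug output are omitted (see the full diff below).

        cfg = fimc_read(ctx, EXYNOS_CIFCNTSEQ);

        if (buf_type == IPP_BUF_ENQUEUE)
                cfg |= (1 << buf_id);   /* enqueue: mark this buffer as queued */
        else
                cfg &= ~(1 << buf_id);  /* dequeue: clear its bit */

        fimc_write(ctx, cfg, EXYNOS_CIFCNTSEQ);

        /* Derive the buffer count from the value just written; no extra HW read. */
        buf_num = hweight32(cfg);

        if (buf_type == IPP_BUF_ENQUEUE && buf_num >= FIMC_BUF_START)
                fimc_mask_irq(ctx, true);       /* enough buffers queued: enable the interrupt */
        else if (buf_type == IPP_BUF_DEQUEUE && buf_num <= FIMC_BUF_STOP)
                fimc_mask_irq(ctx, false);      /* queue nearly empty: disable it */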
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 15d04d554fca..c2648a0717be 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -1119,67 +1119,34 @@ static int fimc_dst_set_size(struct device *dev, int swap,
         return 0;
 }
 
-static int fimc_dst_get_buf_count(struct fimc_context *ctx)
-{
-        u32 cfg, buf_num;
-
-        cfg = fimc_read(ctx, EXYNOS_CIFCNTSEQ);
-
-        buf_num = hweight32(cfg);
-
-        DRM_DEBUG_KMS("buf_num[%d]\n", buf_num);
-
-        return buf_num;
-}
-
-static int fimc_dst_set_buf_seq(struct fimc_context *ctx, u32 buf_id,
+static void fimc_dst_set_buf_seq(struct fimc_context *ctx, u32 buf_id,
                 enum drm_exynos_ipp_buf_type buf_type)
 {
-        struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
-        bool enable;
-        u32 cfg;
-        u32 mask = 0x00000001 << buf_id;
-        int ret = 0;
         unsigned long flags;
+        u32 buf_num;
+        u32 cfg;
 
         DRM_DEBUG_KMS("buf_id[%d]buf_type[%d]\n", buf_id, buf_type);
 
         spin_lock_irqsave(&ctx->lock, flags);
 
-        /* mask register set */
         cfg = fimc_read(ctx, EXYNOS_CIFCNTSEQ);
 
-        switch (buf_type) {
-        case IPP_BUF_ENQUEUE:
-                enable = true;
-                break;
-        case IPP_BUF_DEQUEUE:
-                enable = false;
-                break;
-        default:
-                dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n");
-                ret = -EINVAL;
-                goto err_unlock;
-        }
+        if (buf_type == IPP_BUF_ENQUEUE)
+                cfg |= (1 << buf_id);
+        else
+                cfg &= ~(1 << buf_id);
 
-        /* sequence id */
-        cfg &= ~mask;
-        cfg |= (enable << buf_id);
         fimc_write(ctx, cfg, EXYNOS_CIFCNTSEQ);
 
-        /* interrupt enable */
-        if (buf_type == IPP_BUF_ENQUEUE &&
-            fimc_dst_get_buf_count(ctx) >= FIMC_BUF_START)
-                fimc_mask_irq(ctx, true);
+        buf_num = hweight32(cfg);
 
-        /* interrupt disable */
-        if (buf_type == IPP_BUF_DEQUEUE &&
-            fimc_dst_get_buf_count(ctx) <= FIMC_BUF_STOP)
-                fimc_mask_irq(ctx, false);
+        if (buf_type == IPP_BUF_ENQUEUE && buf_num >= FIMC_BUF_START)
+                fimc_mask_irq(ctx, true);
+        else if (buf_type == IPP_BUF_DEQUEUE && buf_num <= FIMC_BUF_STOP)
+                fimc_mask_irq(ctx, false);
 
-err_unlock:
         spin_unlock_irqrestore(&ctx->lock, flags);
-        return ret;
 }
 
 static int fimc_dst_set_addr(struct device *dev,
@@ -1237,7 +1204,9 @@ static int fimc_dst_set_addr(struct device *dev,
                 break;
         }
 
-        return fimc_dst_set_buf_seq(ctx, buf_id, buf_type);
+        fimc_dst_set_buf_seq(ctx, buf_id, buf_type);
+
+        return 0;
 }
 
 static struct exynos_drm_ipp_ops fimc_dst_ops = {
@@ -1288,10 +1257,7 @@ static irqreturn_t fimc_irq_handler(int irq, void *dev_id)
 
         DRM_DEBUG_KMS("buf_id[%d]\n", buf_id);
 
-        if (fimc_dst_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE) < 0) {
-                DRM_ERROR("failed to dequeue.\n");
-                return IRQ_HANDLED;
-        }
+        fimc_dst_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE);
 
         event_work->ippdrv = ippdrv;
         event_work->buf_id[EXYNOS_DRM_OPS_DST] = buf_id;