aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorBenjamin Gaignard <benjamin.gaignard@linaro.org>2014-12-04 05:21:48 -0500
committerBenjamin Gaignard <benjamin.gaignard@linaro.org>2014-12-11 07:58:28 -0500
commita51fe84d1d36122bdd7feeebd1d9d85e80ea16e7 (patch)
tree40cf681610a692db9811a104ca6e7bd2c8ab06a3 /drivers
parent2f7d0e82ce9fdbb450613abe28c3bbe1bda069a6 (diff)
drm: sti: simplify gdp code
Store the physical address at node creation time to avoid use of virt_to_dma and dma_to_virt everywhere.

Signed-off-by: Benjamin Gaignard <benjamin.gaignard@linaro.org>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/gpu/drm/sti/sti_gdp.c39
1 file changed, 20 insertions(+), 19 deletions(-)
diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
index 4e30b74559f5..1b903ffb345b 100644
--- a/drivers/gpu/drm/sti/sti_gdp.c
+++ b/drivers/gpu/drm/sti/sti_gdp.c
@@ -73,7 +73,9 @@ struct sti_gdp_node {
73 73
74struct sti_gdp_node_list { 74struct sti_gdp_node_list {
75 struct sti_gdp_node *top_field; 75 struct sti_gdp_node *top_field;
76 dma_addr_t top_field_paddr;
76 struct sti_gdp_node *btm_field; 77 struct sti_gdp_node *btm_field;
78 dma_addr_t btm_field_paddr;
77}; 79};
78 80
79/** 81/**
@@ -168,7 +170,6 @@ static int sti_gdp_get_alpharange(int format)
168static struct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_layer *layer) 170static struct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_layer *layer)
169{ 171{
170 int hw_nvn; 172 int hw_nvn;
171 void *virt_nvn;
172 struct sti_gdp *gdp = to_sti_gdp(layer); 173 struct sti_gdp *gdp = to_sti_gdp(layer);
173 unsigned int i; 174 unsigned int i;
174 175
@@ -176,11 +177,9 @@ static struct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_layer *layer)
176 if (!hw_nvn) 177 if (!hw_nvn)
177 goto end; 178 goto end;
178 179
179 virt_nvn = dma_to_virt(layer->dev, (dma_addr_t) hw_nvn);
180
181 for (i = 0; i < GDP_NODE_NB_BANK; i++) 180 for (i = 0; i < GDP_NODE_NB_BANK; i++)
182 if ((virt_nvn != gdp->node_list[i].btm_field) && 181 if ((hw_nvn != gdp->node_list[i].btm_field_paddr) &&
183 (virt_nvn != gdp->node_list[i].top_field)) 182 (hw_nvn != gdp->node_list[i].top_field_paddr))
184 return &gdp->node_list[i]; 183 return &gdp->node_list[i];
185 184
186 /* in hazardious cases restart with the first node */ 185 /* in hazardious cases restart with the first node */
@@ -204,7 +203,6 @@ static
204struct sti_gdp_node_list *sti_gdp_get_current_nodes(struct sti_layer *layer) 203struct sti_gdp_node_list *sti_gdp_get_current_nodes(struct sti_layer *layer)
205{ 204{
206 int hw_nvn; 205 int hw_nvn;
207 void *virt_nvn;
208 struct sti_gdp *gdp = to_sti_gdp(layer); 206 struct sti_gdp *gdp = to_sti_gdp(layer);
209 unsigned int i; 207 unsigned int i;
210 208
@@ -212,11 +210,9 @@ struct sti_gdp_node_list *sti_gdp_get_current_nodes(struct sti_layer *layer)
212 if (!hw_nvn) 210 if (!hw_nvn)
213 goto end; 211 goto end;
214 212
215 virt_nvn = dma_to_virt(layer->dev, (dma_addr_t) hw_nvn);
216
217 for (i = 0; i < GDP_NODE_NB_BANK; i++) 213 for (i = 0; i < GDP_NODE_NB_BANK; i++)
218 if ((virt_nvn == gdp->node_list[i].btm_field) || 214 if ((hw_nvn == gdp->node_list[i].btm_field_paddr) ||
219 (virt_nvn == gdp->node_list[i].top_field)) 215 (hw_nvn == gdp->node_list[i].top_field_paddr))
220 return &gdp->node_list[i]; 216 return &gdp->node_list[i];
221 217
222end: 218end:
@@ -292,8 +288,8 @@ static int sti_gdp_prepare_layer(struct sti_layer *layer, bool first_prepare)
292 288
293 /* Same content and chained together */ 289 /* Same content and chained together */
294 memcpy(btm_field, top_field, sizeof(*btm_field)); 290 memcpy(btm_field, top_field, sizeof(*btm_field));
295 top_field->gam_gdp_nvn = virt_to_dma(dev, btm_field); 291 top_field->gam_gdp_nvn = list->btm_field_paddr;
296 btm_field->gam_gdp_nvn = virt_to_dma(dev, top_field); 292 btm_field->gam_gdp_nvn = list->top_field_paddr;
297 293
298 /* Interlaced mode */ 294 /* Interlaced mode */
299 if (layer->mode->flags & DRM_MODE_FLAG_INTERLACE) 295 if (layer->mode->flags & DRM_MODE_FLAG_INTERLACE)
@@ -349,8 +345,8 @@ static int sti_gdp_commit_layer(struct sti_layer *layer)
349 struct sti_gdp_node *updated_top_node = updated_list->top_field; 345 struct sti_gdp_node *updated_top_node = updated_list->top_field;
350 struct sti_gdp_node *updated_btm_node = updated_list->btm_field; 346 struct sti_gdp_node *updated_btm_node = updated_list->btm_field;
351 struct sti_gdp *gdp = to_sti_gdp(layer); 347 struct sti_gdp *gdp = to_sti_gdp(layer);
352 u32 dma_updated_top = virt_to_dma(layer->dev, updated_top_node); 348 u32 dma_updated_top = updated_list->top_field_paddr;
353 u32 dma_updated_btm = virt_to_dma(layer->dev, updated_btm_node); 349 u32 dma_updated_btm = updated_list->btm_field_paddr;
354 struct sti_gdp_node_list *curr_list = sti_gdp_get_current_nodes(layer); 350 struct sti_gdp_node_list *curr_list = sti_gdp_get_current_nodes(layer);
355 351
356 dev_dbg(layer->dev, "%s %s top/btm_node:0x%p/0x%p\n", __func__, 352 dev_dbg(layer->dev, "%s %s top/btm_node:0x%p/0x%p\n", __func__,
@@ -461,16 +457,16 @@ static void sti_gdp_init(struct sti_layer *layer)
461{ 457{
462 struct sti_gdp *gdp = to_sti_gdp(layer); 458 struct sti_gdp *gdp = to_sti_gdp(layer);
463 struct device_node *np = layer->dev->of_node; 459 struct device_node *np = layer->dev->of_node;
464 dma_addr_t dma; 460 dma_addr_t dma_addr;
465 void *base; 461 void *base;
466 unsigned int i, size; 462 unsigned int i, size;
467 463
468 /* Allocate all the nodes within a single memory page */ 464 /* Allocate all the nodes within a single memory page */
469 size = sizeof(struct sti_gdp_node) * 465 size = sizeof(struct sti_gdp_node) *
470 GDP_NODE_PER_FIELD * GDP_NODE_NB_BANK; 466 GDP_NODE_PER_FIELD * GDP_NODE_NB_BANK;
471
472 base = dma_alloc_writecombine(layer->dev, 467 base = dma_alloc_writecombine(layer->dev,
473 size, &dma, GFP_KERNEL | GFP_DMA); 468 size, &dma_addr, GFP_KERNEL | GFP_DMA);
469
474 if (!base) { 470 if (!base) {
475 DRM_ERROR("Failed to allocate memory for GDP node\n"); 471 DRM_ERROR("Failed to allocate memory for GDP node\n");
476 return; 472 return;
@@ -478,21 +474,26 @@ static void sti_gdp_init(struct sti_layer *layer)
478 memset(base, 0, size); 474 memset(base, 0, size);
479 475
480 for (i = 0; i < GDP_NODE_NB_BANK; i++) { 476 for (i = 0; i < GDP_NODE_NB_BANK; i++) {
481 if (virt_to_dma(layer->dev, base) & 0xF) { 477 if (dma_addr & 0xF) {
482 DRM_ERROR("Mem alignment failed\n"); 478 DRM_ERROR("Mem alignment failed\n");
483 return; 479 return;
484 } 480 }
485 gdp->node_list[i].top_field = base; 481 gdp->node_list[i].top_field = base;
482 gdp->node_list[i].top_field_paddr = dma_addr;
483
486 DRM_DEBUG_DRIVER("node[%d].top_field=%p\n", i, base); 484 DRM_DEBUG_DRIVER("node[%d].top_field=%p\n", i, base);
487 base += sizeof(struct sti_gdp_node); 485 base += sizeof(struct sti_gdp_node);
486 dma_addr += sizeof(struct sti_gdp_node);
488 487
489 if (virt_to_dma(layer->dev, base) & 0xF) { 488 if (dma_addr & 0xF) {
490 DRM_ERROR("Mem alignment failed\n"); 489 DRM_ERROR("Mem alignment failed\n");
491 return; 490 return;
492 } 491 }
493 gdp->node_list[i].btm_field = base; 492 gdp->node_list[i].btm_field = base;
493 gdp->node_list[i].btm_field_paddr = dma_addr;
494 DRM_DEBUG_DRIVER("node[%d].btm_field=%p\n", i, base); 494 DRM_DEBUG_DRIVER("node[%d].btm_field=%p\n", i, base);
495 base += sizeof(struct sti_gdp_node); 495 base += sizeof(struct sti_gdp_node);
496 dma_addr += sizeof(struct sti_gdp_node);
496 } 497 }
497 498
498 if (of_device_is_compatible(np, "st,stih407-compositor")) { 499 if (of_device_is_compatible(np, "st,stih407-compositor")) {