diff options
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/channel_gk20a.h |  5
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gk20a.c         |  5
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gk20a_sysfs.c   |  4
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gr_gk20a.c      | 16
-rw-r--r--  drivers/gpu/nvgpu/gk20a/mm_gk20a.c      |  2
5 files changed, 16 insertions(+), 16 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.h b/drivers/gpu/nvgpu/gk20a/channel_gk20a.h
index c60afb97..9982b1e0 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.h
@@ -51,11 +51,6 @@ struct notification {
 	u16 status;
 };
 
-struct fence {
-	u32 hw_chid;
-	u32 syncpt_val;
-};
-
 /* contexts associated with a channel */
 struct channel_ctx_gk20a {
 	struct gr_ctx_desc *gr_ctx;
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.c b/drivers/gpu/nvgpu/gk20a/gk20a.c
index c645aeea..6c18c895 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gk20a.c
@@ -1164,6 +1164,7 @@ static int gk20a_pm_unrailgate(struct generic_pm_domain *domain)
 	return _gk20a_pm_unrailgate(platform->g->dev);
 }
 
+#if 0
 static int gk20a_pm_suspend(struct device *dev)
 {
 	struct gk20a_platform *platform = dev_get_drvdata(dev);
@@ -1188,6 +1189,7 @@ static int gk20a_pm_resume(struct device *dev)
 {
 	return gk20a_pm_finalize_poweron(dev);
 }
+#endif
 
 static int gk20a_pm_initialise_domain(struct platform_device *pdev)
 {
@@ -1211,8 +1213,11 @@ static int gk20a_pm_initialise_domain(struct platform_device *pdev)
 	domain->dev_ops.stop = gk20a_pm_disable_clk;
 	domain->dev_ops.save_state = gk20a_pm_prepare_poweroff;
 	domain->dev_ops.restore_state = gk20a_pm_finalize_poweron;
+#warning domain suspend/resume ops have been removed upstream
+#if 0
 	domain->dev_ops.suspend = gk20a_pm_suspend;
 	domain->dev_ops.resume = gk20a_pm_resume;
+#endif
 
 	device_set_wakeup_capable(&pdev->dev, 0);
 	ret = pm_genpd_add_device(domain, &pdev->dev);
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a_sysfs.c b/drivers/gpu/nvgpu/gk20a/gk20a_sysfs.c
index 42720307..7baadf2e 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a_sysfs.c
+++ b/drivers/gpu/nvgpu/gk20a/gk20a_sysfs.c
@@ -435,7 +435,7 @@ static ssize_t aelpg_param_read(struct device *device,
 		g->pmu.aelpg_param[3], g->pmu.aelpg_param[4]);
 }
 
-static DEVICE_ATTR(aelpg_param, S_IRWXUGO,
+static DEVICE_ATTR(aelpg_param, ROOTRW,
 		aelpg_param_read, aelpg_param_store);
 
 static ssize_t aelpg_enable_store(struct device *device,
@@ -648,7 +648,7 @@ static ssize_t tpc_fs_mask_read(struct device *device,
 	return sprintf(buf, "0x%x\n", tpc_fs_mask);
 }
 
-static DEVICE_ATTR(tpc_fs_mask, S_IRWXUGO, tpc_fs_mask_read, tpc_fs_mask_store);
+static DEVICE_ATTR(tpc_fs_mask, ROOTRW, tpc_fs_mask_read, tpc_fs_mask_store);
 
 void gk20a_remove_sysfs(struct device *dev)
 {
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
index e9b39487..4d101845 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
@@ -580,7 +580,7 @@ int gr_gk20a_ctx_patch_write_begin(struct gk20a *g,
 
 	ch_ctx->patch_ctx.cpu_va = vmap(ch_ctx->patch_ctx.pages,
 			PAGE_ALIGN(ch_ctx->patch_ctx.size) >> PAGE_SHIFT,
-			0, pgprot_dmacoherent(PAGE_KERNEL));
+			0, pgprot_writecombine(PAGE_KERNEL));
 
 	if (!ch_ctx->patch_ctx.cpu_va)
 		return -ENOMEM;
@@ -691,7 +691,7 @@ static int gr_gk20a_ctx_zcull_setup(struct gk20a *g, struct channel_gk20a *c,
 
 	ctx_ptr = vmap(ch_ctx->gr_ctx->pages,
 			PAGE_ALIGN(ch_ctx->gr_ctx->size) >> PAGE_SHIFT,
-			0, pgprot_dmacoherent(PAGE_KERNEL));
+			0, pgprot_writecombine(PAGE_KERNEL));
 	if (!ctx_ptr)
 		return -ENOMEM;
 
@@ -1455,13 +1455,13 @@ static int gr_gk20a_init_golden_ctx_image(struct gk20a *g,
 
 	gold_ptr = vmap(gr->global_ctx_buffer[GOLDEN_CTX].pages,
 			PAGE_ALIGN(gr->global_ctx_buffer[GOLDEN_CTX].size) >>
-			PAGE_SHIFT, 0, pgprot_dmacoherent(PAGE_KERNEL));
+			PAGE_SHIFT, 0, pgprot_writecombine(PAGE_KERNEL));
 	if (!gold_ptr)
 		goto clean_up;
 
 	ctx_ptr = vmap(ch_ctx->gr_ctx->pages,
 			PAGE_ALIGN(ch_ctx->gr_ctx->size) >> PAGE_SHIFT,
-			0, pgprot_dmacoherent(PAGE_KERNEL));
+			0, pgprot_writecombine(PAGE_KERNEL));
 	if (!ctx_ptr)
 		goto clean_up;
 
@@ -1535,7 +1535,7 @@ int gr_gk20a_update_smpc_ctxsw_mode(struct gk20a *g,
 
 	ctx_ptr = vmap(ch_ctx->gr_ctx->pages,
 			PAGE_ALIGN(ch_ctx->gr_ctx->size) >> PAGE_SHIFT,
-			0, pgprot_dmacoherent(PAGE_KERNEL));
+			0, pgprot_writecombine(PAGE_KERNEL));
 	if (!ctx_ptr)
 		return -ENOMEM;
 
@@ -1575,7 +1575,7 @@ int gr_gk20a_load_golden_ctx_image(struct gk20a *g,
 
 	ctx_ptr = vmap(ch_ctx->gr_ctx->pages,
 			PAGE_ALIGN(ch_ctx->gr_ctx->size) >> PAGE_SHIFT,
-			0, pgprot_dmacoherent(PAGE_KERNEL));
+			0, pgprot_writecombine(PAGE_KERNEL));
 	if (!ctx_ptr)
 		return -ENOMEM;
 
@@ -4554,7 +4554,7 @@ static int gr_gk20a_init_access_map(struct gk20a *g)
 
 	data = vmap(gr->global_ctx_buffer[PRIV_ACCESS_MAP].pages,
 			PAGE_ALIGN(gr->global_ctx_buffer[PRIV_ACCESS_MAP].size) >>
-			PAGE_SHIFT, 0, pgprot_dmacoherent(PAGE_KERNEL));
+			PAGE_SHIFT, 0, pgprot_writecombine(PAGE_KERNEL));
 	if (!data) {
 		gk20a_err(dev_from_gk20a(g),
 			  "failed to map priv access map memory");
@@ -6970,7 +6970,7 @@ int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
 	/* recoded in-place instead.*/
 	ctx_ptr = vmap(ch_ctx->gr_ctx->pages,
 			PAGE_ALIGN(ch_ctx->gr_ctx->size) >> PAGE_SHIFT,
-			0, pgprot_dmacoherent(PAGE_KERNEL));
+			0, pgprot_writecombine(PAGE_KERNEL));
 	if (!ctx_ptr) {
 		err = -ENOMEM;
 		goto cleanup;
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index e51ce7c5..75de063a 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -550,7 +550,7 @@ int map_gmmu_pages(void *handle, struct sg_table *sgt,
 		*kva = handle;
 	} else {
 		pages = (struct page **)handle;
-		*kva = vmap(pages, count, 0, pgprot_dmacoherent(PAGE_KERNEL));
+		*kva = vmap(pages, count, 0, pgprot_writecombine(PAGE_KERNEL));
 		if (!(*kva))
 			return -ENOMEM;
 	}