path: root/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
author:    Arto Merilainen <amerilainen@nvidia.com>  2014-07-21 03:16:24 -0400
committer: Dan Willemsen <dwillemsen@nvidia.com>     2015-03-18 15:10:32 -0400
commit:    9b00f352423d4bd90d10cf598e6ce8a750cdd869 (patch)
tree:      8a73a71d5b445be33146c43788bc33a367c70b51 /drivers/gpu/nvgpu/gk20a/gr_gk20a.c
parent:    61e918910326e84ca652649ee3b5036949d36fc3 (diff)
gpu: nvgpu: Allow reloading the golden context
In cases where a kernel channel dies, we can reload the context by
just reloading the golden context buffer. This patch makes the
necessary infrastructural changes to support this behaviour.

Bug 1409151

Change-Id: Ibe6a88bf7acea2d3aced2b86a7a687279075c386
Signed-off-by: Arto Merilainen <amerilainen@nvidia.com>
Reviewed-on: http://git-master/r/440262
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: Lauri Peltonen <lpeltonen@nvidia.com>
GVS: Gerrit_Virtual_Submit
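Note: once gr_gk20a_load_golden_ctx_image is exported (see the hunks
below), a recovery path can restore a dead kernel channel's graphics
context in place rather than tearing the channel down. A minimal sketch
of such a caller; the helper name gk20a_channel_recover_ctx and its
error handling are hypothetical, not part of this patch:

/*
 * Hypothetical recovery path (illustration only, not part of this
 * patch): instead of tearing down a dead kernel channel, overwrite
 * its gr_ctx with the saved golden context image.
 */
static int gk20a_channel_recover_ctx(struct gk20a *g,
				     struct channel_gk20a *c)
{
	/* reload the pristine golden copy into the channel's gr_ctx */
	int err = gr_gk20a_load_golden_ctx_image(g, c);

	if (err)
		gk20a_err(dev_from_gk20a(g),
			  "fail to restore golden ctx image");
	return err;
}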
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/gr_gk20a.c')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gr_gk20a.c  6
1 file changed, 2 insertions, 4 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
index b06ed9e6..e783f8d0 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
@@ -76,8 +76,6 @@ static void gr_gk20a_free_channel_patch_ctx(struct channel_gk20a *c);
 /* golden ctx image */
 static int gr_gk20a_init_golden_ctx_image(struct gk20a *g,
 					struct channel_gk20a *c);
-static int gr_gk20a_load_golden_ctx_image(struct gk20a *g,
-					struct channel_gk20a *c);
 
 void gk20a_fecs_dump_falcon_stats(struct gk20a *g)
 {
@@ -1656,7 +1654,7 @@ int gr_gk20a_update_smpc_ctxsw_mode(struct gk20a *g,
 }
 
 /* load saved fresh copy of gloden image into channel gr_ctx */
-static int gr_gk20a_load_golden_ctx_image(struct gk20a *g,
+int gr_gk20a_load_golden_ctx_image(struct gk20a *g,
 			struct channel_gk20a *c)
 {
 	struct gr_gk20a *gr = &g->gr;
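Dropping the static qualifier (together with the forward declaration
removed in the first hunk) only helps if the symbol is visible outside
gr_gk20a.c. This diffstat is limited to gr_gk20a.c, so the header side
is not shown; a plausible companion declaration in gr_gk20a.h, written
here as an assumption, would be:

/* gr_gk20a.h (assumed companion change, not shown in this diffstat):
 * expose the golden-context loader to other compilation units.
 */
int gr_gk20a_load_golden_ctx_image(struct gk20a *g,
				   struct channel_gk20a *c);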
@@ -2786,7 +2784,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c,
 	gk20a_dbg_fn("");
 
 	/* an address space needs to have been bound at this point.*/
-	if (!gk20a_channel_as_bound(c)) {
+	if (!gk20a_channel_as_bound(c) && !c->vm) {
 		gk20a_err(dev_from_gk20a(g),
 			"not bound to address space at time"
 			" of grctx allocation");