summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/common/pmu
diff options
context:
space:
mode:
authorThomas Fleury <tfleury@nvidia.com>2018-09-01 17:05:36 -0400
committermobile promotions <svcmobile_promotions@nvidia.com>2018-09-20 13:51:40 -0400
commitc28e73ee2f92d1d287637a22d40d170b42771f96 (patch)
treea22c6e5352fee71ccab2ba859bdef4de66707845 /drivers/gpu/nvgpu/common/pmu
parenteada4a3823cd8d1471141c3a4ed7c5ffa9b5e798 (diff)
gpu: nvgpu: fix race condition in pmu_rpc_handler
There was a race condition between pmu_rpc_handler and nvgpu_pmu_rpc_execute. The latter could free the buffer before pmu_rpc_handler could access related data. Added explicit field in rpc_payload, so that nvgpu_pmu_rpc_execute can wait until pmu_rpc_handler completes. Bug 2331655 Change-Id: Ic2653524159eff10504b9c2625b5241610b5f5f0 Reviewed-on: https://git-master.nvidia.com/r/1811299 Signed-off-by: Vaikundanathan S <vaikuns@nvidia.com> Reviewed-on: https://git-master.nvidia.com/r/1817582 Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com> Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/common/pmu')
-rw-r--r--drivers/gpu/nvgpu/common/pmu/pmu_ipc.c12
1 file changed, 7 insertions(+), 5 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c b/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c
index 6f88260f..01b91662 100644
--- a/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c
+++ b/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c
@@ -740,7 +740,9 @@ int pmu_wait_message_cond(struct nvgpu_pmu *pmu, u32 timeout_ms,
740 nvgpu_timeout_init(g, &timeout, timeout_ms, NVGPU_TIMER_CPU_TIMER); 740 nvgpu_timeout_init(g, &timeout, timeout_ms, NVGPU_TIMER_CPU_TIMER);
741 741
742 do { 742 do {
743 if (*(u8 *)var == val) { 743 nvgpu_rmb();
744
745 if (*(volatile u8 *)var == val) {
744 return 0; 746 return 0;
745 } 747 }
746 748
@@ -859,6 +861,8 @@ static void pmu_rpc_handler(struct gk20a *g, struct pmu_msg *msg,
859 } 861 }
860 862
861exit: 863exit:
864 rpc_payload->complete = true;
865
862 /* free allocated memory */ 866 /* free allocated memory */
863 if (rpc_payload->is_mem_free_set) { 867 if (rpc_payload->is_mem_free_set) {
864 nvgpu_kfree(g, rpc_payload); 868 nvgpu_kfree(g, rpc_payload);
@@ -914,6 +918,7 @@ int nvgpu_pmu_rpc_execute(struct nvgpu_pmu *pmu, struct nv_pmu_rpc_header *rpc,
914 rpc_payload->rpc_buff = caller_cb_param; 918 rpc_payload->rpc_buff = caller_cb_param;
915 rpc_payload->is_mem_free_set = true; 919 rpc_payload->is_mem_free_set = true;
916 callback = caller_cb; 920 callback = caller_cb;
921 WARN_ON(is_copy_back);
917 } 922 }
918 923
919 rpc_buff = rpc_payload->rpc_buff; 924 rpc_buff = rpc_payload->rpc_buff;
@@ -945,12 +950,9 @@ int nvgpu_pmu_rpc_execute(struct nvgpu_pmu *pmu, struct nv_pmu_rpc_header *rpc,
945 * to read data back in nvgpu 950 * to read data back in nvgpu
946 */ 951 */
947 if (is_copy_back) { 952 if (is_copy_back) {
948 /* clear buff */
949 memset(rpc_buff, 0xFF, size_rpc);
950 /* wait till RPC execute in PMU & ACK */ 953 /* wait till RPC execute in PMU & ACK */
951 pmu_wait_message_cond(pmu, gk20a_get_gr_idle_timeout(g), 954 pmu_wait_message_cond(pmu, gk20a_get_gr_idle_timeout(g),
952 &((struct nv_pmu_rpc_header *)rpc_buff)->function, 955 &rpc_payload->complete, true);
953 rpc->function);
954 /* copy back data to caller */ 956 /* copy back data to caller */
955 memcpy(rpc, rpc_buff, size_rpc); 957 memcpy(rpc, rpc_buff, size_rpc);
956 /* free allocated memory */ 958 /* free allocated memory */