author	Mahantesh Kumbar <mkumbar@nvidia.com>	2018-02-27 04:22:19 -0500
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2018-03-13 17:09:52 -0400
commit	76ad9e8366f5c73d1ea47d54cea043f8cd9fa23e (patch)
tree	6ca5a8ba117568252a1e75a28542d405a250e49e	/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c
parent	1d986dc33eae7cb56cd0a1beded5ba858f0535b9 (diff)
gpu: nvgpu: Updated RPC to support copyback & callback
- Updated nvgpu_pmu_rpc_execute() with a new parameter "bool is_copy_back":
  when passed as true, the processed RPC request is copied back from the
  PMU to the caller, and the method blocks until it receives an ACK from
  the PMU for the requested RPC.
- Added "struct rpc_handler_payload" to hold the info required by the RPC
  handler, such as the RPC buffer address and whether to free the memory
  when copy back is not requested.
- Added define PMU_RPC_EXECUTE_CPB to support copying back the processed
  RPC request from the PMU to the caller.
- Updated RPC callback handler support: created memory & assigned the
  default handler when no callback is requested, otherwise used the
  callback parameter data for the request to the PMU.
- Added define PMU_RPC_EXECUTE_CB to support callbacks.
- Updated pmu_wait_message_cond() to restrict the condition check to
  8 bits instead of 32 bits.

Change-Id: Ic05289b074954979fd0102daf5ab806bf1f07b62
Signed-off-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1664962
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
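For context, a minimal caller-side sketch of the copy-back path described above (not part of this change). The nvgpu_pmu_rpc_execute() signature, struct nv_pmu_rpc_struct_perfmon_query, NV_PMU_RPC_ID_PERFMON_T18X_QUERY and sample_buffer come from the diff below; the embedded header member name "hdr" and the helper function itself are assumptions made for illustration only.

/*
 * Hypothetical helper (sketch only): issue a perfmon query as a blocking,
 * copy-back RPC and read the first load sample out of the returned buffer.
 */
static int example_perfmon_query_load(struct gk20a *g, u32 *load)
{
        struct nv_pmu_rpc_struct_perfmon_query rpc;
        int err;

        memset(&rpc, 0, sizeof(rpc));
        /*
         * Assumed: RPC structs embed struct nv_pmu_rpc_header as member "hdr";
         * unit-id/size header fields are omitted since their names are not
         * visible in this diff.
         */
        rpc.hdr.function = NV_PMU_RPC_ID_PERFMON_T18X_QUERY;

        /*
         * is_copy_back = true: block until the PMU ACKs the RPC, then the
         * processed request is copied back into "rpc" for the caller to read.
         */
        err = nvgpu_pmu_rpc_execute(&g->pmu, &rpc.hdr,
                        (u16)sizeof(rpc), 0, NULL, NULL, true);
        if (!err)
                *load = rpc.sample_buffer[0];

        return err;
}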
Diffstat (limited to 'drivers/gpu/nvgpu/common/pmu/pmu_ipc.c')
-rw-r--r--	drivers/gpu/nvgpu/common/pmu/pmu_ipc.c	100
1 files changed, 73 insertions, 27 deletions
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c b/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c
index 72337a1d..bb4edf38 100644
--- a/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c
+++ b/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c
@@ -953,7 +953,7 @@ int nvgpu_pmu_process_message(struct nvgpu_pmu *pmu)
 }
 
 int pmu_wait_message_cond(struct nvgpu_pmu *pmu, u32 timeout_ms,
-        u32 *var, u32 val)
+        void *var, u8 val)
 {
         struct gk20a *g = gk20a_from_pmu(pmu);
         struct nvgpu_timeout timeout;
@@ -962,7 +962,7 @@ int pmu_wait_message_cond(struct nvgpu_pmu *pmu, u32 timeout_ms,
         nvgpu_timeout_init(g, &timeout, timeout_ms, NVGPU_TIMER_CPU_TIMER);
 
         do {
-                if (*var == val)
+                if (*(u8 *)var == val)
                         return 0;
 
                 if (gk20a_pmu_is_interrupted(pmu))
@@ -980,11 +980,12 @@ static void pmu_rpc_handler(struct gk20a *g, struct pmu_msg *msg,
 {
         struct nv_pmu_rpc_header rpc;
         struct nvgpu_pmu *pmu = &g->pmu;
-        struct nv_pmu_rpc_struct_perfmon_query *rpc_param;
+        struct rpc_handler_payload *rpc_payload =
+                (struct rpc_handler_payload *)param;
+        struct nv_pmu_rpc_struct_perfmon_query *rpc_param;
 
         memset(&rpc, 0, sizeof(struct nv_pmu_rpc_header));
-        if (param)
-                memcpy(&rpc, param, sizeof(struct nv_pmu_rpc_header));
+        memcpy(&rpc, rpc_payload->rpc_buff, sizeof(struct nv_pmu_rpc_header));
 
         if (rpc.flcn_status) {
                 nvgpu_err(g, " failed RPC response, status=0x%x, func=0x%x",
@@ -1026,7 +1027,8 @@ static void pmu_rpc_handler(struct gk20a *g, struct pmu_msg *msg,
         case NV_PMU_RPC_ID_PERFMON_T18X_QUERY:
                 nvgpu_pmu_dbg(g,
                         "reply NV_PMU_RPC_ID_PERFMON_QUERY");
-                rpc_param = (struct nv_pmu_rpc_struct_perfmon_query *)param;
+                rpc_param = (struct nv_pmu_rpc_struct_perfmon_query *)
+                        rpc_payload->rpc_buff;
                 pmu->load = rpc_param->sample_buffer[0];
                 pmu->perfmon_query = 1;
                 /* set perfmon_query to 1 after load is copied */
@@ -1042,32 +1044,62 @@ static void pmu_rpc_handler(struct gk20a *g, struct pmu_msg *msg,
 
 exit:
         /* free allocated memory */
-        if (param)
-                nvgpu_kfree(g, param);
+        if (rpc_payload->is_mem_free_set)
+                nvgpu_kfree(g, rpc_payload);
 }
 
 int nvgpu_pmu_rpc_execute(struct nvgpu_pmu *pmu, struct nv_pmu_rpc_header *rpc,
         u16 size_rpc, u16 size_scratch, pmu_callback caller_cb,
-        void *caller_cb_param)
+        void *caller_cb_param, bool is_copy_back)
 {
         struct gk20a *g = pmu->g;
         struct pmu_cmd cmd;
         struct pmu_payload payload;
-        pmu_callback callback = caller_cb;
+        struct rpc_handler_payload *rpc_payload = NULL;
+        pmu_callback callback = NULL;
         void *rpc_buff = NULL;
-        void *cb_param = caller_cb_param;
         u32 seq = 0;
         int status = 0;
 
         if (!pmu->pmu_ready) {
                 nvgpu_warn(g, "PMU is not ready to process RPC");
-                return -EINVAL;
+                status = EINVAL;
+                goto exit;
         }
 
-        rpc_buff = nvgpu_kzalloc(g, size_rpc);
-        if (!rpc_buff)
-                return -ENOMEM;
+        if (caller_cb == NULL) {
+                rpc_payload = nvgpu_kzalloc(g,
+                        sizeof(struct rpc_handler_payload) + size_rpc);
+                if (!rpc_payload) {
+                        status = ENOMEM;
+                        goto exit;
+                }
+
+                rpc_payload->rpc_buff = (u8 *)rpc_payload +
+                        sizeof(struct rpc_handler_payload);
+                rpc_payload->is_mem_free_set =
+                        is_copy_back ? false : true;
+
+                /* assign default RPC handler*/
+                callback = pmu_rpc_handler;
+        } else {
+                if (caller_cb_param == NULL) {
+                        nvgpu_err(g, "Invalid cb param addr");
+                        status = EINVAL;
+                        goto exit;
+                }
+                rpc_payload = nvgpu_kzalloc(g,
+                        sizeof(struct rpc_handler_payload));
+                if (!rpc_payload) {
+                        status = ENOMEM;
+                        goto exit;
+                }
+                rpc_payload->rpc_buff = caller_cb_param;
+                rpc_payload->is_mem_free_set = true;
+                callback = caller_cb;
+        }
 
+        rpc_buff = rpc_payload->rpc_buff;
         memset(&cmd, 0, sizeof(struct pmu_cmd));
         memset(&payload, 0, sizeof(struct pmu_payload));
 
@@ -1081,24 +1113,38 @@ int nvgpu_pmu_rpc_execute(struct nvgpu_pmu *pmu, struct nv_pmu_rpc_header *rpc,
         payload.rpc.size_rpc = size_rpc;
         payload.rpc.size_scratch = size_scratch;
 
-        /* assign default RPC handler & buffer */
-        if (!callback && !cb_param) {
-                callback = pmu_rpc_handler;
-                cb_param = rpc_buff;
-        }
-
         status = nvgpu_pmu_cmd_post(g, &cmd, NULL, &payload,
-                PMU_COMMAND_QUEUE_LPQ, pmu_rpc_handler,
-                cb_param, &seq, ~0);
+                PMU_COMMAND_QUEUE_LPQ, callback,
+                rpc_payload, &seq, ~0);
         if (status) {
                 nvgpu_err(g, "Failed to execute RPC status=0x%x, func=0x%x",
                         status, rpc->function);
+                goto exit;
         }
 
-        /* if caller passed buff then free allocated RPC buffer */
-        if (caller_cb_param)
-                nvgpu_kfree(g, rpc_buff);
+        /*
+         * Option act like blocking call, which waits till RPC request
+         * executes on PMU & copy back processed data to rpc_buff
+         * to read data back in nvgpu
+         */
+        if (is_copy_back) {
+                /* clear buff */
+                memset(rpc_buff, 0, size_rpc);
+                /* wait till RPC execute in PMU & ACK */
+                pmu_wait_message_cond(pmu, gk20a_get_gr_idle_timeout(g),
+                        &((struct nv_pmu_rpc_header *)rpc_buff)->function,
+                        rpc->function);
+                /* copy back data to caller */
+                memcpy(rpc, rpc_buff, size_rpc);
+                /* free allocated memory */
+                nvgpu_kfree(g, rpc_payload);
+        }
 
-        return status;
+exit:
+        if (status) {
+                if (rpc_payload)
+                        nvgpu_kfree(g, rpc_payload);
+        }
 
+        return status;
 }
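The struct rpc_handler_payload used throughout this patch is declared outside this file (in the nvgpu PMU headers, not shown in this diff). Below is a sketch consistent with how the code above uses it; the two field names come straight from the diff, while the layout and any additional members are assumptions.

/* Sketch only: the actual definition lives in the nvgpu PMU headers. */
struct rpc_handler_payload {
        void *rpc_buff;         /* RPC request/response buffer handed to pmu_rpc_handler() */
        bool is_mem_free_set;   /* when set, the handler frees this allocation on completion */
};

When nvgpu owns the buffer (no caller callback), nvgpu_pmu_rpc_execute() allocates sizeof(struct rpc_handler_payload) + size_rpc in a single nvgpu_kzalloc() and points rpc_buff just past the struct; is_mem_free_set is cleared only for copy-back requests, where the execute path itself frees the payload after copying the response back to the caller.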