author    Terje Bergstrom <tbergstrom@nvidia.com>    2014-06-18 07:39:25 -0400
committer Dan Willemsen <dwillemsen@nvidia.com>      2015-03-18 15:10:17 -0400
commit    7878824093972a6b8805dd8c00f1838e24a61ec0 (patch)
tree      efbef295366773abb59c53aa26c3768a6619c3ad /drivers/gpu/nvgpu/gm20b/acr_gm20b.c
parent    7ed71374e90f8e5c8554cb7d2f14aa8e9a807862 (diff)
gpu: nvgpu: Separate PMU firmware load from init
Separate the code to load PMU firmware from the software init. This allows
folding ACR and non-ACR PMU software initialization sequences.

Bug 200006956

Change-Id: I74b289747852167e8ebf1be63036c790ae634da4
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: http://git-master/r/424768
Reviewed-by: Automatic_Commit_Validation_User
GVS: Gerrit_Virtual_Submit
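The practical effect is that the chip-specific layer now exports only a
firmware-load hook, so the software init that used to be duplicated can run
once in the common PMU code for both the ACR and non-ACR paths. Below is a
minimal sketch of how a shared caller could drive the split hooks;
gk20a_init_pmu_support_sketch() and gk20a_init_pmu_setup_sw_common() are
hypothetical names used for illustration (the folded pmu_gk20a.c side is not
part of this diff), while the two gpu_ops fields are the ones this patch
actually touches.

/* Sketch under assumed names: sequence the split hooks from common code. */
static int gk20a_init_pmu_support_sketch(struct gk20a *g)
{
        int err;

        /* Chip-specific firmware load: prepare_ucode_blob() builds the
         * ACR blob on gm20b; other chips can install a plain loader. */
        if (g->ops.pmu.prepare_ucode) {
                err = g->ops.pmu.prepare_ucode(g);
                if (err)
                        return err;
        }

        /* Shared SW init (mutexes, sequences, seq buffer) -- hypothetical
         * helper that runs identically for ACR and non-ACR paths. */
        err = gk20a_init_pmu_setup_sw_common(g);
        if (err)
                return err;

        /* Chip-specific HW setup: HS falcon bootstrap on gm20b. */
        return g->ops.pmu.pmu_setup_hw_and_bootstrap(g);
}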
Diffstat (limited to 'drivers/gpu/nvgpu/gm20b/acr_gm20b.c')
-rw-r--r--    drivers/gpu/nvgpu/gm20b/acr_gm20b.c    151
1 file changed, 1 insertion(+), 150 deletions(-)
diff --git a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
index 1a136cdb..2b7be4f7 100644
--- a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
@@ -68,7 +68,7 @@ void start_gm20b_pmu(struct gk20a *g)
 
 void gm20b_init_secure_pmu(struct gpu_ops *gops)
 {
-        gops->pmu.pmu_setup_sw = gm20b_pmu_setup_sw;
+        gops->pmu.prepare_ucode = prepare_ucode_blob;
         gops->pmu.pmu_setup_hw_and_bootstrap = gm20b_bootstrap_hs_flcn;
 }
 
@@ -77,155 +77,6 @@ static void free_blob_res(struct gk20a *g)
         /*TODO */
 }
 
-int gm20b_pmu_setup_sw(struct gk20a *g)
-{
-        /*from pmu_gk20a.c*/
-        struct pmu_gk20a *pmu = &g->pmu;
-        struct mm_gk20a *mm = &g->mm;
-        struct vm_gk20a *vm = &mm->pmu.vm;
-        struct device *d = dev_from_gk20a(g);
-        int i, err = 0;
-        u8 *ptr;
-        struct sg_table *sgt_seq_buf;
-        dma_addr_t iova;
-
-        gk20a_dbg_fn("");
-        /* Make any ACR structure settings here if ever need be*/
-
-        if (pmu->sw_ready) {
-                for (i = 0; i < pmu->mutex_cnt; i++) {
-                        pmu->mutex[i].id = i;
-                        pmu->mutex[i].index = i;
-                }
-                pmu_seq_init(pmu);
-
-                mutex_init(&pmu->elpg_mutex);
-                mutex_init(&pmu->isr_mutex);
-                mutex_init(&pmu->pmu_copy_lock);
-                mutex_init(&pmu->pmu_seq_lock);
-                gk20a_dbg_fn("skip init");
-                goto skip_init;
-        }
-        gm20b_dbg_pmu("gk20a_init_pmu_setup_sw 2\n");
-
-        /* TBD: sysmon subtask */
-
-        if (IS_ENABLED(CONFIG_TEGRA_GK20A_PERFMON))
-                pmu->perfmon_sampling_enabled = true;
-
-        pmu->mutex_cnt = pwr_pmu_mutex__size_1_v();
-        pmu->mutex = kzalloc(pmu->mutex_cnt *
-                sizeof(struct pmu_mutex), GFP_KERNEL);
-        if (!pmu->mutex) {
-                err = -ENOMEM;
-                goto err;
-        }
-
-        for (i = 0; i < pmu->mutex_cnt; i++) {
-                pmu->mutex[i].id = i;
-                pmu->mutex[i].index = i;
-        }
-        gm20b_dbg_pmu("gk20a_init_pmu_setup_sw 3\n");
-
-        pmu->seq = kzalloc(PMU_MAX_NUM_SEQUENCES *
-                sizeof(struct pmu_sequence), GFP_KERNEL);
-        if (!pmu->seq) {
-                err = -ENOMEM;
-                goto err_free_mutex;
-        }
-
-        pmu_seq_init(pmu);
-        mutex_init(&pmu->elpg_mutex);
-        mutex_init(&pmu->isr_mutex);
-        mutex_init(&pmu->pmu_copy_lock);
-        mutex_init(&pmu->pmu_seq_lock);
-
-        err = prepare_ucode_blob(g);
-        if (err)
-                goto err_free_seq;
-        INIT_WORK(&pmu->pg_init, pmu_setup_hw);
-        pmu->seq_buf.cpuva = dma_alloc_coherent(d, GK20A_PMU_SEQ_BUF_SIZE,
-                                        &iova,
-                                        GFP_KERNEL);
-        if (!pmu->seq_buf.cpuva) {
-                gk20a_err(d, "failed to allocate memory\n");
-                err = -ENOMEM;
-                goto err_free_blob_res;
-        }
-
-        pmu->seq_buf.iova = iova;
-        err = gk20a_get_sgtable(d, &sgt_seq_buf,
-                                pmu->seq_buf.cpuva,
-                                pmu->seq_buf.iova,
-                                GK20A_PMU_SEQ_BUF_SIZE);
-        if (err) {
-                gk20a_err(d, "failed to allocate sg table\n");
-                goto err_free_seq_buf;
-        }
-
-        pmu->seq_buf.pmu_va = gk20a_gmmu_map(vm, &sgt_seq_buf,
-                                        GK20A_PMU_SEQ_BUF_SIZE,
-                                        0, /* flags */
-                                        gk20a_mem_flag_none);
-        if (!pmu->seq_buf.pmu_va) {
-                gk20a_err(d, "failed to map pmu ucode memory!!");
-                goto err_free_seq_buf_sgt;
-        }
-
-        ptr = (u8 *)pmu->seq_buf.cpuva;
-        if (!ptr) {
-                gk20a_err(d, "failed to map cpu ptr for zbc buffer");
-                goto err_unmap_seq_buf;
-        }
-
-        /* TBD: remove this if ZBC save/restore is handled by PMU
-         * end an empty ZBC sequence for now */
-        ptr[0] = 0x16; /* opcode EXIT */
-        ptr[1] = 0; ptr[2] = 1; ptr[3] = 0;
-        ptr[4] = 0; ptr[5] = 0; ptr[6] = 0; ptr[7] = 0;
-
-        pmu->seq_buf.size = GK20A_PMU_SEQ_BUF_SIZE;
-
-        gk20a_dbg_fn("done");
-        gk20a_free_sgtable(&sgt_seq_buf);
-
-        pmu->sw_ready = true;
-
-skip_init:
-        pmu->perfmon_counter.index = 3; /* GR & CE2 */
-        pmu->perfmon_counter.group_id = PMU_DOMAIN_GROUP_PSTATE;
-
-        pmu->remove_support = gk20a_remove_pmu_support;
-        err = gk20a_init_pmu(pmu);
-        if (err) {
-                gk20a_err(d, "failed to set function pointers\n");
-                goto err_unmap_seq_buf;
-        }
-
-        gk20a_dbg_fn("done");
-        return 0;
-
- err_unmap_seq_buf:
-        gk20a_gmmu_unmap(vm, pmu->seq_buf.pmu_va,
-                GK20A_PMU_SEQ_BUF_SIZE, gk20a_mem_flag_none);
- err_free_seq_buf_sgt:
-        gk20a_free_sgtable(&sgt_seq_buf);
- err_free_seq_buf:
-        dma_free_coherent(d, GK20A_PMU_SEQ_BUF_SIZE,
-                pmu->seq_buf.cpuva, pmu->seq_buf.iova);
-        pmu->seq_buf.cpuva = NULL;
-        pmu->seq_buf.iova = 0;
- err_free_blob_res:
-        free_blob_res(g);
- err_free_seq:
-        kfree(pmu->seq);
- err_free_mutex:
-        kfree(pmu->mutex);
- err:
-        gk20a_dbg_fn("fail");
-        return err;
-}
-
 int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img *p_img)
 {
         const struct firmware *pmu_fw;
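For reference, the core of the deleted gm20b_pmu_setup_sw() was the
sequence-buffer bring-up, condensed below as a sketch of its
allocate/map/unwind shape. The identifiers come from the removed code, but
this is an illustration of the pattern, not the folded common implementation
(which lives in pmu_gk20a.c, outside this diff). Note that the sketch also
sets err on the map-failure path, which the removed code left at 0.

/* Sketch: DMA-allocate the PMU sequence buffer, wrap it in an sg_table,
 * map it into the PMU VM, and unwind each step on failure.
 * Condensed from the removed gm20b_pmu_setup_sw(). */
static int pmu_seq_buf_setup_sketch(struct gk20a *g)
{
        struct pmu_gk20a *pmu = &g->pmu;
        struct vm_gk20a *vm = &g->mm.pmu.vm;
        struct device *d = dev_from_gk20a(g);
        struct sg_table *sgt_seq_buf;
        dma_addr_t iova;
        int err;

        pmu->seq_buf.cpuva = dma_alloc_coherent(d, GK20A_PMU_SEQ_BUF_SIZE,
                                                &iova, GFP_KERNEL);
        if (!pmu->seq_buf.cpuva)
                return -ENOMEM;
        pmu->seq_buf.iova = iova;

        err = gk20a_get_sgtable(d, &sgt_seq_buf, pmu->seq_buf.cpuva,
                                pmu->seq_buf.iova, GK20A_PMU_SEQ_BUF_SIZE);
        if (err)
                goto err_free_buf;

        pmu->seq_buf.pmu_va = gk20a_gmmu_map(vm, &sgt_seq_buf,
                                             GK20A_PMU_SEQ_BUF_SIZE,
                                             0 /* flags */,
                                             gk20a_mem_flag_none);
        if (!pmu->seq_buf.pmu_va) {
                err = -ENOMEM; /* the removed code forgot to set err here */
                goto err_free_sgt;
        }

        pmu->seq_buf.size = GK20A_PMU_SEQ_BUF_SIZE;
        /* The sg_table is only needed while building the mapping. */
        gk20a_free_sgtable(&sgt_seq_buf);
        return 0;

err_free_sgt:
        gk20a_free_sgtable(&sgt_seq_buf);
err_free_buf:
        dma_free_coherent(d, GK20A_PMU_SEQ_BUF_SIZE,
                          pmu->seq_buf.cpuva, pmu->seq_buf.iova);
        pmu->seq_buf.cpuva = NULL;
        pmu->seq_buf.iova = 0;
        return err;
}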