author		Terje Bergstrom <tbergstrom@nvidia.com>	2017-11-10 13:26:41 -0500
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-11-15 16:26:15 -0500
commit		44f8b11f47bc31aafd0e3d2486125e1d87725fd4 (patch)
tree		db041826c9aa327579a2542e6833270cee785a07	/drivers/gpu/nvgpu/common/linux/ioctl_ctrl.c
parent		1f28b429a2fb73a260e0c9fe112dbbc6981ef4b4 (diff)
gpu: nvgpu: Remove GPU characteristics from gk20a
Remove the global copy of GPU characteristics kept in struct gk20a. Instead, fill the structure in the Linux implementation of the GPU characteristics IOCTL.

JIRA NVGPU-388

Change-Id: Idc4ad58301d44a554777f5b969f3191a342e73fd
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1597330
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/common/linux/ioctl_ctrl.c')
-rw-r--r--	drivers/gpu/nvgpu/common/linux/ioctl_ctrl.c	120
1 file changed, 88 insertions(+), 32 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_ctrl.c b/drivers/gpu/nvgpu/common/linux/ioctl_ctrl.c
index 58178ac3..2a91b87d 100644
--- a/drivers/gpu/nvgpu/common/linux/ioctl_ctrl.c
+++ b/drivers/gpu/nvgpu/common/linux/ioctl_ctrl.c
@@ -221,57 +221,113 @@ gk20a_ctrl_ioctl_gpu_characteristics(
 		struct gk20a *g,
 		struct nvgpu_gpu_get_characteristics *request)
 {
-	struct nvgpu_gpu_characteristics *pgpu = &g->gpu_characteristics;
+	struct nvgpu_gpu_characteristics gpu;
 	long err = 0;
 
-	pgpu->flags = nvgpu_ctrl_ioctl_gpu_characteristics_flags(g);
+	if (gk20a_busy(g)) {
+		nvgpu_err(g, "failed to power on gpu");
+		return -EINVAL;
+	}
+
+	memset(&gpu, 0, sizeof(gpu));
+
+	gpu.L2_cache_size = g->ops.ltc.determine_L2_size_bytes(g);
+	gpu.on_board_video_memory_size = 0; /* integrated GPU */
+
+	gpu.num_gpc = g->gr.gpc_count;
+	gpu.max_gpc_count = g->gr.max_gpc_count;
+
+	gpu.num_tpc_per_gpc = g->gr.max_tpc_per_gpc_count;
+
+	gpu.bus_type = NVGPU_GPU_BUS_TYPE_AXI; /* always AXI for now */
+
+	gpu.compression_page_size = g->ops.fb.compression_page_size(g);
+
+	gpu.gpc_mask = (1 << g->gr.gpc_count)-1;
+
+	gpu.flags = nvgpu_ctrl_ioctl_gpu_characteristics_flags(g);
 #ifdef CONFIG_TEGRA_19x_GPU
-	pgpu->flags |= nvgpu_ctrl_ioctl_gpu_characteristics_flags_t19x(g);
+	gpu.flags |= nvgpu_ctrl_ioctl_gpu_characteristics_flags_t19x(g);
 #endif
-	pgpu->arch = g->params.gpu_arch;
-	pgpu->impl = g->params.gpu_impl;
-	pgpu->rev = g->params.gpu_rev;
-	pgpu->reg_ops_limit = NVGPU_IOCTL_DBG_REG_OPS_LIMIT;
-	pgpu->map_buffer_batch_limit = nvgpu_is_enabled(g, NVGPU_SUPPORT_MAP_BUFFER_BATCH) ?
+	gpu.arch = g->params.gpu_arch;
+	gpu.impl = g->params.gpu_impl;
+	gpu.rev = g->params.gpu_rev;
+	gpu.reg_ops_limit = NVGPU_IOCTL_DBG_REG_OPS_LIMIT;
+	gpu.map_buffer_batch_limit = nvgpu_is_enabled(g, NVGPU_SUPPORT_MAP_BUFFER_BATCH) ?
 		NVGPU_IOCTL_AS_MAP_BUFFER_BATCH_LIMIT : 0;
-	pgpu->twod_class = g->ops.get_litter_value(g, GPU_LIT_TWOD_CLASS);
-	pgpu->threed_class = g->ops.get_litter_value(g, GPU_LIT_THREED_CLASS);
-	pgpu->compute_class = g->ops.get_litter_value(g, GPU_LIT_COMPUTE_CLASS);
-	pgpu->gpfifo_class = g->ops.get_litter_value(g, GPU_LIT_GPFIFO_CLASS);
-	pgpu->inline_to_memory_class =
+	gpu.twod_class = g->ops.get_litter_value(g, GPU_LIT_TWOD_CLASS);
+	gpu.threed_class = g->ops.get_litter_value(g, GPU_LIT_THREED_CLASS);
+	gpu.compute_class = g->ops.get_litter_value(g, GPU_LIT_COMPUTE_CLASS);
+	gpu.gpfifo_class = g->ops.get_litter_value(g, GPU_LIT_GPFIFO_CLASS);
+	gpu.inline_to_memory_class =
 		g->ops.get_litter_value(g, GPU_LIT_I2M_CLASS);
-	pgpu->dma_copy_class =
+	gpu.dma_copy_class =
 		g->ops.get_litter_value(g, GPU_LIT_DMA_COPY_CLASS);
 
-	pgpu->vbios_version = g->bios.vbios_version;
-	pgpu->vbios_oem_version = g->bios.vbios_oem_version;
+	gpu.vbios_version = g->bios.vbios_version;
+	gpu.vbios_oem_version = g->bios.vbios_oem_version;
 
-	pgpu->big_page_size = nvgpu_mm_get_default_big_page_size(g);
-	pgpu->pde_coverage_bit_count =
-		g->ops.mm.get_mmu_levels(g, pgpu->big_page_size)[0].lo_bit[0];
-	pgpu->available_big_page_sizes = nvgpu_mm_get_available_big_page_sizes(g);
+	gpu.big_page_size = nvgpu_mm_get_default_big_page_size(g);
+	gpu.pde_coverage_bit_count =
+		g->ops.mm.get_mmu_levels(g, gpu.big_page_size)[0].lo_bit[0];
+	gpu.available_big_page_sizes = nvgpu_mm_get_available_big_page_sizes(g);
 
-	pgpu->sm_arch_sm_version = g->params.sm_arch_sm_version;
-	pgpu->sm_arch_spa_version = g->params.sm_arch_spa_version;
-	pgpu->sm_arch_warp_count = g->params.sm_arch_warp_count;
+	gpu.sm_arch_sm_version = g->params.sm_arch_sm_version;
+	gpu.sm_arch_spa_version = g->params.sm_arch_spa_version;
+	gpu.sm_arch_warp_count = g->params.sm_arch_warp_count;
 
-	pgpu->max_css_buffer_size = g->gr.max_css_buffer_size;
+	gpu.max_css_buffer_size = g->gr.max_css_buffer_size;
 
-	nvgpu_set_preemption_mode_flags(g, pgpu);
+	gpu.gpu_ioctl_nr_last = NVGPU_GPU_IOCTL_LAST;
+	gpu.tsg_ioctl_nr_last = NVGPU_TSG_IOCTL_LAST;
+	gpu.dbg_gpu_ioctl_nr_last = NVGPU_DBG_GPU_IOCTL_LAST;
+	gpu.ioctl_channel_nr_last = NVGPU_IOCTL_CHANNEL_LAST;
+	gpu.as_ioctl_nr_last = NVGPU_AS_IOCTL_LAST;
+	gpu.event_ioctl_nr_last = NVGPU_EVENT_IOCTL_LAST;
+	gpu.gpu_va_bit_count = 40;
+
+	strlcpy(gpu.chipname, g->name, sizeof(gpu.chipname));
+	gpu.max_fbps_count = g->ops.gr.get_max_fbps_count(g);
+	gpu.fbp_en_mask = g->ops.gr.get_fbp_en_mask(g);
+	gpu.max_ltc_per_fbp = g->ops.gr.get_max_ltc_per_fbp(g);
+	gpu.max_lts_per_ltc = g->ops.gr.get_max_lts_per_ltc(g);
+	gpu.gr_compbit_store_base_hw = g->gr.compbit_store.base_hw;
+	gpu.gr_gobs_per_comptagline_per_slice =
+		g->gr.gobs_per_comptagline_per_slice;
+	gpu.num_ltc = g->ltc_count;
+	gpu.lts_per_ltc = g->gr.slices_per_ltc;
+	gpu.cbc_cache_line_size = g->gr.cacheline_size;
+	gpu.cbc_comptags_per_line = g->gr.comptags_per_cacheline;
+
+	if (g->ops.clk.get_maxrate)
+		gpu.max_freq = g->ops.clk.get_maxrate(g, CTRL_CLK_DOMAIN_GPCCLK);
+
+	gpu.local_video_memory_size = g->mm.vidmem.size;
+
+	gpu.pci_vendor_id = g->pci_vendor_id;
+	gpu.pci_device_id = g->pci_device_id;
+	gpu.pci_subsystem_vendor_id = g->pci_subsystem_vendor_id;
+	gpu.pci_subsystem_device_id = g->pci_subsystem_device_id;
+	gpu.pci_class = g->pci_class;
+	gpu.pci_revision = g->pci_revision;
+
+	nvgpu_set_preemption_mode_flags(g, &gpu);
 
 	if (request->gpu_characteristics_buf_size > 0) {
-		size_t write_size = sizeof(*pgpu);
+		size_t write_size = sizeof(gpu);
 
 		if (write_size > request->gpu_characteristics_buf_size)
 			write_size = request->gpu_characteristics_buf_size;
 
 		err = copy_to_user((void __user *)(uintptr_t)
 				   request->gpu_characteristics_buf_addr,
-				   pgpu, write_size);
+				   &gpu, write_size);
 	}
 
 	if (err == 0)
-		request->gpu_characteristics_buf_size = sizeof(*pgpu);
+		request->gpu_characteristics_buf_size = sizeof(gpu);
+
+	gk20a_idle(g);
 
 	return err;
 }
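
For context on how userspace consumes this IOCTL, below is a minimal sketch of the size handshake the handler above implements: the caller passes a buffer address and the size it can accept, the kernel copies at most that many bytes, and on success writes the full size of its own struct back into gpu_characteristics_buf_size so the caller can detect truncation. The device node path, uapi header, and ioctl macro name used here (/dev/nvhost-ctrl-gpu, <linux/nvgpu.h>, NVGPU_GPU_IOCTL_GET_CHARACTERISTICS) are assumptions for illustration and are not defined by this change.

/*
 * Hedged userspace sketch of the GET_CHARACTERISTICS size handshake.
 * Assumed, not part of this change: the ctrl device node is
 * /dev/nvhost-ctrl-gpu, the uapi header is <linux/nvgpu.h>, and the
 * ioctl macro is NVGPU_GPU_IOCTL_GET_CHARACTERISTICS.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/nvgpu.h>

int main(void)
{
	struct nvgpu_gpu_characteristics gpu;
	struct nvgpu_gpu_get_characteristics req;
	int fd = open("/dev/nvhost-ctrl-gpu", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&gpu, 0, sizeof(gpu));
	/* Tell the kernel how many bytes we can accept and where to put them. */
	req.gpu_characteristics_buf_size = sizeof(gpu);
	req.gpu_characteristics_buf_addr = (uintptr_t)&gpu;

	if (ioctl(fd, NVGPU_GPU_IOCTL_GET_CHARACTERISTICS, &req) < 0) {
		perror("ioctl");
		close(fd);
		return 1;
	}

	/*
	 * The handler writes back the full kernel-side struct size; a value
	 * larger than sizeof(gpu) means this binary was built against an
	 * older uapi and the copy was truncated to the size we asked for.
	 */
	printf("arch 0x%x impl 0x%x rev 0x%x (kernel struct size %llu)\n",
	       gpu.arch, gpu.impl, gpu.rev,
	       (unsigned long long)req.gpu_characteristics_buf_size);

	close(fd);
	return 0;
}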