From b42fb7ba26b565f93118fbdd9e17b42ee6144c5e Mon Sep 17 00:00:00 2001
From: Deepak Nibade
Date: Tue, 14 Nov 2017 06:43:28 -0800
Subject: gpu: nvgpu: move vgpu code to linux

Most of the VGPU code is Linux-specific but currently lives in common
code. So, until the VGPU code is properly abstracted and made
OS-independent, move all of the VGPU code to the Linux-specific
directory.

Handle the corresponding Makefile changes.

Update all #includes to reflect the new paths.

Add a GPL license to the newly added Linux files.

Jira NVGPU-387

Change-Id: Ic133e4c80e570bcc273f0dacf45283fefd678923
Signed-off-by: Deepak Nibade
Reviewed-on: https://git-master.nvidia.com/r/1599472
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom
Reviewed-by: mobile promotions
Tested-by: mobile promotions
---
 drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c | 338 ---------------------------
 1 file changed, 338 deletions(-)
 delete mode 100644 drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c

diff --git a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c
deleted file mode 100644
index 8a5130f6..00000000
--- a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c
+++ /dev/null
@@ -1,338 +0,0 @@
-/*
- * Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include <nvgpu/kmem.h>
-#include <nvgpu/dma.h>
-#include <nvgpu/bug.h>
-
-#include "vgpu/vgpu.h"
-#include "vgpu/gm20b/vgpu_gr_gm20b.h"
-
-#include "vgpu_gr_gp10b.h"
-
-#include <nvgpu/hw/gp10b/hw_gr_gp10b.h>
-
-void vgpu_gr_gp10b_free_gr_ctx(struct gk20a *g, struct vm_gk20a *vm,
-                               struct gr_ctx_desc *gr_ctx)
-{
-        struct tegra_vgpu_cmd_msg msg = {0};
-        struct tegra_vgpu_gr_ctx_params *p = &msg.params.gr_ctx;
-        int err;
-
-        gk20a_dbg_fn("");
-
-        if (!gr_ctx || !gr_ctx->mem.gpu_va)
-                return;
-
-        msg.cmd = TEGRA_VGPU_CMD_GR_CTX_FREE;
-        msg.handle = vgpu_get_handle(g);
-        p->gr_ctx_handle = gr_ctx->virt_ctx;
-        err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
-        WARN_ON(err || msg.ret);
-
-        __nvgpu_vm_free_va(vm, gr_ctx->mem.gpu_va, gmmu_page_size_kernel);
-
-        nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.pagepool_ctxsw_buffer);
-        nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.betacb_ctxsw_buffer);
-        nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.spill_ctxsw_buffer);
-        nvgpu_dma_unmap_free(vm, &gr_ctx->t18x.preempt_ctxsw_buffer);
-
-        nvgpu_kfree(g, gr_ctx);
-}
-
-int vgpu_gr_gp10b_alloc_gr_ctx(struct gk20a *g,
-                               struct gr_ctx_desc **__gr_ctx,
-                               struct vm_gk20a *vm,
-                               u32 class,
-                               u32 flags)
-{
-        struct gr_ctx_desc *gr_ctx;
-        u32 graphics_preempt_mode = 0;
-        u32 compute_preempt_mode = 0;
-        struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
-        int err;
-
-        gk20a_dbg_fn("");
-
-        err = vgpu_gr_alloc_gr_ctx(g, __gr_ctx, vm, class, flags);
-        if (err)
-                return err;
-
-        gr_ctx = *__gr_ctx;
-
-        if (flags & NVGPU_OBJ_CTX_FLAGS_SUPPORT_GFXP)
-                graphics_preempt_mode = NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP;
-        if (flags & NVGPU_OBJ_CTX_FLAGS_SUPPORT_CILP)
-                compute_preempt_mode = NVGPU_PREEMPTION_MODE_COMPUTE_CILP;
-
-        if (priv->constants.force_preempt_mode && !graphics_preempt_mode &&
-            !compute_preempt_mode) {
-                graphics_preempt_mode = g->ops.gr.is_valid_gfx_class(g, class) ?
-                        NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP : 0;
-                compute_preempt_mode =
-                        g->ops.gr.is_valid_compute_class(g, class) ?
-                        NVGPU_PREEMPTION_MODE_COMPUTE_CTA : 0;
-        }
-
-        if (graphics_preempt_mode || compute_preempt_mode) {
-                if (g->ops.gr.set_ctxsw_preemption_mode) {
-                        err = g->ops.gr.set_ctxsw_preemption_mode(g, gr_ctx, vm,
-                                class, graphics_preempt_mode, compute_preempt_mode);
-                        if (err) {
-                                nvgpu_err(g,
-                                        "set_ctxsw_preemption_mode failed");
-                                goto fail;
-                        }
-                } else {
-                        err = -ENOSYS;
-                        goto fail;
-                }
-        }
-
-        gk20a_dbg_fn("done");
-        return err;
-
-fail:
-        vgpu_gr_gp10b_free_gr_ctx(g, vm, gr_ctx);
-        return err;
-}
-
-int vgpu_gr_gp10b_set_ctxsw_preemption_mode(struct gk20a *g,
-                                struct gr_ctx_desc *gr_ctx,
-                                struct vm_gk20a *vm, u32 class,
-                                u32 graphics_preempt_mode,
-                                u32 compute_preempt_mode)
-{
-        struct tegra_vgpu_cmd_msg msg = {};
-        struct tegra_vgpu_gr_bind_ctxsw_buffers_params *p =
-                &msg.params.gr_bind_ctxsw_buffers;
-        int err = 0;
-
-        if (g->ops.gr.is_valid_gfx_class(g, class) &&
-            g->gr.t18x.ctx_vars.force_preemption_gfxp)
-                graphics_preempt_mode = NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP;
-
-        if (g->ops.gr.is_valid_compute_class(g, class) &&
-            g->gr.t18x.ctx_vars.force_preemption_cilp)
-                compute_preempt_mode = NVGPU_PREEMPTION_MODE_COMPUTE_CILP;
-
-        /* check for invalid combinations */
-        if ((graphics_preempt_mode == 0) && (compute_preempt_mode == 0))
-                return -EINVAL;
-
-        if ((graphics_preempt_mode == NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP) &&
-            (compute_preempt_mode == NVGPU_PREEMPTION_MODE_COMPUTE_CILP))
-                return -EINVAL;
-
-        /* set preemption modes */
-        switch (graphics_preempt_mode) {
-        case NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP:
-        {
-                u32 spill_size =
-                        gr_gpc0_swdx_rm_spill_buffer_size_256b_default_v() *
-                        gr_gpc0_swdx_rm_spill_buffer_size_256b_byte_granularity_v();
-                u32 pagepool_size = g->ops.gr.pagepool_default_size(g) *
-                        gr_scc_pagepool_total_pages_byte_granularity_v();
-                u32 betacb_size = g->gr.attrib_cb_default_size +
-                        (gr_gpc0_ppc0_cbm_beta_cb_size_v_gfxp_v() -
-                         gr_gpc0_ppc0_cbm_beta_cb_size_v_default_v());
-                u32 attrib_cb_size = (betacb_size + g->gr.alpha_cb_size) *
-                        gr_gpc0_ppc0_cbm_beta_cb_size_v_granularity_v() *
-                        g->gr.max_tpc_count;
-                struct nvgpu_mem *desc;
-
-                attrib_cb_size = ALIGN(attrib_cb_size, 128);
-
-                gk20a_dbg_info("gfxp context preempt size=%d",
-                        g->gr.t18x.ctx_vars.preempt_image_size);
-                gk20a_dbg_info("gfxp context spill size=%d", spill_size);
-                gk20a_dbg_info("gfxp context pagepool size=%d", pagepool_size);
-                gk20a_dbg_info("gfxp context attrib cb size=%d",
-                        attrib_cb_size);
-
-                err = gr_gp10b_alloc_buffer(vm,
-                        g->gr.t18x.ctx_vars.preempt_image_size,
-                        &gr_ctx->t18x.preempt_ctxsw_buffer);
-                if (err) {
-                        err = -ENOMEM;
-                        goto fail;
-                }
-                desc = &gr_ctx->t18x.preempt_ctxsw_buffer;
-                p->gpu_va[TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_MAIN] = desc->gpu_va;
-                p->size[TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_MAIN] = desc->size;
-
-                err = gr_gp10b_alloc_buffer(vm,
-                        spill_size,
-                        &gr_ctx->t18x.spill_ctxsw_buffer);
-                if (err) {
-                        err = -ENOMEM;
-                        goto fail;
-                }
-                desc = &gr_ctx->t18x.spill_ctxsw_buffer;
-                p->gpu_va[TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_SPILL] = desc->gpu_va;
-                p->size[TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_SPILL] = desc->size;
-
-                err = gr_gp10b_alloc_buffer(vm,
-                        pagepool_size,
-                        &gr_ctx->t18x.pagepool_ctxsw_buffer);
-                if (err) {
-                        err = -ENOMEM;
-                        goto fail;
-                }
-                desc = &gr_ctx->t18x.pagepool_ctxsw_buffer;
-                p->gpu_va[TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_PAGEPOOL] =
-                        desc->gpu_va;
-                p->size[TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_PAGEPOOL] = desc->size;
-
-                err = gr_gp10b_alloc_buffer(vm,
-                        attrib_cb_size,
-                        &gr_ctx->t18x.betacb_ctxsw_buffer);
-                if (err) {
-                        err = -ENOMEM;
-                        goto fail;
-                }
-
-                desc = &gr_ctx->t18x.betacb_ctxsw_buffer;
-                p->gpu_va[TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_BETACB] =
-                        desc->gpu_va;
-                p->size[TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_BETACB] = desc->size;
-
-                gr_ctx->graphics_preempt_mode = NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP;
-                p->mode = TEGRA_VGPU_GR_CTXSW_PREEMPTION_MODE_GFX_GFXP;
-                break;
-        }
-        case NVGPU_PREEMPTION_MODE_GRAPHICS_WFI:
-                gr_ctx->graphics_preempt_mode = graphics_preempt_mode;
-                break;
-
-        default:
-                break;
-        }
-
-        if (g->ops.gr.is_valid_compute_class(g, class)) {
-                switch (compute_preempt_mode) {
-                case NVGPU_PREEMPTION_MODE_COMPUTE_WFI:
-                        gr_ctx->compute_preempt_mode =
-                                NVGPU_PREEMPTION_MODE_COMPUTE_WFI;
-                        p->mode = TEGRA_VGPU_GR_CTXSW_PREEMPTION_MODE_WFI;
-                        break;
-                case NVGPU_PREEMPTION_MODE_COMPUTE_CTA:
-                        gr_ctx->compute_preempt_mode =
-                                NVGPU_PREEMPTION_MODE_COMPUTE_CTA;
-                        p->mode =
-                                TEGRA_VGPU_GR_CTXSW_PREEMPTION_MODE_COMPUTE_CTA;
-                        break;
-                case NVGPU_PREEMPTION_MODE_COMPUTE_CILP:
-                        gr_ctx->compute_preempt_mode =
-                                NVGPU_PREEMPTION_MODE_COMPUTE_CILP;
-                        p->mode =
-                                TEGRA_VGPU_GR_CTXSW_PREEMPTION_MODE_COMPUTE_CILP;
-                        break;
-                default:
-                        break;
-                }
-        }
-
-        if (gr_ctx->graphics_preempt_mode || gr_ctx->compute_preempt_mode) {
-                msg.cmd = TEGRA_VGPU_CMD_CHANNEL_BIND_GR_CTXSW_BUFFERS;
-                msg.handle = vgpu_get_handle(g);
-                p->gr_ctx_handle = gr_ctx->virt_ctx;
-                err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
-                if (err || msg.ret) {
-                        err = -ENOMEM;
-                        goto fail;
-                }
-        }
-
-        return err;
-
-fail:
-        nvgpu_err(g, "%s failed %d", __func__, err);
-        return err;
-}
-
-int vgpu_gr_gp10b_set_preemption_mode(struct channel_gk20a *ch,
-                                u32 graphics_preempt_mode,
-                                u32 compute_preempt_mode)
-{
-        struct gr_ctx_desc *gr_ctx = ch->ch_ctx.gr_ctx;
-        struct gk20a *g = ch->g;
-        struct tsg_gk20a *tsg;
-        struct vm_gk20a *vm;
-        u32 class;
-        int err;
-
-        class = ch->obj_class;
-        if (!class)
-                return -EINVAL;
-
-        /* skip setting anything if both modes are already set */
-        if (graphics_preempt_mode &&
-            (graphics_preempt_mode == gr_ctx->graphics_preempt_mode))
-                graphics_preempt_mode = 0;
-
-        if (compute_preempt_mode &&
-            (compute_preempt_mode == gr_ctx->compute_preempt_mode))
-                compute_preempt_mode = 0;
-
-        if (graphics_preempt_mode == 0 && compute_preempt_mode == 0)
-                return 0;
-
-        if (gk20a_is_channel_marked_as_tsg(ch)) {
-                tsg = &g->fifo.tsg[ch->tsgid];
-                vm = tsg->vm;
-        } else {
-                vm = ch->vm;
-        }
-
-        if (g->ops.gr.set_ctxsw_preemption_mode) {
-                err = g->ops.gr.set_ctxsw_preemption_mode(g, gr_ctx, vm, class,
-                                graphics_preempt_mode,
-                                compute_preempt_mode);
-                if (err) {
-                        nvgpu_err(g, "set_ctxsw_preemption_mode failed");
-                        return err;
-                }
-        } else {
-                err = -ENOSYS;
-        }
-
-        return err;
-}
-
-int vgpu_gr_gp10b_init_ctx_state(struct gk20a *g)
-{
-        struct vgpu_priv_data *priv = vgpu_get_priv_data(g);
-        int err;
-
-        gk20a_dbg_fn("");
-
-        err = vgpu_gr_init_ctx_state(g);
-        if (err)
-                return err;
-
-        g->gr.t18x.ctx_vars.preempt_image_size =
-                priv->constants.preempt_ctx_size;
-        if (!g->gr.t18x.ctx_vars.preempt_image_size)
-                return -EINVAL;
-
-        return 0;
-}
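
The commit message describes a mechanical relocation: move the vgpu sources under
the Linux-specific directory, adjust the Makefile, and point every #include at the
new location. As a rough sketch only (not taken from this patch; the destination
directory common/linux/vgpu/ and the Makefile variable shown are assumptions for
illustration), the #include and Makefile updates it describes would look along
these lines for the file deleted above:

-#include "vgpu/gp10b/vgpu_gr_gp10b.h"
+#include "common/linux/vgpu/gp10b/vgpu_gr_gp10b.h"

-nvgpu-y += vgpu/gp10b/vgpu_gr_gp10b.o
+nvgpu-y += common/linux/vgpu/gp10b/vgpu_gr_gp10b.o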