From c47eab005ad9a26a36d2c7ca1595c790ff1bc40d Mon Sep 17 00:00:00 2001 From: Konsta Holtta Date: Wed, 12 Sep 2018 12:40:10 +0300 Subject: gpu: nvgpu: move tsg code to common tsg_gk20a.c doesn't depend on any specific hardware, so move it to the common directory. Rename the posix tsg file to posix-tsg.c. Jira NVGPU-967 Change-Id: I6e8908a8f6cf43132db8dffe3a99e424e4f764b1 Signed-off-by: Konsta Holtta Reviewed-on: https://git-master.nvidia.com/r/1821509 Reviewed-by: mobile promotions Tested-by: mobile promotions --- drivers/gpu/nvgpu/Makefile | 2 +- drivers/gpu/nvgpu/Makefile.sources | 4 +- drivers/gpu/nvgpu/common/fifo/tsg.c | 441 ++++++++++++++++++++++++++ drivers/gpu/nvgpu/gk20a/tsg_gk20a.c | 442 --------------------------- drivers/gpu/nvgpu/gk20a/tsg_gk20a.h | 128 -------- drivers/gpu/nvgpu/gm20b/hal_gm20b.c | 2 +- drivers/gpu/nvgpu/gp10b/hal_gp10b.c | 2 +- drivers/gpu/nvgpu/include/nvgpu/gk20a.h | 2 +- drivers/gpu/nvgpu/include/nvgpu/tsg.h | 128 ++++++++ drivers/gpu/nvgpu/os/linux/ioctl_dbg.c | 2 +- drivers/gpu/nvgpu/os/linux/ioctl_tsg.c | 2 +- drivers/gpu/nvgpu/os/linux/vgpu/vgpu_linux.c | 1 - drivers/gpu/nvgpu/os/posix/posix-tsg.c | 28 ++ drivers/gpu/nvgpu/os/posix/tsg.c | 28 -- drivers/gpu/nvgpu/vgpu/gr_vgpu.c | 2 +- drivers/gpu/nvgpu/vgpu/tsg_vgpu.c | 5 +- 16 files changed, 609 insertions(+), 610 deletions(-) create mode 100644 drivers/gpu/nvgpu/common/fifo/tsg.c delete mode 100644 drivers/gpu/nvgpu/gk20a/tsg_gk20a.c delete mode 100644 drivers/gpu/nvgpu/gk20a/tsg_gk20a.h create mode 100644 drivers/gpu/nvgpu/include/nvgpu/tsg.h create mode 100644 drivers/gpu/nvgpu/os/posix/posix-tsg.c delete mode 100644 drivers/gpu/nvgpu/os/posix/tsg.c
diff --git a/drivers/gpu/nvgpu/Makefile b/drivers/gpu/nvgpu/Makefile index bf02affe..4e08cd51 100644 --- a/drivers/gpu/nvgpu/Makefile +++ b/drivers/gpu/nvgpu/Makefile @@ -224,6 +224,7 @@ nvgpu-y += \ common/sim_pci.o \ common/fifo/channel.o \ common/fifo/submit.o \ + common/fifo/tsg.o \ common/ecc.o \ common/ce2.o \ gk20a/gk20a.o \ @@ -238,7 +239,6 @@ nvgpu-y += \ gk20a/fence_gk20a.o \ gk20a/gr_ctx_gk20a_sim.o \ gk20a/gr_ctx_gk20a.o \ - gk20a/tsg_gk20a.o \ gk20a/fecs_trace_gk20a.o \ gm20b/hal_gm20b.o \ gm20b/gr_gm20b.o \
diff --git a/drivers/gpu/nvgpu/Makefile.sources b/drivers/gpu/nvgpu/Makefile.sources index 4e67434d..4283a01c 100644 --- a/drivers/gpu/nvgpu/Makefile.sources +++ b/drivers/gpu/nvgpu/Makefile.sources @@ -40,7 +40,7 @@ srcs := os/posix/nvgpu.c \ os/posix/fuse.c \ os/posix/clk_arb.c \ os/posix/posix-channel.c \ - os/posix/tsg.c \ + os/posix/posix-tsg.c \ os/posix/nvlink.c \ os/posix/lock.c \ os/posix/stubs.c \ @@ -109,6 +109,7 @@ srcs := os/posix/nvgpu.c \ common/clock_gating/gv100_gating_reglist.c \ common/fifo/channel.c \ common/fifo/submit.c \ + common/fifo/tsg.c \ common/mc/mc.c \ common/mc/mc_gm20b.c \ common/mc/mc_gp10b.c \ @@ -158,7 +159,6 @@ srcs := os/posix/nvgpu.c \ gk20a/fence_gk20a.c \ gk20a/gr_ctx_gk20a_sim.c \ gk20a/gr_ctx_gk20a.c \ - gk20a/tsg_gk20a.c \ gm20b/hal_gm20b.c \ gm20b/gr_gm20b.c \ gm20b/clk_gm20b.c \
diff --git a/drivers/gpu/nvgpu/common/fifo/tsg.c b/drivers/gpu/nvgpu/common/fifo/tsg.c new file mode 100644 index 00000000..0892e8bf --- /dev/null +++ b/drivers/gpu/nvgpu/common/fifo/tsg.c @@ -0,0 +1,441 @@ +/* + * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include + +bool gk20a_is_channel_marked_as_tsg(struct channel_gk20a *ch) +{ + return !(ch->tsgid == NVGPU_INVALID_TSG_ID); +} + +int gk20a_enable_tsg(struct tsg_gk20a *tsg) +{ + struct gk20a *g = tsg->g; + struct channel_gk20a *ch; + bool is_next, is_ctx_reload; + + gk20a_fifo_disable_tsg_sched(g, tsg); + + /* + * Due to h/w bug that exists in Maxwell and Pascal, + * we first need to enable all channels with NEXT and CTX_RELOAD set, + * and then rest of the channels should be enabled + */ + nvgpu_rwsem_down_read(&tsg->ch_list_lock); + nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) { + is_next = gk20a_fifo_channel_status_is_next(g, ch->chid); + is_ctx_reload = gk20a_fifo_channel_status_is_ctx_reload(g, ch->chid); + + if (is_next || is_ctx_reload) { + g->ops.fifo.enable_channel(ch); + } + } + + nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) { + is_next = gk20a_fifo_channel_status_is_next(g, ch->chid); + is_ctx_reload = gk20a_fifo_channel_status_is_ctx_reload(g, ch->chid); + + if (is_next || is_ctx_reload) { + continue; + } + + g->ops.fifo.enable_channel(ch); + } + nvgpu_rwsem_up_read(&tsg->ch_list_lock); + + gk20a_fifo_enable_tsg_sched(g, tsg); + + return 0; +} + +int gk20a_disable_tsg(struct tsg_gk20a *tsg) +{ + struct gk20a *g = tsg->g; + struct channel_gk20a *ch; + + nvgpu_rwsem_down_read(&tsg->ch_list_lock); + nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) { + g->ops.fifo.disable_channel(ch); + } + nvgpu_rwsem_up_read(&tsg->ch_list_lock); + + return 0; +} + +static bool gk20a_is_channel_active(struct gk20a *g, struct channel_gk20a *ch) +{ + struct fifo_gk20a *f = &g->fifo; + struct fifo_runlist_info_gk20a *runlist; + unsigned int i; + + for (i = 0; i < f->max_runlists; ++i) { + runlist = &f->runlist_info[i]; + if (test_bit(ch->chid, runlist->active_channels)) { + return true; + } + } + + return false; +} + +/* + * API to mark channel as part of TSG + * + * Note that channel is not runnable when we bind it to TSG + */ +int gk20a_tsg_bind_channel(struct tsg_gk20a *tsg, + struct channel_gk20a *ch) +{ + struct gk20a *g = ch->g; + + nvgpu_log_fn(g, " "); + + /* check if channel is already bound to some TSG */ + if (gk20a_is_channel_marked_as_tsg(ch)) { + return -EINVAL; + } + + /* channel cannot be bound to TSG if it is already active */ + if (gk20a_is_channel_active(tsg->g, ch)) { + return 
-EINVAL; + } + + ch->tsgid = tsg->tsgid; + + /* all the channel part of TSG should need to be same runlist_id */ + if (tsg->runlist_id == FIFO_INVAL_TSG_ID) { + tsg->runlist_id = ch->runlist_id; + } else if (tsg->runlist_id != ch->runlist_id) { + nvgpu_err(tsg->g, + "Error: TSG channel should be share same runlist ch[%d] tsg[%d]", + ch->runlist_id, tsg->runlist_id); + return -EINVAL; + } + + nvgpu_rwsem_down_write(&tsg->ch_list_lock); + nvgpu_list_add_tail(&ch->ch_entry, &tsg->ch_list); + nvgpu_rwsem_up_write(&tsg->ch_list_lock); + + nvgpu_ref_get(&tsg->refcount); + + nvgpu_log(g, gpu_dbg_fn, "BIND tsg:%d channel:%d\n", + tsg->tsgid, ch->chid); + + nvgpu_log_fn(g, "done"); + return 0; +} + +int gk20a_tsg_unbind_channel(struct channel_gk20a *ch) +{ + struct gk20a *g = ch->g; + struct tsg_gk20a *tsg = &g->fifo.tsg[ch->tsgid]; + int err; + + err = g->ops.fifo.tsg_unbind_channel(ch); + if (err) { + nvgpu_err(g, "Channel %d unbind failed, tearing down TSG %d", + ch->chid, tsg->tsgid); + + gk20a_fifo_abort_tsg(ch->g, ch->tsgid, true); + /* If channel unbind fails, channel is still part of runlist */ + channel_gk20a_update_runlist(ch, false); + + nvgpu_rwsem_down_write(&tsg->ch_list_lock); + nvgpu_list_del(&ch->ch_entry); + nvgpu_rwsem_up_write(&tsg->ch_list_lock); + } + + nvgpu_ref_put(&tsg->refcount, gk20a_tsg_release); + ch->tsgid = NVGPU_INVALID_TSG_ID; + + nvgpu_log(g, gpu_dbg_fn, "UNBIND tsg:%d channel:%d\n", + tsg->tsgid, ch->chid); + + return 0; +} + +int gk20a_init_tsg_support(struct gk20a *g, u32 tsgid) +{ + struct tsg_gk20a *tsg = NULL; + int err; + + if (tsgid >= g->fifo.num_channels) { + return -EINVAL; + } + + tsg = &g->fifo.tsg[tsgid]; + + tsg->in_use = false; + tsg->tsgid = tsgid; + + nvgpu_init_list_node(&tsg->ch_list); + nvgpu_rwsem_init(&tsg->ch_list_lock); + + nvgpu_init_list_node(&tsg->event_id_list); + err = nvgpu_mutex_init(&tsg->event_id_list_lock); + if (err) { + tsg->in_use = true; /* make this TSG unusable */ + return err; + } + + return 0; +} + +int gk20a_tsg_set_runlist_interleave(struct tsg_gk20a *tsg, u32 level) +{ + struct gk20a *g = tsg->g; + int ret; + + nvgpu_log(g, gpu_dbg_sched, "tsgid=%u interleave=%u", tsg->tsgid, level); + + switch (level) { + case NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_LOW: + case NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_MEDIUM: + case NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_HIGH: + ret = g->ops.fifo.set_runlist_interleave(g, tsg->tsgid, + 0, level); + if (!ret) { + tsg->interleave_level = level; + } + break; + default: + ret = -EINVAL; + break; + } + + return ret ? 
ret : g->ops.fifo.update_runlist(g, tsg->runlist_id, ~0, true, true); +} + +int gk20a_tsg_set_timeslice(struct tsg_gk20a *tsg, u32 timeslice) +{ + struct gk20a *g = tsg->g; + + nvgpu_log(g, gpu_dbg_sched, "tsgid=%u timeslice=%u us", tsg->tsgid, timeslice); + + return g->ops.fifo.tsg_set_timeslice(tsg, timeslice); +} + +u32 gk20a_tsg_get_timeslice(struct tsg_gk20a *tsg) +{ + struct gk20a *g = tsg->g; + + if (!tsg->timeslice_us) { + return g->ops.fifo.default_timeslice_us(g); + } + + return tsg->timeslice_us; +} + +static void release_used_tsg(struct fifo_gk20a *f, struct tsg_gk20a *tsg) +{ + nvgpu_mutex_acquire(&f->tsg_inuse_mutex); + f->tsg[tsg->tsgid].in_use = false; + nvgpu_mutex_release(&f->tsg_inuse_mutex); +} + +static struct tsg_gk20a *gk20a_tsg_acquire_unused_tsg(struct fifo_gk20a *f) +{ + struct tsg_gk20a *tsg = NULL; + unsigned int tsgid; + + nvgpu_mutex_acquire(&f->tsg_inuse_mutex); + for (tsgid = 0; tsgid < f->num_channels; tsgid++) { + if (!f->tsg[tsgid].in_use) { + f->tsg[tsgid].in_use = true; + tsg = &f->tsg[tsgid]; + break; + } + } + nvgpu_mutex_release(&f->tsg_inuse_mutex); + + return tsg; +} + +struct tsg_gk20a *gk20a_tsg_open(struct gk20a *g, pid_t pid) +{ + struct tsg_gk20a *tsg; + int err; + + tsg = gk20a_tsg_acquire_unused_tsg(&g->fifo); + if (tsg == NULL) { + return NULL; + } + + /* we need to allocate this after g->ops.gr.init_fs_state() since + * we initialize gr->no_of_sm in this function + */ + if (g->gr.no_of_sm == 0U) { + nvgpu_err(g, "no_of_sm %d not set, failed allocation", + g->gr.no_of_sm); + return NULL; + } + + err = gk20a_tsg_alloc_sm_error_states_mem(g, tsg, g->gr.no_of_sm); + if (err != 0) { + return NULL; + } + + tsg->g = g; + tsg->num_active_channels = 0; + nvgpu_ref_init(&tsg->refcount); + + tsg->vm = NULL; + tsg->interleave_level = NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_LOW; + tsg->timeslice_us = 0; + tsg->timeslice_timeout = 0; + tsg->timeslice_scale = 0; + tsg->runlist_id = ~0; + tsg->tgid = pid; + tsg->sm_exception_mask_type = NVGPU_SM_EXCEPTION_TYPE_MASK_NONE; + + if (g->ops.fifo.init_eng_method_buffers) { + g->ops.fifo.init_eng_method_buffers(g, tsg); + } + + if (g->ops.fifo.tsg_open) { + err = g->ops.fifo.tsg_open(tsg); + if (err != 0) { + nvgpu_err(g, "tsg %d fifo open failed %d", + tsg->tsgid, err); + goto clean_up; + } + } + + nvgpu_log(g, gpu_dbg_fn, "tsg opened %d\n", tsg->tsgid); + + return tsg; + +clean_up: + + if(tsg->sm_error_states != NULL) { + nvgpu_kfree(g, tsg->sm_error_states); + tsg->sm_error_states = NULL; + } + + nvgpu_ref_put(&tsg->refcount, gk20a_tsg_release); + return NULL; +} + +void gk20a_tsg_release(struct nvgpu_ref *ref) +{ + struct tsg_gk20a *tsg = container_of(ref, struct tsg_gk20a, refcount); + struct gk20a *g = tsg->g; + struct gk20a_event_id_data *event_id_data, *event_id_data_temp; + + if (g->ops.fifo.tsg_release != NULL) { + g->ops.fifo.tsg_release(tsg); + } + + if (nvgpu_mem_is_valid(&tsg->gr_ctx.mem)) { + gr_gk20a_free_tsg_gr_ctx(tsg); + } + + if (g->ops.fifo.deinit_eng_method_buffers != NULL) { + g->ops.fifo.deinit_eng_method_buffers(g, tsg); + } + + if (tsg->vm != NULL) { + nvgpu_vm_put(tsg->vm); + tsg->vm = NULL; + } + + if(tsg->sm_error_states != NULL) { + nvgpu_kfree(g, tsg->sm_error_states); + tsg->sm_error_states = NULL; + } + + /* unhook all events created on this TSG */ + nvgpu_mutex_acquire(&tsg->event_id_list_lock); + nvgpu_list_for_each_entry_safe(event_id_data, event_id_data_temp, + &tsg->event_id_list, + gk20a_event_id_data, + event_id_node) { + nvgpu_list_del(&event_id_data->event_id_node); + } + 
nvgpu_mutex_release(&tsg->event_id_list_lock); + + release_used_tsg(&g->fifo, tsg); + + tsg->runlist_id = ~0; + tsg->sm_exception_mask_type = NVGPU_SM_EXCEPTION_TYPE_MASK_NONE; + + nvgpu_log(g, gpu_dbg_fn, "tsg released %d\n", tsg->tsgid); +} + +struct tsg_gk20a *tsg_gk20a_from_ch(struct channel_gk20a *ch) +{ + struct tsg_gk20a *tsg = NULL; + + if (gk20a_is_channel_marked_as_tsg(ch)) { + struct gk20a *g = ch->g; + struct fifo_gk20a *f = &g->fifo; + tsg = &f->tsg[ch->tsgid]; + } + + return tsg; +} + +int gk20a_tsg_alloc_sm_error_states_mem(struct gk20a *g, + struct tsg_gk20a *tsg, + u32 num_sm) +{ + int err = 0; + + if (tsg->sm_error_states != NULL) { + return err; + } + + tsg->sm_error_states = nvgpu_kzalloc(g, + sizeof(struct nvgpu_tsg_sm_error_state) + * num_sm); + if (tsg->sm_error_states == NULL) { + nvgpu_err(g, "sm_error_states mem allocation failed"); + err = -ENOMEM; + } + + return err; +} + +void gk20a_tsg_update_sm_error_state_locked(struct tsg_gk20a *tsg, + u32 sm_id, + struct nvgpu_tsg_sm_error_state *sm_error_state) +{ + struct nvgpu_tsg_sm_error_state *tsg_sm_error_states; + + tsg_sm_error_states = tsg->sm_error_states + sm_id; + + tsg_sm_error_states->hww_global_esr = + sm_error_state->hww_global_esr; + tsg_sm_error_states->hww_warp_esr = + sm_error_state->hww_warp_esr; + tsg_sm_error_states->hww_warp_esr_pc = + sm_error_state->hww_warp_esr_pc; + tsg_sm_error_states->hww_global_esr_report_mask = + sm_error_state->hww_global_esr_report_mask; + tsg_sm_error_states->hww_warp_esr_report_mask = + sm_error_state->hww_warp_esr_report_mask; +} diff --git a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c deleted file mode 100644 index 885ce172..00000000 --- a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c +++ /dev/null @@ -1,442 +0,0 @@ -/* - * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include -#include -#include -#include - -#include "gk20a.h" -#include "tsg_gk20a.h" - -bool gk20a_is_channel_marked_as_tsg(struct channel_gk20a *ch) -{ - return !(ch->tsgid == NVGPU_INVALID_TSG_ID); -} - -int gk20a_enable_tsg(struct tsg_gk20a *tsg) -{ - struct gk20a *g = tsg->g; - struct channel_gk20a *ch; - bool is_next, is_ctx_reload; - - gk20a_fifo_disable_tsg_sched(g, tsg); - - /* - * Due to h/w bug that exists in Maxwell and Pascal, - * we first need to enable all channels with NEXT and CTX_RELOAD set, - * and then rest of the channels should be enabled - */ - nvgpu_rwsem_down_read(&tsg->ch_list_lock); - nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) { - is_next = gk20a_fifo_channel_status_is_next(g, ch->chid); - is_ctx_reload = gk20a_fifo_channel_status_is_ctx_reload(g, ch->chid); - - if (is_next || is_ctx_reload) { - g->ops.fifo.enable_channel(ch); - } - } - - nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) { - is_next = gk20a_fifo_channel_status_is_next(g, ch->chid); - is_ctx_reload = gk20a_fifo_channel_status_is_ctx_reload(g, ch->chid); - - if (is_next || is_ctx_reload) { - continue; - } - - g->ops.fifo.enable_channel(ch); - } - nvgpu_rwsem_up_read(&tsg->ch_list_lock); - - gk20a_fifo_enable_tsg_sched(g, tsg); - - return 0; -} - -int gk20a_disable_tsg(struct tsg_gk20a *tsg) -{ - struct gk20a *g = tsg->g; - struct channel_gk20a *ch; - - nvgpu_rwsem_down_read(&tsg->ch_list_lock); - nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) { - g->ops.fifo.disable_channel(ch); - } - nvgpu_rwsem_up_read(&tsg->ch_list_lock); - - return 0; -} - -static bool gk20a_is_channel_active(struct gk20a *g, struct channel_gk20a *ch) -{ - struct fifo_gk20a *f = &g->fifo; - struct fifo_runlist_info_gk20a *runlist; - unsigned int i; - - for (i = 0; i < f->max_runlists; ++i) { - runlist = &f->runlist_info[i]; - if (test_bit(ch->chid, runlist->active_channels)) { - return true; - } - } - - return false; -} - -/* - * API to mark channel as part of TSG - * - * Note that channel is not runnable when we bind it to TSG - */ -int gk20a_tsg_bind_channel(struct tsg_gk20a *tsg, - struct channel_gk20a *ch) -{ - struct gk20a *g = ch->g; - - nvgpu_log_fn(g, " "); - - /* check if channel is already bound to some TSG */ - if (gk20a_is_channel_marked_as_tsg(ch)) { - return -EINVAL; - } - - /* channel cannot be bound to TSG if it is already active */ - if (gk20a_is_channel_active(tsg->g, ch)) { - return -EINVAL; - } - - ch->tsgid = tsg->tsgid; - - /* all the channel part of TSG should need to be same runlist_id */ - if (tsg->runlist_id == FIFO_INVAL_TSG_ID) { - tsg->runlist_id = ch->runlist_id; - } else if (tsg->runlist_id != ch->runlist_id) { - nvgpu_err(tsg->g, - "Error: TSG channel should be share same runlist ch[%d] tsg[%d]", - ch->runlist_id, tsg->runlist_id); - return -EINVAL; - } - - nvgpu_rwsem_down_write(&tsg->ch_list_lock); - nvgpu_list_add_tail(&ch->ch_entry, &tsg->ch_list); - nvgpu_rwsem_up_write(&tsg->ch_list_lock); - - nvgpu_ref_get(&tsg->refcount); - - nvgpu_log(g, gpu_dbg_fn, "BIND tsg:%d channel:%d\n", - tsg->tsgid, ch->chid); - - nvgpu_log_fn(g, "done"); - return 0; -} - -int gk20a_tsg_unbind_channel(struct channel_gk20a *ch) -{ - struct gk20a *g = ch->g; - struct tsg_gk20a *tsg = &g->fifo.tsg[ch->tsgid]; - int err; - - err = g->ops.fifo.tsg_unbind_channel(ch); - if (err) { - nvgpu_err(g, "Channel %d unbind failed, tearing down TSG %d", - ch->chid, tsg->tsgid); - - gk20a_fifo_abort_tsg(ch->g, ch->tsgid, true); - /* If channel unbind 
fails, channel is still part of runlist */ - channel_gk20a_update_runlist(ch, false); - - nvgpu_rwsem_down_write(&tsg->ch_list_lock); - nvgpu_list_del(&ch->ch_entry); - nvgpu_rwsem_up_write(&tsg->ch_list_lock); - } - - nvgpu_ref_put(&tsg->refcount, gk20a_tsg_release); - ch->tsgid = NVGPU_INVALID_TSG_ID; - - nvgpu_log(g, gpu_dbg_fn, "UNBIND tsg:%d channel:%d\n", - tsg->tsgid, ch->chid); - - return 0; -} - -int gk20a_init_tsg_support(struct gk20a *g, u32 tsgid) -{ - struct tsg_gk20a *tsg = NULL; - int err; - - if (tsgid >= g->fifo.num_channels) { - return -EINVAL; - } - - tsg = &g->fifo.tsg[tsgid]; - - tsg->in_use = false; - tsg->tsgid = tsgid; - - nvgpu_init_list_node(&tsg->ch_list); - nvgpu_rwsem_init(&tsg->ch_list_lock); - - nvgpu_init_list_node(&tsg->event_id_list); - err = nvgpu_mutex_init(&tsg->event_id_list_lock); - if (err) { - tsg->in_use = true; /* make this TSG unusable */ - return err; - } - - return 0; -} - -int gk20a_tsg_set_runlist_interleave(struct tsg_gk20a *tsg, u32 level) -{ - struct gk20a *g = tsg->g; - int ret; - - nvgpu_log(g, gpu_dbg_sched, "tsgid=%u interleave=%u", tsg->tsgid, level); - - switch (level) { - case NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_LOW: - case NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_MEDIUM: - case NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_HIGH: - ret = g->ops.fifo.set_runlist_interleave(g, tsg->tsgid, - 0, level); - if (!ret) { - tsg->interleave_level = level; - } - break; - default: - ret = -EINVAL; - break; - } - - return ret ? ret : g->ops.fifo.update_runlist(g, tsg->runlist_id, ~0, true, true); -} - -int gk20a_tsg_set_timeslice(struct tsg_gk20a *tsg, u32 timeslice) -{ - struct gk20a *g = tsg->g; - - nvgpu_log(g, gpu_dbg_sched, "tsgid=%u timeslice=%u us", tsg->tsgid, timeslice); - - return g->ops.fifo.tsg_set_timeslice(tsg, timeslice); -} - -u32 gk20a_tsg_get_timeslice(struct tsg_gk20a *tsg) -{ - struct gk20a *g = tsg->g; - - if (!tsg->timeslice_us) { - return g->ops.fifo.default_timeslice_us(g); - } - - return tsg->timeslice_us; -} - -static void release_used_tsg(struct fifo_gk20a *f, struct tsg_gk20a *tsg) -{ - nvgpu_mutex_acquire(&f->tsg_inuse_mutex); - f->tsg[tsg->tsgid].in_use = false; - nvgpu_mutex_release(&f->tsg_inuse_mutex); -} - -static struct tsg_gk20a *gk20a_tsg_acquire_unused_tsg(struct fifo_gk20a *f) -{ - struct tsg_gk20a *tsg = NULL; - unsigned int tsgid; - - nvgpu_mutex_acquire(&f->tsg_inuse_mutex); - for (tsgid = 0; tsgid < f->num_channels; tsgid++) { - if (!f->tsg[tsgid].in_use) { - f->tsg[tsgid].in_use = true; - tsg = &f->tsg[tsgid]; - break; - } - } - nvgpu_mutex_release(&f->tsg_inuse_mutex); - - return tsg; -} - -struct tsg_gk20a *gk20a_tsg_open(struct gk20a *g, pid_t pid) -{ - struct tsg_gk20a *tsg; - int err; - - tsg = gk20a_tsg_acquire_unused_tsg(&g->fifo); - if (tsg == NULL) { - return NULL; - } - - /* we need to allocate this after g->ops.gr.init_fs_state() since - * we initialize gr->no_of_sm in this function - */ - if (g->gr.no_of_sm == 0U) { - nvgpu_err(g, "no_of_sm %d not set, failed allocation", - g->gr.no_of_sm); - return NULL; - } - - err = gk20a_tsg_alloc_sm_error_states_mem(g, tsg, g->gr.no_of_sm); - if (err != 0) { - return NULL; - } - - tsg->g = g; - tsg->num_active_channels = 0; - nvgpu_ref_init(&tsg->refcount); - - tsg->vm = NULL; - tsg->interleave_level = NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_LOW; - tsg->timeslice_us = 0; - tsg->timeslice_timeout = 0; - tsg->timeslice_scale = 0; - tsg->runlist_id = ~0; - tsg->tgid = pid; - tsg->sm_exception_mask_type = NVGPU_SM_EXCEPTION_TYPE_MASK_NONE; - - if 
(g->ops.fifo.init_eng_method_buffers) { - g->ops.fifo.init_eng_method_buffers(g, tsg); - } - - if (g->ops.fifo.tsg_open) { - err = g->ops.fifo.tsg_open(tsg); - if (err != 0) { - nvgpu_err(g, "tsg %d fifo open failed %d", - tsg->tsgid, err); - goto clean_up; - } - } - - nvgpu_log(g, gpu_dbg_fn, "tsg opened %d\n", tsg->tsgid); - - return tsg; - -clean_up: - - if(tsg->sm_error_states != NULL) { - nvgpu_kfree(g, tsg->sm_error_states); - tsg->sm_error_states = NULL; - } - - nvgpu_ref_put(&tsg->refcount, gk20a_tsg_release); - return NULL; -} - -void gk20a_tsg_release(struct nvgpu_ref *ref) -{ - struct tsg_gk20a *tsg = container_of(ref, struct tsg_gk20a, refcount); - struct gk20a *g = tsg->g; - struct gk20a_event_id_data *event_id_data, *event_id_data_temp; - - if (g->ops.fifo.tsg_release != NULL) { - g->ops.fifo.tsg_release(tsg); - } - - if (nvgpu_mem_is_valid(&tsg->gr_ctx.mem)) { - gr_gk20a_free_tsg_gr_ctx(tsg); - } - - if (g->ops.fifo.deinit_eng_method_buffers != NULL) { - g->ops.fifo.deinit_eng_method_buffers(g, tsg); - } - - if (tsg->vm != NULL) { - nvgpu_vm_put(tsg->vm); - tsg->vm = NULL; - } - - if(tsg->sm_error_states != NULL) { - nvgpu_kfree(g, tsg->sm_error_states); - tsg->sm_error_states = NULL; - } - - /* unhook all events created on this TSG */ - nvgpu_mutex_acquire(&tsg->event_id_list_lock); - nvgpu_list_for_each_entry_safe(event_id_data, event_id_data_temp, - &tsg->event_id_list, - gk20a_event_id_data, - event_id_node) { - nvgpu_list_del(&event_id_data->event_id_node); - } - nvgpu_mutex_release(&tsg->event_id_list_lock); - - release_used_tsg(&g->fifo, tsg); - - tsg->runlist_id = ~0; - tsg->sm_exception_mask_type = NVGPU_SM_EXCEPTION_TYPE_MASK_NONE; - - nvgpu_log(g, gpu_dbg_fn, "tsg released %d\n", tsg->tsgid); -} - -struct tsg_gk20a *tsg_gk20a_from_ch(struct channel_gk20a *ch) -{ - struct tsg_gk20a *tsg = NULL; - - if (gk20a_is_channel_marked_as_tsg(ch)) { - struct gk20a *g = ch->g; - struct fifo_gk20a *f = &g->fifo; - tsg = &f->tsg[ch->tsgid]; - } - - return tsg; -} - -int gk20a_tsg_alloc_sm_error_states_mem(struct gk20a *g, - struct tsg_gk20a *tsg, - u32 num_sm) -{ - int err = 0; - - if (tsg->sm_error_states != NULL) { - return err; - } - - tsg->sm_error_states = nvgpu_kzalloc(g, - sizeof(struct nvgpu_tsg_sm_error_state) - * num_sm); - if (tsg->sm_error_states == NULL) { - nvgpu_err(g, "sm_error_states mem allocation failed"); - err = -ENOMEM; - } - - return err; -} - -void gk20a_tsg_update_sm_error_state_locked(struct tsg_gk20a *tsg, - u32 sm_id, - struct nvgpu_tsg_sm_error_state *sm_error_state) -{ - struct nvgpu_tsg_sm_error_state *tsg_sm_error_states; - - tsg_sm_error_states = tsg->sm_error_states + sm_id; - - tsg_sm_error_states->hww_global_esr = - sm_error_state->hww_global_esr; - tsg_sm_error_states->hww_warp_esr = - sm_error_state->hww_warp_esr; - tsg_sm_error_states->hww_warp_esr_pc = - sm_error_state->hww_warp_esr_pc; - tsg_sm_error_states->hww_global_esr_report_mask = - sm_error_state->hww_global_esr_report_mask; - tsg_sm_error_states->hww_warp_esr_report_mask = - sm_error_state->hww_warp_esr_report_mask; -} diff --git a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.h b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.h deleted file mode 100644 index 1e3be553..00000000 --- a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.h +++ /dev/null @@ -1,128 +0,0 @@ -/* - * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ -#ifndef TSG_GK20A_H -#define TSG_GK20A_H - -#include -#include -#include - -#include "gr_gk20a.h" - -#define NVGPU_INVALID_TSG_ID (-1) - -struct channel_gk20a; - -bool gk20a_is_channel_marked_as_tsg(struct channel_gk20a *ch); -struct tsg_gk20a *gk20a_tsg_open(struct gk20a *g, pid_t pid); -void gk20a_tsg_release(struct nvgpu_ref *ref); - -int gk20a_init_tsg_support(struct gk20a *g, u32 tsgid); -struct tsg_gk20a *tsg_gk20a_from_ch(struct channel_gk20a *ch); - -struct nvgpu_tsg_sm_error_state { - u32 hww_global_esr; - u32 hww_warp_esr; - u64 hww_warp_esr_pc; - u32 hww_global_esr_report_mask; - u32 hww_warp_esr_report_mask; -}; - -struct tsg_gk20a { - struct gk20a *g; - - struct vm_gk20a *vm; - struct nvgpu_mem *eng_method_buffers; - - - struct nvgpu_gr_ctx gr_ctx; - struct nvgpu_ref refcount; - - struct nvgpu_list_node ch_list; - struct nvgpu_list_node event_id_list; - struct nvgpu_rwsem ch_list_lock; - struct nvgpu_mutex event_id_list_lock; - int num_active_channels; - - unsigned int timeslice_us; - unsigned int timeslice_timeout; - unsigned int timeslice_scale; - - u32 interleave_level; - int tsgid; - - u32 runlist_id; - pid_t tgid; - u32 num_active_tpcs; - u8 tpc_pg_enabled; - bool tpc_num_initialized; - bool in_use; - - struct nvgpu_tsg_sm_error_state *sm_error_states; - -#define NVGPU_SM_EXCEPTION_TYPE_MASK_NONE (0x0U) -#define NVGPU_SM_EXCEPTION_TYPE_MASK_FATAL (0x1U << 0) - u32 sm_exception_mask_type; -}; - -int gk20a_enable_tsg(struct tsg_gk20a *tsg); -int gk20a_disable_tsg(struct tsg_gk20a *tsg); -int gk20a_tsg_bind_channel(struct tsg_gk20a *tsg, - struct channel_gk20a *ch); -int gk20a_tsg_unbind_channel(struct channel_gk20a *ch); - -void gk20a_tsg_event_id_post_event(struct tsg_gk20a *tsg, - int event_id); -int gk20a_tsg_set_runlist_interleave(struct tsg_gk20a *tsg, u32 level); -int gk20a_tsg_set_timeslice(struct tsg_gk20a *tsg, u32 timeslice); -u32 gk20a_tsg_get_timeslice(struct tsg_gk20a *tsg); -int gk20a_tsg_set_priority(struct gk20a *g, struct tsg_gk20a *tsg, - u32 priority); -int gk20a_tsg_alloc_sm_error_states_mem(struct gk20a *g, - struct tsg_gk20a *tsg, - u32 num_sm); -void gk20a_tsg_update_sm_error_state_locked(struct tsg_gk20a *tsg, - u32 sm_id, - struct nvgpu_tsg_sm_error_state *sm_error_state); - -struct gk20a_event_id_data { - struct gk20a *g; - - int id; /* ch or tsg */ - int pid; - u32 event_id; - - bool event_posted; - - struct nvgpu_cond event_id_wq; - struct nvgpu_mutex lock; - struct 
nvgpu_list_node event_id_node; }; - -static inline struct gk20a_event_id_data * -gk20a_event_id_data_from_event_id_node(struct nvgpu_list_node *node) -{ - return (struct gk20a_event_id_data *) - ((uintptr_t)node - offsetof(struct gk20a_event_id_data, event_id_node)); -}; - -#endif /* TSG_GK20A_H */
diff --git a/drivers/gpu/nvgpu/gm20b/hal_gm20b.c b/drivers/gpu/nvgpu/gm20b/hal_gm20b.c index 133428da..427403e3 100644 --- a/drivers/gpu/nvgpu/gm20b/hal_gm20b.c +++ b/drivers/gpu/nvgpu/gm20b/hal_gm20b.c @@ -28,6 +28,7 @@ #include #include #include +#include <nvgpu/tsg.h> #include "common/clock_gating/gm20b_gating_reglist.h" #include "common/bus/bus_gm20b.h" @@ -50,7 +51,6 @@ #include "gk20a/regops_gk20a.h" #include "gk20a/pmu_gk20a.h" #include "gk20a/gr_gk20a.h" -#include "gk20a/tsg_gk20a.h" #include "gr_gm20b.h" #include "fifo_gm20b.h"
diff --git a/drivers/gpu/nvgpu/gp10b/hal_gp10b.c b/drivers/gpu/nvgpu/gp10b/hal_gp10b.c index 740cb8b7..68dfcfe4 100644 --- a/drivers/gpu/nvgpu/gp10b/hal_gp10b.c +++ b/drivers/gpu/nvgpu/gp10b/hal_gp10b.c @@ -29,6 +29,7 @@ #include #include #include +#include <nvgpu/tsg.h> #include "common/bus/bus_gk20a.h" #include "common/clock_gating/gp10b_gating_reglist.h" @@ -57,7 +58,6 @@ #include "gk20a/regops_gk20a.h" #include "gk20a/pmu_gk20a.h" #include "gk20a/gr_gk20a.h" -#include "gk20a/tsg_gk20a.h" #include "gp10b/gr_gp10b.h" #include "gp10b/fecs_trace_gp10b.h"
diff --git a/drivers/gpu/nvgpu/include/nvgpu/gk20a.h b/drivers/gpu/nvgpu/include/nvgpu/gk20a.h index 2fd533c6..ad77f802 100644 --- a/drivers/gpu/nvgpu/include/nvgpu/gk20a.h +++ b/drivers/gpu/nvgpu/include/nvgpu/gk20a.h @@ -66,11 +66,11 @@ struct nvgpu_gpfifo_args; #include #include #include +#include <nvgpu/tsg.h> #include "gk20a/clk_gk20a.h" #include "gk20a/ce2_gk20a.h" #include "gk20a/fifo_gk20a.h" -#include "gk20a/tsg_gk20a.h" #include "clk/clk.h" #include "perf/perf.h" #include "pmgr/pmgr.h"
diff --git a/drivers/gpu/nvgpu/include/nvgpu/tsg.h b/drivers/gpu/nvgpu/include/nvgpu/tsg.h new file mode 100644 index 00000000..bed84986 --- /dev/null +++ b/drivers/gpu/nvgpu/include/nvgpu/tsg.h @@ -0,0 +1,128 @@ +/* + * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ +#ifndef TSG_GK20A_H +#define TSG_GK20A_H + +#include +#include +#include + +#include "gk20a/gr_gk20a.h" + +#define NVGPU_INVALID_TSG_ID (-1) + +struct channel_gk20a; + +bool gk20a_is_channel_marked_as_tsg(struct channel_gk20a *ch); +struct tsg_gk20a *gk20a_tsg_open(struct gk20a *g, pid_t pid); +void gk20a_tsg_release(struct nvgpu_ref *ref); + +int gk20a_init_tsg_support(struct gk20a *g, u32 tsgid); +struct tsg_gk20a *tsg_gk20a_from_ch(struct channel_gk20a *ch); + +struct nvgpu_tsg_sm_error_state { + u32 hww_global_esr; + u32 hww_warp_esr; + u64 hww_warp_esr_pc; + u32 hww_global_esr_report_mask; + u32 hww_warp_esr_report_mask; +}; + +struct tsg_gk20a { + struct gk20a *g; + + struct vm_gk20a *vm; + struct nvgpu_mem *eng_method_buffers; + + + struct nvgpu_gr_ctx gr_ctx; + struct nvgpu_ref refcount; + + struct nvgpu_list_node ch_list; + struct nvgpu_list_node event_id_list; + struct nvgpu_rwsem ch_list_lock; + struct nvgpu_mutex event_id_list_lock; + int num_active_channels; + + unsigned int timeslice_us; + unsigned int timeslice_timeout; + unsigned int timeslice_scale; + + u32 interleave_level; + int tsgid; + + u32 runlist_id; + pid_t tgid; + u32 num_active_tpcs; + u8 tpc_pg_enabled; + bool tpc_num_initialized; + bool in_use; + + struct nvgpu_tsg_sm_error_state *sm_error_states; + +#define NVGPU_SM_EXCEPTION_TYPE_MASK_NONE (0x0U) +#define NVGPU_SM_EXCEPTION_TYPE_MASK_FATAL (0x1U << 0) + u32 sm_exception_mask_type; +}; + +int gk20a_enable_tsg(struct tsg_gk20a *tsg); +int gk20a_disable_tsg(struct tsg_gk20a *tsg); +int gk20a_tsg_bind_channel(struct tsg_gk20a *tsg, + struct channel_gk20a *ch); +int gk20a_tsg_unbind_channel(struct channel_gk20a *ch); + +void gk20a_tsg_event_id_post_event(struct tsg_gk20a *tsg, + int event_id); +int gk20a_tsg_set_runlist_interleave(struct tsg_gk20a *tsg, u32 level); +int gk20a_tsg_set_timeslice(struct tsg_gk20a *tsg, u32 timeslice); +u32 gk20a_tsg_get_timeslice(struct tsg_gk20a *tsg); +int gk20a_tsg_set_priority(struct gk20a *g, struct tsg_gk20a *tsg, + u32 priority); +int gk20a_tsg_alloc_sm_error_states_mem(struct gk20a *g, + struct tsg_gk20a *tsg, + u32 num_sm); +void gk20a_tsg_update_sm_error_state_locked(struct tsg_gk20a *tsg, + u32 sm_id, + struct nvgpu_tsg_sm_error_state *sm_error_state); + +struct gk20a_event_id_data { + struct gk20a *g; + + int id; /* ch or tsg */ + int pid; + u32 event_id; + + bool event_posted; + + struct nvgpu_cond event_id_wq; + struct nvgpu_mutex lock; + struct nvgpu_list_node event_id_node; +}; + +static inline struct gk20a_event_id_data * +gk20a_event_id_data_from_event_id_node(struct nvgpu_list_node *node) +{ + return (struct gk20a_event_id_data *) + ((uintptr_t)node - offsetof(struct gk20a_event_id_data, event_id_node)); +}; + +#endif /* TSG_GK20A_H */
diff --git a/drivers/gpu/nvgpu/os/linux/ioctl_dbg.c b/drivers/gpu/nvgpu/os/linux/ioctl_dbg.c index 953b7168..705434be 100644 --- a/drivers/gpu/nvgpu/os/linux/ioctl_dbg.c +++ b/drivers/gpu/nvgpu/os/linux/ioctl_dbg.c @@ -31,12 +31,12 @@ #include #include #include +#include <nvgpu/tsg.h> #include #include "gk20a/gk20a.h" #include "gk20a/gr_gk20a.h" -#include "gk20a/tsg_gk20a.h" #include "gk20a/regops_gk20a.h" #include "gk20a/dbg_gpu_gk20a.h" #include "os_linux.h"
diff --git a/drivers/gpu/nvgpu/os/linux/ioctl_tsg.c b/drivers/gpu/nvgpu/os/linux/ioctl_tsg.c index 6dd96d6b..998a1544 100644 --- a/drivers/gpu/nvgpu/os/linux/ioctl_tsg.c +++ b/drivers/gpu/nvgpu/os/linux/ioctl_tsg.c @@ -26,9 +26,9 @@ #include #include #include +#include <nvgpu/tsg.h> #include "gk20a/gk20a.h" -#include "gk20a/tsg_gk20a.h" #include 
"gv11b/fifo_gv11b.h" #include "platform_gk20a.h" #include "ioctl_tsg.h" diff --git a/drivers/gpu/nvgpu/os/linux/vgpu/vgpu_linux.c b/drivers/gpu/nvgpu/os/linux/vgpu/vgpu_linux.c index f5628bc1..475c6dc0 100644 --- a/drivers/gpu/nvgpu/os/linux/vgpu/vgpu_linux.c +++ b/drivers/gpu/nvgpu/os/linux/vgpu/vgpu_linux.c @@ -36,7 +36,6 @@ #include "vgpu_linux.h" #include "vgpu/fecs_trace_vgpu.h" #include "vgpu/clk_vgpu.h" -#include "gk20a/tsg_gk20a.h" #include "gk20a/regops_gk20a.h" #include "gm20b/hal_gm20b.h" diff --git a/drivers/gpu/nvgpu/os/posix/posix-tsg.c b/drivers/gpu/nvgpu/os/posix/posix-tsg.c new file mode 100644 index 00000000..d8e3f370 --- /dev/null +++ b/drivers/gpu/nvgpu/os/posix/posix-tsg.c @@ -0,0 +1,28 @@ +/* + * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include + +void gk20a_tsg_event_id_post_event(struct tsg_gk20a *tsg, + int __event_id) +{ +} diff --git a/drivers/gpu/nvgpu/os/posix/tsg.c b/drivers/gpu/nvgpu/os/posix/tsg.c deleted file mode 100644 index 8736123d..00000000 --- a/drivers/gpu/nvgpu/os/posix/tsg.c +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include "gk20a/tsg_gk20a.h" - -void gk20a_tsg_event_id_post_event(struct tsg_gk20a *tsg, - int __event_id) -{ -}
diff --git a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c index 4b1cf212..9fafa52f 100644 --- a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c +++ b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c @@ -30,11 +30,11 @@ #include #include #include +#include <nvgpu/tsg.h> #include "gr_vgpu.h" #include "gk20a/gk20a.h" #include "gk20a/dbg_gpu_gk20a.h" -#include "gk20a/tsg_gk20a.h" #include "gk20a/fecs_trace_gk20a.h" #include
diff --git a/drivers/gpu/nvgpu/vgpu/tsg_vgpu.c b/drivers/gpu/nvgpu/vgpu/tsg_vgpu.c index a81b5022..3553bf51 100644 --- a/drivers/gpu/nvgpu/vgpu/tsg_vgpu.c +++ b/drivers/gpu/nvgpu/vgpu/tsg_vgpu.c @@ -21,11 +21,12 @@ */ #include "gk20a/gk20a.h" -#include "gk20a/tsg_gk20a.h" #include "fifo_vgpu.h" -#include +#include +#include #include + #include #include
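
Editor's note: after this move, the TSG API is reachable through the new include/nvgpu/tsg.h alone. The sketch below shows one plausible consumer flow built only from functions this patch relocates (gk20a_tsg_open, gk20a_tsg_bind_channel, gk20a_tsg_set_timeslice, gk20a_tsg_set_runlist_interleave, gk20a_tsg_unbind_channel, gk20a_tsg_release). It is a minimal illustration assuming the usual nvgpu build environment; the wrapper name example_setup_tsg, the 2048 us timeslice value, and the caller-supplied channel pointer are invented for illustration and are not part of the patch.

/*
 * Hypothetical consumer of the moved TSG API. Everything called here is
 * declared in the new include/nvgpu/tsg.h; only this wrapper and its
 * arguments are illustrative.
 */
#include <nvgpu/tsg.h>

static struct tsg_gk20a *example_setup_tsg(struct gk20a *g,
				struct channel_gk20a *ch, pid_t pid)
{
	struct tsg_gk20a *tsg;
	int err;

	/* Claim an unused TSG slot from g->fifo and take a reference. */
	tsg = gk20a_tsg_open(g, pid);
	if (tsg == NULL) {
		return NULL;
	}

	/*
	 * The channel must not already be marked as part of a TSG and must
	 * not be active on any runlist, and its runlist_id must match any
	 * previously bound channel; otherwise bind returns -EINVAL.
	 */
	err = gk20a_tsg_bind_channel(tsg, ch);
	if (err != 0) {
		goto put_tsg;
	}

	/* Scheduling knobs apply to the whole group, not per channel. */
	err = gk20a_tsg_set_timeslice(tsg, 2048 /* us, illustrative */);
	if (err != 0) {
		goto unbind;
	}

	err = gk20a_tsg_set_runlist_interleave(tsg,
			NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_HIGH);
	if (err != 0) {
		goto unbind;
	}

	return tsg;

unbind:
	/* Unbind drops the reference that gk20a_tsg_bind_channel() took. */
	gk20a_tsg_unbind_channel(ch);
put_tsg:
	/* Drop the open() reference; release frees per-TSG state. */
	nvgpu_ref_put(&tsg->refcount, gk20a_tsg_release);
	return NULL;
}

The error paths simply undo each successful step in reverse order, mirroring the cleanup that gk20a_tsg_open() and gk20a_tsg_unbind_channel() already perform internally on their own failure paths.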