/*
 * Copyright (c) 2014-2016, NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/fs.h>
#include <linux/file.h>
#include <linux/cdev.h>
#include <linux/uaccess.h>
#include <linux/nvhost.h>
#include <uapi/linux/nvgpu.h>
#include <linux/anon_inodes.h>

#include "gk20a.h"
#include "hw_ccsr_gk20a.h"

static void gk20a_tsg_release(struct kref *ref);

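/* A channel is part of a TSG iff its tsgid holds a valid TSG ID. */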
bool gk20a_is_channel_marked_as_tsg(struct channel_gk20a *ch)
{
	return ch->tsgid != NVGPU_INVALID_TSG_ID;
}

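/*
 * Set the enable bit in the CCSR register of every channel bound to the
 * TSG. Callers take a power reference (gk20a_busy) around this.
 */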
int gk20a_enable_tsg(struct tsg_gk20a *tsg)
{
	struct channel_gk20a *ch;

	mutex_lock(&tsg->ch_list_lock);
	list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
		gk20a_writel(ch->g, ccsr_channel_r(ch->hw_chid),
			gk20a_readl(ch->g, ccsr_channel_r(ch->hw_chid))
			| ccsr_channel_enable_set_true_f());
	}
	mutex_unlock(&tsg->ch_list_lock);

	return 0;
}

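/*
 * Clear the enable bit in the CCSR register of every channel bound to the
 * TSG, stopping the channels from running.
 */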
int gk20a_disable_tsg(struct tsg_gk20a *tsg)
{
	struct channel_gk20a *ch;

	mutex_lock(&tsg->ch_list_lock);
	list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
		gk20a_writel(ch->g, ccsr_channel_r(ch->hw_chid),
			gk20a_readl(ch->g, ccsr_channel_r(ch->hw_chid))
			| ccsr_channel_enable_clr_true_f());
	}
	mutex_unlock(&tsg->ch_list_lock);

	return 0;
}

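/* A channel is active if any runlist's active-channel bitmap contains it. */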
static bool gk20a_is_channel_active(struct gk20a *g, struct channel_gk20a *ch)
{
	struct fifo_gk20a *f = &g->fifo;
	struct fifo_runlist_info_gk20a *runlist;
	int i;

	for (i = 0; i < f->max_runlists; ++i) {
		runlist = &f->runlist_info[i];
		if (test_bit(ch->hw_chid, runlist->active_channels))
			return true;
	}

	return false;
}

/*
 * API to mark a channel as part of a TSG.
 *
 * Note that the channel must not be runnable when it is bound to the
 * TSG; binding an active channel fails with -EINVAL.
 */
static int gk20a_tsg_bind_channel(struct tsg_gk20a *tsg, int ch_fd)
{
	struct file *f = fget(ch_fd);
	struct channel_gk20a *ch;

	gk20a_dbg_fn("");

	if (!f)
		return -EINVAL;

	ch = gk20a_get_channel_from_file(ch_fd);
	if (!ch) {
		fput(f);
		return -EINVAL;
	}

	/* check if channel is already bound to some TSG */
	if (gk20a_is_channel_marked_as_tsg(ch)) {
		fput(f);
		return -EINVAL;
	}

	/* channel cannot be bound to TSG if it is already active */
	if (gk20a_is_channel_active(tsg->g, ch)) {
		fput(f);
		return -EINVAL;
	}

	ch->tsgid = tsg->tsgid;

	mutex_lock(&tsg->ch_list_lock);
	list_add_tail(&ch->ch_entry, &tsg->ch_list);
	mutex_unlock(&tsg->ch_list_lock);

	kref_get(&tsg->refcount);

	gk20a_dbg(gpu_dbg_fn, "BIND tsg:%d channel:%d\n",
					tsg->tsgid, ch->hw_chid);

	fput(f);

	gk20a_dbg_fn("done");
	return 0;
}

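/*
 * Detach a channel from its TSG and drop the reference taken at bind time.
 * Called when the channel is closed; explicit unbind is not supported.
 */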
int gk20a_tsg_unbind_channel(struct channel_gk20a *ch)
{
	struct fifo_gk20a *f = &ch->g->fifo;
	struct tsg_gk20a *tsg = &f->tsg[ch->tsgid];

	mutex_lock(&tsg->ch_list_lock);
	list_del_init(&ch->ch_entry);
	mutex_unlock(&tsg->ch_list_lock);

	kref_put(&tsg->refcount, gk20a_tsg_release);

	ch->tsgid = NVGPU_INVALID_TSG_ID;

	return 0;
}

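/*
 * One-time initialization of a TSG slot at FIFO setup. There is one
 * potential TSG per channel ID.
 */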
int gk20a_init_tsg_support(struct gk20a *g, u32 tsgid)
{
	struct tsg_gk20a *tsg = NULL;

	if (tsgid >= g->fifo.num_channels)
		return -EINVAL;

	tsg = &g->fifo.tsg[tsgid];

	tsg->in_use = false;
	tsg->tsgid = tsgid;

	INIT_LIST_HEAD(&tsg->ch_list);
	mutex_init(&tsg->ch_list_lock);

	INIT_LIST_HEAD(&tsg->event_id_list);
	mutex_init(&tsg->event_id_list_lock);

	return 0;
}

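/*
 * Translate a NVGPU_PRIORITY_* level into a timeslice, convert that into
 * the hardware timeout/scale pair, and resubmit the runlist to apply it.
 */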
static int gk20a_tsg_set_priority(struct gk20a *g, struct tsg_gk20a *tsg,
				u32 priority)
{
	int timeslice_period;

	switch (priority) {
	case NVGPU_PRIORITY_LOW:
		timeslice_period = g->timeslice_low_priority_us;
		break;
	case NVGPU_PRIORITY_MEDIUM:
		timeslice_period = g->timeslice_medium_priority_us;
		break;
	case NVGPU_PRIORITY_HIGH:
		timeslice_period = g->timeslice_high_priority_us;
		break;
	default:
		pr_err("Unsupported priority");
		return -EINVAL;
	}

	gk20a_channel_get_timescale_from_timeslice(g, timeslice_period,
			&tsg->timeslice_timeout, &tsg->timeslice_scale);

	g->ops.fifo.update_runlist(g, 0, ~0, true, true);

	return 0;
}

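/*
 * Find the event data for an event ID already enabled on this TSG.
 * Returns 0 and sets *event_id_data on success.
 */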
static int gk20a_tsg_get_event_data_from_id(struct tsg_gk20a *tsg,
				int event_id,
				struct gk20a_event_id_data **event_id_data)
{
	struct gk20a_event_id_data *local_event_id_data;
	bool event_found = false;

	mutex_lock(&tsg->event_id_list_lock);
	list_for_each_entry(local_event_id_data, &tsg->event_id_list,
						 event_id_node) {
		if (local_event_id_data->event_id == event_id) {
			event_found = true;
			break;
		}
	}
	mutex_unlock(&tsg->event_id_list_lock);

	if (event_found) {
		*event_id_data = local_event_id_data;
		return 0;
	} else {
		return -ENOENT;
	}
}

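/*
 * Create an anonymous-inode fd that userspace can use to wait for the
 * given event ID on this TSG. Fails if the event is already enabled.
 */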
static int gk20a_tsg_event_id_enable(struct tsg_gk20a *tsg,
					 int event_id,
					 int *fd)
{
	int err = 0;
	int local_fd;
	struct file *file;
	char *name;
	struct gk20a_event_id_data *event_id_data;

	err = gk20a_tsg_get_event_data_from_id(tsg,
				event_id, &event_id_data);
	if (err == 0) /* We already have event enabled */
		return -EINVAL;

	err = get_unused_fd_flags(O_RDWR);
	if (err < 0)
		return err;
	local_fd = err;

	name = kasprintf(GFP_KERNEL, "nvgpu-event%d-fd%d",
			event_id, local_fd);
	if (!name) {
		err = -ENOMEM;
		goto clean_up;
	}

	file = anon_inode_getfile(name, &gk20a_event_id_ops,
				  NULL, O_RDWR);
	kfree(name);
	if (IS_ERR(file)) {
		err = PTR_ERR(file);
		goto clean_up;
	}

	event_id_data = kzalloc(sizeof(*event_id_data), GFP_KERNEL);
	if (!event_id_data) {
		err = -ENOMEM;
		goto clean_up_file;
	}
	event_id_data->g = tsg->g;
	event_id_data->id = tsg->tsgid;
	event_id_data->is_tsg = true;
	event_id_data->event_id = event_id;

	init_waitqueue_head(&event_id_data->event_id_wq);
	mutex_init(&event_id_data->lock);
	INIT_LIST_HEAD(&event_id_data->event_id_node);

	mutex_lock(&tsg->event_id_list_lock);
	list_add_tail(&event_id_data->event_id_node, &tsg->event_id_list);
	mutex_unlock(&tsg->event_id_list_lock);

	fd_install(local_fd, file);
	file->private_data = event_id_data;

	*fd = local_fd;

	return 0;

clean_up_file:
	fput(file);
clean_up:
	put_unused_fd(local_fd);
	return err;
}

static int gk20a_tsg_event_id_ctrl(struct gk20a *g, struct tsg_gk20a *tsg,
		struct nvgpu_event_id_ctrl_args *args)
{
	int err = 0;
	int fd = -1;

	if (args->event_id < 0 ||
	    args->event_id >= NVGPU_IOCTL_CHANNEL_EVENT_ID_MAX)
		return -EINVAL;

	switch (args->cmd) {
	case NVGPU_IOCTL_CHANNEL_EVENT_ID_CMD_ENABLE:
		err = gk20a_tsg_event_id_enable(tsg, args->event_id, &fd);
		if (!err)
			args->event_fd = fd;
		break;

	default:
		gk20a_err(dev_from_gk20a(tsg->g),
			   "unrecognized tsg event id cmd: 0x%x",
			   args->cmd);
		err = -EINVAL;
		break;
	}

	return err;
}

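/* Return a TSG slot to the free pool. */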
static void release_used_tsg(struct fifo_gk20a *f, struct tsg_gk20a *tsg)
{
	mutex_lock(&f->tsg_inuse_mutex);
	f->tsg[tsg->tsgid].in_use = false;
	mutex_unlock(&f->tsg_inuse_mutex);
}

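/* Claim the first free TSG slot under the fifo's in-use mutex. */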
static struct tsg_gk20a *acquire_unused_tsg(struct fifo_gk20a *f)
{
	struct tsg_gk20a *tsg = NULL;
	int tsgid;

	mutex_lock(&f->tsg_inuse_mutex);
	for (tsgid = 0; tsgid < f->num_channels; tsgid++) {
		if (!f->tsg[tsgid].in_use) {
			f->tsg[tsgid].in_use = true;
			tsg = &f->tsg[tsgid];
			break;
		}
	}
	mutex_unlock(&f->tsg_inuse_mutex);

	return tsg;
}

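/*
 * Allocate a free TSG, take the initial reference, and stash it in the
 * file's private_data. The reference is dropped on file release.
 */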
int gk20a_tsg_open(struct gk20a *g, struct file *filp)
{
	struct tsg_gk20a *tsg;
	struct device *dev;

	dev  = dev_from_gk20a(g);

	gk20a_dbg(gpu_dbg_fn, "tsg: %s", dev_name(dev));

	tsg = acquire_unused_tsg(&g->fifo);
	if (!tsg)
		return -ENOMEM;

	tsg->g = g;
	tsg->num_active_channels = 0;
	kref_init(&tsg->refcount);

	tsg->tsg_gr_ctx = NULL;
	tsg->vm = NULL;
	tsg->interleave_level = NVGPU_RUNLIST_INTERLEAVE_LEVEL_LOW;

	filp->private_data = tsg;

	gk20a_dbg(gpu_dbg_fn, "tsg opened %d\n", tsg->tsgid);

	return 0;
}

int gk20a_tsg_dev_open(struct inode *inode, struct file *filp)
{
	struct gk20a *g;
	int ret;

	g = container_of(inode->i_cdev,
			 struct gk20a, tsg.cdev);
	gk20a_dbg_fn("");
	ret = gk20a_tsg_open(g, filp);
	gk20a_dbg_fn("done");
	return ret;
}

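/*
 * Called when the last reference is dropped: free the TSG's graphics
 * context and address space, then return the slot to the pool.
 */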
static void gk20a_tsg_release(struct kref *ref)
{
	struct tsg_gk20a *tsg = container_of(ref, struct tsg_gk20a, refcount);
	struct gk20a *g = tsg->g;

	if (tsg->tsg_gr_ctx) {
		gr_gk20a_free_tsg_gr_ctx(tsg);
		tsg->tsg_gr_ctx = NULL;
	}
	if (tsg->vm) {
		gk20a_vm_put(tsg->vm);
		tsg->vm = NULL;
	}

	release_used_tsg(&g->fifo, tsg);

	gk20a_dbg(gpu_dbg_fn, "tsg released %d\n", tsg->tsgid);
}

int gk20a_tsg_dev_release(struct inode *inode, struct file *filp)
{
	struct tsg_gk20a *tsg = filp->private_data;
	kref_put(&tsg->refcount, gk20a_tsg_release);
	return 0;
}

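/*
 * Rough sketch of how userspace might drive these ioctls (illustrative
 * only: the ioctl numbers and argument structs come from
 * uapi/linux/nvgpu.h, and the device node path below is an assumption
 * that can vary by platform):
 *
 *	int tsg_fd = open("/dev/nvhost-tsg-gpu", O_RDWR);
 *	int ch_fd = ...;	(an open, not-yet-runnable channel fd)
 *	ioctl(tsg_fd, NVGPU_TSG_IOCTL_BIND_CHANNEL, &ch_fd);
 *	ioctl(tsg_fd, NVGPU_IOCTL_TSG_ENABLE);
 */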
long gk20a_tsg_dev_ioctl(struct file *filp, unsigned int cmd,
			     unsigned long arg)
{
	struct tsg_gk20a *tsg = filp->private_data;
	struct gk20a *g = tsg->g;
	u8 __maybe_unused buf[NVGPU_TSG_IOCTL_MAX_ARG_SIZE];
	int err = 0;

	gk20a_dbg(gpu_dbg_fn, "");

	if ((_IOC_TYPE(cmd) != NVGPU_TSG_IOCTL_MAGIC) ||
	    (_IOC_NR(cmd) == 0) ||
	    (_IOC_NR(cmd) > NVGPU_TSG_IOCTL_LAST))
		return -EINVAL;

	BUG_ON(_IOC_SIZE(cmd) > NVGPU_TSG_IOCTL_MAX_ARG_SIZE);

	memset(buf, 0, sizeof(buf));
	if (_IOC_DIR(cmd) & _IOC_WRITE) {
		if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
			return -EFAULT;
	}

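	/*
	 * If gr support is not yet initialized, take a power reference so
	 * the power-on path completes it, then drop the reference again.
	 */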
	if (!g->gr.sw_ready) {
		err = gk20a_busy(g->dev);
		if (err)
			return err;

		gk20a_idle(g->dev);
	}

	switch (cmd) {
	case NVGPU_TSG_IOCTL_BIND_CHANNEL:
		{
		int ch_fd = *(int *)buf;
		if (ch_fd < 0) {
			err = -EINVAL;
			break;
		}
		err = gk20a_tsg_bind_channel(tsg, ch_fd);
		break;
		}

	case NVGPU_TSG_IOCTL_UNBIND_CHANNEL:
		/* We do not support explicitly unbinding a channel from a
		 * TSG. The channel is unbound from its TSG when it is
		 * closed.
		 */
		break;

	case NVGPU_IOCTL_TSG_ENABLE:
		{
		err = gk20a_busy(g->dev);
		if (err) {
			gk20a_err(&g->dev->dev,
			   "failed to host gk20a for ioctl cmd: 0x%x", cmd);
			return err;
		}
		gk20a_enable_tsg(tsg);
		gk20a_idle(g->dev);
		break;
		}

	case NVGPU_IOCTL_TSG_DISABLE:
		{
		err = gk20a_busy(g->dev);
		if (err) {
			gk20a_err(&g->dev->dev,
			   "failed to host gk20a for ioctl cmd: 0x%x", cmd);
			return err;
		}
		gk20a_disable_tsg(tsg);
		gk20a_idle(g->dev);
		break;
		}

	case NVGPU_IOCTL_TSG_PREEMPT:
		{
		err = gk20a_busy(g->dev);
		if (err) {
			gk20a_err(&g->dev->dev,
			   "failed to host gk20a for ioctl cmd: 0x%x", cmd);
			return err;
		}
		/* preempt TSG */
		err = gk20a_fifo_preempt_tsg(g, tsg->tsgid);
		gk20a_idle(g->dev);
		break;
		}

	case NVGPU_IOCTL_TSG_SET_PRIORITY:
		{
		err = gk20a_tsg_set_priority(g, tsg,
			((struct nvgpu_set_priority_args *)buf)->priority);
		break;
		}

	case NVGPU_IOCTL_TSG_EVENT_ID_CTRL:
		{
		err = gk20a_tsg_event_id_ctrl(g, tsg,
			(struct nvgpu_event_id_ctrl_args *)buf);
		break;
		}

	default:
		gk20a_err(dev_from_gk20a(g),
			   "unrecognized tsg gpu ioctl cmd: 0x%x",
			   cmd);
		err = -ENOTTY;
		break;
	}

	if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ)) {
		/* copy_to_user() returns bytes not copied, not an errno */
		if (copy_to_user((void __user *)arg,
				 buf, _IOC_SIZE(cmd)))
			err = -EFAULT;
	}

	return err;
}