/*
 * sched_task_trace.c -- record scheduling events to a byte stream
 */

#define NO_TASK_TRACE_DECLS

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>

#include <litmus/ftdev.h>
#include <litmus/litmus.h>

#include <litmus/sched_trace.h>
#include <litmus/feather_trace.h>

#ifdef CONFIG_SCHED_LITMUS_TRACEPOINT
#define CREATE_TRACE_POINTS
#endif

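/*
 * Number of st_event_record slots in each per-CPU buffer; the capacity
 * scales with CONFIG_SCHED_TASK_TRACE_SHIFT.
 */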
#define NUM_EVENTS		(1 << (CONFIG_SCHED_TASK_TRACE_SHIFT+11))

#define now() litmus_clock()

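/*
 * Per-CPU backing store: the record and flag arrays are handed to
 * Feather-Trace via init_ft_buffer(); ftbuf tracks the ring state.
 */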
struct local_buffer {
	struct st_event_record record[NUM_EVENTS];
	char   flag[NUM_EVENTS];
	struct ft_buffer ftbuf;
};

DEFINE_PER_CPU(struct local_buffer, st_event_buffer);

static struct ftdev st_dev;

static int st_dev_can_open(struct ftdev *dev, unsigned int cpu)
{
	return cpu_online(cpu) ? 0 : -ENODEV;
}

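/*
 * Register one ftdev minor per online CPU and point it at that CPU's
 * local_buffer.  init_ft_buffer() is expected to return 1 per
 * successfully initialized buffer, hence the ok == minor_cnt check.
 */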
static int __init init_sched_task_trace(void)
{
	struct local_buffer* buf;
	int i, ok = 0, err;
	printk("Allocated %u sched_trace_xxx() events per CPU "
	       "(buffer size: %d bytes)\n",
	       NUM_EVENTS, (int) sizeof(struct local_buffer));

	err = ftdev_init(&st_dev, THIS_MODULE,
			num_online_cpus(), "sched_trace");
	if (err)
		goto err_out;

	for (i = 0; i < st_dev.minor_cnt; i++) {
		buf = &per_cpu(st_event_buffer, i);
		ok += init_ft_buffer(&buf->ftbuf, NUM_EVENTS,
				     sizeof(struct st_event_record),
				     buf->flag,
				     buf->record);
		st_dev.minor[i].buf = &buf->ftbuf;
	}
	if (ok == st_dev.minor_cnt) {
		st_dev.can_open = st_dev_can_open;
		err = register_ftdev(&st_dev);
		if (err)
			goto err_dealloc;
	} else {
		err = -EINVAL;
		goto err_dealloc;
	}

	return 0;

err_dealloc:
	ftdev_exit(&st_dev);
err_out:
	printk(KERN_WARNING "Could not register sched_trace module\n");
	return err;
}

static void __exit exit_sched_task_trace(void)
{
	ftdev_exit(&st_dev);
}

module_init(init_sched_task_trace);
module_exit(exit_sched_task_trace);
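
/*
 * Userspace consumes each per-CPU buffer through an ftdev character
 * device (one minor per CPU).  A minimal reader -- a sketch only,
 * assuming the /dev/litmus/sched_trace%d device names conventionally
 * set up for LITMUS^RT and a hypothetical handle_event() consumer:
 *
 *	int fd = open("/dev/litmus/sched_trace0", O_RDONLY);
 *	struct st_event_record rec;
 *
 *	while (read(fd, &rec, sizeof(rec)) == sizeof(rec))
 *		handle_event(&rec);
 *	close(fd);
 */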


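/*
 * get_record() reserves a slot in this CPU's buffer and fills in the
 * common header.  On success, preemption stays disabled (get_cpu_var())
 * until the caller hands the slot back via put_record(); on failure,
 * preemption is re-enabled here and NULL is returned.
 */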
static inline struct st_event_record* get_record(u8 type, struct task_struct* t)
{
	struct st_event_record* rec = NULL;
	struct local_buffer* buf;

	buf = &get_cpu_var(st_event_buffer);
	if (ft_buffer_start_write(&buf->ftbuf, (void**) &rec)) {
		rec->hdr.type = type;
		rec->hdr.cpu  = smp_processor_id();
		rec->hdr.pid  = t ? t->pid : 0;
		rec->hdr.job  = t ? t->rt_param.job_params.job_no : 0;
	} else {
		put_cpu_var(st_event_buffer);
	}
	/* rec will be NULL if it failed */
	return rec;
}

static inline void put_record(struct st_event_record* rec)
{
	struct local_buffer* buf;
	buf = &__get_cpu_var(st_event_buffer);
	ft_buffer_finish_write(&buf->ftbuf, rec);
	put_cpu_var(st_event_buffer);
}

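/*
 * The do_sched_trace_*() functions below are Feather-Trace callbacks,
 * invoked through the sched_trace_*() macros in <litmus/sched_trace.h>;
 * the event id arrives as the first argument.
 */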
feather_callback void do_sched_trace_task_name(unsigned long id, unsigned long _task)
{
	struct task_struct *t = (struct task_struct*) _task;
	struct st_event_record* rec = get_record(ST_NAME, t);
	int i;
	if (rec) {
		for (i = 0; i < min(TASK_COMM_LEN, ST_NAME_LEN); i++)
			rec->data.name.cmd[i] = t->comm[i];
		put_record(rec);
	}
}

feather_callback void do_sched_trace_task_param(unsigned long id, unsigned long _task)
{
	struct task_struct *t = (struct task_struct*) _task;
	struct st_event_record* rec = get_record(ST_PARAM, t);
	if (rec) {
		rec->data.param.wcet      = get_exec_cost(t);
		rec->data.param.period    = get_rt_period(t);
		rec->data.param.phase     = get_rt_phase(t);
		rec->data.param.partition = get_partition(t);
		rec->data.param.class     = get_class(t);
		put_record(rec);
	}
}

feather_callback void do_sched_trace_task_release(unsigned long id, unsigned long _task)
{
	struct task_struct *t = (struct task_struct*) _task;
	struct st_event_record* rec = get_record(ST_RELEASE, t);
	if (rec) {
		rec->data.release.release  = get_release(t);
		rec->data.release.deadline = get_deadline(t);
		put_record(rec);
	}
}

/* skipped: st_assigned_data, we don't use it atm */

feather_callback void do_sched_trace_task_switch_to(unsigned long id,
						    unsigned long _task)
{
	struct task_struct *t = (struct task_struct*) _task;
	struct st_event_record* rec;
	if (is_realtime(t))  /* comment out to trace EVERYTHING */
	{
		rec = get_record(ST_SWITCH_TO, t);
		if (rec) {
			rec->data.switch_to.when      = now();
			rec->data.switch_to.exec_time = get_exec_time(t);
			put_record(rec);
		}
	}
}

feather_callback void do_sched_trace_task_switch_away(unsigned long id,
						      unsigned long _task)
{
	struct task_struct *t = (struct task_struct*) _task;
	struct st_event_record* rec;
	if (is_realtime(t))  /* comment out to trace EVERYTHING */
	{
		rec = get_record(ST_SWITCH_AWAY, t);
		if (rec) {
			rec->data.switch_away.when      = now();
			rec->data.switch_away.exec_time = get_exec_time(t);
			put_record(rec);
		}
	}
}

feather_callback void do_sched_trace_task_completion(unsigned long id,
						     unsigned long _task,
						     unsigned long forced)
{
	struct task_struct *t = (struct task_struct*) _task;
	struct st_event_record* rec = get_record(ST_COMPLETION, t);
	if (rec) {
		rec->data.completion.when = now();
		rec->data.completion.backlog_remaining = tsk_rt(t)->job_params.backlog;
		rec->data.completion.was_backlog_job = tsk_rt(t)->job_params.is_backlogged_job;
		rec->data.completion.forced = forced;
		put_record(rec);
	}
}

feather_callback void do_sched_trace_task_block(unsigned long id,
						unsigned long _task)
{
	struct task_struct *t = (struct task_struct*) _task;
	struct st_event_record* rec = get_record(ST_BLOCK, t);

	if (rec) {
		rec->data.block.when = now();

		/* Hiding is turned on by locking protocols; if there isn't
		 * any hiding, then we are blocking for some other reason,
		 * so assume it is I/O. */
		rec->data.block.for_io  = !tsk_rt(t)->blocked_lock || (0
#ifdef CONFIG_REALTIME_AUX_TASKS
			|| (tsk_rt(t)->has_aux_tasks && !tsk_rt(t)->hide_from_aux_tasks)
#endif
#ifdef CONFIG_LITMUS_NVIDIA
			|| (tsk_rt(t)->held_gpus && !tsk_rt(t)->hide_from_gpu)
#endif
			);
		put_record(rec);
	}
}

feather_callback void do_sched_trace_task_resume(unsigned long id,
						 unsigned long _task)
{
	struct task_struct *t = (struct task_struct*) _task;
	struct st_event_record* rec = get_record(ST_RESUME, t);
	if (rec) {
		rec->data.resume.when      = now();
		put_record(rec);
	}
}

feather_callback void do_sched_trace_sys_release(unsigned long id,
						 unsigned long _start)
{
	lt_t *start = (lt_t*) _start;
	struct st_event_record* rec = get_record(ST_SYS_RELEASE, NULL);
	if (rec) {
		rec->data.sys_release.when    = now();
		rec->data.sys_release.release = *start;
		put_record(rec);
	}
}

feather_callback void do_sched_trace_action(unsigned long id,
					    unsigned long _task,
					    unsigned long action)
{
	struct task_struct *t = (struct task_struct*) _task;
	struct st_event_record* rec = get_record(ST_ACTION, t);

	if (rec) {
		rec->data.action.when   = now();
		rec->data.action.action = action;
		put_record(rec);
	}
}

feather_callback void do_sched_trace_migration(unsigned long id,
					       unsigned long _task,
					       unsigned long _mig_info)
{
	struct task_struct *t = (struct task_struct*) _task;
	struct st_event_record *rec = get_record(ST_MIGRATION, t);

	if (rec) {
		struct migration_info* mig_info = (struct migration_info*) _mig_info;

		rec->hdr.extra = mig_info->distance;
		rec->data.migration.observed = mig_info->observed;
		rec->data.migration.estimated = mig_info->estimated;

		put_record(rec);
	}
}



feather_callback void do_sched_trace_lock(unsigned long id,
					  unsigned long _task,
					  unsigned long _lock_id,
					  unsigned long _acquired)
{
	struct task_struct *t = (struct task_struct*) _task;
	struct st_event_record *rec = get_record(ST_LOCK, t);

	if (rec) {
		rec->data.lock.when = now();
		rec->data.lock.lock_id = _lock_id;
		rec->data.lock.acquired = _acquired;
		put_record(rec);
	}
}





feather_callback void do_sched_trace_tasklet_release(unsigned long id,
						     unsigned long _owner,
						     unsigned long _device)
{
	struct task_struct *t = (struct task_struct*) _owner;
	struct st_event_record *rec = get_record(ST_TASKLET_RELEASE, t);

	if (rec) {
		rec->data.tasklet_release.when = now();
		rec->data.tasklet_release.device = _device;
		put_record(rec);
	}
}


feather_callback void do_sched_trace_tasklet_begin(unsigned long id,
						   unsigned long _owner)
{
	struct task_struct *t = (struct task_struct*) _owner;
	struct st_event_record *rec = get_record(ST_TASKLET_BEGIN, t);

	if (rec) {
		rec->data.tasklet_begin.when = now();

		if (!in_interrupt())
			rec->data.tasklet_begin.exe_pid = current->pid;
		else
			rec->data.tasklet_begin.exe_pid = 0;

		put_record(rec);
	}
}
EXPORT_SYMBOL(do_sched_trace_tasklet_begin);


feather_callback void do_sched_trace_tasklet_end(unsigned long id,
						 unsigned long _owner,
						 unsigned long _flushed)
{
	struct task_struct *t = (struct task_struct*) _owner;
	struct st_event_record *rec = get_record(ST_TASKLET_END, t);

	if (rec) {
		rec->data.tasklet_end.when = now();
		rec->data.tasklet_end.flushed = _flushed;

		if (!in_interrupt())
			rec->data.tasklet_end.exe_pid = current->pid;
		else
			rec->data.tasklet_end.exe_pid = 0;

		put_record(rec);
	}
}
EXPORT_SYMBOL(do_sched_trace_tasklet_end);


feather_callback void do_sched_trace_work_release(unsigned long id,
						  unsigned long _owner,
						  unsigned long _device)
{
	struct task_struct *t = (struct task_struct*) _owner;
	struct st_event_record *rec = get_record(ST_WORK_RELEASE, t);

	if (rec) {
		rec->data.work_release.when = now();
		rec->data.work_release.device = _device;
		put_record(rec);
	}
}


feather_callback void do_sched_trace_work_begin(unsigned long id,
						unsigned long _owner,
						unsigned long _exe)
{
	struct task_struct *t = (struct task_struct*) _owner;
	struct st_event_record *rec = get_record(ST_WORK_BEGIN, t);

	if (rec) {
		struct task_struct *exe = (struct task_struct*) _exe;
		rec->data.work_begin.exe_pid = exe->pid;
		rec->data.work_begin.when = now();
		put_record(rec);
	}
}
EXPORT_SYMBOL(do_sched_trace_work_begin);


feather_callback void do_sched_trace_work_end(unsigned long id,
					      unsigned long _owner,
					      unsigned long _exe,
					      unsigned long _flushed)
{
	struct task_struct *t = (struct task_struct*) _owner;
	struct st_event_record *rec = get_record(ST_WORK_END, t);

	if (rec) {
		struct task_struct *exe = (struct task_struct*) _exe;
		rec->data.work_end.exe_pid = exe->pid;
		rec->data.work_end.flushed = _flushed;
		rec->data.work_end.when = now();
		put_record(rec);
	}
}
EXPORT_SYMBOL(do_sched_trace_work_end);


feather_callback void do_sched_trace_eff_prio_change(unsigned long id,
						     unsigned long _task,
						     unsigned long _inh)
{
	struct task_struct *t = (struct task_struct*) _task;
	struct st_event_record *rec = get_record(ST_EFF_PRIO_CHANGE, t);

	if (rec) {
		struct task_struct *inh = (struct task_struct*) _inh;
		rec->data.effective_priority_change.when = now();
		rec->data.effective_priority_change.inh_pid = (inh != NULL) ?
			inh->pid :
			0xffff;

		put_record(rec);
	}
}

/* pray for no nesting of nv interrupts on same CPU... */
struct tracing_interrupt_map
{
	int active;
	int count;
	unsigned long data[128]; /* assume nesting less than 128... */
	unsigned long serial[128];
};
DEFINE_PER_CPU(struct tracing_interrupt_map, active_interrupt_tracing);


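/*
 * Per-CPU serial counter for nv interrupt records; advanced by
 * num_online_cpus() per interrupt in do_sched_trace_nv_interrupt_begin().
 */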
DEFINE_PER_CPU(u32, intCounter);

feather_callback void do_sched_trace_nv_interrupt_begin(unsigned long id,
							unsigned long _device)
{
	struct st_event_record *rec;
	u32 serialNum;

	{
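		/*
		 * Track nv interrupt nesting on this CPU: active ==
		 * 0xcafebabe marks tracing already in progress; count
		 * is the nesting depth.
		 */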
		u32* serial;
		struct tracing_interrupt_map* int_map = &per_cpu(active_interrupt_tracing, smp_processor_id());
		if (int_map->active == 0xcafebabe) {
			int_map->count++;
		} else {
			int_map->active = 0xcafebabe;
			int_map->count = 1;
		}
		//int_map->data[int_map->count-1] = _device;

		serial = &per_cpu(intCounter, smp_processor_id());
		*serial += num_online_cpus();
		serialNum = *serial;
		int_map->serial[int_map->count-1] = serialNum;
	}

	rec = get_record(ST_NV_INTERRUPT_BEGIN, NULL);
	if (rec) {
		u32 device = _device;
		rec->data.nv_interrupt_begin.when = now();
		rec->data.nv_interrupt_begin.device = device;
		rec->data.nv_interrupt_begin.serialNumber = serialNum;
		put_record(rec);
	}
}
EXPORT_SYMBOL(do_sched_trace_nv_interrupt_begin);

/*
int is_interrupt_tracing_active(void)
{
	struct tracing_interrupt_map* int_map = &per_cpu(active_interrupt_tracing, smp_processor_id());
	if(int_map->active == 0xcafebabe)
		return 1;
	return 0;
}
*/

feather_callback void do_sched_trace_nv_interrupt_end(unsigned long id, unsigned long _device)
{
	struct tracing_interrupt_map* int_map = &per_cpu(active_interrupt_tracing, smp_processor_id());
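	/* Emit an end record only if a matching begin was traced. */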
	if (int_map->active == 0xcafebabe) {
		struct st_event_record *rec = get_record(ST_NV_INTERRUPT_END, NULL);

		int_map->count--;
		if (int_map->count == 0)
			int_map->active = 0;

		if (rec) {
			u32 device = _device;
			rec->data.nv_interrupt_end.when = now();
			//rec->data.nv_interrupt_end.device = int_map->data[int_map->count];
			rec->data.nv_interrupt_end.device = device;
			rec->data.nv_interrupt_end.serialNumber = int_map->serial[int_map->count];
			put_record(rec);
		}
	}
}
EXPORT_SYMBOL(do_sched_trace_nv_interrupt_end);