#ifndef __LITMUS_SOFTIRQ_H
#define __LITMUS_SOFTIRQ_H

#include <linux/interrupt.h>
#include <linux/workqueue.h>

/*
   Threaded tasklet handling for Litmus.  Tasklets
   are scheduled with the priority of the tasklet's
   owner---that is, the RT task on whose behalf the
   tasklet runs.

   Tasklets are currently scheduled in FIFO order with
   NO priority inheritance for "blocked" tasklets.

   klitirqd assumes the priority of the tasklet's owner
   when that tasklet is next to execute.

   Currently, hi-tasklets are scheduled before
   low-tasklets, regardless of the priority of the
   low-tasklets.  Likewise, low-tasklets are scheduled
   before work queue objects.  This priority inversion
   probably needs to be fixed, though it is not an issue
   in our work with GPUs: each GPU (and its associated
   klitirqd) is owned for exclusive time periods, so no
   inversions can occur.
 */



#define NR_LITMUS_SOFTIRQD CONFIG_NR_LITMUS_SOFTIRQD

/* Spawns NR_LITMUS_SOFTIRQD klitirqd daemons.
   The actual launch of the threads is deferred to kworker's
   workqueue, so the daemons will likely not yet be running
   when this function returns, though the required data will
   have been initialized.

   @affinity: an array expressing the processor affinity
    of each of the NR_LITMUS_SOFTIRQD daemons.  May be set
    to NULL for global scheduling.

	- Examples -
	8-CPU system with two CPU clusters:
		affinity[] = {0, 0, 0, 0, 3, 3, 3, 3}
		NOTE: Daemons are not actually bound to the specified CPU,
		but rather to the cluster in which that CPU resides.

	8-CPU system, partitioned:
		affinity[] = {0, 1, 2, 3, 4, 5, 6, 7}

	FIXME: change array to a CPU topology or array of cpumasks

 */
void spawn_klitirqd(int* affinity);
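
/* Example (sketch, not part of this API): spawning the daemons during
 * plugin initialization on an 8-CPU machine with two 4-CPU clusters,
 * assuming CONFIG_NR_LITMUS_SOFTIRQD == 8.  The init hook name is
 * hypothetical.
 *
 *	static void my_plugin_init(void)
 *	{
 *		int affinity[NR_LITMUS_SOFTIRQD] = {0, 0, 0, 0, 3, 3, 3, 3};
 *		spawn_klitirqd(affinity);
 *	}
 *
 * Under global scheduling, pass NULL instead of an affinity array.
 */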


/* Raises a flag that tells the klitirqd threads to terminate.
   Termination is asynchronous, so some threads may still be
   running after this function returns. */
void kill_klitirqd(void);


/* Returns 1 if all NR_LITMUS_SOFTIRQD klitirqd threads are
   ready to handle tasklets; 0 otherwise. */
int klitirqd_is_ready(void);

/* Returns 1 if none of the NR_LITMUS_SOFTIRQD klitirqd threads
   are ready to handle tasklets; 0 otherwise. */
int klitirqd_is_dead(void);
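
/* Example (sketch): since spawning is asynchronous, callers that must not
 * enqueue tasklets before the daemons are up can poll klitirqd_is_ready().
 * The helper name and polling interval below are arbitrary.
 *
 *	static void wait_for_klitirqd(void)
 *	{
 *		while (!klitirqd_is_ready())
 *			schedule_timeout_uninterruptible(HZ / 100);
 *	}
 *
 * Likewise, klitirqd_is_dead() can be polled after kill_klitirqd() to wait
 * for all daemons to shut down.
 */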

/* Flushes all pending work of the specified 'owner' back out
 * to the OS for regular tasklet/work processing.
 *
 * PRECOND: klitirqd_thread must have a cleared entry
 * in the GPU registry; otherwise this call becomes a
 * no-op, as the work will simply loop back to klitirqd_thread.
 *
 * Pass NULL for owner to flush ALL pending items.
 */
void flush_pending(struct task_struct* klitirqd_thread,
				   struct task_struct* owner);

struct task_struct* get_klitirqd(unsigned int k_id);
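
/* Example (sketch): when an RT task exits, any tasklets/work items it still
 * owns can be pushed back to the regular Linux softirq/workqueue machinery;
 * passing NULL as the owner flushes everything.  The exit-hook name and the
 * choice of daemon 0 are hypothetical, and the GPU-registry precondition
 * above must hold.
 *
 *	static void my_task_exit_hook(struct task_struct *tsk)
 *	{
 *		flush_pending(get_klitirqd(0), tsk);
 *	}
 */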


extern int __litmus_tasklet_schedule(
        struct tasklet_struct *t,
        unsigned int k_id);

/* schedule a tasklet on klitirqd #k_id */
static inline int litmus_tasklet_schedule(
	struct tasklet_struct *t,
	unsigned int k_id)
{
	int ret = 0;
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		ret = __litmus_tasklet_schedule(t, k_id);
	return ret;
}

/* for use by __tasklet_schedule() */
static inline int _litmus_tasklet_schedule(
	struct tasklet_struct *t,
	unsigned int k_id)
{
	return __litmus_tasklet_schedule(t, k_id);
}
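
/* Example (sketch): deferring a device's bottom half to klitirqd #0.  The
 * tasklet function, interrupt handler, and daemon id are hypothetical; in
 * practice the tasklet's owner must also be set up so that klitirqd can
 * inherit the correct priority.
 *
 *	static void my_bottom_half(unsigned long data)
 *	{
 *		pr_debug("klitirqd ran my_bottom_half\n");
 *	}
 *
 *	static DECLARE_TASKLET(my_tasklet, my_bottom_half, 0);
 *
 *	static irqreturn_t my_irq_handler(int irq, void *dev)
 *	{
 *		litmus_tasklet_schedule(&my_tasklet, 0);
 *		return IRQ_HANDLED;
 *	}
 */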




extern int __litmus_tasklet_hi_schedule(struct tasklet_struct *t,
                                         unsigned int k_id);

/* schedule a hi tasklet on klitirqd #k_id */
static inline int litmus_tasklet_hi_schedule(struct tasklet_struct *t,
					     unsigned int k_id)
{
	int ret = 0;
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		ret = __litmus_tasklet_hi_schedule(t, k_id);
	return ret;
}

/* for use by __tasklet_hi_schedule() */
static inline int _litmus_tasklet_hi_schedule(struct tasklet_struct *t,
					      unsigned int k_id)
{
	return __litmus_tasklet_hi_schedule(t, k_id);
}





extern int __litmus_tasklet_hi_schedule_first(
    struct tasklet_struct *t,
    unsigned int k_id);

/* schedule a hi tasklet on klitirqd #k_id on next go-around */
/* PRECONDITION: Interrupts must be disabled. */
static inline int litmus_tasklet_hi_schedule_first(
	struct tasklet_struct *t,
	unsigned int k_id)
{
	int ret = 0;
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		ret = __litmus_tasklet_hi_schedule_first(t, k_id);
	return ret;
}

/* for use by __tasklet_hi_schedule_first() */
static inline int _litmus_tasklet_hi_schedule_first(
	struct tasklet_struct *t,
	unsigned int k_id)
{
	return __litmus_tasklet_hi_schedule_first(t, k_id);
}



///////////// work queue handling for klitirqd.

extern int __litmus_schedule_work(
	struct work_struct* w,
	unsigned int k_id);

static inline int litmus_schedule_work(
	struct work_struct* w,
	unsigned int k_id)
{
	return __litmus_schedule_work(w, k_id);
}
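
/* Example (sketch): deferring longer-running work to klitirqd #0 through
 * the workqueue path.  The work function and daemon id are hypothetical.
 *
 *	static void my_work_fn(struct work_struct *w)
 *	{
 *		pr_debug("klitirqd ran my_work_fn\n");
 *	}
 *
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	static void defer_my_work(void)
 *	{
 *		litmus_schedule_work(&my_work, 0);
 *	}
 */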



///////////// mutex operations for client threads.
 
void down_and_set_stat(struct task_struct* t,
					 enum klitirqd_sem_status to_set,
					 struct mutex* sem);

void __down_and_reset_and_set_stat(struct task_struct* t,
				enum klitirqd_sem_status to_reset,
				enum klitirqd_sem_status to_set,
				struct mutex* sem);

void up_and_set_stat(struct task_struct* t,
					enum klitirqd_sem_status to_set,
					struct mutex* sem);



void release_klitirqd_lock(struct task_struct* t);

int reacquire_klitirqd_lock(struct task_struct* t);

#endif /* __LITMUS_SOFTIRQ_H */