#ifndef __LITMUS_SOFTIRQ_H
#define __LITMUS_SOFTIRQ_H

#include <linux/interrupt.h>
#include <linux/workqueue.h>

/*
   Threaded tasklet handling for Litmus.  Tasklets are scheduled
   with the priority of the tasklet's owner -- that is, the RT task
   on whose behalf the tasklet runs.

   Tasklets are currently scheduled in FIFO order with NO priority
   inheritance for "blocked" tasklets.

   klitirqd assumes the priority of the owner of the tasklet when
   the tasklet is next to execute.

   Currently, hi-tasklets are scheduled before low-tasklets,
   regardless of the priority of the low-tasklets.  This priority
   inversion probably needs to be fixed, though it is not an issue
   in our work with GPUs, as GPUs (and their associated klitirqds)
   are owned for exclusive time periods, so no inversions can occur.

   FIXME: Let low-tasklets with higher Litmus priority be scheduled
   before hi-tasklets of lower Litmus priority.

   TODO: Decide if tasklets should really be scheduled FIFO.  If
   not, we should probably ensure that tasklets with the same owner
   still execute in FIFO order, lest we confuse drivers with
   out-of-order execution (though they probably should be able to
   handle it anyway, by design of tasklet processing).
 */

#define NR_LITMUS_SOFTIRQD CONFIG_NR_LITMUS_SOFTIRQD

//void trigger_litirqs(struct task_struct*);

/* Spawns NR_LITMUS_SOFTIRQD klitirqd daemons.
   Actual launch of threads is deferred to kworker's
   workqueue, so daemons will likely not be immediately
   running when this function returns, though the required
   data will be initialized. */
void spawn_klitirqd(void);

/* Raises a flag to tell klitirqds to terminate.
   Termination is async, so some threads may still be
   running after this function returns. */
void kill_klitirqd(void);

/* Returns 1 if all NR_LITMUS_SOFTIRQD klitirqds are ready
   to handle tasklets.  0, otherwise. */
int klitirqd_is_ready(void);

/* Returns 1 if none of the NR_LITMUS_SOFTIRQD klitirqds are
   ready to handle tasklets.  0, otherwise. */
int klitirqd_is_dead(void);

extern void __litmus_tasklet_schedule(
        struct tasklet_struct *t,
        unsigned int k_id);

/* schedule a tasklet on klitirqd #k_id */
static inline void litmus_tasklet_schedule(
        struct tasklet_struct *t,
        unsigned int k_id)
{
        if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
                __litmus_tasklet_schedule(t, k_id);
}

extern void __litmus_tasklet_hi_schedule(struct tasklet_struct *t,
        unsigned int k_id);

/* schedule a hi tasklet on klitirqd #k_id */
static inline void litmus_tasklet_hi_schedule(struct tasklet_struct *t,
        unsigned int k_id)
{
        if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
                __litmus_tasklet_hi_schedule(t, k_id);
}

extern void __litmus_tasklet_hi_schedule_first(
        struct tasklet_struct *t,
        unsigned int k_id);

/* schedule a hi tasklet on klitirqd #k_id on next go-around */
/* PRECONDITION: Interrupts must be disabled. */
static inline void litmus_tasklet_hi_schedule_first(
        struct tasklet_struct *t,
        unsigned int k_id)
{
        if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
                __litmus_tasklet_hi_schedule_first(t, k_id);
}

//////////////

extern void __litmus_schedule_work(
        struct work_struct *w,
        unsigned int k_id);

static inline void litmus_schedule_work(
        struct work_struct *w,
        unsigned int k_id)
{
        __litmus_schedule_work(w, k_id);
}

#endif
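
/*
 * Usage sketch (illustrative only, not part of this interface):
 * how module init/exit code might bring the klitirqd daemons up and
 * down.  Since spawn_klitirqd() defers the actual thread launch to a
 * kworker, a caller should poll klitirqd_is_ready() before scheduling
 * any tasklets.  The module hooks and polling loops below are
 * hypothetical.
 *
 *   static int __init my_module_init(void)
 *   {
 *           spawn_klitirqd();
 *           // Daemons come up asynchronously; wait until all
 *           // NR_LITMUS_SOFTIRQD threads can accept tasklets.
 *           while (!klitirqd_is_ready())
 *                   schedule_timeout_uninterruptible(1);
 *           return 0;
 *   }
 *
 *   static void __exit my_module_exit(void)
 *   {
 *           kill_klitirqd();
 *           // Termination is also asynchronous; wait for all
 *           // daemons to actually stop accepting tasklets.
 *           while (!klitirqd_is_dead())
 *                   schedule_timeout_uninterruptible(1);
 *   }
 */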
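
/*
 * Usage sketch (illustrative only): deferring an interrupt bottom
 * half to klitirqd #0 instead of the regular softirq machinery.  The
 * tasklet, its handler, and the IRQ handler names are hypothetical;
 * klitirqd runs the tasklet at the priority of its owning RT task.
 *
 *   static void my_bottom_half(unsigned long data)
 *   {
 *           // ... deferred bottom-half work ...
 *   }
 *   static DECLARE_TASKLET(my_tasklet, my_bottom_half, 0);
 *
 *   static irqreturn_t my_irq_handler(int irq, void *dev)
 *   {
 *           // litmus_tasklet_schedule() tests and sets
 *           // TASKLET_STATE_SCHED itself, so a tasklet that is
 *           // already pending is not queued a second time.
 *           if (klitirqd_is_ready())
 *                   litmus_tasklet_schedule(&my_tasklet, 0);
 *           return IRQ_HANDLED;
 *   }
 */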