/*
 * Constant definitions related to
 * scheduling policy.
 */
#ifndef _LINUX_LITMUS_H_
#define _LINUX_LITMUS_H_

#include

#ifdef CONFIG_RELEASE_MASTER
extern atomic_t release_master_cpu;
#endif

/* in_list - is a given list_head queued on some list?
 */
static inline int in_list(struct list_head* list)
{
	return !(  /* case 1: deleted */
		   (list->next == LIST_POISON1 &&
		    list->prev == LIST_POISON2)
		 ||
		   /* case 2: initialized */
		   (list->next == list &&
		    list->prev == list)
		);
}

struct task_struct* __waitqueue_remove_first(wait_queue_head_t *wq);

#define NO_CPU			0xffffffff

void litmus_fork(struct task_struct *tsk);
void litmus_post_fork_thread(struct task_struct *tsk);
void litmus_exec(void);
/* clean up real-time state of a task */
void exit_litmus(struct task_struct *dead_tsk);

long litmus_admit_task(struct task_struct *tsk);
/* called before litmus_exit_task(), but without run queue locks held */
void litmus_pre_exit_task(struct task_struct *tsk);
void litmus_exit_task(struct task_struct *tsk);

#define is_realtime(t)		((t)->policy == SCHED_LITMUS)
#define rt_transition_pending(t) \
	((t)->rt_param.transition_pending)

#define tsk_rt(t)		(&(t)->rt_param)
#define tsk_aux(t)		(&(t)->aux_data)

/* Realtime utility macros */

#define is_priority_boosted(t)	(tsk_rt(t)->priority_boosted)
#define get_boost_start(t)	(tsk_rt(t)->boost_start_time)

/* task_params macros */
#define get_exec_cost(t)	(tsk_rt(t)->task_params.exec_cost)
#define get_rt_period(t)	(tsk_rt(t)->task_params.period)
#define get_rt_relative_deadline(t)	(tsk_rt(t)->task_params.relative_deadline)
#define get_rt_phase(t)		(tsk_rt(t)->task_params.phase)
#define get_partition(t)	(tsk_rt(t)->task_params.cpu)
#define get_priority(t)		(tsk_rt(t)->task_params.priority)
#define get_class(t)		(tsk_rt(t)->task_params.cls)
#define get_release_policy(t)	(tsk_rt(t)->task_params.release_policy)
#define get_drain_policy(t)	(tsk_rt(t)->task_params.drain_policy)

/* job_param macros */
#define get_exec_time(t)	(tsk_rt(t)->job_params.exec_time)
#define get_deadline(t)		(tsk_rt(t)->job_params.deadline)
#define get_period(t)		(tsk_rt(t)->task_params.period)
#define get_release(t)		(tsk_rt(t)->job_params.release)
#define get_lateness(t)		(tsk_rt(t)->job_params.lateness)
#define get_backlog(t)		(tsk_rt(t)->job_params.backlog)
#define has_backlog(t)		(get_backlog(t) != 0)

#define get_budget_timer(t)	(tsk_rt(t)->budget)

#define effective_priority(t) \
	((!(tsk_rt(t)->inh_task)) ? t : tsk_rt(t)->inh_task)
#define base_priority(t)	(t)

/* release policy macros */
#define is_periodic(t)		(get_release_policy(t) == PERIODIC)
#define is_sporadic(t)		(get_release_policy(t) == SPORADIC)
#ifdef CONFIG_ALLOW_EARLY_RELEASE
#define is_early_releasing(t)	(get_release_policy(t) == EARLY)
#else
#define is_early_releasing(t)	(0)
#endif

#define is_hrt(t)		\
	(tsk_rt(t)->task_params.cls == RT_CLASS_HARD)
#define is_srt(t)		\
	(tsk_rt(t)->task_params.cls == RT_CLASS_SOFT)
#define is_be(t)		\
	(tsk_rt(t)->task_params.cls == RT_CLASS_BEST_EFFORT)
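/*
 * Illustrative usage sketch (not part of the original header): a
 * scheduler plugin could combine the accessors above to set up a new
 * job from its task parameters.  setup_new_job() is a hypothetical
 * helper name used only for this example.
 *
 *	static void setup_new_job(struct task_struct *t, lt_t release)
 *	{
 *		tsk_rt(t)->job_params.release   = release;
 *		tsk_rt(t)->job_params.deadline  = release +
 *			get_rt_relative_deadline(t);
 *		tsk_rt(t)->job_params.exec_time = 0;
 *	}
 */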
/* budget-related functions and macros */

inline static int budget_exhausted(struct task_struct* t)
{
	return get_exec_time(t) >= get_exec_cost(t);
}

inline static int budget_remaining(struct task_struct* t)
{
	return (!budget_exhausted(t)) ?
		(get_exec_cost(t) - get_exec_time(t)) : 0;
}

#define budget_enforced(t) (\
	tsk_rt(t)->task_params.budget_policy != NO_ENFORCEMENT)

#define budget_precisely_tracked(t) (\
	tsk_rt(t)->task_params.budget_policy == PRECISE_ENFORCEMENT || \
	tsk_rt(t)->task_params.budget_signal_policy == PRECISE_SIGNALS)

#define budget_quantum_tracked(t) (\
	tsk_rt(t)->task_params.budget_policy == QUANTUM_ENFORCEMENT || \
	tsk_rt(t)->task_params.budget_signal_policy == QUANTUM_SIGNALS)

#define budget_signalled(t) (\
	tsk_rt(t)->task_params.budget_signal_policy != NO_SIGNALS)

#define budget_precisely_signalled(t) (\
	tsk_rt(t)->task_params.budget_signal_policy == PRECISE_SIGNALS)

#define bt_flag_is_set(t, flag_nr) (\
	test_bit(flag_nr, &tsk_rt(t)->budget.flags))

#define bt_flag_test_and_set(t, flag_nr) (\
	test_and_set_bit(flag_nr, &tsk_rt(t)->budget.flags))

#define bt_flag_test_and_clear(t, flag_nr) (\
	test_and_clear_bit(flag_nr, &tsk_rt(t)->budget.flags))

#define bt_flag_set(t, flag_nr) (\
	set_bit(flag_nr, &tsk_rt(t)->budget.flags))

#define bt_flag_clear(t, flag_nr) (\
	clear_bit(flag_nr, &tsk_rt(t)->budget.flags))

#define bt_flags_reset(t) (\
	tsk_rt(t)->budget.flags = 0)

#define should_requeue_preempted_job(t) \
	(t && !is_completed(t) && \
	 (!budget_exhausted(t) || !budget_enforced(t)))

#ifdef CONFIG_LITMUS_LOCKING
static inline void set_inh_task_linkback(struct task_struct* t,
					 struct task_struct* linkto)
{
	const int MAX_IDX = BITS_PER_LONG - 1;

	int success = 0;
	int old_idx = tsk_rt(t)->inh_task_linkback_idx;

	/* is the linkback already set? */
	if (old_idx >= 0 && old_idx <= MAX_IDX) {
		if ((BIT_MASK(old_idx) & tsk_rt(linkto)->used_linkback_slots) &&
		    (tsk_rt(linkto)->inh_task_linkbacks[old_idx] == t)) {
			TRACE_TASK(t, "linkback is current.\n");
			return;
		}
		BUG();
	}

	/* kludge: upper limit on num linkbacks */
	BUG_ON(tsk_rt(linkto)->used_linkback_slots == ~0ul);

	while (!success) {
		int b = find_first_zero_bit(&tsk_rt(linkto)->used_linkback_slots,
				BITS_PER_BYTE*sizeof(tsk_rt(linkto)->used_linkback_slots));
		BUG_ON(b > MAX_IDX);

		/* set bit... */
		if (!test_and_set_bit(b, &tsk_rt(linkto)->used_linkback_slots)) {
			TRACE_TASK(t, "linking back to %s/%d in slot %d\n",
				   linkto->comm, linkto->pid, b);
			if (tsk_rt(linkto)->inh_task_linkbacks[b])
				TRACE_TASK(t, "%s/%d already has %s/%d in slot %d\n",
					   linkto->comm, linkto->pid,
					   tsk_rt(linkto)->inh_task_linkbacks[b]->comm,
					   tsk_rt(linkto)->inh_task_linkbacks[b]->pid, b);
			/* TODO: allow dirty data to remain in [b] after code is tested */
			BUG_ON(tsk_rt(linkto)->inh_task_linkbacks[b] != NULL);
			/* ...before setting slot */
			tsk_rt(linkto)->inh_task_linkbacks[b] = t;
			tsk_rt(t)->inh_task_linkback_idx = b;
			success = 1;
		}
	}
}

static inline void clear_inh_task_linkback(struct task_struct* t,
					   struct task_struct* linkedto)
{
	const int MAX_IDX = BITS_PER_LONG - 1;

	int success = 0;
	int slot = tsk_rt(t)->inh_task_linkback_idx;

	if (slot < 0) {
		TRACE_TASK(t, "assuming linkback already cleared.\n");
		return;
	}

	BUG_ON(slot > MAX_IDX);
	BUG_ON(tsk_rt(linkedto)->inh_task_linkbacks[slot] != t);

	/* be safe - clear slot before clearing the bit */
	tsk_rt(t)->inh_task_linkback_idx = -1;
	tsk_rt(linkedto)->inh_task_linkbacks[slot] = NULL;
	success = test_and_clear_bit(slot, &tsk_rt(linkedto)->used_linkback_slots);

	BUG_ON(!success);
}
#endif

/* Our notion of time within LITMUS: kernel monotonic time. */
static inline lt_t litmus_clock(void)
{
	return ktime_to_ns(ktime_get());
}
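/*
 * Illustrative sketch (not part of the original header): a precise
 * budget-enforcement path might combine the helpers above roughly as
 * follows.  arm_enforcement_timer() is a hypothetical function name
 * used only for this example.
 *
 *	if (budget_enforced(t) && !budget_exhausted(t)) {
 *		lt_t now   = litmus_clock();
 *		lt_t slack = budget_remaining(t);
 *		arm_enforcement_timer(t, now + slack);	// fire when the budget drains
 *	}
 */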
/* A macro to convert from nanoseconds to ktime_t. */
#define ns_to_ktime(t)		ktime_add_ns(ktime_set(0, 0), t)

#define get_domain(t)		(tsk_rt(t)->domain)

/* Honor the flag in the preempt_count variable that is set
 * when scheduling is in progress.
 */
#define is_running(t)			\
	((t)->state == TASK_RUNNING ||	\
	 task_thread_info(t)->preempt_count & PREEMPT_ACTIVE)

#define is_blocked(t)		\
	(!is_running(t))
#define is_released(t, now)	\
	(lt_before_eq(get_release(t), now))
#define is_tardy(t, now)	\
	(lt_before_eq(tsk_rt(t)->job_params.deadline, now))

/* real-time comparison macros */
#define earlier_deadline(a, b) (lt_before(\
	(a)->rt_param.job_params.deadline,\
	(b)->rt_param.job_params.deadline))
#define shorter_period(a, b) (lt_before(\
	(a)->rt_param.task_params.period,\
	(b)->rt_param.task_params.period))
#define earlier_release(a, b)  (lt_before(\
	(a)->rt_param.job_params.release,\
	(b)->rt_param.job_params.release))

void preempt_if_preemptable(struct task_struct* t, int on_cpu);

#ifdef CONFIG_LITMUS_LOCKING
void srp_ceiling_block(void);
#else
#define srp_ceiling_block() /* nothing */
#endif

#define bheap2task(hn) ((struct task_struct*) hn->value)

#ifdef CONFIG_NP_SECTION

static inline int is_kernel_np(struct task_struct *t)
{
	return tsk_rt(t)->kernel_np;
}

static inline int is_user_np(struct task_struct *t)
{
	return tsk_rt(t)->ctrl_page ? tsk_rt(t)->ctrl_page->sched.np.flag : 0;
}

static inline void request_exit_np(struct task_struct *t)
{
	if (is_user_np(t)) {
		/* Set the flag that tells user space to call
		 * into the kernel at the end of a critical section. */
		if (likely(tsk_rt(t)->ctrl_page)) {
			TRACE_TASK(t, "setting delayed_preemption flag\n");
			tsk_rt(t)->ctrl_page->sched.np.preempt = 1;
		}
	}
}

static inline void make_np(struct task_struct *t)
{
	tsk_rt(t)->kernel_np++;
}

/* Caller should check if preemption is necessary when
 * the function returns 0. */
static inline int take_np(struct task_struct *t)
{
	return --tsk_rt(t)->kernel_np;
}
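/*
 * Illustrative sketch (not part of the original header): kernel code
 * that must not be preempted for a short span could pair make_np() and
 * take_np() as below.  preemption_was_deferred() is a hypothetical
 * predicate standing in for whatever pending-preemption check the
 * caller uses.
 *
 *	make_np(t);
 *	... short critical section ...
 *	if (take_np(t) == 0 && preemption_was_deferred(t))
 *		schedule();	// count hit zero: honor the deferred preemption
 */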
/* returns 0 if remote CPU needs an IPI to preempt, 1 if no IPI is required */
static inline int request_exit_np_atomic(struct task_struct *t)
{
	union np_flag old, new;

	if (tsk_rt(t)->ctrl_page) {
		old.raw = tsk_rt(t)->ctrl_page->sched.raw;
		if (old.np.flag == 0) {
			/* no longer non-preemptive */
			return 0;
		} else if (old.np.preempt) {
			/* already set, nothing for us to do */
			return 1;
		} else {
			/* non preemptive and flag not set */
			new.raw = old.raw;
			new.np.preempt = 1;
			/* if we get old back, then we atomically set the flag */
			return cmpxchg(&tsk_rt(t)->ctrl_page->sched.raw,
				       old.raw, new.raw) == old.raw;

			/* If we raced with a concurrent change, then so be
			 * it. Deliver it by IPI. We don't want an unbounded
			 * retry loop here since tasks might exploit that to
			 * keep the kernel busy indefinitely. */
		}
	} else {
		return 0;
	}
}

#else

static inline int is_kernel_np(struct task_struct* t)
{
	return 0;
}

static inline int is_user_np(struct task_struct* t)
{
	return 0;
}

static inline void request_exit_np(struct task_struct *t)
{
	/* request_exit_np() shouldn't be called if !CONFIG_NP_SECTION */
	BUG();
}

static inline int request_exit_np_atomic(struct task_struct *t)
{
	return 0;
}

#endif

static inline void clear_exit_np(struct task_struct *t)
{
	if (likely(tsk_rt(t)->ctrl_page))
		tsk_rt(t)->ctrl_page->sched.np.preempt = 0;
}

static inline int is_np(struct task_struct *t)
{
#ifdef CONFIG_SCHED_DEBUG_TRACE
	int kernel, user;
	kernel = is_kernel_np(t);
	user   = is_user_np(t);
	if (kernel || user)
		TRACE_TASK(t, " is non-preemptive: kernel=%d user=%d\n",
			   kernel, user);
	return kernel || user;
#else
	return unlikely(is_kernel_np(t) || is_user_np(t));
#endif
}

static inline int is_present(struct task_struct* t)
{
	return t && tsk_rt(t)->present;
}

static inline int is_completed(struct task_struct* t)
{
	return t && tsk_rt(t)->completed;
}

/* make the unit explicit */
typedef unsigned long quanta_t;

enum round {
	FLOOR,
	CEIL
};

/* Tick period is used to convert ns-specified execution
 * costs and periods into tick-based equivalents.
 */
extern ktime_t tick_period;

static inline quanta_t time2quanta(lt_t time, enum round round)
{
	s64 quantum_length = ktime_to_ns(tick_period);

	if (do_div(time, quantum_length) && round == CEIL)
		time++;
	return (quanta_t) time;
}

/* By how much is cpu staggered behind CPU 0? */
u64 cpu_stagger_offset(int cpu);

static inline struct control_page* get_control_page(struct task_struct *t)
{
	return tsk_rt(t)->ctrl_page;
}

static inline int has_control_page(struct task_struct* t)
{
	return tsk_rt(t)->ctrl_page != NULL;
}

#ifdef CONFIG_SCHED_OVERHEAD_TRACE

#define TS_SYSCALL_IN_START						\
	if (has_control_page(current)) {				\
		__TS_SYSCALL_IN_START(&get_control_page(current)->ts_syscall_start); \
	}

#define TS_SYSCALL_IN_END						\
	if (has_control_page(current)) {				\
		uint64_t irqs;						\
		local_irq_disable();					\
		irqs = get_control_page(current)->irq_count -		\
			get_control_page(current)->irq_syscall_start;	\
		__TS_SYSCALL_IN_END(&irqs);				\
		local_irq_enable();					\
	}

#else

#define TS_SYSCALL_IN_START
#define TS_SYSCALL_IN_END

#endif

#endif
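/*
 * Illustrative sketch (not part of the original header): converting a
 * task's nanosecond-specified execution cost into scheduling quanta
 * with time2quanta().  Rounding up with CEIL avoids under-provisioning
 * the budget.
 *
 *	quanta_t wcet_quanta = time2quanta(get_exec_cost(t), CEIL);
 */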