author    Glenn Elliott <gelliott@cs.unc.edu>  2012-04-13 16:18:03 -0400
committer Glenn Elliott <gelliott@cs.unc.edu>  2012-04-13 16:18:03 -0400
commit    c0667dc4894e913048cf8904f0ce9a79b481b556 (patch)
tree      1803f6f9a6de45c949f57d1172aab4aa2546393b
parent    8eb55f8fa1a2c3854f0f77b9b8663178c0129f6c (diff)
branch    wip-ikglp

Move RSM and IKGLP implementations to their own .c files

Also reformatted the code to be slightly more compliant with
standard coding practice.
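For context, the relocated IKGLP entry points (declared in
include/litmus/ikglp_lock.h below) are reached through a struct
litmus_lock_ops table. A minimal sketch of that wiring follows; the ops
table name is hypothetical and the nested/DGL callbacks are omitted:

/* Hypothetical ops table binding the relocated IKGLP entry points.
 * Field names follow struct litmus_lock_ops in include/litmus/locking.h. */
static struct litmus_lock_ops ikglp_example_ops = {
	.lock       = ikglp_lock,    /* declared in ikglp_lock.h */
	.unlock     = ikglp_unlock,
	.close      = ikglp_close,
	.deallocate = ikglp_free,
};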
-rw-r--r--  include/litmus/ikglp_lock.h   |   97
-rw-r--r--  include/litmus/locking.h      |   94
-rw-r--r--  include/litmus/rsm_lock.h     |   54
-rw-r--r--  include/litmus/sched_plugin.h |   14
-rw-r--r--  litmus/Makefile               |    2
-rw-r--r--  litmus/edf_common.c           |   16
-rw-r--r--  litmus/ikglp_lock.c           | 1587
-rw-r--r--  litmus/litmus.c               |    2
-rw-r--r--  litmus/locking.c              |  276
-rw-r--r--  litmus/rsm_lock.c             |  774
-rw-r--r--  litmus/sched_gsn_edf.c        | 2645
-rw-r--r--  litmus/sched_plugin.c         |   34
12 files changed, 2885 insertions, 2710 deletions
diff --git a/include/litmus/ikglp_lock.h b/include/litmus/ikglp_lock.h
new file mode 100644
index 000000000000..c0cc04db1bc6
--- /dev/null
+++ b/include/litmus/ikglp_lock.h
@@ -0,0 +1,97 @@
1 | #ifndef LITMUS_IKGLP_H | ||
2 | #define LITMUS_IKGLP_H | ||
3 | |||
4 | #include <litmus/litmus.h> | ||
5 | #include <litmus/binheap.h> | ||
6 | #include <litmus/locking.h> | ||
7 | |||
8 | typedef struct ikglp_heap_node | ||
9 | { | ||
10 | struct task_struct *task; | ||
11 | struct binheap_node node; | ||
12 | } ikglp_heap_node_t; | ||
13 | |||
14 | struct fifo_queue; | ||
15 | struct ikglp_wait_state; | ||
16 | |||
17 | typedef struct ikglp_donee_heap_node | ||
18 | { | ||
19 | struct task_struct *task; | ||
20 | struct fifo_queue *fq; | ||
21 | struct ikglp_wait_state *donor_info; // cross-linked with ikglp_wait_state_t of donor | ||
22 | |||
23 | struct binheap_node node; | ||
24 | } ikglp_donee_heap_node_t; | ||
25 | |||
26 | // Maintains the state of a request as it goes through the IKGLP | ||
27 | typedef struct ikglp_wait_state { | ||
28 | struct task_struct *task; // pointer back to the requesting task | ||
29 | |||
30 | // Data for while waiting in FIFO Queue | ||
31 | wait_queue_t fq_node; | ||
32 | ikglp_heap_node_t global_heap_node; | ||
33 | ikglp_donee_heap_node_t donee_heap_node; | ||
34 | |||
35 | // Data for while waiting in PQ | ||
36 | ikglp_heap_node_t pq_node; | ||
37 | |||
38 | // Data for while waiting as a donor | ||
39 | ikglp_donee_heap_node_t *donee_info; // cross-linked with donee's ikglp_donee_heap_node_t | ||
40 | struct nested_info prio_donation; | ||
41 | struct binheap_node node; | ||
42 | } ikglp_wait_state_t; | ||
43 | |||
44 | /* struct for semaphore with priority inheritance */ | ||
45 | struct fifo_queue | ||
46 | { | ||
47 | wait_queue_head_t wait; | ||
48 | struct task_struct* owner; | ||
49 | |||
50 | // used for bookkeeping | ||
51 | ikglp_heap_node_t global_heap_node; | ||
52 | ikglp_donee_heap_node_t donee_heap_node; | ||
53 | |||
54 | struct task_struct* hp_waiter; | ||
55 | int count; /* number of waiters + holder */ | ||
56 | |||
57 | struct nested_info nest; | ||
58 | }; | ||
59 | |||
60 | struct ikglp_semaphore | ||
61 | { | ||
62 | struct litmus_lock litmus_lock; | ||
63 | |||
64 | raw_spinlock_t lock; | ||
65 | raw_spinlock_t real_lock; | ||
66 | |||
67 | int nr_replicas; // AKA k | ||
68 | int m; | ||
69 | |||
70 | int max_fifo_len; // max len of a fifo queue | ||
71 | |||
72 | struct binheap_handle top_m; // min heap, base prio | ||
73 | int top_m_size; // number of nodes in top_m | ||
74 | |||
75 | struct binheap_handle not_top_m; // max heap, base prio | ||
76 | |||
77 | struct binheap_handle donees; // min-heap, base prio | ||
78 | struct fifo_queue *shortest_fifo_queue; // pointer to shortest fifo queue | ||
79 | |||
80 | /* data structures for holding requests */ | ||
81 | struct fifo_queue *fifo_queues; // array nr_replicas in length | ||
82 | struct binheap_handle priority_queue; // max-heap, base prio | ||
83 | struct binheap_handle donors; // max-heap, base prio | ||
84 | }; | ||
85 | |||
86 | static inline struct ikglp_semaphore* ikglp_from_lock(struct litmus_lock* lock) | ||
87 | { | ||
88 | return container_of(lock, struct ikglp_semaphore, litmus_lock); | ||
89 | } | ||
90 | |||
91 | int ikglp_lock(struct litmus_lock* l); | ||
92 | int ikglp_unlock(struct litmus_lock* l); | ||
93 | int ikglp_close(struct litmus_lock* l); | ||
94 | void ikglp_free(struct litmus_lock* l); | ||
95 | struct litmus_lock* ikglp_new(int m, struct litmus_lock_ops*, void* __user arg); | ||
96 | |||
97 | #endif | ||
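The ikglp_from_lock() accessor above uses the standard container_of()
pattern to recover the semaphore state from the generic lock handle that
the ops callbacks receive. A minimal usage sketch (the callback name is
hypothetical):

/* Hypothetical callback illustrating ikglp_from_lock(); ikglp_lock()
 * in litmus/ikglp_lock.c opens the same way. */
static int ikglp_example_op(struct litmus_lock* l)
{
	struct ikglp_semaphore *sem = ikglp_from_lock(l);
	return sem->nr_replicas; /* e.g., report k */
}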
diff --git a/include/litmus/locking.h b/include/litmus/locking.h
index 972cbdb7fdd5..c2324c4ccb8a 100644
--- a/include/litmus/locking.h
+++ b/include/litmus/locking.h
@@ -13,6 +13,15 @@ struct nested_info
13 | struct task_struct **hp_waiter_ptr; | 13 | struct task_struct **hp_waiter_ptr; |
14 | struct binheap_node hp_binheap_node; | 14 | struct binheap_node hp_binheap_node; |
15 | }; | 15 | }; |
16 | |||
17 | static inline struct task_struct* top_priority(struct binheap_handle* handle) { | ||
18 | if(!binheap_empty(handle)) { | ||
19 | return (struct task_struct*)(binheap_top_entry(handle, struct nested_info, hp_binheap_node)->hp_waiter_eff_prio); | ||
20 | } | ||
21 | return NULL; | ||
22 | } | ||
23 | |||
24 | void print_hp_waiters(struct binheap_node* n, int depth); | ||
16 | #endif | 25 | #endif |
17 | 26 | ||
18 | 27 | ||
@@ -23,16 +32,14 @@ struct litmus_lock { | |||
23 | struct litmus_lock_ops *ops; | 32 | struct litmus_lock_ops *ops; |
24 | int type; | 33 | int type; |
25 | 34 | ||
26 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | ||
27 | int ident; | 35 | int ident; |
28 | 36 | ||
37 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | ||
29 | struct nested_info nest; | 38 | struct nested_info nest; |
30 | |||
31 | //#ifdef CONFIG_DEBUG_SPINLOCK | 39 | //#ifdef CONFIG_DEBUG_SPINLOCK |
32 | char cheat_lockdep[2]; | 40 | char cheat_lockdep[2]; |
33 | struct lock_class_key key; | 41 | struct lock_class_key key; |
34 | //#endif | 42 | //#endif |
35 | |||
36 | #endif | 43 | #endif |
37 | }; | 44 | }; |
38 | 45 | ||
@@ -41,36 +48,42 @@ struct litmus_lock { | |||
41 | #define MAX_DGL_SIZE CONFIG_LITMUS_MAX_DGL_SIZE | 48 | #define MAX_DGL_SIZE CONFIG_LITMUS_MAX_DGL_SIZE |
42 | 49 | ||
43 | typedef struct dgl_wait_state { | 50 | typedef struct dgl_wait_state { |
44 | struct task_struct *task; | 51 | struct task_struct *task; /* task waiting on DGL */ |
45 | struct litmus_lock *locks[MAX_DGL_SIZE]; | 52 | struct litmus_lock *locks[MAX_DGL_SIZE]; /* requested locks in DGL */ |
46 | int size; | 53 | int size; /* size of the DGL */ |
47 | int nr_remaining; | 54 | int nr_remaining; /* nr of locks remaining before DGL is complete */
48 | 55 | int last_primary; /* index of the lock in locks[] that holds the active priority */ | 
49 | int last_primary; | ||
50 | |||
51 | wait_queue_t wq_nodes[MAX_DGL_SIZE]; | 56 | wait_queue_t wq_nodes[MAX_DGL_SIZE]; |
52 | } dgl_wait_state_t; | 57 | } dgl_wait_state_t; |
53 | 58 | ||
54 | void wake_or_wait_on_next_lock(dgl_wait_state_t *dgl_wait); | 59 | void wake_or_wait_on_next_lock(dgl_wait_state_t *dgl_wait); |
55 | void select_next_lock(dgl_wait_state_t* dgl_wait, struct litmus_lock* prev_lock); | 60 | void select_next_lock(dgl_wait_state_t* dgl_wait /*, struct litmus_lock* prev_lock*/); |
56 | 61 | ||
57 | void init_dgl_waitqueue_entry(wait_queue_t *wq_node, dgl_wait_state_t* dgl_wait); | 62 | void init_dgl_waitqueue_entry(wait_queue_t *wq_node, dgl_wait_state_t* dgl_wait); |
58 | int dgl_wake_up(wait_queue_t *wq_node, unsigned mode, int sync, void *key); | 63 | int dgl_wake_up(wait_queue_t *wq_node, unsigned mode, int sync, void *key); |
59 | void __waitqueue_dgl_remove_first(wait_queue_head_t *wq, dgl_wait_state_t** dgl_wait, struct task_struct **task); | 64 | void __waitqueue_dgl_remove_first(wait_queue_head_t *wq, dgl_wait_state_t** dgl_wait, struct task_struct **task); |
60 | #endif | 65 | #endif |
61 | 66 | ||
67 | typedef int (*lock_op_t)(struct litmus_lock *l); | ||
68 | typedef lock_op_t lock_close_t; | ||
69 | typedef lock_op_t lock_lock_t; | ||
70 | typedef lock_op_t lock_unlock_t; | ||
71 | |||
72 | typedef int (*lock_open_t)(struct litmus_lock *l, void* __user arg); | ||
73 | typedef void (*lock_free_t)(struct litmus_lock *l); | ||
74 | |||
62 | struct litmus_lock_ops { | 75 | struct litmus_lock_ops { |
63 | /* Current task tries to obtain / drop a reference to a lock. | 76 | /* Current task tries to obtain / drop a reference to a lock. |
64 | * Optional methods, allowed by default. */ | 77 | * Optional methods, allowed by default. */ |
65 | int (*open)(struct litmus_lock*, void* __user); | 78 | lock_open_t open; |
66 | int (*close)(struct litmus_lock*); | 79 | lock_close_t close; |
67 | 80 | ||
68 | /* Current tries to lock/unlock this lock (mandatory methods). */ | 81 | /* Current tries to lock/unlock this lock (mandatory methods). */ |
69 | int (*lock)(struct litmus_lock*); | 82 | lock_lock_t lock; |
70 | int (*unlock)(struct litmus_lock*); | 83 | lock_unlock_t unlock; |
71 | 84 | ||
72 | /* The lock is no longer being referenced (mandatory method). */ | 85 | /* The lock is no longer being referenced (mandatory method). */ |
73 | void (*deallocate)(struct litmus_lock*); | 86 | lock_free_t deallocate; |
74 | 87 | ||
75 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | 88 | #ifdef CONFIG_LITMUS_NESTED_LOCKING |
76 | void (*propagate_increase_inheritance)(struct litmus_lock* l, struct task_struct* t, raw_spinlock_t* to_unlock, unsigned long irqflags); | 89 | void (*propagate_increase_inheritance)(struct litmus_lock* l, struct task_struct* t, raw_spinlock_t* to_unlock, unsigned long irqflags); |
@@ -86,7 +99,36 @@ struct litmus_lock_ops { | |||
86 | }; | 99 | }; |
87 | 100 | ||
88 | 101 | ||
89 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | 102 | /* |
103 | Nested inheritance can be achieved with fine-grain locking when there is | ||
104 | no need for DGL support, presuming locks are acquired in a partial order | ||
105 | (no cycles!). However, DGLs allow locks to be acquired in any order. This | ||
106 | makes nested inheritance very difficult to realize with fine-grain locks | ||
107 | (we don't yet know a solution), so we use a big lock instead. | ||
108 | |||
109 | The code contains both the fine-grain and coarse-grain methods side-by-side. | ||
110 | Individual lock operations are *NOT* surrounded by ifdef/endif blocks, in | ||
111 | order to keep the code readable. However, this leads to the odd situation | ||
112 | where both code paths appear in the source as if they were active together. | ||
113 | |||
114 | THIS IS NOT REALLY THE CASE! ONLY ONE CODE PATH IS ACTUALLY ACTIVE! | ||
115 | |||
116 | Example: | ||
117 | lock_global_irqsave(coarseLock, flags); | ||
118 | lock_fine_irqsave(fineLock, flags); | ||
119 | |||
120 | Reality (coarse): | ||
121 | lock_global_irqsave(coarseLock, flags); | ||
122 | //lock_fine_irqsave(fineLock, flags); | ||
123 | |||
124 | Reality (fine): | ||
125 | //lock_global_irqsave(coarseLock, flags); | ||
126 | lock_fine_irqsave(fineLock, flags); | ||
127 | |||
128 | Be careful when you read code involving nested inheritance. | ||
129 | */ | ||
130 | #if defined(CONFIG_LITMUS_DGL_SUPPORT) | ||
131 | /* DGL requires a big lock to implement nested inheritance */ | ||
90 | #define lock_global_irqsave(lock, flags) raw_spin_lock_irqsave((lock), (flags)) | 132 | #define lock_global_irqsave(lock, flags) raw_spin_lock_irqsave((lock), (flags)) |
91 | #define lock_global(lock) raw_spin_lock((lock)) | 133 | #define lock_global(lock) raw_spin_lock((lock)) |
92 | #define unlock_global_irqrestore(lock, flags) raw_spin_unlock_irqrestore((lock), (flags)) | 134 | #define unlock_global_irqrestore(lock, flags) raw_spin_unlock_irqrestore((lock), (flags)) |
@@ -98,8 +140,8 @@ struct litmus_lock_ops { | |||
98 | #define unlock_fine_irqrestore(lock, flags) | 140 | #define unlock_fine_irqrestore(lock, flags) |
99 | #define unlock_fine(lock) | 141 | #define unlock_fine(lock) |
100 | 142 | ||
101 | #elif CONFIG_LITMUS_NESTED_LOCKING | 143 | #elif defined(CONFIG_LITMUS_NESTED_LOCKING) |
102 | 144 | /* Use fine-grain locking when DGLs are disabled. */ | |
103 | /* global lock operations are no-ops without DGL support */ | 145 | /* global lock operations are no-ops without DGL support */
104 | #define lock_global_irqsave(lock, flags) | 146 | #define lock_global_irqsave(lock, flags) |
105 | #define lock_global(lock) | 147 | #define lock_global(lock) |
@@ -116,17 +158,3 @@ struct litmus_lock_ops { | |||
116 | 158 | ||
117 | #endif | 159 | #endif |
118 | 160 | ||
119 | |||
120 | |||
121 | |||
122 | |||
123 | |||
124 | |||
125 | |||
126 | |||
127 | |||
128 | |||
129 | |||
130 | |||
131 | |||
132 | |||
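The comment block above describes the coarse/fine dual-path convention;
a compact sketch of how a lock operation reads under it (all names here
are illustrative, not from this patch):

/* Sketch: both macro families appear in the source, but kconfig decides
 * which one expands to a real raw_spin_lock.  With DGL support the
 * global lock is real and the fine lock is a no-op; without DGL support
 * it is the reverse. */
static void example_critical_section(raw_spinlock_t *global,
                                     raw_spinlock_t *fine)
{
	unsigned long flags;

	lock_global_irqsave(global, flags); /* real only with DGL    */
	lock_fine_irqsave(fine, flags);     /* real only without DGL */

	/* ... manipulate shared queue state ... */

	unlock_fine_irqrestore(fine, flags);
	unlock_global_irqrestore(global, flags);
}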
diff --git a/include/litmus/rsm_lock.h b/include/litmus/rsm_lock.h
new file mode 100644
index 000000000000..a15189683de4
--- /dev/null
+++ b/include/litmus/rsm_lock.h
@@ -0,0 +1,54 @@
1 | #ifndef LITMUS_RSM_H | ||
2 | #define LITMUS_RSM_H | ||
3 | |||
4 | #include <litmus/litmus.h> | ||
5 | #include <litmus/binheap.h> | ||
6 | #include <litmus/locking.h> | ||
7 | |||
8 | /* struct for semaphore with priority inheritance */ | ||
9 | struct rsm_mutex { | ||
10 | struct litmus_lock litmus_lock; | ||
11 | |||
12 | /* current resource holder */ | ||
13 | struct task_struct *owner; | ||
14 | |||
15 | /* highest-priority waiter */ | ||
16 | struct task_struct *hp_waiter; | ||
17 | |||
18 | /* FIFO queue of waiting tasks -- for now; may be ordered by time stamp in the future. */ | ||
19 | wait_queue_head_t wait; | ||
20 | |||
21 | /* we do some nesting within spinlocks, so we can't use the normal | ||
22 | sleeplocks found in wait_queue_head_t. */ | ||
23 | raw_spinlock_t lock; | ||
24 | }; | ||
25 | |||
26 | static inline struct rsm_mutex* rsm_mutex_from_lock(struct litmus_lock* lock) | ||
27 | { | ||
28 | return container_of(lock, struct rsm_mutex, litmus_lock); | ||
29 | } | ||
30 | |||
31 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
32 | int rsm_mutex_is_owner(struct litmus_lock *l, struct task_struct *t); | ||
33 | int rsm_mutex_dgl_lock(struct litmus_lock *l, dgl_wait_state_t* dgl_wait, wait_queue_t* wq_node); | ||
34 | void rsm_mutex_enable_priority(struct litmus_lock *l, dgl_wait_state_t* dgl_wait); | ||
35 | #endif | ||
36 | |||
37 | void rsm_mutex_propagate_increase_inheritance(struct litmus_lock* l, | ||
38 | struct task_struct* t, | ||
39 | raw_spinlock_t* to_unlock, | ||
40 | unsigned long irqflags); | ||
41 | |||
42 | void rsm_mutex_propagate_decrease_inheritance(struct litmus_lock* l, | ||
43 | struct task_struct* t, | ||
44 | raw_spinlock_t* to_unlock, | ||
45 | unsigned long irqflags); | ||
46 | |||
47 | int rsm_mutex_lock(struct litmus_lock* l); | ||
48 | int rsm_mutex_unlock(struct litmus_lock* l); | ||
49 | int rsm_mutex_close(struct litmus_lock* l); | ||
50 | void rsm_mutex_free(struct litmus_lock* l); | ||
51 | struct litmus_lock* rsm_mutex_new(struct litmus_lock_ops*); | ||
52 | |||
53 | |||
54 | #endif \ No newline at end of file | ||
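Since rsm_mutex_new() takes the ops table as an argument, a plugin's
allocate_lock() backend can construct one directly. A hedged sketch
(the ops table and error handling here are illustrative):

/* Hypothetical allocate_lock() fragment; 'example_rsm_ops' stands in
 * for an ops table built from the entry points declared above. */
static long example_allocate_rsm(struct litmus_lock **lock)
{
	struct litmus_lock *l = rsm_mutex_new(&example_rsm_ops);
	if (!l)
		return -ENOMEM;
	*lock = l;
	return 0;
}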
diff --git a/include/litmus/sched_plugin.h b/include/litmus/sched_plugin.h
index ae11e3ac9266..8e5167970340 100644
--- a/include/litmus/sched_plugin.h
+++ b/include/litmus/sched_plugin.h
@@ -58,6 +58,13 @@ typedef void (*task_exit_t) (struct task_struct *);
58 | typedef long (*allocate_lock_t) (struct litmus_lock **lock, int type, | 58 | typedef long (*allocate_lock_t) (struct litmus_lock **lock, int type, |
59 | void* __user config); | 59 | void* __user config); |
60 | 60 | ||
61 | typedef void (*increase_prio_t)(struct task_struct* t, struct task_struct* prio_inh); | ||
62 | typedef void (*decrease_prio_t)(struct task_struct* t, struct task_struct* prio_inh); | ||
63 | typedef void (*nested_increase_prio_t)(struct task_struct* t, struct task_struct* prio_inh, | ||
64 | raw_spinlock_t *to_unlock, unsigned long irqflags); | ||
65 | typedef void (*nested_decrease_prio_t)(struct task_struct* t, struct task_struct* prio_inh, | ||
66 | raw_spinlock_t *to_unlock, unsigned long irqflags); | ||
67 | |||
61 | typedef raw_spinlock_t* (*get_dgl_spinlock_t) (struct task_struct *t); | 68 | typedef raw_spinlock_t* (*get_dgl_spinlock_t) (struct task_struct *t); |
62 | 69 | ||
63 | /********************* sys call backends ********************/ | 70 | /********************* sys call backends ********************/ |
@@ -97,8 +104,13 @@ struct sched_plugin { | |||
97 | #ifdef CONFIG_LITMUS_LOCKING | 104 | #ifdef CONFIG_LITMUS_LOCKING |
98 | /* locking protocols */ | 105 | /* locking protocols */ |
99 | allocate_lock_t allocate_lock; | 106 | allocate_lock_t allocate_lock; |
107 | increase_prio_t increase_prio; | ||
108 | decrease_prio_t decrease_prio; | ||
109 | #endif | ||
110 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | ||
111 | nested_increase_prio_t nested_increase_prio; | ||
112 | nested_decrease_prio_t nested_decrease_prio; | ||
100 | #endif | 113 | #endif |
101 | |||
102 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | 114 | #ifdef CONFIG_LITMUS_DGL_SUPPORT |
103 | get_dgl_spinlock_t get_dgl_spinlock; | 115 | get_dgl_spinlock_t get_dgl_spinlock; |
104 | #endif | 116 | #endif |
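The new hooks give locking code a uniform way to raise and lower a
task's effective priority through the active plugin. A sketch of a
plugin advertising them (function names are placeholders, not part of
this patch):

/* Hypothetical plugin definition wiring the new priority hooks. */
static struct sched_plugin example_plugin = {
	.allocate_lock        = example_allocate_lock,
	.increase_prio        = example_increase_prio,
	.decrease_prio        = example_decrease_prio,
#ifdef CONFIG_LITMUS_NESTED_LOCKING
	.nested_increase_prio = example_nested_increase_prio,
	.nested_decrease_prio = example_nested_decrease_prio,
#endif
};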
diff --git a/litmus/Makefile b/litmus/Makefile
index b05f7982d823..c2449a761ea4 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -28,3 +28,5 @@ obj-$(CONFIG_FEATHER_TRACE) += ft_event.o ftdev.o
28 | obj-$(CONFIG_SCHED_TASK_TRACE) += sched_task_trace.o | 28 | obj-$(CONFIG_SCHED_TASK_TRACE) += sched_task_trace.o |
29 | obj-$(CONFIG_SCHED_DEBUG_TRACE) += sched_trace.o | 29 | obj-$(CONFIG_SCHED_DEBUG_TRACE) += sched_trace.o |
30 | obj-$(CONFIG_SCHED_OVERHEAD_TRACE) += trace.o | 30 | obj-$(CONFIG_SCHED_OVERHEAD_TRACE) += trace.o |
31 | |||
32 | obj-$(CONFIG_LITMUS_NESTED_LOCKING) += rsm_lock.o ikglp_lock.o | ||
diff --git a/litmus/edf_common.c b/litmus/edf_common.c
index 5ea6b1bc7f24..4b65be7302be 100644
--- a/litmus/edf_common.c
+++ b/litmus/edf_common.c
@@ -60,7 +60,7 @@ int edf_higher_prio(struct task_struct* first, struct task_struct* second)
60 | first_task = first->rt_param.inh_task; | 60 | first_task = first->rt_param.inh_task; |
61 | } | 61 | } |
62 | if (unlikely(second->rt_param.inh_task) | 62 | if (unlikely(second->rt_param.inh_task) |
63 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | 63 | #ifdef CONFIG_LITMUS_NESTED_LOCKING |
64 | && (second_mode == EFFECTIVE) | 64 | && (second_mode == EFFECTIVE) |
65 | #endif | 65 | #endif |
66 | ) { | 66 | ) { |
@@ -85,24 +85,24 @@ int edf_higher_prio(struct task_struct* first, struct task_struct* second) | |||
85 | 85 | ||
86 | // // rate-monotonic for testing | 86 | // // rate-monotonic for testing |
87 | // return !is_realtime(second_task) || | 87 | // return !is_realtime(second_task) || |
88 | // | 88 | // |
89 | // /* is the deadline of the first task earlier? | 89 | // /* is the deadline of the first task earlier? |
90 | // * Then it has higher priority. | 90 | // * Then it has higher priority. |
91 | // */ | 91 | // */ |
92 | // shorter_period(first_task, second_task) || | 92 | // shorter_period(first_task, second_task) || |
93 | // | 93 | // |
94 | // /* Do we have a deadline tie? | 94 | // /* Do we have a deadline tie? |
95 | // * Then break by PID. | 95 | // * Then break by PID. |
96 | // */ | 96 | // */ |
97 | // (get_period(first_task) == get_period(second_task) && | 97 | // (get_period(first_task) == get_period(second_task) && |
98 | // (first_task->pid < second_task->pid || | 98 | // (first_task->pid < second_task->pid || |
99 | // | 99 | // |
100 | // /* If the PIDs are the same then the task with the EFFECTIVE | 100 | // /* If the PIDs are the same then the task with the EFFECTIVE |
101 | // * priority wins. | 101 | // * priority wins. |
102 | // */ | 102 | // */ |
103 | // (first_task->pid == second_task->pid && | 103 | // (first_task->pid == second_task->pid && |
104 | // !second->rt_param.inh_task))); | 104 | // !second->rt_param.inh_task))); |
105 | 105 | ||
106 | return !is_realtime(second_task) || | 106 | return !is_realtime(second_task) || |
107 | 107 | ||
108 | /* is the deadline of the first task earlier? | 108 | /* is the deadline of the first task earlier? |
@@ -134,7 +134,7 @@ int edf_max_heap_order(struct binheap_node *a, struct binheap_node *b) | |||
134 | { | 134 | { |
135 | struct nested_info *l_a = (struct nested_info *)binheap_entry(a, struct nested_info, hp_binheap_node); | 135 | struct nested_info *l_a = (struct nested_info *)binheap_entry(a, struct nested_info, hp_binheap_node); |
136 | struct nested_info *l_b = (struct nested_info *)binheap_entry(b, struct nested_info, hp_binheap_node); | 136 | struct nested_info *l_b = (struct nested_info *)binheap_entry(b, struct nested_info, hp_binheap_node); |
137 | 137 | ||
138 | return __edf_higher_prio(l_a->hp_waiter_eff_prio, EFFECTIVE, l_b->hp_waiter_eff_prio, EFFECTIVE); | 138 | return __edf_higher_prio(l_a->hp_waiter_eff_prio, EFFECTIVE, l_b->hp_waiter_eff_prio, EFFECTIVE); |
139 | } | 139 | } |
140 | 140 | ||
@@ -147,7 +147,7 @@ int edf_max_heap_base_priority_order(struct binheap_node *a, struct binheap_node | |||
147 | { | 147 | { |
148 | struct nested_info *l_a = (struct nested_info *)binheap_entry(a, struct nested_info, hp_binheap_node); | 148 | struct nested_info *l_a = (struct nested_info *)binheap_entry(a, struct nested_info, hp_binheap_node); |
149 | struct nested_info *l_b = (struct nested_info *)binheap_entry(b, struct nested_info, hp_binheap_node); | 149 | struct nested_info *l_b = (struct nested_info *)binheap_entry(b, struct nested_info, hp_binheap_node); |
150 | 150 | ||
151 | return __edf_higher_prio(l_a->hp_waiter_eff_prio, BASE, l_b->hp_waiter_eff_prio, BASE); | 151 | return __edf_higher_prio(l_a->hp_waiter_eff_prio, BASE, l_b->hp_waiter_eff_prio, BASE); |
152 | } | 152 | } |
153 | 153 | ||
diff --git a/litmus/ikglp_lock.c b/litmus/ikglp_lock.c
new file mode 100644
index 000000000000..0ae9994111fb
--- /dev/null
+++ b/litmus/ikglp_lock.c
@@ -0,0 +1,1587 @@
1 | #include <linux/slab.h> | ||
2 | #include <linux/uaccess.h> | ||
3 | |||
4 | #include <litmus/trace.h> | ||
5 | #include <litmus/sched_plugin.h> | ||
6 | #include <litmus/ikglp_lock.h> | ||
7 | |||
8 | #include <litmus/edf_common.h> | ||
9 | |||
10 | int ikglp_edf_max_heap_base_priority_order(struct binheap_node *a, | ||
11 | struct binheap_node *b) | ||
12 | { | ||
13 | ikglp_heap_node_t *d_a = binheap_entry(a, ikglp_heap_node_t, node); | ||
14 | ikglp_heap_node_t *d_b = binheap_entry(b, ikglp_heap_node_t, node); | ||
15 | |||
16 | BUG_ON(!d_a); | ||
17 | BUG_ON(!d_b); | ||
18 | |||
19 | return __edf_higher_prio(d_a->task, BASE, d_b->task, BASE); | ||
20 | } | ||
21 | |||
22 | int ikglp_edf_min_heap_base_priority_order(struct binheap_node *a, | ||
23 | struct binheap_node *b) | ||
24 | { | ||
25 | ikglp_heap_node_t *d_a = binheap_entry(a, ikglp_heap_node_t, node); | ||
26 | ikglp_heap_node_t *d_b = binheap_entry(b, ikglp_heap_node_t, node); | ||
27 | |||
28 | return __edf_higher_prio(d_b->task, BASE, d_a->task, BASE); | ||
29 | } | ||
30 | |||
31 | int ikglp_donor_edf_max_heap_base_priority_order(struct binheap_node *a, | ||
32 | struct binheap_node *b) | ||
33 | { | ||
34 | ikglp_wait_state_t *d_a = binheap_entry(a, ikglp_wait_state_t, node); | ||
35 | ikglp_wait_state_t *d_b = binheap_entry(b, ikglp_wait_state_t, node); | ||
36 | |||
37 | return __edf_higher_prio(d_a->task, BASE, d_b->task, BASE); | ||
38 | } | ||
39 | |||
40 | |||
41 | int ikglp_edf_min_heap_donee_order(struct binheap_node *a, | ||
42 | struct binheap_node *b) | ||
43 | { | ||
44 | struct task_struct *prio_a, *prio_b; | ||
45 | |||
46 | ikglp_donee_heap_node_t *d_a = | ||
47 | binheap_entry(a, ikglp_donee_heap_node_t, node); | ||
48 | ikglp_donee_heap_node_t *d_b = | ||
49 | binheap_entry(b, ikglp_donee_heap_node_t, node); | ||
50 | |||
51 | if(!d_a->donor_info) { | ||
52 | prio_a = d_a->task; | ||
53 | } | ||
54 | else { | ||
55 | prio_a = d_a->donor_info->task; | ||
56 | BUG_ON(d_a->task != d_a->donor_info->donee_info->task); | ||
57 | } | ||
58 | |||
59 | if(!d_b->donor_info) { | ||
60 | prio_b = d_b->task; | ||
61 | } | ||
62 | else { | ||
63 | prio_b = d_b->donor_info->task; | ||
64 | BUG_ON(d_b->task != d_b->donor_info->donee_info->task); | ||
65 | } | ||
66 | |||
67 | // note reversed order | ||
68 | return __edf_higher_prio(prio_b, BASE, prio_a, BASE); | ||
69 | } | ||
70 | |||
71 | |||
72 | |||
73 | static inline int ikglp_get_idx(struct ikglp_semaphore *sem, | ||
74 | struct fifo_queue *queue) | ||
75 | { | ||
76 | return (queue - &sem->fifo_queues[0]); | ||
77 | } | ||
78 | |||
79 | static inline struct fifo_queue* ikglp_get_queue(struct ikglp_semaphore *sem, | ||
80 | struct task_struct *holder) | ||
81 | { | ||
82 | int i; | ||
83 | for(i = 0; i < sem->nr_replicas; ++i) | ||
84 | if(sem->fifo_queues[i].owner == holder) | ||
85 | return(&sem->fifo_queues[i]); | ||
86 | return(NULL); | ||
87 | } | ||
88 | |||
89 | |||
90 | |||
91 | static struct task_struct* ikglp_find_hp_waiter(struct fifo_queue *kqueue, | ||
92 | struct task_struct *skip) | ||
93 | { | ||
94 | struct list_head *pos; | ||
95 | struct task_struct *queued, *found = NULL; | ||
96 | |||
97 | list_for_each(pos, &kqueue->wait.task_list) { | ||
98 | queued = (struct task_struct*) list_entry(pos, | ||
99 | wait_queue_t, task_list)->private; | ||
100 | |||
101 | /* Compare task prios, find high prio task. */ | ||
102 | if (queued != skip && edf_higher_prio(queued, found)) | ||
103 | found = queued; | ||
104 | } | ||
105 | return found; | ||
106 | } | ||
107 | |||
108 | static struct fifo_queue* ikglp_find_shortest(struct ikglp_semaphore *sem, | ||
109 | struct fifo_queue *search_start) | ||
110 | { | ||
111 | // we start our search at search_start instead of at the beginning of the | ||
112 | // queue list to load-balance across all resources. | ||
113 | struct fifo_queue* step = search_start; | ||
114 | struct fifo_queue* shortest = sem->shortest_fifo_queue; | ||
115 | |||
116 | do { | ||
117 | step = (step+1 != &sem->fifo_queues[sem->nr_replicas]) ? | ||
118 | step+1 : &sem->fifo_queues[0]; | ||
119 | |||
120 | if(step->count < shortest->count) { | ||
121 | shortest = step; | ||
122 | if(step->count == 0) | ||
123 | break; /* can't get any shorter */ | ||
124 | } | ||
125 | |||
126 | }while(step != search_start); | ||
127 | |||
128 | return(shortest); | ||
129 | } | ||
130 | |||
131 | static inline struct task_struct* ikglp_mth_highest(struct ikglp_semaphore *sem) | ||
132 | { | ||
133 | return binheap_top_entry(&sem->top_m, ikglp_heap_node_t, node)->task; | ||
134 | } | ||
135 | |||
136 | |||
137 | |||
138 | #if 0 | ||
139 | static void print_global_list(struct binheap_node* n, int depth) | ||
140 | { | ||
141 | ikglp_heap_node_t *global_heap_node; | ||
142 | char padding[81] = " "; | ||
143 | |||
144 | if(n == NULL) { | ||
145 | TRACE_CUR("+-> %p\n", NULL); | ||
146 | return; | ||
147 | } | ||
148 | |||
149 | global_heap_node = binheap_entry(n, ikglp_heap_node_t, node); | ||
150 | |||
151 | if(depth*2 <= 80) | ||
152 | padding[depth*2] = '\0'; | ||
153 | |||
154 | TRACE_CUR("%s+-> %s/%d\n", | ||
155 | padding, | ||
156 | global_heap_node->task->comm, | ||
157 | global_heap_node->task->pid); | ||
158 | |||
159 | if(n->left) print_global_list(n->left, depth+1); | ||
160 | if(n->right) print_global_list(n->right, depth+1); | ||
161 | } | ||
162 | |||
163 | static void print_donees(struct ikglp_semaphore *sem, struct binheap_node *n, int depth) | ||
164 | { | ||
165 | ikglp_donee_heap_node_t *donee_node; | ||
166 | char padding[81] = " "; | ||
167 | struct task_struct* donor = NULL; | ||
168 | |||
169 | if(n == NULL) { | ||
170 | TRACE_CUR("+-> %p\n", NULL); | ||
171 | return; | ||
172 | } | ||
173 | |||
174 | donee_node = binheap_entry(n, ikglp_donee_heap_node_t, node); | ||
175 | |||
176 | if(depth*2 <= 80) | ||
177 | padding[depth*2] = '\0'; | ||
178 | |||
179 | if(donee_node->donor_info) { | ||
180 | donor = donee_node->donor_info->task; | ||
181 | } | ||
182 | |||
183 | TRACE_CUR("%s+-> %s/%d (d: %s/%d) (fq: %d)\n", | ||
184 | padding, | ||
185 | donee_node->task->comm, | ||
186 | donee_node->task->pid, | ||
187 | (donor) ? donor->comm : "nil", | ||
188 | (donor) ? donor->pid : -1, | ||
189 | ikglp_get_idx(sem, donee_node->fq)); | ||
190 | |||
191 | if(n->left) print_donees(sem, n->left, depth+1); | ||
192 | if(n->right) print_donees(sem, n->right, depth+1); | ||
193 | } | ||
194 | |||
195 | static void print_donors(struct binheap_node *n, int depth) | ||
196 | { | ||
197 | ikglp_wait_state_t *donor_node; | ||
198 | char padding[81] = " "; | ||
199 | |||
200 | if(n == NULL) { | ||
201 | TRACE_CUR("+-> %p\n", NULL); | ||
202 | return; | ||
203 | } | ||
204 | |||
205 | donor_node = binheap_entry(n, ikglp_wait_state_t, node); | ||
206 | |||
207 | if(depth*2 <= 80) | ||
208 | padding[depth*2] = '\0'; | ||
209 | |||
210 | |||
211 | TRACE_CUR("%s+-> %s/%d (donee: %s/%d)\n", | ||
212 | padding, | ||
213 | donor_node->task->comm, | ||
214 | donor_node->task->pid, | ||
215 | donor_node->donee_info->task->comm, | ||
216 | donor_node->donee_info->task->pid); | ||
217 | |||
218 | if(n->left) print_donors(n->left, depth+1); | ||
219 | if(n->right) print_donors(n->right, depth+1); | ||
220 | } | ||
221 | #endif | ||
222 | |||
223 | static void ikglp_add_global_list(struct ikglp_semaphore *sem, | ||
224 | struct task_struct *t, | ||
225 | ikglp_heap_node_t *node) | ||
226 | { | ||
227 | |||
228 | |||
229 | node->task = t; | ||
230 | INIT_BINHEAP_NODE(&node->node); | ||
231 | |||
232 | if(sem->top_m_size < sem->m) { | ||
233 | TRACE_CUR("Trivially adding %s/%d to top-m global list.\n", | ||
234 | t->comm, t->pid); | ||
235 | // TRACE_CUR("Top-M Before (size = %d):\n", sem->top_m_size); | ||
236 | // print_global_list(sem->top_m.root, 1); | ||
237 | |||
238 | binheap_add(&node->node, &sem->top_m, ikglp_heap_node_t, node); | ||
239 | ++(sem->top_m_size); | ||
240 | |||
241 | // TRACE_CUR("Top-M After (size = %d):\n", sem->top_m_size); | ||
242 | // print_global_list(sem->top_m.root, 1); | ||
243 | } | ||
244 | else if(__edf_higher_prio(t, BASE, ikglp_mth_highest(sem), BASE)) { | ||
245 | ikglp_heap_node_t *evicted = | ||
246 | binheap_top_entry(&sem->top_m, ikglp_heap_node_t, node); | ||
247 | |||
248 | TRACE_CUR("Adding %s/%d to top-m and evicting %s/%d.\n", | ||
249 | t->comm, t->pid, | ||
250 | evicted->task->comm, evicted->task->pid); | ||
251 | |||
252 | // TRACE_CUR("Not-Top-M Before:\n"); | ||
253 | // print_global_list(sem->not_top_m.root, 1); | ||
254 | // TRACE_CUR("Top-M Before (size = %d):\n", sem->top_m_size); | ||
255 | // print_global_list(sem->top_m.root, 1); | ||
256 | |||
257 | |||
258 | binheap_delete_root(&sem->top_m, ikglp_heap_node_t, node); | ||
259 | INIT_BINHEAP_NODE(&evicted->node); | ||
260 | binheap_add(&evicted->node, &sem->not_top_m, ikglp_heap_node_t, node); | ||
261 | |||
262 | binheap_add(&node->node, &sem->top_m, ikglp_heap_node_t, node); | ||
263 | |||
264 | // TRACE_CUR("Top-M After (size = %d):\n", sem->top_m_size); | ||
265 | // print_global_list(sem->top_m.root, 1); | ||
266 | // TRACE_CUR("Not-Top-M After:\n"); | ||
267 | // print_global_list(sem->not_top_m.root, 1); | ||
268 | } | ||
269 | else { | ||
270 | TRACE_CUR("Trivially adding %s/%d to not-top-m global list.\n", | ||
271 | t->comm, t->pid); | ||
272 | // TRACE_CUR("Not-Top-M Before:\n"); | ||
273 | // print_global_list(sem->not_top_m.root, 1); | ||
274 | |||
275 | binheap_add(&node->node, &sem->not_top_m, ikglp_heap_node_t, node); | ||
276 | |||
277 | // TRACE_CUR("Not-Top-M After:\n"); | ||
278 | // print_global_list(sem->not_top_m.root, 1); | ||
279 | } | ||
280 | } | ||
281 | |||
282 | |||
283 | static void ikglp_del_global_list(struct ikglp_semaphore *sem, | ||
284 | struct task_struct *t, | ||
285 | ikglp_heap_node_t *node) | ||
286 | { | ||
287 | BUG_ON(!binheap_is_in_heap(&node->node)); | ||
288 | |||
289 | TRACE_CUR("Removing %s/%d from global list.\n", t->comm, t->pid); | ||
290 | |||
291 | if(binheap_is_in_this_heap(&node->node, &sem->top_m)) { | ||
292 | TRACE_CUR("%s/%d is in top-m\n", t->comm, t->pid); | ||
293 | |||
294 | // TRACE_CUR("Not-Top-M Before:\n"); | ||
295 | // print_global_list(sem->not_top_m.root, 1); | ||
296 | // TRACE_CUR("Top-M Before (size = %d):\n", sem->top_m_size); | ||
297 | // print_global_list(sem->top_m.root, 1); | ||
298 | |||
299 | |||
300 | binheap_delete(&node->node, &sem->top_m); | ||
301 | |||
302 | if(!binheap_empty(&sem->not_top_m)) { | ||
303 | ikglp_heap_node_t *promoted = | ||
304 | binheap_top_entry(&sem->not_top_m, ikglp_heap_node_t, node); | ||
305 | |||
306 | TRACE_CUR("Promoting %s/%d to top-m\n", | ||
307 | promoted->task->comm, promoted->task->pid); | ||
308 | |||
309 | binheap_delete_root(&sem->not_top_m, ikglp_heap_node_t, node); | ||
310 | INIT_BINHEAP_NODE(&promoted->node); | ||
311 | |||
312 | binheap_add(&promoted->node, &sem->top_m, ikglp_heap_node_t, node); | ||
313 | } | ||
314 | else { | ||
315 | TRACE_CUR("No one to promote to top-m.\n"); | ||
316 | --(sem->top_m_size); | ||
317 | } | ||
318 | |||
319 | // TRACE_CUR("Top-M After (size = %d):\n", sem->top_m_size); | ||
320 | // print_global_list(sem->top_m.root, 1); | ||
321 | // TRACE_CUR("Not-Top-M After:\n"); | ||
322 | // print_global_list(sem->not_top_m.root, 1); | ||
323 | } | ||
324 | else { | ||
325 | // TRACE_CUR("%s/%d is in not-top-m\n", t->comm, t->pid); | ||
326 | // TRACE_CUR("Not-Top-M Before:\n"); | ||
327 | // print_global_list(sem->not_top_m.root, 1); | ||
328 | |||
329 | binheap_delete(&node->node, &sem->not_top_m); | ||
330 | |||
331 | // TRACE_CUR("Not-Top-M After:\n"); | ||
332 | // print_global_list(sem->not_top_m.root, 1); | ||
333 | } | ||
334 | } | ||
335 | |||
336 | |||
337 | static void ikglp_add_donees(struct ikglp_semaphore *sem, | ||
338 | struct fifo_queue *fq, | ||
339 | struct task_struct *t, | ||
340 | ikglp_donee_heap_node_t* node) | ||
341 | { | ||
342 | // TRACE_CUR("Adding %s/%d to donee list.\n", t->comm, t->pid); | ||
343 | // TRACE_CUR("donees Before:\n"); | ||
344 | // print_donees(sem, sem->donees.root, 1); | ||
345 | |||
346 | node->task = t; | ||
347 | node->donor_info = NULL; | ||
348 | node->fq = fq; | ||
349 | INIT_BINHEAP_NODE(&node->node); | ||
350 | |||
351 | binheap_add(&node->node, &sem->donees, ikglp_donee_heap_node_t, node); | ||
352 | |||
353 | // TRACE_CUR("donees After:\n"); | ||
354 | // print_donees(sem, sem->donees.root, 1); | ||
355 | } | ||
356 | |||
357 | |||
358 | static void ikglp_refresh_owners_prio_increase(struct task_struct *t, | ||
359 | struct fifo_queue *fq, | ||
360 | struct ikglp_semaphore *sem, | ||
361 | unsigned long flags) | ||
362 | { | ||
363 | // priority of 't' has increased (note: 't' might already be hp_waiter). | ||
364 | if ((t == fq->hp_waiter) || edf_higher_prio(t, fq->hp_waiter)) { | ||
365 | struct task_struct *old_max_eff_prio; | ||
366 | struct task_struct *new_max_eff_prio; | ||
367 | struct task_struct *new_prio = NULL; | ||
368 | struct task_struct *owner = fq->owner; | ||
369 | |||
370 | if(fq->hp_waiter) | ||
371 | TRACE_TASK(t, "has higher prio than hp_waiter (%s/%d).\n", | ||
372 | fq->hp_waiter->comm, fq->hp_waiter->pid); | ||
373 | else | ||
374 | TRACE_TASK(t, "has higher prio than hp_waiter (NIL).\n"); | ||
375 | |||
376 | if(owner) | ||
377 | { | ||
378 | raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock); | ||
379 | |||
380 | // TRACE_TASK(owner, "Heap Before:\n"); | ||
381 | // print_hp_waiters(tsk_rt(owner)->hp_blocked_tasks.root, 0); | ||
382 | |||
383 | old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); | ||
384 | |||
385 | fq->hp_waiter = t; | ||
386 | fq->nest.hp_waiter_eff_prio = effective_priority(fq->hp_waiter); | ||
387 | |||
388 | binheap_decrease(&fq->nest.hp_binheap_node, | ||
389 | &tsk_rt(owner)->hp_blocked_tasks); | ||
390 | |||
391 | // TRACE_TASK(owner, "Heap After:\n"); | ||
392 | // print_hp_waiters(tsk_rt(owner)->hp_blocked_tasks.root, 0); | ||
393 | |||
394 | new_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); | ||
395 | |||
396 | if(new_max_eff_prio != old_max_eff_prio) { | ||
397 | TRACE_TASK(t, "is new hp_waiter.\n"); | ||
398 | |||
399 | if ((effective_priority(owner) == old_max_eff_prio) || | ||
400 | (__edf_higher_prio(new_max_eff_prio, BASE, | ||
401 | owner, EFFECTIVE))){ | ||
402 | new_prio = new_max_eff_prio; | ||
403 | } | ||
404 | } | ||
405 | else { | ||
406 | TRACE_TASK(t, "no change in max_eff_prio of heap.\n"); | ||
407 | } | ||
408 | |||
409 | if(new_prio) { | ||
410 | // set new inheritance and propagate | ||
411 | TRACE_TASK(t, "Effective priority changed for owner %s/%d to %s/%d\n", | ||
412 | owner->comm, owner->pid, | ||
413 | new_prio->comm, new_prio->pid); | ||
414 | litmus->nested_increase_prio(owner, new_prio, &sem->lock, | ||
415 | flags); // unlocks lock. | ||
416 | } | ||
417 | else { | ||
418 | TRACE_TASK(t, "No change in effective priority (is %s/%d). Propagation halted.\n", | ||
419 | new_max_eff_prio->comm, new_max_eff_prio->pid); | ||
420 | raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); | ||
421 | unlock_fine_irqrestore(&sem->lock, flags); | ||
422 | } | ||
423 | } | ||
424 | else { | ||
425 | fq->hp_waiter = t; | ||
426 | fq->nest.hp_waiter_eff_prio = effective_priority(fq->hp_waiter); | ||
427 | |||
428 | TRACE_TASK(t, "no owner??\n"); | ||
429 | unlock_fine_irqrestore(&sem->lock, flags); | ||
430 | } | ||
431 | } | ||
432 | else { | ||
433 | TRACE_TASK(t, "hp_waiter is unaffected.\n"); | ||
434 | unlock_fine_irqrestore(&sem->lock, flags); | ||
435 | } | ||
436 | } | ||
437 | |||
438 | // hp_waiter has decreased | ||
439 | static void ikglp_refresh_owners_prio_decrease(struct fifo_queue *fq, | ||
440 | struct ikglp_semaphore *sem, | ||
441 | unsigned long flags) | ||
442 | { | ||
443 | struct task_struct *owner = fq->owner; | ||
444 | |||
445 | struct task_struct *old_max_eff_prio; | ||
446 | struct task_struct *new_max_eff_prio; | ||
447 | |||
448 | if(!owner) { | ||
449 | TRACE_CUR("No owner. Returning.\n"); | ||
450 | unlock_fine_irqrestore(&sem->lock, flags); | ||
451 | return; | ||
452 | } | ||
453 | |||
454 | raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock); | ||
455 | |||
456 | old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); | ||
457 | |||
458 | binheap_delete(&fq->nest.hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks); | ||
459 | fq->nest.hp_waiter_eff_prio = fq->hp_waiter; | ||
460 | binheap_add(&fq->nest.hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks, | ||
461 | struct nested_info, hp_binheap_node); | ||
462 | |||
463 | new_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); | ||
464 | |||
465 | if((old_max_eff_prio != new_max_eff_prio) && | ||
466 | (effective_priority(owner) == old_max_eff_prio)) | ||
467 | { | ||
468 | // Need to set new effective_priority for owner | ||
469 | struct task_struct *decreased_prio; | ||
470 | |||
471 | TRACE_CUR("Propagating decreased inheritance to holder of fq %d.\n", | ||
472 | ikglp_get_idx(sem, fq)); | ||
473 | |||
474 | if(__edf_higher_prio(new_max_eff_prio, BASE, owner, BASE)) { | ||
475 | TRACE_CUR("%s/%d has greater base priority than base priority of owner (%s/%d) of fq %d.\n", | ||
476 | (new_max_eff_prio) ? new_max_eff_prio->comm : "nil", | ||
477 | (new_max_eff_prio) ? new_max_eff_prio->pid : -1, | ||
478 | owner->comm, | ||
479 | owner->pid, | ||
480 | ikglp_get_idx(sem, fq)); | ||
481 | |||
482 | decreased_prio = new_max_eff_prio; | ||
483 | } | ||
484 | else { | ||
485 | TRACE_CUR("%s/%d has lesser base priority than base priority of owner (%s/%d) of fq %d.\n", | ||
486 | (new_max_eff_prio) ? new_max_eff_prio->comm : "nil", | ||
487 | (new_max_eff_prio) ? new_max_eff_prio->pid : -1, | ||
488 | owner->comm, | ||
489 | owner->pid, | ||
490 | ikglp_get_idx(sem, fq)); | ||
491 | |||
492 | decreased_prio = NULL; | ||
493 | } | ||
494 | |||
495 | // beware: recursion | ||
496 | litmus->nested_decrease_prio(owner, decreased_prio, &sem->lock, flags); // will unlock sem->lock | ||
497 | } | ||
498 | else { | ||
499 | TRACE_TASK(owner, "No need to propagate priority decrease forward.\n"); | ||
500 | raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); | ||
501 | unlock_fine_irqrestore(&sem->lock, flags); | ||
502 | } | ||
503 | } | ||
504 | |||
505 | |||
506 | static void ikglp_remove_donation_from_owner(struct binheap_node *n, | ||
507 | struct fifo_queue *fq, | ||
508 | struct ikglp_semaphore *sem, | ||
509 | unsigned long flags) | ||
510 | { | ||
511 | struct task_struct *owner = fq->owner; | ||
512 | |||
513 | struct task_struct *old_max_eff_prio; | ||
514 | struct task_struct *new_max_eff_prio; | ||
515 | |||
516 | BUG_ON(!owner); | ||
517 | |||
518 | raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock); | ||
519 | |||
520 | old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); | ||
521 | |||
522 | binheap_delete(n, &tsk_rt(owner)->hp_blocked_tasks); | ||
523 | |||
524 | new_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); | ||
525 | |||
526 | if((old_max_eff_prio != new_max_eff_prio) && | ||
527 | (effective_priority(owner) == old_max_eff_prio)) | ||
528 | { | ||
529 | // Need to set new effective_priority for owner | ||
530 | struct task_struct *decreased_prio; | ||
531 | |||
532 | TRACE_CUR("Propagating decreased inheritance to holder of fq %d.\n", | ||
533 | ikglp_get_idx(sem, fq)); | ||
534 | |||
535 | if(__edf_higher_prio(new_max_eff_prio, BASE, owner, BASE)) { | ||
536 | TRACE_CUR("has greater base priority than base priority of owner of fq %d.\n", | ||
537 | ikglp_get_idx(sem, fq)); | ||
538 | decreased_prio = new_max_eff_prio; | ||
539 | } | ||
540 | else { | ||
541 | TRACE_CUR("has lesser base priority than base priority of owner of fq %d.\n", | ||
542 | ikglp_get_idx(sem, fq)); | ||
543 | decreased_prio = NULL; | ||
544 | } | ||
545 | |||
546 | // beware: recursion | ||
547 | litmus->nested_decrease_prio(owner, decreased_prio, &sem->lock, flags); // will unlock sem->lock | ||
548 | } | ||
549 | else { | ||
550 | TRACE_TASK(owner, "No need to propagate priority decrease forward.\n"); | ||
551 | raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); | ||
552 | unlock_fine_irqrestore(&sem->lock, flags); | ||
553 | } | ||
554 | } | ||
555 | |||
556 | static void ikglp_remove_donation_from_fq_waiter(struct task_struct *t, | ||
557 | struct binheap_node *n) | ||
558 | { | ||
559 | struct task_struct *old_max_eff_prio; | ||
560 | struct task_struct *new_max_eff_prio; | ||
561 | |||
562 | raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock); | ||
563 | |||
564 | old_max_eff_prio = top_priority(&tsk_rt(t)->hp_blocked_tasks); | ||
565 | |||
566 | binheap_delete(n, &tsk_rt(t)->hp_blocked_tasks); | ||
567 | |||
568 | new_max_eff_prio = top_priority(&tsk_rt(t)->hp_blocked_tasks); | ||
569 | |||
570 | if((old_max_eff_prio != new_max_eff_prio) && | ||
571 | (effective_priority(t) == old_max_eff_prio)) | ||
572 | { | ||
573 | // Need to set new effective_priority for owner | ||
574 | struct task_struct *decreased_prio; | ||
575 | |||
576 | if(__edf_higher_prio(new_max_eff_prio, BASE, t, BASE)) { | ||
577 | decreased_prio = new_max_eff_prio; | ||
578 | } | ||
579 | else { | ||
580 | decreased_prio = NULL; | ||
581 | } | ||
582 | |||
583 | tsk_rt(t)->inh_task = decreased_prio; | ||
584 | } | ||
585 | |||
586 | raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); | ||
587 | } | ||
588 | |||
589 | static void ikglp_get_immediate(struct task_struct* t, | ||
590 | struct fifo_queue *fq, | ||
591 | struct ikglp_semaphore *sem, | ||
592 | unsigned long flags) | ||
593 | { | ||
594 | // resource available now | ||
595 | TRACE_CUR("queue %d: acquired immediately\n", ikglp_get_idx(sem, fq)); | ||
596 | |||
597 | fq->owner = t; | ||
598 | |||
599 | raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock); | ||
600 | binheap_add(&fq->nest.hp_binheap_node, &tsk_rt(t)->hp_blocked_tasks, | ||
601 | struct nested_info, hp_binheap_node); | ||
602 | raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); | ||
603 | |||
604 | ++(fq->count); | ||
605 | |||
606 | ikglp_add_global_list(sem, t, &fq->global_heap_node); | ||
607 | ikglp_add_donees(sem, fq, t, &fq->donee_heap_node); | ||
608 | |||
609 | sem->shortest_fifo_queue = ikglp_find_shortest(sem, sem->shortest_fifo_queue); | ||
610 | |||
611 | unlock_fine_irqrestore(&sem->lock, flags); | ||
612 | } | ||
613 | |||
614 | |||
615 | |||
616 | |||
617 | |||
618 | static void __ikglp_enqueue_on_fq(struct ikglp_semaphore *sem, | ||
619 | struct fifo_queue* fq, | ||
620 | struct task_struct* t, | ||
621 | wait_queue_t *wait, | ||
622 | ikglp_heap_node_t *global_heap_node, | ||
623 | ikglp_donee_heap_node_t *donee_heap_node) | ||
624 | { | ||
625 | /* resource is not free => must suspend and wait */ | ||
626 | TRACE_TASK(t, "Enqueuing on fq %d.\n", | ||
627 | ikglp_get_idx(sem, fq)); | ||
628 | |||
629 | init_waitqueue_entry(wait, t); | ||
630 | |||
631 | __add_wait_queue_tail_exclusive(&fq->wait, wait); | ||
632 | |||
633 | ++(fq->count); | ||
634 | |||
635 | // update global list. | ||
636 | if(likely(global_heap_node)) { | ||
637 | if(binheap_is_in_heap(&global_heap_node->node)) { | ||
638 | WARN_ON(1); | ||
639 | ikglp_del_global_list(sem, t, global_heap_node); | ||
640 | } | ||
641 | ikglp_add_global_list(sem, t, global_heap_node); | ||
642 | } | ||
643 | // update donor eligibility list. | ||
644 | if(likely(donee_heap_node)) { | ||
645 | // if(binheap_is_in_heap(&donee_heap_node->node)) { | ||
646 | // WARN_ON(1); | ||
647 | // } | ||
648 | ikglp_add_donees(sem, fq, t, donee_heap_node); | ||
649 | } | ||
650 | |||
651 | if(likely(sem->shortest_fifo_queue == fq)) { | ||
652 | sem->shortest_fifo_queue = ikglp_find_shortest(sem, fq); | ||
653 | } | ||
654 | |||
655 | TRACE_TASK(t, "shortest queue is now %d\n", ikglp_get_idx(sem, fq)); | ||
656 | } | ||
657 | |||
658 | |||
659 | static void ikglp_enqueue_on_fq( | ||
660 | struct ikglp_semaphore *sem, | ||
661 | struct fifo_queue *fq, | ||
662 | ikglp_wait_state_t *wait, | ||
663 | unsigned long flags) | ||
664 | { | ||
665 | /* resource is not free => must suspend and wait */ | ||
666 | TRACE_TASK(wait->task, "queue %d: Resource is not free => must suspend and wait.\n", | ||
667 | ikglp_get_idx(sem, fq)); | ||
668 | |||
669 | INIT_BINHEAP_NODE(&wait->global_heap_node.node); | ||
670 | INIT_BINHEAP_NODE(&wait->donee_heap_node.node); | ||
671 | |||
672 | __ikglp_enqueue_on_fq(sem, fq, wait->task, &wait->fq_node, | ||
673 | &wait->global_heap_node, &wait->donee_heap_node); | ||
674 | |||
675 | ikglp_refresh_owners_prio_increase(wait->task, fq, sem, flags); // unlocks sem->lock | ||
676 | } | ||
677 | |||
678 | |||
679 | static void __ikglp_enqueue_on_pq(struct ikglp_semaphore *sem, | ||
680 | ikglp_wait_state_t *wait) | ||
681 | { | ||
682 | TRACE_TASK(wait->task, "goes to PQ.\n"); | ||
683 | |||
684 | wait->pq_node.task = wait->task; // copy over task (a little redundant...) | ||
685 | |||
686 | binheap_add(&wait->pq_node.node, &sem->priority_queue, | ||
687 | ikglp_heap_node_t, node); | ||
688 | } | ||
689 | |||
690 | static void ikglp_enqueue_on_pq(struct ikglp_semaphore *sem, | ||
691 | ikglp_wait_state_t *wait) | ||
692 | { | ||
693 | INIT_BINHEAP_NODE(&wait->global_heap_node.node); | ||
694 | INIT_BINHEAP_NODE(&wait->donee_heap_node.node); | ||
695 | INIT_BINHEAP_NODE(&wait->pq_node.node); | ||
696 | |||
697 | __ikglp_enqueue_on_pq(sem, wait); | ||
698 | } | ||
699 | |||
700 | static void ikglp_enqueue_on_donor(struct ikglp_semaphore *sem, | ||
701 | ikglp_wait_state_t* wait, | ||
702 | unsigned long flags) | ||
703 | { | ||
704 | struct task_struct *t = wait->task; | ||
705 | ikglp_donee_heap_node_t *donee_node = NULL; | ||
706 | struct task_struct *donee; | ||
707 | |||
708 | struct task_struct *old_max_eff_prio; | ||
709 | struct task_struct *new_max_eff_prio; | ||
710 | struct task_struct *new_prio = NULL; | ||
711 | |||
712 | INIT_BINHEAP_NODE(&wait->global_heap_node.node); | ||
713 | INIT_BINHEAP_NODE(&wait->donee_heap_node.node); | ||
714 | INIT_BINHEAP_NODE(&wait->pq_node.node); | ||
715 | INIT_BINHEAP_NODE(&wait->node); | ||
716 | |||
717 | // TRACE_CUR("Adding %s/%d as donor.\n", t->comm, t->pid); | ||
718 | // TRACE_CUR("donors Before:\n"); | ||
719 | // print_donors(sem->donors.root, 1); | ||
720 | |||
721 | // Add donor to the global list. | ||
722 | ikglp_add_global_list(sem, t, &wait->global_heap_node); | ||
723 | |||
724 | // Select a donee | ||
725 | donee_node = binheap_top_entry(&sem->donees, | ||
726 | ikglp_donee_heap_node_t, node); | ||
727 | donee = donee_node->task; | ||
728 | |||
729 | TRACE_TASK(t, "Donee selected: %s/%d\n", donee->comm, donee->pid); | ||
730 | |||
731 | TRACE_CUR("Temporarily removing %s/%d to donee list.\n", | ||
732 | donee->comm, donee->pid); | ||
733 | // TRACE_CUR("donees Before:\n"); | ||
734 | // print_donees(sem, sem->donees.root, 1); | ||
735 | |||
736 | binheap_delete_root(&sem->donees, ikglp_donee_heap_node_t, node); // will re-add it shortly | ||
737 | |||
738 | // TRACE_CUR("donees After:\n"); | ||
739 | // print_donees(sem, sem->donees.root, 1); | ||
740 | |||
741 | |||
742 | wait->donee_info = donee_node; | ||
743 | |||
744 | // Add t to donor heap. | ||
745 | binheap_add(&wait->node, &sem->donors, ikglp_wait_state_t, node); | ||
746 | |||
747 | // Now adjust the donee's priority. | ||
748 | |||
749 | // Lock the donee's inheritance heap. | ||
750 | raw_spin_lock(&tsk_rt(donee)->hp_blocked_tasks_lock); | ||
751 | |||
752 | old_max_eff_prio = top_priority(&tsk_rt(donee)->hp_blocked_tasks); | ||
753 | |||
754 | if(donee_node->donor_info) { | ||
755 | // Steal donation relation. Evict old donor to PQ. | ||
756 | |||
757 | // Remove old donor from donor heap | ||
758 | ikglp_wait_state_t *old_wait = donee_node->donor_info; | ||
759 | struct task_struct *old_donor = old_wait->task; | ||
760 | |||
761 | TRACE_TASK(t, "Donee (%s/%d) had donor %s/%d. Moving old donor to PQ.\n", | ||
762 | donee->comm, donee->pid, old_donor->comm, old_donor->pid); | ||
763 | |||
764 | binheap_delete(&old_wait->node, &sem->donors); | ||
765 | |||
766 | // Remove donation from donee's inheritance heap. | ||
767 | binheap_delete(&old_wait->prio_donation.hp_binheap_node, | ||
768 | &tsk_rt(donee)->hp_blocked_tasks); | ||
769 | // WARNING: have not updated inh_prio! | ||
770 | |||
771 | // Add old donor to PQ. | ||
772 | __ikglp_enqueue_on_pq(sem, old_wait); | ||
773 | |||
774 | // Remove old donor from the global heap. | ||
775 | ikglp_del_global_list(sem, old_donor, &old_wait->global_heap_node); | ||
776 | } | ||
777 | |||
778 | // Add back donee's node to the donees heap with increased prio | ||
779 | donee_node->donor_info = wait; | ||
780 | INIT_BINHEAP_NODE(&donee_node->node); | ||
781 | |||
782 | |||
783 | TRACE_CUR("Adding %s/%d back to donee list.\n", donee->comm, donee->pid); | ||
784 | // TRACE_CUR("donees Before:\n"); | ||
785 | // print_donees(sem, sem->donees.root, 1); | ||
786 | |||
787 | binheap_add(&donee_node->node, &sem->donees, ikglp_donee_heap_node_t, node); | ||
788 | |||
789 | // TRACE_CUR("donees After:\n"); | ||
790 | // print_donees(sem, sem->donees.root, 1); | ||
791 | |||
792 | // Add an inheritance/donation to the donee's inheritance heap. | ||
793 | wait->prio_donation.lock = (struct litmus_lock*)sem; | ||
794 | wait->prio_donation.hp_waiter_eff_prio = t; | ||
795 | wait->prio_donation.hp_waiter_ptr = NULL; | ||
796 | INIT_BINHEAP_NODE(&wait->prio_donation.hp_binheap_node); | ||
797 | |||
798 | binheap_add(&wait->prio_donation.hp_binheap_node, | ||
799 | &tsk_rt(donee)->hp_blocked_tasks, | ||
800 | struct nested_info, hp_binheap_node); | ||
801 | |||
802 | new_max_eff_prio = top_priority(&tsk_rt(donee)->hp_blocked_tasks); | ||
803 | |||
804 | if(new_max_eff_prio != old_max_eff_prio) { | ||
805 | if ((effective_priority(donee) == old_max_eff_prio) || | ||
806 | (__edf_higher_prio(new_max_eff_prio, BASE, donee, EFFECTIVE))){ | ||
807 | TRACE_TASK(t, "Donation increases %s/%d's effective priority\n", | ||
808 | donee->comm, donee->pid); | ||
809 | new_prio = new_max_eff_prio; | ||
810 | } | ||
811 | // else { | ||
812 | // // should be bug. donor would not be in top-m. | ||
813 | // TRACE_TASK(t, "Donation is not greater than base prio of %s/%d?\n", donee->comm, donee->pid); | ||
814 | // WARN_ON(1); | ||
815 | // } | ||
816 | // } | ||
817 | // else { | ||
818 | // // should be bug. donor would not be in top-m. | ||
819 | // TRACE_TASK(t, "No change in %s/%d's inheritance heap?\n", donee->comm, donee->pid); | ||
820 | // WARN_ON(1); | ||
821 | } | ||
822 | |||
823 | if(new_prio) { | ||
824 | struct fifo_queue *donee_fq = donee_node->fq; | ||
825 | |||
826 | if(donee != donee_fq->owner) { | ||
827 | TRACE_TASK(t, "%s/%d is not the owner. Propagating priority to owner %s/%d.\n", | ||
828 | donee->comm, donee->pid, | ||
829 | donee_fq->owner->comm, donee_fq->owner->pid); | ||
830 | |||
831 | raw_spin_unlock(&tsk_rt(donee)->hp_blocked_tasks_lock); | ||
832 | ikglp_refresh_owners_prio_increase(donee, donee_fq, sem, flags); // unlocks sem->lock | ||
833 | } | ||
834 | else { | ||
835 | TRACE_TASK(t, "%s/%d is the owner. Progatating priority immediatly.\n", | ||
836 | donee->comm, donee->pid); | ||
837 | litmus->nested_increase_prio(donee, new_prio, &sem->lock, flags); // unlocks sem->lock and donee's heap lock | ||
838 | } | ||
839 | } | ||
840 | else { | ||
841 | TRACE_TASK(t, "No change in effective priority (it is %d/%s). BUG?\n", | ||
842 | new_max_eff_prio->comm, new_max_eff_prio->pid); | ||
843 | raw_spin_unlock(&tsk_rt(donee)->hp_blocked_tasks_lock); | ||
844 | unlock_fine_irqrestore(&sem->lock, flags); | ||
845 | } | ||
846 | |||
847 | |||
848 | // TRACE_CUR("donors After:\n"); | ||
849 | // print_donors(sem->donors.root, 1); | ||
850 | } | ||
851 | |||
852 | |||
853 | int ikglp_lock(struct litmus_lock* l) | ||
854 | { | ||
855 | struct task_struct* t = current; | ||
856 | struct ikglp_semaphore *sem = ikglp_from_lock(l); | ||
857 | unsigned long flags = 0, real_flags; | ||
858 | struct fifo_queue *fq = NULL; | ||
859 | int replica = -EINVAL; | ||
860 | |||
861 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
862 | raw_spinlock_t *dgl_lock; | ||
863 | #endif | ||
864 | |||
865 | ikglp_wait_state_t wait; | ||
866 | |||
867 | if (!is_realtime(t)) | ||
868 | return -EPERM; | ||
869 | |||
870 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
871 | dgl_lock = litmus->get_dgl_spinlock(t); | ||
872 | #endif | ||
873 | |||
874 | raw_spin_lock_irqsave(&sem->real_lock, real_flags); | ||
875 | |||
876 | lock_global_irqsave(dgl_lock, flags); | ||
877 | lock_fine_irqsave(&sem->lock, flags); | ||
878 | |||
879 | if(sem->shortest_fifo_queue->count == 0) { | ||
880 | // take available resource | ||
881 | replica = ikglp_get_idx(sem, sem->shortest_fifo_queue); | ||
882 | |||
883 | ikglp_get_immediate(t, sem->shortest_fifo_queue, sem, flags); // unlocks sem->lock | ||
884 | |||
885 | unlock_global_irqrestore(dgl_lock, flags); | ||
886 | raw_spin_unlock_irqrestore(&sem->real_lock, real_flags); | ||
887 | } | ||
888 | else | ||
889 | { | ||
890 | // we have to suspend. | ||
891 | |||
892 | wait.task = t; // THIS IS CRITICALLY IMPORTANT!!! | ||
893 | |||
894 | tsk_rt(t)->blocked_lock = (struct litmus_lock*)sem; // record where we are blocked | ||
895 | mb(); | ||
896 | |||
897 | /* FIXME: interruptible would be nice some day */ | ||
898 | set_task_state(t, TASK_UNINTERRUPTIBLE); | ||
899 | |||
900 | if(sem->shortest_fifo_queue->count < sem->max_fifo_len) { | ||
901 | // enqueue on fq | ||
902 | ikglp_enqueue_on_fq(sem, sem->shortest_fifo_queue, &wait, flags); // unlocks sem->lock | ||
903 | } | ||
904 | else { | ||
905 | |||
906 | TRACE_CUR("IKGLP fifo queues are full.\n"); | ||
907 | |||
908 | // no room in fifos. Go to PQ or donors. | ||
909 | |||
910 | if(__edf_higher_prio(ikglp_mth_highest(sem), BASE, t, BASE)) { | ||
911 | // enqueue on PQ | ||
912 | ikglp_enqueue_on_pq(sem, &wait); | ||
913 | unlock_fine_irqrestore(&sem->lock, flags); | ||
914 | } | ||
915 | else { | ||
916 | // enqueue as donor | ||
917 | ikglp_enqueue_on_donor(sem, &wait, flags); // unlocks sem->lock | ||
918 | } | ||
919 | } | ||
920 | |||
921 | unlock_global_irqrestore(dgl_lock, flags); | ||
922 | raw_spin_unlock_irqrestore(&sem->real_lock, real_flags); | ||
923 | |||
924 | TS_LOCK_SUSPEND; | ||
925 | |||
926 | schedule(); | ||
927 | |||
928 | TS_LOCK_RESUME; | ||
929 | |||
930 | fq = ikglp_get_queue(sem, t); | ||
931 | BUG_ON(!fq); | ||
932 | |||
933 | replica = ikglp_get_idx(sem, fq); | ||
934 | } | ||
935 | |||
936 | TRACE_CUR("Acquired lock %d, queue %d\n", | ||
937 | l->ident, replica); | ||
938 | |||
939 | return replica; | ||
940 | } | ||
941 | |||
942 | static void ikglp_move_donor_to_fq(struct ikglp_semaphore *sem, | ||
943 | struct fifo_queue *fq, | ||
944 | ikglp_wait_state_t *donor_info) | ||
945 | { | ||
946 | struct task_struct *t = donor_info->task; | ||
947 | |||
948 | TRACE_CUR("Donor %s/%d being moved to fq %d\n", | ||
949 | t->comm, | ||
950 | t->pid, | ||
951 | ikglp_get_idx(sem, fq)); | ||
952 | |||
953 | binheap_delete(&donor_info->node, &sem->donors); | ||
954 | |||
955 | __ikglp_enqueue_on_fq(sem, fq, t, | ||
956 | &donor_info->fq_node, | ||
957 | NULL, // already in global_list, so pass NULL to avoid adding it a second time. | ||
958 | &donor_info->donee_heap_node); | ||
959 | |||
960 | // warning: | ||
961 | // ikglp_update_owners_prio(t, fq, sem, flags) has not been called. | ||
962 | } | ||
963 | |||
964 | static void ikglp_move_pq_to_fq(struct ikglp_semaphore *sem, | ||
965 | struct fifo_queue *fq, | ||
966 | ikglp_wait_state_t *wait) | ||
967 | { | ||
968 | struct task_struct *t = wait->task; | ||
969 | |||
970 | TRACE_CUR("PQ request %s/%d being moved to fq %d\n", | ||
971 | t->comm, | ||
972 | t->pid, | ||
973 | ikglp_get_idx(sem, fq)); | ||
974 | |||
975 | binheap_delete(&wait->pq_node.node, &sem->priority_queue); | ||
976 | |||
977 | __ikglp_enqueue_on_fq(sem, fq, t, | ||
978 | &wait->fq_node, | ||
979 | &wait->global_heap_node, | ||
980 | &wait->donee_heap_node); | ||
981 | // warning: | ||
982 | // ikglp_update_owners_prio(t, fq, sem, flags) has not been called. | ||
983 | } | ||
984 | |||
985 | static ikglp_wait_state_t* ikglp_find_hp_waiter_to_steal( | ||
986 | struct ikglp_semaphore* sem) | ||
987 | { | ||
988 | /* must hold sem->lock */ | ||
989 | |||
990 | struct fifo_queue *fq = NULL; | ||
991 | struct list_head *pos; | ||
992 | struct task_struct *queued; | ||
993 | int i; | ||
994 | |||
995 | for(i = 0; i < sem->nr_replicas; ++i) { | ||
996 | if( (sem->fifo_queues[i].count > 1) && | ||
997 | (!fq || edf_higher_prio(sem->fifo_queues[i].hp_waiter, fq->hp_waiter)) ) { | ||
998 | |||
999 | TRACE_CUR("hp_waiter on fq %d (%s/%d) has higher prio than hp_waiter on fq %d (%s/%d)\n", | ||
1000 | ikglp_get_idx(sem, &sem->fifo_queues[i]), | ||
1001 | sem->fifo_queues[i].hp_waiter->comm, | ||
1002 | sem->fifo_queues[i].hp_waiter->pid, | ||
1003 | (fq) ? ikglp_get_idx(sem, fq) : -1, | ||
1004 | (fq) ? ((fq->hp_waiter) ? fq->hp_waiter->comm : "nil") : "nilXX", | ||
1005 | (fq) ? ((fq->hp_waiter) ? fq->hp_waiter->pid : -1) : -2); | ||
1006 | |||
1007 | fq = &sem->fifo_queues[i]; | ||
1008 | |||
1009 | WARN_ON(!(fq->hp_waiter)); | ||
1010 | } | ||
1011 | } | ||
1012 | |||
1013 | if(fq) { | ||
1014 | struct task_struct *max_hp = fq->hp_waiter; | ||
1015 | ikglp_wait_state_t* ret = NULL; | ||
1016 | |||
1017 | TRACE_CUR("Searching for %s/%d on fq %d\n", | ||
1018 | max_hp->comm, | ||
1019 | max_hp->pid, | ||
1020 | ikglp_get_idx(sem, fq)); | ||
1021 | |||
1022 | BUG_ON(!max_hp); | ||
1023 | |||
1024 | list_for_each(pos, &fq->wait.task_list) { | ||
1025 | wait_queue_t *wait = list_entry(pos, wait_queue_t, task_list); | ||
1026 | |||
1027 | queued = (struct task_struct*) wait->private; | ||
1028 | |||
1029 | TRACE_CUR("fq %d entry: %s/%d\n", | ||
1030 | ikglp_get_idx(sem, fq), | ||
1031 | queued->comm, | ||
1032 | queued->pid); | ||
1033 | |||
1034 | /* Compare task prios, find high prio task. */ | ||
1035 | if (queued == max_hp) { | ||
1036 | TRACE_CUR("Found it!\n"); | ||
1037 | ret = container_of(wait, ikglp_wait_state_t, fq_node); | ||
1038 | } | ||
1039 | } | ||
1040 | |||
1041 | WARN_ON(!ret); | ||
1042 | return ret; | ||
1043 | } | ||
1044 | |||
1045 | return(NULL); | ||
1046 | } | ||
1047 | |||
1048 | static void ikglp_steal_to_fq(struct ikglp_semaphore *sem, | ||
1049 | struct fifo_queue *fq, | ||
1050 | ikglp_wait_state_t *fq_wait) | ||
1051 | { | ||
1052 | struct task_struct *t = fq_wait->task; | ||
1053 | struct fifo_queue *fq_steal = fq_wait->donee_heap_node.fq; | ||
1054 | |||
1055 | WARN_ON(t != fq_steal->hp_waiter); | ||
1056 | |||
1057 | TRACE_CUR("FQ request %s/%d being moved to fq %d\n", | ||
1058 | t->comm, | ||
1059 | t->pid, | ||
1060 | ikglp_get_idx(sem, fq)); | ||
1061 | |||
1062 | fq_wait->donee_heap_node.fq = fq; // just to be safe | ||
1063 | |||
1064 | |||
1065 | __remove_wait_queue(&fq_steal->wait, &fq_wait->fq_node); | ||
1066 | --(fq_steal->count); | ||
1067 | |||
1068 | fq_steal->hp_waiter = ikglp_find_hp_waiter(fq_steal, NULL); | ||
1069 | TRACE_TASK(t, "New hp_waiter for fq %d is %s/%d!\n", | ||
1070 | ikglp_get_idx(sem, fq_steal), | ||
1071 | (fq_steal->hp_waiter) ? fq_steal->hp_waiter->comm : "nil", | ||
1072 | (fq_steal->hp_waiter) ? fq_steal->hp_waiter->pid : -1); | ||
1073 | |||
1074 | |||
1075 | // Update shortest. | ||
1076 | if(fq_steal->count < sem->shortest_fifo_queue->count) { | ||
1077 | sem->shortest_fifo_queue = fq_steal; | ||
1078 | } | ||
1079 | |||
1080 | __ikglp_enqueue_on_fq(sem, fq, t, | ||
1081 | &fq_wait->fq_node, | ||
1082 | NULL, | ||
1083 | NULL); | ||
1084 | |||
1085 | // warning: We have not checked the priority inheritance of fq's owner yet. | ||
1086 | } | ||
1087 | |||
1088 | |||
1089 | static void ikglp_migrate_fq_to_owner_heap_nodes(struct ikglp_semaphore *sem, | ||
1090 | struct fifo_queue *fq, | ||
1091 | ikglp_wait_state_t *old_wait) | ||
1092 | { | ||
1093 | struct task_struct *t = old_wait->task; | ||
1094 | |||
1095 | BUG_ON(old_wait->donee_heap_node.fq != fq); | ||
1096 | |||
1097 | TRACE_TASK(t, "Migrating wait_state to memory of queue %d.\n", | ||
1098 | ikglp_get_idx(sem, fq)); | ||
1099 | |||
1100 | // We need to migrate global_heap_node and donee_heap_node off the stack | ||
1101 | // to the nodes allocated for the owner of this fq. | ||
1102 | |||
1103 | // TODO: Enhance binheap() to perform this operation in place. | ||
1104 | |||
1105 | ikglp_del_global_list(sem, t, &old_wait->global_heap_node); // remove | ||
1106 | fq->global_heap_node = old_wait->global_heap_node; // copy | ||
1107 | ikglp_add_global_list(sem, t, &fq->global_heap_node); // re-add | ||
1108 | |||
1109 | binheap_delete(&old_wait->donee_heap_node.node, &sem->donees); // remove | ||
1110 | fq->donee_heap_node = old_wait->donee_heap_node; // copy | ||
1111 | |||
1112 | if(fq->donee_heap_node.donor_info) { | ||
1113 | // let donor know that our location has changed | ||
1114 | BUG_ON(fq->donee_heap_node.donor_info->donee_info->task != t); // validate cross-link | ||
1115 | fq->donee_heap_node.donor_info->donee_info = &fq->donee_heap_node; | ||
1116 | } | ||
1117 | INIT_BINHEAP_NODE(&fq->donee_heap_node.node); | ||
1118 | binheap_add(&fq->donee_heap_node.node, &sem->donees, | ||
1119 | ikglp_donee_heap_node_t, node); // re-add | ||
1120 | } | ||
1121 | |||
1122 | int ikglp_unlock(struct litmus_lock* l) | ||
1123 | { | ||
1124 | struct ikglp_semaphore *sem = ikglp_from_lock(l); | ||
1125 | struct task_struct *t = current; | ||
1126 | struct task_struct *donee = NULL; | ||
1127 | struct task_struct *next = NULL; | ||
1128 | struct task_struct *new_on_fq = NULL; | ||
1129 | |||
1130 | ikglp_wait_state_t *other_donor_info = NULL; | ||
1131 | struct fifo_queue *to_steal = NULL; | ||
1132 | struct fifo_queue *fq; | ||
1133 | |||
1134 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
1135 | raw_spinlock_t *dgl_lock; | ||
1136 | #endif | ||
1137 | |||
1138 | unsigned long flags = 0, real_flags; | ||
1139 | |||
1140 | int err = 0; | ||
1141 | |||
1142 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
1143 | dgl_lock = litmus->get_dgl_spinlock(t); | ||
1144 | #endif | ||
1145 | |||
1146 | raw_spin_lock_irqsave(&sem->real_lock, real_flags); | ||
1147 | |||
1148 | lock_global_irqsave(dgl_lock, flags); // TODO: Push this deeper | ||
1149 | lock_fine_irqsave(&sem->lock, flags); | ||
1150 | |||
1151 | fq = ikglp_get_queue(sem, t); // returns NULL if 't' is not owner. | ||
1152 | |||
1153 | if (!fq) { | ||
1154 | err = -EINVAL; | ||
1155 | goto out; | ||
1156 | } | ||
1157 | |||
1158 | TRACE_TASK(t, "Freeing replica %d.\n", ikglp_get_idx(sem, fq)); | ||
1159 | |||
1160 | |||
1161 | // Remove 't' from the heaps, but data in nodes will still be good. | ||
1162 | ikglp_del_global_list(sem, t, &fq->global_heap_node); | ||
1163 | binheap_delete(&fq->donee_heap_node.node, &sem->donees); | ||
1164 | |||
1165 | // Move the next request into the FQ and update heaps as needed. | ||
1166 | // We defer re-evaluation of priorities to later in the function. | ||
1167 | if(fq->donee_heap_node.donor_info) { // move my donor to the FQ | ||
1168 | ikglp_wait_state_t *donor_info = fq->donee_heap_node.donor_info; | ||
1169 | |||
1170 | new_on_fq = donor_info->task; | ||
1171 | |||
1172 | TRACE_TASK(t, "Moving MY donor (%s/%d) to fq %d.\n", | ||
1173 | new_on_fq->comm, new_on_fq->pid, | ||
1174 | ikglp_get_idx(sem, fq)); | ||
1175 | // donor moved to FQ | ||
1176 | donee = t; | ||
1177 | ikglp_move_donor_to_fq(sem, fq, donor_info); | ||
1178 | } | ||
1179 | else if(!binheap_empty(&sem->donors)) { // I have no donor, so move the highest-priority donor to the FQ | ||
1180 | // move other donor to FQ | ||
1181 | other_donor_info = binheap_top_entry(&sem->donors, | ||
1182 | ikglp_wait_state_t, node); | ||
1183 | |||
1184 | new_on_fq = other_donor_info->task; | ||
1185 | donee = other_donor_info->donee_info->task; | ||
1186 | |||
1187 | // update the donee's heap position. | ||
1188 | other_donor_info->donee_info->donor_info = NULL; // clear the cross-link | ||
1189 | binheap_decrease(&other_donor_info->donee_info->node, &sem->donees); | ||
1190 | |||
1191 | |||
1192 | TRACE_TASK(t, "Moving a donor (%s/%d) to fq %d.\n", | ||
1193 | new_on_fq->comm, new_on_fq->pid, | ||
1194 | ikglp_get_idx(sem, fq)); | ||
1195 | |||
1196 | ikglp_move_donor_to_fq(sem, fq, other_donor_info); | ||
1197 | } | ||
1198 | else if(!binheap_empty(&sem->priority_queue)) { // No donors, so move PQ | ||
1199 | ikglp_heap_node_t *pq_node = binheap_top_entry(&sem->priority_queue, | ||
1200 | ikglp_heap_node_t, node); | ||
1201 | ikglp_wait_state_t *pq_wait = container_of(pq_node, ikglp_wait_state_t, | ||
1202 | pq_node); | ||
1203 | |||
1204 | new_on_fq = pq_wait->task; | ||
1205 | |||
1206 | TRACE_TASK(t, "Moving a pq waiter (%s/%d) to fq %d.\n", | ||
1207 | new_on_fq->comm, new_on_fq->pid, | ||
1208 | ikglp_get_idx(sem, fq)); | ||
1209 | |||
1210 | ikglp_move_pq_to_fq(sem, fq, pq_wait); | ||
1211 | } | ||
1212 | else if(fq->count == 1) { // no donors or PQ waiters; this queue is otherwise empty, so try to steal | ||
1213 | // steal. | ||
1214 | ikglp_wait_state_t *fq_wait; | ||
1215 | |||
1216 | TRACE_TASK(t, "Looking to steal a request for fq %d...\n", | ||
1217 | ikglp_get_idx(sem, fq)); | ||
1218 | |||
1219 | fq_wait = ikglp_find_hp_waiter_to_steal(sem); | ||
1220 | if(fq_wait) { | ||
1221 | to_steal = fq_wait->donee_heap_node.fq; | ||
1222 | |||
1223 | new_on_fq = fq_wait->task; | ||
1224 | |||
1225 | TRACE_TASK(t, "Found %s/%d of fq %d to steal for fq %d...\n", | ||
1226 | new_on_fq->comm, new_on_fq->pid, | ||
1227 | ikglp_get_idx(sem, to_steal), | ||
1228 | ikglp_get_idx(sem, fq)); | ||
1229 | |||
1230 | ikglp_steal_to_fq(sem, fq, fq_wait); | ||
1231 | } | ||
1232 | else { | ||
1233 | TRACE_TASK(t, "Found nothing to steal for fq %d.\n", | ||
1234 | ikglp_get_idx(sem, fq)); | ||
1235 | } | ||
1236 | } | ||
1237 | else { // move no one | ||
1238 | } | ||
1239 | |||
1240 | // 't' must drop all priority and clean up data structures before hand-off. | ||
1241 | |||
1242 | // Drop ALL inheritance. The IKGLP must be the outermost lock. | ||
1243 | raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock); | ||
1244 | { | ||
1245 | int count = 0; | ||
1246 | while(!binheap_empty(&tsk_rt(t)->hp_blocked_tasks)) { | ||
1247 | binheap_delete_root(&tsk_rt(t)->hp_blocked_tasks, | ||
1248 | struct nested_info, hp_binheap_node); | ||
1249 | ++count; | ||
1250 | } | ||
1251 | litmus->decrease_prio(t, NULL); | ||
1252 | WARN_ON(count > 2); // at most two entries are possible: local fq inheritance and a donation. | ||
1253 | } | ||
1254 | raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); | ||
1255 | |||
1256 | |||
1257 | // Updating the owner and updating sem->shortest_fifo_queue | ||
1258 | // could have been done sooner, but it is deferred in the | ||
1259 | // hope of reducing thrashing of the sem->shortest_fifo_queue | ||
1260 | // assignment. | ||
1261 | fq->owner = NULL; // the replica is no longer owned | ||
1262 | --(fq->count); | ||
1263 | if(fq->count < sem->shortest_fifo_queue->count) { | ||
1264 | sem->shortest_fifo_queue = fq; | ||
1265 | } | ||
1266 | |||
1267 | // Now patch up other priorities. | ||
1268 | // | ||
1269 | // At most one of the following: | ||
1270 | // if(donee && donee != t), decrease prio, propagate to owner, or onward | ||
1271 | // if(to_steal), update owner's prio (hp_waiter has already been set) | ||
1272 | // | ||
1273 | |||
1274 | BUG_ON((other_donor_info != NULL) && (to_steal != NULL)); | ||
1275 | |||
1276 | if(other_donor_info) { | ||
1277 | struct fifo_queue *other_fq = other_donor_info->donee_info->fq; | ||
1278 | |||
1279 | BUG_ON(!donee); | ||
1280 | BUG_ON(donee == t); | ||
1281 | |||
1282 | TRACE_TASK(t, "Terminating donation relation of donor %s/%d to donee %s/%d!\n", | ||
1283 | other_donor_info->task->comm, other_donor_info->task->pid, | ||
1284 | donee->comm, donee->pid); | ||
1285 | |||
1286 | // need to terminate donation relation. | ||
1287 | if(donee == other_fq->owner) { | ||
1288 | TRACE_TASK(t, "Donee %s/%d is an owner of fq %d.\n", | ||
1289 | donee->comm, donee->pid, | ||
1290 | ikglp_get_idx(sem, other_fq)); | ||
1291 | |||
1292 | ikglp_remove_donation_from_owner(&other_donor_info->prio_donation.hp_binheap_node, other_fq, sem, flags); | ||
1293 | lock_fine_irqsave(&sem->lock, flags); // there should be no contention!!!! | ||
1294 | } | ||
1295 | else { | ||
1296 | TRACE_TASK(t, "Donee %s/%d is an blocked in of fq %d.\n", | ||
1297 | donee->comm, donee->pid, | ||
1298 | ikglp_get_idx(sem, other_fq)); | ||
1299 | |||
1300 | ikglp_remove_donation_from_fq_waiter(donee, &other_donor_info->prio_donation.hp_binheap_node); | ||
1301 | if(donee == other_fq->hp_waiter) { | ||
1302 | TRACE_TASK(t, "Donee %s/%d was an hp_waiter of fq %d. Rechecking hp_waiter.\n", | ||
1303 | donee->comm, donee->pid, | ||
1304 | ikglp_get_idx(sem, other_fq)); | ||
1305 | |||
1306 | other_fq->hp_waiter = ikglp_find_hp_waiter(other_fq, NULL); | ||
1307 | TRACE_TASK(t, "New hp_waiter for fq %d is %s/%d!\n", | ||
1308 | ikglp_get_idx(sem, other_fq), | ||
1309 | (other_fq->hp_waiter) ? other_fq->hp_waiter->comm : "nil", | ||
1310 | (other_fq->hp_waiter) ? other_fq->hp_waiter->pid : -1); | ||
1311 | |||
1312 | ikglp_refresh_owners_prio_decrease(other_fq, sem, flags); // unlocks sem->lock. reacquire it. | ||
1313 | lock_fine_irqsave(&sem->lock, flags); // there should be no contention!!!! | ||
1314 | } | ||
1315 | } | ||
1316 | } | ||
1317 | else if(to_steal) { | ||
1318 | TRACE_TASK(t, "Rechecking priority inheritance of fq %d, triggered by stealing.\n", | ||
1319 | ikglp_get_idx(sem, to_steal)); | ||
1320 | |||
1321 | ikglp_refresh_owners_prio_decrease(to_steal, sem, flags); // unlocks sem->lock. reacquire it. | ||
1322 | lock_fine_irqsave(&sem->lock, flags); // there should be no contention!!!! | ||
1323 | } | ||
1324 | |||
1325 | // check for new HP waiter. | ||
1326 | if(new_on_fq) { | ||
1327 | // fq->owner is null, so just update the hp_waiter without locking. | ||
1328 | |||
1329 | if(new_on_fq == fq->hp_waiter) { | ||
1330 | TRACE_TASK(t, "new_on_fq is already hp_waiter.\n", | ||
1331 | fq->hp_waiter->comm, fq->hp_waiter->pid); | ||
1332 | fq->nest.hp_waiter_eff_prio = effective_priority(fq->hp_waiter); // set this just to be sure... | ||
1333 | } | ||
1334 | else if(edf_higher_prio(new_on_fq, fq->hp_waiter)) { | ||
1335 | if(fq->hp_waiter) | ||
1336 | TRACE_TASK(t, "has higher prio than hp_waiter (%s/%d).\n", | ||
1337 | fq->hp_waiter->comm, fq->hp_waiter->pid); | ||
1338 | else | ||
1339 | TRACE_TASK(t, "has higher prio than hp_waiter (NIL).\n"); | ||
1340 | |||
1341 | fq->hp_waiter = new_on_fq; | ||
1342 | fq->nest.hp_waiter_eff_prio = effective_priority(fq->hp_waiter); | ||
1343 | |||
1344 | TRACE_TASK(t, "New hp_waiter for fq %d is %s/%d!\n", | ||
1345 | ikglp_get_idx(sem, fq), | ||
1346 | (fq->hp_waiter) ? fq->hp_waiter->comm : "nil", | ||
1347 | (fq->hp_waiter) ? fq->hp_waiter->pid : -1); | ||
1348 | } | ||
1349 | } | ||
1350 | |||
1351 | |||
1352 | if(waitqueue_active(&fq->wait)) | ||
1353 | { | ||
1354 | wait_queue_t *wait = list_entry(fq->wait.task_list.next, wait_queue_t, task_list); | ||
1355 | ikglp_wait_state_t *fq_wait = container_of(wait, ikglp_wait_state_t, fq_node); | ||
1356 | next = (struct task_struct*) wait->private; | ||
1357 | |||
1358 | __remove_wait_queue(&fq->wait, wait); | ||
1359 | |||
1360 | TRACE_CUR("queue %d: ASSIGNING %s/%d as owner - next\n", | ||
1361 | ikglp_get_idx(sem, fq), | ||
1362 | next->comm, next->pid); | ||
1363 | |||
1364 | // migrate wait-state to fifo-memory. | ||
1365 | ikglp_migrate_fq_to_owner_heap_nodes(sem, fq, fq_wait); | ||
1366 | |||
1367 | /* next becomes the resource holder */ | ||
1368 | fq->owner = next; | ||
1369 | tsk_rt(next)->blocked_lock = NULL; | ||
1370 | |||
1371 | |||
1372 | /* determine new hp_waiter if necessary */ | ||
1373 | if (next == fq->hp_waiter) { | ||
1374 | |||
1375 | TRACE_TASK(next, "was highest-prio waiter\n"); | ||
1376 | /* next has the highest priority --- it doesn't need to | ||
1377 | * inherit. However, we need to make sure that the | ||
1378 | * next-highest priority in the queue is reflected in | ||
1379 | * hp_waiter. */ | ||
1380 | fq->hp_waiter = ikglp_find_hp_waiter(fq, NULL); | ||
1381 | TRACE_TASK(next, "New hp_waiter for fq %d is %s/%d!\n", | ||
1382 | ikglp_get_idx(sem, fq), | ||
1383 | (fq->hp_waiter) ? fq->hp_waiter->comm : "nil", | ||
1384 | (fq->hp_waiter) ? fq->hp_waiter->pid : -1); | ||
1385 | |||
1386 | fq->nest.hp_waiter_eff_prio = (fq->hp_waiter) ? | ||
1387 | effective_priority(fq->hp_waiter) : NULL; | ||
1388 | |||
1389 | if (fq->hp_waiter) | ||
1390 | TRACE_TASK(fq->hp_waiter, "is new highest-prio waiter\n"); | ||
1391 | else | ||
1392 | TRACE("no further waiters\n"); | ||
1393 | |||
1394 | raw_spin_lock(&tsk_rt(next)->hp_blocked_tasks_lock); | ||
1395 | |||
1396 | // TRACE_TASK(next, "Heap Before:\n"); | ||
1397 | // print_hp_waiters(tsk_rt(next)->hp_blocked_tasks.root, 0); | ||
1398 | |||
1399 | binheap_add(&fq->nest.hp_binheap_node, | ||
1400 | &tsk_rt(next)->hp_blocked_tasks, | ||
1401 | struct nested_info, | ||
1402 | hp_binheap_node); | ||
1403 | |||
1404 | // TRACE_TASK(next, "Heap After:\n"); | ||
1405 | // print_hp_waiters(tsk_rt(next)->hp_blocked_tasks.root, 0); | ||
1406 | |||
1407 | raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock); | ||
1408 | } | ||
1409 | else { | ||
1410 | /* Well, if 'next' is not the highest-priority waiter, | ||
1411 | * then it (probably) ought to inherit the highest-priority | ||
1412 | * waiter's priority. */ | ||
1413 | TRACE_TASK(next, "is not hp_waiter of replica %d. hp_waiter is %s/%d\n", | ||
1414 | ikglp_get_idx(sem, fq), | ||
1415 | (fq->hp_waiter) ? fq->hp_waiter->comm : "nil", | ||
1416 | (fq->hp_waiter) ? fq->hp_waiter->pid : -1); | ||
1417 | |||
1418 | raw_spin_lock(&tsk_rt(next)->hp_blocked_tasks_lock); | ||
1419 | |||
1420 | binheap_add(&fq->nest.hp_binheap_node, | ||
1421 | &tsk_rt(next)->hp_blocked_tasks, | ||
1422 | struct nested_info, | ||
1423 | hp_binheap_node); | ||
1424 | |||
1425 | /* It is possible that 'next' *should* be the hp_waiter, but isn't | ||
1426 | * yet, because that update hasn't executed (the update operation is | ||
1427 | * probably blocked on sem->lock). So only inherit if the top of | ||
1428 | * 'next's heap is indeed the effective priority of hp_waiter. | ||
1429 | * (We use fq->nest.hp_waiter_eff_prio instead of | ||
1430 | * effective_priority(hp_waiter) since hp_waiter's effective priority | ||
1431 | * may change before the update propagates to this lock.) | ||
1432 | */ | ||
1433 | if(likely(top_priority(&tsk_rt(next)->hp_blocked_tasks) == | ||
1434 | fq->nest.hp_waiter_eff_prio)) | ||
1435 | { | ||
1436 | if(fq->nest.hp_waiter_eff_prio) | ||
1437 | litmus->increase_prio(next, fq->nest.hp_waiter_eff_prio); | ||
1438 | else | ||
1439 | WARN_ON(1); | ||
1440 | } | ||
1441 | |||
1442 | raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock); | ||
1443 | } | ||
1444 | |||
1445 | |||
1446 | // wake up the new resource holder! | ||
1447 | wake_up_process(next); | ||
1448 | } | ||
1449 | |||
1450 | out: | ||
1451 | unlock_fine_irqrestore(&sem->lock, flags); | ||
1452 | unlock_global_irqrestore(dgl_lock, flags); | ||
1453 | |||
1454 | raw_spin_unlock_irqrestore(&sem->real_lock, real_flags); | ||
1455 | |||
1456 | return err; | ||
1457 | } | ||
1458 | |||
1459 | |||
1460 | |||
1461 | int ikglp_close(struct litmus_lock* l) | ||
1462 | { | ||
1463 | struct task_struct *t = current; | ||
1464 | struct ikglp_semaphore *sem = ikglp_from_lock(l); | ||
1465 | unsigned long flags; | ||
1466 | |||
1467 | int owner = 0; | ||
1468 | int i; | ||
1469 | |||
1470 | raw_spin_lock_irqsave(&sem->real_lock, flags); | ||
1471 | |||
1472 | for(i = 0; i < sem->nr_replicas; ++i) { | ||
1473 | if(sem->fifo_queues[i].owner == t) { | ||
1474 | owner = 1; | ||
1475 | break; | ||
1476 | } | ||
1477 | } | ||
1478 | |||
1479 | raw_spin_unlock_irqrestore(&sem->real_lock, flags); | ||
1480 | |||
1481 | if (owner) | ||
1482 | ikglp_unlock(l); | ||
1483 | |||
1484 | return 0; | ||
1485 | } | ||
1486 | |||
1487 | void ikglp_free(struct litmus_lock* l) | ||
1488 | { | ||
1489 | struct ikglp_semaphore *sem = ikglp_from_lock(l); | ||
1490 | |||
1491 | kfree(sem->fifo_queues); | ||
1492 | kfree(sem); | ||
1493 | } | ||
1494 | |||
1495 | |||
1496 | |||
1497 | struct litmus_lock* ikglp_new(int m, | ||
1498 | struct litmus_lock_ops* ops, | ||
1499 | void* __user arg) | ||
1500 | { | ||
1501 | struct ikglp_semaphore* sem; | ||
1502 | int nr_replicas = 0; | ||
1503 | int i; | ||
1504 | |||
1505 | if(!access_ok(VERIFY_READ, arg, sizeof(nr_replicas))) | ||
1506 | { | ||
1507 | return(NULL); | ||
1508 | } | ||
1509 | if(__copy_from_user(&nr_replicas, arg, sizeof(nr_replicas))) | ||
1510 | { | ||
1511 | return(NULL); | ||
1512 | } | ||
1513 | if(nr_replicas < 1) | ||
1514 | { | ||
1515 | return(NULL); | ||
1516 | } | ||
1517 | |||
1518 | sem = kmalloc(sizeof(*sem), GFP_KERNEL); | ||
1519 | if(!sem) | ||
1520 | { | ||
1521 | return NULL; | ||
1522 | } | ||
1523 | |||
1524 | sem->fifo_queues = kmalloc(sizeof(struct fifo_queue)*nr_replicas, GFP_KERNEL); | ||
1525 | if(!sem->fifo_queues) | ||
1526 | { | ||
1527 | kfree(sem); | ||
1528 | return NULL; | ||
1529 | } | ||
1530 | |||
1531 | sem->litmus_lock.ops = ops; | ||
1532 | |||
1533 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
1534 | { | ||
1535 | __raw_spin_lock_init(&sem->lock, ((struct litmus_lock*)sem)->cheat_lockdep, &((struct litmus_lock*)sem)->key); | ||
1536 | } | ||
1537 | #else | ||
1538 | raw_spin_lock_init(&sem->lock); | ||
1539 | #endif | ||
1540 | |||
1541 | raw_spin_lock_init(&sem->real_lock); | ||
1542 | |||
1543 | sem->nr_replicas = nr_replicas; | ||
1544 | sem->m = m; | ||
1545 | sem->max_fifo_len = (sem->m/nr_replicas) + ((sem->m%nr_replicas) != 0); // ceil(m/k) | ||
1546 | |||
1547 | TRACE("New IKGLP Sem: m = %d, k = %d, max fifo_len = %d\n", | ||
1548 | sem->m, | ||
1549 | sem->nr_replicas, | ||
1550 | sem->max_fifo_len); | ||
1551 | |||
1552 | for(i = 0; i < nr_replicas; ++i) | ||
1553 | { | ||
1554 | struct fifo_queue* q = &(sem->fifo_queues[i]); | ||
1555 | |||
1556 | q->owner = NULL; | ||
1557 | q->hp_waiter = NULL; | ||
1558 | init_waitqueue_head(&q->wait); | ||
1559 | q->count = 0; | ||
1560 | |||
1561 | q->global_heap_node.task = NULL; | ||
1562 | INIT_BINHEAP_NODE(&q->global_heap_node.node); | ||
1563 | |||
1564 | q->donee_heap_node.task = NULL; | ||
1565 | q->donee_heap_node.donor_info = NULL; | ||
1566 | q->donee_heap_node.fq = NULL; | ||
1567 | INIT_BINHEAP_NODE(&q->donee_heap_node.node); | ||
1568 | |||
1569 | q->nest.lock = (struct litmus_lock*)sem; | ||
1570 | q->nest.hp_waiter_eff_prio = NULL; | ||
1571 | q->nest.hp_waiter_ptr = &q->hp_waiter; | ||
1572 | INIT_BINHEAP_NODE(&q->nest.hp_binheap_node); | ||
1573 | } | ||
1574 | |||
1575 | sem->shortest_fifo_queue = &sem->fifo_queues[0]; | ||
1576 | |||
1577 | sem->top_m_size = 0; | ||
1578 | |||
1579 | // init heaps | ||
1580 | INIT_BINHEAP_HANDLE(&sem->top_m, ikglp_edf_min_heap_base_priority_order); | ||
1581 | INIT_BINHEAP_HANDLE(&sem->not_top_m, ikglp_edf_max_heap_base_priority_order); | ||
1582 | INIT_BINHEAP_HANDLE(&sem->donees, ikglp_edf_min_heap_donee_order); | ||
1583 | INIT_BINHEAP_HANDLE(&sem->priority_queue, ikglp_edf_max_heap_base_priority_order); | ||
1584 | INIT_BINHEAP_HANDLE(&sem->donors, ikglp_donor_edf_max_heap_base_priority_order); | ||
1585 | |||
1586 | return &sem->litmus_lock; | ||
1587 | } | ||
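
For context, a scheduler plugin is expected to wrap ikglp_new() in its lock-allocation hook. A minimal sketch, assuming the stock litmus_lock_ops field names (.lock/.unlock/.close/.deallocate) and using num_online_cpus() as a stand-in for m; the plugin-side names below are illustrative and not part of this hunk:

    static struct litmus_lock_ops gsnedf_ikglp_lock_ops = {
        .lock       = ikglp_lock,
        .unlock     = ikglp_unlock,
        .close      = ikglp_close,
        .deallocate = ikglp_free,
    };

    /* e.g., invoked from the plugin's allocate_lock() callback */
    static struct litmus_lock* gsnedf_new_ikglp(void* __user arg)
    {
        return ikglp_new(num_online_cpus(), &gsnedf_ikglp_lock_ops, arg);
    }
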
diff --git a/litmus/litmus.c b/litmus/litmus.c index 68285b319160..7271af09a188 100644 --- a/litmus/litmus.c +++ b/litmus/litmus.c | |||
@@ -379,8 +379,10 @@ long litmus_admit_task(struct task_struct* tsk) | |||
379 | bheap_node_init(&tsk_rt(tsk)->heap_node, tsk); | 379 | bheap_node_init(&tsk_rt(tsk)->heap_node, tsk); |
380 | } | 380 | } |
381 | 381 | ||
382 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | ||
382 | tsk_rt(tsk)->blocked_lock = NULL; | 383 | tsk_rt(tsk)->blocked_lock = NULL; |
383 | raw_spin_lock_init(&tsk_rt(tsk)->hp_blocked_tasks_lock); | 384 | raw_spin_lock_init(&tsk_rt(tsk)->hp_blocked_tasks_lock); |
385 | #endif | ||
384 | 386 | ||
385 | retval = litmus->admit_task(tsk); | 387 | retval = litmus->admit_task(tsk); |
386 | 388 | ||
diff --git a/litmus/locking.c b/litmus/locking.c index b2f4a205cd04..f78169dbbeef 100644 --- a/litmus/locking.c +++ b/litmus/locking.c | |||
@@ -22,6 +22,9 @@ struct fdso_ops generic_lock_ops = { | |||
22 | .destroy = destroy_generic_lock | 22 | .destroy = destroy_generic_lock |
23 | }; | 23 | }; |
24 | 24 | ||
25 | static atomic_t lock_id_gen = ATOMIC_INIT(0); | ||
26 | |||
27 | |||
25 | static inline bool is_lock(struct od_table_entry* entry) | 28 | static inline bool is_lock(struct od_table_entry* entry) |
26 | { | 29 | { |
27 | return entry->class == &generic_lock_ops; | 30 | return entry->class == &generic_lock_ops; |
@@ -33,11 +36,6 @@ static inline struct litmus_lock* get_lock(struct od_table_entry* entry) | |||
33 | return (struct litmus_lock*) entry->obj->obj; | 36 | return (struct litmus_lock*) entry->obj->obj; |
34 | } | 37 | } |
35 | 38 | ||
36 | |||
37 | atomic_t lock_id_gen = ATOMIC_INIT(0); | ||
38 | //raw_spinlock_t rsm_global_lock; | ||
39 | |||
40 | |||
41 | static int create_generic_lock(void** obj_ref, obj_type_t type, void* __user arg) | 39 | static int create_generic_lock(void** obj_ref, obj_type_t type, void* __user arg) |
42 | { | 40 | { |
43 | struct litmus_lock* lock; | 41 | struct litmus_lock* lock; |
@@ -48,16 +46,11 @@ static int create_generic_lock(void** obj_ref, obj_type_t type, void* __user ar | |||
48 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | 46 | #ifdef CONFIG_LITMUS_NESTED_LOCKING |
49 | lock->nest.lock = lock; | 47 | lock->nest.lock = lock; |
50 | lock->nest.hp_waiter_eff_prio = NULL; | 48 | lock->nest.hp_waiter_eff_prio = NULL; |
51 | 49 | ||
52 | INIT_BINHEAP_NODE(&lock->nest.hp_binheap_node); | 50 | INIT_BINHEAP_NODE(&lock->nest.hp_binheap_node); |
53 | WARN_ON(!(lock->nest.hp_waiter_ptr)); | 51 | WARN_ON(!(lock->nest.hp_waiter_ptr)); |
54 | |||
55 | lock->ident = atomic_inc_return(&lock_id_gen); | ||
56 | |||
57 | // if(lock->ident == 1) { | ||
58 | // raw_spin_lock_init(&rsm_global_lock); | ||
59 | // } | ||
60 | #endif | 52 | #endif |
53 | lock->ident = atomic_inc_return(&lock_id_gen); | ||
61 | *obj_ref = lock; | 54 | *obj_ref = lock; |
62 | } | 55 | } |
63 | return err; | 56 | return err; |
@@ -145,69 +138,86 @@ struct task_struct* __waitqueue_remove_first(wait_queue_head_t *wq) | |||
145 | return(t); | 138 | return(t); |
146 | } | 139 | } |
147 | 140 | ||
141 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | ||
142 | |||
143 | void print_hp_waiters(struct binheap_node* n, int depth) | ||
144 | { | ||
145 | struct litmus_lock *l; | ||
146 | struct nested_info *nest; | ||
147 | char padding[81] = "                                                                                "; /* 80 spaces */ | ||
148 | struct task_struct *hp = NULL; | ||
149 | struct task_struct *hp_eff = NULL; | ||
150 | struct task_struct *node_prio = NULL; | ||
151 | |||
152 | |||
153 | if(n == NULL) { | ||
154 | TRACE("+-> %p\n", NULL); | ||
155 | return; | ||
156 | } | ||
157 | |||
158 | nest = binheap_entry(n, struct nested_info, hp_binheap_node); | ||
159 | l = nest->lock; | ||
160 | |||
161 | if(depth*2 <= 80) | ||
162 | padding[depth*2] = '\0'; | ||
163 | |||
164 | if(nest->hp_waiter_ptr && *(nest->hp_waiter_ptr)) { | ||
165 | hp = *(nest->hp_waiter_ptr); | ||
166 | |||
167 | if(tsk_rt(hp)->inh_task) { | ||
168 | hp_eff = tsk_rt(hp)->inh_task; | ||
169 | } | ||
170 | } | ||
171 | |||
172 | node_prio = nest->hp_waiter_eff_prio; | ||
173 | |||
174 | TRACE("%s+-> %s/%d [waiter = %s/%d] [waiter's inh = %s/%d] (lock = %d)\n", | ||
175 | padding, | ||
176 | (node_prio) ? node_prio->comm : "nil", | ||
177 | (node_prio) ? node_prio->pid : -1, | ||
178 | (hp) ? hp->comm : "nil", | ||
179 | (hp) ? hp->pid : -1, | ||
180 | (hp_eff) ? hp_eff->comm : "nil", | ||
181 | (hp_eff) ? hp_eff->pid : -1, | ||
182 | l->ident); | ||
183 | |||
184 | if(n->left) print_hp_waiters(n->left, depth+1); | ||
185 | if(n->right) print_hp_waiters(n->right, depth+1); | ||
186 | } | ||
187 | #endif | ||
188 | |||
148 | 189 | ||
149 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | 190 | #ifdef CONFIG_LITMUS_DGL_SUPPORT |
150 | 191 | ||
151 | void select_next_lock(dgl_wait_state_t* dgl_wait, struct litmus_lock* prev_lock) | 192 | void select_next_lock(dgl_wait_state_t* dgl_wait /*, struct litmus_lock* prev_lock*/) |
152 | { | 193 | { |
153 | // int i = dgl_wait->size - 1; | 194 | /* |
154 | 195 | We pick the next lock in reverse order. This causes inheritance propagation | |
155 | 196 | from locks received earlier to flow in the same direction as regular nested | |
197 | locking. This might make fine-grained DGL easier in the future. | ||
198 | */ | ||
199 | |||
156 | BUG_ON(tsk_rt(dgl_wait->task)->blocked_lock); | 200 | BUG_ON(tsk_rt(dgl_wait->task)->blocked_lock); |
157 | 201 | ||
158 | WARN_ON(dgl_wait->locks[dgl_wait->last_primary] != prev_lock); | 202 | //WARN_ON(dgl_wait->locks[dgl_wait->last_primary] != prev_lock); |
159 | // | 203 | |
160 | // // since dgl_wait->task->blocked_lock, all locks after prev_lock | 204 | // note reverse order |
161 | // // are already held. | ||
162 | // | ||
163 | // // find the lock after prev. | ||
164 | // if(prev_lock) { | ||
165 | // for(/**/; i >= 0; --i) { | ||
166 | // if(prev_lock == dgl_wait->locks[i]) { | ||
167 | // --i; | ||
168 | // break; | ||
169 | // } | ||
170 | // else { | ||
171 | // BUG_ON(!dgl_wait->locks[i]->ops->is_owner(dgl_wait->locks[i], dgl_wait->task)); | ||
172 | // } | ||
173 | // } | ||
174 | // } | ||
175 | |||
176 | for(dgl_wait->last_primary = dgl_wait->last_primary - 1; | 205 | for(dgl_wait->last_primary = dgl_wait->last_primary - 1; |
177 | dgl_wait->last_primary >= 0; | 206 | dgl_wait->last_primary >= 0; |
178 | --(dgl_wait->last_primary)){ | 207 | --(dgl_wait->last_primary)){ |
179 | if(!dgl_wait->locks[dgl_wait->last_primary]->ops->is_owner(dgl_wait->locks[dgl_wait->last_primary], dgl_wait->task)) { | 208 | if(!dgl_wait->locks[dgl_wait->last_primary]->ops->is_owner( |
180 | 209 | dgl_wait->locks[dgl_wait->last_primary], dgl_wait->task)) { | |
181 | tsk_rt(dgl_wait->task)->blocked_lock = dgl_wait->locks[dgl_wait->last_primary]; | 210 | |
211 | tsk_rt(dgl_wait->task)->blocked_lock = | ||
212 | dgl_wait->locks[dgl_wait->last_primary]; | ||
182 | mb(); | 213 | mb(); |
183 | 214 | ||
184 | TRACE_CUR("New blocked lock is %d\n", dgl_wait->locks[dgl_wait->last_primary]->ident); | 215 | TRACE_CUR("New blocked lock is %d\n", |
185 | 216 | dgl_wait->locks[dgl_wait->last_primary]->ident); | |
217 | |||
186 | break; | 218 | break; |
187 | } | 219 | } |
188 | } | 220 | } |
189 | |||
190 | // for(/**/; i >= 0; --i) { | ||
191 | // struct litmus_lock *l = dgl_wait->locks[i]; | ||
192 | // if(!l->ops->is_owner(l, dgl_wait->task)) { | ||
193 | // | ||
194 | // tsk_rt(dgl_wait->task)->blocked_lock = l; | ||
195 | // mb(); | ||
196 | // | ||
197 | // TRACE_CUR("New blocked lock is %d\n", l->ident); | ||
198 | // | ||
199 | // if(dgl_wait->last_primary >= 0) | ||
200 | // { | ||
201 | // TRACE_CUR("old meth = %d; new meth = %d\n", l->ident, dgl_wait->locks[dgl_wait->last_primary]->ident); | ||
202 | // WARN_ON(dgl_wait->locks[dgl_wait->last_primary] != l); | ||
203 | // } | ||
204 | // | ||
205 | // break; | ||
206 | // } | ||
207 | // else { | ||
208 | // TRACE_CUR("Lock %d is actually held!\n", l->ident); | ||
209 | // } | ||
210 | // } | ||
211 | } | 221 | } |
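
A hypothetical trace makes the reverse-order rule concrete (the lock names are made up):

    /* A task issues a DGL over locks[] = {A, B, C}; only A is free.
     * do_litmus_dgl_lock() acquires A at once. Its reverse-order scan
     * then finds C (index 2) unheld, sets last_primary = 2, and the
     * task suspends blocked on C. When C is handed over,
     * select_next_lock() resumes the scan at index 1, finds B still
     * unheld, and blocks on B next. Once B is granted, the scan sees
     * A (index 0) already held, nothing remains, and the task resumes
     * holding the entire group. */
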
212 | 222 | ||
213 | int dgl_wake_up(wait_queue_t *wq_node, unsigned mode, int sync, void *key) | 223 | int dgl_wake_up(wait_queue_t *wq_node, unsigned mode, int sync, void *key) |
@@ -217,24 +227,26 @@ int dgl_wake_up(wait_queue_t *wq_node, unsigned mode, int sync, void *key) | |||
217 | return 1; | 227 | return 1; |
218 | } | 228 | } |
219 | 229 | ||
220 | void __waitqueue_dgl_remove_first(wait_queue_head_t *wq, dgl_wait_state_t** dgl_wait, struct task_struct **task) | 230 | void __waitqueue_dgl_remove_first(wait_queue_head_t *wq, |
231 | dgl_wait_state_t** dgl_wait, | ||
232 | struct task_struct **task) | ||
221 | { | 233 | { |
222 | wait_queue_t *q; | 234 | wait_queue_t *q; |
223 | 235 | ||
224 | *dgl_wait = NULL; | 236 | *dgl_wait = NULL; |
225 | *task = NULL; | 237 | *task = NULL; |
226 | 238 | ||
227 | if (waitqueue_active(wq)) { | 239 | if (waitqueue_active(wq)) { |
228 | q = list_entry(wq->task_list.next, | 240 | q = list_entry(wq->task_list.next, |
229 | wait_queue_t, task_list); | 241 | wait_queue_t, task_list); |
230 | 242 | ||
231 | if(q->func == dgl_wake_up) { | 243 | if(q->func == dgl_wake_up) { |
232 | *dgl_wait = (dgl_wait_state_t*) q->private; | 244 | *dgl_wait = (dgl_wait_state_t*) q->private; |
233 | } | 245 | } |
234 | else { | 246 | else { |
235 | *task = (struct task_struct*) q->private; | 247 | *task = (struct task_struct*) q->private; |
236 | } | 248 | } |
237 | 249 | ||
238 | __remove_wait_queue(wq, q); | 250 | __remove_wait_queue(wq, q); |
239 | } | 251 | } |
240 | } | 252 | } |
@@ -252,76 +264,76 @@ static long do_litmus_dgl_lock(dgl_wait_state_t *dgl_wait) | |||
252 | int i; | 264 | int i; |
253 | unsigned long irqflags; //, dummyflags; | 265 | unsigned long irqflags; //, dummyflags; |
254 | raw_spinlock_t *dgl_lock = litmus->get_dgl_spinlock(dgl_wait->task); | 266 | raw_spinlock_t *dgl_lock = litmus->get_dgl_spinlock(dgl_wait->task); |
255 | 267 | ||
256 | BUG_ON(dgl_wait->task != current); | 268 | BUG_ON(dgl_wait->task != current); |
257 | 269 | ||
258 | raw_spin_lock_irqsave(dgl_lock, irqflags); | 270 | raw_spin_lock_irqsave(dgl_lock, irqflags); |
259 | 271 | ||
260 | 272 | ||
261 | dgl_wait->nr_remaining = dgl_wait->size; | 273 | dgl_wait->nr_remaining = dgl_wait->size; |
262 | //atomic_set(&dgl_wait->nr_remaining, dgl_wait->size); | 274 | |
263 | |||
264 | // try to acquire each lock. enqueue (non-blocking) if it is unavailable. | 275 | // try to acquire each lock. enqueue (non-blocking) if it is unavailable. |
265 | for(i = 0; i < dgl_wait->size; ++i) { | 276 | for(i = 0; i < dgl_wait->size; ++i) { |
266 | struct litmus_lock *l = dgl_wait->locks[i]; | 277 | struct litmus_lock *l = dgl_wait->locks[i]; |
267 | 278 | ||
268 | // dgl_lock() must set task state to TASK_UNINTERRUPTIBLE if task blocks. | 279 | // dgl_lock() must set task state to TASK_UNINTERRUPTIBLE if task blocks. |
269 | 280 | ||
270 | if(l->ops->dgl_lock(l, dgl_wait, &dgl_wait->wq_nodes[i])) { | 281 | if(l->ops->dgl_lock(l, dgl_wait, &dgl_wait->wq_nodes[i])) { |
271 | --(dgl_wait->nr_remaining); | 282 | --(dgl_wait->nr_remaining); |
272 | //atomic_dec(&dgl_wait->nr_remaining); | ||
273 | TRACE_CUR("Acquired lock %d immediatly.\n", l->ident); | 283 | TRACE_CUR("Acquired lock %d immediatly.\n", l->ident); |
274 | } | 284 | } |
275 | } | 285 | } |
276 | 286 | ||
277 | //if(atomic_read(&dgl_wait->nr_remaining) == 0) { | ||
278 | if(dgl_wait->nr_remaining == 0) { | 287 | if(dgl_wait->nr_remaining == 0) { |
279 | // acquired entire group immediately | 288 | // acquired entire group immediately |
280 | TRACE_CUR("Acquired all locks in DGL immediately!\n"); | 289 | TRACE_CUR("Acquired all locks in DGL immediately!\n"); |
281 | } | 290 | } |
282 | else { | 291 | else { |
292 | |||
293 | TRACE_CUR("As many as %d locks in DGL are pending. Suspending.\n", | ||
294 | dgl_wait->nr_remaining); | ||
283 | 295 | ||
284 | TRACE_CUR("As many as %d locks in DGL are pending. Suspending.\n", dgl_wait->nr_remaining); //atomic_read(&dgl_wait->nr_remaining)); | 296 | // note reverse order. see comments in select_next_lock for reason. |
285 | |||
286 | for(i = dgl_wait->size - 1; i >= 0; --i) { | 297 | for(i = dgl_wait->size - 1; i >= 0; --i) { |
287 | struct litmus_lock *l = dgl_wait->locks[i]; | 298 | struct litmus_lock *l = dgl_wait->locks[i]; |
288 | if(!l->ops->is_owner(l, dgl_wait->task)) { // double-check to be thread safe | 299 | if(!l->ops->is_owner(l, dgl_wait->task)) { // double-check to be thread safe |
289 | 300 | ||
290 | TRACE_CUR("Activating priority inheritance on lock %d\n", l->ident); | 301 | TRACE_CUR("Activating priority inheritance on lock %d\n", |
291 | 302 | l->ident); | |
303 | |||
292 | TS_DGL_LOCK_SUSPEND; | 304 | TS_DGL_LOCK_SUSPEND; |
293 | 305 | ||
294 | l->ops->enable_priority(l, dgl_wait); | 306 | l->ops->enable_priority(l, dgl_wait); |
295 | dgl_wait->last_primary = i; | 307 | dgl_wait->last_primary = i; |
296 | 308 | ||
297 | TRACE_CUR("Suspending for lock %d\n", l->ident); | 309 | TRACE_CUR("Suspending for lock %d\n", l->ident); |
298 | 310 | ||
299 | raw_spin_unlock_irqrestore(dgl_lock, irqflags); // free dgl_lock before suspending | 311 | raw_spin_unlock_irqrestore(dgl_lock, irqflags); // free dgl_lock before suspending |
300 | 312 | ||
301 | schedule(); // suspend!!! | 313 | schedule(); // suspend!!! |
302 | 314 | ||
303 | TS_DGL_LOCK_RESUME; | 315 | TS_DGL_LOCK_RESUME; |
304 | 316 | ||
305 | TRACE_CUR("Woken up from DGL suspension.\n"); | 317 | TRACE_CUR("Woken up from DGL suspension.\n"); |
306 | 318 | ||
307 | goto all_acquired; // we should hold all locks when we wake up. | 319 | goto all_acquired; // we should hold all locks when we wake up. |
308 | } | 320 | } |
309 | } | 321 | } |
310 | 322 | ||
311 | TRACE_CUR("Didn't have to suspend after all, but calling schedule() anyway.\n"); | 323 | TRACE_CUR("Didn't have to suspend after all, but calling schedule() anyway.\n"); |
312 | BUG(); | 324 | BUG(); |
313 | } | 325 | } |
314 | 326 | ||
315 | raw_spin_unlock_irqrestore(dgl_lock, irqflags); | 327 | raw_spin_unlock_irqrestore(dgl_lock, irqflags); |
316 | 328 | ||
317 | all_acquired: | 329 | all_acquired: |
318 | 330 | ||
319 | // SANITY CHECK FOR TESTING | 331 | // SANITY CHECK FOR TESTING |
320 | for(i = 0; i < dgl_wait->size; ++i) { | 332 | for(i = 0; i < dgl_wait->size; ++i) { |
321 | struct litmus_lock *l = dgl_wait->locks[i]; | 333 | struct litmus_lock *l = dgl_wait->locks[i]; |
322 | BUG_ON(!l->ops->is_owner(l, dgl_wait->task)); | 334 | BUG_ON(!l->ops->is_owner(l, dgl_wait->task)); |
323 | } | 335 | } |
324 | 336 | ||
325 | TRACE_CUR("Acquired entire DGL\n"); | 337 | TRACE_CUR("Acquired entire DGL\n"); |
326 | 338 | ||
327 | return 0; | 339 | return 0; |
@@ -330,7 +342,7 @@ all_acquired: | |||
330 | //static int supports_dgl(struct litmus_lock *l) | 342 | //static int supports_dgl(struct litmus_lock *l) |
331 | //{ | 343 | //{ |
332 | // struct litmus_lock_ops* ops = l->ops; | 344 | // struct litmus_lock_ops* ops = l->ops; |
333 | // | 345 | // |
334 | // return (ops->dgl_lock && | 346 | // return (ops->dgl_lock && |
335 | // ops->is_owner && | 347 | // ops->is_owner && |
336 | // ops->enable_priority); | 348 | // ops->enable_priority); |
@@ -342,23 +354,23 @@ asmlinkage long sys_litmus_dgl_lock(void* __user usr_dgl_ods, int dgl_size) | |||
342 | long err = -EINVAL; | 354 | long err = -EINVAL; |
343 | int dgl_ods[MAX_DGL_SIZE]; | 355 | int dgl_ods[MAX_DGL_SIZE]; |
344 | int i; | 356 | int i; |
345 | 357 | ||
346 | dgl_wait_state_t dgl_wait_state; // lives on the stack until all resources in DGL are held. | 358 | dgl_wait_state_t dgl_wait_state; // lives on the stack until all resources in DGL are held. |
347 | 359 | ||
348 | if(dgl_size > MAX_DGL_SIZE || dgl_size < 1) | 360 | if(dgl_size > MAX_DGL_SIZE || dgl_size < 1) |
349 | goto out; | 361 | goto out; |
350 | 362 | ||
351 | if(!access_ok(VERIFY_READ, usr_dgl_ods, dgl_size*(sizeof(int)))) | 363 | if(!access_ok(VERIFY_READ, usr_dgl_ods, dgl_size*(sizeof(int)))) |
352 | goto out; | 364 | goto out; |
353 | 365 | ||
354 | if(__copy_from_user(&dgl_ods, usr_dgl_ods, dgl_size*(sizeof(int)))) | 366 | if(__copy_from_user(&dgl_ods, usr_dgl_ods, dgl_size*(sizeof(int)))) |
355 | goto out; | 367 | goto out; |
356 | 368 | ||
357 | if (!is_realtime(t)) { | 369 | if (!is_realtime(t)) { |
358 | err = -EPERM; | 370 | err = -EPERM; |
359 | goto out; | 371 | goto out; |
360 | } | 372 | } |
361 | 373 | ||
362 | for(i = 0; i < dgl_size; ++i) { | 374 | for(i = 0; i < dgl_size; ++i) { |
363 | struct od_table_entry *entry = get_entry_for_od(dgl_ods[i]); | 375 | struct od_table_entry *entry = get_entry_for_od(dgl_ods[i]); |
364 | if(entry && is_lock(entry)) { | 376 | if(entry && is_lock(entry)) { |
@@ -374,17 +386,17 @@ asmlinkage long sys_litmus_dgl_lock(void* __user usr_dgl_ods, int dgl_size) | |||
374 | goto out; | 386 | goto out; |
375 | } | 387 | } |
376 | } | 388 | } |
377 | 389 | ||
378 | dgl_wait_state.task = t; | 390 | dgl_wait_state.task = t; |
379 | dgl_wait_state.size = dgl_size; | 391 | dgl_wait_state.size = dgl_size; |
380 | 392 | ||
381 | TS_DGL_LOCK_START; | 393 | TS_DGL_LOCK_START; |
382 | err = do_litmus_dgl_lock(&dgl_wait_state); | 394 | err = do_litmus_dgl_lock(&dgl_wait_state); |
383 | 395 | ||
384 | /* Note: task may have been suspended or preempted in between! Take | 396 | /* Note: task may have been suspended or preempted in between! Take |
385 | * this into account when computing overheads. */ | 397 | * this into account when computing overheads. */ |
386 | TS_DGL_LOCK_END; | 398 | TS_DGL_LOCK_END; |
387 | 399 | ||
388 | out: | 400 | out: |
389 | return err; | 401 | return err; |
390 | } | 402 | } |
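
From user space, the syscall takes an array of object descriptors and its length. A hypothetical invocation (the __NR_litmus_dgl_lock/__NR_litmus_dgl_unlock numbers and the od handles are assumed to come from the LITMUS^RT headers and liblitmus; they are not shown in this patch):

    #include <unistd.h>   /* syscall() */

    int ods[2] = { od_a, od_b };   /* object descriptors of two LITMUS locks */

    if (syscall(__NR_litmus_dgl_lock, ods, 2) == 0) {
        /* ... critical section: both resources are held ... */
        syscall(__NR_litmus_dgl_unlock, ods, 2);
    }
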
@@ -393,26 +405,26 @@ static long do_litmus_dgl_unlock(struct litmus_lock* dgl_locks[], int dgl_size) | |||
393 | { | 405 | { |
394 | int i; | 406 | int i; |
395 | long err = 0; | 407 | long err = 0; |
396 | 408 | ||
397 | TRACE_CUR("Unlocking a DGL of %d size\n", dgl_size); | 409 | TRACE_CUR("Unlocking a DGL of %d size\n", dgl_size); |
398 | 410 | ||
399 | for(i = dgl_size - 1; i >= 0; --i) { // unlock in reverse order | 411 | for(i = dgl_size - 1; i >= 0; --i) { // unlock in reverse order |
400 | 412 | ||
401 | struct litmus_lock *l = dgl_locks[i]; | 413 | struct litmus_lock *l = dgl_locks[i]; |
402 | long tmp_err; | 414 | long tmp_err; |
403 | 415 | ||
404 | TRACE_CUR("Unlocking lock %d of DGL.\n", l->ident); | 416 | TRACE_CUR("Unlocking lock %d of DGL.\n", l->ident); |
405 | 417 | ||
406 | tmp_err = l->ops->unlock(l); | 418 | tmp_err = l->ops->unlock(l); |
407 | 419 | ||
408 | if(tmp_err) { | 420 | if(tmp_err) { |
409 | TRACE_CUR("There was an error unlocking %d: %d.\n", l->ident, tmp_err); | 421 | TRACE_CUR("There was an error unlocking %d: %d.\n", l->ident, tmp_err); |
410 | err = tmp_err; | 422 | err = tmp_err; |
411 | } | 423 | } |
412 | } | 424 | } |
413 | 425 | ||
414 | TRACE_CUR("DGL unlocked. err = %d\n", err); | 426 | TRACE_CUR("DGL unlocked. err = %d\n", err); |
415 | 427 | ||
416 | return err; | 428 | return err; |
417 | } | 429 | } |
418 | 430 | ||
@@ -422,18 +434,18 @@ asmlinkage long sys_litmus_dgl_unlock(void* __user usr_dgl_ods, int dgl_size) | |||
422 | int dgl_ods[MAX_DGL_SIZE]; | 434 | int dgl_ods[MAX_DGL_SIZE]; |
423 | struct od_table_entry* entry; | 435 | struct od_table_entry* entry; |
424 | int i; | 436 | int i; |
425 | 437 | ||
426 | struct litmus_lock* dgl_locks[MAX_DGL_SIZE]; | 438 | struct litmus_lock* dgl_locks[MAX_DGL_SIZE]; |
427 | 439 | ||
428 | if(dgl_size > MAX_DGL_SIZE || dgl_size < 1) | 440 | if(dgl_size > MAX_DGL_SIZE || dgl_size < 1) |
429 | goto out; | 441 | goto out; |
430 | 442 | ||
431 | if(!access_ok(VERIFY_READ, usr_dgl_ods, dgl_size*(sizeof(int)))) | 443 | if(!access_ok(VERIFY_READ, usr_dgl_ods, dgl_size*(sizeof(int)))) |
432 | goto out; | 444 | goto out; |
433 | 445 | ||
434 | if(__copy_from_user(&dgl_ods, usr_dgl_ods, dgl_size*(sizeof(int)))) | 446 | if(__copy_from_user(&dgl_ods, usr_dgl_ods, dgl_size*(sizeof(int)))) |
435 | goto out; | 447 | goto out; |
436 | 448 | ||
437 | for(i = 0; i < dgl_size; ++i) { | 449 | for(i = 0; i < dgl_size; ++i) { |
438 | entry = get_entry_for_od(dgl_ods[i]); | 450 | entry = get_entry_for_od(dgl_ods[i]); |
439 | if(entry && is_lock(entry)) { | 451 | if(entry && is_lock(entry)) { |
@@ -449,16 +461,28 @@ asmlinkage long sys_litmus_dgl_unlock(void* __user usr_dgl_ods, int dgl_size) | |||
449 | goto out; | 461 | goto out; |
450 | } | 462 | } |
451 | } | 463 | } |
452 | 464 | ||
453 | TS_DGL_UNLOCK_START; | 465 | TS_DGL_UNLOCK_START; |
454 | err = do_litmus_dgl_unlock(dgl_locks, dgl_size); | 466 | err = do_litmus_dgl_unlock(dgl_locks, dgl_size); |
455 | 467 | ||
456 | /* Note: task may have been suspended or preempted in between! Take | 468 | /* Note: task may have been suspended or preempted in between! Take |
457 | * this into account when computing overheads. */ | 469 | * this into account when computing overheads. */ |
458 | TS_DGL_UNLOCK_END; | 470 | TS_DGL_UNLOCK_END; |
459 | 471 | ||
460 | out: | 472 | out: |
461 | return err; | 473 | return err; |
474 | } | ||
475 | |||
476 | #else | ||
477 | |||
478 | asmlinkage long sys_litmus_dgl_lock(void* __user usr_dgl_ods, int dgl_size) | ||
479 | { | ||
480 | return -ENOSYS; | ||
481 | } | ||
482 | |||
483 | asmlinkage long sys_litmus_dgl_unlock(void* __user usr_dgl_ods, int dgl_size) | ||
484 | { | ||
485 | return -ENOSYS; | ||
462 | } | 486 | } |
463 | 487 | ||
464 | #endif | 488 | #endif |
diff --git a/litmus/rsm_lock.c b/litmus/rsm_lock.c new file mode 100644 index 000000000000..11d119210ef9 --- /dev/null +++ b/litmus/rsm_lock.c | |||
@@ -0,0 +1,774 @@ | |||
1 | #include <linux/slab.h> | ||
2 | #include <linux/uaccess.h> | ||
3 | |||
4 | #include <litmus/trace.h> | ||
5 | #include <litmus/sched_plugin.h> | ||
6 | #include <litmus/rsm_lock.h> | ||
7 | |||
8 | #include <litmus/edf_common.h> | ||
9 | |||
10 | |||
11 | /* caller is responsible for locking */ | ||
12 | static struct task_struct* rsm_mutex_find_hp_waiter(struct rsm_mutex *mutex, | ||
13 | struct task_struct* skip) | ||
14 | { | ||
15 | wait_queue_t *q; | ||
16 | struct list_head *pos; | ||
17 | struct task_struct *queued = NULL, *found = NULL; | ||
18 | |||
19 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
20 | dgl_wait_state_t *dgl_wait = NULL; | ||
21 | #endif | ||
22 | |||
23 | list_for_each(pos, &mutex->wait.task_list) { | ||
24 | q = list_entry(pos, wait_queue_t, task_list); | ||
25 | |||
26 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
27 | if(q->func == dgl_wake_up) { | ||
28 | dgl_wait = (dgl_wait_state_t*) q->private; | ||
29 | if(tsk_rt(dgl_wait->task)->blocked_lock == &mutex->litmus_lock) { | ||
30 | queued = dgl_wait->task; | ||
31 | } | ||
32 | else { | ||
33 | queued = NULL; // skip it. | ||
34 | } | ||
35 | } | ||
36 | else { | ||
37 | queued = (struct task_struct*) q->private; | ||
38 | } | ||
39 | #else | ||
40 | queued = (struct task_struct*) q->private; | ||
41 | #endif | ||
42 | |||
43 | /* Compare task prios, find high prio task. */ | ||
44 | if (queued && queued != skip && edf_higher_prio(queued, found)) { | ||
45 | found = queued; | ||
46 | } | ||
47 | } | ||
48 | return found; | ||
49 | } | ||
50 | |||
51 | |||
52 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
53 | |||
54 | int rsm_mutex_is_owner(struct litmus_lock *l, struct task_struct *t) | ||
55 | { | ||
56 | struct rsm_mutex *mutex = rsm_mutex_from_lock(l); | ||
57 | return(mutex->owner == t); | ||
58 | } | ||
59 | |||
60 | // return 1 if the resource was acquired immediately. | ||
61 | // Assumes mutex->lock is held. | ||
62 | // Must set task state to TASK_UNINTERRUPTIBLE if task blocks. | ||
63 | int rsm_mutex_dgl_lock(struct litmus_lock *l, dgl_wait_state_t* dgl_wait, | ||
64 | wait_queue_t* wq_node) | ||
65 | { | ||
66 | struct rsm_mutex *mutex = rsm_mutex_from_lock(l); | ||
67 | struct task_struct *t = dgl_wait->task; | ||
68 | |||
69 | int acquired_immediately = 0; | ||
70 | |||
71 | BUG_ON(t != current); | ||
72 | |||
73 | if (mutex->owner) { | ||
74 | TRACE_TASK(t, "Enqueuing on lock %d.\n", l->ident); | ||
75 | |||
76 | init_dgl_waitqueue_entry(wq_node, dgl_wait); | ||
77 | |||
78 | set_task_state(t, TASK_UNINTERRUPTIBLE); | ||
79 | __add_wait_queue_tail_exclusive(&mutex->wait, wq_node); | ||
80 | } else { | ||
81 | TRACE_TASK(t, "Acquired lock %d with no blocking.\n", l->ident); | ||
82 | |||
83 | /* it's ours now */ | ||
84 | mutex->owner = t; | ||
85 | |||
86 | raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock); | ||
87 | binheap_add(&l->nest.hp_binheap_node, &tsk_rt(t)->hp_blocked_tasks, | ||
88 | struct nested_info, hp_binheap_node); | ||
89 | raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); | ||
90 | |||
91 | acquired_immediately = 1; | ||
92 | } | ||
93 | |||
94 | return acquired_immediately; | ||
95 | } | ||
96 | |||
97 | void rsm_mutex_enable_priority(struct litmus_lock *l, | ||
98 | dgl_wait_state_t* dgl_wait) | ||
99 | { | ||
100 | struct rsm_mutex *mutex = rsm_mutex_from_lock(l); | ||
101 | struct task_struct *t = dgl_wait->task; | ||
102 | struct task_struct *owner = mutex->owner; | ||
103 | unsigned long flags = 0; // these are unused under DGL coarse-grain locking | ||
104 | |||
105 | BUG_ON(owner == t); | ||
106 | |||
107 | tsk_rt(t)->blocked_lock = l; | ||
108 | mb(); | ||
109 | |||
110 | if (edf_higher_prio(t, mutex->hp_waiter)) { | ||
111 | |||
112 | struct task_struct *old_max_eff_prio; | ||
113 | struct task_struct *new_max_eff_prio; | ||
114 | struct task_struct *new_prio = NULL; | ||
115 | |||
116 | if(mutex->hp_waiter) | ||
117 | TRACE_TASK(t, "has higher prio than hp_waiter (%s/%d).\n", | ||
118 | mutex->hp_waiter->comm, mutex->hp_waiter->pid); | ||
119 | else | ||
120 | TRACE_TASK(t, "has higher prio than hp_waiter (NIL).\n"); | ||
121 | |||
122 | raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock); | ||
123 | |||
124 | old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); | ||
125 | mutex->hp_waiter = t; | ||
126 | l->nest.hp_waiter_eff_prio = effective_priority(mutex->hp_waiter); | ||
127 | binheap_decrease(&l->nest.hp_binheap_node, | ||
128 | &tsk_rt(owner)->hp_blocked_tasks); | ||
129 | new_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); | ||
130 | |||
131 | if(new_max_eff_prio != old_max_eff_prio) { | ||
132 | TRACE_TASK(t, "is new hp_waiter.\n"); | ||
133 | |||
134 | if ((effective_priority(owner) == old_max_eff_prio) || | ||
135 | (__edf_higher_prio(new_max_eff_prio, BASE, owner, EFFECTIVE))){ | ||
136 | new_prio = new_max_eff_prio; | ||
137 | } | ||
138 | } | ||
139 | else { | ||
140 | TRACE_TASK(t, "no change in max_eff_prio of heap.\n"); | ||
141 | } | ||
142 | |||
143 | if(new_prio) { | ||
144 | litmus->nested_increase_prio(owner, new_prio, | ||
145 | &mutex->lock, flags); // unlocks lock. | ||
146 | } | ||
147 | else { | ||
148 | raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); | ||
149 | unlock_fine_irqrestore(&mutex->lock, flags); | ||
150 | } | ||
151 | } | ||
152 | else { | ||
153 | TRACE_TASK(t, "no change in hp_waiter.\n"); | ||
154 | unlock_fine_irqrestore(&mutex->lock, flags); | ||
155 | } | ||
156 | } | ||
157 | |||
158 | static void select_next_lock_if_primary(struct litmus_lock *l, | ||
159 | dgl_wait_state_t *dgl_wait) | ||
160 | { | ||
161 | if(tsk_rt(dgl_wait->task)->blocked_lock == l) { | ||
162 | TRACE_CUR("Lock %d in DGL was primary for %s/%d.\n", | ||
163 | l->ident, dgl_wait->task->comm, dgl_wait->task->pid); | ||
164 | tsk_rt(dgl_wait->task)->blocked_lock = NULL; | ||
165 | mb(); | ||
166 | select_next_lock(dgl_wait /*, l*/); // pick the next lock to be blocked on | ||
167 | } | ||
168 | else { | ||
169 | TRACE_CUR("Got lock early! Lock %d in DGL was NOT primary for %s/%d.\n", | ||
170 | l->ident, dgl_wait->task->comm, dgl_wait->task->pid); | ||
171 | } | ||
172 | } | ||
173 | #endif | ||
174 | |||
175 | |||
176 | |||
177 | |||
178 | int rsm_mutex_lock(struct litmus_lock* l) | ||
179 | { | ||
180 | struct task_struct *t = current; | ||
181 | struct task_struct *owner; | ||
182 | struct rsm_mutex *mutex = rsm_mutex_from_lock(l); | ||
183 | wait_queue_t wait; | ||
184 | unsigned long flags; | ||
185 | |||
186 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
187 | raw_spinlock_t *dgl_lock; | ||
188 | #endif | ||
189 | |||
190 | if (!is_realtime(t)) | ||
191 | return -EPERM; | ||
192 | |||
193 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
194 | dgl_lock = litmus->get_dgl_spinlock(t); | ||
195 | #endif | ||
196 | |||
197 | lock_global_irqsave(dgl_lock, flags); | ||
198 | lock_fine_irqsave(&mutex->lock, flags); | ||
199 | |||
200 | if (mutex->owner) { | ||
201 | TRACE_TASK(t, "Blocking on lock %d.\n", l->ident); | ||
202 | |||
203 | /* resource is not free => must suspend and wait */ | ||
204 | |||
205 | owner = mutex->owner; | ||
206 | |||
207 | init_waitqueue_entry(&wait, t); | ||
208 | |||
209 | tsk_rt(t)->blocked_lock = l; /* record where we are blocked */ | ||
210 | mb(); // needed? | ||
211 | |||
212 | /* FIXME: interruptible would be nice some day */ | ||
213 | set_task_state(t, TASK_UNINTERRUPTIBLE); | ||
214 | |||
215 | __add_wait_queue_tail_exclusive(&mutex->wait, &wait); | ||
216 | |||
217 | /* check if we need to activate priority inheritance */ | ||
218 | if (edf_higher_prio(t, mutex->hp_waiter)) { | ||
219 | |||
220 | struct task_struct *old_max_eff_prio; | ||
221 | struct task_struct *new_max_eff_prio; | ||
222 | struct task_struct *new_prio = NULL; | ||
223 | |||
224 | if(mutex->hp_waiter) | ||
225 | TRACE_TASK(t, "has higher prio than hp_waiter (%s/%d).\n", | ||
226 | mutex->hp_waiter->comm, mutex->hp_waiter->pid); | ||
227 | else | ||
228 | TRACE_TASK(t, "has higher prio than hp_waiter (NIL).\n"); | ||
229 | |||
230 | raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock); | ||
231 | |||
232 | old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); | ||
233 | mutex->hp_waiter = t; | ||
234 | l->nest.hp_waiter_eff_prio = effective_priority(mutex->hp_waiter); | ||
235 | binheap_decrease(&l->nest.hp_binheap_node, | ||
236 | &tsk_rt(owner)->hp_blocked_tasks); | ||
237 | new_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); | ||
238 | |||
239 | if(new_max_eff_prio != old_max_eff_prio) { | ||
240 | TRACE_TASK(t, "is new hp_waiter.\n"); | ||
241 | |||
242 | if ((effective_priority(owner) == old_max_eff_prio) || | ||
243 | (__edf_higher_prio(new_max_eff_prio, BASE, | ||
244 | owner, EFFECTIVE))){ | ||
245 | new_prio = new_max_eff_prio; | ||
246 | } | ||
247 | } | ||
248 | else { | ||
249 | TRACE_TASK(t, "no change in max_eff_prio of heap.\n"); | ||
250 | } | ||
251 | |||
252 | if(new_prio) { | ||
253 | litmus->nested_increase_prio(owner, new_prio, &mutex->lock, | ||
254 | flags); // unlocks lock. | ||
255 | } | ||
256 | else { | ||
257 | raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); | ||
258 | unlock_fine_irqrestore(&mutex->lock, flags); | ||
259 | } | ||
260 | } | ||
261 | else { | ||
262 | TRACE_TASK(t, "no change in hp_waiter.\n"); | ||
263 | |||
264 | unlock_fine_irqrestore(&mutex->lock, flags); | ||
265 | } | ||
266 | |||
267 | unlock_global_irqrestore(dgl_lock, flags); | ||
268 | |||
269 | TS_LOCK_SUSPEND; | ||
270 | |||
271 | /* We depend on the FIFO order. Thus, we don't need to recheck | ||
272 | * when we wake up; we are guaranteed to have the lock since | ||
273 | * there is only one wake up per release. | ||
274 | */ | ||
275 | |||
276 | schedule(); | ||
277 | |||
278 | TS_LOCK_RESUME; | ||
279 | |||
280 | /* Since we hold the lock, no other task will change | ||
281 | * ->owner. We can thus check it without acquiring the spin | ||
282 | * lock. */ | ||
283 | BUG_ON(mutex->owner != t); | ||
284 | |||
285 | TRACE_TASK(t, "Acquired lock %d.\n", l->ident); | ||
286 | |||
287 | } else { | ||
288 | TRACE_TASK(t, "Acquired lock %d with no blocking.\n", l->ident); | ||
289 | |||
290 | /* it's ours now */ | ||
291 | mutex->owner = t; | ||
292 | |||
293 | raw_spin_lock(&tsk_rt(mutex->owner)->hp_blocked_tasks_lock); | ||
294 | binheap_add(&l->nest.hp_binheap_node, &tsk_rt(t)->hp_blocked_tasks, | ||
295 | struct nested_info, hp_binheap_node); | ||
296 | raw_spin_unlock(&tsk_rt(mutex->owner)->hp_blocked_tasks_lock); | ||
297 | |||
298 | |||
299 | unlock_fine_irqrestore(&mutex->lock, flags); | ||
300 | unlock_global_irqrestore(dgl_lock, flags); | ||
301 | } | ||
302 | |||
303 | return 0; | ||
304 | } | ||
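[Editor's note] The blocking path above triggers inheritance only when the new waiter changes the top of the owner's hp_blocked_tasks heap and the owner either runs at the old top or is now beaten by the new top. A minimal, self-contained sketch of that decision, not part of the patch, with plain int priorities (larger = higher) standing in for the deadline-based edf_higher_prio()/effective_priority() comparisons:

	#include <stdio.h>

	/* Returns the priority the owner should inherit, or -1 for "no change". */
	static int inheritance_after_block(int old_top, int new_top, int owner_eff)
	{
		if (new_top == old_top)
			return -1;		/* heap top unchanged: nothing to do */
		if (owner_eff == old_top ||	/* owner was running at the old top */
		    new_top > owner_eff)	/* or the new top now beats the owner */
			return new_top;		/* propagate the new maximum */
		return -1;
	}

	int main(void)
	{
		/* owner runs at old top 5; a waiter pushes the heap top to 7 */
		printf("inherit %d\n", inheritance_after_block(5, 7, 5));	/* 7 */
		/* heap top unchanged: no propagation */
		printf("inherit %d\n", inheritance_after_block(5, 5, 5));	/* -1 */
		return 0;
	}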
305 | |||
306 | |||
307 | |||
308 | int rsm_mutex_unlock(struct litmus_lock* l) | ||
309 | { | ||
310 | struct task_struct *t = current, *next = NULL; | ||
311 | struct rsm_mutex *mutex = rsm_mutex_from_lock(l); | ||
312 | unsigned long flags; | ||
313 | |||
314 | struct task_struct *old_max_eff_prio; | ||
315 | |||
316 | int wake_up_task = 1; | ||
317 | |||
318 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
319 | dgl_wait_state_t *dgl_wait = NULL; | ||
320 | raw_spinlock_t *dgl_lock = litmus->get_dgl_spinlock(t); | ||
321 | #endif | ||
322 | |||
323 | int err = 0; | ||
324 | |||
325 | lock_global_irqsave(dgl_lock, flags); | ||
326 | lock_fine_irqsave(&mutex->lock, flags); | ||
327 | |||
328 | |||
329 | if (mutex->owner != t) { | ||
330 | err = -EINVAL; | ||
331 | unlock_fine_irqrestore(&mutex->lock, flags); | ||
332 | unlock_global_irqrestore(dgl_lock, flags); | ||
333 | return err; | ||
334 | } | ||
335 | |||
336 | |||
337 | raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock); | ||
338 | |||
339 | TRACE_TASK(t, "Freeing lock %d\n", l->ident); | ||
340 | |||
341 | old_max_eff_prio = top_priority(&tsk_rt(t)->hp_blocked_tasks); | ||
342 | binheap_delete(&l->nest.hp_binheap_node, &tsk_rt(t)->hp_blocked_tasks); | ||
343 | |||
344 | if(tsk_rt(t)->inh_task){ | ||
345 | struct task_struct *new_max_eff_prio = | ||
346 | top_priority(&tsk_rt(t)->hp_blocked_tasks); | ||
347 | |||
348 | if((new_max_eff_prio == NULL) || | ||
349 | /* there was a change in eff prio */ | ||
350 | ( (new_max_eff_prio != old_max_eff_prio) && | ||
351 | /* and owner had the old eff prio */ | ||
352 | (effective_priority(t) == old_max_eff_prio)) ) | ||
353 | { | ||
354 | // old_max_eff_prio > new_max_eff_prio | ||
355 | |||
356 | if(__edf_higher_prio(new_max_eff_prio, BASE, t, EFFECTIVE)) { | ||
357 | TRACE_TASK(t, "new_max_eff_prio > task's eff_prio-- new_max_eff_prio: %s/%d task: %s/%d [%s/%d]\n", | ||
358 | new_max_eff_prio->comm, new_max_eff_prio->pid, | ||
359 | t->comm, t->pid, tsk_rt(t)->inh_task->comm, | ||
360 | tsk_rt(t)->inh_task->pid); | ||
361 | WARN_ON(1); | ||
362 | } | ||
363 | |||
364 | litmus->decrease_prio(t, new_max_eff_prio); | ||
365 | } | ||
366 | } | ||
367 | |||
368 | if(binheap_empty(&tsk_rt(t)->hp_blocked_tasks) && | ||
369 | tsk_rt(t)->inh_task != NULL) | ||
370 | { | ||
371 | WARN_ON(tsk_rt(t)->inh_task != NULL); | ||
372 | TRACE_TASK(t, "No more locks are held, but eff_prio = %s/%d\n", | ||
373 | tsk_rt(t)->inh_task->comm, tsk_rt(t)->inh_task->pid); | ||
374 | } | ||
375 | |||
376 | raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); | ||
377 | |||
378 | |||
379 | /* check if there are jobs waiting for this resource */ | ||
380 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
381 | __waitqueue_dgl_remove_first(&mutex->wait, &dgl_wait, &next); | ||
382 | if(dgl_wait) { | ||
383 | next = dgl_wait->task; | ||
384 | //select_next_lock_if_primary(l, dgl_wait); | ||
385 | } | ||
386 | #else | ||
387 | next = __waitqueue_remove_first(&mutex->wait); | ||
388 | #endif | ||
389 | if (next) { | ||
390 | /* next becomes the resource holder */ | ||
391 | mutex->owner = next; | ||
392 | TRACE_CUR("lock ownership passed to %s/%d\n", next->comm, next->pid); | ||
393 | |||
394 | /* determine new hp_waiter if necessary */ | ||
395 | if (next == mutex->hp_waiter) { | ||
396 | |||
397 | TRACE_TASK(next, "was highest-prio waiter\n"); | ||
398 | /* next has the highest priority --- it doesn't need to | ||
399 | * inherit. However, we need to make sure that the | ||
400 | * next-highest priority in the queue is reflected in | ||
401 | * hp_waiter. */ | ||
402 | mutex->hp_waiter = rsm_mutex_find_hp_waiter(mutex, next); | ||
403 | l->nest.hp_waiter_eff_prio = (mutex->hp_waiter) ? | ||
404 | effective_priority(mutex->hp_waiter) : | ||
405 | NULL; | ||
406 | |||
407 | if (mutex->hp_waiter) | ||
408 | TRACE_TASK(mutex->hp_waiter, "is new highest-prio waiter\n"); | ||
409 | else | ||
410 | TRACE("no further waiters\n"); | ||
411 | |||
412 | raw_spin_lock(&tsk_rt(next)->hp_blocked_tasks_lock); | ||
413 | |||
414 | binheap_add(&l->nest.hp_binheap_node, | ||
415 | &tsk_rt(next)->hp_blocked_tasks, | ||
416 | struct nested_info, hp_binheap_node); | ||
417 | |||
418 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
419 | if(dgl_wait) { | ||
420 | select_next_lock_if_primary(l, dgl_wait); | ||
421 | //wake_up_task = atomic_dec_and_test(&dgl_wait->nr_remaining); | ||
422 | --(dgl_wait->nr_remaining); | ||
423 | wake_up_task = (dgl_wait->nr_remaining == 0); | ||
424 | } | ||
425 | #endif | ||
426 | raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock); | ||
427 | } | ||
428 | else { | ||
429 | /* Well, if 'next' is not the highest-priority waiter, | ||
430 | * then it (probably) ought to inherit the highest-priority | ||
431 | * waiter's priority. */ | ||
432 | TRACE_TASK(next, "is not hp_waiter of lock %d.\n", l->ident); | ||
433 | |||
434 | raw_spin_lock(&tsk_rt(next)->hp_blocked_tasks_lock); | ||
435 | |||
436 | binheap_add(&l->nest.hp_binheap_node, | ||
437 | &tsk_rt(next)->hp_blocked_tasks, | ||
438 | struct nested_info, hp_binheap_node); | ||
439 | |||
440 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
441 | if(dgl_wait) { | ||
442 | select_next_lock_if_primary(l, dgl_wait); | ||
443 | --(dgl_wait->nr_remaining); | ||
444 | wake_up_task = (dgl_wait->nr_remaining == 0); | ||
445 | } | ||
446 | #endif | ||
447 | |||
448 | /* It is possible that 'next' *should* be the hp_waiter, but isn't | ||
449 | * because that update hasn't yet executed (update operation is | ||
450 | * probably blocked on mutex->lock). So only inherit if the top of | ||
451 | * 'next's heap is indeed the effective priority of hp_waiter. | ||
452 | * (We use l->hp_waiter_eff_prio instead of effective_priority(hp_waiter) | ||
453 | * since the effective priority of hp_waiter can change (and the | ||
454 | * update has not made it to this lock).) | ||
455 | */ | ||
456 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
457 | if((l->nest.hp_waiter_eff_prio != NULL) && | ||
458 | (top_priority(&tsk_rt(next)->hp_blocked_tasks) == | ||
459 | l->nest.hp_waiter_eff_prio)) | ||
460 | { | ||
461 | if(dgl_wait && tsk_rt(next)->blocked_lock) { | ||
462 | BUG_ON(wake_up_task); | ||
463 | if(__edf_higher_prio(l->nest.hp_waiter_eff_prio, BASE, | ||
464 | next, EFFECTIVE)) { | ||
465 | litmus->nested_increase_prio(next, | ||
466 | l->nest.hp_waiter_eff_prio, &mutex->lock, flags); // unlocks lock && hp_blocked_tasks_lock. | ||
467 | goto out; // all spinlocks are released. bail out now. | ||
468 | } | ||
469 | } | ||
470 | else { | ||
471 | litmus->increase_prio(next, l->nest.hp_waiter_eff_prio); | ||
472 | } | ||
473 | } | ||
474 | |||
475 | raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock); | ||
476 | #else | ||
477 | if(likely(top_priority(&tsk_rt(next)->hp_blocked_tasks) == | ||
478 | l->nest.hp_waiter_eff_prio)) | ||
479 | { | ||
480 | litmus->increase_prio(next, l->nest.hp_waiter_eff_prio); | ||
481 | } | ||
482 | raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock); | ||
483 | #endif | ||
484 | } | ||
485 | |||
486 | if(wake_up_task) { | ||
487 | TRACE_TASK(next, "waking up since it is no longer blocked.\n"); | ||
488 | |||
489 | tsk_rt(next)->blocked_lock = NULL; | ||
490 | mb(); | ||
491 | |||
492 | wake_up_process(next); | ||
493 | } | ||
494 | else { | ||
495 | TRACE_TASK(next, "is still blocked.\n"); | ||
496 | } | ||
497 | } | ||
498 | else { | ||
499 | /* becomes available */ | ||
500 | mutex->owner = NULL; | ||
501 | } | ||
502 | |||
503 | unlock_fine_irqrestore(&mutex->lock, flags); | ||
504 | |||
505 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
506 | out: | ||
507 | #endif | ||
508 | unlock_global_irqrestore(dgl_lock, flags); | ||
509 | |||
510 | return err; | ||
511 | } | ||
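[Editor's note] Under CONFIG_LITMUS_DGL_SUPPORT, the unlock path above hands a lock to 'next' but only wakes it once every lock in its requested group has been granted (dgl_wait->nr_remaining reaches zero). A hedged sketch of that counting rule, with illustrative names rather than the patch's dgl_wait_state_t:

	#include <stdio.h>

	struct group_wait {
		int nr_remaining;	/* locks in the group not yet owned */
	};

	/* Called once per lock handed to the waiter; returns 1 when it may run. */
	static int lock_granted(struct group_wait *w)
	{
		return --w->nr_remaining == 0;
	}

	int main(void)
	{
		struct group_wait w = { .nr_remaining = 3 };
		for (int i = 0; i < 3; i++)
			printf("grant %d -> wake=%d\n", i + 1, lock_granted(&w));
		return 0;
	}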
512 | |||
513 | |||
514 | void rsm_mutex_propagate_increase_inheritance(struct litmus_lock* l, | ||
515 | struct task_struct* t, | ||
516 | raw_spinlock_t* to_unlock, | ||
517 | unsigned long irqflags) | ||
518 | { | ||
519 | struct rsm_mutex *mutex = rsm_mutex_from_lock(l); | ||
520 | |||
521 | // relay-style locking | ||
522 | lock_fine(&mutex->lock); | ||
523 | unlock_fine(to_unlock); | ||
524 | |||
525 | if(tsk_rt(t)->blocked_lock == l) { // prevent race on tsk_rt(t)->blocked_lock | ||
526 | struct task_struct *owner = mutex->owner; | ||
527 | |||
528 | struct task_struct *old_max_eff_prio; | ||
529 | struct task_struct *new_max_eff_prio; | ||
530 | |||
531 | raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock); | ||
532 | |||
533 | old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); | ||
534 | |||
535 | if((t != mutex->hp_waiter) && edf_higher_prio(t, mutex->hp_waiter)) { | ||
536 | TRACE_TASK(t, "is new highest-prio waiter by propagation.\n"); | ||
537 | mutex->hp_waiter = t; | ||
538 | } | ||
539 | if(t == mutex->hp_waiter) { | ||
540 | // reflect the increased priority in the heap node. | ||
541 | l->nest.hp_waiter_eff_prio = effective_priority(mutex->hp_waiter); | ||
542 | |||
543 | BUG_ON(!binheap_is_in_heap(&l->nest.hp_binheap_node)); | ||
544 | BUG_ON(!binheap_is_in_this_heap(&l->nest.hp_binheap_node, | ||
545 | &tsk_rt(owner)->hp_blocked_tasks)); | ||
546 | |||
547 | binheap_decrease(&l->nest.hp_binheap_node, | ||
548 | &tsk_rt(owner)->hp_blocked_tasks); | ||
549 | } | ||
550 | |||
551 | new_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); | ||
552 | |||
553 | |||
554 | if(new_max_eff_prio != old_max_eff_prio) { | ||
555 | // new_max_eff_prio > old_max_eff_prio holds. | ||
556 | if ((effective_priority(owner) == old_max_eff_prio) || | ||
557 | (__edf_higher_prio(new_max_eff_prio, BASE, owner, EFFECTIVE))) { | ||
558 | |||
559 | TRACE_CUR("Propagating inheritance to holder of lock %d.\n", | ||
560 | l->ident); | ||
561 | |||
562 | // beware: recursion | ||
563 | litmus->nested_increase_prio(owner, new_max_eff_prio, | ||
564 | &mutex->lock, irqflags); // unlocks mutex->lock | ||
565 | } | ||
566 | else { | ||
567 | TRACE_CUR("Lower priority than holder %s/%d. No propagation.\n", | ||
568 | owner->comm, owner->pid); | ||
569 | raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); | ||
570 | unlock_fine_irqrestore(&mutex->lock, irqflags); | ||
571 | } | ||
572 | } | ||
573 | else { | ||
574 | TRACE_TASK(mutex->owner, "No change in maximum effective priority.\n"); | ||
575 | raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); | ||
576 | unlock_fine_irqrestore(&mutex->lock, irqflags); | ||
577 | } | ||
578 | } | ||
579 | else { | ||
580 | struct litmus_lock *still_blocked = tsk_rt(t)->blocked_lock; | ||
581 | |||
582 | TRACE_TASK(t, "is not blocked on lock %d.\n", l->ident); | ||
583 | if(still_blocked) { | ||
584 | TRACE_TASK(t, "is still blocked on a lock though (lock %d).\n", | ||
585 | still_blocked->ident); | ||
586 | if(still_blocked->ops->propagate_increase_inheritance) { | ||
587 | /* due to relay-style nesting of spinlocks (acq. A, acq. B, free A, free B) | ||
588 | we know that task 't' has not released any locks behind us in this | ||
589 | chain. Propagation just needs to catch up with task 't'. */ | ||
590 | still_blocked->ops->propagate_increase_inheritance(still_blocked, | ||
591 | t, | ||
592 | &mutex->lock, | ||
593 | irqflags); | ||
594 | } | ||
595 | else { | ||
596 | TRACE_TASK(t, | ||
597 | "Inheritor is blocked on lock (%p) that does not " | ||
598 | "support nesting!\n", | ||
599 | still_blocked); | ||
600 | unlock_fine_irqrestore(&mutex->lock, irqflags); | ||
601 | } | ||
602 | } | ||
603 | else { | ||
604 | unlock_fine_irqrestore(&mutex->lock, irqflags); | ||
605 | } | ||
606 | } | ||
607 | } | ||
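[Editor's note] Both propagate paths rely on the relay-style locking shown at their top: the next fine-grained lock in the chain is taken before the previous one is released, so the chain is never observed half-updated. A self-contained sketch of the hand-over-hand pattern, using pthread mutexes in place of raw spinlocks:

	#include <pthread.h>
	#include <stdio.h>

	static void relay_step(pthread_mutex_t *next, pthread_mutex_t *prev)
	{
		pthread_mutex_lock(next);	/* lock_fine(&mutex->lock) */
		pthread_mutex_unlock(prev);	/* unlock_fine(to_unlock)  */
	}

	int main(void)
	{
		pthread_mutex_t a = PTHREAD_MUTEX_INITIALIZER;
		pthread_mutex_t b = PTHREAD_MUTEX_INITIALIZER;

		pthread_mutex_lock(&a);		/* caller enters holding 'a' */
		relay_step(&b, &a);		/* now holding only 'b' */
		pthread_mutex_unlock(&b);
		puts("walked chain a -> b hand-over-hand");
		return 0;
	}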
608 | |||
609 | |||
610 | void rsm_mutex_propagate_decrease_inheritance(struct litmus_lock* l, | ||
611 | struct task_struct* t, | ||
612 | raw_spinlock_t* to_unlock, | ||
613 | unsigned long irqflags) | ||
614 | { | ||
615 | struct rsm_mutex *mutex = rsm_mutex_from_lock(l); | ||
616 | |||
617 | // relay-style locking | ||
618 | lock_fine(&mutex->lock); | ||
619 | unlock_fine(to_unlock); | ||
620 | |||
621 | if(tsk_rt(t)->blocked_lock == l) { // prevent race on tsk_rt(t)->blocked_lock | ||
622 | if(t == mutex->hp_waiter) { | ||
623 | struct task_struct *owner = mutex->owner; | ||
624 | |||
625 | struct task_struct *old_max_eff_prio; | ||
626 | struct task_struct *new_max_eff_prio; | ||
627 | |||
628 | raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock); | ||
629 | |||
630 | old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); | ||
631 | |||
632 | binheap_delete(&l->nest.hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks); | ||
633 | mutex->hp_waiter = rsm_mutex_find_hp_waiter(mutex, NULL); | ||
634 | l->nest.hp_waiter_eff_prio = (mutex->hp_waiter) ? | ||
635 | effective_priority(mutex->hp_waiter) : NULL; | ||
636 | binheap_add(&l->nest.hp_binheap_node, | ||
637 | &tsk_rt(owner)->hp_blocked_tasks, | ||
638 | struct nested_info, hp_binheap_node); | ||
639 | |||
640 | new_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); | ||
641 | |||
642 | if((old_max_eff_prio != new_max_eff_prio) && | ||
643 | (effective_priority(owner) == old_max_eff_prio)) | ||
644 | { | ||
645 | // Need to set new effective_priority for owner | ||
646 | |||
647 | struct task_struct *decreased_prio; | ||
648 | |||
649 | TRACE_CUR("Propagating decreased inheritance to holder of lock %d.\n", | ||
650 | l->ident); | ||
651 | |||
652 | if(__edf_higher_prio(new_max_eff_prio, BASE, owner, BASE)) { | ||
653 | TRACE_CUR("%s/%d has greater base priority than base priority of owner (%s/%d) of lock %d.\n", | ||
654 | (new_max_eff_prio) ? new_max_eff_prio->comm : "nil", | ||
655 | (new_max_eff_prio) ? new_max_eff_prio->pid : -1, | ||
656 | owner->comm, | ||
657 | owner->pid, | ||
658 | l->ident); | ||
659 | |||
660 | decreased_prio = new_max_eff_prio; | ||
661 | } | ||
662 | else { | ||
663 | TRACE_CUR("%s/%d has lesser base priority than base priority of owner (%s/%d) of lock %d.\n", | ||
664 | (new_max_eff_prio) ? new_max_eff_prio->comm : "nil", | ||
665 | (new_max_eff_prio) ? new_max_eff_prio->pid : -1, | ||
666 | owner->comm, | ||
667 | owner->pid, | ||
668 | l->ident); | ||
669 | |||
670 | decreased_prio = NULL; | ||
671 | } | ||
672 | |||
673 | // beware: recursion | ||
674 | litmus->nested_decrease_prio(owner, decreased_prio, &mutex->lock, irqflags); // will unlock mutex->lock | ||
675 | } | ||
676 | else { | ||
677 | raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); | ||
678 | unlock_fine_irqrestore(&mutex->lock, irqflags); | ||
679 | } | ||
680 | } | ||
681 | else { | ||
682 | TRACE_TASK(t, "is not hp_waiter. No propagation.\n"); | ||
683 | unlock_fine_irqrestore(&mutex->lock, irqflags); | ||
684 | } | ||
685 | } | ||
686 | else { | ||
687 | struct litmus_lock *still_blocked = tsk_rt(t)->blocked_lock; | ||
688 | |||
689 | TRACE_TASK(t, "is not blocked on lock %d.\n", l->ident); | ||
690 | if(still_blocked) { | ||
691 | TRACE_TASK(t, "is still blocked on a lock though (lock %d).\n", | ||
692 | still_blocked->ident); | ||
693 | if(still_blocked->ops->propagate_decrease_inheritance) { | ||
694 | /* due to relay-style nesting of spinlocks (acq. A, acq. B, free A, free B) | ||
695 | we know that task 't' has not released any locks behind us in this | ||
696 | chain. Propagation just needs to catch up with task 't'. */ | ||
697 | still_blocked->ops->propagate_decrease_inheritance(still_blocked, | ||
698 | t, | ||
699 | &mutex->lock, | ||
700 | irqflags); | ||
701 | } | ||
702 | else { | ||
703 | TRACE_TASK(t, "Inheritor is blocked on lock (%p) that does not support nesting!\n", | ||
704 | still_blocked); | ||
705 | unlock_fine_irqrestore(&mutex->lock, irqflags); | ||
706 | } | ||
707 | } | ||
708 | else { | ||
709 | unlock_fine_irqrestore(&mutex->lock, irqflags); | ||
710 | } | ||
711 | } | ||
712 | } | ||
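[Editor's note] Because a priority decrease can move the lock's heap node down, the code above deletes the node, recomputes hp_waiter, and re-inserts it rather than using binheap_decrease(). A sketch of the recompute step, with an array scan (larger int = higher priority) standing in for the wait-queue walk in rsm_mutex_find_hp_waiter():

	#include <stdio.h>

	/* Scan waiters, optionally skipping one, and report the highest
	 * priority found (-1 if there are no other waiters). */
	static int find_hp_waiter(const int *prio, int n, int skip)
	{
		int best = -1;
		for (int i = 0; i < n; i++)
			if (i != skip && prio[i] > best)
				best = prio[i];
		return best;
	}

	int main(void)
	{
		int waiters[] = { 4, 9, 2 };
		/* waiter 1 (prio 9) lowered its priority: recompute without it */
		printf("new hp_waiter prio: %d\n", find_hp_waiter(waiters, 3, 1));
		return 0;
	}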
713 | |||
714 | |||
715 | int rsm_mutex_close(struct litmus_lock* l) | ||
716 | { | ||
717 | struct task_struct *t = current; | ||
718 | struct rsm_mutex *mutex = rsm_mutex_from_lock(l); | ||
719 | unsigned long flags; | ||
720 | |||
721 | int owner; | ||
722 | |||
723 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
724 | raw_spinlock_t *dgl_lock = litmus->get_dgl_spinlock(t); | ||
725 | #endif | ||
726 | |||
727 | lock_global_irqsave(dgl_lock, flags); | ||
728 | lock_fine_irqsave(&mutex->lock, flags); | ||
729 | |||
730 | owner = (mutex->owner == t); | ||
731 | |||
732 | unlock_fine_irqrestore(&mutex->lock, flags); | ||
733 | unlock_global_irqrestore(dgl_lock, flags); | ||
734 | |||
735 | if (owner) | ||
736 | rsm_mutex_unlock(l); | ||
737 | |||
738 | return 0; | ||
739 | } | ||
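[Editor's note] rsm_mutex_close() treats teardown by the current owner as a final unlock. A simplified sketch of that convention (illustrative types only; the patch performs the ownership test under the DGL and fine-grained locks before calling unlock):

	#include <stdio.h>

	struct simple_lock { int owner_id; };

	static void simple_unlock(struct simple_lock *l) { l->owner_id = -1; }

	static void simple_close(struct simple_lock *l, int self)
	{
		int is_owner = (l->owner_id == self);	/* tested under locks above */
		if (is_owner)
			simple_unlock(l);		/* close acts as final unlock */
		printf("closed; owner now %d\n", l->owner_id);
	}

	int main(void)
	{
		struct simple_lock l = { .owner_id = 42 };
		simple_close(&l, 42);
		return 0;
	}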
740 | |||
741 | void rsm_mutex_free(struct litmus_lock* lock) | ||
742 | { | ||
743 | kfree(rsm_mutex_from_lock(lock)); | ||
744 | } | ||
745 | |||
746 | struct litmus_lock* rsm_mutex_new(struct litmus_lock_ops* ops) | ||
747 | { | ||
748 | struct rsm_mutex* mutex; | ||
749 | |||
750 | mutex = kmalloc(sizeof(*mutex), GFP_KERNEL); | ||
751 | if (!mutex) | ||
752 | return NULL; | ||
753 | |||
754 | mutex->litmus_lock.ops = ops; | ||
755 | mutex->owner = NULL; | ||
756 | mutex->hp_waiter = NULL; | ||
757 | init_waitqueue_head(&mutex->wait); | ||
758 | |||
759 | |||
760 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
761 | { | ||
762 | __raw_spin_lock_init(&mutex->lock, | ||
763 | ((struct litmus_lock*)mutex)->cheat_lockdep, | ||
764 | &((struct litmus_lock*)mutex)->key); | ||
765 | } | ||
766 | #else | ||
767 | raw_spin_lock_init(&mutex->lock); | ||
768 | #endif | ||
769 | |||
770 | ((struct litmus_lock*)mutex)->nest.hp_waiter_ptr = &mutex->hp_waiter; | ||
771 | |||
772 | return &mutex->litmus_lock; | ||
773 | } | ||
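[Editor's note] The constructor wires the caller-supplied ops table into the embedded litmus_lock and returns that embedded struct; container_of() later recovers the enclosing rsm_mutex. A user-space sketch of the same embedding pattern, with malloc() in place of kmalloc() and hypothetical type names:

	#include <stdio.h>
	#include <stdlib.h>

	struct my_lock_ops { int (*lock)(void); int (*unlock)(void); };
	struct my_base_lock { const struct my_lock_ops *ops; };
	struct my_mutex { struct my_base_lock base; int owner; };

	static int noop(void) { return 0; }
	static const struct my_lock_ops ops = { .lock = noop, .unlock = noop };

	static struct my_base_lock *my_mutex_new(const struct my_lock_ops *o)
	{
		struct my_mutex *m = malloc(sizeof(*m));
		if (!m)
			return NULL;
		m->base.ops = o;	/* wire in the plugin's operations */
		m->owner = -1;		/* starts unowned */
		return &m->base;	/* hand back the embedded base, as above */
	}

	int main(void)
	{
		struct my_base_lock *l = my_mutex_new(&ops);
		printf("created: %p\n", (void *)l);
		free(l);		/* mirrors rsm_mutex_free()/kfree() */
		return 0;
	}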
774 | |||
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c index c0316c4a1b35..59236d007fd8 100644 --- a/litmus/sched_gsn_edf.c +++ b/litmus/sched_gsn_edf.c | |||
@@ -29,6 +29,11 @@ | |||
29 | #include <litmus/bheap.h> | 29 | #include <litmus/bheap.h> |
30 | #include <litmus/binheap.h> | 30 | #include <litmus/binheap.h> |
31 | 31 | ||
32 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | ||
33 | #include <litmus/rsm_lock.h> | ||
34 | #include <litmus/ikglp_lock.h> | ||
35 | #endif | ||
36 | |||
32 | #ifdef CONFIG_SCHED_CPU_AFFINITY | 37 | #ifdef CONFIG_SCHED_CPU_AFFINITY |
33 | #include <litmus/affinity.h> | 38 | #include <litmus/affinity.h> |
34 | #endif | 39 | #endif |
@@ -122,6 +127,11 @@ static rt_domain_t gsnedf; | |||
122 | 127 | ||
123 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | 128 | #ifdef CONFIG_LITMUS_DGL_SUPPORT |
124 | static raw_spinlock_t dgl_lock; | 129 | static raw_spinlock_t dgl_lock; |
130 | |||
131 | static raw_spinlock_t* gsnedf_get_dgl_spinlock(struct task_struct *t) | ||
132 | { | ||
133 | return(&dgl_lock); | ||
134 | } | ||
125 | #endif | 135 | #endif |
126 | 136 | ||
127 | /* Uncomment this if you want to see all scheduling decisions in the | 137 | /* Uncomment this if you want to see all scheduling decisions in the |
@@ -133,7 +143,7 @@ static int cpu_lower_prio(struct binheap_node *_a, struct binheap_node *_b) | |||
133 | { | 143 | { |
134 | cpu_entry_t *a = binheap_entry(_a, cpu_entry_t, hn); | 144 | cpu_entry_t *a = binheap_entry(_a, cpu_entry_t, hn); |
135 | cpu_entry_t *b = binheap_entry(_b, cpu_entry_t, hn); | 145 | cpu_entry_t *b = binheap_entry(_b, cpu_entry_t, hn); |
136 | 146 | ||
137 | /* Note that a and b are inverted: we want the lowest-priority CPU at | 147 | /* Note that a and b are inverted: we want the lowest-priority CPU at |
138 | * the top of the heap. | 148 | * the top of the heap. |
139 | */ | 149 | */ |
@@ -645,29 +655,37 @@ static void gsnedf_task_exit(struct task_struct * t) | |||
645 | static long gsnedf_admit_task(struct task_struct* tsk) | 655 | static long gsnedf_admit_task(struct task_struct* tsk) |
646 | { | 656 | { |
647 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | 657 | #ifdef CONFIG_LITMUS_NESTED_LOCKING |
648 | INIT_BINHEAP_HANDLE(&tsk_rt(tsk)->hp_blocked_tasks, edf_max_heap_base_priority_order); | 658 | INIT_BINHEAP_HANDLE(&tsk_rt(tsk)->hp_blocked_tasks, |
659 | edf_max_heap_base_priority_order); | ||
649 | #endif | 660 | #endif |
650 | 661 | ||
651 | return 0; | 662 | return 0; |
652 | } | 663 | } |
653 | 664 | ||
654 | #ifdef CONFIG_LITMUS_LOCKING | ||
655 | 665 | ||
656 | extern raw_spinlock_t rsm_global_lock; | 666 | |
667 | |||
668 | |||
669 | |||
670 | #ifdef CONFIG_LITMUS_LOCKING | ||
657 | 671 | ||
658 | #include <litmus/fdso.h> | 672 | #include <litmus/fdso.h> |
659 | 673 | ||
660 | /* called with IRQs off */ | 674 | /* called with IRQs off */ |
661 | static void increase_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh) | 675 | static void increase_priority_inheritance(struct task_struct* t, |
676 | struct task_struct* prio_inh) | ||
662 | { | 677 | { |
663 | int linked_on; | 678 | int linked_on; |
664 | int check_preempt = 0; | 679 | int check_preempt = 0; |
665 | 680 | ||
666 | raw_spin_lock(&gsnedf_lock); | 681 | raw_spin_lock(&gsnedf_lock); |
667 | 682 | ||
683 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | ||
668 | /* this sanity check allows for weaker locking in protocols */ | 684 | /* this sanity check allows for weaker locking in protocols */ |
669 | if(__edf_higher_prio(prio_inh, BASE, t, EFFECTIVE)) { | 685 | if(__edf_higher_prio(prio_inh, BASE, t, EFFECTIVE)) { |
670 | TRACE_TASK(t, "inherits priority from %s/%d\n", prio_inh->comm, prio_inh->pid); | 686 | #endif |
687 | TRACE_TASK(t, "inherits priority from %s/%d\n", | ||
688 | prio_inh->comm, prio_inh->pid); | ||
671 | tsk_rt(t)->inh_task = prio_inh; | 689 | tsk_rt(t)->inh_task = prio_inh; |
672 | 690 | ||
673 | linked_on = tsk_rt(t)->linked_on; | 691 | linked_on = tsk_rt(t)->linked_on; |
@@ -721,6 +739,7 @@ static void increase_priority_inheritance(struct task_struct* t, struct task_str | |||
721 | check_for_preemptions(); | 739 | check_for_preemptions(); |
722 | } | 740 | } |
723 | } | 741 | } |
742 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | ||
724 | } | 743 | } |
725 | else { | 744 | else { |
726 | TRACE_TASK(t, "Spurious invalid priority increase. " | 745 | TRACE_TASK(t, "Spurious invalid priority increase. " |
@@ -730,28 +749,33 @@ static void increase_priority_inheritance(struct task_struct* t, struct task_str | |||
730 | effective_priority(t)->comm, effective_priority(t)->pid, | 749 | effective_priority(t)->comm, effective_priority(t)->pid, |
731 | prio_inh->comm, prio_inh->pid); | 750 | prio_inh->comm, prio_inh->pid); |
732 | } | 751 | } |
752 | #endif | ||
733 | 753 | ||
734 | raw_spin_unlock(&gsnedf_lock); | 754 | raw_spin_unlock(&gsnedf_lock); |
735 | } | 755 | } |
736 | 756 | ||
737 | /* called with IRQs off */ | 757 | /* called with IRQs off */ |
738 | static void decrease_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh) | 758 | static void decrease_priority_inheritance(struct task_struct* t, |
759 | struct task_struct* prio_inh) | ||
739 | { | 760 | { |
740 | raw_spin_lock(&gsnedf_lock); | 761 | raw_spin_lock(&gsnedf_lock); |
741 | 762 | ||
763 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | ||
742 | if(__edf_higher_prio(t, EFFECTIVE, prio_inh, BASE)) { | 764 | if(__edf_higher_prio(t, EFFECTIVE, prio_inh, BASE)) { |
765 | #endif | ||
743 | /* A job only stops inheriting a priority when it releases a | 766 | /* A job only stops inheriting a priority when it releases a |
744 | * resource. Thus we can make the following assumption.*/ | 767 | * resource. Thus we can make the following assumption.*/ |
745 | if(prio_inh) | 768 | if(prio_inh) |
746 | TRACE_TASK(t, "EFFECTIVE priority decreased to %s/%d\n", prio_inh->comm, prio_inh->pid); | 769 | TRACE_TASK(t, "EFFECTIVE priority decreased to %s/%d\n", |
770 | prio_inh->comm, prio_inh->pid); | ||
747 | else | 771 | else |
748 | TRACE_TASK(t, "base priority restored.\n"); | 772 | TRACE_TASK(t, "base priority restored.\n"); |
749 | 773 | ||
750 | tsk_rt(t)->inh_task = prio_inh; | 774 | tsk_rt(t)->inh_task = prio_inh; |
751 | 775 | ||
752 | if(tsk_rt(t)->scheduled_on != NO_CPU) { | 776 | if(tsk_rt(t)->scheduled_on != NO_CPU) { |
753 | TRACE_TASK(t, "is scheduled.\n"); | 777 | TRACE_TASK(t, "is scheduled.\n"); |
754 | 778 | ||
755 | /* Check if rescheduling is necessary. We can't use heap_decrease() | 779 | /* Check if rescheduling is necessary. We can't use heap_decrease() |
756 | * since the priority was effectively lowered. */ | 780 | * since the priority was effectively lowered. */ |
757 | unlink(t); | 781 | unlink(t); |
@@ -762,7 +786,7 @@ static void decrease_priority_inheritance(struct task_struct* t, struct task_str | |||
762 | raw_spin_lock(&gsnedf.release_lock); | 786 | raw_spin_lock(&gsnedf.release_lock); |
763 | if (is_queued(t)) { | 787 | if (is_queued(t)) { |
764 | TRACE_TASK(t, "is queued.\n"); | 788 | TRACE_TASK(t, "is queued.\n"); |
765 | 789 | ||
766 | /* decrease in priority, so we have to re-add to binomial heap */ | 790 | /* decrease in priority, so we have to re-add to binomial heap */ |
767 | unlink(t); | 791 | unlink(t); |
768 | gsnedf_job_arrival(t); | 792 | gsnedf_job_arrival(t); |
@@ -772,6 +796,7 @@ static void decrease_priority_inheritance(struct task_struct* t, struct task_str | |||
772 | } | 796 | } |
773 | raw_spin_unlock(&gsnedf.release_lock); | 797 | raw_spin_unlock(&gsnedf.release_lock); |
774 | } | 798 | } |
799 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | ||
775 | } | 800 | } |
776 | else { | 801 | else { |
777 | TRACE_TASK(t, "Spurious invalid priority decrease. " | 802 | TRACE_TASK(t, "Spurious invalid priority decrease. " |
@@ -782,8 +807,8 @@ static void decrease_priority_inheritance(struct task_struct* t, struct task_str | |||
782 | (prio_inh) ? prio_inh->comm : "nil", | 807 | (prio_inh) ? prio_inh->comm : "nil", |
783 | (prio_inh) ? prio_inh->pid : -1); | 808 | (prio_inh) ? prio_inh->pid : -1); |
784 | } | 809 | } |
810 | #endif | ||
785 | 811 | ||
786 | |||
787 | raw_spin_unlock(&gsnedf_lock); | 812 | raw_spin_unlock(&gsnedf_lock); |
788 | } | 813 | } |
789 | 814 | ||
@@ -792,96 +817,15 @@ static void decrease_priority_inheritance(struct task_struct* t, struct task_str | |||
792 | 817 | ||
793 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | 818 | #ifdef CONFIG_LITMUS_NESTED_LOCKING |
794 | 819 | ||
795 | |||
796 | void print_hp_waiters(struct binheap_node* n, int depth) | ||
797 | { | ||
798 | struct litmus_lock *l; | ||
799 | struct nested_info *nest; | ||
800 | char padding[81] = " "; | ||
801 | struct task_struct *hp = NULL; | ||
802 | struct task_struct *hp_eff = NULL; | ||
803 | struct task_struct *node_prio = NULL; | ||
804 | |||
805 | |||
806 | if(n == NULL) { | ||
807 | TRACE("+-> %p\n", NULL); | ||
808 | return; | ||
809 | } | ||
810 | |||
811 | nest = binheap_entry(n, struct nested_info, hp_binheap_node); | ||
812 | l = nest->lock; | ||
813 | |||
814 | if(depth*2 <= 80) | ||
815 | padding[depth*2] = '\0'; | ||
816 | |||
817 | if(nest->hp_waiter_ptr && *(nest->hp_waiter_ptr)) { | ||
818 | hp = *(nest->hp_waiter_ptr); | ||
819 | |||
820 | if(tsk_rt(hp)->inh_task) { | ||
821 | hp_eff = tsk_rt(hp)->inh_task; | ||
822 | } | ||
823 | } | ||
824 | |||
825 | node_prio = nest->hp_waiter_eff_prio; | ||
826 | |||
827 | TRACE("%s+-> %s/%d [waiter = %s/%d] [waiter's inh = %s/%d] (lock = %d)\n", | ||
828 | padding, | ||
829 | (node_prio) ? node_prio->comm : "nil", | ||
830 | (node_prio) ? node_prio->pid : -1, | ||
831 | (hp) ? hp->comm : "nil", | ||
832 | (hp) ? hp->pid : -1, | ||
833 | (hp_eff) ? hp_eff->comm : "nil", | ||
834 | (hp_eff) ? hp_eff->pid : -1, | ||
835 | l->ident); | ||
836 | |||
837 | if(n->left) print_hp_waiters(n->left, depth+1); | ||
838 | if(n->right) print_hp_waiters(n->right, depth+1); | ||
839 | } | ||
840 | |||
841 | void dump_node_data(struct binheap_node* parent, struct binheap_node* child) | ||
842 | { | ||
843 | struct binheap_node *root = (parent != BINHEAP_POISON) ? parent : child; | ||
844 | struct binheap_node *bad_node = (parent == BINHEAP_POISON) ? parent : child; | ||
845 | struct nested_info *nest; | ||
846 | |||
847 | while(root->parent != NULL) { | ||
848 | root = root->parent; | ||
849 | } | ||
850 | |||
851 | if(parent == BINHEAP_POISON) { | ||
852 | TRACE_CUR("parent was bad node.\n"); | ||
853 | } | ||
854 | else { | ||
855 | TRACE_CUR("child was bad node.\n"); | ||
856 | } | ||
857 | TRACE_CUR("Bad node info: data = %p, left = %p, right = %p\n", bad_node->data, bad_node->left, bad_node->right); | ||
858 | |||
859 | nest = binheap_entry(bad_node, struct nested_info, hp_binheap_node); | ||
860 | TRACE_CUR("Lock with bad node: lock = %d\n", (nest->lock) ? nest->lock->ident : -1); | ||
861 | |||
862 | print_hp_waiters(root, 1); | ||
863 | } | ||
864 | |||
865 | void dump_node_data2(struct binheap_handle *handle, struct binheap_node* bad_node) | ||
866 | { | ||
867 | struct binheap_node *root = handle->root; | ||
868 | struct nested_info *nest; | ||
869 | |||
870 | TRACE_CUR("Bad node info: data = %p, left = %p, right = %p\n", bad_node->data, bad_node->left, bad_node->right); | ||
871 | |||
872 | nest = binheap_entry(bad_node, struct nested_info, hp_binheap_node); | ||
873 | TRACE_CUR("Lock with bad node: lock = %d\n", (nest->lock) ? nest->lock->ident : -1); | ||
874 | |||
875 | print_hp_waiters(root, 1); | ||
876 | } | ||
877 | |||
878 | |||
879 | /* called with IRQs off */ | 820 | /* called with IRQs off */ |
880 | /* preconditions: | 821 | /* preconditions: |
881 | (1) The 'hp_blocked_tasks_lock' of task 't' is held. | 822 | (1) The 'hp_blocked_tasks_lock' of task 't' is held. |
882 | (2) The lock 'to_unlock' is held. | 823 | (2) The lock 'to_unlock' is held. |
883 | */ | 824 | */ |
884 | static void nested_increase_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh, raw_spinlock_t *to_unlock, unsigned long irqflags) | 825 | static void nested_increase_priority_inheritance(struct task_struct* t, |
826 | struct task_struct* prio_inh, | ||
827 | raw_spinlock_t *to_unlock, | ||
828 | unsigned long irqflags) | ||
885 | { | 829 | { |
886 | struct litmus_lock *blocked_lock = tsk_rt(t)->blocked_lock; | 830 | struct litmus_lock *blocked_lock = tsk_rt(t)->blocked_lock; |
887 | 831 | ||
@@ -890,17 +834,21 @@ static void nested_increase_priority_inheritance(struct task_struct* t, struct t | |||
890 | } | 834 | } |
891 | 835 | ||
892 | raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); // unlock the t's heap. | 836 | raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); // unlock the t's heap. |
893 | 837 | ||
894 | 838 | ||
895 | if(blocked_lock) { | 839 | if(blocked_lock) { |
896 | if(blocked_lock->ops->propagate_increase_inheritance) { | 840 | if(blocked_lock->ops->propagate_increase_inheritance) { |
897 | TRACE_TASK(t, "Inheritor is blocked (...perhaps). Checking lock %d.\n", blocked_lock->ident); | 841 | TRACE_TASK(t, "Inheritor is blocked (...perhaps). Checking lock %d.\n", |
898 | 842 | blocked_lock->ident); | |
899 | // beware: recursion | 843 | |
900 | blocked_lock->ops->propagate_increase_inheritance(blocked_lock, t, to_unlock, irqflags); | 844 | // beware: recursion |
845 | blocked_lock->ops->propagate_increase_inheritance(blocked_lock, | ||
846 | t, to_unlock, | ||
847 | irqflags); | ||
901 | } | 848 | } |
902 | else { | 849 | else { |
903 | TRACE_TASK(t, "Inheritor is blocked on lock (%d) that does not support nesting!\n", blocked_lock->ident); | 850 | TRACE_TASK(t, "Inheritor is blocked on lock (%d) that does not support nesting!\n", |
851 | blocked_lock->ident); | ||
904 | unlock_fine_irqrestore(to_unlock, irqflags); | 852 | unlock_fine_irqrestore(to_unlock, irqflags); |
905 | } | 853 | } |
906 | } | 854 | } |
@@ -915,22 +863,29 @@ static void nested_increase_priority_inheritance(struct task_struct* t, struct t | |||
915 | (1) The 'hp_blocked_tasks_lock' of task 't' is held. | 863 | (1) The 'hp_blocked_tasks_lock' of task 't' is held. |
916 | (2) The lock 'to_unlock' is held. | 864 | (2) The lock 'to_unlock' is held. |
917 | */ | 865 | */ |
918 | static void nested_decrease_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh, raw_spinlock_t *to_unlock, unsigned long irqflags) | 866 | static void nested_decrease_priority_inheritance(struct task_struct* t, |
867 | struct task_struct* prio_inh, | ||
868 | raw_spinlock_t *to_unlock, | ||
869 | unsigned long irqflags) | ||
919 | { | 870 | { |
920 | struct litmus_lock *blocked_lock = tsk_rt(t)->blocked_lock; | 871 | struct litmus_lock *blocked_lock = tsk_rt(t)->blocked_lock; |
921 | decrease_priority_inheritance(t, prio_inh); | 872 | decrease_priority_inheritance(t, prio_inh); |
922 | 873 | ||
923 | raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); // unlock the t's heap. | 874 | raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); // unlock the t's heap. |
924 | 875 | ||
925 | if(blocked_lock) { | 876 | if(blocked_lock) { |
926 | if(blocked_lock->ops->propagate_decrease_inheritance) { | 877 | if(blocked_lock->ops->propagate_decrease_inheritance) { |
927 | TRACE_TASK(t, "Inheritor is blocked (...perhaps). Checking lock %d.\n", blocked_lock->ident); | 878 | TRACE_TASK(t, "Inheritor is blocked (...perhaps). Checking lock %d.\n", |
928 | 879 | blocked_lock->ident); | |
880 | |||
929 | // beware: recursion | 881 | // beware: recursion |
930 | blocked_lock->ops->propagate_decrease_inheritance(blocked_lock, t, to_unlock, irqflags); | 882 | blocked_lock->ops->propagate_decrease_inheritance(blocked_lock, t, |
931 | } | 883 | to_unlock, |
884 | irqflags); | ||
885 | } | ||
932 | else { | 886 | else { |
933 | TRACE_TASK(t, "Inheritor is blocked on lock (%p) that does not support nesting!\n", blocked_lock); | 887 | TRACE_TASK(t, "Inheritor is blocked on lock (%p) that does not support nesting!\n", |
888 | blocked_lock); | ||
934 | unlock_fine_irqrestore(to_unlock, irqflags); | 889 | unlock_fine_irqrestore(to_unlock, irqflags); |
935 | } | 890 | } |
936 | } | 891 | } |
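[Editor's note] The "beware: recursion" comments mark where propagation follows the inheritor's blocked_lock pointer from lock holder to lock holder, recursing once per lock in the chain. A minimal sketch of that chain walk, with simplified types and no locking:

	#include <stdio.h>
	#include <stddef.h>

	struct lock;
	struct task { int eff_prio; struct lock *blocked_on; };
	struct lock { int id; struct task *owner; };

	static void propagate_increase(struct task *t, int prio)
	{
		if (prio <= t->eff_prio)
			return;			/* sanity check: only ever raise */
		t->eff_prio = prio;
		printf("task raised to %d\n", prio);
		if (t->blocked_on && t->blocked_on->owner)	/* beware: recursion */
			propagate_increase(t->blocked_on->owner, prio);
	}

	int main(void)
	{
		struct task c = { .eff_prio = 1, .blocked_on = NULL };
		struct lock lb = { .id = 2, .owner = &c };
		struct task b = { .eff_prio = 2, .blocked_on = &lb };

		propagate_increase(&b, 9);	/* raises b, then its holder c */
		return 0;
	}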
@@ -943,2440 +898,46 @@ static void nested_decrease_priority_inheritance(struct task_struct* t, struct t | |||
943 | 898 | ||
944 | /* ******************** RSM MUTEX ********************** */ | 899 | /* ******************** RSM MUTEX ********************** */ |
945 | 900 | ||
946 | /* struct for semaphore with priority inheritance */ | 901 | static struct litmus_lock_ops gsnedf_rsm_mutex_lock_ops = { |
947 | struct rsm_mutex { | 902 | .lock = rsm_mutex_lock, |
948 | struct litmus_lock litmus_lock; | 903 | .unlock = rsm_mutex_unlock, |
949 | 904 | .close = rsm_mutex_close, | |
950 | /* current resource holder */ | 905 | .deallocate = rsm_mutex_free, |
951 | struct task_struct *owner; | ||
952 | |||
953 | /* highest-priority waiter */ | ||
954 | struct task_struct *hp_waiter; | ||
955 | |||
956 | /* FIFO queue of waiting tasks -- for now. time stamp in the future. */ | ||
957 | wait_queue_head_t wait; | ||
958 | |||
959 | /* we do some nesting within spinlocks, so we can't use the normal | ||
960 | sleeplocks found in wait_queue_head_t. */ | ||
961 | raw_spinlock_t lock; | ||
962 | }; | ||
963 | |||
964 | static inline struct rsm_mutex* rsm_mutex_from_lock(struct litmus_lock* lock) | ||
965 | { | ||
966 | return container_of(lock, struct rsm_mutex, litmus_lock); | ||
967 | } | ||
968 | |||
969 | /* caller is responsible for locking */ | ||
970 | struct task_struct* rsm_mutex_find_hp_waiter(struct rsm_mutex *mutex, | ||
971 | struct task_struct* skip) | ||
972 | { | ||
973 | wait_queue_t *q; | ||
974 | struct list_head *pos; | ||
975 | struct task_struct *queued = NULL, *found = NULL; | ||
976 | |||
977 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
978 | dgl_wait_state_t *dgl_wait = NULL; | ||
979 | #endif | ||
980 | |||
981 | list_for_each(pos, &mutex->wait.task_list) { | ||
982 | q = list_entry(pos, wait_queue_t, task_list); | ||
983 | |||
984 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
985 | if(q->func == dgl_wake_up) { | ||
986 | dgl_wait = (dgl_wait_state_t*) q->private; | ||
987 | if(tsk_rt(dgl_wait->task)->blocked_lock == &mutex->litmus_lock) { | ||
988 | queued = dgl_wait->task; | ||
989 | } | ||
990 | else { | ||
991 | queued = NULL; // skip it. | ||
992 | } | ||
993 | } | ||
994 | else { | ||
995 | queued = (struct task_struct*) q->private; | ||
996 | } | ||
997 | #else | ||
998 | queued = (struct task_struct*) q->private; | ||
999 | #endif | ||
1000 | |||
1001 | /* Compare task prios, find high prio task. */ | ||
1002 | if (queued && queued != skip && edf_higher_prio(queued, found)) { | ||
1003 | found = queued; | ||
1004 | } | ||
1005 | } | ||
1006 | return found; | ||
1007 | } | ||
1008 | |||
1009 | static inline struct task_struct* top_priority(struct binheap_handle* handle) { | ||
1010 | if(!binheap_empty(handle)) { | ||
1011 | return (struct task_struct*)(binheap_top_entry(handle, struct nested_info, hp_binheap_node)->hp_waiter_eff_prio); | ||
1012 | } | ||
1013 | return NULL; | ||
1014 | } | ||
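[Editor's note] top_priority() peeks the owner's heap of held locks, where each node caches the effective priority of that lock's highest waiter; the maximum over all held locks is what the owner inherits. A sketch with an array scan standing in for the binheap (larger int = higher priority, -1 playing the role of the NULL return):

	#include <stdio.h>

	static int top_priority(const int *per_lock_hp, int nr_held)
	{
		int top = -1;
		for (int i = 0; i < nr_held; i++)
			if (per_lock_hp[i] > top)
				top = per_lock_hp[i];
		return top;
	}

	int main(void)
	{
		int held[] = { 3, 8, 5 };	/* hp-waiter prio per held lock */
		printf("owner inherits: %d\n", top_priority(held, 3));
		return 0;
	}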
1015 | |||
1016 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
1017 | //static void gsnedf_rsm_mutex_reserve(struct litmus_lock *l, unsigned long *irqflags) | ||
1018 | //{ | ||
1019 | // struct rsm_mutex *mutex = rsm_mutex_from_lock(l); | ||
1020 | // raw_spin_lock_irqsave(&mutex->lock, *irqflags); | ||
1021 | //} | ||
1022 | // | ||
1023 | //static void gsnedf_rsm_mutex_unreserve(struct litmus_lock *l, unsigned long irqflags) | ||
1024 | //{ | ||
1025 | // struct rsm_mutex *mutex = rsm_mutex_from_lock(l); | ||
1026 | // raw_spin_unlock_irqrestore(&mutex->lock, irqflags); | ||
1027 | //} | ||
1028 | |||
1029 | static raw_spinlock_t* gsn_edf_get_dgl_spinlock(struct task_struct *t) | ||
1030 | { | ||
1031 | return(&dgl_lock); | ||
1032 | } | ||
1033 | |||
1034 | static int gsn_edf_rsm_mutex_is_owner(struct litmus_lock *l, struct task_struct *t) | ||
1035 | { | ||
1036 | struct rsm_mutex *mutex = rsm_mutex_from_lock(l); | ||
1037 | return(mutex->owner == t); | ||
1038 | } | ||
1039 | |||
1040 | |||
1041 | // Return 1 if resource was immediately acquired. | ||
1042 | // Assumes mutex->lock is held. | ||
1043 | // Must set task state to TASK_UNINTERRUPTIBLE if task blocks. | ||
1044 | static int gsn_edf_rsm_mutex_dgl_lock(struct litmus_lock *l, dgl_wait_state_t* dgl_wait, wait_queue_t* wq_node) | ||
1045 | { | ||
1046 | struct rsm_mutex *mutex = rsm_mutex_from_lock(l); | ||
1047 | struct task_struct *t = dgl_wait->task; | ||
1048 | |||
1049 | int acquired_immediately = 0; | ||
1050 | |||
1051 | BUG_ON(t != current); | ||
1052 | |||
1053 | if (mutex->owner) { | ||
1054 | TRACE_TASK(t, "Enqueuing on lock %d.\n", l->ident); | ||
1055 | |||
1056 | init_dgl_waitqueue_entry(wq_node, dgl_wait); | ||
1057 | |||
1058 | set_task_state(t, TASK_UNINTERRUPTIBLE); | ||
1059 | __add_wait_queue_tail_exclusive(&mutex->wait, wq_node); | ||
1060 | } else { | ||
1061 | TRACE_TASK(t, "Acquired lock %d with no blocking.\n", l->ident); | ||
1062 | |||
1063 | /* it's ours now */ | ||
1064 | mutex->owner = t; | ||
1065 | |||
1066 | raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock); | ||
1067 | binheap_add(&l->nest.hp_binheap_node, &tsk_rt(t)->hp_blocked_tasks, struct nested_info, hp_binheap_node); | ||
1068 | raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); | ||
1069 | |||
1070 | acquired_immediately = 1; | ||
1071 | } | ||
1072 | |||
1073 | return acquired_immediately; | ||
1074 | } | ||
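[Editor's note] The DGL acquire step above takes a free lock immediately and otherwise enqueues the request before the caller moves on to the next lock in the group. A simplified sketch of that per-lock step (illustrative struct; no real wait queue or task states):

	#include <stdio.h>

	struct dgl_lock { int owner; int queued; };

	static int dgl_try_one(struct dgl_lock *l, int self)
	{
		if (l->owner >= 0) {
			l->queued++;	/* enqueue; wake-up comes from unlock() */
			return 0;	/* not acquired immediately */
		}
		l->owner = self;
		return 1;		/* acquired immediately */
	}

	int main(void)
	{
		struct dgl_lock a = { .owner = -1 }, b = { .owner = 7 };
		printf("a: %d, b: %d\n", dgl_try_one(&a, 1), dgl_try_one(&b, 1));
		return 0;
	}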
1075 | |||
1076 | // Assumes mutex->lock is held. | ||
1077 | static void gsn_edf_rsm_enable_priority(struct litmus_lock *l, dgl_wait_state_t* dgl_wait) | ||
1078 | { | ||
1079 | struct rsm_mutex *mutex = rsm_mutex_from_lock(l); | ||
1080 | struct task_struct *t = dgl_wait->task; | ||
1081 | struct task_struct *owner = mutex->owner; | ||
1082 | unsigned long flags = 0; // these are unused under DGL coarse-grain locking | ||
1083 | |||
1084 | BUG_ON(owner == t); | ||
1085 | |||
1086 | tsk_rt(t)->blocked_lock = l; | ||
1087 | mb(); | ||
1088 | |||
1089 | if (edf_higher_prio(t, mutex->hp_waiter)) { | ||
1090 | |||
1091 | struct task_struct *old_max_eff_prio; | ||
1092 | struct task_struct *new_max_eff_prio; | ||
1093 | struct task_struct *new_prio = NULL; | ||
1094 | |||
1095 | if(mutex->hp_waiter) | ||
1096 | TRACE_TASK(t, "has higher prio than hp_waiter (%s/%d).\n", mutex->hp_waiter->comm, mutex->hp_waiter->pid); | ||
1097 | else | ||
1098 | TRACE_TASK(t, "has higher prio than hp_waiter (NIL).\n"); | ||
1099 | |||
1100 | raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock); | ||
1101 | |||
1102 | //TRACE_TASK(owner, "Heap Before:\n"); | ||
1103 | //print_hp_waiters(tsk_rt(owner)->hp_blocked_tasks.root, 0); | ||
1104 | |||
1105 | old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); | ||
1106 | |||
1107 | mutex->hp_waiter = t; | ||
1108 | l->nest.hp_waiter_eff_prio = effective_priority(mutex->hp_waiter); | ||
1109 | |||
1110 | binheap_decrease(&l->nest.hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks); | ||
1111 | |||
1112 | //TRACE_TASK(owner, "Heap After:\n"); | ||
1113 | //print_hp_waiters(tsk_rt(owner)->hp_blocked_tasks.root, 0); | ||
1114 | |||
1115 | new_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); | ||
1116 | |||
1117 | if(new_max_eff_prio != old_max_eff_prio) { | ||
1118 | TRACE_TASK(t, "is new hp_waiter.\n"); | ||
1119 | |||
1120 | if ((effective_priority(owner) == old_max_eff_prio) || | ||
1121 | (__edf_higher_prio(new_max_eff_prio, BASE, owner, EFFECTIVE))){ | ||
1122 | new_prio = new_max_eff_prio; | ||
1123 | } | ||
1124 | } | ||
1125 | else { | ||
1126 | TRACE_TASK(t, "no change in max_eff_prio of heap.\n"); | ||
1127 | } | ||
1128 | |||
1129 | //raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); | ||
1130 | |||
1131 | if(new_prio) { | ||
1132 | nested_increase_priority_inheritance(owner, new_prio, &mutex->lock, flags); // unlocks lock. | ||
1133 | } | ||
1134 | else { | ||
1135 | raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); | ||
1136 | unlock_fine_irqrestore(&mutex->lock, flags); | ||
1137 | } | ||
1138 | } | ||
1139 | else { | ||
1140 | TRACE_TASK(t, "no change in hp_waiter.\n"); | ||
1141 | unlock_fine_irqrestore(&mutex->lock, flags); | ||
1142 | } | ||
1143 | } | ||
1144 | #endif | ||
1145 | |||
1146 | int gsnedf_rsm_mutex_lock(struct litmus_lock* l) | ||
1147 | { | ||
1148 | struct task_struct *t = current; | ||
1149 | struct task_struct *owner; | ||
1150 | struct rsm_mutex *mutex = rsm_mutex_from_lock(l); | ||
1151 | wait_queue_t wait; | ||
1152 | unsigned long flags; | ||
1153 | |||
1154 | if (!is_realtime(t)) | ||
1155 | return -EPERM; | ||
1156 | |||
1157 | |||
1158 | lock_global_irqsave(&dgl_lock, flags); | ||
1159 | lock_fine_irqsave(&mutex->lock, flags); | ||
1160 | |||
1161 | if (mutex->owner) { | ||
1162 | TRACE_TASK(t, "Blocking on lock %d.\n", l->ident); | ||
1163 | |||
1164 | /* resource is not free => must suspend and wait */ | ||
1165 | |||
1166 | owner = mutex->owner; | ||
1167 | |||
1168 | init_waitqueue_entry(&wait, t); | ||
1169 | |||
1170 | tsk_rt(t)->blocked_lock = l; /* record where we are blocked */ | ||
1171 | mb(); // needed? | ||
1172 | |||
1173 | /* FIXME: interruptible would be nice some day */ | ||
1174 | set_task_state(t, TASK_UNINTERRUPTIBLE); | ||
1175 | |||
1176 | __add_wait_queue_tail_exclusive(&mutex->wait, &wait); | ||
1177 | |||
1178 | /* check if we need to activate priority inheritance */ | ||
1179 | if (edf_higher_prio(t, mutex->hp_waiter)) { | ||
1180 | |||
1181 | struct task_struct *old_max_eff_prio; | ||
1182 | struct task_struct *new_max_eff_prio; | ||
1183 | struct task_struct *new_prio = NULL; | ||
1184 | |||
1185 | if(mutex->hp_waiter) | ||
1186 | TRACE_TASK(t, "has higher prio than hp_waiter (%s/%d).\n", mutex->hp_waiter->comm, mutex->hp_waiter->pid); | ||
1187 | else | ||
1188 | TRACE_TASK(t, "has higher prio than hp_waiter (NIL).\n"); | ||
1189 | |||
1190 | raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock); | ||
1191 | |||
1192 | //TRACE_TASK(owner, "Heap Before:\n"); | ||
1193 | //print_hp_waiters(tsk_rt(owner)->hp_blocked_tasks.root, 0); | ||
1194 | |||
1195 | old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); | ||
1196 | |||
1197 | mutex->hp_waiter = t; | ||
1198 | l->nest.hp_waiter_eff_prio = effective_priority(mutex->hp_waiter); | ||
1199 | |||
1200 | binheap_decrease(&l->nest.hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks); | ||
1201 | |||
1202 | //TRACE_TASK(owner, "Heap After:\n"); | ||
1203 | //print_hp_waiters(tsk_rt(owner)->hp_blocked_tasks.root, 0); | ||
1204 | |||
1205 | new_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); | ||
1206 | |||
1207 | if(new_max_eff_prio != old_max_eff_prio) { | ||
1208 | TRACE_TASK(t, "is new hp_waiter.\n"); | ||
1209 | |||
1210 | if ((effective_priority(owner) == old_max_eff_prio) || | ||
1211 | (__edf_higher_prio(new_max_eff_prio, BASE, owner, EFFECTIVE))){ | ||
1212 | new_prio = new_max_eff_prio; | ||
1213 | } | ||
1214 | } | ||
1215 | else { | ||
1216 | TRACE_TASK(t, "no change in max_eff_prio of heap.\n"); | ||
1217 | } | ||
1218 | |||
1219 | if(new_prio) { | ||
1220 | nested_increase_priority_inheritance(owner, new_prio, &mutex->lock, flags); // unlocks lock. | ||
1221 | } | ||
1222 | else { | ||
1223 | raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); | ||
1224 | unlock_fine_irqrestore(&mutex->lock, flags); | ||
1225 | } | ||
1226 | } | ||
1227 | else { | ||
1228 | TRACE_TASK(t, "no change in hp_waiter.\n"); | ||
1229 | |||
1230 | unlock_fine_irqrestore(&mutex->lock, flags); | ||
1231 | } | ||
1232 | |||
1233 | unlock_global_irqrestore(&dgl_lock, flags); | ||
1234 | |||
1235 | TS_LOCK_SUSPEND; | ||
1236 | |||
1237 | /* We depend on the FIFO order. Thus, we don't need to recheck | ||
1238 | * when we wake up; we are guaranteed to have the lock since | ||
1239 | * there is only one wake up per release. | ||
1240 | */ | ||
1241 | |||
1242 | schedule(); | ||
1243 | |||
1244 | TS_LOCK_RESUME; | ||
1245 | |||
1246 | /* Since we hold the lock, no other task will change | ||
1247 | * ->owner. We can thus check it without acquiring the spin | ||
1248 | * lock. */ | ||
1249 | BUG_ON(mutex->owner != t); | ||
1250 | |||
1251 | TRACE_TASK(t, "Acquired lock %d.\n", l->ident); | ||
1252 | |||
1253 | } else { | ||
1254 | TRACE_TASK(t, "Acquired lock %d with no blocking.\n", l->ident); | ||
1255 | |||
1256 | /* it's ours now */ | ||
1257 | mutex->owner = t; | ||
1258 | |||
1259 | raw_spin_lock(&tsk_rt(mutex->owner)->hp_blocked_tasks_lock); | ||
1260 | binheap_add(&l->nest.hp_binheap_node, &tsk_rt(t)->hp_blocked_tasks, struct nested_info, hp_binheap_node); | ||
1261 | raw_spin_unlock(&tsk_rt(mutex->owner)->hp_blocked_tasks_lock); | ||
1262 | |||
1263 | |||
1264 | unlock_fine_irqrestore(&mutex->lock, flags); | ||
1265 | unlock_global_irqrestore(&dgl_lock, flags); | ||
1266 | } | ||
1267 | |||
1268 | return 0; | ||
1269 | } | ||
1270 | |||
1271 | |||
1272 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
1273 | void select_next_lock_if_primary(struct litmus_lock *l, dgl_wait_state_t *dgl_wait) | ||
1274 | { | ||
1275 | if(tsk_rt(dgl_wait->task)->blocked_lock == l) { | ||
1276 | TRACE_CUR("Lock %d in DGL was primary for %s/%d.\n", l->ident, dgl_wait->task->comm, dgl_wait->task->pid); | ||
1277 | tsk_rt(dgl_wait->task)->blocked_lock = NULL; | ||
1278 | mb(); | ||
1279 | select_next_lock(dgl_wait, l); // pick the next lock to be blocked on | ||
1280 | } | ||
1281 | else { | ||
1282 | TRACE_CUR("Got lock early! Lock %d in DGL was NOT primary for %s/%d.\n", l->ident, dgl_wait->task->comm, dgl_wait->task->pid); | ||
1283 | } | ||
1284 | } | ||
1285 | #endif | ||
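[Editor's note] select_next_lock_if_primary() encodes that a DGL waiter sleeps on only one lock at a time, its "primary"; when the primary is granted, the block point moves to the next still-contended lock in the group, and locks granted early are simply ignored. A sketch of that selection, with hypothetical names:

	#include <stdio.h>

	#define GROUP 3

	static int pick_next_primary(const int granted[GROUP], int cur)
	{
		for (int i = 0; i < GROUP; i++)
			if (!granted[i] && i != cur)
				return i;	/* next lock to block on */
		return -1;			/* group complete: task can run */
	}

	int main(void)
	{
		int granted[GROUP] = { 1, 0, 1 };	/* lock 0 was the primary */
		printf("next primary: %d\n", pick_next_primary(granted, 0));
		return 0;
	}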
1286 | |||
1287 | |||
1288 | int gsnedf_rsm_mutex_unlock(struct litmus_lock* l) | ||
1289 | { | ||
1290 | struct task_struct *t = current, *next = NULL; | ||
1291 | struct rsm_mutex *mutex = rsm_mutex_from_lock(l); | ||
1292 | unsigned long flags; | ||
1293 | |||
1294 | struct task_struct *old_max_eff_prio; | ||
1295 | |||
1296 | int wake_up_task = 1; | ||
1297 | |||
1298 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
1299 | dgl_wait_state_t *dgl_wait = NULL; | ||
1300 | #endif | ||
1301 | |||
1302 | int err = 0; | ||
1303 | |||
1304 | lock_global_irqsave(&dgl_lock, flags); | ||
1305 | lock_fine_irqsave(&mutex->lock, flags); | ||
1306 | |||
1307 | |||
1308 | if (mutex->owner != t) { | ||
1309 | err = -EINVAL; | ||
1310 | unlock_fine_irqrestore(&mutex->lock, flags); | ||
1311 | unlock_global_irqrestore(&dgl_lock, flags); | ||
1312 | return err; | ||
1313 | } | ||
1314 | |||
1315 | |||
1316 | raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock); | ||
1317 | |||
1318 | TRACE_TASK(t, "Freeing lock %d\n", l->ident); | ||
1319 | //TRACE_TASK(t, "Heap Before:\n"); | ||
1320 | //print_hp_waiters(tsk_rt(t)->hp_blocked_tasks.root, 0); | ||
1321 | |||
1322 | //old_max_hp_waiter = *(binheap_top_entry(&tsk_rt(t)->hp_blocked_tasks, struct nested_info, hp_binheap_node)->hp_waiter_ptr); | ||
1323 | old_max_eff_prio = top_priority(&tsk_rt(t)->hp_blocked_tasks); | ||
1324 | binheap_delete(&l->nest.hp_binheap_node, &tsk_rt(t)->hp_blocked_tasks); | ||
1325 | |||
1326 | //raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); | ||
1327 | |||
1328 | //TRACE_TASK(t, "Heap After:\n"); | ||
1329 | //print_hp_waiters(tsk_rt(t)->hp_blocked_tasks.root, 0); | ||
1330 | |||
1331 | if(tsk_rt(t)->inh_task){ | ||
1332 | struct task_struct *new_max_eff_prio = top_priority(&tsk_rt(t)->hp_blocked_tasks); | ||
1333 | |||
1334 | if((new_max_eff_prio == NULL) || | ||
1335 | ( (new_max_eff_prio != old_max_eff_prio) /* there was a change in eff prio */ && | ||
1336 | (effective_priority(t) == old_max_eff_prio) /* and owner had the old eff prio */) ) | ||
1337 | { | ||
1338 | // old_max_eff_prio > new_max_eff_prio | ||
1339 | |||
1340 | if(__edf_higher_prio(new_max_eff_prio, BASE, t, EFFECTIVE)) { | ||
1341 | TRACE_TASK(t, "new_max_eff_prio > task's eff_prio-- new_max_eff_prio: %s/%d task: %s/%d [%s/%d]\n", | ||
1342 | new_max_eff_prio->comm, new_max_eff_prio->pid, | ||
1343 | t->comm, t->pid, tsk_rt(t)->inh_task->comm, tsk_rt(t)->inh_task->pid); | ||
1344 | WARN_ON(1); | ||
1345 | } | ||
1346 | |||
1347 | decrease_priority_inheritance(t, new_max_eff_prio); | ||
1348 | } | ||
1349 | } | ||
1350 | |||
1351 | if(binheap_empty(&tsk_rt(t)->hp_blocked_tasks) && tsk_rt(t)->inh_task != NULL) | ||
1352 | { | ||
1353 | WARN_ON(tsk_rt(t)->inh_task != NULL); | ||
1354 | TRACE_TASK(t, "No more locks are held, but eff_prio = %s/%d\n", | ||
1355 | tsk_rt(t)->inh_task->comm, tsk_rt(t)->inh_task->pid); | ||
1356 | } | ||
1357 | |||
1358 | raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); | ||
1359 | |||
1360 | |||
1361 | /* check if there are jobs waiting for this resource */ | ||
1362 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
1363 | __waitqueue_dgl_remove_first(&mutex->wait, &dgl_wait, &next); | ||
1364 | if(dgl_wait) { | ||
1365 | next = dgl_wait->task; | ||
1366 | //select_next_lock_if_primary(l, dgl_wait); | ||
1367 | } | ||
1368 | #else | ||
1369 | next = __waitqueue_remove_first(&mutex->wait); | ||
1370 | #endif | ||
1371 | if (next) { | ||
1372 | /* next becomes the resource holder */ | ||
1373 | mutex->owner = next; | ||
1374 | TRACE_CUR("lock ownership passed to %s/%d\n", next->comm, next->pid); | ||
1375 | |||
1376 | // if(tsk_rt(next)->blocked_lock == &mutex->litmus_lock) { // might be false for DGL. | ||
1377 | // tsk_rt(next)->blocked_lock = NULL; | ||
1378 | // mb(); | ||
1379 | // } | ||
1380 | |||
1381 | /* determine new hp_waiter if necessary */ | ||
1382 | if (next == mutex->hp_waiter) { | ||
1383 | |||
1384 | TRACE_TASK(next, "was highest-prio waiter\n"); | ||
1385 | /* next has the highest priority --- it doesn't need to | ||
1386 | * inherit. However, we need to make sure that the | ||
1387 | * next-highest priority in the queue is reflected in | ||
1388 | * hp_waiter. */ | ||
1389 | mutex->hp_waiter = rsm_mutex_find_hp_waiter(mutex, next); | ||
1390 | l->nest.hp_waiter_eff_prio = (mutex->hp_waiter) ? effective_priority(mutex->hp_waiter) : NULL; | ||
1391 | |||
1392 | if (mutex->hp_waiter) | ||
1393 | TRACE_TASK(mutex->hp_waiter, "is new highest-prio waiter\n"); | ||
1394 | else | ||
1395 | TRACE("no further waiters\n"); | ||
1396 | |||
1397 | raw_spin_lock(&tsk_rt(next)->hp_blocked_tasks_lock); | ||
1398 | |||
1399 | //TRACE_TASK(next, "Heap Before:\n"); | ||
1400 | //print_hp_waiters(tsk_rt(next)->hp_blocked_tasks.root, 0); | ||
1401 | |||
1402 | binheap_add(&l->nest.hp_binheap_node, &tsk_rt(next)->hp_blocked_tasks, struct nested_info, hp_binheap_node); | ||
1403 | |||
1404 | //TRACE_TASK(next, "Heap After:\n"); | ||
1405 | //print_hp_waiters(tsk_rt(next)->hp_blocked_tasks.root, 0); | ||
1406 | |||
1407 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
1408 | if(dgl_wait) { | ||
1409 | select_next_lock_if_primary(l, dgl_wait); | ||
1410 | //wake_up_task = atomic_dec_and_test(&dgl_wait->nr_remaining); | ||
1411 | --(dgl_wait->nr_remaining); | ||
1412 | wake_up_task = (dgl_wait->nr_remaining == 0); | ||
1413 | } | ||
1414 | #endif | ||
1415 | raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock); | ||
1416 | } | ||
1417 | else { | ||
1418 | /* Well, if 'next' is not the highest-priority waiter, | ||
1419 | * then it (probably) ought to inherit the highest-priority | ||
1420 | * waiter's priority. */ | ||
1421 | TRACE_TASK(next, "is not hp_waiter of lock %d.\n", l->ident); | ||
1422 | |||
1423 | raw_spin_lock(&tsk_rt(next)->hp_blocked_tasks_lock); | ||
1424 | |||
1425 | //TRACE_TASK(next, "Heap Before:\n"); | ||
1426 | //print_hp_waiters(tsk_rt(next)->hp_blocked_tasks.root, 0); | ||
1427 | |||
1428 | binheap_add(&l->nest.hp_binheap_node, &tsk_rt(next)->hp_blocked_tasks, | ||
1429 | struct nested_info, hp_binheap_node); | ||
1430 | |||
1431 | |||
1432 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
1433 | if(dgl_wait) { | ||
1434 | select_next_lock_if_primary(l, dgl_wait); | ||
1435 | // wake_up_task = atomic_dec_and_test(&dgl_wait->nr_remaining); | ||
1436 | --(dgl_wait->nr_remaining); | ||
1437 | wake_up_task = (dgl_wait->nr_remaining == 0); | ||
1438 | } | ||
1439 | #endif | ||
1440 | |||
1441 | //TRACE_TASK(next, "Heap After:\n"); | ||
1442 | //print_hp_waiters(tsk_rt(next)->hp_blocked_tasks.root, 0); | ||
1443 | |||
1444 | /* It is possible that 'next' *should* be the hp_waiter, but isn't | ||
1445 | * because that update hasn't yet executed (update operation is | ||
1446 | * probably blocked on mutex->lock). So only inherit if the top of | ||
1447 | * 'next's top heap node is indeed the effective prio. of hp_waiter. | ||
1448 | * (We use l->hp_waiter_eff_prio instead of effective_priority(hp_waiter) | ||
1449 | * since the effective priority of hp_waiter can change (and the | ||
1450 | * update has not made it to this lock).) | ||
1451 | */ | ||
1452 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
1453 | if((l->nest.hp_waiter_eff_prio != NULL) && (top_priority(&tsk_rt(next)->hp_blocked_tasks) == l->nest.hp_waiter_eff_prio)) | ||
1454 | { | ||
1455 | if(dgl_wait && tsk_rt(next)->blocked_lock) { | ||
1456 | BUG_ON(wake_up_task); | ||
1457 | if(__edf_higher_prio(l->nest.hp_waiter_eff_prio, BASE, next, EFFECTIVE)) { | ||
1458 | nested_increase_priority_inheritance(next, l->nest.hp_waiter_eff_prio, &mutex->lock, flags); // unlocks lock && hp_blocked_tasks_lock. | ||
1459 | goto out; // all spinlocks are released. bail out now. | ||
1460 | } | ||
1461 | } | ||
1462 | else { | ||
1463 | increase_priority_inheritance(next, l->nest.hp_waiter_eff_prio); | ||
1464 | } | ||
1465 | } | ||
1466 | |||
1467 | raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock); | ||
1468 | #else | ||
1469 | if(likely(top_priority(&tsk_rt(next)->hp_blocked_tasks) == l->nest.hp_waiter_eff_prio)) | ||
1470 | { | ||
1471 | increase_priority_inheritance(next, l->nest.hp_waiter_eff_prio); | ||
1472 | } | ||
1473 | raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock); | ||
1474 | #endif | ||
1475 | } | ||
1476 | |||
1477 | if(wake_up_task) { | ||
1478 | TRACE_TASK(next, "waking up since it is no longer blocked.\n"); | ||
1479 | |||
1480 | tsk_rt(next)->blocked_lock = NULL; | ||
1481 | mb(); | ||
1482 | |||
1483 | wake_up_process(next); | ||
1484 | } | ||
1485 | else { | ||
1486 | TRACE_TASK(next, "is still blocked.\n"); | ||
1487 | } | ||
1488 | } | ||
1489 | else { | ||
1490 | /* becomes available */ | ||
1491 | mutex->owner = NULL; | ||
1492 | } | ||
1493 | |||
1494 | unlock_fine_irqrestore(&mutex->lock, flags); | ||
1495 | |||
1496 | out: | ||
1497 | unlock_global_irqrestore(&dgl_lock, flags); | ||
1498 | |||
1499 | return err; | ||
1500 | } | ||
1501 | |||
1502 | |||
1503 | void gsnedf_rsm_mutex_propagate_increase_inheritance(struct litmus_lock* l, | ||
1504 | struct task_struct* t, | ||
1505 | raw_spinlock_t* to_unlock, | ||
1506 | unsigned long irqflags) | ||
1507 | { | ||
1508 | struct rsm_mutex *mutex = rsm_mutex_from_lock(l); | ||
1509 | |||
1510 | // relay-style locking | ||
1511 | lock_fine(&mutex->lock); | ||
1512 | unlock_fine(to_unlock); | ||
1513 | |||
1514 | if(tsk_rt(t)->blocked_lock == l) { // prevent race on tsk_rt(t)->blocked_lock | ||
1515 | struct task_struct *owner = mutex->owner; | ||
1516 | |||
1517 | struct task_struct *old_max_eff_prio; | ||
1518 | struct task_struct *new_max_eff_prio; | ||
1519 | |||
1520 | raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock); | ||
1521 | |||
1522 | old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); | ||
1523 | |||
1524 | if((t != mutex->hp_waiter) && edf_higher_prio(t, mutex->hp_waiter)) { | ||
1525 | TRACE_TASK(t, "is new highest-prio waiter by propagation.\n"); | ||
1526 | mutex->hp_waiter = t; | ||
1527 | } | ||
1528 | if(t == mutex->hp_waiter) { | ||
1529 | // reflect the increased priority in the heap node. | ||
1530 | l->nest.hp_waiter_eff_prio = effective_priority(mutex->hp_waiter); | ||
1531 | |||
1532 | BUG_ON(!binheap_is_in_heap(&l->nest.hp_binheap_node)); | ||
1533 | BUG_ON(!binheap_is_in_this_heap(&l->nest.hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks)); | ||
1534 | |||
1535 | binheap_decrease(&l->nest.hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks); | ||
1536 | } | ||
1537 | |||
1538 | new_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); | ||
1539 | |||
1540 | |||
1541 | if(new_max_eff_prio != old_max_eff_prio) { | ||
1542 | // new_max_eff_prio > old_max_eff_prio holds. | ||
1543 | if ((effective_priority(owner) == old_max_eff_prio) || | ||
1544 | (__edf_higher_prio(new_max_eff_prio, BASE, owner, EFFECTIVE))) { | ||
1545 | |||
1546 | TRACE_CUR("Propagating inheritance to holder of lock %d.\n", l->ident); | ||
1547 | |||
1548 | // beware: recursion | ||
1549 | nested_increase_priority_inheritance(owner, new_max_eff_prio, &mutex->lock, irqflags); // unlocks mutex->lock | ||
1550 | } | ||
1551 | else { | ||
1552 | TRACE_CUR("Lower priority than holder %s/%d. No propagation.\n", owner->comm, owner->pid); | ||
1553 | raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); | ||
1554 | unlock_fine_irqrestore(&mutex->lock, irqflags); | ||
1555 | } | ||
1556 | } | ||
1557 | else { | ||
1558 | TRACE_TASK(mutex->owner, "No change in maximum effective priority.\n"); | ||
1559 | raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); | ||
1560 | unlock_fine_irqrestore(&mutex->lock, irqflags); | ||
1561 | } | ||
1562 | } | ||
1563 | else { | ||
1564 | struct litmus_lock *still_blocked = tsk_rt(t)->blocked_lock; | ||
1565 | |||
1566 | TRACE_TASK(t, "is not blocked on lock %d.\n", l->ident); | ||
1567 | if(still_blocked) { | ||
1568 | TRACE_TASK(t, "is still blocked on a lock though (lock %d).\n", still_blocked->ident); | ||
1569 | if(still_blocked->ops->propagate_increase_inheritance) { | ||
1570 | /* due to relay-style nesting of spinlocks (acq. A, acq. B, free A, free B) | ||
1571 | we know that task 't' has not released any locks behind us in this | ||
1572 | chain. Propagation just needs to catch up with task 't'. */ | ||
1573 | still_blocked->ops->propagate_increase_inheritance(still_blocked, t, &mutex->lock, irqflags); | ||
1574 | } | ||
1575 | else { | ||
1576 | TRACE_TASK(t, "Inheritor is blocked on lock (%p) that does not support nesting!\n", still_blocked); | ||
1577 | unlock_fine_irqrestore(&mutex->lock, irqflags); | ||
1578 | } | ||
1579 | } | ||
1580 | else { | ||
1581 | unlock_fine_irqrestore(&mutex->lock, irqflags); | ||
1582 | } | ||
1583 | } | ||
1584 | } | ||
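
Both propagate functions use relay-style (hand-over-hand) fine-grained locking: the next lock in the blocking chain is acquired before the previous one is released, so the chain cannot be unlinked underneath the traversal. A minimal sketch of the discipline, with a hypothetical chain_node type standing in for the blocked_lock chain:

#include <linux/spinlock.h>

struct chain_node {
	raw_spinlock_t lock;
	struct chain_node *next; /* next hop in the blocking chain, if any */
};

static void relay_walk(struct chain_node *start)
{
	struct chain_node *cur = start;

	raw_spin_lock(&cur->lock);
	while (cur->next) {
		struct chain_node *next = cur->next;

		raw_spin_lock(&next->lock);  /* acquire the next hop first... */
		raw_spin_unlock(&cur->lock); /* ...then release the one behind us */
		cur = next;
		/* per-hop work (e.g., priority propagation) goes here */
	}
	raw_spin_unlock(&cur->lock);
}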
1585 | |||
1586 | |||
1587 | void gsnedf_rsm_mutex_propagate_decrease_inheritance(struct litmus_lock* l, | ||
1588 | struct task_struct* t, | ||
1589 | raw_spinlock_t* to_unlock, | ||
1590 | unsigned long irqflags) | ||
1591 | { | ||
1592 | struct rsm_mutex *mutex = rsm_mutex_from_lock(l); | ||
1593 | |||
1594 | // relay-style locking | ||
1595 | lock_fine(&mutex->lock); | ||
1596 | unlock_fine(to_unlock); | ||
1597 | |||
1598 | if(tsk_rt(t)->blocked_lock == l) { // prevent race on tsk_rt(t)->blocked | ||
1599 | if(t == mutex->hp_waiter) { | ||
1600 | struct task_struct *owner = mutex->owner; | ||
1601 | |||
1602 | struct task_struct *old_max_eff_prio; | ||
1603 | struct task_struct *new_max_eff_prio; | ||
1604 | |||
1605 | raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock); | ||
1606 | |||
1607 | old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); | ||
1608 | |||
1609 | binheap_delete(&l->nest.hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks); | ||
1610 | mutex->hp_waiter = rsm_mutex_find_hp_waiter(mutex, NULL); | ||
1611 | l->nest.hp_waiter_eff_prio = (mutex->hp_waiter) ? effective_priority(mutex->hp_waiter) : NULL; | ||
1612 | binheap_add(&l->nest.hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks, struct nested_info, hp_binheap_node); | ||
1613 | |||
1614 | new_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); | ||
1615 | |||
1616 | if((old_max_eff_prio != new_max_eff_prio) && | ||
1617 | (effective_priority(owner) == old_max_eff_prio)) | ||
1618 | { | ||
1619 | // Need to set new effective_priority for owner | ||
1620 | |||
1621 | struct task_struct *decreased_prio; | ||
1622 | |||
1623 | TRACE_CUR("Propagating decreased inheritance to holder of lock %d.\n", l->ident); | ||
1624 | |||
1625 | if(__edf_higher_prio(new_max_eff_prio, BASE, owner, BASE)) { | ||
1626 | TRACE_CUR("%s/%d has greater base priority than base priority of owner (%s/%d) of lock %d.\n", | ||
1627 | (new_max_eff_prio) ? new_max_eff_prio->comm : "nil", | ||
1628 | (new_max_eff_prio) ? new_max_eff_prio->pid : -1, | ||
1629 | owner->comm, | ||
1630 | owner->pid, | ||
1631 | l->ident); | ||
1632 | |||
1633 | decreased_prio = new_max_eff_prio; | ||
1634 | } | ||
1635 | else { | ||
1636 | TRACE_CUR("%s/%d has lesser base priority than base priority of owner (%s/%d) of lock %d.\n", | ||
1637 | (new_max_eff_prio) ? new_max_eff_prio->comm : "nil", | ||
1638 | (new_max_eff_prio) ? new_max_eff_prio->pid : -1, | ||
1639 | owner->comm, | ||
1640 | owner->pid, | ||
1641 | l->ident); | ||
1642 | |||
1643 | decreased_prio = NULL; | ||
1644 | } | ||
1645 | |||
1646 | // beware: recursion | ||
1647 | nested_decrease_priority_inheritance(owner, decreased_prio, &mutex->lock, irqflags); // will unlock mutex->lock | ||
1648 | } | ||
1649 | else { | ||
1650 | raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); | ||
1651 | unlock_fine_irqrestore(&mutex->lock, irqflags); | ||
1652 | } | ||
1653 | } | ||
1654 | else { | ||
1655 | TRACE_TASK(t, "is not hp_waiter. No propagation.\n"); | ||
1656 | unlock_fine_irqrestore(&mutex->lock, irqflags); | ||
1657 | } | ||
1658 | } | ||
1659 | else { | ||
1660 | struct litmus_lock *still_blocked = tsk_rt(t)->blocked_lock; | ||
1661 | |||
1662 | TRACE_TASK(t, "is not blocked on lock %d.\n", l->ident); | ||
1663 | if(still_blocked) { | ||
1664 | TRACE_TASK(t, "is still blocked on a lock though (lock %d).\n", still_blocked->ident); | ||
1665 | if(still_blocked->ops->propagate_decrease_inheritance) { | ||
1666 | /* due to relay-style nesting of spinlocks (acq. A, acq. B, free A, free B) | ||
1667 | we know that task 't' has not released any locks behind us in this | ||
1668 | chain. Propagation just needs to catch up with task 't'. */ | ||
1669 | still_blocked->ops->propagate_decrease_inheritance(still_blocked, t, &mutex->lock, irqflags); | ||
1670 | } | ||
1671 | else { | ||
1672 | TRACE_TASK(t, "Inheritor is blocked on lock (%p) that does not support nesting!\n", still_blocked); | ||
1673 | unlock_fine_irqrestore(&mutex->lock, irqflags); | ||
1674 | } | ||
1675 | } | ||
1676 | else { | ||
1677 | unlock_fine_irqrestore(&mutex->lock, irqflags); | ||
1678 | } | ||
1679 | } | ||
1680 | } | ||
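
Note the asymmetry between the two propagate paths: on an increase, the lock's node in the owner's hp_blocked_tasks heap can only move toward the root, so binheap_decrease() (a sift toward the root) suffices; on a decrease, the node may move in either direction, so it is deleted and re-inserted. The two idioms, sketched against the binheap API used throughout this file (the INIT_BINHEAP_NODE before re-insertion follows the other call sites in this patch):

/* key moved toward the root only: a sift is enough */
binheap_decrease(&l->nest.hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks);

/* key may have moved either way: remove and re-insert */
binheap_delete(&l->nest.hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks);
INIT_BINHEAP_NODE(&l->nest.hp_binheap_node);
binheap_add(&l->nest.hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks,
            struct nested_info, hp_binheap_node);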
1681 | |||
1682 | |||
1683 | |||
1684 | int gsnedf_rsm_mutex_close(struct litmus_lock* l) | ||
1685 | { | ||
1686 | struct task_struct *t = current; | ||
1687 | struct rsm_mutex *mutex = rsm_mutex_from_lock(l); | ||
1688 | unsigned long flags; | ||
1689 | |||
1690 | int owner; | ||
1691 | |||
1692 | |||
1693 | lock_global_irqsave(&dgl_lock, flags); | ||
1694 | lock_fine_irqsave(&mutex->lock, flags); | ||
1695 | |||
1696 | owner = (mutex->owner == t); | ||
1697 | |||
1698 | unlock_fine_irqrestore(&mutex->lock, flags); | ||
1699 | unlock_global_irqrestore(&dgl_lock, flags); | ||
1700 | |||
1701 | if (owner) | ||
1702 | gsnedf_rsm_mutex_unlock(l); | ||
1703 | |||
1704 | return 0; | ||
1705 | } | ||
1706 | |||
1707 | void gsnedf_rsm_mutex_free(struct litmus_lock* lock) | ||
1708 | { | ||
1709 | kfree(rsm_mutex_from_lock(lock)); | ||
1710 | } | ||
1711 | |||
1712 | static struct litmus_lock_ops gsnedf_rsm_mutex_lock_ops = { | ||
1713 | .close = gsnedf_rsm_mutex_close, | ||
1714 | .lock = gsnedf_rsm_mutex_lock, | ||
1715 | .unlock = gsnedf_rsm_mutex_unlock, | ||
1716 | .deallocate = gsnedf_rsm_mutex_free, | ||
1717 | .propagate_increase_inheritance = gsnedf_rsm_mutex_propagate_increase_inheritance, | ||
1718 | .propagate_decrease_inheritance = gsnedf_rsm_mutex_propagate_decrease_inheritance, | ||
1719 | |||
1720 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
1721 | // .reserve = gsnedf_rsm_mutex_reserve, | ||
1722 | // .unreserve = gsnedf_rsm_mutex_unreserve, | ||
1723 | .dgl_lock = gsn_edf_rsm_mutex_dgl_lock, | ||
1724 | .is_owner = gsn_edf_rsm_mutex_is_owner, | ||
1725 | .enable_priority = gsn_edf_rsm_enable_priority, | ||
1726 | #endif | ||
1727 | }; | ||
1728 | |||
1729 | static struct litmus_lock* gsnedf_new_rsm_mutex(void) | ||
1730 | { | ||
1731 | struct rsm_mutex* mutex; | ||
1732 | |||
1733 | mutex = kmalloc(sizeof(*mutex), GFP_KERNEL); | ||
1734 | if (!mutex) | ||
1735 | return NULL; | ||
1736 | |||
1737 | mutex->owner = NULL; | ||
1738 | mutex->hp_waiter = NULL; | ||
1739 | init_waitqueue_head(&mutex->wait); | ||
1740 | |||
1741 | |||
1742 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
1743 | { | ||
1744 | __raw_spin_lock_init(&mutex->lock, ((struct litmus_lock*)mutex)->cheat_lockdep, &((struct litmus_lock*)mutex)->key); | ||
1745 | } | ||
1746 | #else | ||
1747 | raw_spin_lock_init(&mutex->lock); | ||
1748 | #endif | ||
1749 | |||
1750 | mutex->litmus_lock.ops = &gsnedf_rsm_mutex_lock_ops; | ||
1751 | |||
1752 | ((struct litmus_lock*)mutex)->nest.hp_waiter_ptr = &mutex->hp_waiter; | ||
1753 | |||
1754 | return &mutex->litmus_lock; | ||
1755 | } | ||
1756 | |||
906 | |||
907 | .propagate_increase_inheritance = rsm_mutex_propagate_increase_inheritance, | ||
908 | .propagate_decrease_inheritance = rsm_mutex_propagate_decrease_inheritance, | ||
909 | |||
910 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
911 | .dgl_lock = rsm_mutex_dgl_lock, | ||
912 | .is_owner = rsm_mutex_is_owner, | ||
913 | .enable_priority = rsm_mutex_enable_priority, | ||
914 | #endif | ||
915 | }; | ||
916 | |||
917 | static struct litmus_lock* gsnedf_new_rsm_mutex(void) | ||
918 | { | ||
919 | return rsm_mutex_new(&gsnedf_rsm_mutex_lock_ops); | ||
920 | } | ||
921 | |||
1757 | /* **** lock constructor **** */ | ||
1758 | |||
1759 | |||
1760 | |||
1761 | |||
1762 | |||
1763 | |||
1764 | |||
1765 | /* ******************** IKGLP ********************** */ | ||
1766 | |||
1767 | |||
1768 | typedef struct ikglp_heap_node | ||
1769 | { | ||
1770 | struct task_struct *task; | ||
1771 | struct binheap_node node; | ||
1772 | } ikglp_heap_node_t; | ||
1773 | |||
1774 | static int ikglp_edf_max_heap_base_priority_order(struct binheap_node *a, struct binheap_node *b) | ||
1775 | { | ||
1776 | ikglp_heap_node_t *d_a = binheap_entry(a, ikglp_heap_node_t, node); | ||
1777 | ikglp_heap_node_t *d_b = binheap_entry(b, ikglp_heap_node_t, node); | ||
1778 | |||
1779 | BUG_ON(!d_a); | ||
1780 | BUG_ON(!d_b); | ||
1781 | |||
1782 | return __edf_higher_prio(d_a->task, BASE, d_b->task, BASE); | ||
1783 | } | ||
1784 | |||
1785 | static int ikglp_edf_min_heap_base_priority_order(struct binheap_node *a, struct binheap_node *b) | ||
1786 | { | ||
1787 | ikglp_heap_node_t *d_a = binheap_entry(a, ikglp_heap_node_t, node); | ||
1788 | ikglp_heap_node_t *d_b = binheap_entry(b, ikglp_heap_node_t, node); | ||
1789 | |||
1790 | return __edf_higher_prio(d_b->task, BASE, d_a->task, BASE); | ||
1791 | } | ||
1792 | |||
1793 | struct fifo_queue; | ||
1794 | struct ikglp_wait_state; | ||
1795 | |||
1796 | typedef struct ikglp_donee_heap_node | ||
1797 | { | ||
1798 | struct task_struct *task; | ||
1799 | struct fifo_queue *fq; | ||
1800 | struct ikglp_wait_state *donor_info; // cross-linked with ikglp_wait_state_t of donor | ||
1801 | |||
1802 | struct binheap_node node; | ||
1803 | } ikglp_donee_heap_node_t; | ||
1804 | |||
1805 | |||
1806 | // Maintains the state of a request as it goes through the IKGLP | ||
1807 | typedef struct ikglp_wait_state { | ||
1808 | struct task_struct *task; // pointer back to the requesting task | ||
1809 | |||
1810 | // Data for while waiting in FIFO Queue | ||
1811 | wait_queue_t fq_node; | ||
1812 | ikglp_heap_node_t global_heap_node; | ||
1813 | ikglp_donee_heap_node_t donee_heap_node; | ||
1814 | |||
1815 | // Data for while waiting in PQ | ||
1816 | ikglp_heap_node_t pq_node; | ||
1817 | |||
1818 | // Data for while waiting as a donor | ||
1819 | ikglp_donee_heap_node_t *donee_info; // cross-linked with donee's ikglp_donee_heap_node_t | ||
1820 | struct nested_info prio_donation; | ||
1821 | struct binheap_node node; | ||
1822 | } ikglp_wait_state_t; | ||
1823 | |||
1824 | |||
1825 | static int ikglp_donor_edf_max_heap_base_priority_order(struct binheap_node *a, struct binheap_node *b) | ||
1826 | { | ||
1827 | ikglp_wait_state_t *d_a = binheap_entry(a, ikglp_wait_state_t, node); | ||
1828 | ikglp_wait_state_t *d_b = binheap_entry(b, ikglp_wait_state_t, node); | ||
1829 | |||
1830 | return __edf_higher_prio(d_a->task, BASE, d_b->task, BASE); | ||
1831 | } | ||
1832 | |||
1833 | |||
1834 | static int ikglp_edf_min_heap_donee_order(struct binheap_node *a, struct binheap_node *b) | ||
1835 | { | ||
1836 | struct task_struct *prio_a, *prio_b; | ||
1837 | |||
1838 | ikglp_donee_heap_node_t *d_a = binheap_entry(a, ikglp_donee_heap_node_t, node); | ||
1839 | ikglp_donee_heap_node_t *d_b = binheap_entry(b, ikglp_donee_heap_node_t, node); | ||
1840 | |||
1841 | if(!d_a->donor_info) { | ||
1842 | prio_a = d_a->task; | ||
1843 | } | ||
1844 | else { | ||
1845 | prio_a = d_a->donor_info->task; | ||
1846 | BUG_ON(d_a->task != d_a->donor_info->donee_info->task); | ||
1847 | } | ||
1848 | |||
1849 | if(!d_b->donor_info) { | ||
1850 | prio_b = d_b->task; | ||
1851 | } | ||
1852 | else { | ||
1853 | prio_b = d_b->donor_info->task; | ||
1854 | BUG_ON(d_b->task != d_b->donor_info->donee_info->task); | ||
1855 | } | ||
1856 | |||
1857 | // note reversed order | ||
1858 | return __edf_higher_prio(prio_b, BASE, prio_a, BASE); | ||
1859 | } | ||
1860 | |||
1861 | |||
1862 | /* struct for semaphore with priority inheritance */ | ||
1863 | struct fifo_queue | ||
1864 | { | ||
1865 | wait_queue_head_t wait; | ||
1866 | struct task_struct* owner; | ||
1867 | |||
1868 | // used for bookkeeping | ||
1869 | ikglp_heap_node_t global_heap_node; | ||
1870 | ikglp_donee_heap_node_t donee_heap_node; | ||
1871 | |||
1872 | struct task_struct* hp_waiter; | ||
1873 | int count; /* number of waiters + holder */ | ||
1874 | |||
1875 | struct nested_info nest; | ||
1876 | }; | ||
1877 | |||
1878 | |||
1879 | struct ikglp_semaphore | ||
1880 | { | ||
1881 | struct litmus_lock litmus_lock; | ||
1882 | |||
1883 | raw_spinlock_t lock; | ||
1884 | raw_spinlock_t real_lock; | ||
1885 | |||
1886 | int nr_replicas; // AKA k | ||
1887 | int m; | ||
1888 | |||
1889 | int max_fifo_len; // max len of a fifo queue | ||
1890 | |||
1891 | struct binheap_handle top_m; // min heap, base prio | ||
1892 | int top_m_size; // number of nodes in top_m | ||
1893 | |||
1894 | struct binheap_handle not_top_m; // max heap, base prio | ||
1895 | |||
1896 | struct binheap_handle donees; // min-heap, base prio | ||
1897 | struct fifo_queue *shortest_fifo_queue; // pointer to shortest fifo queue | ||
1898 | |||
1899 | /* data structures for holding requests */ | ||
1900 | struct fifo_queue *fifo_queues; // array nr_replicas in length | ||
1901 | struct binheap_handle priority_queue; // max-heap, base prio | ||
1902 | struct binheap_handle donors; // max-heap, base prio | ||
1903 | }; | ||
1904 | |||
1905 | static inline struct ikglp_semaphore* ikglp_from_lock(struct litmus_lock* lock) | ||
1906 | { | ||
1907 | return container_of(lock, struct ikglp_semaphore, litmus_lock); | ||
1908 | } | ||
1909 | |||
1910 | |||
1911 | static inline int ikglp_get_idx(struct ikglp_semaphore *sem, | ||
1912 | struct fifo_queue *queue) | ||
1913 | { | ||
1914 | return (queue - &sem->fifo_queues[0]); | ||
1915 | } | ||
1916 | |||
1917 | static inline struct fifo_queue* ikglp_get_queue( | ||
1918 | struct ikglp_semaphore *sem, | ||
1919 | struct task_struct *holder) | ||
1920 | { | ||
1921 | int i; | ||
1922 | for(i = 0; i < sem->nr_replicas; ++i) | ||
1923 | if(sem->fifo_queues[i].owner == holder) | ||
1924 | return(&sem->fifo_queues[i]); | ||
1925 | return(NULL); | ||
1926 | } | ||
1927 | |||
1928 | static struct task_struct* ikglp_find_hp_waiter( | ||
1929 | struct fifo_queue *kqueue, | ||
1930 | struct task_struct *skip) | ||
1931 | { | ||
1932 | struct list_head *pos; | ||
1933 | struct task_struct *queued, *found = NULL; | ||
1934 | |||
1935 | list_for_each(pos, &kqueue->wait.task_list) { | ||
1936 | queued = (struct task_struct*) list_entry(pos, wait_queue_t, task_list)->private; | ||
1937 | |||
1938 | /* Compare task prios, find high prio task. */ | ||
1939 | if (queued != skip && edf_higher_prio(queued, found)) | ||
1940 | found = queued; | ||
1941 | } | ||
1942 | return found; | ||
1943 | } | ||
1944 | |||
1945 | static struct fifo_queue* ikglp_find_shortest( | ||
1946 | struct ikglp_semaphore *sem, | ||
1947 | struct fifo_queue *search_start) | ||
1948 | { | ||
1949 | // we start our search at search_start instead of at the beginning of the | ||
1950 | // queue list to load-balance across all resources. | ||
1951 | struct fifo_queue* step = search_start; | ||
1952 | struct fifo_queue* shortest = sem->shortest_fifo_queue; | ||
1953 | |||
1954 | do { | ||
1955 | step = (step+1 != &sem->fifo_queues[sem->nr_replicas]) ? | ||
1956 | step+1 : &sem->fifo_queues[0]; | ||
1957 | |||
1958 | if(step->count < shortest->count) { | ||
1959 | shortest = step; | ||
1960 | if(step->count == 0) | ||
1961 | break; /* can't get any shorter */ | ||
1962 | } | ||
1963 | |||
1964 | }while(step != search_start); | ||
1965 | |||
1966 | return(shortest); | ||
1967 | } | ||
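
The scan starts just past search_start and wraps around the replica array, so successive searches do not always favor low-numbered queues. A standalone illustration of the same ring traversal over plain counters (a hypothetical reduction; the real code above seeds its result with the cached sem->shortest_fifo_queue):

static int find_shortest_idx(const int *counts, int n, int start)
{
	int step = start;
	int shortest = start;

	do {
		step = (step + 1 != n) ? step + 1 : 0; /* wrap around the ring */
		if (counts[step] < counts[shortest]) {
			shortest = step;
			if (counts[step] == 0)
				break; /* an empty queue can't be beaten */
		}
	} while (step != start);

	return shortest;
}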
1968 | |||
1969 | static inline struct task_struct* ikglp_mth_highest(struct ikglp_semaphore *sem) | ||
1970 | { | ||
1971 | return binheap_top_entry(&sem->top_m, ikglp_heap_node_t, node)->task; | ||
1972 | } | ||
1973 | |||
1974 | void print_global_list(struct binheap_node* n, int depth) | ||
1975 | { | ||
1976 | ikglp_heap_node_t *global_heap_node; | ||
1977 | char padding[81] = " "; | ||
1978 | |||
1979 | if(n == NULL) { | ||
1980 | TRACE_CUR("+-> %p\n", NULL); | ||
1981 | return; | ||
1982 | } | ||
1983 | |||
1984 | global_heap_node = binheap_entry(n, ikglp_heap_node_t, node); | ||
1985 | |||
1986 | if(depth*2 <= 80) | ||
1987 | padding[depth*2] = '\0'; | ||
1988 | |||
1989 | TRACE_CUR("%s+-> %s/%d\n", | ||
1990 | padding, | ||
1991 | global_heap_node->task->comm, | ||
1992 | global_heap_node->task->pid); | ||
1993 | |||
1994 | if(n->left) print_global_list(n->left, depth+1); | ||
1995 | if(n->right) print_global_list(n->right, depth+1); | ||
1996 | } | ||
1997 | |||
1998 | |||
1999 | static void ikglp_add_global_list(struct ikglp_semaphore *sem, struct task_struct *t, ikglp_heap_node_t *node) | ||
2000 | { | ||
2001 | |||
2002 | |||
2003 | node->task = t; | ||
2004 | INIT_BINHEAP_NODE(&node->node); | ||
2005 | |||
2006 | if(sem->top_m_size < sem->m) { | ||
2007 | TRACE_CUR("Trivially adding %s/%d to top-m global list.\n", t->comm, t->pid); | ||
2008 | // TRACE_CUR("Top-M Before (size = %d):\n", sem->top_m_size); | ||
2009 | // print_global_list(sem->top_m.root, 1); | ||
2010 | |||
2011 | binheap_add(&node->node, &sem->top_m, ikglp_heap_node_t, node); | ||
2012 | ++(sem->top_m_size); | ||
2013 | |||
2014 | TRACE_CUR("Top-M After (size = %d):\n", sem->top_m_size); | ||
2015 | print_global_list(sem->top_m.root, 1); | ||
2016 | } | ||
2017 | else if(__edf_higher_prio(t, BASE, ikglp_mth_highest(sem), BASE)) { | ||
2018 | ikglp_heap_node_t *evicted = binheap_top_entry(&sem->top_m, ikglp_heap_node_t, node); | ||
2019 | |||
2020 | TRACE_CUR("Adding %s/%d to top-m and evicting %s/%d.\n", | ||
2021 | t->comm, t->pid, | ||
2022 | evicted->task->comm, evicted->task->pid); | ||
2023 | |||
2024 | // TRACE_CUR("Not-Top-M Before:\n"); | ||
2025 | // print_global_list(sem->not_top_m.root, 1); | ||
2026 | // TRACE_CUR("Top-M Before (size = %d):\n", sem->top_m_size); | ||
2027 | // print_global_list(sem->top_m.root, 1); | ||
2028 | |||
2029 | |||
2030 | binheap_delete_root(&sem->top_m, ikglp_heap_node_t, node); | ||
2031 | INIT_BINHEAP_NODE(&evicted->node); | ||
2032 | binheap_add(&evicted->node, &sem->not_top_m, ikglp_heap_node_t, node); | ||
2033 | |||
2034 | binheap_add(&node->node, &sem->top_m, ikglp_heap_node_t, node); | ||
2035 | |||
2036 | TRACE_CUR("Top-M After (size = %d):\n", sem->top_m_size); | ||
2037 | print_global_list(sem->top_m.root, 1); | ||
2038 | TRACE_CUR("Not-Top-M After:\n"); | ||
2039 | print_global_list(sem->not_top_m.root, 1); | ||
2040 | } | ||
2041 | else { | ||
2042 | TRACE_CUR("Trivially adding %s/%d to not-top-m global list.\n", t->comm, t->pid); | ||
2043 | // TRACE_CUR("Not-Top-M Before:\n"); | ||
2044 | // print_global_list(sem->not_top_m.root, 1); | ||
2045 | |||
2046 | binheap_add(&node->node, &sem->not_top_m, ikglp_heap_node_t, node); | ||
2047 | |||
2048 | TRACE_CUR("Not-Top-M After:\n"); | ||
2049 | print_global_list(sem->not_top_m.root, 1); | ||
2050 | } | ||
2051 | } | ||
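
ikglp_add_global_list() tracks the m highest-base-priority requests with a pair of heaps: top_m is a min-heap (its root is the m-th highest task, i.e., the cheapest member to evict) and not_top_m is a max-heap (its root is the best candidate for promotion). The same idea over integer keys, as a sketch with hypothetical heap_push/heap_pop/heap_peek helpers:

/* top_m: min-heap of the m largest keys; rest: max-heap of the others.
 * heap_push/heap_pop/heap_peek are hypothetical stand-ins for binheap ops. */
void topm_insert(int key)
{
	if (topm_size < m) {
		heap_push(&top_m, key);               /* room left: trivially in the top m */
		++topm_size;
	} else if (key > heap_peek(&top_m)) {         /* beats the m-th highest */
		heap_push(&rest, heap_pop(&top_m));   /* demote the old m-th */
		heap_push(&top_m, key);
	} else {
		heap_push(&rest, key);                /* not in the top m */
	}
}

Deletion (ikglp_del_global_list() below) mirrors this: removing a top-m member promotes the root of not_top_m, if any.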
2052 | |||
2053 | |||
2054 | static void ikglp_del_global_list(struct ikglp_semaphore *sem, struct task_struct *t, ikglp_heap_node_t *node) | ||
2055 | { | ||
2056 | BUG_ON(!binheap_is_in_heap(&node->node)); | ||
2057 | |||
2058 | TRACE_CUR("Removing %s/%d from global list.\n", t->comm, t->pid); | ||
2059 | |||
2060 | if(binheap_is_in_this_heap(&node->node, &sem->top_m)) { | ||
2061 | TRACE_CUR("%s/%d is in top-m\n", t->comm, t->pid); | ||
2062 | |||
2063 | // TRACE_CUR("Not-Top-M Before:\n"); | ||
2064 | // print_global_list(sem->not_top_m.root, 1); | ||
2065 | // TRACE_CUR("Top-M Before (size = %d):\n", sem->top_m_size); | ||
2066 | // print_global_list(sem->top_m.root, 1); | ||
2067 | |||
2068 | |||
2069 | binheap_delete(&node->node, &sem->top_m); | ||
2070 | |||
2071 | if(!binheap_empty(&sem->not_top_m)) { | ||
2072 | ikglp_heap_node_t *promoted = binheap_top_entry(&sem->not_top_m, ikglp_heap_node_t, node); | ||
2073 | |||
2074 | TRACE_CUR("Promoting %s/%d to top-m\n", promoted->task->comm, promoted->task->pid); | ||
2075 | |||
2076 | binheap_delete_root(&sem->not_top_m, ikglp_heap_node_t, node); | ||
2077 | INIT_BINHEAP_NODE(&promoted->node); | ||
2078 | |||
2079 | binheap_add(&promoted->node, &sem->top_m, ikglp_heap_node_t, node); | ||
2080 | } | ||
2081 | else { | ||
2082 | TRACE_CUR("No one to promote to top-m.\n"); | ||
2083 | --(sem->top_m_size); | ||
2084 | } | ||
2085 | |||
2086 | TRACE_CUR("Top-M After (size = %d):\n", sem->top_m_size); | ||
2087 | print_global_list(sem->top_m.root, 1); | ||
2088 | TRACE_CUR("Not-Top-M After:\n"); | ||
2089 | print_global_list(sem->not_top_m.root, 1); | ||
2090 | } | ||
2091 | else { | ||
2092 | // TRACE_CUR("%s/%d is in not-top-m\n", t->comm, t->pid); | ||
2093 | // TRACE_CUR("Not-Top-M Before:\n"); | ||
2094 | // print_global_list(sem->not_top_m.root, 1); | ||
2095 | |||
2096 | binheap_delete(&node->node, &sem->not_top_m); | ||
2097 | |||
2098 | TRACE_CUR("Not-Top-M After:\n"); | ||
2099 | print_global_list(sem->not_top_m.root, 1); | ||
2100 | } | ||
2101 | } | ||
2102 | |||
2103 | |||
2104 | void print_donees(struct ikglp_semaphore *sem, struct binheap_node *n, int depth) | ||
2105 | { | ||
2106 | ikglp_donee_heap_node_t *donee_node; | ||
2107 | char padding[81] = " "; | ||
2108 | struct task_struct* donor = NULL; | ||
2109 | |||
2110 | if(n == NULL) { | ||
2111 | TRACE_CUR("+-> %p\n", NULL); | ||
2112 | return; | ||
2113 | } | ||
2114 | |||
2115 | donee_node = binheap_entry(n, ikglp_donee_heap_node_t, node); | ||
2116 | |||
2117 | if(depth*2 <= 80) | ||
2118 | padding[depth*2] = '\0'; | ||
2119 | |||
2120 | if(donee_node->donor_info) { | ||
2121 | donor = donee_node->donor_info->task; | ||
2122 | } | ||
2123 | |||
2124 | TRACE_CUR("%s+-> %s/%d (d: %s/%d) (fq: %d)\n", | ||
2125 | padding, | ||
2126 | donee_node->task->comm, | ||
2127 | donee_node->task->pid, | ||
2128 | (donor) ? donor->comm : "nil", | ||
2129 | (donor) ? donor->pid : -1, | ||
2130 | ikglp_get_idx(sem, donee_node->fq)); | ||
2131 | |||
2132 | if(n->left) print_donees(sem, n->left, depth+1); | ||
2133 | if(n->right) print_donees(sem, n->right, depth+1); | ||
2134 | } | ||
2135 | |||
2136 | |||
2137 | static void ikglp_add_donees(struct ikglp_semaphore *sem, struct fifo_queue *fq, struct task_struct *t, ikglp_donee_heap_node_t* node) | ||
2138 | { | ||
2139 | // TRACE_CUR("Adding %s/%d to donee list.\n", t->comm, t->pid); | ||
2140 | // TRACE_CUR("donees Before:\n"); | ||
2141 | // print_donees(sem, sem->donees.root, 1); | ||
2142 | |||
2143 | node->task = t; | ||
2144 | node->donor_info = NULL; | ||
2145 | node->fq = fq; | ||
2146 | INIT_BINHEAP_NODE(&node->node); | ||
2147 | |||
2148 | binheap_add(&node->node, &sem->donees, ikglp_donee_heap_node_t, node); | ||
2149 | |||
2150 | TRACE_CUR("donees After:\n"); | ||
2151 | print_donees(sem, sem->donees.root, 1); | ||
2152 | } | ||
2153 | |||
2154 | |||
2155 | static void ikglp_refresh_owners_prio_increase(struct task_struct *t, struct fifo_queue *fq, struct ikglp_semaphore *sem, unsigned long flags) | ||
2156 | { | ||
2157 | // priority of 't' has increased (note: 't' might already be hp_waiter). | ||
2158 | if ((t == fq->hp_waiter) || edf_higher_prio(t, fq->hp_waiter)) { | ||
2159 | struct task_struct *old_max_eff_prio; | ||
2160 | struct task_struct *new_max_eff_prio; | ||
2161 | struct task_struct *new_prio = NULL; | ||
2162 | struct task_struct *owner = fq->owner; | ||
2163 | |||
2164 | if(fq->hp_waiter) | ||
2165 | TRACE_TASK(t, "has higher prio than hp_waiter (%s/%d).\n", fq->hp_waiter->comm, fq->hp_waiter->pid); | ||
2166 | else | ||
2167 | TRACE_TASK(t, "has higher prio than hp_waiter (NIL).\n"); | ||
2168 | |||
2169 | if(owner) | ||
2170 | { | ||
2171 | raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock); | ||
2172 | |||
2173 | //TRACE_TASK(owner, "Heap Before:\n"); | ||
2174 | //print_hp_waiters(tsk_rt(owner)->hp_blocked_tasks.root, 0); | ||
2175 | |||
2176 | old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); | ||
2177 | |||
2178 | fq->hp_waiter = t; | ||
2179 | fq->nest.hp_waiter_eff_prio = effective_priority(fq->hp_waiter); | ||
2180 | |||
2181 | binheap_decrease(&fq->nest.hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks); | ||
2182 | |||
2183 | //TRACE_TASK(owner, "Heap After:\n"); | ||
2184 | //print_hp_waiters(tsk_rt(owner)->hp_blocked_tasks.root, 0); | ||
2185 | |||
2186 | new_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); | ||
2187 | |||
2188 | if(new_max_eff_prio != old_max_eff_prio) { | ||
2189 | TRACE_TASK(t, "is new hp_waiter.\n"); | ||
2190 | |||
2191 | if ((effective_priority(owner) == old_max_eff_prio) || | ||
2192 | (__edf_higher_prio(new_max_eff_prio, BASE, owner, EFFECTIVE))){ | ||
2193 | new_prio = new_max_eff_prio; | ||
2194 | } | ||
2195 | } | ||
2196 | else { | ||
2197 | TRACE_TASK(t, "no change in max_eff_prio of heap.\n"); | ||
2198 | } | ||
2199 | |||
2200 | if(new_prio) { | ||
2201 | // set new inheritance and propagate | ||
2202 | TRACE_TASK(t, "Effective priority changed for owner %s/%d to %s/%d\n", | ||
2203 | owner->comm, owner->pid, | ||
2204 | new_prio->comm, new_prio->pid); | ||
2205 | nested_increase_priority_inheritance(owner, new_prio, &sem->lock, flags); // unlocks lock. | ||
2206 | } | ||
2207 | else { | ||
2208 | TRACE_TASK(t, "No change in effective priority (is %s/%d). Propagation halted.\n", | ||
2209 | new_max_eff_prio->comm, new_max_eff_prio->pid); | ||
2210 | raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); | ||
2211 | unlock_fine_irqrestore(&sem->lock, flags); | ||
2212 | } | ||
2213 | } | ||
2214 | else { | ||
2215 | fq->hp_waiter = t; | ||
2216 | fq->nest.hp_waiter_eff_prio = effective_priority(fq->hp_waiter); | ||
2217 | |||
2218 | TRACE_TASK(t, "no owner??\n"); | ||
2219 | unlock_fine_irqrestore(&sem->lock, flags); | ||
2220 | } | ||
2221 | } | ||
2222 | else { | ||
2223 | TRACE_TASK(t, "hp_waiter is unaffected.\n"); | ||
2224 | unlock_fine_irqrestore(&sem->lock, flags); | ||
2225 | } | ||
2226 | } | ||
2227 | |||
2228 | // hp_waiter has decreased | ||
2229 | static void ikglp_refresh_owners_prio_decrease(struct fifo_queue *fq, struct ikglp_semaphore *sem, unsigned long flags) | ||
2230 | { | ||
2231 | struct task_struct *owner = fq->owner; | ||
2232 | |||
2233 | struct task_struct *old_max_eff_prio; | ||
2234 | struct task_struct *new_max_eff_prio; | ||
2235 | |||
2236 | if(!owner) { | ||
2237 | TRACE_CUR("No owner. Returning.\n"); | ||
2238 | unlock_fine_irqrestore(&sem->lock, flags); | ||
2239 | return; | ||
2240 | } | ||
2241 | |||
2242 | raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock); | ||
2243 | |||
2244 | old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); | ||
2245 | |||
2246 | binheap_delete(&fq->nest.hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks); | ||
2247 | fq->nest.hp_waiter_eff_prio = (fq->hp_waiter) ? effective_priority(fq->hp_waiter) : NULL; | ||
2248 | binheap_add(&fq->nest.hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks, struct nested_info, hp_binheap_node); | ||
2249 | |||
2250 | new_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); | ||
2251 | |||
2252 | if((old_max_eff_prio != new_max_eff_prio) && | ||
2253 | (effective_priority(owner) == old_max_eff_prio)) | ||
2254 | { | ||
2255 | // Need to set new effective_priority for owner | ||
2256 | struct task_struct *decreased_prio; | ||
2257 | |||
2258 | TRACE_CUR("Propagating decreased inheritance to holder of fq %d.\n", ikglp_get_idx(sem, fq)); | ||
2259 | |||
2260 | if(__edf_higher_prio(new_max_eff_prio, BASE, owner, BASE)) { | ||
2261 | TRACE_CUR("%s/%d has greater base priority than base priority of owner (%s/%d) of fq %d.\n", | ||
2262 | (new_max_eff_prio) ? new_max_eff_prio->comm : "nil", | ||
2263 | (new_max_eff_prio) ? new_max_eff_prio->pid : -1, | ||
2264 | owner->comm, | ||
2265 | owner->pid, | ||
2266 | ikglp_get_idx(sem, fq)); | ||
2267 | |||
2268 | decreased_prio = new_max_eff_prio; | ||
2269 | } | ||
2270 | else { | ||
2271 | TRACE_CUR("%s/%d has lesser base priority than base priority of owner (%s/%d) of fq %d.\n", | ||
2272 | (new_max_eff_prio) ? new_max_eff_prio->comm : "nil", | ||
2273 | (new_max_eff_prio) ? new_max_eff_prio->pid : -1, | ||
2274 | owner->comm, | ||
2275 | owner->pid, | ||
2276 | ikglp_get_idx(sem, fq)); | ||
2277 | |||
2278 | decreased_prio = NULL; | ||
2279 | } | ||
2280 | |||
2281 | // beware: recursion | ||
2282 | nested_decrease_priority_inheritance(owner, decreased_prio, &sem->lock, flags); // will unlock mutex->lock | ||
2283 | } | ||
2284 | else { | ||
2285 | TRACE_TASK(owner, "No need to propagate priority decrease forward.\n"); | ||
2286 | raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); | ||
2287 | unlock_fine_irqrestore(&sem->lock, flags); | ||
2288 | } | ||
2289 | } | ||
2290 | |||
2291 | |||
2292 | static void ikglp_remove_donation_from_owner(struct binheap_node *n, struct fifo_queue *fq, struct ikglp_semaphore *sem, unsigned long flags) | ||
2293 | { | ||
2294 | struct task_struct *owner = fq->owner; | ||
2295 | |||
2296 | struct task_struct *old_max_eff_prio; | ||
2297 | struct task_struct *new_max_eff_prio; | ||
2298 | |||
2299 | BUG_ON(!owner); | ||
2300 | |||
2301 | raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock); | ||
2302 | |||
2303 | old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); | ||
2304 | |||
2305 | binheap_delete(n, &tsk_rt(owner)->hp_blocked_tasks); | ||
2306 | |||
2307 | new_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); | ||
2308 | |||
2309 | if((old_max_eff_prio != new_max_eff_prio) && | ||
2310 | (effective_priority(owner) == old_max_eff_prio)) | ||
2311 | { | ||
2312 | // Need to set new effective_priority for owner | ||
2313 | struct task_struct *decreased_prio; | ||
2314 | |||
2315 | TRACE_CUR("Propagating decreased inheritance to holder of fq %d.\n", ikglp_get_idx(sem, fq)); | ||
2316 | |||
2317 | if(__edf_higher_prio(new_max_eff_prio, BASE, owner, BASE)) { | ||
2318 | TRACE_CUR("has greater base priority than base priority of owner of fq %d.\n", ikglp_get_idx(sem, fq)); | ||
2319 | decreased_prio = new_max_eff_prio; | ||
2320 | } | ||
2321 | else { | ||
2322 | TRACE_CUR("has lesser base priority than base priority of owner of fq %d.\n", ikglp_get_idx(sem, fq)); | ||
2323 | decreased_prio = NULL; | ||
2324 | } | ||
2325 | |||
2326 | // beware: recursion | ||
2327 | nested_decrease_priority_inheritance(owner, decreased_prio, &sem->lock, flags); // will unlock mutex->lock | ||
2328 | } | ||
2329 | else { | ||
2330 | TRACE_TASK(owner, "No need to propagate priority decrease forward.\n"); | ||
2331 | raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); | ||
2332 | unlock_fine_irqrestore(&sem->lock, flags); | ||
2333 | } | ||
2334 | } | ||
2335 | |||
2336 | static void ikglp_remove_donation_from_fq_waiter(struct task_struct *t, struct binheap_node *n) | ||
2337 | { | ||
2338 | struct task_struct *old_max_eff_prio; | ||
2339 | struct task_struct *new_max_eff_prio; | ||
2340 | |||
2341 | raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock); | ||
2342 | |||
2343 | old_max_eff_prio = top_priority(&tsk_rt(t)->hp_blocked_tasks); | ||
2344 | |||
2345 | binheap_delete(n, &tsk_rt(t)->hp_blocked_tasks); | ||
2346 | |||
2347 | new_max_eff_prio = top_priority(&tsk_rt(t)->hp_blocked_tasks); | ||
2348 | |||
2349 | if((old_max_eff_prio != new_max_eff_prio) && | ||
2350 | (effective_priority(t) == old_max_eff_prio)) | ||
2351 | { | ||
2352 | // Need to set new effective_priority for owner | ||
2353 | struct task_struct *decreased_prio; | ||
2354 | |||
2355 | if(__edf_higher_prio(new_max_eff_prio, BASE, t, BASE)) { | ||
2356 | decreased_prio = new_max_eff_prio; | ||
2357 | } | ||
2358 | else { | ||
2359 | decreased_prio = NULL; | ||
2360 | } | ||
2361 | |||
2362 | tsk_rt(t)->inh_task = decreased_prio; | ||
2363 | } | ||
2364 | |||
2365 | raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); | ||
2366 | } | ||
2367 | |||
2368 | static void ikglp_get_immediate(struct task_struct* t, struct fifo_queue *fq, struct ikglp_semaphore *sem, unsigned long flags) | ||
2369 | { | ||
2370 | // resource available now | ||
2371 | TRACE_CUR("queue %d: acquired immediately\n", ikglp_get_idx(sem, fq)); | ||
2372 | |||
2373 | fq->owner = t; | ||
2374 | |||
2375 | raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock); | ||
2376 | binheap_add(&fq->nest.hp_binheap_node, &tsk_rt(t)->hp_blocked_tasks, struct nested_info, hp_binheap_node); | ||
2377 | raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); | ||
2378 | |||
2379 | ++(fq->count); | ||
2380 | |||
2381 | ikglp_add_global_list(sem, t, &fq->global_heap_node); | ||
2382 | ikglp_add_donees(sem, fq, t, &fq->donee_heap_node); | ||
2383 | |||
2384 | sem->shortest_fifo_queue = ikglp_find_shortest(sem, sem->shortest_fifo_queue); | ||
2385 | |||
2386 | unlock_fine_irqrestore(&sem->lock, flags); | ||
2387 | } | ||
2388 | |||
2389 | |||
2390 | |||
2391 | static void __ikglp_enqueue_on_fq( | ||
2392 | struct ikglp_semaphore *sem, | ||
2393 | struct fifo_queue* fq, | ||
2394 | struct task_struct* t, | ||
2395 | wait_queue_t *wait, | ||
2396 | ikglp_heap_node_t *global_heap_node, | ||
2397 | ikglp_donee_heap_node_t *donee_heap_node) | ||
2398 | { | ||
2399 | /* resource is not free => must suspend and wait */ | ||
2400 | TRACE_TASK(t, "Enqueuing on fq %d.\n", | ||
2401 | ikglp_get_idx(sem, fq)); | ||
2402 | |||
2403 | init_waitqueue_entry(wait, t); | ||
2404 | |||
2405 | __add_wait_queue_tail_exclusive(&fq->wait, wait); | ||
2406 | |||
2407 | ++(fq->count); | ||
2408 | |||
2409 | // update global list. | ||
2410 | if(likely(global_heap_node)) { | ||
2411 | if(binheap_is_in_heap(&global_heap_node->node)) { | ||
2412 | WARN_ON(1); | ||
2413 | ikglp_del_global_list(sem, t, global_heap_node); | ||
2414 | } | ||
2415 | ikglp_add_global_list(sem, t, global_heap_node); | ||
2416 | } | ||
2417 | // update donor eligibility list. | ||
2418 | if(likely(donee_heap_node)) { | ||
2419 | // if(binheap_is_in_heap(&donee_heap_node->node)) { | ||
2420 | // WARN_ON(1); | ||
2421 | // } | ||
2422 | ikglp_add_donees(sem, fq, t, donee_heap_node); | ||
2423 | } | ||
2424 | |||
2425 | if(likely(sem->shortest_fifo_queue == fq)) { | ||
2426 | sem->shortest_fifo_queue = ikglp_find_shortest(sem, fq); | ||
2427 | } | ||
2428 | |||
2429 | TRACE_TASK(t, "shortest queue is now %d\n", ikglp_get_idx(sem, sem->shortest_fifo_queue)); | ||
2430 | } | ||
2431 | |||
2432 | |||
2433 | static void ikglp_enqueue_on_fq( | ||
2434 | struct ikglp_semaphore *sem, | ||
2435 | struct fifo_queue *fq, | ||
2436 | ikglp_wait_state_t *wait, | ||
2437 | unsigned long flags) | ||
2438 | { | ||
2439 | /* resource is not free => must suspend and wait */ | ||
2440 | TRACE_TASK(wait->task, "queue %d: Resource is not free => must suspend and wait.\n", | ||
2441 | ikglp_get_idx(sem, fq)); | ||
2442 | |||
2443 | INIT_BINHEAP_NODE(&wait->global_heap_node.node); | ||
2444 | INIT_BINHEAP_NODE(&wait->donee_heap_node.node); | ||
2445 | |||
2446 | __ikglp_enqueue_on_fq(sem, fq, wait->task, &wait->fq_node, &wait->global_heap_node, &wait->donee_heap_node); | ||
2447 | |||
2448 | ikglp_refresh_owners_prio_increase(wait->task, fq, sem, flags); // unlocks sem->lock | ||
2449 | } | ||
2450 | |||
2451 | |||
2452 | static void __ikglp_enqueue_on_pq(struct ikglp_semaphore *sem, ikglp_wait_state_t *wait) | ||
2453 | { | ||
2454 | TRACE_TASK(wait->task, "goes to PQ.\n"); | ||
2455 | |||
2456 | wait->pq_node.task = wait->task; // copy over task (little redundant...) | ||
2457 | |||
2458 | binheap_add(&wait->pq_node.node, &sem->priority_queue, ikglp_heap_node_t, node); | ||
2459 | } | ||
2460 | |||
2461 | static void ikglp_enqueue_on_pq(struct ikglp_semaphore *sem, ikglp_wait_state_t *wait) | ||
2462 | { | ||
2463 | INIT_BINHEAP_NODE(&wait->global_heap_node.node); | ||
2464 | INIT_BINHEAP_NODE(&wait->donee_heap_node.node); | ||
2465 | INIT_BINHEAP_NODE(&wait->pq_node.node); | ||
2466 | |||
2467 | __ikglp_enqueue_on_pq(sem, wait); | ||
2468 | } | ||
2469 | |||
2470 | void print_donors(struct binheap_node *n, int depth) | ||
2471 | { | ||
2472 | ikglp_wait_state_t *donor_node; | ||
2473 | char padding[81] = " "; | ||
2474 | |||
2475 | if(n == NULL) { | ||
2476 | TRACE_CUR("+-> %p\n", NULL); | ||
2477 | return; | ||
2478 | } | ||
2479 | |||
2480 | donor_node = binheap_entry(n, ikglp_wait_state_t, node); | ||
2481 | |||
2482 | if(depth*2 <= 80) | ||
2483 | padding[depth*2] = '\0'; | ||
2484 | |||
2485 | |||
2486 | TRACE_CUR("%s+-> %s/%d (donee: %s/%d)\n", | ||
2487 | padding, | ||
2488 | donor_node->task->comm, | ||
2489 | donor_node->task->pid, | ||
2490 | donor_node->donee_info->task->comm, | ||
2491 | donor_node->donee_info->task->pid); | ||
2492 | |||
2493 | if(n->left) print_donors(n->left, depth+1); | ||
2494 | if(n->right) print_donors(n->right, depth+1); | ||
2495 | } | ||
2496 | |||
2497 | |||
2498 | static void ikglp_enqueue_on_donor(struct ikglp_semaphore *sem, ikglp_wait_state_t* wait, unsigned long flags) | ||
2499 | { | ||
2500 | struct task_struct *t = wait->task; | ||
2501 | ikglp_donee_heap_node_t *donee_node = NULL; | ||
2502 | struct task_struct *donee; | ||
2503 | |||
2504 | struct task_struct *old_max_eff_prio; | ||
2505 | struct task_struct *new_max_eff_prio; | ||
2506 | struct task_struct *new_prio = NULL; | ||
2507 | |||
2508 | INIT_BINHEAP_NODE(&wait->global_heap_node.node); | ||
2509 | INIT_BINHEAP_NODE(&wait->donee_heap_node.node); | ||
2510 | INIT_BINHEAP_NODE(&wait->pq_node.node); | ||
2511 | INIT_BINHEAP_NODE(&wait->node); | ||
2512 | |||
2513 | // TRACE_CUR("Adding %s/%d as donor.\n", t->comm, t->pid); | ||
2514 | // TRACE_CUR("donors Before:\n"); | ||
2515 | // print_donors(sem->donors.root, 1); | ||
2516 | |||
2517 | // Add donor to the global list. | ||
2518 | ikglp_add_global_list(sem, t, &wait->global_heap_node); | ||
2519 | |||
2520 | // Select a donee | ||
2521 | donee_node = binheap_top_entry(&sem->donees, ikglp_donee_heap_node_t, node); | ||
2522 | donee = donee_node->task; | ||
2523 | |||
2524 | TRACE_TASK(t, "Donee selected: %s/%d\n", donee->comm, donee->pid); | ||
2525 | |||
2526 | TRACE_CUR("Temporarily removing %s/%d to donee list.\n", donee->comm, donee->pid); | ||
2527 | // TRACE_CUR("donees Before:\n"); | ||
2528 | // print_donees(sem, sem->donees.root, 1); | ||
2529 | |||
2530 | binheap_delete_root(&sem->donees, ikglp_donee_heap_node_t, node); // will re-add it shortly | ||
2531 | |||
2532 | TRACE_CUR("donees After:\n"); | ||
2533 | print_donees(sem, sem->donees.root, 1); | ||
2534 | |||
2535 | |||
2536 | wait->donee_info = donee_node; | ||
2537 | |||
2538 | // Add t to donor heap. | ||
2539 | binheap_add(&wait->node, &sem->donors, ikglp_wait_state_t, node); | ||
2540 | |||
2541 | // Now adjust the donee's priority. | ||
2542 | |||
2543 | // Lock the donee's inheritance heap. | ||
2544 | raw_spin_lock(&tsk_rt(donee)->hp_blocked_tasks_lock); | ||
2545 | |||
2546 | old_max_eff_prio = top_priority(&tsk_rt(donee)->hp_blocked_tasks); | ||
2547 | |||
2548 | if(donee_node->donor_info) { | ||
2549 | // Steal donation relation. Evict old donor to PQ. | ||
2550 | |||
2551 | // Remove old donor from donor heap | ||
2552 | ikglp_wait_state_t *old_wait = donee_node->donor_info; | ||
2553 | struct task_struct *old_donor = old_wait->task; | ||
2554 | |||
2555 | TRACE_TASK(t, "Donee (%s/%d) had donor %s/%d. Moving old donor to PQ.\n", | ||
2556 | donee->comm, donee->pid, old_donor->comm, old_donor->pid); | ||
2557 | |||
2558 | binheap_delete(&old_wait->node, &sem->donors); | ||
2559 | |||
2560 | // Remove donation from donee's inheritance heap. | ||
2561 | binheap_delete(&old_wait->prio_donation.hp_binheap_node, &tsk_rt(donee)->hp_blocked_tasks); | ||
2562 | // WARNING: have not updated inh_prio! | ||
2563 | |||
2564 | // Add old donor to PQ. | ||
2565 | __ikglp_enqueue_on_pq(sem, old_wait); | ||
2566 | |||
2567 | // Remove old donor from the global heap. | ||
2568 | ikglp_del_global_list(sem, old_donor, &old_wait->global_heap_node); | ||
2569 | } | ||
2570 | |||
2571 | // Add back donee's node to the donees heap with increased prio | ||
2572 | donee_node->donor_info = wait; | ||
2573 | INIT_BINHEAP_NODE(&donee_node->node); | ||
2574 | |||
2575 | |||
2576 | TRACE_CUR("Adding %s/%d back to donee list.\n", donee->comm, donee->pid); | ||
2577 | TRACE_CUR("donees Before:\n"); | ||
2578 | print_donees(sem, sem->donees.root, 1); | ||
2579 | |||
2580 | binheap_add(&donee_node->node, &sem->donees, ikglp_donee_heap_node_t, node); | ||
2581 | |||
2582 | TRACE_CUR("donees After:\n"); | ||
2583 | print_donees(sem, sem->donees.root, 1); | ||
2584 | |||
2585 | |||
2586 | |||
2587 | // Add an inheritance/donation to the donee's inheritance heap. | ||
2588 | wait->prio_donation.lock = (struct litmus_lock*)sem; | ||
2589 | wait->prio_donation.hp_waiter_eff_prio = t; | ||
2590 | wait->prio_donation.hp_waiter_ptr = NULL; | ||
2591 | INIT_BINHEAP_NODE(&wait->prio_donation.hp_binheap_node); | ||
2592 | |||
2593 | binheap_add(&wait->prio_donation.hp_binheap_node, &tsk_rt(donee)->hp_blocked_tasks, struct nested_info, hp_binheap_node); | ||
2594 | |||
2595 | new_max_eff_prio = top_priority(&tsk_rt(donee)->hp_blocked_tasks); | ||
2596 | |||
2597 | if(new_max_eff_prio != old_max_eff_prio) { | ||
2598 | if ((effective_priority(donee) == old_max_eff_prio) || | ||
2599 | (__edf_higher_prio(new_max_eff_prio, BASE, donee, EFFECTIVE))){ | ||
2600 | TRACE_TASK(t, "Donation increases %s/%d's effective priority\n", donee->comm, donee->pid); | ||
2601 | new_prio = new_max_eff_prio; | ||
2602 | } | ||
2603 | // else { | ||
2604 | // // should be bug. donor would not be in top-m. | ||
2605 | // TRACE_TASK(t, "Donation is not greater than base prio of %s/%d?\n", donee->comm, donee->pid); | ||
2606 | // WARN_ON(1); | ||
2607 | // } | ||
2608 | // } | ||
2609 | // else { | ||
2610 | // // should be bug. donor would not be in top-m. | ||
2611 | // TRACE_TASK(t, "No change in %s/%d's inheritance heap?\n", donee->comm, donee->pid); | ||
2612 | // WARN_ON(1); | ||
2613 | } | ||
2614 | |||
2615 | if(new_prio) { | ||
2616 | struct fifo_queue *donee_fq = donee_node->fq; | ||
2617 | |||
2618 | if(donee != donee_fq->owner) { | ||
2619 | TRACE_TASK(t, "%s/%d is not the owner. Propagating priority to owner %s/%d.\n", | ||
2620 | donee->comm, donee->pid, | ||
2621 | donee_fq->owner->comm, donee_fq->owner->pid); | ||
2622 | |||
2623 | raw_spin_unlock(&tsk_rt(donee)->hp_blocked_tasks_lock); | ||
2624 | ikglp_refresh_owners_prio_increase(donee, donee_fq, sem, flags); // unlocks sem->lock | ||
2625 | } | ||
2626 | else { | ||
2627 | TRACE_TASK(t, "%s/%d is the owner. Propagating priority immediately.\n", | ||
2628 | donee->comm, donee->pid); | ||
2629 | nested_increase_priority_inheritance(donee, new_prio, &sem->lock, flags); // unlocks sem->lock and donee's heap lock | ||
2630 | } | ||
2631 | } | ||
2632 | else { | ||
2633 | TRACE_TASK(t, "No change in effective priority (it is %s/%d). BUG?\n", | ||
2634 | new_max_eff_prio->comm, new_max_eff_prio->pid); | ||
2635 | raw_spin_unlock(&tsk_rt(donee)->hp_blocked_tasks_lock); | ||
2636 | unlock_fine_irqrestore(&sem->lock, flags); | ||
2637 | } | ||
2638 | |||
2639 | |||
2640 | TRACE_CUR("donors After:\n"); | ||
2641 | print_donors(sem->donors.root, 1); | ||
2642 | } | ||
2643 | |||
2644 | |||
2645 | static int gsnedf_ikglp_lock(struct litmus_lock* l) | ||
2646 | { | ||
2647 | struct task_struct* t = current; | ||
2648 | struct ikglp_semaphore *sem = ikglp_from_lock(l); | ||
2649 | unsigned long flags = 0, real_flags; | ||
2650 | struct fifo_queue *fq = NULL; | ||
2651 | int replica = -EINVAL; | ||
2652 | |||
2653 | ikglp_wait_state_t wait; | ||
2654 | |||
2655 | if (!is_realtime(t)) | ||
2656 | return -EPERM; | ||
2657 | |||
2658 | raw_spin_lock_irqsave(&sem->real_lock, real_flags); | ||
2659 | |||
2660 | lock_global_irqsave(&dgl_lock, flags); | ||
2661 | lock_fine_irqsave(&sem->lock, flags); | ||
2662 | |||
2663 | if(sem->shortest_fifo_queue->count == 0) { | ||
2664 | // take available resource | ||
2665 | replica = ikglp_get_idx(sem, sem->shortest_fifo_queue); | ||
2666 | |||
2667 | ikglp_get_immediate(t, sem->shortest_fifo_queue, sem, flags); // unlocks sem->lock | ||
2668 | |||
2669 | unlock_global_irqrestore(&dgl_lock, flags); | ||
2670 | raw_spin_unlock_irqrestore(&sem->real_lock, real_flags); | ||
2671 | } | ||
2672 | else | ||
2673 | { | ||
2674 | // we have to suspend. | ||
2675 | |||
2676 | wait.task = t; // THIS IS CRITICALLY IMPORTANT!!! | ||
2677 | |||
2678 | tsk_rt(t)->blocked_lock = (struct litmus_lock*)sem; // record where we are blocked | ||
2679 | mb(); | ||
2680 | |||
2681 | /* FIXME: interruptible would be nice some day */ | ||
2682 | set_task_state(t, TASK_UNINTERRUPTIBLE); | ||
2683 | |||
2684 | if(sem->shortest_fifo_queue->count < sem->max_fifo_len) { | ||
2685 | // enqueue on fq | ||
2686 | ikglp_enqueue_on_fq(sem, sem->shortest_fifo_queue, &wait, flags); // unlocks sem->lock | ||
2687 | } | ||
2688 | else { | ||
2689 | |||
2690 | TRACE_CUR("IKGLP fifo queues are full.\n"); | ||
2691 | |||
2692 | // no room in fifos. Go to PQ or donors. | ||
2693 | |||
2694 | if(__edf_higher_prio(ikglp_mth_highest(sem), BASE, t, BASE)) { | ||
2695 | // enqueue on PQ | ||
2696 | ikglp_enqueue_on_pq(sem, &wait); | ||
2697 | unlock_fine_irqrestore(&sem->lock, flags); | ||
2698 | } | ||
2699 | else { | ||
2700 | // enqueue as donor | ||
2701 | ikglp_enqueue_on_donor(sem, &wait, flags); // unlocks sem->lock | ||
2702 | } | ||
2703 | } | ||
2704 | |||
2705 | unlock_global_irqrestore(&dgl_lock, flags); | ||
2706 | raw_spin_unlock_irqrestore(&sem->real_lock, real_flags); | ||
2707 | |||
2708 | TS_LOCK_SUSPEND; | ||
2709 | |||
2710 | schedule(); | ||
2711 | |||
2712 | TS_LOCK_RESUME; | ||
2713 | |||
2714 | fq = ikglp_get_queue(sem, t); | ||
2715 | BUG_ON(!fq); | ||
2716 | |||
2717 | replica = ikglp_get_idx(sem, fq); | ||
2718 | } | ||
2719 | |||
2720 | TRACE_CUR("Acquired lock %d, queue %d\n", | ||
2721 | l->ident, replica); | ||
2722 | |||
2723 | return replica; | ||
2724 | } | ||
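
Stripped of locking and bookkeeping, the lock path above routes a request down one of four ways, in this order:

/* decision skeleton of gsnedf_ikglp_lock(); predicates as used above */
if (sem->shortest_fifo_queue->count == 0) {
	/* a replica is free: acquire immediately */
} else if (sem->shortest_fifo_queue->count < sem->max_fifo_len) {
	/* some FIFO queue has room: suspend in FIFO order */
} else if (__edf_higher_prio(ikglp_mth_highest(sem), BASE, t, BASE)) {
	/* all FIFOs full and t is outside the top-m: park in the priority queue */
} else {
	/* t is among the top-m: enqueue as a priority donor */
}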
2725 | |||
2726 | static void ikglp_move_donor_to_fq(struct ikglp_semaphore *sem, struct fifo_queue *fq, ikglp_wait_state_t *donor_info) | ||
2727 | { | ||
2728 | struct task_struct *t = donor_info->task; | ||
2729 | |||
2730 | TRACE_CUR("Donor %s/%d being moved to fq %d\n", | ||
2731 | t->comm, | ||
2732 | t->pid, | ||
2733 | ikglp_get_idx(sem, fq)); | ||
2734 | |||
2735 | binheap_delete(&donor_info->node, &sem->donors); | ||
2736 | |||
2737 | __ikglp_enqueue_on_fq(sem, fq, t, | ||
2738 | &donor_info->fq_node, | ||
2739 | NULL, // already in global_list, so pass null to prevent adding 2nd time. | ||
2740 | //&donor_info->global_heap_node, | ||
2741 | &donor_info->donee_heap_node); | ||
2742 | |||
2743 | // warning: | ||
2744 | // ikglp_update_owners_prio(t, fq, sem, flags) has not been called. | ||
2745 | } | ||
2746 | |||
2747 | static void ikglp_move_pq_to_fq(struct ikglp_semaphore *sem, struct fifo_queue *fq, ikglp_wait_state_t *wait) | ||
2748 | { | ||
2749 | struct task_struct *t = wait->task; | ||
2750 | |||
2751 | TRACE_CUR("PQ request %s/%d being moved to fq %d\n", | ||
2752 | t->comm, | ||
2753 | t->pid, | ||
2754 | ikglp_get_idx(sem, fq)); | ||
2755 | |||
2756 | binheap_delete(&wait->pq_node.node, &sem->priority_queue); | ||
2757 | |||
2758 | __ikglp_enqueue_on_fq(sem, fq, t, | ||
2759 | &wait->fq_node, | ||
2760 | &wait->global_heap_node, | ||
2761 | &wait->donee_heap_node); | ||
2762 | // warning: | ||
2763 | // ikglp_update_owners_prio(t, fq, sem, flags) has not been called. | ||
2764 | } | ||
2765 | |||
2766 | static ikglp_wait_state_t* ikglp_find_hp_waiter_to_steal(struct ikglp_semaphore* sem) | ||
2767 | { | ||
2768 | /* must hold sem->lock */ | ||
2769 | |||
2770 | struct fifo_queue *fq = NULL; | ||
2771 | struct list_head *pos; | ||
2772 | struct task_struct *queued; | ||
2773 | int i; | ||
2774 | |||
2775 | for(i = 0; i < sem->nr_replicas; ++i) { | ||
2776 | if( (sem->fifo_queues[i].count > 1) && | ||
2777 | (!fq || edf_higher_prio(sem->fifo_queues[i].hp_waiter, fq->hp_waiter)) ) { | ||
2778 | |||
2779 | TRACE_CUR("hp_waiter on fq %d (%s/%d) has higher prio than hp_waiter on fq %d (%s/%d)\n", | ||
2780 | ikglp_get_idx(sem, &sem->fifo_queues[i]), | ||
2781 | sem->fifo_queues[i].hp_waiter->comm, | ||
2782 | sem->fifo_queues[i].hp_waiter->pid, | ||
2783 | (fq) ? ikglp_get_idx(sem, fq) : -1, | ||
2784 | (fq) ? ((fq->hp_waiter) ? fq->hp_waiter->comm : "nil") : "nilXX", | ||
2785 | (fq) ? ((fq->hp_waiter) ? fq->hp_waiter->pid : -1) : -2); | ||
2786 | |||
2787 | fq = &sem->fifo_queues[i]; | ||
2788 | |||
2789 | WARN_ON(!(fq->hp_waiter)); | ||
2790 | } | ||
2791 | } | ||
2792 | |||
2793 | if(fq) { | ||
2794 | struct task_struct *max_hp = fq->hp_waiter; | ||
2795 | ikglp_wait_state_t* ret = NULL; | ||
2796 | |||
2797 | TRACE_CUR("Searching for %s/%d on fq %d\n", | ||
2798 | max_hp->comm, | ||
2799 | max_hp->pid, | ||
2800 | ikglp_get_idx(sem, fq)); | ||
2801 | |||
2802 | BUG_ON(!max_hp); | ||
2803 | |||
2804 | list_for_each(pos, &fq->wait.task_list) { | ||
2805 | wait_queue_t *wait = list_entry(pos, wait_queue_t, task_list); | ||
2806 | |||
2807 | queued = (struct task_struct*) wait->private; | ||
2808 | |||
2809 | TRACE_CUR("fq %d entry: %s/%d\n", ikglp_get_idx(sem, fq), queued->comm, queued->pid); | ||
2810 | |||
2811 | /* Compare task prios, find high prio task. */ | ||
2812 | if (queued == max_hp) { | ||
2813 | TRACE_CUR("Found it!\n"); | ||
2814 | ret = container_of(wait, ikglp_wait_state_t, fq_node); | ||
2815 | } | ||
2816 | } | ||
2817 | |||
2818 | // list_for_each(pos, &fq->wait.task_list) { | ||
2819 | // wait_queue_t *wait = list_entry(pos, wait_queue_t, task_list); | ||
2820 | // | ||
2821 | // queued = (struct task_struct*) wait->private; | ||
2822 | // /* Compare task prios, find high prio task. */ | ||
2823 | // if (queued == max_hp) { | ||
2824 | // TRACE_CUR("Found it!\n"); | ||
2825 | // return container_of(wait, ikglp_wait_state_t, fq_node); | ||
2826 | // } | ||
2827 | // } | ||
2828 | |||
2829 | if(!ret) { | ||
2830 | WARN_ON(1); | ||
2831 | } | ||
2832 | |||
2833 | return ret; | ||
2834 | } | ||
2835 | |||
2836 | return(NULL); | ||
2837 | } | ||
2838 | |||
2839 | static void ikglp_steal_to_fq(struct ikglp_semaphore *sem, struct fifo_queue *fq, ikglp_wait_state_t *fq_wait) | ||
2840 | { | ||
2841 | struct task_struct *t = fq_wait->task; | ||
2842 | struct fifo_queue *fq_steal = fq_wait->donee_heap_node.fq; | ||
2843 | |||
2844 | WARN_ON(t != fq_steal->hp_waiter); | ||
2845 | |||
2846 | TRACE_CUR("FQ request %s/%d being moved to fq %d\n", | ||
2847 | t->comm, | ||
2848 | t->pid, | ||
2849 | ikglp_get_idx(sem, fq)); | ||
2850 | |||
2851 | fq_wait->donee_heap_node.fq = fq; // just to be safe | ||
2852 | |||
2853 | |||
2854 | __remove_wait_queue(&fq_steal->wait, &fq_wait->fq_node); | ||
2855 | --(fq_steal->count); | ||
2856 | |||
2857 | fq_steal->hp_waiter = ikglp_find_hp_waiter(fq_steal, NULL); | ||
2858 | TRACE_TASK(t, "New hp_waiter for fq %d is %s/%d!\n", | ||
2859 | ikglp_get_idx(sem, fq_steal), | ||
2860 | (fq_steal->hp_waiter) ? fq_steal->hp_waiter->comm : "nil", | ||
2861 | (fq_steal->hp_waiter) ? fq_steal->hp_waiter->pid : -1); | ||
2862 | |||
2863 | |||
2864 | // Update shortest. | ||
2865 | if(fq_steal->count < sem->shortest_fifo_queue->count) { | ||
2866 | sem->shortest_fifo_queue = fq_steal; | ||
2867 | } | ||
2868 | |||
2869 | __ikglp_enqueue_on_fq(sem, fq, t, | ||
2870 | &fq_wait->fq_node, | ||
2871 | NULL, | ||
2872 | NULL); | ||
2873 | |||
2874 | // warning: We have not checked the priority inheritance of fq's owner yet. | ||
2875 | } | ||
2876 | |||
2877 | |||
2878 | static void ikglp_migrate_fq_to_owner_heap_nodes(struct ikglp_semaphore *sem, struct fifo_queue *fq, ikglp_wait_state_t *old_wait) | ||
2879 | { | ||
2880 | struct task_struct *t = old_wait->task; | ||
2881 | |||
2882 | BUG_ON(old_wait->donee_heap_node.fq != fq); | ||
2883 | |||
2884 | TRACE_TASK(t, "Migrating wait_state to memory of queue %d.\n", ikglp_get_idx(sem, fq)); | ||
2885 | |||
2886 | // need to migrate global_heap_node and donee_heap_node off of the stack | ||
2887 | // to the nodes allocated for the owner of this fq. | ||
2888 | |||
2889 | // TODO: Enhance binheap() to perform this operation in place. | ||
2890 | |||
2891 | ikglp_del_global_list(sem, t, &old_wait->global_heap_node); // remove | ||
2892 | fq->global_heap_node = old_wait->global_heap_node; // copy | ||
2893 | ikglp_add_global_list(sem, t, &fq->global_heap_node); // re-add | ||
2894 | |||
2895 | binheap_delete(&old_wait->donee_heap_node.node, &sem->donees); // remove | ||
2896 | fq->donee_heap_node = old_wait->donee_heap_node; // copy | ||
2897 | |||
2898 | if(fq->donee_heap_node.donor_info) { | ||
2899 | // let donor know that our location has changed | ||
2900 | BUG_ON(fq->donee_heap_node.donor_info->donee_info->task != t); // validate cross-link | ||
2901 | fq->donee_heap_node.donor_info->donee_info = &fq->donee_heap_node; | ||
2902 | } | ||
2903 | INIT_BINHEAP_NODE(&fq->donee_heap_node.node); | ||
2904 | binheap_add(&fq->donee_heap_node.node, &sem->donees, ikglp_donee_heap_node_t, node); // re-add | ||
2905 | } | ||
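
This migration is needed because a waiter's ikglp_wait_state_t lives on its own stack (the local 'wait' in gsnedf_ikglp_lock()); once the waiter becomes a replica holder and resumes, those nodes must outlive its stack frame, so they are copied into storage owned by the fifo_queue. The general shape of moving a live binheap node to new storage, as done above for the donee node (old_node standing for the stack copy, new_node for the fq-owned one):

binheap_delete(&old_node->node, &sem->donees); /* unlink while still valid */
*new_node = *old_node;                         /* copy the payload */
if (new_node->donor_info)                      /* re-point the cross-link */
	new_node->donor_info->donee_info = new_node;
INIT_BINHEAP_NODE(&new_node->node);            /* copied heap links are stale */
binheap_add(&new_node->node, &sem->donees, ikglp_donee_heap_node_t, node);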
2906 | |||
2907 | static int gsnedf_ikglp_unlock(struct litmus_lock* l) | ||
2908 | { | ||
2909 | struct ikglp_semaphore *sem = ikglp_from_lock(l); | ||
2910 | struct task_struct *t = current; | ||
2911 | struct task_struct *donee = NULL; | ||
2912 | struct task_struct *next = NULL; | ||
2913 | struct task_struct *new_on_fq = NULL; | ||
2914 | |||
2915 | ikglp_wait_state_t *other_donor_info = NULL; | ||
2916 | struct fifo_queue *to_steal = NULL; | ||
2917 | struct fifo_queue *fq; | ||
2918 | |||
2919 | unsigned long flags = 0, real_flags; | ||
2920 | |||
2921 | int err = 0; | ||
2922 | |||
2923 | raw_spin_lock_irqsave(&sem->real_lock, real_flags); | ||
2924 | |||
2925 | lock_global_irqsave(&dgl_lock, flags); // TODO: Push this deeper | ||
2926 | lock_fine_irqsave(&sem->lock, flags); | ||
2927 | |||
2928 | fq = ikglp_get_queue(sem, t); // returns NULL if 't' is not owner. | ||
2929 | |||
2930 | if (!fq) { | ||
2931 | err = -EINVAL; | ||
2932 | goto out; | ||
2933 | } | ||
2934 | |||
2935 | TRACE_TASK(t, "Freeing replica %d.\n", ikglp_get_idx(sem, fq)); | ||
2936 | |||
2937 | |||
2938 | // Remove 't' from the heaps, but data in nodes will still be good. | ||
2939 | ikglp_del_global_list(sem, t, &fq->global_heap_node); | ||
2940 | binheap_delete(&fq->donee_heap_node.node, &sem->donees); | ||
2941 | |||
2942 | // Move the next request into the FQ and update heaps as needed. | ||
2943 | // We defer re-evaluation of priorities to later in the function. | ||
2944 | if(fq->donee_heap_node.donor_info) { // move my donor to FQ | ||
2945 | ikglp_wait_state_t *donor_info = fq->donee_heap_node.donor_info; | ||
2946 | |||
2947 | new_on_fq = donor_info->task; | ||
2948 | |||
2949 | TRACE_TASK(t, "Moving MY donor (%s/%d) to fq %d.\n", | ||
2950 | new_on_fq->comm, new_on_fq->pid, | ||
2951 | ikglp_get_idx(sem, fq)); | ||
2952 | // donor moved to FQ | ||
2953 | donee = t; | ||
2954 | ikglp_move_donor_to_fq(sem, fq, donor_info); | ||
2955 | // TODO: Terminate donation | ||
2956 | } | ||
2957 | else if(!binheap_empty(&sem->donors)) { // t has no donor of its own, | ||
2958 | // so move the highest-priority donor to the FQ | ||
2959 | other_donor_info = binheap_top_entry(&sem->donors, ikglp_wait_state_t, node); | ||
2960 | |||
2961 | new_on_fq = other_donor_info->task; | ||
2962 | donee = other_donor_info->donee_info->task; | ||
2963 | |||
2964 | // update the donee's heap position. | ||
2965 | other_donor_info->donee_info->donor_info = NULL; // clear the cross-link | ||
2966 | binheap_decrease(&other_donor_info->donee_info->node, &sem->donees); | ||
2967 | |||
2968 | |||
2969 | TRACE_TASK(t, "Moving a donor (%s/%d) to fq %d.\n", | ||
2970 | new_on_fq->comm, new_on_fq->pid, | ||
2971 | ikglp_get_idx(sem, fq)); | ||
2972 | |||
2973 | ikglp_move_donor_to_fq(sem, fq, other_donor_info); | ||
2974 | |||
2975 | // TODO: Terminate donation | ||
2976 | } | ||
2977 | else if(!binheap_empty(&sem->priority_queue)) { // No donors, so move the PQ head | ||
2978 | ikglp_heap_node_t *pq_node = binheap_top_entry(&sem->priority_queue, ikglp_heap_node_t, node); | ||
2979 | ikglp_wait_state_t *pq_wait = container_of(pq_node, ikglp_wait_state_t, pq_node); | ||
2980 | |||
2981 | new_on_fq = pq_wait->task; | ||
2982 | |||
2983 | TRACE_TASK(t, "Moving a pq waiter (%s/%d) to fq %d.\n", | ||
2984 | new_on_fq->comm, new_on_fq->pid, | ||
2985 | ikglp_get_idx(sem, fq)); | ||
2986 | |||
2987 | ikglp_move_pq_to_fq(sem, fq, pq_wait); | ||
2988 | } | ||
2989 | else if(fq->count == 1) { // No donors or PQ waiters, and this queue | ||
2990 | // would otherwise go empty, so try to steal. | ||
2991 | ikglp_wait_state_t *fq_wait; | ||
2992 | |||
2993 | TRACE_TASK(t, "Looking to steal a request for fq %d...\n", | ||
2994 | ikglp_get_idx(sem, fq)); | ||
2995 | |||
2996 | fq_wait = ikglp_find_hp_waiter_to_steal(sem); | ||
2997 | if(fq_wait) { | ||
2998 | to_steal = fq_wait->donee_heap_node.fq; | ||
2999 | |||
3000 | new_on_fq = fq_wait->task; | ||
3001 | |||
3002 | TRACE_TASK(t, "Found %s/%d of fq %d to steal for fq %d...\n", | ||
3003 | new_on_fq->comm, new_on_fq->pid, | ||
3004 | ikglp_get_idx(sem, to_steal), | ||
3005 | ikglp_get_idx(sem, fq)); | ||
3006 | |||
3007 | ikglp_steal_to_fq(sem, fq, fq_wait); | ||
3008 | |||
3009 | // TODO: Update prio of old queue. | ||
3010 | } | ||
3011 | else { | ||
3012 | TRACE_TASK(t, "Found nothing to steal for fq %d.\n", | ||
3013 | ikglp_get_idx(sem, fq)); | ||
3014 | } | ||
3015 | } | ||
3016 | else { // move no one | ||
3017 | } | ||
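| // To summarize: the vacated slot is refilled, in order of preference, | ||
| // by t's own donor, then the highest-priority donor overall, then the | ||
| // highest-priority PQ waiter, and finally a waiter stolen from another | ||
| // queue (attempted only when this queue would otherwise go empty). | ||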
3018 | |||
3019 | // 't' must drop all inherited priority and clean up its data structures before hand-off. | ||
3020 | |||
3021 | // DROP ALL INHERITANCE. IKGLP MUST BE OUTER-MOST | ||
3022 | raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock); | ||
3023 | { | ||
3024 | int count = 0; | ||
3025 | while(!binheap_empty(&tsk_rt(t)->hp_blocked_tasks)) { | ||
3026 | binheap_delete_root(&tsk_rt(t)->hp_blocked_tasks, struct nested_info, hp_binheap_node); | ||
3027 | ++count; | ||
3028 | } | ||
3029 | decrease_priority_inheritance(t, NULL); | ||
3030 | WARN_ON(count > 2); // must not exceed 2: only local fq inheritance and one donation are possible. | ||
3031 | } | ||
3032 | raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); | ||
3033 | |||
3034 | |||
3035 | // Clearing the owner and updating sem->shortest_fifo_queue | ||
3036 | // could have been done sooner, but it is deferred in the hope | ||
3037 | // of reducing thrashing on the sem->shortest_fifo_queue | ||
3038 | // assignment. | ||
3039 | fq->owner = NULL; // no longer owned!! | ||
3040 | --(fq->count); | ||
3041 | if(fq->count < sem->shortest_fifo_queue->count) { | ||
3042 | sem->shortest_fifo_queue = fq; | ||
3043 | } | ||
3044 | |||
3045 | // Now patch up other priorities. | ||
3046 | // | ||
3047 | // At most one of the following: | ||
3048 | // if(donee && donee != t): decrease prio and propagate to the owner, or onward | ||
3049 | // if(to_steal), update owner's prio (hp_waiter has already been set) | ||
3050 | // | ||
3051 | |||
3052 | BUG_ON((other_donor_info != NULL) && (to_steal != NULL)); | ||
3053 | |||
3054 | if(other_donor_info) { | ||
3055 | struct fifo_queue *other_fq = other_donor_info->donee_info->fq; | ||
3056 | |||
3057 | BUG_ON(!donee); | ||
3058 | BUG_ON(donee == t); | ||
3059 | |||
3060 | TRACE_TASK(t, "Terminating donation relation of donor %s/%d to donee %s/%d!\n", | ||
3061 | other_donor_info->task->comm, other_donor_info->task->pid, | ||
3062 | donee->comm, donee->pid); | ||
3063 | |||
3064 | // need to terminate donation relation. | ||
3065 | if(donee == other_fq->owner) { | ||
3066 | TRACE_TASK(t, "Donee %s/%d is an owner of fq %d.\n", | ||
3067 | donee->comm, donee->pid, | ||
3068 | ikglp_get_idx(sem, other_fq)); | ||
3069 | |||
3070 | ikglp_remove_donation_from_owner(&other_donor_info->prio_donation.hp_binheap_node, other_fq, sem, flags); | ||
3071 | lock_fine_irqsave(&sem->lock, flags); // there should be no contention!!!! | ||
3072 | } | ||
3073 | else { | ||
3074 | TRACE_TASK(t, "Donee %s/%d is blocked in fq %d.\n", | ||
3075 | donee->comm, donee->pid, | ||
3076 | ikglp_get_idx(sem, other_fq)); | ||
3077 | |||
3078 | ikglp_remove_donation_from_fq_waiter(donee, &other_donor_info->prio_donation.hp_binheap_node); | ||
3079 | if(donee == other_fq->hp_waiter) { | ||
3080 | TRACE_TASK(t, "Donee %s/%d was an hp_waiter of fq %d. Rechecking hp_waiter.\n", | ||
3081 | donee->comm, donee->pid, | ||
3082 | ikglp_get_idx(sem, other_fq)); | ||
3083 | |||
3084 | other_fq->hp_waiter = ikglp_find_hp_waiter(other_fq, NULL); | ||
3085 | TRACE_TASK(t, "New hp_waiter for fq %d is %s/%d!\n", | ||
3086 | ikglp_get_idx(sem, other_fq), | ||
3087 | (other_fq->hp_waiter) ? other_fq->hp_waiter->comm : "nil", | ||
3088 | (other_fq->hp_waiter) ? other_fq->hp_waiter->pid : -1); | ||
3089 | |||
3090 | ikglp_refresh_owners_prio_decrease(other_fq, sem, flags); // unlocks sem->lock. reacquire it. | ||
3091 | lock_fine_irqsave(&sem->lock, flags); // there should be no contention!!!! | ||
3092 | } | ||
3093 | } | ||
3094 | } | ||
3095 | else if(to_steal) { | ||
3096 | TRACE_TASK(t, "Rechecking priority inheritance of fq %d, triggered by stealing.\n", | ||
3097 | ikglp_get_idx(sem, to_steal)); | ||
3098 | |||
3099 | ikglp_refresh_owners_prio_decrease(to_steal, sem, flags); // unlocks sem->lock. reacquire it. | ||
3100 | lock_fine_irqsave(&sem->lock, flags); // there should be no contention!!!! | ||
3101 | } | ||
3102 | |||
3103 | // check for new HP waiter. | ||
3104 | if(new_on_fq) { | ||
3105 | // fq->owner is null, so just update the hp_waiter without locking. | ||
3106 | |||
3107 | if(new_on_fq == fq->hp_waiter) { | ||
3108 | TRACE_TASK(t, "new_on_fq (%s/%d) is already hp_waiter.\n", fq->hp_waiter->comm, fq->hp_waiter->pid); | ||
3109 | fq->nest.hp_waiter_eff_prio = effective_priority(fq->hp_waiter); // set this just to be sure... | ||
3110 | } | ||
3111 | else if(edf_higher_prio(new_on_fq, fq->hp_waiter)) { | ||
3112 | if(fq->hp_waiter) | ||
3113 | TRACE_TASK(new_on_fq, "has higher prio than hp_waiter (%s/%d).\n", fq->hp_waiter->comm, fq->hp_waiter->pid); | ||
3114 | else | ||
3115 | TRACE_TASK(new_on_fq, "has higher prio than hp_waiter (NIL).\n"); | ||
3116 | |||
3117 | fq->hp_waiter = new_on_fq; | ||
3118 | fq->nest.hp_waiter_eff_prio = effective_priority(fq->hp_waiter); | ||
3119 | |||
3120 | TRACE_TASK(t, "New hp_waiter for fq %d is %s/%d!\n", | ||
3121 | ikglp_get_idx(sem, fq), | ||
3122 | (fq->hp_waiter) ? fq->hp_waiter->comm : "nil", | ||
3123 | (fq->hp_waiter) ? fq->hp_waiter->pid : -1); | ||
3124 | } | ||
3125 | } | ||
3126 | |||
3127 | |||
3128 | if(waitqueue_active(&fq->wait)) | ||
3129 | { | ||
3130 | wait_queue_t *wait = list_entry(fq->wait.task_list.next, wait_queue_t, task_list); | ||
3131 | ikglp_wait_state_t *fq_wait = container_of(wait, ikglp_wait_state_t, fq_node); | ||
3132 | next = (struct task_struct*) wait->private; | ||
3133 | |||
3134 | __remove_wait_queue(&fq->wait, wait); | ||
3135 | |||
3136 | TRACE_CUR("queue %d: ASSIGNING %s/%d as owner - next\n", | ||
3137 | ikglp_get_idx(sem, fq), | ||
3138 | next->comm, next->pid); | ||
3139 | |||
3140 | // migrate wait-state to fifo-memory. | ||
3141 | ikglp_migrate_fq_to_owner_heap_nodes(sem, fq, fq_wait); | ||
3142 | |||
3143 | /* next becomes the resource holder */ | ||
3144 | fq->owner = next; | ||
3145 | tsk_rt(next)->blocked_lock = NULL; | ||
3146 | |||
3147 | |||
3148 | /* determine new hp_waiter if necessary */ | ||
3149 | if (next == fq->hp_waiter) { | ||
3150 | |||
3151 | TRACE_TASK(next, "was highest-prio waiter\n"); | ||
3152 | /* next has the highest priority --- it doesn't need to | ||
3153 | * inherit. However, we need to make sure that the | ||
3154 | * next-highest priority in the queue is reflected in | ||
3155 | * hp_waiter. */ | ||
3156 | fq->hp_waiter = ikglp_find_hp_waiter(fq, NULL); | ||
3157 | TRACE_TASK(next, "New hp_waiter for fq %d is %s/%d!\n", | ||
3158 | ikglp_get_idx(sem, fq), | ||
3159 | (fq->hp_waiter) ? fq->hp_waiter->comm : "nil", | ||
3160 | (fq->hp_waiter) ? fq->hp_waiter->pid : -1); | ||
3161 | |||
3162 | fq->nest.hp_waiter_eff_prio = (fq->hp_waiter) ? effective_priority(fq->hp_waiter) : NULL; | ||
3163 | |||
3164 | if (fq->hp_waiter) | ||
3165 | TRACE_TASK(fq->hp_waiter, "is new highest-prio waiter\n"); | ||
3166 | else | ||
3167 | TRACE("no further waiters\n"); | ||
3168 | |||
3169 | raw_spin_lock(&tsk_rt(next)->hp_blocked_tasks_lock); | ||
3170 | |||
3171 | //TRACE_TASK(next, "Heap Before:\n"); | ||
3172 | //print_hp_waiters(tsk_rt(next)->hp_blocked_tasks.root, 0); | ||
3173 | |||
3174 | binheap_add(&fq->nest.hp_binheap_node, &tsk_rt(next)->hp_blocked_tasks, struct nested_info, hp_binheap_node); | ||
3175 | |||
3176 | //TRACE_TASK(next, "Heap After:\n"); | ||
3177 | //print_hp_waiters(tsk_rt(next)->hp_blocked_tasks.root, 0); | ||
3178 | |||
3179 | raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock); | ||
3180 | } | ||
3181 | else { | ||
3182 | /* Well, if 'next' is not the highest-priority waiter, | ||
3183 | * then it (probably) ought to inherit the highest-priority | ||
3184 | * waiter's priority. */ | ||
3185 | TRACE_TASK(next, "is not hp_waiter of replica %d. hp_waiter is %s/%d\n", | ||
3186 | ikglp_get_idx(sem, fq), | ||
3187 | (fq->hp_waiter) ? fq->hp_waiter->comm : "nil", | ||
3188 | (fq->hp_waiter) ? fq->hp_waiter->pid : -1); | ||
3189 | |||
3190 | raw_spin_lock(&tsk_rt(next)->hp_blocked_tasks_lock); | ||
3191 | |||
3192 | binheap_add(&fq->nest.hp_binheap_node, &tsk_rt(next)->hp_blocked_tasks, | ||
3193 | struct nested_info, hp_binheap_node); | ||
3194 | |||
3195 | /* It is possible that 'next' *should* be the hp_waiter, but isn't | ||
3196 | * because that update has not yet executed (the update operation is | ||
3197 | * probably blocked on sem->lock). So only inherit if the top of | ||
3198 | * 'next''s heap is indeed the effective priority of hp_waiter. | ||
3199 | * (We use fq->nest.hp_waiter_eff_prio instead of effective_priority(hp_waiter) | ||
3200 | * since the effective priority of hp_waiter can change, and the | ||
3201 | * update may not yet have made it to this lock.) | ||
3202 | */ | ||
3203 | if(likely(top_priority(&tsk_rt(next)->hp_blocked_tasks) == fq->nest.hp_waiter_eff_prio)) | ||
3204 | { | ||
3205 | if(fq->nest.hp_waiter_eff_prio) | ||
3206 | increase_priority_inheritance(next, fq->nest.hp_waiter_eff_prio); | ||
3207 | else | ||
3208 | WARN_ON(1); | ||
3209 | } | ||
3210 | |||
3211 | raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock); | ||
3212 | } | ||
3213 | |||
3214 | |||
3215 | // wake up the new resource holder! | ||
3216 | wake_up_process(next); | ||
3217 | } | ||
3218 | |||
3219 | out: | ||
3220 | unlock_fine_irqrestore(&sem->lock, flags); | ||
3221 | unlock_global_irqrestore(&dgl_lock, flags); | ||
3222 | |||
3223 | raw_spin_unlock_irqrestore(&sem->real_lock, real_flags); | ||
3224 | |||
3225 | return err; | ||
3226 | } | ||
3227 | |||
3228 | |||
3229 | static int gsnedf_ikglp_close(struct litmus_lock* l) | ||
3230 | { | ||
3231 | struct task_struct *t = current; | ||
3232 | struct ikglp_semaphore *sem = ikglp_from_lock(l); | ||
3233 | unsigned long flags; | ||
3234 | |||
3235 | int owner = 0; | ||
3236 | int i; | ||
3237 | |||
3238 | raw_spin_lock_irqsave(&sem->real_lock, flags); | ||
3239 | |||
3240 | for(i = 0; i < sem->nr_replicas; ++i) { | ||
3241 | if(sem->fifo_queues[i].owner == t) { | ||
3242 | owner = 1; | ||
3243 | break; | ||
3244 | } | ||
3245 | } | ||
3246 | |||
3247 | raw_spin_unlock_irqrestore(&sem->real_lock, flags); | ||
3248 | |||
3249 | if (owner) | ||
3250 | gsnedf_ikglp_unlock(l); | ||
3251 | |||
3252 | return 0; | ||
3253 | } | ||
3254 | |||
3255 | static void gsnedf_ikglp_free(struct litmus_lock* l) | ||
3256 | { | ||
3257 | struct ikglp_semaphore *sem = ikglp_from_lock(l); | ||
3258 | |||
3259 | kfree(sem->fifo_queues); | ||
3260 | kfree(sem); | ||
3261 | } | ||
3262 | |||
3263 | static struct litmus_lock_ops gsnedf_ikglp_lock_ops = { | 924 | static struct litmus_lock_ops gsnedf_ikglp_lock_ops = { |
3264 | .lock = gsnedf_ikglp_lock, | 925 | .lock = ikglp_lock, |
3265 | .unlock = gsnedf_ikglp_unlock, | 926 | .unlock = ikglp_unlock, |
3266 | 927 | .close = ikglp_close, | |
928 | .deallocate = ikglp_free, | ||
929 | |||
3267 | // ikglp can only be an outer-most lock. | 930 | // ikglp can only be an outer-most lock. |
3268 | .propagate_increase_inheritance = NULL, | 931 | .propagate_increase_inheritance = NULL, |
3269 | .propagate_decrease_inheritance = NULL, | 932 | .propagate_decrease_inheritance = NULL, |
3270 | |||
3271 | .close = gsnedf_ikglp_close, | ||
3272 | .deallocate = gsnedf_ikglp_free, | ||
3273 | }; | 933 | }; |
3274 | 934 | ||
3275 | static struct litmus_lock* gsnedf_new_ikglp(void* __user arg) | 935 | static struct litmus_lock* gsnedf_new_ikglp(void* __user arg) |
3276 | { | 936 | { |
3277 | struct ikglp_semaphore* sem; | 937 | return ikglp_new(num_online_cpus(), &gsnedf_ikglp_lock_ops, arg); |
3278 | int nr_replicas = 0; | ||
3279 | int i; | ||
3280 | |||
3281 | if(!access_ok(VERIFY_READ, arg, sizeof(nr_replicas))) | ||
3282 | { | ||
3283 | return NULL; | ||
3284 | } | ||
3285 | if(__copy_from_user(&nr_replicas, arg, sizeof(nr_replicas))) | ||
3286 | { | ||
3287 | return NULL; | ||
3288 | } | ||
3289 | if(nr_replicas < 1) | ||
3290 | { | ||
3291 | return NULL; | ||
3292 | } | ||
3293 | |||
3294 | sem = kmalloc(sizeof(*sem), GFP_KERNEL); | ||
3295 | if(!sem) | ||
3296 | { | ||
3297 | return NULL; | ||
3298 | } | ||
3299 | |||
3300 | sem->fifo_queues = kmalloc(sizeof(struct fifo_queue)*nr_replicas, GFP_KERNEL); | ||
3301 | if(!sem->fifo_queues) | ||
3302 | { | ||
3303 | kfree(sem); | ||
3304 | return NULL; | ||
3305 | } | ||
3306 | |||
3307 | |||
3308 | sem->litmus_lock.ops = &gsnedf_ikglp_lock_ops; | ||
3309 | |||
3310 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
3311 | { | ||
3312 | __raw_spin_lock_init(&sem->lock, ((struct litmus_lock*)sem)->cheat_lockdep, &((struct litmus_lock*)sem)->key); | ||
3313 | } | ||
3314 | #else | ||
3315 | raw_spin_lock_init(&sem->lock); | ||
3316 | #endif | ||
3317 | |||
3318 | raw_spin_lock_init(&sem->real_lock); | ||
3319 | |||
3320 | sem->nr_replicas = nr_replicas; | ||
3321 | sem->m = num_online_cpus(); // default 'm' to number of CPUs. | ||
3322 | sem->max_fifo_len = (sem->m/nr_replicas) + ((sem->m%nr_replicas) != 0); | ||
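| // (The expression above computes ceil(m/k) with integer arithmetic; | ||
| // e.g., m = 8 CPUs and k = 3 replicas give max_fifo_len = 2 + 1 = 3.) | ||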
3323 | |||
3324 | TRACE("New IKGLP Sem: m = %d, k = %d, max fifo_len = %d\n", | ||
3325 | sem->m, | ||
3326 | sem->nr_replicas, | ||
3327 | sem->max_fifo_len); | ||
3328 | |||
3329 | for(i = 0; i < nr_replicas; ++i) | ||
3330 | { | ||
3331 | struct fifo_queue* q = &(sem->fifo_queues[i]); | ||
3332 | |||
3333 | q->owner = NULL; | ||
3334 | q->hp_waiter = NULL; | ||
3335 | init_waitqueue_head(&q->wait); | ||
3336 | q->count = 0; | ||
3337 | |||
3338 | q->global_heap_node.task = NULL; | ||
3339 | INIT_BINHEAP_NODE(&q->global_heap_node.node); | ||
3340 | |||
3341 | q->donee_heap_node.task = NULL; | ||
3342 | q->donee_heap_node.donor_info = NULL; | ||
3343 | q->donee_heap_node.fq = NULL; | ||
3344 | INIT_BINHEAP_NODE(&q->donee_heap_node.node); | ||
3345 | |||
3346 | q->nest.lock = (struct litmus_lock*)sem; | ||
3347 | q->nest.hp_waiter_eff_prio = NULL; | ||
3348 | q->nest.hp_waiter_ptr = &q->hp_waiter; | ||
3349 | INIT_BINHEAP_NODE(&q->nest.hp_binheap_node); | ||
3350 | } | ||
3351 | |||
3352 | sem->shortest_fifo_queue = &sem->fifo_queues[0]; | ||
3353 | |||
3354 | sem->top_m_size = 0; | ||
3355 | |||
3356 | // init heaps | ||
3357 | INIT_BINHEAP_HANDLE(&sem->top_m, ikglp_edf_min_heap_base_priority_order); | ||
3358 | INIT_BINHEAP_HANDLE(&sem->not_top_m, ikglp_edf_max_heap_base_priority_order); | ||
3359 | INIT_BINHEAP_HANDLE(&sem->donees, ikglp_edf_min_heap_donee_order); | ||
3360 | INIT_BINHEAP_HANDLE(&sem->priority_queue, ikglp_edf_max_heap_base_priority_order); | ||
3361 | INIT_BINHEAP_HANDLE(&sem->donors, ikglp_donor_edf_max_heap_base_priority_order); | ||
3362 | |||
3363 | return &sem->litmus_lock; | ||
3364 | } | 938 | } |
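For context, this constructor is reached from userspace through the fdso layer. A minimal, hypothetical liblitmus-style sketch follows; the od_openx()/litmus_lock()/litmus_unlock()/od_close() helpers are assumptions about the userspace library and are not part of this patch:

    #include <fcntl.h>
    #include <litmus.h>   /* hypothetical liblitmus header */

    int nr_replicas = 3;  /* k; copied from user memory by gsnedf_new_ikglp() */
    int fd = open("ikglp_ns", O_RDONLY | O_CREAT, S_IRUSR);
    int od = od_openx(fd, IKGLP_SEM, 0, &nr_replicas);

    litmus_lock(od);      /* enqueue on a FIFO queue, the PQ, or as a donor */
    /* ... critical section on one of the k replicas ... */
    litmus_unlock(od);    /* gsnedf_ikglp_unlock(): pull in a donor/PQ waiter/steal */

    od_close(od);         /* gsnedf_ikglp_close() auto-releases a held replica */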
3365 | 939 | ||
3366 | 940 | #endif /* CONFIG_LITMUS_NESTED_LOCKING */ | |
3367 | |||
3368 | |||
3369 | |||
3370 | #endif | ||
3371 | |||
3372 | |||
3373 | |||
3374 | |||
3375 | |||
3376 | |||
3377 | |||
3378 | |||
3379 | |||
3380 | 941 | ||
3381 | 942 | ||
3382 | /* ******************** FMLP support ********************** */ | 943 | /* ******************** FMLP support ********************** */ |
@@ -3561,7 +1122,7 @@ static struct litmus_lock_ops gsnedf_fmlp_lock_ops = { | |||
3561 | .lock = gsnedf_fmlp_lock, | 1122 | .lock = gsnedf_fmlp_lock, |
3562 | .unlock = gsnedf_fmlp_unlock, | 1123 | .unlock = gsnedf_fmlp_unlock, |
3563 | .deallocate = gsnedf_fmlp_free, | 1124 | .deallocate = gsnedf_fmlp_free, |
3564 | 1125 | ||
3565 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | 1126 | #ifdef CONFIG_LITMUS_NESTED_LOCKING |
3566 | .propagate_increase_inheritance = NULL, | 1127 | .propagate_increase_inheritance = NULL, |
3567 | .propagate_decrease_inheritance = NULL | 1128 | .propagate_decrease_inheritance = NULL |
@@ -3599,17 +1160,17 @@ static long gsnedf_allocate_lock(struct litmus_lock **lock, int type, | |||
3599 | /* Flexible Multiprocessor Locking Protocol */ | 1160 | /* Flexible Multiprocessor Locking Protocol */ |
3600 | *lock = gsnedf_new_fmlp(); | 1161 | *lock = gsnedf_new_fmlp(); |
3601 | break; | 1162 | break; |
3602 | 1163 | ||
3603 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | 1164 | #ifdef CONFIG_LITMUS_NESTED_LOCKING |
3604 | case RSM_MUTEX: | 1165 | case RSM_MUTEX: |
3605 | *lock = gsnedf_new_rsm_mutex(); | 1166 | *lock = gsnedf_new_rsm_mutex(); |
3606 | break; | 1167 | break; |
3607 | 1168 | ||
3608 | case IKGLP_SEM: | 1169 | case IKGLP_SEM: |
3609 | *lock = gsnedf_new_ikglp(args); | 1170 | *lock = gsnedf_new_ikglp(args); |
3610 | break; | 1171 | break; |
3611 | #endif | 1172 | #endif |
3612 | 1173 | ||
3613 | default: | 1174 | default: |
3614 | err = -ENXIO; | 1175 | err = -ENXIO; |
3615 | goto UNSUPPORTED_LOCK; | 1176 | goto UNSUPPORTED_LOCK; |
@@ -3671,9 +1232,15 @@ static struct sched_plugin gsn_edf_plugin __cacheline_aligned_in_smp = { | |||
3671 | .activate_plugin = gsnedf_activate_plugin, | 1232 | .activate_plugin = gsnedf_activate_plugin, |
3672 | #ifdef CONFIG_LITMUS_LOCKING | 1233 | #ifdef CONFIG_LITMUS_LOCKING |
3673 | .allocate_lock = gsnedf_allocate_lock, | 1234 | .allocate_lock = gsnedf_allocate_lock, |
1235 | .increase_prio = increase_priority_inheritance, | ||
1236 | .decrease_prio = decrease_priority_inheritance, | ||
1237 | #endif | ||
1238 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | ||
1239 | .nested_increase_prio = nested_increase_priority_inheritance, | ||
1240 | .nested_decrease_prio = nested_decrease_priority_inheritance, | ||
3674 | #endif | 1241 | #endif |
3675 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | 1242 | #ifdef CONFIG_LITMUS_DGL_SUPPORT |
3676 | .get_dgl_spinlock = gsn_edf_get_dgl_spinlock, | 1243 | .get_dgl_spinlock = gsnedf_get_dgl_spinlock, |
3677 | #endif | 1244 | #endif |
3678 | }; | 1245 | }; |
3679 | 1246 | ||
@@ -3692,11 +1259,11 @@ static int __init init_gsn_edf(void) | |||
3692 | 1259 | ||
3693 | INIT_BINHEAP_NODE(&entry->hn); | 1260 | INIT_BINHEAP_NODE(&entry->hn); |
3694 | } | 1261 | } |
3695 | 1262 | ||
3696 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | 1263 | #ifdef CONFIG_LITMUS_DGL_SUPPORT |
3697 | raw_spin_lock_init(&dgl_lock); | 1264 | raw_spin_lock_init(&dgl_lock); |
3698 | #endif | 1265 | #endif |
3699 | 1266 | ||
3700 | edf_domain_init(&gsnedf, NULL, gsnedf_release_jobs); | 1267 | edf_domain_init(&gsnedf, NULL, gsnedf_release_jobs); |
3701 | return register_sched_plugin(&gsn_edf_plugin); | 1268 | return register_sched_plugin(&gsn_edf_plugin); |
3702 | } | 1269 | } |
diff --git a/litmus/sched_plugin.c b/litmus/sched_plugin.c index 77ae3eeb3966..20c67ff4fdce 100644 --- a/litmus/sched_plugin.c +++ b/litmus/sched_plugin.c | |||
@@ -111,23 +111,39 @@ static long litmus_dummy_deactivate_plugin(void) | |||
111 | } | 111 | } |
112 | 112 | ||
113 | #ifdef CONFIG_LITMUS_LOCKING | 113 | #ifdef CONFIG_LITMUS_LOCKING |
114 | |||
115 | static long litmus_dummy_allocate_lock(struct litmus_lock **lock, int type, | 114 | static long litmus_dummy_allocate_lock(struct litmus_lock **lock, int type, |
116 | void* __user config) | 115 | void* __user config) |
117 | { | 116 | { |
118 | return -ENXIO; | 117 | return -ENXIO; |
119 | } | 118 | } |
120 | 119 | ||
120 | static void litmus_dummy_increase_prio(struct task_struct* t, struct task_struct* prio_inh) | ||
121 | { | ||
122 | } | ||
123 | |||
124 | static void litmus_dummy_decrease_prio(struct task_struct* t, struct task_struct* prio_inh) | ||
125 | { | ||
126 | } | ||
121 | #endif | 127 | #endif |
122 | 128 | ||
123 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | 129 | #ifdef CONFIG_LITMUS_NESTED_LOCKING |
130 | static void litmus_dummy_nested_increase_prio(struct task_struct* t, struct task_struct* prio_inh, | ||
131 | raw_spinlock_t *to_unlock, unsigned long irqflags) | ||
132 | { | ||
133 | } | ||
124 | 134 | ||
135 | static void litmus_dummy_nested_decrease_prio(struct task_struct* t, struct task_struct* prio_inh, | ||
136 | raw_spinlock_t *to_unlock, unsigned long irqflags) | ||
137 | { | ||
138 | } | ||
139 | #endif | ||
140 | |||
141 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
125 | static raw_spinlock_t* litmus_dummy_get_dgl_spinlock(struct task_struct *t) | 142 | static raw_spinlock_t* litmus_dummy_get_dgl_spinlock(struct task_struct *t) |
126 | { | 143 | { |
127 | BUG(); | 144 | BUG(); |
128 | return NULL; | 145 | return NULL; |
129 | } | 146 | } |
130 | |||
131 | #endif | 147 | #endif |
132 | 148 | ||
133 | 149 | ||
@@ -149,6 +165,12 @@ struct sched_plugin linux_sched_plugin = { | |||
149 | .deactivate_plugin = litmus_dummy_deactivate_plugin, | 165 | .deactivate_plugin = litmus_dummy_deactivate_plugin, |
150 | #ifdef CONFIG_LITMUS_LOCKING | 166 | #ifdef CONFIG_LITMUS_LOCKING |
151 | .allocate_lock = litmus_dummy_allocate_lock, | 167 | .allocate_lock = litmus_dummy_allocate_lock, |
168 | .increase_prio = litmus_dummy_increase_prio, | ||
169 | .decrease_prio = litmus_dummy_decrease_prio, | ||
170 | #endif | ||
171 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | ||
172 | .nested_increase_prio = litmus_dummy_nested_increase_prio, | ||
173 | .nested_decrease_prio = litmus_dummy_nested_decrease_prio, | ||
152 | #endif | 174 | #endif |
153 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | 175 | #ifdef CONFIG_LITMUS_DGL_SUPPORT |
154 | .get_dgl_spinlock = litmus_dummy_get_dgl_spinlock, | 176 | .get_dgl_spinlock = litmus_dummy_get_dgl_spinlock, |
@@ -190,6 +212,12 @@ int register_sched_plugin(struct sched_plugin* plugin) | |||
190 | CHECK(deactivate_plugin); | 212 | CHECK(deactivate_plugin); |
191 | #ifdef CONFIG_LITMUS_LOCKING | 213 | #ifdef CONFIG_LITMUS_LOCKING |
192 | CHECK(allocate_lock); | 214 | CHECK(allocate_lock); |
215 | CHECK(increase_prio); | ||
216 | CHECK(decrease_prio); | ||
217 | #endif | ||
218 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | ||
219 | CHECK(nested_increase_prio); | ||
220 | CHECK(nested_decrease_prio); | ||
193 | #endif | 221 | #endif |
194 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | 222 | #ifdef CONFIG_LITMUS_DGL_SUPPORT |
195 | CHECK(get_dgl_spinlock); | 223 | CHECK(get_dgl_spinlock); |