Diffstat (limited to 'include')
-rw-r--r--	include/litmus/ikglp_lock.h	97
-rw-r--r--	include/litmus/locking.h	94
-rw-r--r--	include/litmus/rsm_lock.h	54
-rw-r--r--	include/litmus/sched_plugin.h	14
4 files changed, 225 insertions, 34 deletions
diff --git a/include/litmus/ikglp_lock.h b/include/litmus/ikglp_lock.h
new file mode 100644
index 000000000000..c0cc04db1bc6
--- /dev/null
+++ b/include/litmus/ikglp_lock.h
@@ -0,0 +1,97 @@
+#ifndef LITMUS_IKGLP_H
+#define LITMUS_IKGLP_H
+
+#include <litmus/litmus.h>
+#include <litmus/binheap.h>
+#include <litmus/locking.h>
+
+typedef struct ikglp_heap_node
+{
+	struct task_struct *task;
+	struct binheap_node node;
+} ikglp_heap_node_t;
+
+struct fifo_queue;
+struct ikglp_wait_state;
+
+typedef struct ikglp_donee_heap_node
+{
+	struct task_struct *task;
+	struct fifo_queue *fq;
+	struct ikglp_wait_state *donor_info;  // cross-linked with ikglp_wait_state_t of donor
+
+	struct binheap_node node;
+} ikglp_donee_heap_node_t;
+
+// Maintains the state of a request as it goes through the IKGLP
+typedef struct ikglp_wait_state {
+	struct task_struct *task;  // pointer back to the requesting task
+
+	// Data for while waiting in FIFO Queue
+	wait_queue_t fq_node;
+	ikglp_heap_node_t global_heap_node;
+	ikglp_donee_heap_node_t donee_heap_node;
+
+	// Data for while waiting in PQ
+	ikglp_heap_node_t pq_node;
+
+	// Data for while waiting as a donor
+	ikglp_donee_heap_node_t *donee_info;  // cross-linked with donee's ikglp_donee_heap_node_t
+	struct nested_info prio_donation;
+	struct binheap_node node;
+} ikglp_wait_state_t;
+
+/* struct for semaphore with priority inheritance */
+struct fifo_queue
+{
+	wait_queue_head_t wait;
+	struct task_struct* owner;
+
+	// used for bookkeeping
+	ikglp_heap_node_t global_heap_node;
+	ikglp_donee_heap_node_t donee_heap_node;
+
+	struct task_struct* hp_waiter;
+	int count; /* number of waiters + holder */
+
+	struct nested_info nest;
+};
+
+struct ikglp_semaphore
+{
+	struct litmus_lock litmus_lock;
+
+	raw_spinlock_t lock;
+	raw_spinlock_t real_lock;
+
+	int nr_replicas;  // AKA k
+	int m;
+
+	int max_fifo_len;  // max len of a fifo queue
+
+	struct binheap_handle top_m;  // min heap, base prio
+	int top_m_size;  // number of nodes in top_m
+
+	struct binheap_handle not_top_m;  // max heap, base prio
+
+	struct binheap_handle donees;  // min-heap, base prio
+	struct fifo_queue *shortest_fifo_queue;  // pointer to shortest fifo queue
+
+	/* data structures for holding requests */
+	struct fifo_queue *fifo_queues;  // array nr_replicas in length
+	struct binheap_handle priority_queue;  // max-heap, base prio
+	struct binheap_handle donors;  // max-heap, base prio
+};
+
+static inline struct ikglp_semaphore* ikglp_from_lock(struct litmus_lock* lock)
+{
+	return container_of(lock, struct ikglp_semaphore, litmus_lock);
+}
+
+int ikglp_lock(struct litmus_lock* l);
+int ikglp_unlock(struct litmus_lock* l);
+int ikglp_close(struct litmus_lock* l);
+void ikglp_free(struct litmus_lock* l);
+struct litmus_lock* ikglp_new(int m, struct litmus_lock_ops*, void* __user arg);
+
+#endif
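
For orientation only -- a minimal sketch, not part of the patch, of how a plugin's
allocate_lock_t backend might instantiate this protocol.  The ops table, the type
constant, and the error codes are hypothetical placeholders:

	static struct litmus_lock_ops example_ikglp_ops;  /* .lock = ikglp_lock, .unlock = ikglp_unlock,
	                                                     .close = ikglp_close, .deallocate = ikglp_free */

	static long example_allocate_lock(struct litmus_lock **lock, int type,
					  void* __user config)
	{
		if (type == 0 /* stand-in for an IKGLP type constant */) {
			/* m is conventionally the processor count; config is handed
			 * through as the __user arg (e.g. the requested replica count k). */
			*lock = ikglp_new(num_online_cpus(), &example_ikglp_ops, config);
			return *lock ? 0 : -ENOMEM;
		}
		return -ENXIO;
	}

Within the implementation itself, ikglp_from_lock() recovers the enclosing
struct ikglp_semaphore from the generic struct litmus_lock via container_of().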
diff --git a/include/litmus/locking.h b/include/litmus/locking.h
index 972cbdb7fdd5..c2324c4ccb8a 100644
--- a/include/litmus/locking.h
+++ b/include/litmus/locking.h
@@ -13,6 +13,15 @@ struct nested_info
 	struct task_struct **hp_waiter_ptr;
 	struct binheap_node hp_binheap_node;
 };
+
+static inline struct task_struct* top_priority(struct binheap_handle* handle) {
+	if(!binheap_empty(handle)) {
+		return (struct task_struct*)(binheap_top_entry(handle, struct nested_info, hp_binheap_node)->hp_waiter_eff_prio);
+	}
+	return NULL;
+}
+
+void print_hp_waiters(struct binheap_node* n, int depth);
 #endif
 
 
@@ -23,16 +32,14 @@ struct litmus_lock {
 	struct litmus_lock_ops *ops;
 	int type;
 
-#ifdef CONFIG_LITMUS_NESTED_LOCKING
 	int ident;
 
+#ifdef CONFIG_LITMUS_NESTED_LOCKING
 	struct nested_info nest;
-
 //#ifdef CONFIG_DEBUG_SPINLOCK
 	char cheat_lockdep[2];
 	struct lock_class_key key;
 //#endif
-
 #endif
 };
 
@@ -41,36 +48,42 @@ struct litmus_lock {
 #define MAX_DGL_SIZE CONFIG_LITMUS_MAX_DGL_SIZE
 
 typedef struct dgl_wait_state {
-	struct task_struct *task;
-	struct litmus_lock *locks[MAX_DGL_SIZE];
-	int size;
-	int nr_remaining;
-
-	int last_primary;
-
+	struct task_struct *task;			/* task waiting on DGL */
+	struct litmus_lock *locks[MAX_DGL_SIZE];	/* requested locks in DGL */
+	int size;					/* size of the DGL */
+	int nr_remaining;				/* nr of locks remaining before DGL is complete */
+	int last_primary;				/* index of the lock in locks[] that holds the active priority */
 	wait_queue_t wq_nodes[MAX_DGL_SIZE];
 } dgl_wait_state_t;
 
 void wake_or_wait_on_next_lock(dgl_wait_state_t *dgl_wait);
-void select_next_lock(dgl_wait_state_t* dgl_wait, struct litmus_lock* prev_lock);
+void select_next_lock(dgl_wait_state_t* dgl_wait /*, struct litmus_lock* prev_lock*/);
 
 void init_dgl_waitqueue_entry(wait_queue_t *wq_node, dgl_wait_state_t* dgl_wait);
 int dgl_wake_up(wait_queue_t *wq_node, unsigned mode, int sync, void *key);
 void __waitqueue_dgl_remove_first(wait_queue_head_t *wq, dgl_wait_state_t** dgl_wait, struct task_struct **task);
 #endif
 
+typedef int (*lock_op_t)(struct litmus_lock *l);
+typedef lock_op_t lock_close_t;
+typedef lock_op_t lock_lock_t;
+typedef lock_op_t lock_unlock_t;
+
+typedef int (*lock_open_t)(struct litmus_lock *l, void* __user arg);
+typedef void (*lock_free_t)(struct litmus_lock *l);
+
 struct litmus_lock_ops {
 	/* Current task tries to obtain / drop a reference to a lock.
 	 * Optional methods, allowed by default. */
-	int (*open)(struct litmus_lock*, void* __user);
-	int (*close)(struct litmus_lock*);
+	lock_open_t open;
+	lock_close_t close;
 
 	/* Current tries to lock/unlock this lock (mandatory methods). */
-	int (*lock)(struct litmus_lock*);
-	int (*unlock)(struct litmus_lock*);
+	lock_lock_t lock;
+	lock_unlock_t unlock;
 
 	/* The lock is no longer being referenced (mandatory method). */
-	void (*deallocate)(struct litmus_lock*);
+	lock_free_t deallocate;
 
 #ifdef CONFIG_LITMUS_NESTED_LOCKING
 	void (*propagate_increase_inheritance)(struct litmus_lock* l, struct task_struct* t, raw_spinlock_t* to_unlock, unsigned long irqflags);
@@ -86,7 +99,36 @@ struct litmus_lock_ops {
 };
 
 
-#ifdef CONFIG_LITMUS_DGL_SUPPORT
+/*
+   Nested inheritance can be achieved with fine-grain locking when there is
+   no need for DGL support, presuming locks are acquired in a partial order
+   (no cycles!).  However, DGLs allow locks to be acquired in any order.  This
+   makes nested inheritance very difficult to realize with fine-grain locks
+   (we don't yet know a solution), so we use a big lock instead.
+
+   The code contains both fine-grain and coarse-grain methods together,
+   side-by-side.  Each lock operation is *NOT* wrapped in ifdef/endif, to keep
+   the code readable.  However, this leads to the odd situation where both
+   code paths appear together as if they were both active at the same time.
+
+   THIS IS NOT REALLY THE CASE!  ONLY ONE CODE PATH IS ACTUALLY ACTIVE!
+
+   Example:
+	lock_global_irqsave(coarseLock, flags);
+	lock_fine_irqsave(fineLock, flags);
+
+   Reality (coarse):
+	lock_global_irqsave(coarseLock, flags);
+	//lock_fine_irqsave(fineLock, flags);
+
+   Reality (fine):
+	//lock_global_irqsave(coarseLock, flags);
+	lock_fine_irqsave(fineLock, flags);
+
+   Be careful when you read code involving nested inheritance.
+ */
+#if defined(CONFIG_LITMUS_DGL_SUPPORT)
+/* DGL requires a big lock to implement nested inheritance */
 #define lock_global_irqsave(lock, flags) raw_spin_lock_irqsave((lock), (flags))
 #define lock_global(lock) raw_spin_lock((lock))
 #define unlock_global_irqrestore(lock, flags) raw_spin_unlock_irqrestore((lock), (flags))
@@ -98,8 +140,8 @@ struct litmus_lock_ops {
 #define unlock_fine_irqrestore(lock, flags)
 #define unlock_fine(lock)
 
-#elif CONFIG_LITMUS_NESTED_LOCKING
-
+#elif defined(CONFIG_LITMUS_NESTED_LOCKING)
+/* Use fine-grain locking when DGLs are disabled. */
 /* global locking are no-ops without DGL support */
 #define lock_global_irqsave(lock, flags)
 #define lock_global(lock)
@@ -116,17 +158,3 @@ struct litmus_lock_ops {
 
 #endif
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
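
To make the comment block above concrete -- a minimal sketch, not part of the
patch; the function and lock names are hypothetical.  A lock routine is written
once against both macro families, and the active Kconfig option decides which
family expands to a real raw_spinlock operation:

	static int example_nested_lock(raw_spinlock_t *dgl_lock,
				       raw_spinlock_t *per_lock_spinlock)
	{
		unsigned long flags;

		/* real raw_spin_lock_irqsave() only with CONFIG_LITMUS_DGL_SUPPORT */
		lock_global_irqsave(dgl_lock, flags);
		/* real only on the fine-grain path (nested locking without DGLs) */
		lock_fine_irqsave(per_lock_spinlock, flags);

		/* ... examine hp_waiter, propagate inheritance, enqueue the request ... */

		unlock_fine_irqrestore(per_lock_spinlock, flags);
		unlock_global_irqrestore(dgl_lock, flags);
		return 0;
	}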
diff --git a/include/litmus/rsm_lock.h b/include/litmus/rsm_lock.h
new file mode 100644
index 000000000000..a15189683de4
--- /dev/null
+++ b/include/litmus/rsm_lock.h
@@ -0,0 +1,54 @@
+#ifndef LITMUS_RSM_H
+#define LITMUS_RSM_H
+
+#include <litmus/litmus.h>
+#include <litmus/binheap.h>
+#include <litmus/locking.h>
+
+/* struct for semaphore with priority inheritance */
+struct rsm_mutex {
+	struct litmus_lock litmus_lock;
+
+	/* current resource holder */
+	struct task_struct *owner;
+
+	/* highest-priority waiter */
+	struct task_struct *hp_waiter;
+
+	/* FIFO queue of waiting tasks -- for now.  time stamp in the future. */
+	wait_queue_head_t wait;
+
+	/* we do some nesting within spinlocks, so we can't use the normal
+	   sleeplocks found in wait_queue_head_t. */
+	raw_spinlock_t lock;
+};
+
+static inline struct rsm_mutex* rsm_mutex_from_lock(struct litmus_lock* lock)
+{
+	return container_of(lock, struct rsm_mutex, litmus_lock);
+}
+
+#ifdef CONFIG_LITMUS_DGL_SUPPORT
+int rsm_mutex_is_owner(struct litmus_lock *l, struct task_struct *t);
+int rsm_mutex_dgl_lock(struct litmus_lock *l, dgl_wait_state_t* dgl_wait, wait_queue_t* wq_node);
+void rsm_mutex_enable_priority(struct litmus_lock *l, dgl_wait_state_t* dgl_wait);
+#endif
+
+void rsm_mutex_propagate_increase_inheritance(struct litmus_lock* l,
+					      struct task_struct* t,
+					      raw_spinlock_t* to_unlock,
+					      unsigned long irqflags);
+
+void rsm_mutex_propagate_decrease_inheritance(struct litmus_lock* l,
+					      struct task_struct* t,
+					      raw_spinlock_t* to_unlock,
+					      unsigned long irqflags);
+
+int rsm_mutex_lock(struct litmus_lock* l);
+int rsm_mutex_unlock(struct litmus_lock* l);
+int rsm_mutex_close(struct litmus_lock* l);
+void rsm_mutex_free(struct litmus_lock* l);
+struct litmus_lock* rsm_mutex_new(struct litmus_lock_ops*);
+
+
+#endif
\ No newline at end of file
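
A minimal sketch, not part of the patch: wiring the rsm_mutex entry points into
the litmus_lock_ops callbacks from the locking.h hunk above.  Only fields visible
in this diff are set, and the table name is hypothetical:

	static struct litmus_lock_ops example_rsm_mutex_ops = {
		.lock       = rsm_mutex_lock,
		.unlock     = rsm_mutex_unlock,
		.close      = rsm_mutex_close,
		.deallocate = rsm_mutex_free,
	#ifdef CONFIG_LITMUS_NESTED_LOCKING
		.propagate_increase_inheritance = rsm_mutex_propagate_increase_inheritance,
	#endif
	};

rsm_mutex_new() takes a pointer to such a table and returns the embedded
struct litmus_lock; rsm_mutex_from_lock() maps it back via container_of().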
diff --git a/include/litmus/sched_plugin.h b/include/litmus/sched_plugin.h
index ae11e3ac9266..8e5167970340 100644
--- a/include/litmus/sched_plugin.h
+++ b/include/litmus/sched_plugin.h
@@ -58,6 +58,13 @@ typedef void (*task_exit_t) (struct task_struct *);
 typedef long (*allocate_lock_t) (struct litmus_lock **lock, int type,
 				 void* __user config);
 
+typedef void (*increase_prio_t)(struct task_struct* t, struct task_struct* prio_inh);
+typedef void (*decrease_prio_t)(struct task_struct* t, struct task_struct* prio_inh);
+typedef void (*nested_increase_prio_t)(struct task_struct* t, struct task_struct* prio_inh,
+				       raw_spinlock_t *to_unlock, unsigned long irqflags);
+typedef void (*nested_decrease_prio_t)(struct task_struct* t, struct task_struct* prio_inh,
+				       raw_spinlock_t *to_unlock, unsigned long irqflags);
+
 typedef raw_spinlock_t* (*get_dgl_spinlock_t) (struct task_struct *t);
 
 /********************* sys call backends ********************/
@@ -97,8 +104,13 @@ struct sched_plugin {
 #ifdef CONFIG_LITMUS_LOCKING
 	/* locking protocols */
 	allocate_lock_t allocate_lock;
+	increase_prio_t increase_prio;
+	decrease_prio_t decrease_prio;
+#endif
+#ifdef CONFIG_LITMUS_NESTED_LOCKING
+	nested_increase_prio_t nested_increase_prio;
+	nested_decrease_prio_t nested_decrease_prio;
 #endif
-
 #ifdef CONFIG_LITMUS_DGL_SUPPORT
 	get_dgl_spinlock_t get_dgl_spinlock;
 #endif
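
A minimal sketch, not part of the patch, of how a plugin might advertise the new
hooks.  The demo_* functions are hypothetical stand-ins, and all other
sched_plugin callbacks (scheduling, admission, ...) are omitted:

	static long demo_allocate_lock(struct litmus_lock **lock, int type, void* __user config);
	static void demo_increase_prio(struct task_struct* t, struct task_struct* prio_inh);
	static void demo_decrease_prio(struct task_struct* t, struct task_struct* prio_inh);
	static void demo_nested_increase_prio(struct task_struct* t, struct task_struct* prio_inh,
					      raw_spinlock_t *to_unlock, unsigned long irqflags);
	static void demo_nested_decrease_prio(struct task_struct* t, struct task_struct* prio_inh,
					      raw_spinlock_t *to_unlock, unsigned long irqflags);
	static raw_spinlock_t* demo_get_dgl_spinlock(struct task_struct *t);

	static struct sched_plugin demo_plugin = {
	#ifdef CONFIG_LITMUS_LOCKING
		.allocate_lock = demo_allocate_lock,
		.increase_prio = demo_increase_prio,	/* plain priority inheritance */
		.decrease_prio = demo_decrease_prio,
	#endif
	#ifdef CONFIG_LITMUS_NESTED_LOCKING
		/* nested variants also receive the spinlock to release (to_unlock)
		 * and the saved irq flags */
		.nested_increase_prio = demo_nested_increase_prio,
		.nested_decrease_prio = demo_nested_decrease_prio,
	#endif
	#ifdef CONFIG_LITMUS_DGL_SUPPORT
		.get_dgl_spinlock = demo_get_dgl_spinlock,
	#endif
	};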