aboutsummaryrefslogtreecommitdiffstats
path: root/include/litmus/locking.h
diff options
context:
space:
mode:
Diffstat (limited to 'include/litmus/locking.h')
-rw-r--r--include/litmus/locking.h94
1 files changed, 61 insertions, 33 deletions
diff --git a/include/litmus/locking.h b/include/litmus/locking.h
index 972cbdb7fdd5..c2324c4ccb8a 100644
--- a/include/litmus/locking.h
+++ b/include/litmus/locking.h
@@ -13,6 +13,15 @@ struct nested_info
13 struct task_struct **hp_waiter_ptr; 13 struct task_struct **hp_waiter_ptr;
14 struct binheap_node hp_binheap_node; 14 struct binheap_node hp_binheap_node;
15}; 15};
16
17static inline struct task_struct* top_priority(struct binheap_handle* handle) {
18 if(!binheap_empty(handle)) {
19 return (struct task_struct*)(binheap_top_entry(handle, struct nested_info, hp_binheap_node)->hp_waiter_eff_prio);
20 }
21 return NULL;
22}
23
24void print_hp_waiters(struct binheap_node* n, int depth);
16#endif 25#endif
17 26
18 27
@@ -23,16 +32,14 @@ struct litmus_lock {
23 struct litmus_lock_ops *ops; 32 struct litmus_lock_ops *ops;
24 int type; 33 int type;
25 34
26#ifdef CONFIG_LITMUS_NESTED_LOCKING
27 int ident; 35 int ident;
28 36
37#ifdef CONFIG_LITMUS_NESTED_LOCKING
29 struct nested_info nest; 38 struct nested_info nest;
30
31//#ifdef CONFIG_DEBUG_SPINLOCK 39//#ifdef CONFIG_DEBUG_SPINLOCK
32 char cheat_lockdep[2]; 40 char cheat_lockdep[2];
33 struct lock_class_key key; 41 struct lock_class_key key;
34//#endif 42//#endif
35
36#endif 43#endif
37}; 44};
38 45
@@ -41,36 +48,42 @@ struct litmus_lock {
41#define MAX_DGL_SIZE CONFIG_LITMUS_MAX_DGL_SIZE 48#define MAX_DGL_SIZE CONFIG_LITMUS_MAX_DGL_SIZE
42 49
43typedef struct dgl_wait_state { 50typedef struct dgl_wait_state {
44 struct task_struct *task; 51 struct task_struct *task; /* task waiting on DGL */
45 struct litmus_lock *locks[MAX_DGL_SIZE]; 52 struct litmus_lock *locks[MAX_DGL_SIZE]; /* requested locks in DGL */
46 int size; 53 int size; /* size of the DGL */
 47	int nr_remaining; 54	int nr_remaining; /* nr locks remaining before DGL is complete */
48 55 int last_primary; /* index lock in locks[] that has active priority */
49 int last_primary;
50
51 wait_queue_t wq_nodes[MAX_DGL_SIZE]; 56 wait_queue_t wq_nodes[MAX_DGL_SIZE];
52} dgl_wait_state_t; 57} dgl_wait_state_t;
53 58
54void wake_or_wait_on_next_lock(dgl_wait_state_t *dgl_wait); 59void wake_or_wait_on_next_lock(dgl_wait_state_t *dgl_wait);
55void select_next_lock(dgl_wait_state_t* dgl_wait, struct litmus_lock* prev_lock); 60void select_next_lock(dgl_wait_state_t* dgl_wait /*, struct litmus_lock* prev_lock*/);
56 61
57void init_dgl_waitqueue_entry(wait_queue_t *wq_node, dgl_wait_state_t* dgl_wait); 62void init_dgl_waitqueue_entry(wait_queue_t *wq_node, dgl_wait_state_t* dgl_wait);
58int dgl_wake_up(wait_queue_t *wq_node, unsigned mode, int sync, void *key); 63int dgl_wake_up(wait_queue_t *wq_node, unsigned mode, int sync, void *key);
59void __waitqueue_dgl_remove_first(wait_queue_head_t *wq, dgl_wait_state_t** dgl_wait, struct task_struct **task); 64void __waitqueue_dgl_remove_first(wait_queue_head_t *wq, dgl_wait_state_t** dgl_wait, struct task_struct **task);
60#endif 65#endif
61 66
67typedef int (*lock_op_t)(struct litmus_lock *l);
68typedef lock_op_t lock_close_t;
69typedef lock_op_t lock_lock_t;
70typedef lock_op_t lock_unlock_t;
71
72typedef int (*lock_open_t)(struct litmus_lock *l, void* __user arg);
73typedef void (*lock_free_t)(struct litmus_lock *l);
74
62struct litmus_lock_ops { 75struct litmus_lock_ops {
63 /* Current task tries to obtain / drop a reference to a lock. 76 /* Current task tries to obtain / drop a reference to a lock.
64 * Optional methods, allowed by default. */ 77 * Optional methods, allowed by default. */
65 int (*open)(struct litmus_lock*, void* __user); 78 lock_open_t open;
66 int (*close)(struct litmus_lock*); 79 lock_close_t close;
67 80
68 /* Current tries to lock/unlock this lock (mandatory methods). */ 81 /* Current tries to lock/unlock this lock (mandatory methods). */
69 int (*lock)(struct litmus_lock*); 82 lock_lock_t lock;
70 int (*unlock)(struct litmus_lock*); 83 lock_unlock_t unlock;
71 84
72 /* The lock is no longer being referenced (mandatory method). */ 85 /* The lock is no longer being referenced (mandatory method). */
73 void (*deallocate)(struct litmus_lock*); 86 lock_free_t deallocate;
74 87
75#ifdef CONFIG_LITMUS_NESTED_LOCKING 88#ifdef CONFIG_LITMUS_NESTED_LOCKING
76 void (*propagate_increase_inheritance)(struct litmus_lock* l, struct task_struct* t, raw_spinlock_t* to_unlock, unsigned long irqflags); 89 void (*propagate_increase_inheritance)(struct litmus_lock* l, struct task_struct* t, raw_spinlock_t* to_unlock, unsigned long irqflags);
@@ -86,7 +99,36 @@ struct litmus_lock_ops {
86}; 99};
87 100
88 101
89#ifdef CONFIG_LITMUS_DGL_SUPPORT 102/*
103 Nested inheritance can be achieved with fine-grain locking when there is
104 no need for DGL support, presuming locks are acquired in a partial order
105 (no cycles!). However, DGLs allow locks to be acquired in any order. This
 106	makes nested inheritance very difficult to realize with fine-grain
 107	locks (we don't yet know a solution), so we use a big lock instead.
108
109 Code contains both fine-grain and coarse-grain methods together, side-by-side.
110 Each lock operation *IS NOT* surrounded by ifdef/endif to help make code more
111 readable. However, this leads to the odd situation where both code paths
112 appear together in code as if they were both active together.
113
114 THIS IS NOT REALLY THE CASE! ONLY ONE CODE PATH IS ACTUALLY ACTIVE!
115
116 Example:
117 lock_global_irqsave(coarseLock, flags);
118 lock_fine_irqsave(fineLock, flags);
119
120 Reality (coarse):
121 lock_global_irqsave(coarseLock, flags);
122 //lock_fine_irqsave(fineLock, flags);
123
124 Reality (fine):
125 //lock_global_irqsave(coarseLock, flags);
126 lock_fine_irqsave(fineLock, flags);
127
128 Be careful when you read code involving nested inheritance.
129 */
130#if defined(CONFIG_LITMUS_DGL_SUPPORT)
131/* DGL requires a big lock to implement nested inheritance */
90#define lock_global_irqsave(lock, flags) raw_spin_lock_irqsave((lock), (flags)) 132#define lock_global_irqsave(lock, flags) raw_spin_lock_irqsave((lock), (flags))
91#define lock_global(lock) raw_spin_lock((lock)) 133#define lock_global(lock) raw_spin_lock((lock))
92#define unlock_global_irqrestore(lock, flags) raw_spin_unlock_irqrestore((lock), (flags)) 134#define unlock_global_irqrestore(lock, flags) raw_spin_unlock_irqrestore((lock), (flags))
@@ -98,8 +140,8 @@ struct litmus_lock_ops {
98#define unlock_fine_irqrestore(lock, flags) 140#define unlock_fine_irqrestore(lock, flags)
99#define unlock_fine(lock) 141#define unlock_fine(lock)
100 142
101#elif CONFIG_LITMUS_NESTED_LOCKING 143#elif defined(CONFIG_LITMUS_NESTED_LOCKING)
102 144/* Use fine-grain locking when DGLs are disabled. */
103/* global locking are no-ops without DGL support */ 145/* global locking are no-ops without DGL support */
104#define lock_global_irqsave(lock, flags) 146#define lock_global_irqsave(lock, flags)
105#define lock_global(lock) 147#define lock_global(lock)
@@ -116,17 +158,3 @@ struct litmus_lock_ops {
116 158
117#endif 159#endif
118 160
119
120
121
122
123
124
125
126
127
128
129
130
131
132