diff options
author | Glenn Elliott <gelliott@cs.unc.edu> | 2012-05-26 17:29:58 -0400 |
---|---|---|
committer | Glenn Elliott <gelliott@cs.unc.edu> | 2012-05-26 17:29:58 -0400 |
commit | a463f9a9e04385f0729f7435a0a6dff7d89b25de (patch) | |
tree | 00ff42c305926c800e18b13df8440a4de1a1a041 /include/litmus/locking.h | |
parent | 6a00f206debf8a5c8899055726ad127dbeeed098 (diff) |
GPUSync patch for Litmus 2012.1.
Diffstat (limited to 'include/litmus/locking.h')
-rw-r--r-- | include/litmus/locking.h | 142 |
1 files changed, 137 insertions, 5 deletions
diff --git a/include/litmus/locking.h b/include/litmus/locking.h index 4d7b870cb443..36647fee03e4 100644 --- a/include/litmus/locking.h +++ b/include/litmus/locking.h | |||
@@ -1,28 +1,160 @@ | |||
1 | #ifndef LITMUS_LOCKING_H | 1 | #ifndef LITMUS_LOCKING_H |
2 | #define LITMUS_LOCKING_H | 2 | #define LITMUS_LOCKING_H |
3 | 3 | ||
4 | #include <linux/list.h> | ||
5 | |||
4 | struct litmus_lock_ops; | 6 | struct litmus_lock_ops; |
5 | 7 | ||
8 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | ||
9 | struct nested_info | ||
10 | { | ||
11 | struct litmus_lock *lock; | ||
12 | struct task_struct *hp_waiter_eff_prio; | ||
13 | struct task_struct **hp_waiter_ptr; | ||
14 | struct binheap_node hp_binheap_node; | ||
15 | }; | ||
16 | |||
17 | static inline struct task_struct* top_priority(struct binheap_handle* handle) { | ||
18 | if(!binheap_empty(handle)) { | ||
19 | return (struct task_struct*)(binheap_top_entry(handle, struct nested_info, hp_binheap_node)->hp_waiter_eff_prio); | ||
20 | } | ||
21 | return NULL; | ||
22 | } | ||
23 | |||
24 | void print_hp_waiters(struct binheap_node* n, int depth); | ||
25 | #endif | ||
26 | |||
27 | |||
6 | /* Generic base struct for LITMUS^RT userspace semaphores. | 28 | /* Generic base struct for LITMUS^RT userspace semaphores. |
7 | * This structure should be embedded in protocol-specific semaphores. | 29 | * This structure should be embedded in protocol-specific semaphores. |
8 | */ | 30 | */ |
9 | struct litmus_lock { | 31 | struct litmus_lock { |
10 | struct litmus_lock_ops *ops; | 32 | struct litmus_lock_ops *ops; |
11 | int type; | 33 | int type; |
34 | |||
35 | int ident; | ||
36 | |||
37 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | ||
38 | struct nested_info nest; | ||
39 | //#ifdef CONFIG_DEBUG_SPINLOCK | ||
40 | char cheat_lockdep[2]; | ||
41 | struct lock_class_key key; | ||
42 | //#endif | ||
43 | #endif | ||
12 | }; | 44 | }; |
13 | 45 | ||
46 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
47 | |||
48 | #define MAX_DGL_SIZE CONFIG_LITMUS_MAX_DGL_SIZE | ||
49 | |||
50 | typedef struct dgl_wait_state { | ||
51 | struct task_struct *task; /* task waiting on DGL */ | ||
52 | struct litmus_lock *locks[MAX_DGL_SIZE]; /* requested locks in DGL */ | ||
53 | int size; /* size of the DGL */ | ||
54 | int nr_remaining; /* nr locks remaining before DGL is complete */ | ||
55 | int last_primary; /* index lock in locks[] that has active priority */ | ||
56 | wait_queue_t wq_nodes[MAX_DGL_SIZE]; | ||
57 | } dgl_wait_state_t; | ||
58 | |||
59 | void wake_or_wait_on_next_lock(dgl_wait_state_t *dgl_wait); | ||
60 | void select_next_lock(dgl_wait_state_t* dgl_wait /*, struct litmus_lock* prev_lock*/); | ||
61 | |||
62 | void init_dgl_waitqueue_entry(wait_queue_t *wq_node, dgl_wait_state_t* dgl_wait); | ||
63 | int dgl_wake_up(wait_queue_t *wq_node, unsigned mode, int sync, void *key); | ||
64 | void __waitqueue_dgl_remove_first(wait_queue_head_t *wq, dgl_wait_state_t** dgl_wait, struct task_struct **task); | ||
65 | #endif | ||
66 | |||
67 | typedef int (*lock_op_t)(struct litmus_lock *l); | ||
68 | typedef lock_op_t lock_close_t; | ||
69 | typedef lock_op_t lock_lock_t; | ||
70 | typedef lock_op_t lock_unlock_t; | ||
71 | |||
72 | typedef int (*lock_open_t)(struct litmus_lock *l, void* __user arg); | ||
73 | typedef void (*lock_free_t)(struct litmus_lock *l); | ||
74 | |||
14 | struct litmus_lock_ops { | 75 | struct litmus_lock_ops { |
15 | /* Current task tries to obtain / drop a reference to a lock. | 76 | /* Current task tries to obtain / drop a reference to a lock. |
16 | * Optional methods, allowed by default. */ | 77 | * Optional methods, allowed by default. */ |
17 | int (*open)(struct litmus_lock*, void* __user); | 78 | lock_open_t open; |
18 | int (*close)(struct litmus_lock*); | 79 | lock_close_t close; |
19 | 80 | ||
20 | /* Current tries to lock/unlock this lock (mandatory methods). */ | 81 | /* Current tries to lock/unlock this lock (mandatory methods). */ |
21 | int (*lock)(struct litmus_lock*); | 82 | lock_lock_t lock; |
22 | int (*unlock)(struct litmus_lock*); | 83 | lock_unlock_t unlock; |
23 | 84 | ||
24 | /* The lock is no longer being referenced (mandatory method). */ | 85 | /* The lock is no longer being referenced (mandatory method). */ |
25 | void (*deallocate)(struct litmus_lock*); | 86 | lock_free_t deallocate; |
87 | |||
88 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | ||
89 | void (*propagate_increase_inheritance)(struct litmus_lock* l, struct task_struct* t, raw_spinlock_t* to_unlock, unsigned long irqflags); | ||
90 | void (*propagate_decrease_inheritance)(struct litmus_lock* l, struct task_struct* t, raw_spinlock_t* to_unlock, unsigned long irqflags); | ||
91 | #endif | ||
92 | |||
93 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
94 | raw_spinlock_t* (*get_dgl_spin_lock)(struct litmus_lock *l); | ||
95 | int (*dgl_lock)(struct litmus_lock *l, dgl_wait_state_t* dgl_wait, wait_queue_t* wq_node); | ||
96 | int (*is_owner)(struct litmus_lock *l, struct task_struct *t); | ||
97 | void (*enable_priority)(struct litmus_lock *l, dgl_wait_state_t* dgl_wait); | ||
98 | #endif | ||
26 | }; | 99 | }; |
27 | 100 | ||
101 | |||
102 | /* | ||
103 | Nested inheritance can be achieved with fine-grain locking when there is | ||
104 | no need for DGL support, presuming locks are acquired in a partial order | ||
105 | (no cycles!). However, DGLs allow locks to be acquired in any order. This | ||
106 | makes nested inheritance very difficult (we don't yet know a solution) to | ||
107 | realize with fine-grain locks, so we use a big lock instead. | ||
108 | |||
109 | Code contains both fine-grain and coarse-grain methods together, side-by-side. | ||
110 | Each lock operation *IS NOT* surrounded by ifdef/endif to help make code more | ||
111 | readable. However, this leads to the odd situation where both code paths | ||
112 | appear together in code as if they were both active together. | ||
113 | |||
114 | THIS IS NOT REALLY THE CASE! ONLY ONE CODE PATH IS ACTUALLY ACTIVE! | ||
115 | |||
116 | Example: | ||
117 | lock_global_irqsave(coarseLock, flags); | ||
118 | lock_fine_irqsave(fineLock, flags); | ||
119 | |||
120 | Reality (coarse): | ||
121 | lock_global_irqsave(coarseLock, flags); | ||
122 | //lock_fine_irqsave(fineLock, flags); | ||
123 | |||
124 | Reality (fine): | ||
125 | //lock_global_irqsave(coarseLock, flags); | ||
126 | lock_fine_irqsave(fineLock, flags); | ||
127 | |||
128 | Be careful when you read code involving nested inheritance. | ||
129 | */ | ||
130 | #if defined(CONFIG_LITMUS_DGL_SUPPORT) | ||
131 | /* DGL requires a big lock to implement nested inheritance */ | ||
132 | #define lock_global_irqsave(lock, flags) raw_spin_lock_irqsave((lock), (flags)) | ||
133 | #define lock_global(lock) raw_spin_lock((lock)) | ||
134 | #define unlock_global_irqrestore(lock, flags) raw_spin_unlock_irqrestore((lock), (flags)) | ||
135 | #define unlock_global(lock) raw_spin_unlock((lock)) | ||
136 | |||
137 | /* fine-grained locking is a no-op with DGL support */ | ||
138 | #define lock_fine_irqsave(lock, flags) | ||
139 | #define lock_fine(lock) | ||
140 | #define unlock_fine_irqrestore(lock, flags) | ||
141 | #define unlock_fine(lock) | ||
142 | |||
143 | #elif defined(CONFIG_LITMUS_NESTED_LOCKING) | ||
144 | /* Use fine-grain locking when DGLs are disabled. */ | ||
145 | /* global locking is a no-op without DGL support */ | ||
146 | #define lock_global_irqsave(lock, flags) | ||
147 | #define lock_global(lock) | ||
148 | #define unlock_global_irqrestore(lock, flags) | ||
149 | #define unlock_global(lock) | ||
150 | |||
151 | #define lock_fine_irqsave(lock, flags) raw_spin_lock_irqsave((lock), (flags)) | ||
152 | #define lock_fine(lock) raw_spin_lock((lock)) | ||
153 | #define unlock_fine_irqrestore(lock, flags) raw_spin_unlock_irqrestore((lock), (flags)) | ||
154 | #define unlock_fine(lock) raw_spin_unlock((lock)) | ||
155 | |||
28 | #endif | 156 | #endif |
157 | |||
158 | |||
159 | #endif | ||
160 | |||