Diffstat (limited to 'include/litmus/ikglp_lock.h')
-rw-r--r--  include/litmus/ikglp_lock.h  61
1 file changed, 61 insertions(+), 0 deletions(-)
diff --git a/include/litmus/ikglp_lock.h b/include/litmus/ikglp_lock.h
index c0cc04db1bc6..2a75a1719815 100644
--- a/include/litmus/ikglp_lock.h
+++ b/include/litmus/ikglp_lock.h
@@ -5,6 +5,12 @@
 #include <litmus/binheap.h>
 #include <litmus/locking.h>
 
+#ifdef CONFIG_LITMUS_AFFINITY_LOCKING
+#include <litmus/kexclu_affinity.h>
+
+struct ikglp_affinity;
+#endif
+
 typedef struct ikglp_heap_node
 {
 	struct task_struct *task;
@@ -81,6 +87,10 @@ struct ikglp_semaphore
 	struct fifo_queue *fifo_queues;		// array nr_replicas in length
 	struct binheap_handle priority_queue;	// max-heap, base prio
 	struct binheap_handle donors;		// max-heap, base prio
+
+#ifdef CONFIG_LITMUS_AFFINITY_LOCKING
+	struct ikglp_affinity *aff_obs;
+#endif
 };
 
 static inline struct ikglp_semaphore* ikglp_from_lock(struct litmus_lock* lock)
@@ -94,4 +104,55 @@ int ikglp_close(struct litmus_lock* l);
 void ikglp_free(struct litmus_lock* l);
 struct litmus_lock* ikglp_new(int m, struct litmus_lock_ops*, void* __user arg);
 
+
+
+#if defined(CONFIG_LITMUS_AFFINITY_LOCKING) && defined(CONFIG_LITMUS_NVIDIA)
+
+struct ikglp_queue_info
+{
+	struct fifo_queue* q;
+	lt_t estimated_len;
+	int *nr_cur_users;
+};
+
+struct ikglp_affinity_ops
+{
+	struct fifo_queue* (*advise_enqueue)(struct ikglp_affinity* aff, struct task_struct* t);	// select FIFO
+	struct task_struct* (*advise_steal)(struct ikglp_affinity* aff, wait_queue_t** to_steal, struct fifo_queue** to_steal_from);	// select steal from FIFO
+	struct task_struct* (*advise_donee_selection)(struct ikglp_affinity* aff, wait_queue_t** donee, struct fifo_queue** donee_queue);	// select a donee
+	struct task_struct* (*advise_doner_to_fq)(struct ikglp_affinity* aff, ikglp_wait_state_t** donor);	// select a donor to move to PQ
+
+	void (*notify_enqueue)(struct ikglp_affinity* aff, struct fifo_queue* fq, struct task_struct* t);	// fifo enqueue
+	void (*notify_dequeue)(struct ikglp_affinity* aff, struct fifo_queue* fq, struct task_struct* t);	// fifo dequeue
+	void (*notify_acquired)(struct ikglp_affinity* aff, struct fifo_queue* fq, struct task_struct* t);	// replica acquired
+	void (*notify_freed)(struct ikglp_affinity* aff, struct fifo_queue* fq, struct task_struct* t);	// replica freed
+	int (*replica_to_resource)(struct ikglp_affinity* aff, struct fifo_queue* fq);	// convert a replica # to a GPU (includes offsets and simult user folding)
+};
+
+struct ikglp_affinity
+{
+	struct affinity_observer obs;
+	struct ikglp_affinity_ops *ops;
+	struct fifo_queue *q_info;
+	int *nr_cur_users_on_rsrc;
+	int offset;
+	int nr_simult;
+	int nr_rsrc;
+};
+
+static inline struct ikglp_affinity* ikglp_aff_obs_from_aff_obs(struct affinity_observer* aff_obs)
+{
+	return container_of(aff_obs, struct ikglp_affinity, obs);
+}
+
+int ikglp_aff_obs_close(struct affinity_observer*);
+void ikglp_aff_obs_free(struct affinity_observer*);
+struct affinity_observer* ikglp_gpu_aff_obs_new(struct affinity_observer_ops*,
+				void* __user arg);
+struct affinity_observer* ikglp_simple_gpu_aff_obs_new(struct affinity_observer_ops*,
+				void* __user arg);
+#endif
+
+
+
 #endif
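
Note (not part of the patch): the ikglp_affinity_ops table added above is a set of advise/notify hooks that an affinity observer exports to the IKGLP lock implementation. The following is a minimal sketch of how a lock-side enqueue path might consult such a table; it assumes only the declarations from this header, and the helper __ikglp_enqueue_on_fq() is a hypothetical stand-in for the actual FIFO insertion logic, not a function defined by this commit.

/* Illustrative sketch only -- not part of include/litmus/ikglp_lock.h.
 * Assumes the declarations above; __ikglp_enqueue_on_fq() is a
 * hypothetical helper that performs the real FIFO insertion.
 */
static void ikglp_enqueue_with_affinity(struct ikglp_semaphore *sem,
					struct task_struct *t)
{
	struct fifo_queue *fq = NULL;

#ifdef CONFIG_LITMUS_AFFINITY_LOCKING
	if (sem->aff_obs)
		/* ask the observer which replica queue suits this task */
		fq = sem->aff_obs->ops->advise_enqueue(sem->aff_obs, t);
#endif
	if (!fq)
		fq = &sem->fifo_queues[0];	/* fallback: first replica */

	__ikglp_enqueue_on_fq(sem, fq, t);	/* hypothetical insertion helper */

#ifdef CONFIG_LITMUS_AFFINITY_LOCKING
	if (sem->aff_obs)
		/* let the observer update its estimated queue lengths */
		sem->aff_obs->ops->notify_enqueue(sem->aff_obs, fq, t);
#endif
}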