#ifndef LITMUS_IKGLP_H
#define LITMUS_IKGLP_H
#include <litmus/litmus.h>
#include <litmus/binheap.h>
#include <litmus/locking.h>
#ifdef CONFIG_LITMUS_AFFINITY_LOCKING
#include <litmus/kexclu_affinity.h>
struct ikglp_affinity;
#endif
// Generic priority-heap node binding a task to a position in one of the
// IKGLP's binomial heaps (e.g. top_m / not_top_m in struct ikglp_semaphore).
typedef struct ikglp_heap_node
{
struct task_struct *task; // task this heap entry represents
struct binheap_node node; // intrusive linkage into a struct binheap
} ikglp_heap_node_t;
struct fifo_queue;
struct ikglp_wait_state;
// Node for the "donees" heap: a task that may receive a priority donation.
// Cross-linked with the donor's ikglp_wait_state_t (see donor_info below),
// so both sides of a donation relationship can find each other.
typedef struct ikglp_donee_heap_node
{
struct task_struct *task; // the (potential) donee task
struct fifo_queue *fq; // FIFO queue this donee is associated with
struct ikglp_wait_state *donor_info; // cross-linked with ikglp_wait_state_t of donor (NULL if no donor -- TODO confirm)
struct binheap_node node; // intrusive linkage into the donees heap
} ikglp_donee_heap_node_t;
// Maintains the state of a request as it goes through the IKGLP.
// A request can occupy one of three roles -- FIFO-queue waiter, PQ waiter,
// or donor -- and this struct carries a dedicated node for each role.
typedef struct ikglp_wait_state {
struct task_struct *task; // pointer back to the requesting task
// Data for while waiting in FIFO Queue
wait_queue_t fq_node; // entry on a fifo_queue's wait_queue_head_t
ikglp_heap_node_t global_heap_node; // entry in the global top_m/not_top_m tracking heaps
ikglp_donee_heap_node_t donee_heap_node; // entry in the donees heap
// Data for while waiting in PQ
ikglp_heap_node_t pq_node; // entry in the priority_queue heap
// Data for while waiting as a donor
ikglp_donee_heap_node_t *donee_info; // cross-linked with donee's ikglp_donee_heap_node_t
struct nested_info prio_donation; // nested priority-inheritance bookkeeping for the donation
struct binheap_node node; // binheap linkage (presumably into the donors heap -- verify)
} ikglp_wait_state_t;
/* Per-replica FIFO queue for a semaphore with priority inheritance.
 * One of these exists for each of the nr_replicas replicas
 * (see ikglp_semaphore::fifo_queues). */
struct fifo_queue
{
wait_queue_head_t wait; // tasks blocked waiting on this replica
struct task_struct* owner; // current holder of this replica (presumably NULL when free -- verify)
// used for bookkeeping
ikglp_heap_node_t global_heap_node;
ikglp_donee_heap_node_t donee_heap_node;
struct task_struct* hp_waiter; // highest-priority waiter in this queue
int count; /* number of waiters + holder */
struct nested_info nest; // nested priority-inheritance bookkeeping
};
// Top-level state for one IKGLP instance: k replicas of a resource,
// a FIFO queue per replica, plus the priority queue and donor machinery.
struct ikglp_semaphore
{
struct litmus_lock litmus_lock; // embedded base lock; see ikglp_from_lock()
raw_spinlock_t lock; // protects the state below -- TODO confirm exact coverage
raw_spinlock_t real_lock; // second lock; role unclear from this header -- see ikglp.c
int nr_replicas; // AKA k
int m; // presumably the processor count used in analysis -- verify against ikglp_new()
int max_fifo_len; // max len of a fifo queue
int nr_in_fifos; // total number of requests currently in FIFO queues
struct binheap top_m; // min heap, base prio; the m highest-priority requests
int top_m_size; // number of nodes in top_m
struct binheap not_top_m; // max heap, base prio; everyone else
struct binheap donees; // min-heap, base prio
struct fifo_queue *shortest_fifo_queue; // pointer to shortest fifo queue
/* data structures for holding requests */
struct fifo_queue *fifo_queues; // array nr_replicas in length
struct binheap priority_queue; // max-heap, base prio
struct binheap donors; // max-heap, base prio
#ifdef CONFIG_LITMUS_AFFINITY_LOCKING
struct ikglp_affinity *aff_obs; // optional affinity-aware queue-selection observer
#endif
};
/* Recover the enclosing ikglp_semaphore from its embedded litmus_lock. */
static inline struct ikglp_semaphore* ikglp_from_lock(struct litmus_lock* lock)
{
	struct ikglp_semaphore *sem;

	sem = container_of(lock, struct ikglp_semaphore, litmus_lock);
	return sem;
}
// Lock-ops entry points implemented in ikglp.c (wired into litmus_lock_ops).
int ikglp_lock(struct litmus_lock* l); // acquire a replica; presumably blocks when none free -- verify
int ikglp_unlock(struct litmus_lock* l); // release the caller's replica
int ikglp_close(struct litmus_lock* l); // per-task close/teardown hook
void ikglp_free(struct litmus_lock* l); // destroy the semaphore and free its memory
// Allocate and initialize a new IKGLP; arg is a user-space pointer to
// configuration data (layout defined by the implementation).
struct litmus_lock* ikglp_new(int m, struct litmus_lock_ops*, void* __user arg);
#if defined(CONFIG_LITMUS_AFFINITY_LOCKING) && defined(CONFIG_LITMUS_NVIDIA)
// Per-FIFO-queue bookkeeping used by the GPU-affinity heuristics
// (only compiled with CONFIG_LITMUS_AFFINITY_LOCKING && CONFIG_LITMUS_NVIDIA).
struct ikglp_queue_info
{
struct fifo_queue* q; // the queue this info describes
lt_t estimated_len; // estimated time to drain this queue
int *nr_cur_users; // shared counter of current users of the underlying resource
int64_t *nr_aff_users; // shared counter of tasks with affinity for the resource
};
// Strategy table for affinity-aware IKGLP decisions. "advise_*" hooks pick
// queues/tasks; "notify_*" hooks keep the observer's state in sync with
// queue events. All are called by the IKGLP implementation, not by users.
struct ikglp_affinity_ops
{
struct fifo_queue* (*advise_enqueue)(struct ikglp_affinity* aff, struct task_struct* t); // select FIFO for a new request
ikglp_wait_state_t* (*advise_steal)(struct ikglp_affinity* aff, struct fifo_queue* dst); // select a request to steal into FIFO dst
ikglp_donee_heap_node_t* (*advise_donee_selection)(struct ikglp_affinity* aff, struct task_struct* t); // select a donee for donor t
ikglp_wait_state_t* (*advise_donor_to_fq)(struct ikglp_affinity* aff, struct fifo_queue* dst); // select a donor to move to FIFO dst
void (*notify_enqueue)(struct ikglp_affinity* aff, struct fifo_queue* fq, struct task_struct* t); // fifo enqueue
void (*notify_dequeue)(struct ikglp_affinity* aff, struct fifo_queue* fq, struct task_struct* t); // fifo dequeue
void (*notify_acquired)(struct ikglp_affinity* aff, struct fifo_queue* fq, struct task_struct* t); // replica acquired
void (*notify_freed)(struct ikglp_affinity* aff, struct fifo_queue* fq, struct task_struct* t); // replica freed
int (*replica_to_resource)(struct ikglp_affinity* aff, struct fifo_queue* fq); // convert a replica # to a GPU (includes offsets and simult user folding)
int (*notify_exit)(struct ikglp_affinity* aff, struct task_struct* t); // task is exiting; drop its affinity state
};
// Affinity-observer instance attached to an IKGLP (see
// ikglp_semaphore::aff_obs). Tracks per-resource usage so the advise_*
// hooks can steer requests toward replicas with good (GPU) affinity.
struct ikglp_affinity
{
struct affinity_observer obs; // embedded base observer; see ikglp_aff_obs_from_aff_obs()
struct ikglp_affinity_ops *ops; // strategy callbacks (simple vs. full heuristics)
struct ikglp_queue_info *q_info; // per-FIFO-queue estimates and counters
int *nr_cur_users_on_rsrc; // per-resource count of current users
int64_t *nr_aff_on_rsrc; // per-resource count of tasks with affinity
int offset; // resource-numbering offset -- TODO confirm semantics
int nr_simult; // number of simultaneous users folded onto one resource
int nr_rsrc; // number of distinct underlying resources (e.g. GPUs)
int relax_max_fifo_len; // nonzero to relax the max_fifo_len bound -- verify in ikglp.c
};
/* Recover the enclosing ikglp_affinity from its embedded affinity_observer. */
static inline struct ikglp_affinity* ikglp_aff_obs_from_aff_obs(struct affinity_observer* aff_obs)
{
	struct ikglp_affinity *aff;

	aff = container_of(aff_obs, struct ikglp_affinity, obs);
	return aff;
}
// Affinity-observer lifecycle entry points implemented in the .c file.
int ikglp_aff_obs_close(struct affinity_observer*); // close hook for the observer
void ikglp_aff_obs_free(struct affinity_observer*); // destroy and free the observer
// Constructors: full GPU-affinity heuristics vs. a simpler variant; arg is
// a user-space pointer to configuration data.
struct affinity_observer* ikglp_gpu_aff_obs_new(struct affinity_observer_ops*,
void* __user arg);
struct affinity_observer* ikglp_simple_gpu_aff_obs_new(struct affinity_observer_ops*,
void* __user arg);
#endif
#endif