#ifndef LITMUS_KFMLP_H
#define LITMUS_KFMLP_H
#include <litmus/litmus.h>
#include <litmus/locking.h>
#ifdef CONFIG_LITMUS_AFFINITY_LOCKING
#include <litmus/kexclu_affinity.h>
struct kfmlp_affinity;
#endif
/* One replica queue of a k-FMLP semaphore (priority-inheritance k-exclusion
 * lock): tasks block here waiting for this particular replica. */
struct kfmlp_queue
{
wait_queue_head_t wait; /* tasks blocked waiting for this replica */
struct task_struct* owner; /* current holder of this replica; presumably NULL when free -- TODO confirm */
struct task_struct* hp_waiter; /* presumably the highest-priority blocked task (for inheritance) -- TODO confirm */
int count; /* number of waiters + holder */
};
/* k-FMLP semaphore: k-exclusion lock managing `num_resources` replicas,
 * each with its own kfmlp_queue. */
struct kfmlp_semaphore
{
struct litmus_lock litmus_lock; /* embedded generic LITMUS lock; recovered via kfmlp_from_lock() */
spinlock_t lock; /* protects the queues and shortest_queue bookkeeping */
int num_resources; /* aka k */
struct kfmlp_queue *queues; /* array */
struct kfmlp_queue *shortest_queue; /* pointer to shortest queue */
#ifdef CONFIG_LITMUS_AFFINITY_LOCKING
struct kfmlp_affinity *aff_obs; /* optional affinity observer steering queue choice */
#endif
};
/* Map an embedded litmus_lock back to its enclosing kfmlp_semaphore. */
static inline struct kfmlp_semaphore* kfmlp_from_lock(struct litmus_lock* lock)
{
struct kfmlp_semaphore* sem = container_of(lock, struct kfmlp_semaphore, litmus_lock);
return sem;
}
/* Lock-operation entry points for the k-FMLP protocol; presumably wired
 * into a struct litmus_lock_ops by the implementation -- TODO confirm. */
int kfmlp_lock(struct litmus_lock* l);
int kfmlp_unlock(struct litmus_lock* l);
int kfmlp_close(struct litmus_lock* l);
void kfmlp_free(struct litmus_lock* l);
/* Allocate a new k-FMLP semaphore; `arg` is a user-space pointer (presumably
 * carrying k / configuration -- TODO confirm against the implementation). */
struct litmus_lock* kfmlp_new(struct litmus_lock_ops*, void* __user arg);
#if defined(CONFIG_LITMUS_AFFINITY_LOCKING) && defined(CONFIG_LITMUS_NVIDIA)
/* Per-replica bookkeeping used by the GPU affinity heuristics. */
struct kfmlp_queue_info
{
struct kfmlp_queue* q; /* the replica queue this info shadows */
lt_t estimated_len; /* estimated wait time for this queue -- TODO confirm units */
int *nr_cur_users; /* presumably points into nr_cur_users_on_rsrc for this queue's resource -- TODO confirm */
};
/* Callback table through which an affinity observer influences queue
 * selection, work stealing, and gets notified of queue state changes. */
struct kfmlp_affinity_ops
{
/* Pick the replica queue task t should enqueue on. */
struct kfmlp_queue* (*advise_enqueue)(struct kfmlp_affinity* aff, struct task_struct* t);
/* Pick a waiter to steal; outputs the wait entry and its source queue. */
struct task_struct* (*advise_steal)(struct kfmlp_affinity* aff, wait_queue_t** to_steal, struct kfmlp_queue** to_steal_from);
/* State-change notifications: t enqueued on / dequeued from fq. */
void (*notify_enqueue)(struct kfmlp_affinity* aff, struct kfmlp_queue* fq, struct task_struct* t);
void (*notify_dequeue)(struct kfmlp_affinity* aff, struct kfmlp_queue* fq, struct task_struct* t);
/* State-change notifications: t acquired / released fq's replica. */
void (*notify_acquired)(struct kfmlp_affinity* aff, struct kfmlp_queue* fq, struct task_struct* t);
void (*notify_freed)(struct kfmlp_affinity* aff, struct kfmlp_queue* fq, struct task_struct* t);
/* Map a replica queue to its underlying resource id (e.g. a GPU). */
int (*replica_to_resource)(struct kfmlp_affinity* aff, struct kfmlp_queue* fq);
};
/* Affinity observer state attached to a kfmlp_semaphore (GPU locking). */
struct kfmlp_affinity
{
struct affinity_observer obs; /* embedded generic observer; recovered via kfmlp_aff_obs_from_aff_obs() */
struct kfmlp_affinity_ops *ops; /* strategy callbacks (see kfmlp_affinity_ops) */
struct kfmlp_queue_info *q_info; /* per-replica info, presumably one entry per queue -- TODO confirm */
int *nr_cur_users_on_rsrc; /* per-resource current user counts -- TODO confirm indexing */
int offset; /* presumably offset of first resource id -- TODO confirm */
int nr_simult; /* presumably simultaneous users allowed per resource -- TODO confirm */
int nr_rsrc; /* number of distinct resources backing the k replicas -- TODO confirm */
};
/* Map an embedded affinity_observer back to its enclosing kfmlp_affinity. */
static inline struct kfmlp_affinity* kfmlp_aff_obs_from_aff_obs(struct affinity_observer* aff_obs)
{
struct kfmlp_affinity* aff = container_of(aff_obs, struct kfmlp_affinity, obs);
return aff;
}
/* Affinity-observer lifecycle entry points for k-FMLP. */
int kfmlp_aff_obs_close(struct affinity_observer*);
void kfmlp_aff_obs_free(struct affinity_observer*);
/* Constructors: full GPU-affinity heuristic vs. a simple variant;
 * `arg` is a user-space configuration pointer. */
struct affinity_observer* kfmlp_gpu_aff_obs_new(struct affinity_observer_ops*,
void* __user arg);
struct affinity_observer* kfmlp_simple_gpu_aff_obs_new(struct affinity_observer_ops*,
void* __user arg);
#endif
#endif