/* include/litmus/ikglp_lock.h */































































































































































                                                                                                                                                                          
#ifndef LITMUS_IKGLP_H
#define LITMUS_IKGLP_H

#include <litmus/litmus.h>
#include <litmus/binheap.h>
#include <litmus/locking.h>

#ifdef CONFIG_LITMUS_AFFINITY_LOCKING
#include <litmus/kexclu_affinity.h>

struct ikglp_affinity;
#endif

/* Generic heap node binding a task to a position in one of the
 * semaphore's task-ordered binheaps (e.g. top_m / not_top_m / PQ). */
typedef struct ikglp_heap_node
{
	struct task_struct *task;	/* task this node represents */
	struct binheap_node node;	/* linkage into the containing binheap */
} ikglp_heap_node_t;

struct fifo_queue;
struct ikglp_wait_state;

/* Heap node for the semaphore's donee heap: a task that may receive a
 * priority donation, along with the FIFO queue it is associated with. */
typedef struct ikglp_donee_heap_node
{
	struct task_struct *task;	/* the (potential) donee task */
	struct fifo_queue *fq;		/* FIFO queue this donee belongs to */
	struct ikglp_wait_state *donor_info;  // cross-linked with ikglp_wait_state_t of donor

	struct binheap_node node;	/* linkage into the donees binheap */
} ikglp_donee_heap_node_t;

// Maintains the state of a request as it goes through the IKGLP
typedef struct ikglp_wait_state {
	struct task_struct *task;  // pointer back to the requesting task

	// Data for while waiting in FIFO Queue
	wait_queue_t fq_node;
	ikglp_heap_node_t global_heap_node;
	ikglp_donee_heap_node_t donee_heap_node;

	// Data for while waiting in PQ
	ikglp_heap_node_t pq_node;

	// Data for while waiting as a donor
	ikglp_donee_heap_node_t *donee_info;  // cross-linked with donee's ikglp_donee_heap_node_t
	struct nested_info prio_donation;
	struct binheap_node node;
} ikglp_wait_state_t;

/* struct for semaphore with priority inheritance */
/* struct for semaphore with priority inheritance:
 * one FIFO request queue per replica of the resource. */
struct fifo_queue
{
	wait_queue_head_t wait;		/* tasks blocked on this replica */
	struct task_struct* owner;	/* current holder of this replica */

	// used for bookkeeping (entries representing this queue/its owner
	// in the semaphore's global and donee heaps)
	ikglp_heap_node_t global_heap_node;
	ikglp_donee_heap_node_t donee_heap_node;

	struct task_struct* hp_waiter;	/* highest-priority waiter in this queue */
	int count; /* number of waiters + holder */

	struct nested_info nest;	/* nested priority-inheritance bookkeeping */
};

/* Top-level IKGLP semaphore state: k replicas, each with a FIFO queue,
 * plus the priority queue (PQ) and donor heap used by the protocol. */
struct ikglp_semaphore
{
	struct litmus_lock litmus_lock;	/* embedded generic lock object */

	raw_spinlock_t	lock;		/* protects the structures below */
	raw_spinlock_t	real_lock;	/* NOTE(review): purpose vs. 'lock' not
					 * evident from this header -- see .c file */

	int nr_replicas; // AKA k
	int m;		 // the 'm' of the protocol (presumably #processors -- confirm in ikglp_new())

	int max_fifo_len; // max len of a fifo queue
	int nr_in_fifos;  // total number of requests currently in FIFO queues

	struct binheap_handle top_m;  // min heap, base prio: the m highest-prio requests
	int top_m_size;  // number of nodes in top_m

	struct binheap_handle not_top_m; // max heap, base prio: all other requests

	struct binheap_handle donees;	// min-heap, base prio
	struct fifo_queue *shortest_fifo_queue; // pointer to shortest fifo queue

	/* data structures for holding requests */
	struct fifo_queue *fifo_queues; // array nr_replicas in length
	struct binheap_handle priority_queue;	// max-heap, base prio
	struct binheap_handle donors;	// max-heap, base prio

#ifdef CONFIG_LITMUS_AFFINITY_LOCKING
	struct ikglp_affinity *aff_obs;	/* optional affinity observer (GPU heuristics) */
#endif
};

/* Recover the enclosing ikglp_semaphore from its embedded litmus_lock. */
static inline struct ikglp_semaphore* ikglp_from_lock(struct litmus_lock* lock)
{
	struct ikglp_semaphore *sem =
		container_of(lock, struct ikglp_semaphore, litmus_lock);
	return sem;
}

/* Protocol entry points (implemented in the corresponding .c file).
 * lock/unlock/close follow the litmus_lock_ops calling convention;
 * ikglp_new() allocates a semaphore with user-supplied parameters
 * ('m' is presumably the processor count -- confirm against callers). */
int ikglp_lock(struct litmus_lock* l);
int ikglp_unlock(struct litmus_lock* l);
int ikglp_close(struct litmus_lock* l);
void ikglp_free(struct litmus_lock* l);
struct litmus_lock* ikglp_new(int m, struct litmus_lock_ops*, void* __user arg);



#if defined(CONFIG_LITMUS_AFFINITY_LOCKING) && defined(CONFIG_LITMUS_NVIDIA)

/* Per-FIFO-queue data tracked by the affinity observer. */
struct ikglp_queue_info
{
	struct fifo_queue* q;	/* the queue this info describes */
	lt_t estimated_len;	/* estimated queue length (time units, per lt_t) */
	int *nr_cur_users;	/* shared counter of current users on the
				 * backing resource -- see nr_cur_users_on_rsrc */
};

/* Callback table through which the IKGLP consults an affinity observer.
 * 'advise_*' hooks pick queues/tasks; 'notify_*' hooks report state changes. */
struct ikglp_affinity_ops
{
	struct fifo_queue* (*advise_enqueue)(struct ikglp_affinity* aff, struct task_struct* t);	// select FIFO to enqueue t on
	ikglp_wait_state_t* (*advise_steal)(struct ikglp_affinity* aff, struct fifo_queue* dst);	// select request to steal from another FIFO
	ikglp_donee_heap_node_t* (*advise_donee_selection)(struct ikglp_affinity* aff, struct task_struct* t);	// select a donee for donor t
	ikglp_wait_state_t* (*advise_donor_to_fq)(struct ikglp_affinity* aff, struct fifo_queue* dst);	// select a donor to move to PQ

	void (*notify_enqueue)(struct ikglp_affinity* aff, struct fifo_queue* fq, struct task_struct* t);	// fifo enqueue
	void (*notify_dequeue)(struct ikglp_affinity* aff, struct fifo_queue* fq, struct task_struct* t);	// fifo dequeue
	void (*notify_acquired)(struct ikglp_affinity* aff, struct fifo_queue* fq, struct task_struct* t);	// replica acquired
	void (*notify_freed)(struct ikglp_affinity* aff, struct fifo_queue* fq, struct task_struct* t);		// replica freed
	int (*replica_to_resource)(struct ikglp_affinity* aff, struct fifo_queue* fq);		// convert a replica # to a GPU (includes offsets and simult user folding)
};

/* Affinity-observer state attached to an IKGLP semaphore
 * (GPU-affinity heuristics; see CONFIG_LITMUS_AFFINITY_LOCKING). */
struct ikglp_affinity
{
	struct affinity_observer obs;		/* embedded generic observer */
	struct ikglp_affinity_ops *ops;		/* heuristic callbacks (see above) */
	struct ikglp_queue_info *q_info;	/* per-FIFO-queue tracking data */
	int *nr_cur_users_on_rsrc;		/* per-resource current-user counters */
	int offset;		/* replica-to-resource offset (see replica_to_resource) */
	int nr_simult;		/* simultaneous users folded onto one resource */
	int nr_rsrc;		/* number of distinct backing resources (e.g. GPUs) */
	int relax_max_fifo_len;	/* boolean: allow FIFOs to exceed max_fifo_len */
};

/* Recover the enclosing ikglp_affinity from its embedded affinity_observer. */
static inline struct ikglp_affinity* ikglp_aff_obs_from_aff_obs(struct affinity_observer* aff_obs)
{
	struct ikglp_affinity *aff =
		container_of(aff_obs, struct ikglp_affinity, obs);
	return aff;
}

/* Affinity-observer lifecycle entry points (implemented in the .c file).
 * Two constructors: a full GPU-affinity observer and a simpler variant. */
int ikglp_aff_obs_close(struct affinity_observer*);
void ikglp_aff_obs_free(struct affinity_observer*);
struct affinity_observer* ikglp_gpu_aff_obs_new(struct affinity_observer_ops*,
												void* __user arg);
struct affinity_observer* ikglp_simple_gpu_aff_obs_new(struct affinity_observer_ops*,
												void* __user arg);
#endif



#endif