path: root/litmus/clustered.c

#include <linux/gfp.h>
#include <linux/cpumask.h>
#include <linux/list.h>
#include <linux/cacheinfo.h>

#include <litmus/debug_trace.h>
#include <litmus/clustered.h>

int get_shared_cpu_map(cpumask_var_t mask, unsigned int cpu, unsigned int index)
{
	struct cpu_cacheinfo* info = get_cpu_cacheinfo(cpu);
	struct cacheinfo *ci;

	if (!info || index >= info->num_leaves) {
		TRACE("no shared-cache CPUs: info=%d index=%u\n",
			info != NULL, index);
		return 1;
	}

	if (!info->info_list) {
		TRACE("no shared-cache CPUs: no info_list (cpu=%u)\n", cpu);
		return 1;
	}
	ci = info->info_list + index;

	cpumask_copy(mask, &ci->shared_cpu_map);

	TRACE("get_shared: P%u@L%u -> %d siblings\n ", cpu, index, cpumask_weight(mask));

	return 0;
}
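
/*
 * Example (hypothetical, not part of the original file): a minimal sketch of
 * how get_shared_cpu_map() can be used to test whether two CPUs share the
 * cache at a given level.  The function name is illustrative only; just
 * get_shared_cpu_map() itself comes from the code above.
 */
static int __maybe_unused example_cpus_share_cache(unsigned int a,
						   unsigned int b,
						   unsigned int level)
{
	cpumask_var_t mask;
	int shared = 0;

	if (!zalloc_cpumask_var(&mask, GFP_ATOMIC))
		return 0;

	/* mask := all CPUs that share CPU a's cache at the given level */
	if (get_shared_cpu_map(mask, a, level) == 0)
		shared = cpumask_test_cpu(b, mask);

	free_cpumask_var(mask);
	return shared;
}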

int get_cluster_size(enum cache_level level)
{
	cpumask_var_t mask;
	int ok;
	int num_cpus;

	if (level == GLOBAL_CLUSTER)
		return num_online_cpus();
	else {
		if (!zalloc_cpumask_var(&mask, GFP_ATOMIC))
			return -ENOMEM;
		/* assumes CPU 0 is representative of all CPUs */
		ok = get_shared_cpu_map(mask, 0, level);
		/* ok == 0 means we got the map; otherwise it's an invalid cache level */
		if (ok == 0)
			num_cpus = cpumask_weight(mask);
		free_cpumask_var(mask);

		if (ok == 0)
			return num_cpus;
		else
			return -EINVAL;
	}
}
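
/*
 * Example (hypothetical, not part of the original file): a sketch of how a
 * plugin might turn get_cluster_size() into a cluster count, assuming all
 * online CPUs are partitioned evenly at the chosen cache level.  The name
 * example_num_clusters is illustrative only.
 */
static int __maybe_unused example_num_clusters(enum cache_level level)
{
	int cluster_size = get_cluster_size(level);

	if (cluster_size <= 0)
		return cluster_size; /* propagate -ENOMEM / -EINVAL */

	return num_online_cpus() / cluster_size;
}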

int assign_cpus_to_clusters(enum cache_level level,
			    struct scheduling_cluster* clusters[],
			    unsigned int num_clusters,
			    struct cluster_cpu* cpus[],
			    unsigned int num_cpus)
{
	cpumask_var_t mask;
	unsigned int i, free_cluster = 0, low_cpu;
	int err = 0;

	if (!zalloc_cpumask_var(&mask, GFP_ATOMIC))
		return -ENOMEM;

	/* clear cluster pointers */
	for (i = 0; i < num_cpus; i++) {
		cpus[i]->id      = i;
		cpus[i]->cluster = NULL;
	}

	/* initialize clusters */
	for (i = 0; i < num_clusters; i++) {
		clusters[i]->id = i;
		INIT_LIST_HEAD(&clusters[i]->cpus);
	}

	/* Assign each CPU. Two assumptions are made:
	 * 1) The index of a CPU in cpus corresponds to its processor id
	 *    (i.e., the index in a cpu mask).
	 * 2) All CPUs that belong to some cluster are online.
	 */
	for_each_online_cpu(i) {
		/* get lowest-id CPU in cluster */
		if (level != GLOBAL_CLUSTER) {
			err = get_shared_cpu_map(mask, cpus[i]->id, level);
			if (err != 0) {
				/* ugh... wrong cache level? Either caller screwed up
				 * or the CPU topology is weird. */
				printk(KERN_ERR "Could not set up clusters for L%d sharing (max: L%d).\n",
				       level, err);
				err = -EINVAL;
				goto out;
			}
			low_cpu = cpumask_first(mask);
		} else
			low_cpu = 0;
		if (low_cpu == i) {
			/* caller must provide an appropriate number of clusters */
			BUG_ON(free_cluster >= num_clusters);

			/* create new cluster */
			cpus[i]->cluster = clusters[free_cluster++];
		} else {
			/* low_cpu points to the right cluster
			 * Assumption: low_cpu is actually online and was processed earlier. */
			cpus[i]->cluster = cpus[low_cpu]->cluster;
		}
		/* enqueue in cpus list */
		list_add_tail(&cpus[i]->cluster_list, &cpus[i]->cluster->cpus);
		printk(KERN_INFO "Assigning CPU%u to cluster %u\n.", i, cpus[i]->cluster->id);
	}
out:
	free_cpumask_var(mask);
	return err;
}
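
/*
 * Example (hypothetical, not part of the original file): once
 * assign_cpus_to_clusters() has run, each clusters[i]->cpus list holds the
 * struct cluster_cpu entries that were linked in above.  This sketch walks
 * those lists; the function name is illustrative only.
 */
static void __maybe_unused example_print_clusters(
	struct scheduling_cluster *clusters[],
	unsigned int num_clusters)
{
	struct cluster_cpu *cpu;
	unsigned int i;

	for (i = 0; i < num_clusters; i++) {
		printk(KERN_INFO "cluster %u:", clusters[i]->id);
		/* cluster_list is the same list node used by list_add_tail() above */
		list_for_each_entry(cpu, &clusters[i]->cpus, cluster_list)
			printk(KERN_CONT " CPU%u", cpu->id);
		printk(KERN_CONT "\n");
	}
}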