author     Glenn Elliott <gelliott@cs.unc.edu>    2012-04-20 21:20:21 -0400
committer  Glenn Elliott <gelliott@cs.unc.edu>    2012-04-20 21:20:21 -0400
commit     273e902c50ef94966815a92c2af5ab8c5b2d77ce (patch)
tree       f3583588422740f5147a9d674ab6f0ffbedafb42
parent     c6d04216a123f8e0b50eb78bbb1eaf646a1ca4e0 (diff)
Untested donee selection heuristic for IKGLP.
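The donee-selection callback now receives the donor task, so the affinity-aware
heuristic can consult tsk_rt(donor)->last_gpu. gpu_ikglp_advise_donee_selection()
scans the GPUs outward from the donor's last-used GPU (with wrap-around) and, for
each GPU, pick_donee() examines the replica owner and the FIFO waiters, preferring
a candidate near the head of the queue that does not already have a donor (per the
comments in the patch). If no eligible donee is found, it falls back to the
standard minimum-priority donee.

A minimal sketch of the wrap-around scan follows. It is illustrative only and uses
simplified stand-in helpers (struct donee, dist(), pick_on_gpu(), nr_gpus); these
are hypothetical placeholders, not the actual LITMUS^RT types or API.

/*
 * Illustrative sketch only: a simplified stand-in for the search loop in
 * gpu_ikglp_advise_donee_selection(). struct donee, dist(), and pick_on_gpu()
 * are hypothetical helpers, not LITMUS^RT symbols.
 */
#include <limits.h>
#include <stddef.h>

struct donee;                               /* candidate donee (opaque here) */
extern int dist(int from_gpu, int to_gpu);  /* migration distance: local < near < far */
extern struct donee *pick_on_gpu(int gpu);  /* donor-free candidate nearest the queue head, or NULL */

struct donee *pick_donee_near(int last_gpu, int nr_gpus)
{
	struct donee *best = NULL;
	int best_dist = INT_MAX;
	int start = (last_gpu >= 0) ? last_gpu : 0;
	int i = start;

	do {
		int d = dist(start, i);
		/* only visit GPUs that would improve affinity over the best so far */
		if (!best || d < best_dist) {
			struct donee *cand = pick_on_gpu(i);
			if (cand) {
				best = cand;
				best_dist = d;
			}
		}
		i = (i + 1 < nr_gpus) ? i + 1 : 0; /* wrap-around scan */
	} while (i != start);

	return best; /* caller falls back to the minimum-priority donee if NULL */
}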
-rw-r--r--  include/litmus/ikglp_lock.h    2
-rw-r--r--  litmus/ikglp_lock.c          239
2 files changed, 216 insertions, 25 deletions
diff --git a/include/litmus/ikglp_lock.h b/include/litmus/ikglp_lock.h
index 3fa23251b539..2558a382446c 100644
--- a/include/litmus/ikglp_lock.h
+++ b/include/litmus/ikglp_lock.h
@@ -119,7 +119,7 @@ struct ikglp_affinity_ops
 {
 	struct fifo_queue* (*advise_enqueue)(struct ikglp_affinity* aff, struct task_struct* t); // select FIFO
 	ikglp_wait_state_t* (*advise_steal)(struct ikglp_affinity* aff); // select steal from FIFO
-	ikglp_donee_heap_node_t* (*advise_donee_selection)(struct ikglp_affinity* aff); // select a donee
+	ikglp_donee_heap_node_t* (*advise_donee_selection)(struct ikglp_affinity* aff, struct task_struct* t); // select a donee
 	ikglp_wait_state_t* (*advise_doner_to_fq)(struct ikglp_affinity* aff, struct fifo_queue* dst); // select a donor to move to PQ
 
 	void (*notify_enqueue)(struct ikglp_affinity* aff, struct fifo_queue* fq, struct task_struct* t); // fifo enqueue
diff --git a/litmus/ikglp_lock.c b/litmus/ikglp_lock.c
index 0e07841b86ba..9ae71422d813 100644
--- a/litmus/ikglp_lock.c
+++ b/litmus/ikglp_lock.c
@@ -742,7 +742,7 @@ static void ikglp_enqueue_on_donor(struct ikglp_semaphore *sem,
 	// Select a donee
 #ifdef CONFIG_LITMUS_AFFINITY_LOCKING
 	donee_node = (sem->aff_obs) ?
-		sem->aff_obs->ops->advise_donee_selection(sem->aff_obs) :
+		sem->aff_obs->ops->advise_donee_selection(sem->aff_obs, t) :
 		binheap_top_entry(&sem->donees, ikglp_donee_heap_node_t, node);
 #else
 	donee_node = binheap_top_entry(&sem->donees, ikglp_donee_heap_node_t, node);
@@ -1833,24 +1833,22 @@ static int gpu_replica_to_resource(struct ikglp_affinity* aff,
 struct fifo_queue* gpu_ikglp_advise_enqueue(struct ikglp_affinity* aff, struct task_struct* t)
 {
 	// advise_enqueue must be smart so as not to break IKGLP rules:
-	//  * Total number of waiters cannot exceed ceil(m/k)*k.
+	//  * No queue can be greater than ceil(m/k) in length. We may return
+	//    such a queue, but IKGLP will be smart enough to send requests
+	//    to donors or PQ.
 	//  * Cannot let a queue idle if there exist waiting PQ/donors
 	//    -- needed to guarantee parallel progress of waiters.
 	//
-	// Locking protocol is smart enough to noticed that a queue we return is
-	//    full and send new requests to Donors/PQ.
-	//
 	// We may be able to relax some of these constraints, but this will have to
 	//    be carefully evaluated.
+	//
+	// Heuristic strategy: Find the shortest queue that is not full.
 
 	struct ikglp_semaphore *sem = ikglp_from_lock(aff->obs.lock);
-
-	/*
-	struct ikglp_semaphore *sem = ikglp_from_lock(aff->obs.lock);
 	lt_t min_len;
 	int min_nr_users;
 	struct ikglp_queue_info *shortest;
-	struct ikglp_queue *to_enqueue;
+	struct fifo_queue *to_enqueue;
 	int i;
 	int affinity_gpu;
 
@@ -1877,17 +1875,20 @@ struct fifo_queue* gpu_ikglp_advise_enqueue(struct ikglp_affinity* aff, struct t
 	min_len = shortest->estimated_len + get_gpu_estimate(t, MIG_LOCAL);
 	min_nr_users = *(shortest->nr_cur_users);
 
-	TRACE_CUR("cs is %llu on queue %d: est len = %llu\n",
+	TRACE_CUR("cs is %llu on queue %d (count = %d): est len = %llu\n",
		  get_gpu_estimate(t, MIG_LOCAL),
		  ikglp_get_idx(sem, shortest->q),
+		  shortest->q->count,
		  min_len);
 
 	for(i = 0; i < sem->nr_replicas; ++i) {
-		if(&aff->q_info[i] != shortest) {
+		if((&aff->q_info[i] != shortest) &&
+		   (aff->q_info[i].q->count < sem->max_fifo_len)) {
 
 			lt_t est_len =
 				aff->q_info[i].estimated_len +
-				get_gpu_estimate(t, gpu_migration_distance(tsk_rt(t)->last_gpu, replica_to_gpu(aff, i)));
+				get_gpu_estimate(t, gpu_migration_distance(tsk_rt(t)->last_gpu,
						replica_to_gpu(aff, i)));
 
 			// queue is smaller, or they're equal and the other has a smaller number
 			// of total users.
@@ -1901,21 +1902,34 @@ struct fifo_queue* gpu_ikglp_advise_enqueue(struct ikglp_affinity* aff, struct t
 				min_nr_users = *(aff->q_info[i].nr_cur_users);
 			}
 
-			TRACE_CUR("cs is %llu on queue %d: est len = %llu\n",
-				  get_gpu_estimate(t, gpu_migration_distance(tsk_rt(t)->last_gpu, replica_to_gpu(aff, i))),
+			TRACE_CUR("cs is %llu on queue %d (count = %d): est len = %llu\n",
+				  get_gpu_estimate(t,
+						gpu_migration_distance(tsk_rt(t)->last_gpu,
+							replica_to_gpu(aff, i))),
 				  ikglp_get_idx(sem, aff->q_info[i].q),
+				  aff->q_info[i].q->count,
 				  est_len);
 		}
+		else {
+			TRACE_CUR("queue %d is too long. ineligible for enqueue.\n",
+				  ikglp_get_idx(sem, aff->q_info[i].q));
+		}
+	}
+
+	if(shortest->q->count >= sem->max_fifo_len) {
+		TRACE_CUR("selected fq %d is too long, but returning it anyway.\n",
+			  ikglp_get_idx(sem, shortest->q));
 	}
 
 	to_enqueue = shortest->q;
-	TRACE_CUR("enqueue on fq %d (non-aff wanted fq %d)\n",
+	TRACE_CUR("enqueue on fq %d (count = %d) (non-aff wanted fq %d)\n",
		  ikglp_get_idx(sem, to_enqueue),
-		  ikglp_get_idx(sem, sem->shortest_queue));
+		  to_enqueue->count,
+		  ikglp_get_idx(sem, sem->shortest_fifo_queue));
 
 	return to_enqueue;
-	*/
-	return(sem->shortest_fifo_queue);
+
+	//return(sem->shortest_fifo_queue);
 }
 
 ikglp_wait_state_t* gpu_ikglp_advise_steal(struct ikglp_affinity* aff)
@@ -1928,12 +1942,189 @@ ikglp_wait_state_t* gpu_ikglp_advise_steal(struct ikglp_affinity* aff)
 	return ikglp_find_hp_waiter_to_steal(sem);
 }
 
-ikglp_donee_heap_node_t* gpu_ikglp_advise_donee_selection(struct ikglp_affinity* aff)
+
+static inline int has_donor(wait_queue_t* fq_wait)
+{
+	ikglp_wait_state_t *wait = container_of(fq_wait, ikglp_wait_state_t, fq_node);
+	return(wait->donee_heap_node.donor_info != NULL);
+}
+
+static ikglp_donee_heap_node_t* pick_donee(struct ikglp_affinity* aff,
+					   struct fifo_queue* fq,
+					   int* dist_from_head)
 {
-	// TODO: MAKE THIS SMARTER
 	struct ikglp_semaphore *sem = ikglp_from_lock(aff->obs.lock);
-	ikglp_donee_heap_node_t *donee = binheap_top_entry(&sem->donees, ikglp_donee_heap_node_t, node);
-	return(donee);
+	struct task_struct *donee;
+	ikglp_donee_heap_node_t *donee_node;
+
+	if(fq->owner && fq->donee_heap_node.donor_info != NULL) {
+		donee = fq->owner;
+		donee_node = &(fq->donee_heap_node);
+		*dist_from_head = 0;
+
+		BUG_ON(donee != donee_node->task);
+
+		TRACE_CUR("picked owner of fq %d as donee\n",
+			  ikglp_get_idx(sem, fq));
+
+		goto out;
+	}
+	else if(waitqueue_active(&fq->wait)) {
+		struct list_head *pos;
+
+		*dist_from_head = 1;
+
+		// iterating from the start of the queue is nice since this means
+		// the donee will be closer to obtaining a resource.
+		list_for_each(pos, &fq->wait.task_list) {
+			wait_queue_t *fq_wait = list_entry(pos, wait_queue_t, task_list);
+			if(!has_donor(fq_wait)) {
+				ikglp_wait_state_t *wait = container_of(fq_wait, ikglp_wait_state_t, fq_node);
+				donee = (struct task_struct*) fq_wait->private;
+				donee_node = &wait->donee_heap_node;
+
+				BUG_ON(donee != donee_node->task);
+
+				TRACE_CUR("picked waiter in fq %d as donee\n",
+					  ikglp_get_idx(sem, fq));
+
+				goto out;
+			}
+			++(*dist_from_head);
+		}
+
+		WARN_ON(1);
+	}
+
+	donee = NULL;
+	donee_node = NULL;
+	*dist_from_head = sem->max_fifo_len + 1;
+
+	TRACE_CUR("Found no one to be donee in fq %d!\n", ikglp_get_idx(sem, fq));
+
+out:
+
+	TRACE_CUR("Candidate donee for fq %d is %s/%d (dist_from_head = %d)\n",
+		  ikglp_get_idx(sem, fq),
+		  (donee) ? (donee)->comm : "nil",
+		  (donee) ? (donee)->pid : -1,
+		  *dist_from_head);
+
+	return donee_node;
+}
+
+ikglp_donee_heap_node_t* gpu_ikglp_advise_donee_selection(
+				struct ikglp_affinity* aff,
+				struct task_struct* donor)
+{
+	// Heuristic strategy: Find the highest-priority donee that is waiting on
+	// a queue closest to our affinity. The donee CANNOT already have a donor
+	// (exception: donee is the lowest-prio task in the donee heap).
+	//
+	// Further strategy: amongst eligible donees waiting for the same GPU, pick
+	// the one closest to the head of the FIFO queue (including owners).
+	//
+	struct ikglp_semaphore *sem = ikglp_from_lock(aff->obs.lock);
+	ikglp_donee_heap_node_t *donee_node;
+	gpu_migration_dist_t distance;
+	int start, i, j;
+
+	ikglp_donee_heap_node_t *default_donee;
+	ikglp_wait_state_t *default_donee_donor_info;
+
+	if(tsk_rt(donor)->last_gpu < 0) {
+		// no affinity. just return the min prio, like standard IKGLP
+		// TODO: Find something closer to the head of the queue??
+		donee_node = binheap_top_entry(&sem->donees,
+					       ikglp_donee_heap_node_t,
+					       node);
+		goto out;
+	}
+
+
+	// Temporarily break any donation relation to the default donee (the lowest
+	// prio task in the FIFO queues) to make it eligible for selection below.
+	//
+	// NOTE: The original donor relation *must* be restored, even if we select
+	// the default donee through affinity-aware selection, before returning
+	// from this function so we don't screw up our heap ordering.
+	// The standard IKGLP algorithm will steal the donor relationship if needed.
+	default_donee = binheap_top_entry(&sem->donees, ikglp_donee_heap_node_t, node);
+	default_donee_donor_info = default_donee->donor_info; // back-up donor relation
+	default_donee->donor_info = NULL; // temporarily break any donor relation.
+
+	// initialize our search
+	donee_node = NULL;
+	distance = MIG_NONE;
+
+	// begin search with affinity GPU.
+	start = gpu_to_base_replica(aff, tsk_rt(donor)->last_gpu);
+	i = start;
+	do { // "for each gpu" / "for each aff->nr_rsrc"
+		gpu_migration_dist_t temp_distance = gpu_migration_distance(start, i);
+
+		// only interested in queues that will improve our distance
+		if(temp_distance < distance || donee_node == NULL) {
+			int dist_from_head = sem->max_fifo_len + 1;
+
+			TRACE_CUR("searching for donor on GPU %d", i);
+
+			// visit each queue and pick a donee. bail as soon as we find
+			// one for this class.
+
+			for(j = 0; j < aff->nr_simult; ++j) {
+				int temp_dist_from_head;
+				ikglp_donee_heap_node_t *temp_donee_node;
+				struct fifo_queue *fq;
+
+				fq = &(sem->fifo_queues[i + j*aff->nr_rsrc]);
+				temp_donee_node = pick_donee(aff, fq, &temp_dist_from_head);
+
+				if(temp_dist_from_head < dist_from_head)
+				{
+					// we check all the FQs for this GPU to spread priorities
+					// out across the queues. does this decrease jitter?
+					donee_node = temp_donee_node;
+					dist_from_head = temp_dist_from_head;
+				}
+			}
+
+			if(dist_from_head != sem->max_fifo_len + 1) {
+				TRACE_CUR("found donee %s/%d and is the %d-th waiter.\n",
+					  donee_node->task->comm, donee_node->task->pid,
+					  dist_from_head);
+			}
+			else {
+				TRACE_CUR("found no eligible donors from GPU %d\n", i);
+			}
+		}
+		else {
+			TRACE_CUR("skipping GPU %d (distance = %d, best donor "
+				  "distance = %d)\n", i, temp_distance, distance);
+		}
+
+		i = (i+1 < aff->nr_rsrc) ? i+1 : 0; // increment with wrap-around
+	} while (i != start);
+
+
+	// restore old donor info state.
+	default_donee->donor_info = default_donee_donor_info;
+
+	if(!donee_node) {
+		TRACE_CUR("Could not find a donee. We have to steal one.\n");
+		WARN_ON(default_donee->donor_info == NULL);
+
+		donee_node = default_donee;
+	}
+
+out:
+
+	TRACE_CUR("Selected donee %s/%d from GPU %d for %s/%d with affinity for GPU %d\n",
+		  donee_node->task->comm, donee_node->task->pid,
+		  replica_to_gpu(aff, ikglp_get_idx(sem, donee_node->fq)),
+		  donor->comm, donor->pid, tsk_rt(donor)->last_gpu);
+
+	return(donee_node);
 }
 
 ikglp_wait_state_t* gpu_ikglp_advise_doner_to_fq(struct ikglp_affinity* aff, struct fifo_queue* fq)
@@ -2147,7 +2338,7 @@ ikglp_wait_state_t* simple_gpu_ikglp_advise_steal(struct ikglp_affinity* aff)
 	return ikglp_find_hp_waiter_to_steal(sem);
 }
 
-ikglp_donee_heap_node_t* simple_gpu_ikglp_advise_donee_selection(struct ikglp_affinity* aff)
+ikglp_donee_heap_node_t* simple_gpu_ikglp_advise_donee_selection(struct ikglp_affinity* aff, struct task_struct* donor)
 {
 	struct ikglp_semaphore *sem = ikglp_from_lock(aff->obs.lock);
 	ikglp_donee_heap_node_t *donee = binheap_top_entry(&sem->donees, ikglp_donee_heap_node_t, node);