Diffstat (limited to 'kernel/sched/rt.c')
 -rw-r--r--   kernel/sched/rt.c   181
 1 file changed, 179 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index f4d4b077eba0..575da76a3874 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -6,6 +6,7 @@
 #include "sched.h"
 
 #include <linux/slab.h>
+#include <linux/irq_work.h>
 
 int sched_rr_timeslice = RR_TIMESLICE;
 
@@ -59,7 +60,11 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
 	raw_spin_unlock(&rt_b->rt_runtime_lock);
 }
 
-void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
+#ifdef CONFIG_SMP
+static void push_irq_work_func(struct irq_work *work);
+#endif
+
+void init_rt_rq(struct rt_rq *rt_rq)
 {
 	struct rt_prio_array *array;
 	int i;
@@ -78,7 +83,14 @@ void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
 	rt_rq->rt_nr_migratory = 0;
 	rt_rq->overloaded = 0;
 	plist_head_init(&rt_rq->pushable_tasks);
+
+#ifdef HAVE_RT_PUSH_IPI
+	rt_rq->push_flags = 0;
+	rt_rq->push_cpu = nr_cpu_ids;
+	raw_spin_lock_init(&rt_rq->push_lock);
+	init_irq_work(&rt_rq->push_work, push_irq_work_func);
 #endif
+#endif /* CONFIG_SMP */
 	/* We start is dequeued state, because no RT tasks are queued */
 	rt_rq->rt_queued = 0;
 
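The four initializers added under HAVE_RT_PUSH_IPI imply matching fields in struct rt_rq. That half of the change lives in kernel/sched/sched.h and is outside this rt.c-limited diff, so the sketch below only reconstructs what the initializers require: the field types are inferred from the calls above (struct irq_work, raw_spinlock_t, an unsigned int flag word, an int CPU index), and their exact placement and comments are assumptions.

	struct rt_rq {
		/* ... existing members elided ... */
	#ifdef HAVE_RT_PUSH_IPI
		/* Inferred from the init code above; layout is an assumption. */
		struct irq_work push_work;	/* queued on the next overloaded CPU */
		raw_spinlock_t push_lock;	/* serializes push_flags/push_cpu updates */
		unsigned int push_flags;	/* RT_PUSH_IPI_EXECUTING / _RESTART */
		int push_cpu;			/* last CPU returned by rto_next_cpu() */
	#endif
		/* ... */
	};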
@@ -193,7 +205,7 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
 		if (!rt_se)
 			goto err_free_rq;
 
-		init_rt_rq(rt_rq, cpu_rq(i));
+		init_rt_rq(rt_rq);
 		rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
 		init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
 	}
@@ -1778,6 +1790,164 @@ static void push_rt_tasks(struct rq *rq)
 		;
 }
 
+#ifdef HAVE_RT_PUSH_IPI
+/*
+ * The search for the next cpu always starts at rq->cpu and ends
+ * when we reach rq->cpu again. It will never return rq->cpu.
+ * This returns the next cpu to check, or nr_cpu_ids if the loop
+ * is complete.
+ *
+ * rq->rt.push_cpu holds the last cpu returned by this function,
+ * or if this is the first instance, it must hold rq->cpu.
+ */
+static int rto_next_cpu(struct rq *rq)
+{
+	int prev_cpu = rq->rt.push_cpu;
+	int cpu;
+
+	cpu = cpumask_next(prev_cpu, rq->rd->rto_mask);
+
+	/*
+	 * If the previous cpu is less than the rq's CPU, then it already
+	 * passed the end of the mask, and has started from the beginning.
+	 * We end if the next CPU is greater or equal to rq's CPU.
+	 */
+	if (prev_cpu < rq->cpu) {
+		if (cpu >= rq->cpu)
+			return nr_cpu_ids;
+
+	} else if (cpu >= nr_cpu_ids) {
+		/*
+		 * We passed the end of the mask, start at the beginning.
+		 * If the result is greater or equal to the rq's CPU, then
+		 * the loop is finished.
+		 */
+		cpu = cpumask_first(rq->rd->rto_mask);
+		if (cpu >= rq->cpu)
+			return nr_cpu_ids;
+	}
+	rq->rt.push_cpu = cpu;
+
+	/* Return cpu to let the caller know if the loop is finished or not */
+	return cpu;
+}
+
+static int find_next_push_cpu(struct rq *rq)
+{
+	struct rq *next_rq;
+	int cpu;
+
+	while (1) {
+		cpu = rto_next_cpu(rq);
+		if (cpu >= nr_cpu_ids)
+			break;
+		next_rq = cpu_rq(cpu);
+
+		/* Make sure the next rq can push to this rq */
+		if (next_rq->rt.highest_prio.next < rq->rt.highest_prio.curr)
+			break;
+	}
+
+	return cpu;
+}
+
+#define RT_PUSH_IPI_EXECUTING		1
+#define RT_PUSH_IPI_RESTART		2
+
+static void tell_cpu_to_push(struct rq *rq)
+{
+	int cpu;
+
+	if (rq->rt.push_flags & RT_PUSH_IPI_EXECUTING) {
+		raw_spin_lock(&rq->rt.push_lock);
+		/* Make sure it's still executing */
+		if (rq->rt.push_flags & RT_PUSH_IPI_EXECUTING) {
+			/*
+			 * Tell the IPI to restart the loop as things have
+			 * changed since it started.
+			 */
+			rq->rt.push_flags |= RT_PUSH_IPI_RESTART;
+			raw_spin_unlock(&rq->rt.push_lock);
+			return;
+		}
+		raw_spin_unlock(&rq->rt.push_lock);
+	}
+
+	/* When here, there's no IPI going around */
+
+	rq->rt.push_cpu = rq->cpu;
+	cpu = find_next_push_cpu(rq);
+	if (cpu >= nr_cpu_ids)
+		return;
+
+	rq->rt.push_flags = RT_PUSH_IPI_EXECUTING;
+
+	irq_work_queue_on(&rq->rt.push_work, cpu);
+}
+
+/* Called from hardirq context */
+static void try_to_push_tasks(void *arg)
+{
+	struct rt_rq *rt_rq = arg;
+	struct rq *rq, *src_rq;
+	int this_cpu;
+	int cpu;
+
+	this_cpu = rt_rq->push_cpu;
+
+	/* Paranoid check */
+	BUG_ON(this_cpu != smp_processor_id());
+
+	rq = cpu_rq(this_cpu);
+	src_rq = rq_of_rt_rq(rt_rq);
+
+again:
+	if (has_pushable_tasks(rq)) {
+		raw_spin_lock(&rq->lock);
+		push_rt_task(rq);
+		raw_spin_unlock(&rq->lock);
+	}
+
+	/* Pass the IPI to the next rt overloaded queue */
+	raw_spin_lock(&rt_rq->push_lock);
+	/*
+	 * If the source queue changed since the IPI went out,
+	 * we need to restart the search from that CPU again.
+	 */
+	if (rt_rq->push_flags & RT_PUSH_IPI_RESTART) {
+		rt_rq->push_flags &= ~RT_PUSH_IPI_RESTART;
+		rt_rq->push_cpu = src_rq->cpu;
+	}
+
+	cpu = find_next_push_cpu(src_rq);
+
+	if (cpu >= nr_cpu_ids)
+		rt_rq->push_flags &= ~RT_PUSH_IPI_EXECUTING;
+	raw_spin_unlock(&rt_rq->push_lock);
+
+	if (cpu >= nr_cpu_ids)
+		return;
+
+	/*
+	 * It is possible that a restart caused this CPU to be
+	 * chosen again. Don't bother with an IPI, just see if we
+	 * have more to push.
+	 */
+	if (unlikely(cpu == rq->cpu))
+		goto again;
+
+	/* Try the next RT overloaded CPU */
+	irq_work_queue_on(&rt_rq->push_work, cpu);
+}
+
+static void push_irq_work_func(struct irq_work *work)
+{
+	struct rt_rq *rt_rq = container_of(work, struct rt_rq, push_work);
+
+	try_to_push_tasks(rt_rq);
+}
+#endif /* HAVE_RT_PUSH_IPI */
+
 static int pull_rt_task(struct rq *this_rq)
 {
 	int this_cpu = this_rq->cpu, ret = 0, cpu;
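The comment on rto_next_cpu() describes a circular walk of rd->rto_mask: start at the source CPU, wrap past the end of the mask at most once, and stop as soon as the scan reaches the source CPU again, never returning the source itself. The standalone program below is a minimal user-space sketch of that termination rule; the names (next_push_cpu, mask_next) are hypothetical, a plain bitmask stands in for cpumask_next()/cpumask_first(), and the priority filter applied by find_next_push_cpu() is deliberately omitted.

	/* rto_scan_demo.c: sketch of the wrap-around scan in rto_next_cpu().
	 * Build with: cc -o rto_scan_demo rto_scan_demo.c
	 */
	#include <stdio.h>

	#define NR_CPUS 8

	/* First bit set in mask at or after 'start', or NR_CPUS if none. */
	static int mask_next(unsigned int mask, int start)
	{
		for (int cpu = start; cpu < NR_CPUS; cpu++)
			if (mask & (1u << cpu))
				return cpu;
		return NR_CPUS;
	}

	/*
	 * Same termination rule as rto_next_cpu(): scan strictly after 'prev',
	 * wrap once past the end, and return NR_CPUS as soon as the scan
	 * reaches or passes the source CPU again.  Never returns 'src'.
	 */
	static int next_push_cpu(unsigned int rto_mask, int src, int *prev)
	{
		int cpu = mask_next(rto_mask, *prev + 1);

		if (*prev < src) {		/* already wrapped on an earlier call */
			if (cpu >= src)
				return NR_CPUS;
		} else if (cpu >= NR_CPUS) {	/* wrap around now */
			cpu = mask_next(rto_mask, 0);
			if (cpu >= src)
				return NR_CPUS;
		}
		*prev = cpu;
		return cpu;
	}

	int main(void)
	{
		unsigned int rto_mask = 0xAC;	/* CPUs 2, 3, 5, 7 are RT overloaded */
		int src = 4;			/* CPU asking the others to push */
		int prev = src;			/* like push_cpu, starts at the source */
		int cpu;

		while ((cpu = next_push_cpu(rto_mask, src, &prev)) < NR_CPUS)
			printf("IPI would visit CPU %d\n", cpu);
		return 0;
	}

With CPUs 2, 3, 5 and 7 overloaded and CPU 4 as the source, the scan visits 5, 7, 2, 3 and then stops, which is the order the chained irq_work IPI would hop between run queues.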
@@ -1793,6 +1963,13 @@ static int pull_rt_task(struct rq *this_rq)
 	 */
 	smp_rmb();
 
+#ifdef HAVE_RT_PUSH_IPI
+	if (sched_feat(RT_PUSH_IPI)) {
+		tell_cpu_to_push(this_rq);
+		return 0;
+	}
+#endif
+
 	for_each_cpu(cpu, this_rq->rd->rto_mask) {
 		if (this_cpu == cpu)
 			continue;
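The sched_feat(RT_PUSH_IPI) test added to pull_rt_task() implies a corresponding feature bit in kernel/sched/features.h, which this rt.c-limited view does not show. A sketch of that declaration follows; the default value given here is an assumption. Like any other SCHED_FEAT entry it can be toggled at runtime through /sys/kernel/debug/sched_features.

	/*
	 * Sketch only: declaration implied by the sched_feat(RT_PUSH_IPI)
	 * check above.  The default shown is an assumption.
	 */
	SCHED_FEAT(RT_PUSH_IPI, false)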
