From d2f58ddbfb1dd4fb1b6ff0bebbd886b1dcf5af34 Mon Sep 17 00:00:00 2001 From: Namhoon Kim Date: Mon, 25 Nov 2013 17:16:38 -0500 Subject: First draft of C-FL-split --- arch/arm/boot/dts/include/dt-bindings | 1 - arch/microblaze/boot/dts/system.dts | 368 +++++++++++- include/litmus/edf_split_common.h | 25 + include/litmus/litmus.h | 1 + include/litmus/rt_param.h | 4 +- litmus/Makefile | 4 +- litmus/edf_split_common.c | 171 ++++++ litmus/sched_cfl_split.c | 1006 +++++++++++++++++++++++++++++++++ 8 files changed, 1576 insertions(+), 4 deletions(-) delete mode 120000 arch/arm/boot/dts/include/dt-bindings mode change 120000 => 100644 arch/microblaze/boot/dts/system.dts create mode 100644 include/litmus/edf_split_common.h create mode 100644 litmus/edf_split_common.c create mode 100644 litmus/sched_cfl_split.c diff --git a/arch/arm/boot/dts/include/dt-bindings b/arch/arm/boot/dts/include/dt-bindings deleted file mode 120000 index 08c00e4972fa..000000000000 --- a/arch/arm/boot/dts/include/dt-bindings +++ /dev/null @@ -1 +0,0 @@ -../../../../../include/dt-bindings \ No newline at end of file diff --git a/arch/microblaze/boot/dts/system.dts b/arch/microblaze/boot/dts/system.dts deleted file mode 120000 index 7cb657892f21..000000000000 --- a/arch/microblaze/boot/dts/system.dts +++ /dev/null @@ -1 +0,0 @@ -../../platform/generic/system.dts \ No newline at end of file diff --git a/arch/microblaze/boot/dts/system.dts b/arch/microblaze/boot/dts/system.dts new file mode 100644 index 000000000000..3f85df2b73b3 --- /dev/null +++ b/arch/microblaze/boot/dts/system.dts @@ -0,0 +1,367 @@ +/* + * Device Tree Generator version: 1.1 + * + * (C) Copyright 2007-2008 Xilinx, Inc. + * (C) Copyright 2007-2009 Michal Simek + * + * Michal SIMEK + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, + * MA 02111-1307 USA + * + * CAUTION: This file is automatically generated by libgen. 
+ * Version: Xilinx EDK 10.1.03 EDK_K_SP3.6 + * + * XPS project directory: Xilinx-ML505-ll_temac-sgdma-MMU-FDT-edk101 + */ + +/dts-v1/; +/ { + #address-cells = <1>; + #size-cells = <1>; + compatible = "xlnx,microblaze"; + hard-reset-gpios = <&LEDs_8Bit 2 1>; + model = "testing"; + DDR2_SDRAM: memory@90000000 { + device_type = "memory"; + reg = < 0x90000000 0x10000000 >; + } ; + aliases { + ethernet0 = &Hard_Ethernet_MAC; + serial0 = &RS232_Uart_1; + } ; + chosen { + bootargs = "console=ttyUL0,115200 highres=on"; + linux,stdout-path = "/plb@0/serial@84000000"; + } ; + cpus { + #address-cells = <1>; + #cpus = <0x1>; + #size-cells = <0>; + microblaze_0: cpu@0 { + clock-frequency = <125000000>; + compatible = "xlnx,microblaze-7.10.d"; + d-cache-baseaddr = <0x90000000>; + d-cache-highaddr = <0x9fffffff>; + d-cache-line-size = <0x10>; + d-cache-size = <0x2000>; + device_type = "cpu"; + i-cache-baseaddr = <0x90000000>; + i-cache-highaddr = <0x9fffffff>; + i-cache-line-size = <0x10>; + i-cache-size = <0x2000>; + model = "microblaze,7.10.d"; + reg = <0>; + timebase-frequency = <125000000>; + xlnx,addr-tag-bits = <0xf>; + xlnx,allow-dcache-wr = <0x1>; + xlnx,allow-icache-wr = <0x1>; + xlnx,area-optimized = <0x0>; + xlnx,cache-byte-size = <0x2000>; + xlnx,d-lmb = <0x1>; + xlnx,d-opb = <0x0>; + xlnx,d-plb = <0x1>; + xlnx,data-size = <0x20>; + xlnx,dcache-addr-tag = <0xf>; + xlnx,dcache-always-used = <0x1>; + xlnx,dcache-byte-size = <0x2000>; + xlnx,dcache-line-len = <0x4>; + xlnx,dcache-use-fsl = <0x1>; + xlnx,debug-enabled = <0x1>; + xlnx,div-zero-exception = <0x1>; + xlnx,dopb-bus-exception = <0x0>; + xlnx,dynamic-bus-sizing = <0x1>; + xlnx,edge-is-positive = <0x1>; + xlnx,family = "virtex5"; + xlnx,endianness = <0x1>; + xlnx,fpu-exception = <0x1>; + xlnx,fsl-data-size = <0x20>; + xlnx,fsl-exception = <0x0>; + xlnx,fsl-links = <0x0>; + xlnx,i-lmb = <0x1>; + xlnx,i-opb = <0x0>; + xlnx,i-plb = <0x1>; + xlnx,icache-always-used = <0x1>; + xlnx,icache-line-len = <0x4>; + xlnx,icache-use-fsl = <0x1>; + xlnx,ill-opcode-exception = <0x1>; + xlnx,instance = "microblaze_0"; + xlnx,interconnect = <0x1>; + xlnx,interrupt-is-edge = <0x0>; + xlnx,iopb-bus-exception = <0x0>; + xlnx,mmu-dtlb-size = <0x4>; + xlnx,mmu-itlb-size = <0x2>; + xlnx,mmu-tlb-access = <0x3>; + xlnx,mmu-zones = <0x10>; + xlnx,number-of-pc-brk = <0x1>; + xlnx,number-of-rd-addr-brk = <0x0>; + xlnx,number-of-wr-addr-brk = <0x0>; + xlnx,opcode-0x0-illegal = <0x1>; + xlnx,pvr = <0x2>; + xlnx,pvr-user1 = <0x0>; + xlnx,pvr-user2 = <0x0>; + xlnx,reset-msr = <0x0>; + xlnx,sco = <0x0>; + xlnx,unaligned-exceptions = <0x1>; + xlnx,use-barrel = <0x1>; + xlnx,use-dcache = <0x1>; + xlnx,use-div = <0x1>; + xlnx,use-ext-brk = <0x1>; + xlnx,use-ext-nm-brk = <0x1>; + xlnx,use-extended-fsl-instr = <0x0>; + xlnx,use-fpu = <0x2>; + xlnx,use-hw-mul = <0x2>; + xlnx,use-icache = <0x1>; + xlnx,use-interrupt = <0x1>; + xlnx,use-mmu = <0x3>; + xlnx,use-msr-instr = <0x1>; + xlnx,use-pcmp-instr = <0x1>; + } ; + } ; + mb_plb: plb@0 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "xlnx,plb-v46-1.03.a", "xlnx,plb-v46-1.00.a", "simple-bus"; + ranges ; + FLASH: flash@a0000000 { + bank-width = <2>; + compatible = "xlnx,xps-mch-emc-2.00.a", "cfi-flash"; + reg = < 0xa0000000 0x2000000 >; + xlnx,family = "virtex5"; + xlnx,include-datawidth-matching-0 = <0x1>; + xlnx,include-datawidth-matching-1 = <0x0>; + xlnx,include-datawidth-matching-2 = <0x0>; + xlnx,include-datawidth-matching-3 = <0x0>; + xlnx,include-negedge-ioregs = <0x0>; + xlnx,include-plb-ipif = <0x1>; + 
xlnx,include-wrbuf = <0x1>; + xlnx,max-mem-width = <0x10>; + xlnx,mch-native-dwidth = <0x20>; + xlnx,mch-plb-clk-period-ps = <0x1f40>; + xlnx,mch-splb-awidth = <0x20>; + xlnx,mch0-accessbuf-depth = <0x10>; + xlnx,mch0-protocol = <0x0>; + xlnx,mch0-rddatabuf-depth = <0x10>; + xlnx,mch1-accessbuf-depth = <0x10>; + xlnx,mch1-protocol = <0x0>; + xlnx,mch1-rddatabuf-depth = <0x10>; + xlnx,mch2-accessbuf-depth = <0x10>; + xlnx,mch2-protocol = <0x0>; + xlnx,mch2-rddatabuf-depth = <0x10>; + xlnx,mch3-accessbuf-depth = <0x10>; + xlnx,mch3-protocol = <0x0>; + xlnx,mch3-rddatabuf-depth = <0x10>; + xlnx,mem0-width = <0x10>; + xlnx,mem1-width = <0x20>; + xlnx,mem2-width = <0x20>; + xlnx,mem3-width = <0x20>; + xlnx,num-banks-mem = <0x1>; + xlnx,num-channels = <0x0>; + xlnx,priority-mode = <0x0>; + xlnx,synch-mem-0 = <0x0>; + xlnx,synch-mem-1 = <0x0>; + xlnx,synch-mem-2 = <0x0>; + xlnx,synch-mem-3 = <0x0>; + xlnx,synch-pipedelay-0 = <0x2>; + xlnx,synch-pipedelay-1 = <0x2>; + xlnx,synch-pipedelay-2 = <0x2>; + xlnx,synch-pipedelay-3 = <0x2>; + xlnx,tavdv-ps-mem-0 = <0x1adb0>; + xlnx,tavdv-ps-mem-1 = <0x3a98>; + xlnx,tavdv-ps-mem-2 = <0x3a98>; + xlnx,tavdv-ps-mem-3 = <0x3a98>; + xlnx,tcedv-ps-mem-0 = <0x1adb0>; + xlnx,tcedv-ps-mem-1 = <0x3a98>; + xlnx,tcedv-ps-mem-2 = <0x3a98>; + xlnx,tcedv-ps-mem-3 = <0x3a98>; + xlnx,thzce-ps-mem-0 = <0x88b8>; + xlnx,thzce-ps-mem-1 = <0x1b58>; + xlnx,thzce-ps-mem-2 = <0x1b58>; + xlnx,thzce-ps-mem-3 = <0x1b58>; + xlnx,thzoe-ps-mem-0 = <0x1b58>; + xlnx,thzoe-ps-mem-1 = <0x1b58>; + xlnx,thzoe-ps-mem-2 = <0x1b58>; + xlnx,thzoe-ps-mem-3 = <0x1b58>; + xlnx,tlzwe-ps-mem-0 = <0x88b8>; + xlnx,tlzwe-ps-mem-1 = <0x0>; + xlnx,tlzwe-ps-mem-2 = <0x0>; + xlnx,tlzwe-ps-mem-3 = <0x0>; + xlnx,twc-ps-mem-0 = <0x2af8>; + xlnx,twc-ps-mem-1 = <0x3a98>; + xlnx,twc-ps-mem-2 = <0x3a98>; + xlnx,twc-ps-mem-3 = <0x3a98>; + xlnx,twp-ps-mem-0 = <0x11170>; + xlnx,twp-ps-mem-1 = <0x2ee0>; + xlnx,twp-ps-mem-2 = <0x2ee0>; + xlnx,twp-ps-mem-3 = <0x2ee0>; + xlnx,xcl0-linesize = <0x4>; + xlnx,xcl0-writexfer = <0x1>; + xlnx,xcl1-linesize = <0x4>; + xlnx,xcl1-writexfer = <0x1>; + xlnx,xcl2-linesize = <0x4>; + xlnx,xcl2-writexfer = <0x1>; + xlnx,xcl3-linesize = <0x4>; + xlnx,xcl3-writexfer = <0x1>; + } ; + Hard_Ethernet_MAC: xps-ll-temac@81c00000 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "xlnx,compound"; + ranges ; + ethernet@81c00000 { + compatible = "xlnx,xps-ll-temac-1.01.b", "xlnx,xps-ll-temac-1.00.a"; + device_type = "network"; + interrupt-parent = <&xps_intc_0>; + interrupts = < 5 2 >; + llink-connected = <&PIM3>; + local-mac-address = [ 00 0a 35 00 00 00 ]; + reg = < 0x81c00000 0x40 >; + xlnx,bus2core-clk-ratio = <0x1>; + xlnx,phy-type = <0x1>; + xlnx,phyaddr = <0x1>; + xlnx,rxcsum = <0x0>; + xlnx,rxfifo = <0x1000>; + xlnx,temac-type = <0x0>; + xlnx,txcsum = <0x0>; + xlnx,txfifo = <0x1000>; + } ; + } ; + IIC_EEPROM: i2c@81600000 { + compatible = "xlnx,xps-iic-2.00.a"; + interrupt-parent = <&xps_intc_0>; + interrupts = < 6 2 >; + reg = < 0x81600000 0x10000 >; + xlnx,clk-freq = <0x7735940>; + xlnx,family = "virtex5"; + xlnx,gpo-width = <0x1>; + xlnx,iic-freq = <0x186a0>; + xlnx,scl-inertial-delay = <0x0>; + xlnx,sda-inertial-delay = <0x0>; + xlnx,ten-bit-adr = <0x0>; + } ; + LEDs_8Bit: gpio@81400000 { + compatible = "xlnx,xps-gpio-1.00.a"; + interrupt-parent = <&xps_intc_0>; + interrupts = < 7 2 >; + reg = < 0x81400000 0x10000 >; + xlnx,all-inputs = <0x0>; + xlnx,all-inputs-2 = <0x0>; + xlnx,dout-default = <0x0>; + xlnx,dout-default-2 = <0x0>; + xlnx,family = "virtex5"; + xlnx,gpio-width 
= <0x8>; + xlnx,interrupt-present = <0x1>; + xlnx,is-bidir = <0x1>; + xlnx,is-bidir-2 = <0x1>; + xlnx,is-dual = <0x0>; + xlnx,tri-default = <0xffffffff>; + xlnx,tri-default-2 = <0xffffffff>; + #gpio-cells = <2>; + gpio-controller; + } ; + + gpio-leds { + compatible = "gpio-leds"; + + heartbeat { + label = "Heartbeat"; + gpios = <&LEDs_8Bit 4 1>; + linux,default-trigger = "heartbeat"; + }; + + yellow { + label = "Yellow"; + gpios = <&LEDs_8Bit 5 1>; + }; + + red { + label = "Red"; + gpios = <&LEDs_8Bit 6 1>; + }; + + green { + label = "Green"; + gpios = <&LEDs_8Bit 7 1>; + }; + } ; + RS232_Uart_1: serial@84000000 { + clock-frequency = <125000000>; + compatible = "xlnx,xps-uartlite-1.00.a"; + current-speed = <115200>; + device_type = "serial"; + interrupt-parent = <&xps_intc_0>; + interrupts = < 8 0 >; + port-number = <0>; + reg = < 0x84000000 0x10000 >; + xlnx,baudrate = <0x1c200>; + xlnx,data-bits = <0x8>; + xlnx,family = "virtex5"; + xlnx,odd-parity = <0x0>; + xlnx,use-parity = <0x0>; + } ; + SysACE_CompactFlash: sysace@83600000 { + compatible = "xlnx,xps-sysace-1.00.a"; + interrupt-parent = <&xps_intc_0>; + interrupts = < 4 2 >; + reg = < 0x83600000 0x10000 >; + xlnx,family = "virtex5"; + xlnx,mem-width = <0x10>; + } ; + debug_module: debug@84400000 { + compatible = "xlnx,mdm-1.00.d"; + reg = < 0x84400000 0x10000 >; + xlnx,family = "virtex5"; + xlnx,interconnect = <0x1>; + xlnx,jtag-chain = <0x2>; + xlnx,mb-dbg-ports = <0x1>; + xlnx,uart-width = <0x8>; + xlnx,use-uart = <0x1>; + xlnx,write-fsl-ports = <0x0>; + } ; + mpmc@90000000 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "xlnx,mpmc-4.02.a"; + ranges ; + PIM3: sdma@84600180 { + compatible = "xlnx,ll-dma-1.00.a"; + interrupt-parent = <&xps_intc_0>; + interrupts = < 2 2 1 2 >; + reg = < 0x84600180 0x80 >; + } ; + } ; + xps_intc_0: interrupt-controller@81800000 { + #interrupt-cells = <0x2>; + compatible = "xlnx,xps-intc-1.00.a"; + interrupt-controller ; + reg = < 0x81800000 0x10000 >; + xlnx,kind-of-intr = <0x100>; + xlnx,num-intr-inputs = <0x9>; + } ; + xps_timer_1: timer@83c00000 { + compatible = "xlnx,xps-timer-1.00.a"; + interrupt-parent = <&xps_intc_0>; + interrupts = < 3 2 >; + reg = < 0x83c00000 0x10000 >; + xlnx,count-width = <0x20>; + xlnx,family = "virtex5"; + xlnx,gen0-assert = <0x1>; + xlnx,gen1-assert = <0x1>; + xlnx,one-timer-only = <0x0>; + xlnx,trig0-assert = <0x1>; + xlnx,trig1-assert = <0x1>; + } ; + } ; +} ; diff --git a/include/litmus/edf_split_common.h b/include/litmus/edf_split_common.h new file mode 100644 index 000000000000..4e7c0ce23c9d --- /dev/null +++ b/include/litmus/edf_split_common.h @@ -0,0 +1,25 @@ +/* + * EDF common data structures and utility functions shared by all EDF + * based scheduler plugins + */ + +/* CLEANUP: Add comments and make it less messy. 
+ *
+ */
+
+#ifndef __UNC_EDF_SPLIT_COMMON_H__
+#define __UNC_EDF_SPLIT_COMMON_H__
+
+#include <litmus/rt_domain.h>
+
+void edf_split_domain_init(rt_domain_t* rt, check_resched_needed_t resched,
+			   release_jobs_t release);
+
+int edf_split_higher_prio(struct task_struct* first,
+			  struct task_struct* second);
+
+int edf_split_ready_order(struct bheap_node* a, struct bheap_node* b);
+
+int edf_split_preemption_needed(rt_domain_t* rt, struct task_struct *t);
+
+#endif
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
index e35c38c4c0a2..8bd9ab2258da 100644
--- a/include/litmus/litmus.h
+++ b/include/litmus/litmus.h
@@ -69,6 +69,7 @@ void litmus_exit_task(struct task_struct *tsk);
 #define get_deadline(t)		(tsk_rt(t)->job_params.deadline)
 #define get_release(t)		(tsk_rt(t)->job_params.release)
 #define get_lateness(t)		(tsk_rt(t)->job_params.lateness)
+#define is_in_crit_section(t)	(tsk_rt(t)->in_crit_section)
 
 /* release policy macros */
 #define is_periodic(t)		(get_release_policy(t) == TASK_PERIODIC)
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index 138799fbaad7..65798427d5c3 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -76,6 +76,7 @@ struct rt_task {
 	lt_t		period;
 	lt_t		relative_deadline;
 	lt_t		phase;
+	int		split;
 	unsigned int	cpu;
 	unsigned int	priority;
 	task_class_t	cls;
@@ -143,6 +144,8 @@ struct rt_job {
 	lt_t	release;
 	/* What is the current deadline? */
 	lt_t	deadline;
+	/* What is the deadline of the current subjob under splitting? */
+	lt_t	subjob_deadline;
 
 	/* How much service has this job received so far? */
 	lt_t	exec_time;
@@ -191,7 +194,6 @@ struct rt_param {
 	unsigned int	priority_boosted:1;
 	/* If so, when did this start? */
 	lt_t		boost_start_time;
-
 	/* How many LITMUS^RT locks does the task currently hold/wait for? */
 	unsigned int	num_locks_held;
 	/* How many PCP/SRP locks does the task currently hold/wait for? */
diff --git a/litmus/Makefile b/litmus/Makefile
index 2bddc94a399f..8f3b8b4c3342 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -11,6 +11,7 @@ obj-y     = sched_plugin.o litmus.o \
 	   sync.o \
 	   rt_domain.o \
 	   edf_common.o \
+	   edf_split_common.o \
 	   fp_common.o \
 	   fdso.o \
 	   locking.o \
@@ -21,7 +22,8 @@ obj-y     = sched_plugin.o litmus.o \
 	   uncachedev.o \
 	   sched_gsn_edf.o \
 	   sched_psn_edf.o \
-	   sched_pfp.o
+	   sched_pfp.o \
+	   sched_cfl_split.o
 
 obj-$(CONFIG_PLUGIN_CEDF) += sched_cedf.o
 obj-$(CONFIG_PLUGIN_PFAIR) += sched_pfair.o
diff --git a/litmus/edf_split_common.c b/litmus/edf_split_common.c
new file mode 100644
index 000000000000..76b100c9c5b9
--- /dev/null
+++ b/litmus/edf_split_common.c
@@ -0,0 +1,171 @@
+/*
+ * kernel/edf_split_common.c
+ *
+ * Common functions for EDF-based schedulers with split jobs.
+ */
+
+#include <linux/percpu.h>
+#include <linux/sched.h>
+#include <linux/list.h>
+
+#include <litmus/litmus.h>
+#include <litmus/sched_plugin.h>
+#include <litmus/sched_trace.h>
+
+#include <litmus/edf_split_common.h>
+
+#ifdef CONFIG_EDF_TIE_BREAK_LATENESS_NORM
+#include <litmus/fpmath.h>
+#endif
+
+#ifdef CONFIG_EDF_TIE_BREAK_HASH
+#include <linux/hash.h>
+static inline long edf_split_hash(struct task_struct *t)
+{
+	/* pid is 32 bits, so normally we would shove that into the
+	 * upper 32-bits and put the job number in the bottom
+	 * and hash the 64-bit number with hash_64(). Sadly,
+	 * in testing, hash_64() doesn't distribute keys where the
+	 * upper bits are close together (as would be the case with
+	 * pids) and job numbers are equal (as would be the case with
+	 * synchronous task sets with all relative deadlines equal).
+	 *
+	 * A 2006 Linux patch proposed the following solution
+	 * (but for some reason it wasn't accepted...).
+	 *
+	 * At least this workaround works for 32-bit systems as well.
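+	 *
+	 * (Concretely: the expression below first hashes the 32-bit job
+	 * number so that consecutive job numbers spread over the full
+	 * 32-bit range, XORs the result with the pid to decorrelate
+	 * tasks, then hashes once more to mix the combined value.)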
+ */ + return hash_32(hash_32((u32)tsk_rt(t)->job_params.job_no, 32) ^ t->pid, 32); +} +#endif + +/* edf_higher_prio - returns true if first has a higher EDF priority + * than second. Deadline ties are broken by PID. + * + * both first and second may be NULL + */ +int edf_split_higher_prio(struct task_struct* first, + struct task_struct* second) +{ + struct task_struct *first_task = first; + struct task_struct *second_task = second; + + /* There is no point in comparing a task to itself. */ + if (first && first == second) { + TRACE_TASK(first, + "WARNING: pointless edf priority comparison.\n"); + return 0; + } + + + /* check for NULL tasks */ + if (!first || !second) + return first && !second; + + if (earlier_deadline(first_task, second_task)) { + return 1; + } + else if (get_deadline(first_task) == get_deadline(second_task)) { + /* Need to tie break. All methods must set pid_break to 0/1 if + * first_task does not have priority over second_task. + */ + int pid_break; + +#if defined(CONFIG_EDF_TIE_BREAK_LATENESS) + /* Tie break by lateness. Jobs with greater lateness get + * priority. This should spread tardiness across all tasks, + * especially in task sets where all tasks have the same + * period and relative deadlines. + */ + if (get_lateness(first_task) > get_lateness(second_task)) { + return 1; + } + pid_break = (get_lateness(first_task) == get_lateness(second_task)); + + +#elif defined(CONFIG_EDF_TIE_BREAK_LATENESS_NORM) + /* Tie break by lateness, normalized by relative deadline. Jobs with + * greater normalized lateness get priority. + * + * Note: Considered using the algebraically equivalent + * lateness(first)*relative_deadline(second) > + lateness(second)*relative_deadline(first) + * to avoid fixed-point math, but values are prone to overflow if inputs + * are on the order of several seconds, even in 64-bit. + */ + fp_t fnorm = _frac(get_lateness(first_task), + get_rt_relative_deadline(first_task)); + fp_t snorm = _frac(get_lateness(second_task), + get_rt_relative_deadline(second_task)); + if (_gt(fnorm, snorm)) { + return 1; + } + pid_break = _eq(fnorm, snorm); + +#elif defined(CONFIG_EDF_TIE_BREAK_HASH) + /* Tie break by comparing hashs of (pid, job#) tuple. There should be + * a 50% chance that first_task has a higher priority than second_task. + */ + long fhash = edf_hash(first_task); + long shash = edf_hash(second_task); + if (fhash < shash) { + return 1; + } + pid_break = (fhash == shash); +#else + + + /* CONFIG_EDF_PID_TIE_BREAK */ + pid_break = 1; // fall through to tie-break by pid; +#endif + + /* Tie break by pid */ + if(pid_break) { + if (first_task->pid < second_task->pid) { + return 1; + } + else if (first_task->pid == second_task->pid) { + /* If the PIDs are the same then the task with the + * inherited priority wins. + */ + if (!second->rt_param.inh_task) { + return 1; + } + } + } + } + return 0; /* fall-through. prio(second_task) > prio(first_task) */ +} + +int edf_split_ready_order(struct bheap_node* a, struct bheap_node* b) +{ + return edf_split_higher_prio(bheap2task(a), bheap2task(b)); +} + +void edf_split_domain_init(rt_domain_t* rt, check_resched_needed_t resched, + release_jobs_t release) +{ + rt_domain_init(rt, edf_split_ready_order, resched, release); +} + +/* need_to_preempt - check whether the task t needs to be preempted + * call only with irqs disabled and with ready_lock acquired + * THIS DOES NOT TAKE NON-PREEMPTIVE SECTIONS INTO ACCOUNT! 
+ */
+int edf_split_preemption_needed(rt_domain_t* rt, struct task_struct *t)
+{
+	/* we need the read lock for edf_ready_queue */
+	/* no need to preempt if there is nothing pending */
+	if (!__jobs_pending(rt))
+		return 0;
+	/* we need to reschedule if t doesn't exist */
+	if (!t)
+		return 1;
+
+	/* NOTE: We cannot check for non-preemptibility since we
+	 *	 don't know what address space we're currently in.
+	 */
+
+	/* make sure to get non-rt stuff out of the way */
+	return !is_realtime(t) || edf_split_higher_prio(__next_ready(rt), t);
+}
diff --git a/litmus/sched_cfl_split.c b/litmus/sched_cfl_split.c
new file mode 100644
index 000000000000..37180e82bb4b
--- /dev/null
+++ b/litmus/sched_cfl_split.c
@@ -0,0 +1,1006 @@
+/*
+ * litmus/sched_cfl_split.c
+ *
+ * Implementation of a clustered version of the G-FL scheduling algorithm,
+ * with job splitting.
+ *
+ * This implementation is based on G-FL-split:
+ * - CPUs are clustered around L2 or L3 caches.
+ * - Cluster topology is automatically detected (this is arch dependent
+ *   and works only on x86 at the moment --- and only with modern
+ *   CPUs that export cpuid4 information)
+ * - The plugin _does not_ attempt to put tasks in the right cluster, i.e.,
+ *   the programmer needs to be aware of the topology to place tasks
+ *   in the desired cluster
+ * - default clustering is around L2 cache (cache index = 2)
+ *   supported clusters are: L1 (private cache: pedf), L2, L3, ALL (all
+ *   online_cpus are placed in a single cluster).
+ *
+ * For details on functions, take a look at sched_gsn_edf.c
+ *
+ * Currently, we do not support changes in the number of online cpus.
+ * If the num_online_cpus() dynamically changes, the plugin is broken.
+ *
+ * This version uses the simple approach and serializes all scheduling
+ * decisions by the use of a queue lock. This is probably not the
+ * best way to do it, but it should suffice for now.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/percpu.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include <linux/module.h>
+
+#include <litmus/litmus.h>
+#include <litmus/jobs.h>
+#include <litmus/preempt.h>
+#include <litmus/budget.h>
+#include <litmus/sched_plugin.h>
+#include <litmus/edf_split_common.h>
+#include <litmus/sched_trace.h>
+
+#include <litmus/clustered.h>
+
+#include <litmus/bheap.h>
+
+#ifdef CONFIG_SCHED_CPU_AFFINITY
+#include <litmus/affinity.h>
+#endif
+
+/* to configure the cluster size */
+#include <litmus/litmus_proc.h>
+#include <linux/uaccess.h>
+
+/* Reference configuration variable. Determines which cache level is used to
+ * group CPUs into clusters. GLOBAL_CLUSTER, which is the default, means that
+ * all CPUs form a single cluster (just like G-FL).
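+ *
+ * Under G-FL, a job's priority is its deadline shifted earlier by
+ * ((m - 1) / m) * C, where m is the processor count (num_online_cpus()
+ * here) and C the worst-case execution cost; get_proper_deadline() below
+ * applies the same shift per subjob, with C and the period scaled down
+ * by the split factor.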
+ */
+static enum cache_level cluster_config = GLOBAL_CLUSTER;
+
+struct clusterdomain;
+
+/* cpu_entry_t - maintain the linked and scheduled state
+ *
+ * A cpu also contains a pointer to the cflsplit_domain_t cluster
+ * that owns it (struct clusterdomain*)
+ */
+typedef struct {
+	int			cpu;
+	struct clusterdomain*	cluster;	/* owning cluster */
+	struct task_struct*	linked;		/* only RT tasks */
+	struct task_struct*	scheduled;	/* only RT tasks */
+	atomic_t		will_schedule;	/* prevent unneeded IPIs */
+	struct bheap_node*	hn;
+	struct hrtimer		split_timer;
+	int			timer_armed;
+} cpu_entry_t;
+
+/* one cpu_entry_t per CPU */
+DEFINE_PER_CPU(cpu_entry_t, cflsplit_cpu_entries);
+
+#define set_will_schedule() \
+	(atomic_set(&__get_cpu_var(cflsplit_cpu_entries).will_schedule, 1))
+#define clear_will_schedule() \
+	(atomic_set(&__get_cpu_var(cflsplit_cpu_entries).will_schedule, 0))
+#define test_will_schedule(cpu) \
+	(atomic_read(&per_cpu(cflsplit_cpu_entries, cpu).will_schedule))
+
+/*
+ * In C-FL-split there is a cflsplit domain _per_ cluster
+ * The number of clusters is dynamically determined according to the
+ * total number of online cpus and the cluster size
+ */
+typedef struct clusterdomain {
+	/* rt_domain for this cluster */
+	rt_domain_t	domain;
+	/* cpus in this cluster */
+	cpu_entry_t*	*cpus;
+	/* map of this cluster cpus */
+	cpumask_var_t	cpu_map;
+	/* the cpus queue themselves according to priority in here */
+	struct bheap_node *heap_node;
+	struct bheap      cpu_heap;
+	/* lock for this cluster */
+#define cluster_lock domain.ready_lock
+} cflsplit_domain_t;
+
+/* a cflsplit_domain per cluster; allocation is done at init/activation time */
+cflsplit_domain_t *cflsplit;
+
+#define remote_cluster(cpu)	((cflsplit_domain_t *) per_cpu(cflsplit_cpu_entries, cpu).cluster)
+#define task_cpu_cluster(task)	remote_cluster(get_partition(task))
+
+/* Uncomment WANT_ALL_SCHED_EVENTS if you want to see all scheduling
+ * decisions in the TRACE() log; uncomment VERBOSE_INIT for verbose
+ * information during the initialization of the plugin (e.g., topology)
+#define WANT_ALL_SCHED_EVENTS
+ */
+#define VERBOSE_INIT
+
+inline static int get_slice_num(struct task_struct* t)
+{
+	int basic = ((t->rt_param.job_params.exec_time *
+		      t->rt_param.task_params.split) /
+		     t->rt_param.task_params.exec_cost) + 1;
+	if (basic <= t->rt_param.task_params.split) {
+		return basic;
+	}
+	else {
+		/* Since we don't police budget, just leave it where it is. */
+		return t->rt_param.task_params.split;
+	}
+}
+
+/* Returns the appropriate subjob deadline. */
+inline static lt_t get_proper_deadline(struct task_struct* t)
+{
+	unsigned int num_cpus = num_online_cpus();
+	return t->rt_param.job_params.release +
+	       ((t->rt_param.task_params.period * get_slice_num(t))
+	        / t->rt_param.task_params.split)
+	       /* G-FL correction */
+	       - (((num_cpus - 1) * t->rt_param.task_params.exec_cost)
+	          / (num_cpus * t->rt_param.task_params.split));
+}
+
+/* Tells us whether the current subjob deadline is out of date. */
+inline static int needs_deadline_move(struct task_struct* t)
+{
+	BUG_ON(get_proper_deadline(t) < t->rt_param.job_params.subjob_deadline);
+	return get_proper_deadline(t) != tsk_rt(t)->job_params.subjob_deadline;
+}
+
+/* Returns the execution time remaining until the next deadline move.
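+ * (Worked example: with exec_cost = 30 ms, split = 3 and exec_time = 12 ms,
+ * get_slice_num() = (12 * 3 / 30) + 1 = 2, the slice boundary lies at
+ * (2 * 30) / 3 = 20 ms of service, and this returns 20 + 1 - 12 = 9 ms.)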
+ * 0 means the task has no more deadline moves
+ */
+inline static lt_t time_to_next_move(struct task_struct* t)
+{
+	if (get_slice_num(t) == t->rt_param.task_params.split) {
+		return 0;
+	}
+	/* The +1 upper-bounds the ceiling, since integer division is a floor. */
+	return ((get_slice_num(t) * t->rt_param.task_params.exec_cost)
+		/ t->rt_param.task_params.split) + 1
+	       - t->rt_param.job_params.exec_time;
+}
+
+/* Timer stuff - similar to budget.c. */
+static enum hrtimer_restart on_split_timeout(struct hrtimer *timer)
+{
+	cpu_entry_t* st = container_of(timer,
+				       cpu_entry_t,
+				       split_timer);
+
+	unsigned long flags;
+
+	local_irq_save(flags);
+	TRACE("split timer fired.\n");
+	st->timer_armed = 0;
+	/* Activate scheduler */
+	litmus_reschedule_local();
+	local_irq_restore(flags);
+
+	return HRTIMER_NORESTART;
+}
+
+static void cancel_split_timer(cpu_entry_t* ce)
+{
+	int ret;
+
+	TRACE("cancelling split timer.\n");
+
+	/* Since interrupts are disabled and ce->timer_armed is only
+	 * modified locally, we do not need any locks.
+	 */
+
+	if (ce->timer_armed) {
+		ret = hrtimer_try_to_cancel(&ce->split_timer);
+		/* Should never be inactive. */
+		BUG_ON(ret == 0);
+		/* Should never be running concurrently. */
+		BUG_ON(ret == -1);
+
+		ce->timer_armed = 0;
+	}
+}
+
+/* assumes called with IRQs off */
+static void arm_split_timer(cpu_entry_t *ce,
+			    struct task_struct* t)
+{
+	lt_t when_to_fire;
+	lt_t time_to_move;
+	TRACE_TASK(t, "arming split timer.\n");
+
+	/* __hrtimer_start_range_ns() cancels the timer
+	 * anyway, so we don't have to check whether it is still armed */
+
+	/* We won't do any new deadline moves if the budget has been exhausted. */
+	if (likely(!is_np(t) && (time_to_move = time_to_next_move(t)))) {
+		when_to_fire = litmus_clock() + time_to_move;
+		TRACE_TASK(t, "actually arming for %llu into the future\n",
+			   time_to_move);
+		__hrtimer_start_range_ns(&ce->split_timer,
+					 ns_to_ktime(when_to_fire),
+					 0 /* delta */,
+					 HRTIMER_MODE_ABS_PINNED,
+					 0 /* no wakeup */);
+		ce->timer_armed = 1;
+	}
+}
+
+static int cpu_lower_prio(struct bheap_node *_a, struct bheap_node *_b)
+{
+	cpu_entry_t *a, *b;
+	a = _a->value;
+	b = _b->value;
+	/* Note that a and b are inverted: we want the lowest-priority CPU at
+	 * the top of the heap.
+	 */
+	return edf_split_higher_prio(b->linked, a->linked);
+}
+
+/* update_cpu_position - Move the cpu entry to the correct place to maintain
+ *                       order in the cpu queue. Caller must hold cflsplit lock.
+ */
+static void update_cpu_position(cpu_entry_t *entry)
+{
+	cflsplit_domain_t *cluster = entry->cluster;
+
+	if (likely(bheap_node_in_heap(entry->hn)))
+		bheap_delete(cpu_lower_prio,
+			     &cluster->cpu_heap,
+			     entry->hn);
+
+	bheap_insert(cpu_lower_prio, &cluster->cpu_heap, entry->hn);
+}
+
+/* caller must hold cflsplit lock */
+static cpu_entry_t* lowest_prio_cpu(cflsplit_domain_t *cluster)
+{
+	struct bheap_node* hn;
+	hn = bheap_peek(cpu_lower_prio, &cluster->cpu_heap);
+	return hn->value;
+}
+
+
+/* link_task_to_cpu - Update the link of a CPU.
+ *                    Handles the case where the to-be-linked task is already
+ *                    scheduled on a different CPU.
+ */
+static noinline void link_task_to_cpu(struct task_struct* linked,
+				      cpu_entry_t *entry)
+{
+	cpu_entry_t *sched;
+	struct task_struct* tmp;
+	int on_cpu;
+
+	BUG_ON(linked && !is_realtime(linked));
+
+	/* Currently linked task is set to be unlinked. */
+	if (entry->linked) {
+		entry->linked->rt_param.linked_on = NO_CPU;
+	}
+
+	/* Link new task to CPU. */
+	if (linked) {
+		/* handle the case where the task is already scheduled somewhere!
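+		 * (If so, we link it to the CPU it is running on instead and
+		 * continue with that CPU's previously linked task, avoiding
+		 * an unnecessary migration.)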
+		 */
+		on_cpu = linked->rt_param.scheduled_on;
+		if (on_cpu != NO_CPU) {
+			sched = &per_cpu(cflsplit_cpu_entries, on_cpu);
+			/* this should only happen if not linked already */
+			BUG_ON(sched->linked == linked);
+
+			/* If we are already scheduled on the CPU to which we
+			 * wanted to link, we don't need to do the swap --
+			 * we just link ourselves to the CPU and depend on
+			 * the caller to get things right.
+			 */
+			if (entry != sched) {
+				TRACE_TASK(linked,
+					   "already scheduled on %d, updating link.\n",
+					   sched->cpu);
+				tmp = sched->linked;
+				linked->rt_param.linked_on = sched->cpu;
+				sched->linked = linked;
+				update_cpu_position(sched);
+				linked = tmp;
+			}
+		}
+		if (linked) /* might be NULL due to swap */
+			linked->rt_param.linked_on = entry->cpu;
+	}
+	entry->linked = linked;
+#ifdef WANT_ALL_SCHED_EVENTS
+	if (linked)
+		TRACE_TASK(linked, "linked to %d.\n", entry->cpu);
+	else
+		TRACE("NULL linked to %d.\n", entry->cpu);
+#endif
+	update_cpu_position(entry);
+}
+
+/* unlink - Make sure a task is not linked any longer to an entry
+ *          where it was linked before. Must hold cflsplit_lock.
+ */
+static noinline void unlink(struct task_struct* t)
+{
+	cpu_entry_t *entry;
+
+	if (t->rt_param.linked_on != NO_CPU) {
+		/* unlink */
+		entry = &per_cpu(cflsplit_cpu_entries, t->rt_param.linked_on);
+		t->rt_param.linked_on = NO_CPU;
+		link_task_to_cpu(NULL, entry);
+	} else if (is_queued(t)) {
+		/* This is an interesting situation: t is scheduled,
+		 * but was just recently unlinked. It cannot be
+		 * linked anywhere else (because then it would have
+		 * been relinked to this CPU), thus it must be in some
+		 * queue. We must remove it from the list in this
+		 * case.
+		 *
+		 * In the C-FL-split case it should be somewhere in the queue
+		 * of its domain; we can therefore obtain the domain via
+		 * task_cpu_cluster().
+		 */
+		remove(&(task_cpu_cluster(t))->domain, t);
+	}
+}
+
+
+/* preempt - force a CPU to reschedule
+ */
+static void preempt(cpu_entry_t *entry)
+{
+	preempt_if_preemptable(entry->scheduled, entry->cpu);
+}
+
+/* requeue - Put an unlinked task into its cflsplit domain.
+ *           Caller must hold cflsplit_lock.
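+ *           (released jobs go straight onto the ready queue; jobs whose
+ *           release time lies in the future go onto the release queue)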
+ */
+static noinline void requeue(struct task_struct* task)
+{
+	cflsplit_domain_t *cluster;
+	BUG_ON(!task);
+	cluster = task_cpu_cluster(task);
+	/* sanity check before insertion */
+	BUG_ON(is_queued(task));
+
+	if (is_early_releasing(task) || is_released(task, litmus_clock()))
+		__add_ready(&cluster->domain, task);
+	else {
+		/* it has got to wait */
+		add_release(&cluster->domain, task);
+	}
+}
+
+#ifdef CONFIG_SCHED_CPU_AFFINITY
+static cpu_entry_t* cflsplit_get_nearest_available_cpu(
+				cflsplit_domain_t *cluster, cpu_entry_t *start)
+{
+	cpu_entry_t *affinity;
+
+	get_nearest_available_cpu(affinity, start, cflsplit_cpu_entries,
+#ifdef CONFIG_RELEASE_MASTER
+				  cluster->domain.release_master
+#else
+				  NO_CPU
+#endif
+				 );
+
+	/* make sure CPU is in our cluster */
+	if (affinity && cpu_isset(affinity->cpu, *cluster->cpu_map))
+		return(affinity);
+	else
+		return(NULL);
+}
+#endif
+
+
+/* check for any necessary preemptions */
+static void check_for_preemptions(cflsplit_domain_t *cluster)
+{
+	struct task_struct *task;
+	cpu_entry_t *last;
+
+	for (last = lowest_prio_cpu(cluster);
+	     edf_split_preemption_needed(&cluster->domain, last->linked);
+	     last = lowest_prio_cpu(cluster)) {
+		/* preemption necessary */
+		task = __take_ready(&cluster->domain);
+		TRACE("check_for_preemptions: attempting to link task %d to %d\n",
+		      task->pid, last->cpu);
+#ifdef CONFIG_SCHED_CPU_AFFINITY
+		{
+			cpu_entry_t *affinity =
+					cflsplit_get_nearest_available_cpu(cluster,
+						&per_cpu(cflsplit_cpu_entries, task_cpu(task)));
+			if (affinity)
+				last = affinity;
+			else if (requeue_preempted_job(last->linked))
+				requeue(last->linked);
+		}
+#else
+		if (requeue_preempted_job(last->linked))
+			requeue(last->linked);
+#endif
+		link_task_to_cpu(task, last);
+		preempt(last);
+	}
+}
+
+/* cflsplit_job_arrival: task is either resumed or released */
+static noinline void cflsplit_job_arrival(struct task_struct* task)
+{
+	cflsplit_domain_t *cluster;
+	BUG_ON(!task);
+	cluster = task_cpu_cluster(task);
+
+	requeue(task);
+	check_for_preemptions(cluster);
+}
+
+static void cflsplit_release_jobs(rt_domain_t* rt, struct bheap* tasks)
+{
+	cflsplit_domain_t* cluster = container_of(rt, cflsplit_domain_t, domain);
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&cluster->cluster_lock, flags);
+
+	__merge_ready(&cluster->domain, tasks);
+	check_for_preemptions(cluster);
+
+	raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags);
+}
+
+/* caller holds cflsplit_lock */
+static noinline void job_completion(struct task_struct *t, int forced)
+{
+	BUG_ON(!t);
+
+	sched_trace_task_completion(t, forced);
+
+	TRACE_TASK(t, "job_completion().\n");
+
+	/* set flags */
+	tsk_rt(t)->completed = 0;
+	/* prepare for next period */
+	prepare_for_next_period(t);
+	/* We now also set the subjob deadline to what it should be for
+	 * scheduling priority.
+	 */
+	t->rt_param.job_params.subjob_deadline = get_proper_deadline(t);
+	if (is_early_releasing(t) || is_released(t, litmus_clock()))
+		sched_trace_task_release(t);
+	/* unlink */
+	unlink(t);
+	/* requeue
+	 * But don't requeue a blocking task. */
+	if (is_running(t))
+		cflsplit_job_arrival(t);
+}
+
+static void move_deadline(struct task_struct *t)
+{
+	tsk_rt(t)->job_params.subjob_deadline = get_proper_deadline(t);
+	/* Check if rescheduling needed with lower priority. */
+	unlink(t);
+	cflsplit_job_arrival(t);
+}
+
+/* cflsplit_tick - this function is called for every local timer
+ *                 interrupt.
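+ *                 (this handles budget enforcement only; subjob deadline
+ *                 moves are driven by the per-CPU split timer armed in
+ *                 arm_split_timer())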
+ * + * checks whether the current task has expired and checks + * whether we need to preempt it if it has not expired + */ +static void cflsplit_tick(struct task_struct* t) +{ + if (is_realtime(t) && budget_enforced(t) && budget_exhausted(t)) { + if (!is_np(t)) { + /* np tasks will be preempted when they become + * preemptable again + */ + litmus_reschedule_local(); + set_will_schedule(); + TRACE("cflsplit_scheduler_tick: " + "%d is preemptable " + " => FORCE_RESCHED\n", t->pid); + } else if (is_user_np(t)) { + TRACE("cflsplit_scheduler_tick: " + "%d is non-preemptable, " + "preemption delayed.\n", t->pid); + request_exit_np(t); + } + } +} + +/* Getting schedule() right is a bit tricky. schedule() may not make any + * assumptions on the state of the current task since it may be called for a + * number of reasons. The reasons include a scheduler_tick() determined that it + * was necessary, because sys_exit_np() was called, because some Linux + * subsystem determined so, or even (in the worst case) because there is a bug + * hidden somewhere. Thus, we must take extreme care to determine what the + * current state is. + * + * The CPU could currently be scheduling a task (or not), be linked (or not). + * + * The following assertions for the scheduled task could hold: + * + * - !is_running(scheduled) // the job blocks + * - scheduled->timeslice == 0 // the job completed (forcefully) + * - is_completed() // the job completed (by syscall) + * - linked != scheduled // we need to reschedule (for any reason) + * - is_np(scheduled) // rescheduling must be delayed, + * sys_exit_np must be requested + * + * Any of these can occur together. + */ +static struct task_struct* cflsplit_schedule(struct task_struct * prev) +{ + cpu_entry_t* entry = &__get_cpu_var(cflsplit_cpu_entries); + cflsplit_domain_t *cluster = entry->cluster; + int out_of_time, sleep, preempt, np, exists, blocks, needs_move; + struct task_struct* next = NULL; + +#ifdef CONFIG_RELEASE_MASTER + /* Bail out early if we are the release master. + * The release master never schedules any real-time tasks. + */ + if (unlikely(cluster->domain.release_master == entry->cpu)) { + sched_state_task_picked(); + return NULL; + } +#endif + + raw_spin_lock(&cluster->cluster_lock); + clear_will_schedule(); + + /* sanity checking */ + BUG_ON(entry->scheduled && entry->scheduled != prev); + BUG_ON(entry->scheduled && !is_realtime(prev)); + BUG_ON(is_realtime(prev) && !entry->scheduled); + + /* (0) Determine state */ + exists = entry->scheduled != NULL; + blocks = exists && !is_running(entry->scheduled); + out_of_time = exists && + budget_enforced(entry->scheduled) && + budget_exhausted(entry->scheduled); + needs_move = exists && needs_deadline_move(entry->scheduled); + np = exists && is_np(entry->scheduled); + sleep = exists && is_completed(entry->scheduled); + preempt = entry->scheduled != entry->linked; + +#ifdef WANT_ALL_SCHED_EVENTS + TRACE_TASK(prev, "invoked cflsplit_schedule.\n"); +#endif + + if (exists) + TRACE_TASK(prev, + "blocks:%d out_of_time:%d needs_move: %d np:%d" + " sleep:%d preempt:%d state:%d sig:%d\n", + blocks, out_of_time, needs_move, np, sleep, preempt, + prev->state, signal_pending(prev)); + if (entry->linked && preempt) + TRACE_TASK(prev, "will be preempted by %s/%d\n", + entry->linked->comm, entry->linked->pid); + + + /* If a task blocks we have no choice but to reschedule. + */ + if (blocks) + unlink(entry->scheduled); + + /* Request a sys_exit_np() call if we would like to preempt but cannot. 
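+	 * (request_exit_np() only flags the task, so that it calls into the
+	 * scheduler again as soon as it leaves its non-preemptive section.)
+	 *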
+	 * We need to make sure to update the link structure anyway in case
+	 * that we are still linked. Multiple calls to request_exit_np() don't
+	 * hurt.
+	 *
+	 * Job deadline moves are handled similarly.
+	 */
+	if (np && (out_of_time || preempt || sleep)) {
+		unlink(entry->scheduled);
+		request_exit_np(entry->scheduled);
+	}
+	else if (np && needs_move) {
+		request_exit_np(entry->scheduled);
+	}
+
+	/* Any task that is preemptable and either exhausts its execution
+	 * budget or wants to sleep completes. We may have to reschedule after
+	 * this. Don't do a job completion if we block (can't have timers running
+	 * for blocked jobs). Preemptions go first for the same reason.
+	 */
+	if (!np && (out_of_time || sleep) && !blocks)
+		job_completion(entry->scheduled, !sleep);
+	else if (!np && needs_move && !blocks) {
+		move_deadline(entry->scheduled);
+	}
+
+	/* Link pending task if we became unlinked.
+	 */
+	if (!entry->linked)
+		link_task_to_cpu(__take_ready(&cluster->domain), entry);
+
+	/* The final scheduling decision. Do we need to switch for some reason?
+	 * If linked is different from scheduled, then select linked as next.
+	 */
+	if ((!np || blocks) &&
+	    entry->linked != entry->scheduled) {
+		/* Schedule a linked job? */
+		if (entry->linked) {
+			entry->linked->rt_param.scheduled_on = entry->cpu;
+			next = entry->linked;
+		}
+		if (entry->scheduled) {
+			/* not gonna be scheduled soon */
+			entry->scheduled->rt_param.scheduled_on = NO_CPU;
+			TRACE_TASK(entry->scheduled, "scheduled_on = NO_CPU\n");
+		}
+	} else
+		/* Only override Linux scheduler if we have a real-time task
+		 * scheduled that needs to continue.
+		 */
+		if (exists)
+			next = prev;
+
+	sched_state_task_picked();
+	raw_spin_unlock(&cluster->cluster_lock);
+
+	if (next) {
+		arm_split_timer(entry, next);
+	}
+	else if (entry->timer_armed) {
+		cancel_split_timer(entry);
+	}
+
+#ifdef WANT_ALL_SCHED_EVENTS
+	TRACE("cflsplit_lock released, next=0x%p\n", next);
+
+	if (next)
+		TRACE_TASK(next, "scheduled at %llu\n", litmus_clock());
+	else if (exists && !next)
+		TRACE("becomes idle at %llu.\n", litmus_clock());
+#endif
+
+
+	return next;
+}
+
+
+/* _finish_switch - we just finished the switch away from prev
+ */
+static void cflsplit_finish_switch(struct task_struct *prev)
+{
+	cpu_entry_t*	entry = &__get_cpu_var(cflsplit_cpu_entries);
+
+	entry->scheduled = is_realtime(current) ?
+		current : NULL;
+#ifdef WANT_ALL_SCHED_EVENTS
+	TRACE_TASK(prev, "switched away from\n");
+#endif
+}
+
+
+static void cflsplit_release_at(struct task_struct *t, lt_t start)
+{
+	release_at(t, start);
+	t->rt_param.job_params.subjob_deadline = get_proper_deadline(t);
+}
+
+
+/* Prepare a task for running in RT mode
+ */
+static void cflsplit_task_new(struct task_struct *t, int on_rq, int is_scheduled)
+{
+	unsigned long		flags;
+	cpu_entry_t*		entry;
+	cflsplit_domain_t*	cluster;
+
+	TRACE("C-FL-split: task new %d\n", t->pid);
+
+	/* the cluster doesn't change even if t is scheduled */
+	cluster = task_cpu_cluster(t);
+
+	raw_spin_lock_irqsave(&cluster->cluster_lock, flags);
+
+	/* setup job params */
+	cflsplit_release_at(t, litmus_clock());
+
+	if (is_scheduled) {
+		entry = &per_cpu(cflsplit_cpu_entries, task_cpu(t));
+		BUG_ON(entry->scheduled);
+
+#ifdef CONFIG_RELEASE_MASTER
+		if (entry->cpu != cluster->domain.release_master) {
+#endif
+			entry->scheduled = t;
+			tsk_rt(t)->scheduled_on = task_cpu(t);
+#ifdef CONFIG_RELEASE_MASTER
+		} else {
+			/* do not schedule on release master */
+			preempt(entry); /* force resched */
+			tsk_rt(t)->scheduled_on = NO_CPU;
+		}
+#endif
+	} else {
+		t->rt_param.scheduled_on = NO_CPU;
+	}
+	t->rt_param.linked_on = NO_CPU;
+
+	if (is_running(t))
+		cflsplit_job_arrival(t);
+	raw_spin_unlock_irqrestore(&(cluster->cluster_lock), flags);
+}
+
+static void cflsplit_task_wake_up(struct task_struct *task)
+{
+	unsigned long flags;
+	lt_t now;
+	cflsplit_domain_t *cluster;
+
+	TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());
+
+	cluster = task_cpu_cluster(task);
+
+	raw_spin_lock_irqsave(&cluster->cluster_lock, flags);
+	now = litmus_clock();
+	if (is_sporadic(task) && is_tardy(task, now)) {
+		/* new sporadic release */
+		cflsplit_release_at(task, now);
+		sched_trace_task_release(task);
+	}
+	cflsplit_job_arrival(task);
+	raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags);
+}
+
+static void cflsplit_task_block(struct task_struct *t)
+{
+	unsigned long flags;
+	cflsplit_domain_t *cluster;
+
+	TRACE_TASK(t, "block at %llu\n", litmus_clock());
+
+	cluster = task_cpu_cluster(t);
+
+	/* unlink if necessary */
+	raw_spin_lock_irqsave(&cluster->cluster_lock, flags);
+	unlink(t);
+	raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags);
+
+	BUG_ON(!is_realtime(t));
+}
+
+
+static void cflsplit_task_exit(struct task_struct *t)
+{
+	unsigned long flags;
+	cflsplit_domain_t *cluster = task_cpu_cluster(t);
+
+	/* unlink if necessary */
+	raw_spin_lock_irqsave(&cluster->cluster_lock, flags);
+	unlink(t);
+	if (tsk_rt(t)->scheduled_on != NO_CPU) {
+		cpu_entry_t *cpu;
+		cpu = &per_cpu(cflsplit_cpu_entries, tsk_rt(t)->scheduled_on);
+		cpu->scheduled = NULL;
+		tsk_rt(t)->scheduled_on = NO_CPU;
+	}
+	raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags);
+
+	BUG_ON(!is_realtime(t));
+	TRACE_TASK(t, "RIP\n");
+}
+
+static long cflsplit_admit_task(struct task_struct* tsk)
+{
+	return (remote_cluster(task_cpu(tsk)) == task_cpu_cluster(tsk)) ?
+			0 : -EINVAL;
+}
+
+/* total number of clusters */
+static int num_clusters;
+/* we do not support clusters of different sizes */
+static unsigned int cluster_size;
+
+#ifdef VERBOSE_INIT
+static void print_cluster_topology(cpumask_var_t mask, int cpu)
+{
+	int chk;
+	char buf[255];
+
+	chk = cpulist_scnprintf(buf, 254, mask);
+	buf[chk] = '\0';
+	printk(KERN_INFO "CPU = %d, shared cpu(s) = %s\n", cpu, buf);
+
+}
+#endif
+
+static int clusters_allocated = 0;
+
+static void cleanup_cflsplit(void)
+{
+	int i;
+
+	if (clusters_allocated) {
+		for (i = 0; i < num_clusters; i++) {
+			kfree(cflsplit[i].cpus);
+			kfree(cflsplit[i].heap_node);
+			free_cpumask_var(cflsplit[i].cpu_map);
+		}
+
+		kfree(cflsplit);
+	}
+}
+
+static long cflsplit_activate_plugin(void)
+{
+	int i, j, cpu, ccpu, cpu_count;
+	cpu_entry_t *entry;
+
+	cpumask_var_t mask;
+	int chk = 0;
+
+	/* de-allocate old clusters, if any */
+	cleanup_cflsplit();
+
+	printk(KERN_INFO "C-FL-split: Activate Plugin, cluster configuration = %d\n",
+	       cluster_config);
+
+	/* need to get cluster_size first */
+	if (!zalloc_cpumask_var(&mask, GFP_ATOMIC))
+		return -ENOMEM;
+
+	if (unlikely(cluster_config == GLOBAL_CLUSTER)) {
+		cluster_size = num_online_cpus();
+	} else {
+		chk = get_shared_cpu_map(mask, 0, cluster_config);
+		if (chk) {
+			/* if chk != 0 then it is the max allowed index */
+			printk(KERN_INFO "C-FL-split: Cluster configuration = %d "
+			       "is not supported on this hardware.\n",
+			       cluster_config);
+			/* User should notice that the configuration failed, so
+			 * let's bail out. */
+			return -EINVAL;
+		}
+
+		cluster_size = cpumask_weight(mask);
+	}
+
+	if ((num_online_cpus() % cluster_size) != 0) {
+		/* this can't be right, some cpus are left out */
+		printk(KERN_ERR "C-FL-split: Trying to group %d cpus in %d!\n",
+		       num_online_cpus(), cluster_size);
+		return -1;
+	}
+
+	num_clusters = num_online_cpus() / cluster_size;
+	printk(KERN_INFO "C-FL-split: %d cluster(s) of size = %d\n",
+	       num_clusters, cluster_size);
+
+	/* initialize clusters */
+	cflsplit = kmalloc(num_clusters * sizeof(cflsplit_domain_t), GFP_ATOMIC);
+	for (i = 0; i < num_clusters; i++) {
+
+		cflsplit[i].cpus = kmalloc(cluster_size * sizeof(cpu_entry_t),
+					   GFP_ATOMIC);
+		cflsplit[i].heap_node = kmalloc(
+				cluster_size * sizeof(struct bheap_node),
+				GFP_ATOMIC);
+		bheap_init(&(cflsplit[i].cpu_heap));
+		edf_split_domain_init(&(cflsplit[i].domain), NULL,
+				      cflsplit_release_jobs);
+
+		if (!zalloc_cpumask_var(&cflsplit[i].cpu_map, GFP_ATOMIC))
+			return -ENOMEM;
+#ifdef CONFIG_RELEASE_MASTER
+		cflsplit[i].domain.release_master = atomic_read(&release_master_cpu);
+#endif
+	}
+
+	/* cycle through clusters and add cpus to them */
+	for (i = 0; i < num_clusters; i++) {
+
+		for_each_online_cpu(cpu) {
+			/* check if the cpu is already in a cluster */
+			for (j = 0; j < num_clusters; j++)
+				if (cpumask_test_cpu(cpu, cflsplit[j].cpu_map))
+					break;
+			/* if it is in a cluster go to next cpu */
+			if (j < num_clusters &&
+			    cpumask_test_cpu(cpu, cflsplit[j].cpu_map))
+				continue;
+
+			/* this cpu isn't in any cluster */
+			/* get the shared cpus */
+			if (unlikely(cluster_config == GLOBAL_CLUSTER))
+				cpumask_copy(mask, cpu_online_mask);
+			else
+				get_shared_cpu_map(mask, cpu, cluster_config);
+
+			cpumask_copy(cflsplit[i].cpu_map, mask);
+#ifdef VERBOSE_INIT
+			print_cluster_topology(mask, cpu);
+#endif
+			/* add cpus to current cluster and init cpu_entry_t */
+			cpu_count = 0;
+			for_each_cpu(ccpu, cflsplit[i].cpu_map) {
+
+				entry = &per_cpu(cflsplit_cpu_entries, ccpu);
+				cflsplit[i].cpus[cpu_count] = entry;
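+				/* initialize this CPU's entry: scheduling
+				 * state, split timer, and heap node */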
+				atomic_set(&entry->will_schedule, 0);
+				entry->cpu = ccpu;
+				entry->cluster = &cflsplit[i];
+				entry->hn = &(cflsplit[i].heap_node[cpu_count]);
+				hrtimer_init(&entry->split_timer,
+					     CLOCK_MONOTONIC,
+					     HRTIMER_MODE_ABS);
+				entry->split_timer.function = on_split_timeout;
+				bheap_node_init(&entry->hn, entry);
+
+				cpu_count++;
+
+				entry->linked = NULL;
+				entry->scheduled = NULL;
+#ifdef CONFIG_RELEASE_MASTER
+				/* only add CPUs that should schedule jobs */
+				if (entry->cpu != entry->cluster->domain.release_master)
+#endif
+					update_cpu_position(entry);
+			}
+			/* done with this cluster */
+			break;
+		}
+	}
+
+	free_cpumask_var(mask);
+	clusters_allocated = 1;
+	return 0;
+}
+
+/* Plugin object */
+static struct sched_plugin cflsplit_plugin __cacheline_aligned_in_smp = {
+	.plugin_name		= "C-FL-split",
+	.finish_switch		= cflsplit_finish_switch,
+	.tick			= cflsplit_tick,
+	.task_new		= cflsplit_task_new,
+	.complete_job		= complete_job,
+	.task_exit		= cflsplit_task_exit,
+	.schedule		= cflsplit_schedule,
+	.release_at		= cflsplit_release_at,
+	.task_wake_up		= cflsplit_task_wake_up,
+	.task_block		= cflsplit_task_block,
+	.admit_task		= cflsplit_admit_task,
+	.activate_plugin	= cflsplit_activate_plugin,
+};
+
+static struct proc_dir_entry *cluster_file = NULL, *cflsplit_dir = NULL;
+
+static int __init init_cflsplit(void)
+{
+	int err, fs;
+
+	err = register_sched_plugin(&cflsplit_plugin);
+	if (!err) {
+		fs = make_plugin_proc_dir(&cflsplit_plugin, &cflsplit_dir);
+		if (!fs)
+			cluster_file = create_cluster_file(cflsplit_dir, &cluster_config);
+		else
+			printk(KERN_ERR "Could not allocate C-FL-split procfs dir.\n");
+	}
+	return err;
+}
+
+static void clean_cflsplit(void)
+{
+	cleanup_cflsplit();
+	if (cluster_file)
+		remove_proc_entry("cluster", cflsplit_dir);
+	if (cflsplit_dir)
+		remove_plugin_proc_dir(&cflsplit_plugin);
+}
+
+module_init(init_cflsplit);
+module_exit(clean_cflsplit);
-- 