author     John M. Calandrino <jmc@jupiter-cs.cs.unc.edu>   2007-02-06 13:50:03 -0500
committer  John M. Calandrino <jmc@jupiter-cs.cs.unc.edu>   2007-02-06 13:50:03 -0500
commit     eb2ba33041b84cf15367288367494c6aec1da25a (patch)
tree       40c641d6dc1ada66fdd752d53770bd66efd805a4
parent     89fb43c271050f097b93f94e03a015e8d441f359 (diff)
Added support for quantum synchronization by having all processors wait
on a barrier, after which all timers are reset concurrently. This is still
being debugged -- I locally committed it so it would be backed up
somewhere.
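
For context, the barrier described above follows a common pattern: each CPU announces its arrival by incrementing a shared atomic counter, spins until the counter reaches the number of online CPUs, and only then re-arms its local timer, so every quantum starts at (roughly) the same instant. The following is a minimal userspace sketch of that pattern only, not code from this commit: pthreads stand in for CPUs, a C11 atomic_int stands in for quantum_sync_barrier, and reset_local_timer() is a hypothetical stand-in for the __setup_APIC_LVTT()/enable_APIC_timer() calls in the patch.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NCPUS 4

static atomic_int quantum_sync_barrier;  /* counts "CPUs" that have reached the barrier */

static void reset_local_timer(int cpu)
{
        /* Stand-in: the kernel would reprogram the per-CPU APIC timer here. */
        printf("cpu %d: timer re-armed\n", cpu);
}

static void *cpu_thread(void *arg)
{
        int cpu = (int)(long)arg;

        /* Announce arrival, then spin until every "CPU" has arrived. */
        atomic_fetch_add(&quantum_sync_barrier, 1);
        while (atomic_load(&quantum_sync_barrier) < NCPUS)
                ;  /* busy-wait, as the patch does */

        /* All threads fall through here together: re-arm timers concurrently. */
        reset_local_timer(cpu);
        return NULL;
}

int main(void)
{
        pthread_t t[NCPUS];

        for (long i = 0; i < NCPUS; i++)
                pthread_create(&t[i], NULL, cpu_thread, (void *)i);
        for (int i = 0; i < NCPUS; i++)
                pthread_join(t[i], NULL);
        return 0;
}

Build with something like gcc -pthread barrier_sketch.c. The real patch additionally disables the local timer before waiting, and can optionally stagger each CPU by cpu times (one jiffy divided by the number of online CPUs) microseconds when STAGGER is set.
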
-rw-r--r--  Makefile                 |  2
-rw-r--r--  arch/i386/kernel/apic.c  | 54
2 files changed, 55 insertions(+), 1 deletion(-)
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 20
-EXTRAVERSION =
+EXTRAVERSION = -jmc
 NAME = Homicidal Dwarf Hamster
 
 # *DOCUMENTATION*
diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c
index 776d9be26a..268a32870c 100644
--- a/arch/i386/kernel/apic.c
+++ b/arch/i386/kernel/apic.c
@@ -1197,6 +1197,44 @@ EXPORT_SYMBOL(switch_ipi_to_APIC_timer);
 
 #undef APIC_DIVISOR
 
+/* JOHN: Begin added code, starting with quantum sync function. */
+#define STAGGER 0
+#define WARM_UP_INTRS 10000
+// later, define per-cpu variable and re-synch when intr_count % WARM_UP_INTRS == 0
+static atomic_t quantum_sync_barrier = ATOMIC_INIT(0); // used to implement simple barrier
+static DEFINE_PER_CPU(int, synched) = 0;
+
+/* This function is called to align all quanta, and to stagger quanta if necessary. */
+void synchronize_quanta(void)
+{
+	int cpu = smp_processor_id();
+	int total_cpus = num_online_cpus();
+	int stagger_interval = jiffies_to_usecs(1) / total_cpus;
+
+	/*
+	 * Disable APIC timer, wait for all other processors to reach barrier,
+	 * and re-enable all timers concurrently.
+	 */
+	disable_APIC_timer();
+	atomic_inc(&quantum_sync_barrier);
+	while (atomic_read(&quantum_sync_barrier) < total_cpus);
+	__setup_APIC_LVTT(calibration_result);
+	enable_APIC_timer();
+
+	/* Add necessary stagger for this CPU, if required. */
+	if (STAGGER) {
+		int stagger_us = cpu * stagger_interval;
+		disable_APIC_timer();
+		udelay(stagger_us);
+		__setup_APIC_LVTT(calibration_result);
+		enable_APIC_timer();
+	}
+
+	/* Set "synched" flag so this code is not executed again. */
+	per_cpu(synched, cpu) = 1;
+}
+/* JOHN: end added code. */
+
 /*
  * Local timer interrupt handler. It does both profiling and
  * process statistics/rescheduling.
@@ -1209,11 +1247,27 @@ EXPORT_SYMBOL(switch_ipi_to_APIC_timer);
 
 inline void smp_local_timer_interrupt(void)
 {
+	// JOHN: begin added code.
+	int cpu = smp_processor_id();
+	// JOHN: end added code.
+
 	profile_tick(CPU_PROFILING);
 #ifdef CONFIG_SMP
 	update_process_times(user_mode_vm(get_irq_regs()));
 #endif
 
+	// JOHN: begin added code.
+	/*
+	 * Time to synchronize quanta, if it hasn't been done already. To avoid
+	 * placing a lot of code in an inline function, the actual sync code is
+	 * placed in its own (non-inline) function - the majority of the time
+	 * we won't even call it.
+	 */
+	if (unlikely(!per_cpu(synched, cpu))) {
+		synchronize_quanta();
+	}
+	// JOHN: end added code.
+
 	/*
 	 * We take the 'long' return path, and there every subsystem
 	 * grabs the apropriate locks (kernel lock/ irq lock).
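
The comment added to smp_local_timer_interrupt() above reflects a standard fast-path/slow-path split: the per-CPU check stays in the small inline hot path and is hinted as unlikely, while the heavier one-time sync work lives out of line. Below is a minimal, self-contained sketch of that shape only (not code from the patch); it uses GCC's __builtin_expect() in place of the kernel's unlikely() macro and a C11 _Thread_local flag in place of the per-CPU variable, with illustrative names throughout.

#include <stdio.h>

#define unlikely(x) __builtin_expect(!!(x), 0)

static _Thread_local int synched;  /* stand-in for the per-CPU "synched" flag */

/* Out-of-line slow path: executed once, keeps the hot path compact. */
static void __attribute__((noinline)) do_one_time_sync(void)
{
        printf("one-time sync performed\n");  /* the patch aligns the local timer here */
        synched = 1;
}

/* Inline hot path: a single predicted-not-taken branch in the common case. */
static inline void timer_tick(void)
{
        if (unlikely(!synched))
                do_one_time_sync();
        /* ... regular per-tick work would follow ... */
}

int main(void)
{
        for (int i = 0; i < 3; i++)
                timer_tick();  /* the sync runs only on the first tick */
        return 0;
}
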