author     John M. Calandrino <jmc@jupiter-cs.cs.unc.edu>   2007-04-25 20:59:19 -0400
committer  John M. Calandrino <jmc@jupiter-cs.cs.unc.edu>   2007-04-25 20:59:19 -0400
commit     448188f2ca526f609f039e62d4216799fe80d011 (patch)
tree       bc6495924451ff9262de36c9726d06a33b095074 /arch
parent     d7718a1d5a09e0f8c698886c85c0c41ee88a51f4 (diff)
Removed incomplete zone-based implementation.
Diffstat (limited to 'arch')
-rw-r--r--   arch/i386/kernel/apic.c               77
-rw-r--r--   arch/i386/kernel/srp_sem_syscalls.c    2
-rw-r--r--   arch/i386/kernel/syscall_table.S       6
3 files changed, 17 insertions, 68 deletions
diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c
index df09855f81..f155275295 100644
--- a/arch/i386/kernel/apic.c
+++ b/arch/i386/kernel/apic.c
@@ -26,7 +26,6 @@
 #include <linux/sysdev.h>
 #include <linux/cpu.h>
 #include <linux/module.h>
-#include <linux/sched.h>
 #include <linux/litmus.h>
 
 #include <asm/atomic.h>
@@ -59,16 +58,12 @@ static int enable_local_apic __initdata = 0; /* -1=force-disable, +1=force-enabl
 /*
  * Definitions and variables related to quantum synchronization.
  */
-#define WAIT_TO_SYNC 30000 /* time between syncs and before 1st sync */
+#define WAIT_TO_SYNC 30000 /* time after boot until sync */
 static int stagger = 0; /* are we using staggered quanta? */
 static atomic_t qsync_time = ATOMIC_INIT(INITIAL_JIFFIES);
 static atomic_t quantum_sync_barrier = ATOMIC_INIT(0);
-static atomic_t barrier_use_count = ATOMIC_INIT(1);
 static atomic_t sync_done = ATOMIC_INIT(0);
 
-/* Records time of last local timer interrupt at each processor. */
-DEFINE_PER_CPU(struct timeval, last_local_intr);
-
 static inline void lapic_disable(void)
 {
 	enable_local_apic = -1;
@@ -1234,12 +1229,7 @@ EXPORT_SYMBOL(switch_ipi_to_APIC_timer);
  * necessary. It relies on a barrier to synchronize all processors, so
  * that they all reset their APIC timers at the same time. If quanta
  * should be staggered, the appropriate stagger delay is then added at
- * each processor. Staggered quanta are synchronized only once (it is
- * only important that quanta remain relatively misaligned, not that they
- * remain equally spaced forever). Aligned quanta, however, are
- * periodically realigned, which in most cases should not result
- * in timing issues after the first alignment. In order
- * to enable periodic realignment, we use a sense-reversal barrier.
+ * each processor.
  */
 
 void synchronize_quanta(void)
@@ -1254,39 +1244,24 @@ void synchronize_quanta(void)
 	 */
 	disable_APIC_timer();
 	atomic_inc(&quantum_sync_barrier);
-	while (atomic_read(&quantum_sync_barrier) <
-	       atomic_read(&barrier_use_count) * total_cpus)
-	{
+	while (atomic_read(&quantum_sync_barrier) < total_cpus) {
 		/* Delay, otherwise atomic_inc's cannot occur. */
 		udelay(1);
 	}
-	__setup_APIC_LVTT(calibration_result);
-	if (!stagger)
-		enable_APIC_timer();
 
-	/*
-	 * Add necessary stagger for this CPU, if required. Do _not_
-	 * update qsync_time. Otherwise, we update the qsync_time
-	 * and increment the barrier count, which allows the barrier to
-	 * be "reused" (at least, until overflow occurs in roughly X*2^29
-	 * seconds, or roughly X*17 years -- for a 30 second wait interval,
-	 * this means 510 years until overflow, which seems safe).
-	 */
+	/* Add necessary stagger for this CPU, if required. */
 	if (stagger) {
 		int stagger_us = cpu * stagger_interval;
 		udelay(stagger_us);
-		__setup_APIC_LVTT(calibration_result);
-		enable_APIC_timer();
-		atomic_inc(&sync_done);
-	} else if (cpu == 0) {
-		/* The first CPU updates qsync_time and "resets" barrier */
-		atomic_inc(&sync_done); /* temporary until resync fixed */
-		/*
-		 * FIXME: wraparound issue prevents periodic re-synch.
-		 */
-		/* atomic_add(msecs_to_jiffies(WAIT_TO_SYNC), &qsync_time); */
-		/* atomic_inc(&barrier_use_count); */
 	}
+
+	/* Re-enable all timers. */
+	__setup_APIC_LVTT(calibration_result);
+	enable_APIC_timer();
+
+	/* The first CPU signals that quantum sync is complete. */
+	if (cpu == 0)
+		atomic_inc(&sync_done);
 }
 
 
@@ -1302,27 +1277,8 @@ void synchronize_quanta(void)
 
 inline void smp_local_timer_interrupt(void)
 {
-	struct task_struct *p;
 	int cpu = smp_processor_id();
 
-	/*
-	 * Read offset from last recorded global tick.
-	 * Do this before calling profiling and scheduling code, as
-	 * this indicates roughly when a scheduling decision will be made.
-	 * Note that we try to read the offset without claiming any locks.
-	 * This won't always work, but we just want enough samples
-	 * to figure out what's going on.
-	 */
-	/* offset = get_nsec_offset();
-	if (jiffies % 10 == 0 && time_after(jiffies,
-	    (unsigned long)(INITIAL_JIFFIES + msecs_to_jiffies(WAIT_TO_SYNC))))
-		TRACE("%d, %d: %lld\n", smp_processor_id(), jiffies, offset);
-	*/
-
-	/* Record current time as timestamp of when last local
-	 * interrupt occurred. */
-	do_getapproxtimeofday(&(per_cpu(last_local_intr, cpu)));
-
 	profile_tick(CPU_PROFILING);
 #ifdef CONFIG_SMP
 	update_process_times(user_mode_vm(get_irq_regs()));
@@ -1330,8 +1286,7 @@ inline void smp_local_timer_interrupt(void)
 
 	/*
 	 * Synchronize quanta if we have reached qsync_time plus wait
-	 * interval. Note that sync_done is only set in the staggered case.
-	 * The synchronization code itself is placed in its own
+	 * interval. The synchronization code itself is placed in its own
 	 * (non-inline) function, to avoid issues with creating an inline
 	 * function that is too large.
 	 */
@@ -1342,12 +1297,6 @@ inline void smp_local_timer_interrupt(void)
 		synchronize_quanta();
 	}
 
-	/* Clear flags for tasks waiting until end of blocking zone, as it
-	 * will end immediately after this interrupt handler exits.
-	 */
-	for_each_process(p)
-		p->rt_param.waiting_to_exit_zone = 0;
-
 	/*
 	 * We take the 'long' return path, and there every subsystem
 	 * grabs the apropriate locks (kernel lock/ irq lock).
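
What survives the cleanup in synchronize_quanta() above is a plain spin barrier followed by an optional per-CPU stagger: each CPU increments a shared counter, busy-waits until all total_cpus CPUs have arrived, optionally delays by cpu * stagger_interval, and only then re-arms its local timer. The sketch below is a minimal userspace analogue of that pattern, using C11 atomics and POSIX threads purely for illustration; TOTAL_CPUS, STAGGER_INTERVAL, and restart_timer() are stand-ins for the kernel's total_cpus, stagger_interval, and APIC re-arm calls, not actual kernel interfaces.

/* Illustrative userspace analogue of the simplified synchronize_quanta()
 * flow. All names and constants here are stand-ins, not kernel code. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define TOTAL_CPUS       4
#define STAGGER_INTERVAL 250              /* microseconds between staggered quanta */

static atomic_int quantum_sync_barrier = 0;   /* "CPUs" that reached the barrier */
static int stagger = 1;                       /* are we using staggered quanta? */

/* Stands in for __setup_APIC_LVTT(calibration_result) + enable_APIC_timer(). */
static void restart_timer(int cpu)
{
	printf("cpu %d: timer re-armed\n", cpu);
}

static void *synchronize_quanta(void *arg)
{
	int cpu = (int)(intptr_t)arg;

	/* Spin until every CPU has checked in (analogue of the simplified
	 * while loop over quantum_sync_barrier in the patched code). */
	atomic_fetch_add(&quantum_sync_barrier, 1);
	while (atomic_load(&quantum_sync_barrier) < TOTAL_CPUS)
		usleep(1);                    /* analogue of udelay(1) */

	/* Add the per-CPU stagger, if staggered quanta are in use. */
	if (stagger)
		usleep(cpu * STAGGER_INTERVAL);

	/* Every CPU now restarts its timer; with stagger enabled the restarts
	 * are offset by cpu * STAGGER_INTERVAL microseconds. */
	restart_timer(cpu);
	return NULL;
}

int main(void)
{
	pthread_t threads[TOTAL_CPUS];

	for (intptr_t i = 0; i < TOTAL_CPUS; i++)
		pthread_create(&threads[i], NULL, synchronize_quanta, (void *)i);
	for (int i = 0; i < TOTAL_CPUS; i++)
		pthread_join(threads[i], NULL);
	return 0;
}

Compile with cc -pthread. The real kernel path differs in that it runs in interrupt context with udelay() rather than usleep(), and in that CPU 0 additionally signals completion through sync_done, as the new lines of the hunk above show.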
diff --git a/arch/i386/kernel/srp_sem_syscalls.c b/arch/i386/kernel/srp_sem_syscalls.c
index 8f0545898c..838df95202 100644
--- a/arch/i386/kernel/srp_sem_syscalls.c
+++ b/arch/i386/kernel/srp_sem_syscalls.c
@@ -1,5 +1,7 @@
 #ifdef __KERNEL__
 
+// comparison by period and pid!
+
 /*
  * Uniprocessor SRP "semaphores".
  */
diff --git a/arch/i386/kernel/syscall_table.S b/arch/i386/kernel/syscall_table.S
index 0b50716b03..9a5348f209 100644
--- a/arch/i386/kernel/syscall_table.S
+++ b/arch/i386/kernel/syscall_table.S
@@ -339,10 +339,8 @@ ENTRY(sys_call_table)
 	.long sys_down
 	.long sys_up
 	.long sys_sema_free
-	.long sys_in_blocking_zone
-	.long sys_wait_for_zone_exit	/* 340 */
 	.long sys_srp_sema_init
-	.long sys_srp_down
+	.long sys_srp_down		/* 340 */
 	.long sys_srp_up
 	.long sys_reg_task_srp_sem
-	.long sys_srp_sema_free		/* 345 */
+	.long sys_srp_sema_free		/* 343 */
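
Deleting sys_in_blocking_zone and sys_wait_for_zone_exit from the middle of the table shifts every later LITMUS-specific syscall number down by two; the table comments above show sys_srp_down landing on 340 and sys_srp_sema_free moving from 345 to 343. Userspace code that hard-codes these numbers therefore has to be rebuilt against the new table. A hypothetical wrapper sketch, assuming the table comments are the final syscall numbers and that sys_srp_sema_free takes a single semaphore-id argument (this diff does not show its signature):

/* Hypothetical userspace wrapper illustrating the renumbering. The value
 * 343 comes from the post-commit table comment above; the sem_id argument
 * is an assumption, not something this diff shows. */
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#define __NR_srp_sema_free 343   /* was 345 before this commit */

static long srp_sema_free(int sem_id)
{
	return syscall(__NR_srp_sema_free, sem_id);
}

int main(void)
{
	/* On a kernel without this syscall the call simply fails with ENOSYS. */
	printf("srp_sema_free(0) -> %ld\n", srp_sema_free(0));
	return 0;
}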