author     Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-12-18 12:42:44 -0500
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-12-18 12:42:44 -0500
commit     3e3b3916a9c5c28a16528585478de19fea59816b (patch)
tree       f7da78c1044840e62008ac7b5512b40713c342bc /kernel
parent     51dad801e271f3754a728e5b9a2ef974576490cc (diff)
parent     4aae07025265151e3f7041dfbf0f529e122de1d8 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86
* git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86:
  x86: fix "Kernel panic - not syncing: IO-APIC + timer doesn't work!"
  genirq: revert lazy irq disable for simple irqs
  x86: also define AT_VECTOR_SIZE_ARCH
  x86: kprobes bugfix
  x86: jprobe bugfix
  timer: kernel/timer.c section fixes
  genirq: add unlocked version of set_irq_handler()
  clockevents: fix reprogramming decision in oneshot broadcast
  oprofile: op_model_athlon.c support for AMD family 10h barcelona performance counters
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/irq/chip.c              9
-rw-r--r--  kernel/time/tick-broadcast.c  56
-rw-r--r--  kernel/timer.c                 4
3 files changed, 25 insertions, 44 deletions
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 9b5dff6b3f6..44019ce30a1 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -297,18 +297,13 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc)
 
 	if (unlikely(desc->status & IRQ_INPROGRESS))
 		goto out_unlock;
+	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
 	kstat_cpu(cpu).irqs[irq]++;
 
 	action = desc->action;
-	if (unlikely(!action || (desc->status & IRQ_DISABLED))) {
-		if (desc->chip->mask)
-			desc->chip->mask(irq);
-		desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
-		desc->status |= IRQ_PENDING;
+	if (unlikely(!action || (desc->status & IRQ_DISABLED)))
 		goto out_unlock;
-	}
 
-	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING | IRQ_PENDING);
 	desc->status |= IRQ_INPROGRESS;
 	spin_unlock(&desc->lock);
 
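
Not part of the commit: a minimal, self-contained userspace sketch of the restored handle_simple_irq() flow in the hunk above, for readers who want the post-revert control flow in isolation. The struct, helper names and main() scenario are hypothetical stand-ins; only the IRQ_* flag handling mirrors the kernel code.

#include <stdio.h>

#define IRQ_INPROGRESS 0x01
#define IRQ_DISABLED   0x02
#define IRQ_REPLAY     0x04
#define IRQ_WAITING    0x08

struct fake_desc {
	unsigned int status;
	int has_action;          /* stands in for desc->action != NULL */
	unsigned long irq_count; /* stands in for kstat_cpu(cpu).irqs[irq] */
};

/* Mirrors the post-revert logic: spurious or disabled simple IRQs are just
 * counted and dropped; no masking and no IRQ_PENDING bookkeeping anymore. */
static void model_handle_simple_irq(struct fake_desc *desc)
{
	if (desc->status & IRQ_INPROGRESS)
		return;                                 /* out_unlock */
	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
	desc->irq_count++;

	if (!desc->has_action || (desc->status & IRQ_DISABLED))
		return;                                 /* out_unlock */

	desc->status |= IRQ_INPROGRESS;
	printf("running handler, status=%#x\n", desc->status);
	desc->status &= ~IRQ_INPROGRESS;
}

int main(void)
{
	struct fake_desc d = { .status = 0, .has_action = 1 };

	model_handle_simple_irq(&d);            /* handler runs */
	d.status |= IRQ_DISABLED;
	model_handle_simple_irq(&d);            /* counted, but no handler */
	printf("irq_count=%lu\n", d.irq_count);
	return 0;
}
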
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index aa82d7bf478..5b86698faa0 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -384,45 +384,19 @@ int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
 }
 
 /*
- * Reprogram the broadcast device:
- *
- * Called with tick_broadcast_lock held and interrupts disabled.
- */
-static int tick_broadcast_reprogram(void)
-{
-	ktime_t expires = { .tv64 = KTIME_MAX };
-	struct tick_device *td;
-	int cpu;
-
-	/*
-	 * Find the event which expires next:
-	 */
-	for (cpu = first_cpu(tick_broadcast_oneshot_mask); cpu != NR_CPUS;
-	     cpu = next_cpu(cpu, tick_broadcast_oneshot_mask)) {
-		td = &per_cpu(tick_cpu_device, cpu);
-		if (td->evtdev->next_event.tv64 < expires.tv64)
-			expires = td->evtdev->next_event;
-	}
-
-	if (expires.tv64 == KTIME_MAX)
-		return 0;
-
-	return tick_broadcast_set_event(expires, 0);
-}
-
-/*
  * Handle oneshot mode broadcasting
  */
 static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
 {
 	struct tick_device *td;
 	cpumask_t mask;
-	ktime_t now;
+	ktime_t now, next_event;
 	int cpu;
 
 	spin_lock(&tick_broadcast_lock);
 again:
 	dev->next_event.tv64 = KTIME_MAX;
+	next_event.tv64 = KTIME_MAX;
 	mask = CPU_MASK_NONE;
 	now = ktime_get();
 	/* Find all expired events */
@@ -431,19 +405,31 @@ again:
 		td = &per_cpu(tick_cpu_device, cpu);
 		if (td->evtdev->next_event.tv64 <= now.tv64)
 			cpu_set(cpu, mask);
+		else if (td->evtdev->next_event.tv64 < next_event.tv64)
+			next_event.tv64 = td->evtdev->next_event.tv64;
 	}
 
 	/*
-	 * Wakeup the cpus which have an expired event. The broadcast
-	 * device is reprogrammed in the return from idle code.
+	 * Wakeup the cpus which have an expired event.
+	 */
+	tick_do_broadcast(mask);
+
+	/*
+	 * Two reasons for reprogram:
+	 *
+	 * - The global event did not expire any CPU local
+	 * events. This happens in dyntick mode, as the maximum PIT
+	 * delta is quite small.
+	 *
+	 * - There are pending events on sleeping CPUs which were not
+	 * in the event mask
 	 */
-	if (!tick_do_broadcast(mask)) {
+	if (next_event.tv64 != KTIME_MAX) {
 		/*
-		 * The global event did not expire any CPU local
-		 * events. This happens in dyntick mode, as the
-		 * maximum PIT delta is quite small.
+		 * Rearm the broadcast device. If event expired,
+		 * repeat the above
 		 */
-		if (tick_broadcast_reprogram())
+		if (tick_broadcast_set_event(next_event, 0))
 			goto again;
 	}
 	spin_unlock(&tick_broadcast_lock);
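
Not part of the commit: a minimal, self-contained userspace sketch of the reworked reprogramming decision in the hunk above. The names (model_oneshot_broadcast, next_event_ns, NR_FAKE_CPUS, model_set_broadcast_event) are hypothetical stand-ins for the kernel's per-CPU tick devices and tick_broadcast_set_event(); the point is only that the handler now tracks the earliest non-expired per-CPU event while building the wakeup mask, and rearms the broadcast device to that time instead of rescanning in a separate tick_broadcast_reprogram() pass.

#include <stdio.h>
#include <stdint.h>

#define KTIME_MAX    INT64_MAX
#define NR_FAKE_CPUS 4

/* Per-CPU next event times, in nanoseconds: a hypothetical stand-in for
 * per_cpu(tick_cpu_device, cpu).evtdev->next_event. */
static int64_t next_event_ns[NR_FAKE_CPUS] = { 100, 250, 400, 90 };

/* Stand-in for tick_broadcast_set_event(): pretend to program the broadcast
 * device. The kernel retries (goto again) if this reports that the requested
 * time already passed; the model just reports success. */
static int model_set_broadcast_event(int64_t expires)
{
	printf("rearm broadcast device for t=%lld\n", (long long)expires);
	return 0;
}

/* Models the reworked tick_handle_oneshot_broadcast() decision: wake every
 * CPU whose event has expired, and remember the earliest event that has not
 * expired yet so the broadcast device can be rearmed to it. */
static void model_oneshot_broadcast(int64_t now)
{
	int64_t next_event = KTIME_MAX;
	int cpu;

	for (cpu = 0; cpu < NR_FAKE_CPUS; cpu++) {
		if (next_event_ns[cpu] <= now)
			printf("wake cpu %d (event at %lld expired)\n",
			       cpu, (long long)next_event_ns[cpu]);
		else if (next_event_ns[cpu] < next_event)
			next_event = next_event_ns[cpu];
	}

	/* Rearm only if some sleeping CPU still has a pending event. */
	if (next_event != KTIME_MAX)
		model_set_broadcast_event(next_event);
}

int main(void)
{
	model_oneshot_broadcast(150); /* wakes CPUs 0 and 3, rearms for t=250 */
	return 0;
}
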
diff --git a/kernel/timer.c b/kernel/timer.c
index a05817c021d..d4527dcef1a 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1219,11 +1219,11 @@ asmlinkage long sys_sysinfo(struct sysinfo __user *info)
  */
 static struct lock_class_key base_lock_keys[NR_CPUS];
 
-static int __devinit init_timers_cpu(int cpu)
+static int __cpuinit init_timers_cpu(int cpu)
 {
 	int j;
 	tvec_base_t *base;
-	static char __devinitdata tvec_base_done[NR_CPUS];
+	static char __cpuinitdata tvec_base_done[NR_CPUS];
 
 	if (!tvec_base_done[cpu]) {
 		static char boot_done;