author      Paul Mackerras <paulus@samba.org>    2006-08-31 01:45:48 -0400
committer   Paul Mackerras <paulus@samba.org>    2006-08-31 01:45:48 -0400
commit      aa43f77939c97bf9d3580c6a5e71a5a40290e451 (patch)
tree        095c0b8b3da4b6554a3f8ef4b39240a5d9216d4d /arch/powerpc/kernel
parent      2818c5dec5e28d65d52afbb7695bbbafe6377ee5 (diff)
parent      4c15343167b5febe7bb0ba96aad5bef42ae94d3b (diff)

Merge branch 'merge'
Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--   arch/powerpc/kernel/fpu.S          5
-rw-r--r--   arch/powerpc/kernel/irq.c         84
-rw-r--r--   arch/powerpc/kernel/pci_64.c      11
-rw-r--r--   arch/powerpc/kernel/prom_init.c   10
-rw-r--r--   arch/powerpc/kernel/prom_parse.c  15
-rw-r--r--   arch/powerpc/kernel/smp-tbsync.c   5
-rw-r--r--   arch/powerpc/kernel/time.c        25
-rw-r--r--   arch/powerpc/kernel/traps.c        2
8 files changed, 109 insertions, 48 deletions
diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
index 7e2c9fe44ac1..821e152e093c 100644
--- a/arch/powerpc/kernel/fpu.S
+++ b/arch/powerpc/kernel/fpu.S
@@ -2,6 +2,11 @@
  * FPU support code, moved here from head.S so that it can be used
  * by chips which use other head-whatever.S files.
  *
+ * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
+ * Copyright (C) 1996 Paul Mackerras.
+ * Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
+ *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
  * as published by the Free Software Foundation; either version
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index fd4ddb858dbd..b4432332341f 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -323,7 +323,8 @@ EXPORT_SYMBOL(do_softirq);
 
 static LIST_HEAD(irq_hosts);
 static spinlock_t irq_big_lock = SPIN_LOCK_UNLOCKED;
-
+static DEFINE_PER_CPU(unsigned int, irq_radix_reader);
+static unsigned int irq_radix_writer;
 struct irq_map_entry irq_map[NR_IRQS];
 static unsigned int irq_virq_count = NR_IRQS;
 static struct irq_host *irq_default_host;
@@ -456,6 +457,58 @@ void irq_set_virq_count(unsigned int count)
 	irq_virq_count = count;
 }
 
+/* radix tree not lockless safe ! we use a brlock-type mecanism
+ * for now, until we can use a lockless radix tree
+ */
+static void irq_radix_wrlock(unsigned long *flags)
+{
+	unsigned int cpu, ok;
+
+	spin_lock_irqsave(&irq_big_lock, *flags);
+	irq_radix_writer = 1;
+	smp_mb();
+	do {
+		barrier();
+		ok = 1;
+		for_each_possible_cpu(cpu) {
+			if (per_cpu(irq_radix_reader, cpu)) {
+				ok = 0;
+				break;
+			}
+		}
+		if (!ok)
+			cpu_relax();
+	} while(!ok);
+}
+
+static void irq_radix_wrunlock(unsigned long flags)
+{
+	smp_wmb();
+	irq_radix_writer = 0;
+	spin_unlock_irqrestore(&irq_big_lock, flags);
+}
+
+static void irq_radix_rdlock(unsigned long *flags)
+{
+	local_irq_save(*flags);
+	__get_cpu_var(irq_radix_reader) = 1;
+	smp_mb();
+	if (likely(irq_radix_writer == 0))
+		return;
+	__get_cpu_var(irq_radix_reader) = 0;
+	smp_wmb();
+	spin_lock(&irq_big_lock);
+	__get_cpu_var(irq_radix_reader) = 1;
+	spin_unlock(&irq_big_lock);
+}
+
+static void irq_radix_rdunlock(unsigned long flags)
+{
+	__get_cpu_var(irq_radix_reader) = 0;
+	local_irq_restore(flags);
+}
+
+
 unsigned int irq_create_mapping(struct irq_host *host,
 				irq_hw_number_t hwirq)
 {
@@ -605,13 +658,9 @@ void irq_dispose_mapping(unsigned int virq)
 		/* Check if radix tree allocated yet */
 		if (host->revmap_data.tree.gfp_mask == 0)
 			break;
-		/* XXX radix tree not safe ! remove lock whem it becomes safe
-		 * and use some RCU sync to make sure everything is ok before we
-		 * can re-use that map entry
-		 */
-		spin_lock_irqsave(&irq_big_lock, flags);
+		irq_radix_wrlock(&flags);
 		radix_tree_delete(&host->revmap_data.tree, hwirq);
-		spin_unlock_irqrestore(&irq_big_lock, flags);
+		irq_radix_wrunlock(flags);
 		break;
 	}
 
@@ -678,25 +727,24 @@ unsigned int irq_radix_revmap(struct irq_host *host,
 	if (tree->gfp_mask == 0)
 		return irq_find_mapping(host, hwirq);
 
-	/* XXX Current radix trees are NOT SMP safe !!! Remove that lock
-	 * when that is fixed (when Nick's patch gets in
-	 */
-	spin_lock_irqsave(&irq_big_lock, flags);
-
 	/* Now try to resolve */
+	irq_radix_rdlock(&flags);
 	ptr = radix_tree_lookup(tree, hwirq);
+	irq_radix_rdunlock(flags);
+
 	/* Found it, return */
 	if (ptr) {
 		virq = ptr - irq_map;
-		goto bail;
+		return virq;
 	}
 
 	/* If not there, try to insert it */
 	virq = irq_find_mapping(host, hwirq);
-	if (virq != NO_IRQ)
+	if (virq != NO_IRQ) {
+		irq_radix_wrlock(&flags);
 		radix_tree_insert(tree, hwirq, &irq_map[virq]);
- bail:
-	spin_unlock_irqrestore(&irq_big_lock, flags);
+		irq_radix_wrunlock(flags);
+	}
 	return virq;
 }
 
@@ -807,12 +855,12 @@ static int irq_late_init(void)
 	struct irq_host *h;
 	unsigned long flags;
 
-	spin_lock_irqsave(&irq_big_lock, flags);
+	irq_radix_wrlock(&flags);
 	list_for_each_entry(h, &irq_hosts, link) {
 		if (h->revmap_type == IRQ_HOST_MAP_TREE)
 			INIT_RADIX_TREE(&h->revmap_data.tree, GFP_ATOMIC);
 	}
-	spin_unlock_irqrestore(&irq_big_lock, flags);
+	irq_radix_wrunlock(flags);
 
 	return 0;
 }
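
The irq_radix_* helpers added above implement a brlock-style scheme: a reader marks a per-CPU flag and proceeds without taking irq_big_lock unless a writer is active, while a writer takes irq_big_lock, raises irq_radix_writer and spins until every per-CPU reader flag has dropped. The following is a minimal userspace sketch of the same idea, not kernel code: the names are hypothetical, C11 atomics and a pthread mutex stand in for the per-CPU variables, smp_mb() and the spinlock, and interrupt disabling is not modelled.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NR_READERS 4

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int reader_active[NR_READERS];	/* stand-in for per-CPU flags */
static atomic_int writer_active;		/* stand-in for irq_radix_writer */

static void rdlock(int self)
{
	atomic_store(&reader_active[self], 1);
	atomic_thread_fence(memory_order_seq_cst);	/* like smp_mb() */
	if (atomic_load(&writer_active) == 0)
		return;				/* fast path: no writer around */
	/* Slow path: back off and queue on the mutex behind the writer */
	atomic_store(&reader_active[self], 0);
	pthread_mutex_lock(&big_lock);
	atomic_store(&reader_active[self], 1);	/* writer is gone, re-mark */
	pthread_mutex_unlock(&big_lock);
}

static void rdunlock(int self)
{
	atomic_store(&reader_active[self], 0);
}

static void wrlock(void)
{
	int i, ok;

	pthread_mutex_lock(&big_lock);		/* exclude other writers */
	atomic_store(&writer_active, 1);
	atomic_thread_fence(memory_order_seq_cst);
	do {					/* wait for all readers to drain */
		ok = 1;
		for (i = 0; i < NR_READERS; i++)
			if (atomic_load(&reader_active[i]))
				ok = 0;
	} while (!ok);
}

static void wrunlock(void)
{
	atomic_store(&writer_active, 0);
	pthread_mutex_unlock(&big_lock);
}

int main(void)
{
	wrlock();
	printf("writer has exclusive access\n");
	wrunlock();

	rdlock(0);
	printf("reader 0 can look up concurrently with other readers\n");
	rdunlock(0);
	return 0;
}

The back-off in rdlock() mirrors irq_radix_rdlock(): once a reader sees a writer after raising its flag, it drops the flag and waits on the lock, so the writer never misses an active reader and readers never spin against it.
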
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index d51be7c7a2ef..c1b1e14775e4 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -1254,6 +1254,9 @@ int pci_read_irq_line(struct pci_dev *pci_dev)
 
 	DBG("Try to map irq for %s...\n", pci_name(pci_dev));
 
+#ifdef DEBUG
+	memset(&oirq, 0xff, sizeof(oirq));
+#endif
 	/* Try to get a mapping from the device-tree */
 	if (of_irq_map_pci(pci_dev, &oirq)) {
 		u8 line, pin;
@@ -1279,8 +1282,9 @@ int pci_read_irq_line(struct pci_dev *pci_dev)
 		if (virq != NO_IRQ)
 			set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
 	} else {
-		DBG(" -> got one, spec %d cells (0x%08x...) on %s\n",
-		    oirq.size, oirq.specifier[0], oirq.controller->full_name);
+		DBG(" -> got one, spec %d cells (0x%08x 0x%08x...) on %s\n",
+		    oirq.size, oirq.specifier[0], oirq.specifier[1],
+		    oirq.controller->full_name);
 
 		virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
 					     oirq.size);
@@ -1289,6 +1293,9 @@ int pci_read_irq_line(struct pci_dev *pci_dev)
1289 DBG(" -> failed to map !\n"); 1293 DBG(" -> failed to map !\n");
1290 return -1; 1294 return -1;
1291 } 1295 }
1296
1297 DBG(" -> mapped to linux irq %d\n", virq);
1298
1292 pci_dev->irq = virq; 1299 pci_dev->irq = virq;
1293 pci_write_config_byte(pci_dev, PCI_INTERRUPT_LINE, virq); 1300 pci_write_config_byte(pci_dev, PCI_INTERRUPT_LINE, virq);
1294 1301
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 90972ef6c471..b91761639d96 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -646,13 +646,13 @@ static unsigned char ibm_architecture_vec[] = {
 	5 - 1,				/* 5 option vectors */
 
 	/* option vector 1: processor architectures supported */
-	3 - 1,				/* length */
+	3 - 2,				/* length */
 	0,				/* don't ignore, don't halt */
 	OV1_PPC_2_00 | OV1_PPC_2_01 | OV1_PPC_2_02 | OV1_PPC_2_03 |
 	OV1_PPC_2_04 | OV1_PPC_2_05,
 
 	/* option vector 2: Open Firmware options supported */
-	34 - 1,				/* length */
+	34 - 2,				/* length */
 	OV2_REAL_MODE,
 	0, 0,
 	W(0xffffffff),			/* real_base */
@@ -666,16 +666,16 @@ static unsigned char ibm_architecture_vec[] = {
 	48,				/* max log_2(hash table size) */
 
 	/* option vector 3: processor options supported */
-	3 - 1,				/* length */
+	3 - 2,				/* length */
 	0,				/* don't ignore, don't halt */
 	OV3_FP | OV3_VMX,
 
 	/* option vector 4: IBM PAPR implementation */
-	2 - 1,				/* length */
+	2 - 2,				/* length */
 	0,				/* don't halt */
 
 	/* option vector 5: PAPR/OF options */
-	3 - 1,				/* length */
+	3 - 2,				/* length */
 	0,				/* don't ignore, don't halt */
 	OV5_LPAR | OV5_SPLPAR | OV5_LARGE_PAGES,
 };
diff --git a/arch/powerpc/kernel/prom_parse.c b/arch/powerpc/kernel/prom_parse.c
index 603203276ef6..603dff3ad62a 100644
--- a/arch/powerpc/kernel/prom_parse.c
+++ b/arch/powerpc/kernel/prom_parse.c
@@ -642,7 +642,7 @@ void of_irq_map_init(unsigned int flags)
 
 }
 
-int of_irq_map_raw(struct device_node *parent, const u32 *intspec,
+int of_irq_map_raw(struct device_node *parent, const u32 *intspec, u32 ointsize,
 		   const u32 *addr, struct of_irq *out_irq)
 {
 	struct device_node *ipar, *tnode, *old = NULL, *newpar = NULL;
@@ -650,6 +650,9 @@ int of_irq_map_raw(struct device_node *parent, const u32 *intspec,
 	u32 intsize = 1, addrsize, newintsize = 0, newaddrsize = 0;
 	int imaplen, match, i;
 
+	DBG("of_irq_map_raw: par=%s,intspec=[0x%08x 0x%08x...],ointsize=%d\n",
+	    parent->full_name, intspec[0], intspec[1], ointsize);
+
 	ipar = of_node_get(parent);
 
 	/* First get the #interrupt-cells property of the current cursor
@@ -673,6 +676,9 @@ int of_irq_map_raw(struct device_node *parent, const u32 *intspec,
 
 	DBG("of_irq_map_raw: ipar=%s, size=%d\n", ipar->full_name, intsize);
 
+	if (ointsize != intsize)
+		return -EINVAL;
+
 	/* Look for this #address-cells. We have to implement the old linux
 	 * trick of looking for the parent here as some device-trees rely on it
 	 */
@@ -879,12 +885,15 @@ int of_irq_map_one(struct device_node *device, int index, struct of_irq *out_irq
 	}
 	intsize = *tmp;
 
+	DBG(" intsize=%d intlen=%d\n", intsize, intlen);
+
 	/* Check index */
 	if ((index + 1) * intsize > intlen)
 		return -EINVAL;
 
 	/* Get new specifier and map it */
-	res = of_irq_map_raw(p, intspec + index * intsize, addr, out_irq);
+	res = of_irq_map_raw(p, intspec + index * intsize, intsize,
+			     addr, out_irq);
 	of_node_put(p);
 	return res;
 }
@@ -969,7 +978,7 @@ int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq)
 	laddr[0] = (pdev->bus->number << 16)
 		 | (pdev->devfn << 8);
 	laddr[1] = laddr[2] = 0;
-	return of_irq_map_raw(ppnode, &lspec, laddr, out_irq);
+	return of_irq_map_raw(ppnode, &lspec, 1, laddr, out_irq);
 }
 EXPORT_SYMBOL_GPL(of_irq_map_pci);
 #endif /* CONFIG_PCI */
diff --git a/arch/powerpc/kernel/smp-tbsync.c b/arch/powerpc/kernel/smp-tbsync.c
index f19e2e0e61e7..de59c6c31a5b 100644
--- a/arch/powerpc/kernel/smp-tbsync.c
+++ b/arch/powerpc/kernel/smp-tbsync.c
@@ -45,8 +45,9 @@ void __devinit smp_generic_take_timebase(void)
 {
 	int cmd;
 	u64 tb;
+	unsigned long flags;
 
-	local_irq_disable();
+	local_irq_save(flags);
 	while (!running)
 		barrier();
 	rmb();
@@ -70,7 +71,7 @@ void __devinit smp_generic_take_timebase(void)
 		set_tb(tb >> 32, tb & 0xfffffffful);
 		enter_contest(tbsync->mark, -1);
 	}
-	local_irq_enable();
+	local_irq_restore(flags);
 }
 
 static int __devinit start_contest(int cmd, long offset, int num)
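
The smp-tbsync change swaps local_irq_disable()/local_irq_enable() for local_irq_save()/local_irq_restore(), so smp_generic_take_timebase() returns with whatever interrupt state its caller had rather than with interrupts unconditionally enabled. A toy standalone illustration of the difference, with userspace stand-ins for the kernel macros (names and the boolean flag are hypothetical):

#include <stdbool.h>
#include <stdio.h>

static bool irqs_on;			/* stand-in for the CPU interrupt-enable state */

static void take_timebase_old(void)
{
	irqs_on = false;		/* local_irq_disable() */
	/* ... synchronize timebase ... */
	irqs_on = true;			/* local_irq_enable(): clobbers caller state */
}

static void take_timebase_new(void)
{
	bool flags = irqs_on;		/* local_irq_save(flags) */
	irqs_on = false;
	/* ... synchronize timebase ... */
	irqs_on = flags;		/* local_irq_restore(flags) */
}

int main(void)
{
	irqs_on = false;		/* caller already runs with interrupts off */
	take_timebase_old();
	printf("old helper leaves interrupts %s\n", irqs_on ? "on" : "off");

	irqs_on = false;
	take_timebase_new();
	printf("new helper leaves interrupts %s\n", irqs_on ? "on" : "off");
	return 0;
}
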
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 272cb826901d..b9a2061cfdb7 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -125,15 +125,8 @@ static long timezone_offset;
 unsigned long ppc_proc_freq;
 unsigned long ppc_tb_freq;
 
-u64 tb_last_jiffy __cacheline_aligned_in_smp;
-unsigned long tb_last_stamp;
-
-/*
- * Note that on ppc32 this only stores the bottom 32 bits of
- * the timebase value, but that's enough to tell when a jiffy
- * has passed.
- */
-DEFINE_PER_CPU(unsigned long, last_jiffy);
+static u64 tb_last_jiffy __cacheline_aligned_in_smp;
+static DEFINE_PER_CPU(u64, last_jiffy);
 
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 /*
@@ -458,7 +451,7 @@ void do_gettimeofday(struct timeval *tv)
 	do {
 		seq = read_seqbegin_irqsave(&xtime_lock, flags);
 		sec = xtime.tv_sec;
-		nsec = xtime.tv_nsec + tb_ticks_since(tb_last_stamp);
+		nsec = xtime.tv_nsec + tb_ticks_since(tb_last_jiffy);
 	} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
 	usec = nsec / 1000;
 	while (usec >= 1000000) {
@@ -700,7 +693,6 @@ void timer_interrupt(struct pt_regs * regs)
 		tb_next_jiffy = tb_last_jiffy + tb_ticks_per_jiffy;
 		if (per_cpu(last_jiffy, cpu) >= tb_next_jiffy) {
 			tb_last_jiffy = tb_next_jiffy;
-			tb_last_stamp = per_cpu(last_jiffy, cpu);
 			do_timer(regs);
 			timer_recalc_offset(tb_last_jiffy);
 			timer_check_rtc();
@@ -749,7 +741,7 @@ void __init smp_space_timers(unsigned int max_cpus)
 	int i;
 	unsigned long half = tb_ticks_per_jiffy / 2;
 	unsigned long offset = tb_ticks_per_jiffy / max_cpus;
-	unsigned long previous_tb = per_cpu(last_jiffy, boot_cpuid);
+	u64 previous_tb = per_cpu(last_jiffy, boot_cpuid);
 
 	/* make sure tb > per_cpu(last_jiffy, cpu) for all cpus always */
 	previous_tb -= tb_ticks_per_jiffy;
@@ -830,7 +822,7 @@ int do_settimeofday(struct timespec *tv)
 	 * and therefore the (jiffies - wall_jiffies) computation
 	 * has been removed.
 	 */
-	tb_delta = tb_ticks_since(tb_last_stamp);
+	tb_delta = tb_ticks_since(tb_last_jiffy);
 	tb_delta = mulhdu(tb_delta, do_gtod.varp->tb_to_xs);	/* in xsec */
 	new_nsec -= SCALE_XSEC(tb_delta, 1000000000);
 
@@ -950,8 +942,7 @@ void __init time_init(void)
 	if (__USE_RTC()) {
 		/* 601 processor: dec counts down by 128 every 128ns */
 		ppc_tb_freq = 1000000000;
-		tb_last_stamp = get_rtcl();
-		tb_last_jiffy = tb_last_stamp;
+		tb_last_jiffy = get_rtcl();
 	} else {
 		/* Normal PowerPC with timebase register */
 		ppc_md.calibrate_decr();
@@ -959,7 +950,7 @@ void __init time_init(void)
 		       ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
 		printk(KERN_DEBUG "time_init: processor frequency = %lu.%.6lu MHz\n",
 		       ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
-		tb_last_stamp = tb_last_jiffy = get_tb();
+		tb_last_jiffy = get_tb();
 	}
 
 	tb_ticks_per_jiffy = ppc_tb_freq / HZ;
@@ -1036,7 +1027,7 @@ void __init time_init(void)
 	do_gtod.varp = &do_gtod.vars[0];
 	do_gtod.var_idx = 0;
 	do_gtod.varp->tb_orig_stamp = tb_last_jiffy;
-	__get_cpu_var(last_jiffy) = tb_last_stamp;
+	__get_cpu_var(last_jiffy) = tb_last_jiffy;
 	do_gtod.varp->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
 	do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
 	do_gtod.varp->tb_to_xs = tb_to_xs;
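
Besides replacing the remaining tb_last_stamp uses with tb_last_jiffy, the time.c change widens the per-CPU last_jiffy to u64; the deleted comment noted that the old unsigned long kept only the bottom 32 bits of the timebase on ppc32. A tiny standalone sketch of that truncation, using a made-up timebase value purely for illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t tb = 0x123456789abcdef0ULL;	/* hypothetical 64-bit timebase sample */
	uint32_t old_style = (uint32_t)tb;	/* what a 32-bit 'unsigned long' kept */
	uint64_t new_style = tb;		/* the per-CPU u64 after the patch */

	printf("old: 0x%08x  new: 0x%016llx\n",
	       (unsigned)old_style, (unsigned long long)new_style);
	return 0;
}
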
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 4d0b4e74d579..9b352bd0a460 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -148,7 +148,7 @@ int die(const char *str, struct pt_regs *regs, long err)
148 panic("Fatal exception in interrupt"); 148 panic("Fatal exception in interrupt");
149 149
150 if (panic_on_oops) 150 if (panic_on_oops)
151 panic("Fatal exception: panic_on_oops"); 151 panic("Fatal exception");
152 152
153 do_exit(err); 153 do_exit(err);
154 154