author		Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-11 22:21:23 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-11 22:21:23 -0400
commit		dd6d1844af33acb4edd0a40b1770d091a22c94be (patch)
tree		e6bd3549919773a13b770324a4dddb51b194b452 /arch/mips/kernel
parent		19f71153b9be219756c6b2757921433a69b7975c (diff)
parent		aaf76a3245c02faba51c96b9a340c14d6bb0dcc0 (diff)
Merge branch 'upstream' of git://ftp.linux-mips.org/pub/scm/upstream-linus
* 'upstream' of git://ftp.linux-mips.org/pub/scm/upstream-linus: (80 commits)
  [MIPS] tlbex.c: Cleanup __init usage.
  [MIPS] WRPPMC serial support move to platform device
  [MIPS] R1: Fix hazard barriers to make kernels work on R2 also.
  [MIPS] VPE: reimplement ELF loader.
  [MIPS] cleanup WRPPMC include files
  [MIPS] Add BUG_ON assertion for attempt to run kernel on the wrong CPU type.
  [MIPS] SMP: Use ISO C struct initializer for local structs.
  [MIPS] SMP: Kill useless casts.
  [MIPS] Kill num_online_cpus() loops.
  [MIPS] SMP: Implement smp_call_function_mask().
  [MIPS] Make facility to convert CPU types to strings generally available.
  [MIPS] Convert list of CPU types from #define to enum.
  [MIPS] Optimize get_unaligned / put_unaligned implementations.
  [MIPS] checkfiles: Fix "need space after that ','" errors.
  [MIPS] Fix "no space between function name and open parenthesis" warnings.
  [MIPS] Allow hardwiring of the CPU type to a single type for optimization.
  [MIPS] tlbex: Size optimize code by declaring a few functions inline.
  [MIPS] pg-r4k.c: Dump the generated code
  [MIPS] Cobalt: Remove cobalt_machine_power_off()
  [MIPS] Cobalt: Move reset port definition to arch/mips/cobalt/reset.c
  ...
Diffstat (limited to 'arch/mips/kernel')
-rw-r--r--  arch/mips/kernel/Makefile        |   2
-rw-r--r--  arch/mips/kernel/binfmt_elfo32.c |   2
-rw-r--r--  arch/mips/kernel/cpu-bugs64.c    |   2
-rw-r--r--  arch/mips/kernel/cpu-probe.c     | 129
-rw-r--r--  arch/mips/kernel/gdb-stub.c      |  26
-rw-r--r--  arch/mips/kernel/i8253.c         | 213
-rw-r--r--  arch/mips/kernel/i8259.c         |  37
-rw-r--r--  arch/mips/kernel/irixelf.c       |  40
-rw-r--r--  arch/mips/kernel/irixinv.c       |  42
-rw-r--r--  arch/mips/kernel/irixioctl.c     |   2
-rw-r--r--  arch/mips/kernel/irixsig.c       |   8
-rw-r--r--  arch/mips/kernel/irq-gt641xx.c   | 131
-rw-r--r--  arch/mips/kernel/irq-msc01.c     |   4
-rw-r--r--  arch/mips/kernel/irq.c           |   4
-rw-r--r--  arch/mips/kernel/kspd.c          |  12
-rw-r--r--  arch/mips/kernel/linux32.c       |  24
-rw-r--r--  arch/mips/kernel/mips-mt.c       |   2
-rw-r--r--  arch/mips/kernel/proc.c          |  73
-rw-r--r--  arch/mips/kernel/process.c       |  11
-rw-r--r--  arch/mips/kernel/ptrace.c        |  50
-rw-r--r--  arch/mips/kernel/ptrace32.c      |  16
-rw-r--r--  arch/mips/kernel/setup.c         |   2
-rw-r--r--  arch/mips/kernel/signal.c        |   4
-rw-r--r--  arch/mips/kernel/signal32.c      |  44
-rw-r--r--  arch/mips/kernel/signal_n32.c    |   4
-rw-r--r--  arch/mips/kernel/smp-mt.c        |   2
-rw-r--r--  arch/mips/kernel/smp.c           | 123
-rw-r--r--  arch/mips/kernel/smtc.c          | 146
-rw-r--r--  arch/mips/kernel/syscall.c       |  60
-rw-r--r--  arch/mips/kernel/sysirix.c       |  22
-rw-r--r--  arch/mips/kernel/time.c          | 416
-rw-r--r--  arch/mips/kernel/traps.c         |  45
-rw-r--r--  arch/mips/kernel/unaligned.c     |   2
-rw-r--r--  arch/mips/kernel/vmlinux.lds.S   | 339
-rw-r--r--  arch/mips/kernel/vpe.c           |  47
35 files changed, 1292 insertions, 794 deletions
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 2fd96d95a39c..a2689f93c160 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -51,6 +51,7 @@ obj-$(CONFIG_IRQ_CPU_RM7K) += irq-rm7000.o
 obj-$(CONFIG_IRQ_CPU_RM9K)	+= irq-rm9000.o
 obj-$(CONFIG_MIPS_BOARDS_GEN)	+= irq-msc01.o
 obj-$(CONFIG_IRQ_TXX9)		+= irq_txx9.o
+obj-$(CONFIG_IRQ_GT641XX)	+= irq-gt641xx.o
 
 obj-$(CONFIG_32BIT)		+= scall32-o32.o
 obj-$(CONFIG_64BIT)		+= scall64-64.o
@@ -64,6 +65,7 @@ obj-$(CONFIG_PROC_FS) += proc.o
 
 obj-$(CONFIG_64BIT)		+= cpu-bugs64.o
 
+obj-$(CONFIG_I8253)		+= i8253.o
 obj-$(CONFIG_PCSPEAKER)	+= pcspeaker.o
 
 obj-$(CONFIG_KEXEC)		+= machine_kexec.o relocate_kernel.o
diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
index 993f7ec70f35..da41eac195ca 100644
--- a/arch/mips/kernel/binfmt_elfo32.c
+++ b/arch/mips/kernel/binfmt_elfo32.c
@@ -110,7 +110,7 @@ jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value)
 }
 
 #undef ELF_CORE_COPY_REGS
-#define ELF_CORE_COPY_REGS(_dest,_regs) elf32_core_copy_regs(_dest,_regs);
+#define ELF_CORE_COPY_REGS(_dest, _regs) elf32_core_copy_regs(_dest, _regs);
 
 void elf32_core_copy_regs(elf_gregset_t grp, struct pt_regs *regs)
 {
diff --git a/arch/mips/kernel/cpu-bugs64.c b/arch/mips/kernel/cpu-bugs64.c
index 6648fde20b96..af78456d4138 100644
--- a/arch/mips/kernel/cpu-bugs64.c
+++ b/arch/mips/kernel/cpu-bugs64.c
@@ -29,7 +29,7 @@ static inline void align_mod(const int align, const int mod)
 		".endr\n\t"
 		".set	pop"
 		:
-		: GCC_IMM_ASM (align), GCC_IMM_ASM (mod));
+		: GCC_IMM_ASM(align), GCC_IMM_ASM(mod));
 }
 
 static inline void mult_sh_align_mod(long *v1, long *v2, long *w,
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 3e004161ebd5..c8c47a2d1972 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -159,6 +159,7 @@ static inline void check_wait(void)
 	case CPU_5KC:
 	case CPU_25KF:
 	case CPU_PR4450:
+	case CPU_BCM3302:
 		cpu_wait = r4k_wait;
 		break;
 
@@ -745,14 +746,6 @@ static inline void cpu_probe_sibyte(struct cpuinfo_mips *c)
 {
 	decode_configs(c);
 
-	/*
-	 * For historical reasons the SB1 comes with it's own variant of
-	 * cache code which eventually will be folded into c-r4k.c. Until
-	 * then we pretend it's got it's own cache architecture.
-	 */
-	c->options &= ~MIPS_CPU_4K_CACHE;
-	c->options |= MIPS_CPU_SB1_CACHE;
-
 	switch (c->processor_id & 0xff00) {
 	case PRID_IMP_SB1:
 		c->cputype = CPU_SB1;
@@ -793,9 +786,111 @@ static inline void cpu_probe_philips(struct cpuinfo_mips *c)
 }
 
 
+static inline void cpu_probe_broadcom(struct cpuinfo_mips *c)
+{
+	decode_configs(c);
+	switch (c->processor_id & 0xff00) {
+	case PRID_IMP_BCM3302:
+		c->cputype = CPU_BCM3302;
+		break;
+	case PRID_IMP_BCM4710:
+		c->cputype = CPU_BCM4710;
+		break;
+	default:
+		c->cputype = CPU_UNKNOWN;
+		break;
+	}
+}
+
+const char *__cpu_name[NR_CPUS];
+
+/*
+ * Name a CPU
+ */
+static __init const char *cpu_to_name(struct cpuinfo_mips *c)
+{
+	const char *name = NULL;
+
+	switch (c->cputype) {
+	case CPU_UNKNOWN:	name = "unknown"; break;
+	case CPU_R2000:		name = "R2000"; break;
+	case CPU_R3000:		name = "R3000"; break;
+	case CPU_R3000A:	name = "R3000A"; break;
+	case CPU_R3041:		name = "R3041"; break;
+	case CPU_R3051:		name = "R3051"; break;
+	case CPU_R3052:		name = "R3052"; break;
+	case CPU_R3081:		name = "R3081"; break;
+	case CPU_R3081E:	name = "R3081E"; break;
+	case CPU_R4000PC:	name = "R4000PC"; break;
+	case CPU_R4000SC:	name = "R4000SC"; break;
+	case CPU_R4000MC:	name = "R4000MC"; break;
+	case CPU_R4200:		name = "R4200"; break;
+	case CPU_R4400PC:	name = "R4400PC"; break;
+	case CPU_R4400SC:	name = "R4400SC"; break;
+	case CPU_R4400MC:	name = "R4400MC"; break;
+	case CPU_R4600:		name = "R4600"; break;
+	case CPU_R6000:		name = "R6000"; break;
+	case CPU_R6000A:	name = "R6000A"; break;
+	case CPU_R8000:		name = "R8000"; break;
+	case CPU_R10000:	name = "R10000"; break;
+	case CPU_R12000:	name = "R12000"; break;
+	case CPU_R14000:	name = "R14000"; break;
+	case CPU_R4300:		name = "R4300"; break;
+	case CPU_R4650:		name = "R4650"; break;
+	case CPU_R4700:		name = "R4700"; break;
+	case CPU_R5000:		name = "R5000"; break;
+	case CPU_R5000A:	name = "R5000A"; break;
+	case CPU_R4640:		name = "R4640"; break;
+	case CPU_NEVADA:	name = "Nevada"; break;
+	case CPU_RM7000:	name = "RM7000"; break;
+	case CPU_RM9000:	name = "RM9000"; break;
+	case CPU_R5432:		name = "R5432"; break;
+	case CPU_4KC:		name = "MIPS 4Kc"; break;
+	case CPU_5KC:		name = "MIPS 5Kc"; break;
+	case CPU_R4310:		name = "R4310"; break;
+	case CPU_SB1:		name = "SiByte SB1"; break;
+	case CPU_SB1A:		name = "SiByte SB1A"; break;
+	case CPU_TX3912:	name = "TX3912"; break;
+	case CPU_TX3922:	name = "TX3922"; break;
+	case CPU_TX3927:	name = "TX3927"; break;
+	case CPU_AU1000:	name = "Au1000"; break;
+	case CPU_AU1500:	name = "Au1500"; break;
+	case CPU_AU1100:	name = "Au1100"; break;
+	case CPU_AU1550:	name = "Au1550"; break;
+	case CPU_AU1200:	name = "Au1200"; break;
+	case CPU_4KEC:		name = "MIPS 4KEc"; break;
+	case CPU_4KSC:		name = "MIPS 4KSc"; break;
+	case CPU_VR41XX:	name = "NEC Vr41xx"; break;
+	case CPU_R5500:		name = "R5500"; break;
+	case CPU_TX49XX:	name = "TX49xx"; break;
+	case CPU_20KC:		name = "MIPS 20Kc"; break;
+	case CPU_24K:		name = "MIPS 24K"; break;
+	case CPU_25KF:		name = "MIPS 25Kf"; break;
+	case CPU_34K:		name = "MIPS 34K"; break;
+	case CPU_74K:		name = "MIPS 74K"; break;
+	case CPU_VR4111:	name = "NEC VR4111"; break;
+	case CPU_VR4121:	name = "NEC VR4121"; break;
+	case CPU_VR4122:	name = "NEC VR4122"; break;
+	case CPU_VR4131:	name = "NEC VR4131"; break;
+	case CPU_VR4133:	name = "NEC VR4133"; break;
+	case CPU_VR4181:	name = "NEC VR4181"; break;
+	case CPU_VR4181A:	name = "NEC VR4181A"; break;
+	case CPU_SR71000:	name = "Sandcraft SR71000"; break;
+	case CPU_BCM3302:	name = "Broadcom BCM3302"; break;
+	case CPU_BCM4710:	name = "Broadcom BCM4710"; break;
+	case CPU_PR4450:	name = "Philips PR4450"; break;
+	case CPU_LOONGSON2:	name = "ICT Loongson-2"; break;
+	default:
+		BUG();
+	}
+
+	return name;
+}
+
 __init void cpu_probe(void)
 {
 	struct cpuinfo_mips *c = &current_cpu_data;
+	unsigned int cpu = smp_processor_id();
 
 	c->processor_id	= PRID_IMP_UNKNOWN;
 	c->fpu_id	= FPIR_IMP_NONE;
@@ -815,6 +910,9 @@ __init void cpu_probe(void)
 	case PRID_COMP_SIBYTE:
 		cpu_probe_sibyte(c);
 		break;
+	case PRID_COMP_BROADCOM:
+		cpu_probe_broadcom(c);
+		break;
 	case PRID_COMP_SANDCRAFT:
 		cpu_probe_sandcraft(c);
 		break;
@@ -824,6 +922,14 @@ __init void cpu_probe(void)
 	default:
 		c->cputype = CPU_UNKNOWN;
 	}
+
+	/*
+	 * Platform code can force the cpu type to optimize code
+	 * generation. In that case be sure the cpu type is correctly
+	 * manually setup otherwise it could trigger some nasty bugs.
+	 */
+	BUG_ON(current_cpu_type() != c->cputype);
+
 	if (c->options & MIPS_CPU_FPU) {
 		c->fpu_id = cpu_get_fpu_id();
 
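
The BUG_ON() added above guards the CPU-type hardwiring feature from this series ("Allow hardwiring of the CPU type to a single type for optimization" in the merge log): if a platform pins the type at build time, current_cpu_type() can fold to a compile-time constant, and a mismatch with the probed value must fail loudly rather than miscompile silently. A minimal sketch of the idea; the config symbol and the chosen constant below are illustrative placeholders, not the kernel's exact spellings:

	/* Sketch only: how current_cpu_type() can collapse to a constant.
	 * CONFIG_CPU_TYPE_HARDWIRED and CPU_SB1 stand in for whatever the
	 * platform actually selects. */
	#ifdef CONFIG_CPU_TYPE_HARDWIRED
	static inline int current_cpu_type(void)
	{
		return CPU_SB1;	/* constant: switch statements on it dead-code away */
	}
	#else
	static inline int current_cpu_type(void)
	{
		return current_cpu_data.cputype;	/* the probed value */
	}
	#endif
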
@@ -835,13 +941,16 @@ __init void cpu_probe(void)
 			c->ases |= MIPS_ASE_MIPS3D;
 		}
 	}
+
+	__cpu_name[cpu] = cpu_to_name(c);
 }
 
 __init void cpu_report(void)
 {
 	struct cpuinfo_mips *c = &current_cpu_data;
 
-	printk("CPU revision is: %08x\n", c->processor_id);
+	printk(KERN_INFO "CPU revision is: %08x (%s)\n",
+	       c->processor_id, cpu_name_string());
 	if (c->options & MIPS_CPU_FPU)
-		printk("FPU revision is: %08x\n", c->fpu_id);
+		printk(KERN_INFO "FPU revision is: %08x\n", c->fpu_id);
 }
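
cpu_report() now prints through cpu_name_string(), which this hunk uses but does not define. Given that cpu_probe() records each CPU's name in __cpu_name[], a plausible minimal implementation would be the following sketch (an assumption, not the patch's actual definition):

	/* Sketch: assumes cpu_name_string() simply returns the name that
	 * cpu_probe() recorded for the CPU we are executing on. */
	static inline const char *cpu_name_string(void)
	{
		return __cpu_name[smp_processor_id()];
	}
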
diff --git a/arch/mips/kernel/gdb-stub.c b/arch/mips/kernel/gdb-stub.c
index cb5623aad552..3191afa29ad8 100644
--- a/arch/mips/kernel/gdb-stub.c
+++ b/arch/mips/kernel/gdb-stub.c
@@ -676,15 +676,18 @@ static void kgdb_wait(void *arg)
 static int kgdb_smp_call_kgdb_wait(void)
 {
 #ifdef CONFIG_SMP
+	cpumask_t mask = cpu_online_map;
 	struct call_data_struct data;
-	int i, cpus = num_online_cpus() - 1;
 	int cpu = smp_processor_id();
+	int cpus;
 
 	/*
 	 * Can die spectacularly if this CPU isn't yet marked online
 	 */
 	BUG_ON(!cpu_online(cpu));
 
+	cpu_clear(cpu, mask);
+	cpus = cpus_weight(mask);
 	if (!cpus)
 		return 0;
 
@@ -711,10 +714,7 @@ static int kgdb_smp_call_kgdb_wait(void)
 	call_data = &data;
 	mb();
 
-	/* Send a message to all other CPUs and wait for them to respond */
-	for (i = 0; i < NR_CPUS; i++)
-		if (cpu_online(i) && i != cpu)
-			core_send_ipi(i, SMP_CALL_FUNCTION);
+	core_send_ipi_mask(mask, SMP_CALL_FUNCTION);
 
 	/* Wait for response */
 	/* FIXME: lock-up detection, backtrace on lock-up */
@@ -733,7 +733,7 @@ static int kgdb_smp_call_kgdb_wait(void)
  * returns 1 if you should skip the instruction at the trap address, 0
  * otherwise.
  */
-void handle_exception (struct gdb_regs *regs)
+void handle_exception(struct gdb_regs *regs)
 {
 	int trap;			/* Trap type */
 	int sigval;
@@ -769,7 +769,7 @@ void handle_exception (struct gdb_regs *regs)
 	/*
 	 * acquire the CPU spinlocks
 	 */
-	for (i = num_online_cpus()-1; i >= 0; i--)
+	for_each_online_cpu(i)
 		if (__raw_spin_trylock(&kgdb_cpulock[i]) == 0)
 			panic("kgdb: couldn't get cpulock %d\n", i);
 
@@ -902,7 +902,7 @@ void handle_exception (struct gdb_regs *regs)
 			hex2mem(ptr, (char *)&regs->frame_ptr, 2*sizeof(long), 0, 0);
 			ptr += 2*(2*sizeof(long));
 			hex2mem(ptr, (char *)&regs->cp0_index, 16*sizeof(long), 0, 0);
-			strcpy(output_buffer,"OK");
+			strcpy(output_buffer, "OK");
 		}
 		break;
 
@@ -917,9 +917,9 @@ void handle_exception (struct gdb_regs *regs)
 		    && hexToInt(&ptr, &length)) {
 			if (mem2hex((char *)addr, output_buffer, length, 1))
 				break;
-			strcpy (output_buffer, "E03");
+			strcpy(output_buffer, "E03");
 		} else
-			strcpy(output_buffer,"E01");
+			strcpy(output_buffer, "E01");
 		break;
 
 	/*
@@ -996,7 +996,7 @@ void handle_exception (struct gdb_regs *regs)
 		ptr = &input_buffer[1];
 		if (!hexToInt(&ptr, &baudrate))
 		{
-			strcpy(output_buffer,"B01");
+			strcpy(output_buffer, "B01");
 			break;
 		}
 
@@ -1015,7 +1015,7 @@ void handle_exception (struct gdb_regs *regs)
 			break;
 		default:
 			baudrate = 0;
-			strcpy(output_buffer,"B02");
+			strcpy(output_buffer, "B02");
 			goto x1;
 		}
 
@@ -1044,7 +1044,7 @@ finish_kgdb:
 
 exit_kgdb_exception:
 	/* release locks so other CPUs can go */
-	for (i = num_online_cpus()-1; i >= 0; i--)
+	for_each_online_cpu(i)
 		__raw_spin_unlock(&kgdb_cpulock[i]);
 	spin_unlock(&kgdb_lock);
 
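
The rewritten kgdb_smp_call_kgdb_wait() shows the pattern this series applies across arch/mips ("Kill num_online_cpus() loops", "Implement smp_call_function_mask()"): instead of probing all NR_CPUS slots and sending one IPI per online CPU, snapshot the online map, remove yourself, and hand the whole mask to a single core_send_ipi_mask() call. The mask arithmetic in isolation, using the same old-style cpumask API (a sketch, not part of the patch):

	/* Sketch: compute "every online CPU except the caller", as the
	 * patched function does before core_send_ipi_mask(). */
	static int other_online_cpus(cpumask_t *out)
	{
		cpumask_t mask = cpu_online_map;	/* snapshot of online CPUs */

		cpu_clear(smp_processor_id(), mask);	/* drop ourselves */
		*out = mask;
		return cpus_weight(mask);		/* number of targets left */
	}
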
diff --git a/arch/mips/kernel/i8253.c b/arch/mips/kernel/i8253.c
new file mode 100644
index 000000000000..5d9830df3595
--- /dev/null
+++ b/arch/mips/kernel/i8253.c
@@ -0,0 +1,213 @@
+/*
+ * i8253.c  8253/PIT functions
+ *
+ */
+#include <linux/clockchips.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+
+#include <asm/delay.h>
+#include <asm/i8253.h>
+#include <asm/io.h>
+
+static DEFINE_SPINLOCK(i8253_lock);
+
+/*
+ * Initialize the PIT timer.
+ *
+ * This is also called after resume to bring the PIT into operation again.
+ */
+static void init_pit_timer(enum clock_event_mode mode,
+			   struct clock_event_device *evt)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&i8253_lock, flags);
+
+	switch(mode) {
+	case CLOCK_EVT_MODE_PERIODIC:
+		/* binary, mode 2, LSB/MSB, ch 0 */
+		outb_p(0x34, PIT_MODE);
+		outb_p(LATCH & 0xff , PIT_CH0);	/* LSB */
+		outb(LATCH >> 8 , PIT_CH0);	/* MSB */
+		break;
+
+	case CLOCK_EVT_MODE_SHUTDOWN:
+	case CLOCK_EVT_MODE_UNUSED:
+		if (evt->mode == CLOCK_EVT_MODE_PERIODIC ||
+		    evt->mode == CLOCK_EVT_MODE_ONESHOT) {
+			outb_p(0x30, PIT_MODE);
+			outb_p(0, PIT_CH0);
+			outb_p(0, PIT_CH0);
+		}
+		break;
+
+	case CLOCK_EVT_MODE_ONESHOT:
+		/* One shot setup */
+		outb_p(0x38, PIT_MODE);
+		break;
+
+	case CLOCK_EVT_MODE_RESUME:
+		/* Nothing to do here */
+		break;
+	}
+	spin_unlock_irqrestore(&i8253_lock, flags);
+}
+
+/*
+ * Program the next event in oneshot mode
+ *
+ * Delta is given in PIT ticks
+ */
+static int pit_next_event(unsigned long delta, struct clock_event_device *evt)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&i8253_lock, flags);
+	outb_p(delta & 0xff , PIT_CH0);	/* LSB */
+	outb(delta >> 8 , PIT_CH0);	/* MSB */
+	spin_unlock_irqrestore(&i8253_lock, flags);
+
+	return 0;
+}
+
+/*
+ * On UP the PIT can serve all of the possible timer functions. On SMP systems
+ * it can be solely used for the global tick.
+ *
+ * The profiling and update capabilites are switched off once the local apic is
+ * registered. This mechanism replaces the previous #ifdef LOCAL_APIC -
+ * !using_apic_timer decisions in do_timer_interrupt_hook()
+ */
+struct clock_event_device pit_clockevent = {
+	.name		= "pit",
+	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
+	.set_mode	= init_pit_timer,
+	.set_next_event	= pit_next_event,
+	.shift		= 32,
+	.irq		= 0,
+};
+
+irqreturn_t timer_interrupt(int irq, void *dev_id)
+{
+	pit_clockevent.event_handler(&pit_clockevent);
+
+	return IRQ_HANDLED;
+}
+
+static struct irqaction irq0 = {
+	.handler	= timer_interrupt,
+	.flags		= IRQF_DISABLED | IRQF_NOBALANCING,
+	.mask		= CPU_MASK_NONE,
+	.name		= "timer"
+};
+
+/*
+ * Initialize the conversion factor and the min/max deltas of the clock event
+ * structure and register the clock event source with the framework.
+ */
+void __init setup_pit_timer(void)
+{
+	/*
+	 * Start pit with the boot cpu mask and make it global after the
+	 * IO_APIC has been initialized.
+	 */
+	pit_clockevent.cpumask = cpumask_of_cpu(0);
+	pit_clockevent.mult = div_sc(CLOCK_TICK_RATE, NSEC_PER_SEC, 32);
+	pit_clockevent.max_delta_ns =
+		clockevent_delta2ns(0x7FFF, &pit_clockevent);
+	pit_clockevent.min_delta_ns =
+		clockevent_delta2ns(0xF, &pit_clockevent);
+	clockevents_register_device(&pit_clockevent);
+
+	irq0.mask = cpumask_of_cpu(0);
+	setup_irq(0, &irq0);
+}
+
+/*
+ * Since the PIT overflows every tick, its not very useful
+ * to just read by itself. So use jiffies to emulate a free
+ * running counter:
+ */
+static cycle_t pit_read(void)
+{
+	unsigned long flags;
+	int count;
+	u32 jifs;
+	static int old_count;
+	static u32 old_jifs;
+
+	spin_lock_irqsave(&i8253_lock, flags);
+	/*
+	 * Although our caller may have the read side of xtime_lock,
+	 * this is now a seqlock, and we are cheating in this routine
+	 * by having side effects on state that we cannot undo if
+	 * there is a collision on the seqlock and our caller has to
+	 * retry. (Namely, old_jifs and old_count.) So we must treat
+	 * jiffies as volatile despite the lock. We read jiffies
+	 * before latching the timer count to guarantee that although
+	 * the jiffies value might be older than the count (that is,
+	 * the counter may underflow between the last point where
+	 * jiffies was incremented and the point where we latch the
+	 * count), it cannot be newer.
+	 */
+	jifs = jiffies;
+	outb_p(0x00, PIT_MODE);	/* latch the count ASAP */
+	count = inb_p(PIT_CH0);	/* read the latched count */
+	count |= inb_p(PIT_CH0) << 8;
+
+	/* VIA686a test code... reset the latch if count > max + 1 */
+	if (count > LATCH) {
+		outb_p(0x34, PIT_MODE);
+		outb_p(LATCH & 0xff, PIT_CH0);
+		outb(LATCH >> 8, PIT_CH0);
+		count = LATCH - 1;
+	}
+
+	/*
+	 * It's possible for count to appear to go the wrong way for a
+	 * couple of reasons:
+	 *
+	 *  1. The timer counter underflows, but we haven't handled the
+	 *     resulting interrupt and incremented jiffies yet.
+	 *  2. Hardware problem with the timer, not giving us continuous time,
+	 *     the counter does small "jumps" upwards on some Pentium systems,
+	 *     (see c't 95/10 page 335 for Neptun bug.)
+	 *
+	 * Previous attempts to handle these cases intelligently were
+	 * buggy, so we just do the simple thing now.
+	 */
+	if (count > old_count && jifs == old_jifs) {
+		count = old_count;
+	}
+	old_count = count;
+	old_jifs = jifs;
+
+	spin_unlock_irqrestore(&i8253_lock, flags);
+
+	count = (LATCH - 1) - count;
+
+	return (cycle_t)(jifs * LATCH) + count;
+}
+
+static struct clocksource clocksource_pit = {
+	.name	= "pit",
+	.rating = 110,
+	.read	= pit_read,
+	.mask	= CLOCKSOURCE_MASK(32),
+	.mult	= 0,
+	.shift	= 20,
+};
+
+static int __init init_pit_clocksource(void)
+{
+	if (num_possible_cpus() > 1) /* PIT does not scale! */
+		return 0;
+
+	clocksource_pit.mult = clocksource_hz2mult(CLOCK_TICK_RATE, 20);
+	return clocksource_register(&clocksource_pit);
+}
+arch_initcall(init_pit_clocksource);
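
Both registration paths in this new file rely on the same fixed-point trick: a frequency ratio is pre-scaled into an integer multiplier so the hot path needs only a multiply and a shift. For the clock event device, div_sc(freq, NSEC_PER_SEC, shift) yields mult = (freq << shift) / NSEC_PER_SEC, so a nanosecond delay converts to PIT ticks as (ns * mult) >> shift; clockevent_delta2ns() inverts that, and clocksource_hz2mult() does the mirror-image cycles-to-nanoseconds scaling. A small self-contained illustration of the arithmetic (userspace sketch; the 1,193,182 Hz PIT input clock is an assumption):

	#include <stdint.h>
	#include <stdio.h>

	#define NSEC_PER_SEC	1000000000ULL
	#define PIT_HZ		1193182ULL	/* assumed i8253 input clock */

	int main(void)
	{
		unsigned int shift = 32;
		/* what div_sc(PIT_HZ, NSEC_PER_SEC, 32) computes: */
		uint64_t mult = (PIT_HZ << shift) / NSEC_PER_SEC;

		/* convert 1 ms to PIT ticks the way the event path would */
		uint64_t ns = 1000000;
		uint64_t ticks = (ns * mult) >> shift;

		printf("mult=%llu, 1ms = %llu ticks (expect ~1193)\n",
		       (unsigned long long)mult, (unsigned long long)ticks);
		return 0;
	}
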
diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
index 3a2d255361bc..471013577108 100644
--- a/arch/mips/kernel/i8259.c
+++ b/arch/mips/kernel/i8259.c
@@ -30,8 +30,10 @@
 
 static int i8259A_auto_eoi = -1;
 DEFINE_SPINLOCK(i8259A_lock);
-/* some platforms call this... */
-void mask_and_ack_8259A(unsigned int);
+static void disable_8259A_irq(unsigned int irq);
+static void enable_8259A_irq(unsigned int irq);
+static void mask_and_ack_8259A(unsigned int irq);
+static void init_8259A(int auto_eoi);
 
 static struct irq_chip i8259A_chip = {
 	.name		= "XT-PIC",
@@ -39,6 +41,9 @@ static struct irq_chip i8259A_chip = {
 	.disable	= disable_8259A_irq,
 	.unmask		= enable_8259A_irq,
 	.mask_ack	= mask_and_ack_8259A,
+#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
+	.set_affinity	= plat_set_irq_affinity,
+#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
 };
 
 /*
@@ -53,7 +58,7 @@ static unsigned int cached_irq_mask = 0xffff;
 #define cached_master_mask	(cached_irq_mask)
 #define cached_slave_mask	(cached_irq_mask >> 8)
 
-void disable_8259A_irq(unsigned int irq)
+static void disable_8259A_irq(unsigned int irq)
 {
 	unsigned int mask;
 	unsigned long flags;
@@ -69,7 +74,7 @@ void disable_8259A_irq(unsigned int irq)
 	spin_unlock_irqrestore(&i8259A_lock, flags);
 }
 
-void enable_8259A_irq(unsigned int irq)
+static void enable_8259A_irq(unsigned int irq)
 {
 	unsigned int mask;
 	unsigned long flags;
@@ -122,14 +127,14 @@ static inline int i8259A_irq_real(unsigned int irq)
 	int irqmask = 1 << irq;
 
 	if (irq < 8) {
-		outb(0x0B,PIC_MASTER_CMD);	/* ISR register */
+		outb(0x0B, PIC_MASTER_CMD);	/* ISR register */
 		value = inb(PIC_MASTER_CMD) & irqmask;
-		outb(0x0A,PIC_MASTER_CMD);	/* back to the IRR register */
+		outb(0x0A, PIC_MASTER_CMD);	/* back to the IRR register */
 		return value;
 	}
-	outb(0x0B,PIC_SLAVE_CMD);	/* ISR register */
+	outb(0x0B, PIC_SLAVE_CMD);	/* ISR register */
 	value = inb(PIC_SLAVE_CMD) & (irqmask >> 8);
-	outb(0x0A,PIC_SLAVE_CMD);	/* back to the IRR register */
+	outb(0x0A, PIC_SLAVE_CMD);	/* back to the IRR register */
 	return value;
 }
 
@@ -139,7 +144,7 @@ static inline int i8259A_irq_real(unsigned int irq)
  * first, _then_ send the EOI, and the order of EOI
  * to the two 8259s is important!
  */
-void mask_and_ack_8259A(unsigned int irq)
+static void mask_and_ack_8259A(unsigned int irq)
 {
 	unsigned int irqmask;
 	unsigned long flags;
@@ -170,12 +175,12 @@ handle_real_irq:
 	if (irq & 8) {
 		inb(PIC_SLAVE_IMR);	/* DUMMY - (do we need this?) */
 		outb(cached_slave_mask, PIC_SLAVE_IMR);
-		outb(0x60+(irq&7),PIC_SLAVE_CMD);/* 'Specific EOI' to slave */
-		outb(0x60+PIC_CASCADE_IR,PIC_MASTER_CMD); /* 'Specific EOI' to master-IRQ2 */
+		outb(0x60+(irq&7), PIC_SLAVE_CMD);/* 'Specific EOI' to slave */
+		outb(0x60+PIC_CASCADE_IR, PIC_MASTER_CMD); /* 'Specific EOI' to master-IRQ2 */
 	} else {
 		inb(PIC_MASTER_IMR);	/* DUMMY - (do we need this?) */
 		outb(cached_master_mask, PIC_MASTER_IMR);
-		outb(0x60+irq,PIC_MASTER_CMD);	/* 'Specific EOI to master */
+		outb(0x60+irq, PIC_MASTER_CMD);	/* 'Specific EOI to master */
 	}
 	smtc_im_ack_irq(irq);
 	spin_unlock_irqrestore(&i8259A_lock, flags);
@@ -253,7 +258,7 @@ static int __init i8259A_init_sysfs(void)
 
 device_initcall(i8259A_init_sysfs);
 
-void init_8259A(int auto_eoi)
+static void init_8259A(int auto_eoi)
 {
 	unsigned long flags;
 
@@ -300,7 +305,9 @@ void init_8259A(int auto_eoi)
  * IRQ2 is cascade interrupt to second interrupt controller
  */
 static struct irqaction irq2 = {
-	no_action, 0, CPU_MASK_NONE, "cascade", NULL, NULL
+	.handler = no_action,
+	.mask = CPU_MASK_NONE,
+	.name = "cascade",
 };
 
 static struct resource pic1_io_resource = {
@@ -322,7 +329,7 @@ static struct resource pic2_io_resource = {
  * driver compatibility reasons interrupts 0 - 15 to be the i8259
  * interrupts even if the hardware uses a different interrupt numbering.
  */
-void __init init_i8259_irqs (void)
+void __init init_i8259_irqs(void)
 {
 	int i;
 
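
The irq2 change above ("Use ISO C struct initializer for local structs" in the merge log) swaps a positional initializer for C99 designated initializers, which is why the trailing NULL members and the zero flags field can simply disappear: unnamed members are zero-initialized. A minimal illustration with a stand-in struct (the field set is assumed for the example, not struct irqaction's full definition):

	struct action {			/* stand-in for struct irqaction */
		int (*handler)(void);
		unsigned long flags;
		const char *name;
		struct action *next;
	};

	static int no_op(void) { return 0; }

	/* positional: every member up to the last one used must be spelled out */
	static struct action a = { no_op, 0, "cascade", (struct action *)0 };

	/* designated: order-independent; omitted members become zero/NULL */
	static struct action b = {
		.handler = no_op,
		.name    = "cascade",
	};
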
diff --git a/arch/mips/kernel/irixelf.c b/arch/mips/kernel/irixelf.c
index 403d96f99e77..8ef5cf4cc423 100644
--- a/arch/mips/kernel/irixelf.c
+++ b/arch/mips/kernel/irixelf.c
@@ -203,8 +203,8 @@ static unsigned long * create_irix_tables(char * p, int argc, int envc,
  * Put the ELF interpreter info on the stack
  */
 #define NEW_AUX_ENT(nr, id, val) \
-	__put_user ((id), sp+(nr*2)); \
-	__put_user ((val), sp+(nr*2+1)); \
+	__put_user((id), sp+(nr*2)); \
+	__put_user((val), sp+(nr*2+1)); \
 
 	sp -= 2;
 	NEW_AUX_ENT(0, AT_NULL, 0);
@@ -212,17 +212,17 @@ static unsigned long * create_irix_tables(char * p, int argc, int envc,
 	if (exec) {
 		sp -= 11*2;
 
-		NEW_AUX_ENT (0, AT_PHDR, load_addr + exec->e_phoff);
-		NEW_AUX_ENT (1, AT_PHENT, sizeof (struct elf_phdr));
-		NEW_AUX_ENT (2, AT_PHNUM, exec->e_phnum);
-		NEW_AUX_ENT (3, AT_PAGESZ, ELF_EXEC_PAGESIZE);
-		NEW_AUX_ENT (4, AT_BASE, interp_load_addr);
-		NEW_AUX_ENT (5, AT_FLAGS, 0);
-		NEW_AUX_ENT (6, AT_ENTRY, (elf_addr_t) exec->e_entry);
-		NEW_AUX_ENT (7, AT_UID, (elf_addr_t) current->uid);
-		NEW_AUX_ENT (8, AT_EUID, (elf_addr_t) current->euid);
-		NEW_AUX_ENT (9, AT_GID, (elf_addr_t) current->gid);
-		NEW_AUX_ENT (10, AT_EGID, (elf_addr_t) current->egid);
+		NEW_AUX_ENT(0, AT_PHDR, load_addr + exec->e_phoff);
+		NEW_AUX_ENT(1, AT_PHENT, sizeof(struct elf_phdr));
+		NEW_AUX_ENT(2, AT_PHNUM, exec->e_phnum);
+		NEW_AUX_ENT(3, AT_PAGESZ, ELF_EXEC_PAGESIZE);
+		NEW_AUX_ENT(4, AT_BASE, interp_load_addr);
+		NEW_AUX_ENT(5, AT_FLAGS, 0);
+		NEW_AUX_ENT(6, AT_ENTRY, (elf_addr_t) exec->e_entry);
+		NEW_AUX_ENT(7, AT_UID, (elf_addr_t) current->uid);
+		NEW_AUX_ENT(8, AT_EUID, (elf_addr_t) current->euid);
+		NEW_AUX_ENT(9, AT_GID, (elf_addr_t) current->gid);
+		NEW_AUX_ENT(10, AT_EGID, (elf_addr_t) current->egid);
 	}
 #undef NEW_AUX_ENT
 
@@ -231,16 +231,16 @@ static unsigned long * create_irix_tables(char * p, int argc, int envc,
 	sp -= argc+1;
 	argv = sp;
 
-	__put_user((elf_addr_t)argc,--sp);
+	__put_user((elf_addr_t)argc, --sp);
 	current->mm->arg_start = (unsigned long) p;
 	while (argc-->0) {
-		__put_user((unsigned long)p,argv++);
+		__put_user((unsigned long)p, argv++);
 		p += strlen_user(p);
 	}
 	__put_user((unsigned long) NULL, argv);
 	current->mm->arg_end = current->mm->env_start = (unsigned long) p;
 	while (envc-->0) {
-		__put_user((unsigned long)p,envp++);
+		__put_user((unsigned long)p, envp++);
 		p += strlen_user(p);
 	}
 	__put_user((unsigned long) NULL, envp);
@@ -581,7 +581,7 @@ static void irix_map_prda_page(void)
 	struct prda *pp;
 
 	down_write(&current->mm->mmap_sem);
-	v = do_brk (PRDA_ADDRESS, PAGE_SIZE);
+	v = do_brk(PRDA_ADDRESS, PAGE_SIZE);
 	up_write(&current->mm->mmap_sem);
 
 	if (v < 0)
@@ -815,7 +815,7 @@ out_free_interp:
 	kfree(elf_interpreter);
 out_free_file:
 out_free_ph:
-	kfree (elf_phdata);
+	kfree(elf_phdata);
 	goto out;
 }
 
@@ -831,7 +831,7 @@ static int load_irix_library(struct file *file)
 	int retval;
 	unsigned int bss;
 	int error;
-	int i,j, k;
+	int i, j, k;
 
 	error = kernel_read(file, 0, (char *) &elf_ex, sizeof(elf_ex));
 	if (error != sizeof(elf_ex))
@@ -1232,7 +1232,7 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file)
 	strlcpy(psinfo.pr_fname, current->comm, sizeof(psinfo.pr_fname));
 
 	/* Try to dump the FPU. */
-	prstatus.pr_fpvalid = dump_fpu (regs, &fpu);
+	prstatus.pr_fpvalid = dump_fpu(regs, &fpu);
 	if (!prstatus.pr_fpvalid) {
 		numnote--;
 	} else {
diff --git a/arch/mips/kernel/irixinv.c b/arch/mips/kernel/irixinv.c
index de8584f62311..cf2dcd3d6a93 100644
--- a/arch/mips/kernel/irixinv.c
+++ b/arch/mips/kernel/irixinv.c
@@ -14,7 +14,7 @@ int inventory_items = 0;
 
 static inventory_t inventory [MAX_INVENTORY];
 
-void add_to_inventory (int class, int type, int controller, int unit, int state)
+void add_to_inventory(int class, int type, int controller, int unit, int state)
 {
 	inventory_t *ni = &inventory [inventory_items];
 
@@ -30,7 +30,7 @@ void add_to_inventory (int class, int type, int controller, int unit, int state)
 	inventory_items++;
 }
 
-int dump_inventory_to_user (void __user *userbuf, int size)
+int dump_inventory_to_user(void __user *userbuf, int size)
 {
 	inventory_t *inv = &inventory [0];
 	inventory_t __user *user = userbuf;
@@ -45,7 +45,7 @@ int dump_inventory_to_user (void __user *userbuf, int size)
 			return -EFAULT;
 		user++;
 	}
-	return inventory_items * sizeof (inventory_t);
+	return inventory_items * sizeof(inventory_t);
 }
 
 int __init init_inventory(void)
@@ -55,24 +55,24 @@ int __init init_inventory(void)
 	 * most likely this will not let just anyone run the X server
 	 * until we put the right values all over the place
 	 */
-	add_to_inventory (10, 3, 0, 0, 16400);
-	add_to_inventory (1, 1, 150, -1, 12);
-	add_to_inventory (1, 3, 0, 0, 8976);
-	add_to_inventory (1, 2, 0, 0, 8976);
-	add_to_inventory (4, 8, 0, 0, 2);
-	add_to_inventory (5, 5, 0, 0, 1);
-	add_to_inventory (3, 3, 0, 0, 32768);
-	add_to_inventory (3, 4, 0, 0, 32768);
-	add_to_inventory (3, 8, 0, 0, 524288);
-	add_to_inventory (3, 9, 0, 0, 64);
-	add_to_inventory (3, 1, 0, 0, 67108864);
-	add_to_inventory (12, 3, 0, 0, 16);
-	add_to_inventory (8, 7, 17, 0, 16777472);
-	add_to_inventory (8, 0, 0, 0, 1);
-	add_to_inventory (2, 1, 0, 13, 2);
-	add_to_inventory (2, 2, 0, 2, 0);
-	add_to_inventory (2, 2, 0, 1, 0);
-	add_to_inventory (7, 14, 0, 0, 6);
+	add_to_inventory(10, 3, 0, 0, 16400);
+	add_to_inventory(1, 1, 150, -1, 12);
+	add_to_inventory(1, 3, 0, 0, 8976);
+	add_to_inventory(1, 2, 0, 0, 8976);
+	add_to_inventory(4, 8, 0, 0, 2);
+	add_to_inventory(5, 5, 0, 0, 1);
+	add_to_inventory(3, 3, 0, 0, 32768);
+	add_to_inventory(3, 4, 0, 0, 32768);
+	add_to_inventory(3, 8, 0, 0, 524288);
+	add_to_inventory(3, 9, 0, 0, 64);
+	add_to_inventory(3, 1, 0, 0, 67108864);
+	add_to_inventory(12, 3, 0, 0, 16);
+	add_to_inventory(8, 7, 17, 0, 16777472);
+	add_to_inventory(8, 0, 0, 0, 1);
+	add_to_inventory(2, 1, 0, 13, 2);
+	add_to_inventory(2, 2, 0, 2, 0);
+	add_to_inventory(2, 2, 0, 1, 0);
+	add_to_inventory(7, 14, 0, 0, 6);
 
 	return 0;
 }
diff --git a/arch/mips/kernel/irixioctl.c b/arch/mips/kernel/irixioctl.c
index 30f9eb09db3f..2bde200d5ad0 100644
--- a/arch/mips/kernel/irixioctl.c
+++ b/arch/mips/kernel/irixioctl.c
@@ -238,7 +238,7 @@ asmlinkage int irix_ioctl(int fd, unsigned long cmd, unsigned long arg)
 		       current->comm, current->pid, cmd);
 		do_exit(255);
 #else
-		error = sys_ioctl (fd, cmd, arg);
+		error = sys_ioctl(fd, cmd, arg);
 #endif
 	}
 
diff --git a/arch/mips/kernel/irixsig.c b/arch/mips/kernel/irixsig.c
index 28b2a8f00911..85c2e389edd6 100644
--- a/arch/mips/kernel/irixsig.c
+++ b/arch/mips/kernel/irixsig.c
@@ -163,9 +163,9 @@ static inline int handle_signal(unsigned long sig, siginfo_t *info,
 		ret = setup_irix_frame(ka, regs, sig, oldset);
 
 	spin_lock_irq(&current->sighand->siglock);
-	sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+	sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
 	if (!(ka->sa.sa_flags & SA_NODEFER))
-		sigaddset(&current->blocked,sig);
+		sigaddset(&current->blocked, sig);
 	recalc_sigpending();
 	spin_unlock_irq(&current->sighand->siglock);
 
@@ -605,8 +605,8 @@ repeat:
 	current->state = TASK_INTERRUPTIBLE;
 	read_lock(&tasklist_lock);
 	tsk = current;
-	list_for_each(_p,&tsk->children) {
-		p = list_entry(_p,struct task_struct,sibling);
+	list_for_each(_p, &tsk->children) {
+		p = list_entry(_p, struct task_struct, sibling);
 		if ((type == IRIX_P_PID) && p->pid != pid)
 			continue;
 		if ((type == IRIX_P_PGID) && process_group(p) != pid)
diff --git a/arch/mips/kernel/irq-gt641xx.c b/arch/mips/kernel/irq-gt641xx.c
new file mode 100644
index 000000000000..1b81b131f43c
--- /dev/null
+++ b/arch/mips/kernel/irq-gt641xx.c
@@ -0,0 +1,131 @@
+/*
+ * GT641xx IRQ routines.
+ *
+ * Copyright (C) 2007 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include <linux/hardirq.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include <asm/gt64120.h>
+
+#define GT641XX_IRQ_TO_BIT(irq)	(1U << (irq - GT641XX_IRQ_BASE))
+
+static DEFINE_SPINLOCK(gt641xx_irq_lock);
+
+static void ack_gt641xx_irq(unsigned int irq)
+{
+	unsigned long flags;
+	u32 cause;
+
+	spin_lock_irqsave(&gt641xx_irq_lock, flags);
+	cause = GT_READ(GT_INTRCAUSE_OFS);
+	cause &= ~GT641XX_IRQ_TO_BIT(irq);
+	GT_WRITE(GT_INTRCAUSE_OFS, cause);
+	spin_unlock_irqrestore(&gt641xx_irq_lock, flags);
+}
+
+static void mask_gt641xx_irq(unsigned int irq)
+{
+	unsigned long flags;
+	u32 mask;
+
+	spin_lock_irqsave(&gt641xx_irq_lock, flags);
+	mask = GT_READ(GT_INTRMASK_OFS);
+	mask &= ~GT641XX_IRQ_TO_BIT(irq);
+	GT_WRITE(GT_INTRMASK_OFS, mask);
+	spin_unlock_irqrestore(&gt641xx_irq_lock, flags);
+}
+
+static void mask_ack_gt641xx_irq(unsigned int irq)
+{
+	unsigned long flags;
+	u32 cause, mask;
+
+	spin_lock_irqsave(&gt641xx_irq_lock, flags);
+	mask = GT_READ(GT_INTRMASK_OFS);
+	mask &= ~GT641XX_IRQ_TO_BIT(irq);
+	GT_WRITE(GT_INTRMASK_OFS, mask);
+
+	cause = GT_READ(GT_INTRCAUSE_OFS);
+	cause &= ~GT641XX_IRQ_TO_BIT(irq);
+	GT_WRITE(GT_INTRCAUSE_OFS, cause);
+	spin_unlock_irqrestore(&gt641xx_irq_lock, flags);
+}
+
+static void unmask_gt641xx_irq(unsigned int irq)
+{
+	unsigned long flags;
+	u32 mask;
+
+	spin_lock_irqsave(&gt641xx_irq_lock, flags);
+	mask = GT_READ(GT_INTRMASK_OFS);
+	mask |= GT641XX_IRQ_TO_BIT(irq);
+	GT_WRITE(GT_INTRMASK_OFS, mask);
+	spin_unlock_irqrestore(&gt641xx_irq_lock, flags);
+}
+
+static struct irq_chip gt641xx_irq_chip = {
+	.name		= "GT641xx",
+	.ack		= ack_gt641xx_irq,
+	.mask		= mask_gt641xx_irq,
+	.mask_ack	= mask_ack_gt641xx_irq,
+	.unmask		= unmask_gt641xx_irq,
+};
+
+void gt641xx_irq_dispatch(void)
+{
+	u32 cause, mask;
+	int i;
+
+	cause = GT_READ(GT_INTRCAUSE_OFS);
+	mask = GT_READ(GT_INTRMASK_OFS);
+	cause &= mask;
+
+	/*
+	 * bit0 : logical or of all the interrupt bits.
+	 * bit30: logical or of bits[29:26,20:1].
+	 * bit31: logical or of bits[25:1].
+	 */
+	for (i = 1; i < 30; i++) {
+		if (cause & (1U << i)) {
+			do_IRQ(GT641XX_IRQ_BASE + i);
+			return;
+		}
+	}
+
+	atomic_inc(&irq_err_count);
+}
+
+void __init gt641xx_irq_init(void)
+{
+	int i;
+
+	GT_WRITE(GT_INTRMASK_OFS, 0);
+	GT_WRITE(GT_INTRCAUSE_OFS, 0);
+
+	/*
+	 * bit0 : logical or of all the interrupt bits.
+	 * bit30: logical or of bits[29:26,20:1].
+	 * bit31: logical or of bits[25:1].
+	 */
+	for (i = 1; i < 30; i++)
+		set_irq_chip_and_handler(GT641XX_IRQ_BASE + i,
+					 &gt641xx_irq_chip, handle_level_irq);
+}
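
The dispatch loop above deliberately skips bits 0, 30 and 31 because those are summary ("logical or") bits, not real sources; only bits 1..29 map to dispatchable IRQs through GT641XX_IRQ_TO_BIT(). A small standalone sketch of that scan (the GT641XX_IRQ_BASE value is assumed for the example; the real base is platform-defined):

	#include <stdio.h>

	#define GT641XX_IRQ_BASE	8	/* assumed base for illustration */

	/* Mirror of the dispatch scan: find the first pending source among
	 * bits 1..29, ignoring the summary bits 0, 30 and 31. */
	static int first_pending_irq(unsigned int cause)
	{
		int i;

		for (i = 1; i < 30; i++)
			if (cause & (1U << i))
				return GT641XX_IRQ_BASE + i;
		return -1;	/* spurious: only summary bits were set */
	}

	int main(void)
	{
		/* bit 5 pending, plus summary bit 0 set by the controller */
		printf("irq = %d\n", first_pending_irq((1U << 5) | 1U));
		return 0;
	}
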
diff --git a/arch/mips/kernel/irq-msc01.c b/arch/mips/kernel/irq-msc01.c
index 1ecdd50bfc60..4edc7e451d91 100644
--- a/arch/mips/kernel/irq-msc01.c
+++ b/arch/mips/kernel/irq-msc01.c
@@ -99,7 +99,7 @@ void ll_msc_irq(void)
 }
 
 void
-msc_bind_eic_interrupt (unsigned int irq, unsigned int set)
+msc_bind_eic_interrupt(unsigned int irq, unsigned int set)
 {
 	MSCIC_WRITE(MSC01_IC_RAMW,
 		    (irq<<MSC01_IC_RAMW_ADDR_SHF) | (set<<MSC01_IC_RAMW_DATA_SHF));
@@ -130,7 +130,7 @@ void __init init_msc_irqs(unsigned long icubase, unsigned int irqbase, msc_irqma
 {
 	extern void (*board_bind_eic_interrupt)(unsigned int irq, unsigned int regset);
 
-	_icctrl_msc = (unsigned long) ioremap (icubase, 0x40000);
+	_icctrl_msc = (unsigned long) ioremap(icubase, 0x40000);
 
 	/* Reset interrupt controller - initialises all registers to 0 */
 	MSCIC_WRITE(MSC01_IC_RST, MSC01_IC_RST_RST_BIT);
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
index a990aad2f049..d06e9c9af790 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -93,7 +93,7 @@ int show_interrupts(struct seq_file *p, void *v)
 	if (i == 0) {
 		seq_printf(p, "           ");
 		for_each_online_cpu(j)
-			seq_printf(p, "CPU%d       ",j);
+			seq_printf(p, "CPU%d       ", j);
 		seq_putc(p, '\n');
 	}
 
@@ -102,7 +102,7 @@ int show_interrupts(struct seq_file *p, void *v)
 		action = irq_desc[i].action;
 		if (!action)
 			goto skip;
-		seq_printf(p, "%3d: ",i);
+		seq_printf(p, "%3d: ", i);
 #ifndef CONFIG_SMP
 		seq_printf(p, "%10u ", kstat_irqs(i));
 #else
diff --git a/arch/mips/kernel/kspd.c b/arch/mips/kernel/kspd.c
index cb9a14a1ca5b..d2c2e00e5864 100644
--- a/arch/mips/kernel/kspd.c
+++ b/arch/mips/kernel/kspd.c
@@ -118,11 +118,11 @@ struct apsp_table syscall_command_table[] = {
 
 static int sp_syscall(int num, int arg0, int arg1, int arg2, int arg3)
 {
-	register long int _num __asm__ ("$2") = num;
-	register long int _arg0 __asm__ ("$4") = arg0;
-	register long int _arg1 __asm__ ("$5") = arg1;
-	register long int _arg2 __asm__ ("$6") = arg2;
-	register long int _arg3 __asm__ ("$7") = arg3;
+	register long int _num __asm__("$2") = num;
+	register long int _arg0 __asm__("$4") = arg0;
+	register long int _arg1 __asm__("$5") = arg1;
+	register long int _arg2 __asm__("$6") = arg2;
+	register long int _arg3 __asm__("$7") = arg3;
 
 	mm_segment_t old_fs;
 
@@ -239,7 +239,7 @@ void sp_work_handle_request(void)
 	case MTSP_SYSCALL_GETTOD:
 		memset(&tz, 0, sizeof(tz));
 		if ((ret.retval = sp_syscall(__NR_gettimeofday, (int)&tv,
-				(int)&tz, 0,0)) == 0)
+				(int)&tz, 0, 0)) == 0)
 			ret.retval = tv.tv_sec;
 		break;
 
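
sp_syscall() pins its values to fixed MIPS ABI registers with GCC's explicit register variables: $2 (v0) carries the syscall number in and the result out, $4-$7 (a0-a3) carry the arguments, and on return $7 doubles as an error flag. A self-contained sketch of that convention (illustrative, not the kernel's trampoline; a production version is assumed to look roughly like this):

	static long mips_syscall4(long num, long a0, long a1, long a2, long a3)
	{
		register long _num __asm__("$2") = num;	/* v0: number in, result out */
		register long _a0 __asm__("$4") = a0;
		register long _a1 __asm__("$5") = a1;
		register long _a2 __asm__("$6") = a2;
		register long _a3 __asm__("$7") = a3;	/* a3: error flag on return */

		__asm__ __volatile__(
			"syscall"
			: "+r" (_num), "+r" (_a3)
			: "r" (_a0), "r" (_a1), "r" (_a2)
			: "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
			  "$14", "$15", "$24", "$25", "hi", "lo", "memory");

		return _a3 ? -_num : _num;	/* negate the result on error */
	}
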
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index 135d9a5fe337..d6e01215fb2b 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -58,10 +58,10 @@
 #define AA(__x) ((unsigned long)((int)__x))
 
 #ifdef __MIPSEB__
-#define merge_64(r1,r2) ((((r1) & 0xffffffffUL) << 32) + ((r2) & 0xffffffffUL))
+#define merge_64(r1, r2) ((((r1) & 0xffffffffUL) << 32) + ((r2) & 0xffffffffUL))
 #endif
 #ifdef __MIPSEL__
-#define merge_64(r1,r2) ((((r2) & 0xffffffffUL) << 32) + ((r1) & 0xffffffffUL))
+#define merge_64(r1, r2) ((((r2) & 0xffffffffUL) << 32) + ((r1) & 0xffffffffUL))
 #endif
 
 /*
@@ -96,7 +96,7 @@ int cp_compat_stat(struct kstat *stat, struct compat_stat __user *statbuf)
 #endif
 	tmp.st_blocks = stat->blocks;
 	tmp.st_blksize = stat->blksize;
-	return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
+	return copy_to_user(statbuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
 }
 
 asmlinkage unsigned long
@@ -300,13 +300,13 @@ asmlinkage int sys32_sched_rr_get_interval(compat_pid_t pid,
 {
 	struct timespec t;
 	int ret;
-	mm_segment_t old_fs = get_fs ();
+	mm_segment_t old_fs = get_fs();
 
-	set_fs (KERNEL_DS);
+	set_fs(KERNEL_DS);
 	ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
-	set_fs (old_fs);
+	set_fs(old_fs);
 	if (put_user (t.tv_sec, &interval->tv_sec) ||
-	    __put_user (t.tv_nsec, &interval->tv_nsec))
+	    __put_user(t.tv_nsec, &interval->tv_nsec))
 		return -EFAULT;
 	return ret;
 }
@@ -314,7 +314,7 @@ asmlinkage int sys32_sched_rr_get_interval(compat_pid_t pid,
 #ifdef CONFIG_SYSVIPC
 
 asmlinkage long
-sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth)
+sys32_ipc(u32 call, int first, int second, int third, u32 ptr, u32 fifth)
 {
 	int version, err;
 
@@ -373,7 +373,7 @@ sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth)
 #else
 
 asmlinkage long
-sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth)
+sys32_ipc(u32 call, int first, int second, int third, u32 ptr, u32 fifth)
 {
 	return -ENOSYS;
 }
@@ -505,16 +505,16 @@ asmlinkage int sys32_ustat(dev_t dev, struct ustat32 __user * ubuf32)
 
 	set_fs(KERNEL_DS);
 	err = sys_ustat(dev, (struct ustat __user *)&tmp);
-	set_fs (old_fs);
+	set_fs(old_fs);
 
 	if (err)
 		goto out;
 
-	memset(&tmp32,0,sizeof(struct ustat32));
+	memset(&tmp32, 0, sizeof(struct ustat32));
 	tmp32.f_tfree = tmp.f_tfree;
 	tmp32.f_tinode = tmp.f_tinode;
 
-	err = copy_to_user(ubuf32,&tmp32,sizeof(struct ustat32)) ? -EFAULT : 0;
+	err = copy_to_user(ubuf32, &tmp32, sizeof(struct ustat32)) ? -EFAULT : 0;
 
 out:
 	return err;
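
merge_64() exists because o32 user space passes a 64-bit value in two 32-bit registers, and which register holds the high half depends on byte order. A self-contained check of the macro pair (register values invented for the illustration; assumes a 64-bit unsigned long, as on a MIPS64 kernel):

	#include <stdio.h>

	/* The two variants from linux32.c above; the kernel selects exactly
	 * one via __MIPSEB__/__MIPSEL__. */
	#define merge_64_be(r1, r2) ((((r1) & 0xffffffffUL) << 32) + ((r2) & 0xffffffffUL))
	#define merge_64_le(r1, r2) ((((r2) & 0xffffffffUL) << 32) + ((r1) & 0xffffffffUL))

	int main(void)
	{
		unsigned long r1 = 0x12345678UL, r2 = 0x9abcdef0UL;

		/* big endian: the first register carries the high word */
		printf("be: %#lx\n", merge_64_be(r1, r2));
		/* little endian: the second register carries the high word */
		printf("le: %#lx\n", merge_64_le(r1, r2));
		return 0;
	}
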
diff --git a/arch/mips/kernel/mips-mt.c b/arch/mips/kernel/mips-mt.c
index 56750b02ab40..3d6b1ec1f328 100644
--- a/arch/mips/kernel/mips-mt.c
+++ b/arch/mips/kernel/mips-mt.c
@@ -236,7 +236,7 @@ void mips_mt_set_cpuoptions(void)
 	if (oconfig7 != nconfig7) {
 		__asm__ __volatile("sync");
 		write_c0_config7(nconfig7);
-		ehb ();
+		ehb();
 		printk("Config7: 0x%08x\n", read_c0_config7());
 	}
 
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index ec04f5a1a5ea..efd2d1314123 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -17,76 +17,6 @@
 
 unsigned int vced_count, vcei_count;
 
-static const char *cpu_name[] = {
-	[CPU_UNKNOWN]	= "unknown",
-	[CPU_R2000]	= "R2000",
-	[CPU_R3000]	= "R3000",
-	[CPU_R3000A]	= "R3000A",
-	[CPU_R3041]	= "R3041",
-	[CPU_R3051]	= "R3051",
-	[CPU_R3052]	= "R3052",
-	[CPU_R3081]	= "R3081",
-	[CPU_R3081E]	= "R3081E",
-	[CPU_R4000PC]	= "R4000PC",
-	[CPU_R4000SC]	= "R4000SC",
-	[CPU_R4000MC]	= "R4000MC",
-	[CPU_R4200]	= "R4200",
-	[CPU_R4400PC]	= "R4400PC",
-	[CPU_R4400SC]	= "R4400SC",
-	[CPU_R4400MC]	= "R4400MC",
-	[CPU_R4600]	= "R4600",
-	[CPU_R6000]	= "R6000",
-	[CPU_R6000A]	= "R6000A",
-	[CPU_R8000]	= "R8000",
-	[CPU_R10000]	= "R10000",
-	[CPU_R12000]	= "R12000",
-	[CPU_R14000]	= "R14000",
-	[CPU_R4300]	= "R4300",
-	[CPU_R4650]	= "R4650",
-	[CPU_R4700]	= "R4700",
-	[CPU_R5000]	= "R5000",
-	[CPU_R5000A]	= "R5000A",
-	[CPU_R4640]	= "R4640",
-	[CPU_NEVADA]	= "Nevada",
-	[CPU_RM7000]	= "RM7000",
-	[CPU_RM9000]	= "RM9000",
-	[CPU_R5432]	= "R5432",
-	[CPU_4KC]	= "MIPS 4Kc",
-	[CPU_5KC]	= "MIPS 5Kc",
-	[CPU_R4310]	= "R4310",
-	[CPU_SB1]	= "SiByte SB1",
-	[CPU_SB1A]	= "SiByte SB1A",
-	[CPU_TX3912]	= "TX3912",
-	[CPU_TX3922]	= "TX3922",
-	[CPU_TX3927]	= "TX3927",
-	[CPU_AU1000]	= "Au1000",
-	[CPU_AU1500]	= "Au1500",
-	[CPU_AU1100]	= "Au1100",
-	[CPU_AU1550]	= "Au1550",
-	[CPU_AU1200]	= "Au1200",
-	[CPU_4KEC]	= "MIPS 4KEc",
-	[CPU_4KSC]	= "MIPS 4KSc",
-	[CPU_VR41XX]	= "NEC Vr41xx",
-	[CPU_R5500]	= "R5500",
-	[CPU_TX49XX]	= "TX49xx",
-	[CPU_20KC]	= "MIPS 20Kc",
-	[CPU_24K]	= "MIPS 24K",
-	[CPU_25KF]	= "MIPS 25Kf",
-	[CPU_34K]	= "MIPS 34K",
-	[CPU_74K]	= "MIPS 74K",
-	[CPU_VR4111]	= "NEC VR4111",
-	[CPU_VR4121]	= "NEC VR4121",
-	[CPU_VR4122]	= "NEC VR4122",
-	[CPU_VR4131]	= "NEC VR4131",
-	[CPU_VR4133]	= "NEC VR4133",
-	[CPU_VR4181]	= "NEC VR4181",
-	[CPU_VR4181A]	= "NEC VR4181A",
-	[CPU_SR71000]	= "Sandcraft SR71000",
-	[CPU_PR4450]	= "Philips PR4450",
-	[CPU_LOONGSON2]	= "ICT Loongson-2",
-};
-
-
 static int show_cpuinfo(struct seq_file *m, void *v)
 {
 	unsigned long n = (unsigned long) v - 1;
@@ -108,8 +38,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 	seq_printf(m, "processor\t\t: %ld\n", n);
 	sprintf(fmt, "cpu model\t\t: %%s V%%d.%%d%s\n",
 	        cpu_data[n].options & MIPS_CPU_FPU ? "  FPU V%d.%d" : "");
-	seq_printf(m, fmt, cpu_name[cpu_data[n].cputype <= CPU_LAST ?
-	        cpu_data[n].cputype : CPU_UNKNOWN],
+	seq_printf(m, fmt, __cpu_name[smp_processor_id()],
 	       (version >> 4) & 0x0f, version & 0x0f,
 	       (fp_vers >> 4) & 0x0f, fp_vers & 0x0f);
 	seq_printf(m, "BogoMIPS\t\t: %lu.%02lu\n",
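
show_cpuinfo() builds its output format in two stages: sprintf() first expands the outer template, optionally embedding a " FPU V%d.%d" fragment, and the resulting string is then used as the seq_printf() format. The trick works because extra variadic arguments are simply ignored when the FPU conversions are absent. A sketch of the same pattern in isolation (values invented for the illustration):

	#include <stdio.h>

	int main(void)
	{
		char fmt[64];
		int has_fpu = 1;
		unsigned int version = 0x42, fp_vers = 0x31;

		/* stage 1: build the format, conditionally adding FPU fields */
		sprintf(fmt, "cpu model\t\t: %%s V%%d.%%d%s\n",
			has_fpu ? "  FPU V%d.%d" : "");
		/* stage 2: use it; trailing args are ignored if unused */
		printf(fmt, "R4000SC", (version >> 4) & 0x0f, version & 0x0f,
		       (fp_vers >> 4) & 0x0f, fp_vers & 0x0f);
		return 0;
	}
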
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index e6ce943099a0..11cb264f59ce 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -11,6 +11,7 @@
 #include <linux/errno.h>
 #include <linux/module.h>
 #include <linux/sched.h>
+#include <linux/tick.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/stddef.h>
@@ -52,6 +53,7 @@ void __noreturn cpu_idle(void)
 {
 	/* endless idle loop with no priority at all */
 	while (1) {
+		tick_nohz_stop_sched_tick();
 		while (!need_resched()) {
 #ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
 			extern void smtc_idle_loop_hook(void);
@@ -61,6 +63,7 @@ void __noreturn cpu_idle(void)
 			if (cpu_wait)
 				(*cpu_wait)();
 		}
+		tick_nohz_restart_sched_tick();
 		preempt_enable_no_resched();
 		schedule();
 		preempt_disable();
@@ -199,13 +202,13 @@ void elf_dump_regs(elf_greg_t *gp, struct pt_regs *regs)
 #endif
 }
 
-int dump_task_regs (struct task_struct *tsk, elf_gregset_t *regs)
+int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
 {
 	elf_dump_regs(*regs, task_pt_regs(tsk));
 	return 1;
 }
 
-int dump_task_fpu (struct task_struct *t, elf_fpregset_t *fpr)
+int dump_task_fpu(struct task_struct *t, elf_fpregset_t *fpr)
 {
 	memcpy(fpr, &t->thread.fpu, sizeof(current->thread.fpu));
 
@@ -231,8 +234,8 @@ long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
 	regs.cp0_epc = (unsigned long) kernel_thread_helper;
 	regs.cp0_status = read_c0_status();
 #if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
-	regs.cp0_status &= ~(ST0_KUP | ST0_IEC);
-	regs.cp0_status |= ST0_IEP;
+	regs.cp0_status = (regs.cp0_status & ~(ST0_KUP | ST0_IEP | ST0_IEC)) |
+			  ((regs.cp0_status & (ST0_KUC | ST0_IEC)) << 2);
#else
 	regs.cp0_status |= ST0_EXL;
 #endif
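
The R3000 change in kernel_thread() concerns the Status register's three-level kernel/interrupt-enable stack (KUo/IEo, KUp/IEp, KUc/IEc): taking an exception pushes current into previous, and the new thread starts through an exception return that pops the stack, so the previous bits must be seeded from the current ones rather than hard-coding IEp. The bit manipulation spelled out as a sketch (bit values per the historical R3000 Status layout, where each "previous" bit sits two positions above its "current" counterpart):

	#define ST0_IEC 0x00000001	/* current interrupt enable */
	#define ST0_KUC 0x00000002	/* current kernel/user mode */
	#define ST0_IEP 0x00000004	/* previous interrupt enable */
	#define ST0_KUP 0x00000008	/* previous kernel/user mode */

	/* What the patched code computes: clear the "previous" slot (and
	 * the current IE bit, so the thread starts with interrupts off),
	 * then copy the "current" bits into the "previous" slot, emulating
	 * the push a real exception would have done. */
	static unsigned long seed_status(unsigned long st)
	{
		return (st & ~(ST0_KUP | ST0_IEP | ST0_IEC)) |
		       ((st & (ST0_KUC | ST0_IEC)) << 2);
	}
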
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index bbd57b20b43e..58aa6fec1146 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -54,7 +54,7 @@ void ptrace_disable(struct task_struct *child)
  * for 32-bit kernels and for 32-bit processes on a 64-bit kernel.
  * Registers are sign extended to fill the available space.
  */
-int ptrace_getregs (struct task_struct *child, __s64 __user *data)
+int ptrace_getregs(struct task_struct *child, __s64 __user *data)
 {
 	struct pt_regs *regs;
 	int i;
@@ -65,13 +65,13 @@ int ptrace_getregs (struct task_struct *child, __s64 __user *data)
 	regs = task_pt_regs(child);
 
 	for (i = 0; i < 32; i++)
-		__put_user (regs->regs[i], data + i);
-	__put_user (regs->lo, data + EF_LO - EF_R0);
-	__put_user (regs->hi, data + EF_HI - EF_R0);
-	__put_user (regs->cp0_epc, data + EF_CP0_EPC - EF_R0);
-	__put_user (regs->cp0_badvaddr, data + EF_CP0_BADVADDR - EF_R0);
-	__put_user (regs->cp0_status, data + EF_CP0_STATUS - EF_R0);
-	__put_user (regs->cp0_cause, data + EF_CP0_CAUSE - EF_R0);
+		__put_user(regs->regs[i], data + i);
+	__put_user(regs->lo, data + EF_LO - EF_R0);
+	__put_user(regs->hi, data + EF_HI - EF_R0);
+	__put_user(regs->cp0_epc, data + EF_CP0_EPC - EF_R0);
+	__put_user(regs->cp0_badvaddr, data + EF_CP0_BADVADDR - EF_R0);
+	__put_user(regs->cp0_status, data + EF_CP0_STATUS - EF_R0);
+	__put_user(regs->cp0_cause, data + EF_CP0_CAUSE - EF_R0);
 
 	return 0;
 }
@@ -81,7 +81,7 @@ int ptrace_getregs (struct task_struct *child, __s64 __user *data)
  * the 64-bit format.  On a 32-bit kernel only the lower order half
 * (according to endianness) will be used.
 */
-int ptrace_setregs (struct task_struct *child, __s64 __user *data)
+int ptrace_setregs(struct task_struct *child, __s64 __user *data)
 {
 	struct pt_regs *regs;
 	int i;
@@ -92,17 +92,17 @@ int ptrace_setregs (struct task_struct *child, __s64 __user *data)
 	regs = task_pt_regs(child);
 
 	for (i = 0; i < 32; i++)
-		__get_user (regs->regs[i], data + i);
-	__get_user (regs->lo, data + EF_LO - EF_R0);
-	__get_user (regs->hi, data + EF_HI - EF_R0);
-	__get_user (regs->cp0_epc, data + EF_CP0_EPC - EF_R0);
+		__get_user(regs->regs[i], data + i);
+	__get_user(regs->lo, data + EF_LO - EF_R0);
+	__get_user(regs->hi, data + EF_HI - EF_R0);
+	__get_user(regs->cp0_epc, data + EF_CP0_EPC - EF_R0);
 
 	/* badvaddr, status, and cause may not be written. */
 
 	return 0;
 }
 
-int ptrace_getfpregs (struct task_struct *child, __u32 __user *data)
+int ptrace_getfpregs(struct task_struct *child, __u32 __user *data)
 {
 	int i;
 	unsigned int tmp;
@@ -113,13 +113,13 @@ int ptrace_getfpregs (struct task_struct *child, __u32 __user *data)
 	if (tsk_used_math(child)) {
 		fpureg_t *fregs = get_fpu_regs(child);
 		for (i = 0; i < 32; i++)
-			__put_user (fregs[i], i + (__u64 __user *) data);
+			__put_user(fregs[i], i + (__u64 __user *) data);
 	} else {
 		for (i = 0; i < 32; i++)
-			__put_user ((__u64) -1, i + (__u64 __user *) data);
+			__put_user((__u64) -1, i + (__u64 __user *) data);
 	}
 
-	__put_user (child->thread.fpu.fcr31, data + 64);
+	__put_user(child->thread.fpu.fcr31, data + 64);
 
 	preempt_disable();
 	if (cpu_has_fpu) {
@@ -142,12 +142,12 @@ int ptrace_getfpregs (struct task_struct *child, __u32 __user *data)
 		tmp = 0;
 	}
 	preempt_enable();
-	__put_user (tmp, data + 65);
+	__put_user(tmp, data + 65);
 
 	return 0;
148} 148}
149 149
150int ptrace_setfpregs (struct task_struct *child, __u32 __user *data) 150int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
151{ 151{
152 fpureg_t *fregs; 152 fpureg_t *fregs;
153 int i; 153 int i;
@@ -158,9 +158,9 @@ int ptrace_setfpregs (struct task_struct *child, __u32 __user *data)
158 fregs = get_fpu_regs(child); 158 fregs = get_fpu_regs(child);
159 159
160 for (i = 0; i < 32; i++) 160 for (i = 0; i < 32; i++)
161 __get_user (fregs[i], i + (__u64 __user *) data); 161 __get_user(fregs[i], i + (__u64 __user *) data);
162 162
163 __get_user (child->thread.fpu.fcr31, data + 64); 163 __get_user(child->thread.fpu.fcr31, data + 64);
164 164
165 /* FIR may not be written. */ 165 /* FIR may not be written. */
166 166
@@ -390,19 +390,19 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
390 } 390 }
391 391
392 case PTRACE_GETREGS: 392 case PTRACE_GETREGS:
393 ret = ptrace_getregs (child, (__u64 __user *) data); 393 ret = ptrace_getregs(child, (__u64 __user *) data);
394 break; 394 break;
395 395
396 case PTRACE_SETREGS: 396 case PTRACE_SETREGS:
397 ret = ptrace_setregs (child, (__u64 __user *) data); 397 ret = ptrace_setregs(child, (__u64 __user *) data);
398 break; 398 break;
399 399
400 case PTRACE_GETFPREGS: 400 case PTRACE_GETFPREGS:
401 ret = ptrace_getfpregs (child, (__u32 __user *) data); 401 ret = ptrace_getfpregs(child, (__u32 __user *) data);
402 break; 402 break;
403 403
404 case PTRACE_SETFPREGS: 404 case PTRACE_SETFPREGS:
405 ret = ptrace_setfpregs (child, (__u32 __user *) data); 405 ret = ptrace_setfpregs(child, (__u32 __user *) data);
406 break; 406 break;
407 407
408 case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */ 408 case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
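
[Editor's note: the ptrace.c changes are whitespace-only, but the pattern is
worth a gloss. The loops use the unchecked __put_user()/__get_user() forms,
which skip the per-access range check; that is safe only because the caller
validates the whole buffer once. In this era's MIPS ptrace_getregs() that is
an access_ok() check just above the hunk (38 eight-byte slots: 32 GPRs plus
lo, hi, epc, badvaddr, status and cause — layout assumed from <asm/reg.h>),
roughly:

    if (!access_ok(VERIFY_WRITE, data, 38 * 8))
            return -EIO;

    for (i = 0; i < 32; i++)
            __put_user(regs->regs[i], data + i);    /* no re-check per word */
    __put_user(regs->lo, data + EF_LO - EF_R0);     /* EF_* give fixed slots */

]
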
diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
index d9a39c169450..f2bffed94fa3 100644
--- a/arch/mips/kernel/ptrace32.c
+++ b/arch/mips/kernel/ptrace32.c
@@ -36,11 +36,11 @@
36#include <asm/uaccess.h> 36#include <asm/uaccess.h>
37#include <asm/bootinfo.h> 37#include <asm/bootinfo.h>
38 38
39int ptrace_getregs (struct task_struct *child, __s64 __user *data); 39int ptrace_getregs(struct task_struct *child, __s64 __user *data);
40int ptrace_setregs (struct task_struct *child, __s64 __user *data); 40int ptrace_setregs(struct task_struct *child, __s64 __user *data);
41 41
42int ptrace_getfpregs (struct task_struct *child, __u32 __user *data); 42int ptrace_getfpregs(struct task_struct *child, __u32 __user *data);
43int ptrace_setfpregs (struct task_struct *child, __u32 __user *data); 43int ptrace_setfpregs(struct task_struct *child, __u32 __user *data);
44 44
45/* 45/*
46 * Tracing a 32-bit process with a 64-bit strace and vice versa will not 46 * Tracing a 32-bit process with a 64-bit strace and vice versa will not
@@ -346,19 +346,19 @@ asmlinkage int sys32_ptrace(int request, int pid, int addr, int data)
346 } 346 }
347 347
348 case PTRACE_GETREGS: 348 case PTRACE_GETREGS:
349 ret = ptrace_getregs (child, (__u64 __user *) (__u64) data); 349 ret = ptrace_getregs(child, (__u64 __user *) (__u64) data);
350 break; 350 break;
351 351
352 case PTRACE_SETREGS: 352 case PTRACE_SETREGS:
353 ret = ptrace_setregs (child, (__u64 __user *) (__u64) data); 353 ret = ptrace_setregs(child, (__u64 __user *) (__u64) data);
354 break; 354 break;
355 355
356 case PTRACE_GETFPREGS: 356 case PTRACE_GETFPREGS:
357 ret = ptrace_getfpregs (child, (__u32 __user *) (__u64) data); 357 ret = ptrace_getfpregs(child, (__u32 __user *) (__u64) data);
358 break; 358 break;
359 359
360 case PTRACE_SETFPREGS: 360 case PTRACE_SETFPREGS:
361 ret = ptrace_setfpregs (child, (__u32 __user *) (__u64) data); 361 ret = ptrace_setfpregs(child, (__u32 __user *) (__u64) data);
362 break; 362 break;
363 363
364 case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */ 364 case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
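
[Editor's note: the odd-looking double casts above, e.g.
(__u64 __user *) (__u64) data, widen the 32-bit syscall argument through an
integer type before making it a pointer. Whether the widening sign- or
zero-extends depends on the intermediate type; a standalone userspace demo of
the difference (compiles with gcc on any 64-bit target):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            int arg = (int)0x90000000;        /* a high 32-bit value in an int slot */
            uint64_t sext = (uint64_t)arg;    /* sign-extends: 0xffffffff90000000 */
            uint64_t zext = (uint32_t)arg;    /* zero-extends: 0x0000000090000000 */

            printf("%016llx\n%016llx\n",
                   (unsigned long long)sext, (unsigned long long)zext);
            return 0;
    }

The sign-extending (__u64) form used here is consistent with the MIPS64
convention of keeping 32-bit values sign-extended in 64-bit registers.]
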
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 316685fca059..a06a27d6cfcd 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -51,10 +51,8 @@ EXPORT_SYMBOL(PCI_DMA_BUS_IS_PHYS);
51 * These are initialized so they are in the .data section 51 * These are initialized so they are in the .data section
52 */ 52 */
53unsigned long mips_machtype __read_mostly = MACH_UNKNOWN; 53unsigned long mips_machtype __read_mostly = MACH_UNKNOWN;
54unsigned long mips_machgroup __read_mostly = MACH_GROUP_UNKNOWN;
55 54
56EXPORT_SYMBOL(mips_machtype); 55EXPORT_SYMBOL(mips_machtype);
57EXPORT_SYMBOL(mips_machgroup);
58 56
59struct boot_mem_map boot_mem_map; 57struct boot_mem_map boot_mem_map;
60 58
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index 2a08ce41bf2b..a4e106c56ab5 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -613,9 +613,9 @@ static int handle_signal(unsigned long sig, siginfo_t *info,
613 ret = current->thread.abi->setup_frame(ka, regs, sig, oldset); 613 ret = current->thread.abi->setup_frame(ka, regs, sig, oldset);
614 614
615 spin_lock_irq(&current->sighand->siglock); 615 spin_lock_irq(&current->sighand->siglock);
616 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask); 616 sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
617 if (!(ka->sa.sa_flags & SA_NODEFER)) 617 if (!(ka->sa.sa_flags & SA_NODEFER))
618 sigaddset(&current->blocked,sig); 618 sigaddset(&current->blocked, sig);
619 recalc_sigpending(); 619 recalc_sigpending();
620 spin_unlock_irq(&current->sighand->siglock); 620 spin_unlock_irq(&current->sighand->siglock);
621 621
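
[Editor's note: for readers skimming this spacing fix, the block it touches
implements the POSIX signal-delivery rule. A commented restatement of the same
lines (sketch, kernel context assumed):

    spin_lock_irq(&current->sighand->siglock);
    /* while the handler runs, block everything in its sa_mask ... */
    sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
    /* ... and the signal itself, unless the handler asked for SA_NODEFER */
    if (!(ka->sa.sa_flags & SA_NODEFER))
            sigaddset(&current->blocked, sig);
    recalc_sigpending();
    spin_unlock_irq(&current->sighand->siglock);

]
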
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index 64b612a0a622..572c610db1b1 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -261,11 +261,11 @@ static inline int put_sigset(const sigset_t *kbuf, compat_sigset_t __user *ubuf)
261 default: 261 default:
262 __put_sigset_unknown_nsig(); 262 __put_sigset_unknown_nsig();
263 case 2: 263 case 2:
264 err |= __put_user (kbuf->sig[1] >> 32, &ubuf->sig[3]); 264 err |= __put_user(kbuf->sig[1] >> 32, &ubuf->sig[3]);
265 err |= __put_user (kbuf->sig[1] & 0xffffffff, &ubuf->sig[2]); 265 err |= __put_user(kbuf->sig[1] & 0xffffffff, &ubuf->sig[2]);
266 case 1: 266 case 1:
267 err |= __put_user (kbuf->sig[0] >> 32, &ubuf->sig[1]); 267 err |= __put_user(kbuf->sig[0] >> 32, &ubuf->sig[1]);
268 err |= __put_user (kbuf->sig[0] & 0xffffffff, &ubuf->sig[0]); 268 err |= __put_user(kbuf->sig[0] & 0xffffffff, &ubuf->sig[0]);
269 } 269 }
270 270
271 return err; 271 return err;
@@ -283,12 +283,12 @@ static inline int get_sigset(sigset_t *kbuf, const compat_sigset_t __user *ubuf)
283 default: 283 default:
284 __get_sigset_unknown_nsig(); 284 __get_sigset_unknown_nsig();
285 case 2: 285 case 2:
286 err |= __get_user (sig[3], &ubuf->sig[3]); 286 err |= __get_user(sig[3], &ubuf->sig[3]);
287 err |= __get_user (sig[2], &ubuf->sig[2]); 287 err |= __get_user(sig[2], &ubuf->sig[2]);
288 kbuf->sig[1] = sig[2] | (sig[3] << 32); 288 kbuf->sig[1] = sig[2] | (sig[3] << 32);
289 case 1: 289 case 1:
290 err |= __get_user (sig[1], &ubuf->sig[1]); 290 err |= __get_user(sig[1], &ubuf->sig[1]);
291 err |= __get_user (sig[0], &ubuf->sig[0]); 291 err |= __get_user(sig[0], &ubuf->sig[0]);
292 kbuf->sig[0] = sig[0] | (sig[1] << 32); 292 kbuf->sig[0] = sig[0] | (sig[1] << 32);
293 } 293 }
294 294
@@ -412,10 +412,10 @@ asmlinkage int sys32_sigaltstack(nabi_no_regargs struct pt_regs regs)
412 return -EFAULT; 412 return -EFAULT;
413 } 413 }
414 414
415 set_fs (KERNEL_DS); 415 set_fs(KERNEL_DS);
416 ret = do_sigaltstack(uss ? (stack_t __user *)&kss : NULL, 416 ret = do_sigaltstack(uss ? (stack_t __user *)&kss : NULL,
417 uoss ? (stack_t __user *)&koss : NULL, usp); 417 uoss ? (stack_t __user *)&koss : NULL, usp);
418 set_fs (old_fs); 418 set_fs(old_fs);
419 419
420 if (!ret && uoss) { 420 if (!ret && uoss) {
421 if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss))) 421 if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
@@ -559,9 +559,9 @@ asmlinkage void sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
559 /* It is more difficult to avoid calling this function than to 559 /* It is more difficult to avoid calling this function than to
560 call it and ignore errors. */ 560 call it and ignore errors. */
561 old_fs = get_fs(); 561 old_fs = get_fs();
562 set_fs (KERNEL_DS); 562 set_fs(KERNEL_DS);
563 do_sigaltstack((stack_t __user *)&st, NULL, regs.regs[29]); 563 do_sigaltstack((stack_t __user *)&st, NULL, regs.regs[29]);
564 set_fs (old_fs); 564 set_fs(old_fs);
565 565
566 /* 566 /*
567 * Don't let your children do this ... 567 * Don't let your children do this ...
@@ -746,11 +746,11 @@ asmlinkage int sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
746 if (set && get_sigset(&new_set, set)) 746 if (set && get_sigset(&new_set, set))
747 return -EFAULT; 747 return -EFAULT;
748 748
749 set_fs (KERNEL_DS); 749 set_fs(KERNEL_DS);
750 ret = sys_rt_sigprocmask(how, set ? (sigset_t __user *)&new_set : NULL, 750 ret = sys_rt_sigprocmask(how, set ? (sigset_t __user *)&new_set : NULL,
751 oset ? (sigset_t __user *)&old_set : NULL, 751 oset ? (sigset_t __user *)&old_set : NULL,
752 sigsetsize); 752 sigsetsize);
753 set_fs (old_fs); 753 set_fs(old_fs);
754 754
755 if (!ret && oset && put_sigset(&old_set, oset)) 755 if (!ret && oset && put_sigset(&old_set, oset))
756 return -EFAULT; 756 return -EFAULT;
@@ -765,9 +765,9 @@ asmlinkage int sys32_rt_sigpending(compat_sigset_t __user *uset,
765 sigset_t set; 765 sigset_t set;
766 mm_segment_t old_fs = get_fs(); 766 mm_segment_t old_fs = get_fs();
767 767
768 set_fs (KERNEL_DS); 768 set_fs(KERNEL_DS);
769 ret = sys_rt_sigpending((sigset_t __user *)&set, sigsetsize); 769 ret = sys_rt_sigpending((sigset_t __user *)&set, sigsetsize);
770 set_fs (old_fs); 770 set_fs(old_fs);
771 771
772 if (!ret && put_sigset(&set, uset)) 772 if (!ret && put_sigset(&set, uset))
773 return -EFAULT; 773 return -EFAULT;
@@ -781,12 +781,12 @@ asmlinkage int sys32_rt_sigqueueinfo(int pid, int sig, compat_siginfo_t __user *
781 int ret; 781 int ret;
782 mm_segment_t old_fs = get_fs(); 782 mm_segment_t old_fs = get_fs();
783 783
784 if (copy_from_user (&info, uinfo, 3*sizeof(int)) || 784 if (copy_from_user(&info, uinfo, 3*sizeof(int)) ||
785 copy_from_user (info._sifields._pad, uinfo->_sifields._pad, SI_PAD_SIZE)) 785 copy_from_user(info._sifields._pad, uinfo->_sifields._pad, SI_PAD_SIZE))
786 return -EFAULT; 786 return -EFAULT;
787 set_fs (KERNEL_DS); 787 set_fs(KERNEL_DS);
788 ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info); 788 ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
789 set_fs (old_fs); 789 set_fs(old_fs);
790 return ret; 790 return ret;
791} 791}
792 792
@@ -801,10 +801,10 @@ sys32_waitid(int which, compat_pid_t pid,
801 mm_segment_t old_fs = get_fs(); 801 mm_segment_t old_fs = get_fs();
802 802
803 info.si_signo = 0; 803 info.si_signo = 0;
804 set_fs (KERNEL_DS); 804 set_fs(KERNEL_DS);
805 ret = sys_waitid(which, pid, (siginfo_t __user *) &info, options, 805 ret = sys_waitid(which, pid, (siginfo_t __user *) &info, options,
806 uru ? (struct rusage __user *) &ru : NULL); 806 uru ? (struct rusage __user *) &ru : NULL);
807 set_fs (old_fs); 807 set_fs(old_fs);
808 808
809 if (ret < 0 || info.si_signo == 0) 809 if (ret < 0 || info.si_signo == 0)
810 return ret; 810 return ret;
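
[Editor's note: most of the signal32.c churn is spacing, but the
put_sigset()/get_sigset() hunks show the compat marshalling rule: each 64-bit
sigset word is split into two 32-bit words, low half at the lower index, with
a deliberately fall-through switch covering _NSIG_WORDS. A standalone
userspace demo of the round trip:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t kword = 0x123456789abcdef0ULL;   /* one kernel sigset word */
            uint32_t compat[2];

            compat[0] = kword & 0xffffffff;           /* ubuf->sig[0]: low half  */
            compat[1] = kword >> 32;                  /* ubuf->sig[1]: high half */

            uint64_t back = compat[0] | ((uint64_t)compat[1] << 32);

            printf("%016llx -> %08x %08x -> %016llx\n",
                   (unsigned long long)kword, (unsigned)compat[0],
                   (unsigned)compat[1], (unsigned long long)back);
            return 0;
    }

]
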
diff --git a/arch/mips/kernel/signal_n32.c b/arch/mips/kernel/signal_n32.c
index eb7e05926ebe..bb277e82d421 100644
--- a/arch/mips/kernel/signal_n32.c
+++ b/arch/mips/kernel/signal_n32.c
@@ -88,7 +88,7 @@ struct rt_sigframe_n32 {
88 88
89#endif /* !ICACHE_REFILLS_WORKAROUND_WAR */ 89#endif /* !ICACHE_REFILLS_WORKAROUND_WAR */
90 90
91extern void sigset_from_compat (sigset_t *set, compat_sigset_t *compat); 91extern void sigset_from_compat(sigset_t *set, compat_sigset_t *compat);
92 92
93asmlinkage int sysn32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs) 93asmlinkage int sysn32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
94{ 94{
@@ -105,7 +105,7 @@ asmlinkage int sysn32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
105 unewset = (compat_sigset_t __user *) regs.regs[4]; 105 unewset = (compat_sigset_t __user *) regs.regs[4];
106 if (copy_from_user(&uset, unewset, sizeof(uset))) 106 if (copy_from_user(&uset, unewset, sizeof(uset)))
107 return -EFAULT; 107 return -EFAULT;
108 sigset_from_compat (&newset, &uset); 108 sigset_from_compat(&newset, &uset);
109 sigdelsetmask(&newset, ~_BLOCKABLE); 109 sigdelsetmask(&newset, ~_BLOCKABLE);
110 110
111 spin_lock_irq(&current->sighand->siglock); 111 spin_lock_irq(&current->sighand->siglock);
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index 05dcce416325..94e210cc6cb6 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -353,7 +353,7 @@ void core_send_ipi(int cpu, unsigned int action)
353 unsigned long flags; 353 unsigned long flags;
354 int vpflags; 354 int vpflags;
355 355
356 local_irq_save (flags); 356 local_irq_save(flags);
357 357
358 vpflags = dvpe(); /* can't access the other CPU's registers whilst MVPE enabled */ 358 vpflags = dvpe(); /* can't access the other CPU's registers whilst MVPE enabled */
359 359
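
[Editor's note: the dvpe()/evpe() bracket seen here is the recurring MT-ASE
idiom for touching another thread context's registers -- it reappears in the
time.c clockevent code later in this series. As a sketch (kernel context
assumed, not standalone; dvpe() returns the prior multi-VPE enable state):

    unsigned long flags;
    int vpflags;

    local_irq_save(flags);
    vpflags = dvpe();       /* halt the other VPEs while we poke their state */
    /* ... cross-VPE register accesses go here ... */
    evpe(vpflags);          /* restore whatever was enabled before */
    local_irq_restore(flags);

]
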
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 73b0dab02668..432f2e376aea 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -38,6 +38,7 @@
38#include <asm/system.h> 38#include <asm/system.h>
39#include <asm/mmu_context.h> 39#include <asm/mmu_context.h>
40#include <asm/smp.h> 40#include <asm/smp.h>
41#include <asm/time.h>
41 42
42#ifdef CONFIG_MIPS_MT_SMTC 43#ifdef CONFIG_MIPS_MT_SMTC
43#include <asm/mipsmtregs.h> 44#include <asm/mipsmtregs.h>
@@ -70,6 +71,7 @@ asmlinkage __cpuinit void start_secondary(void)
70 cpu_probe(); 71 cpu_probe();
71 cpu_report(); 72 cpu_report();
72 per_cpu_trap_init(); 73 per_cpu_trap_init();
74 mips_clockevent_init();
73 prom_init_secondary(); 75 prom_init_secondary();
74 76
75 /* 77 /*
@@ -95,6 +97,8 @@ struct call_data_struct *call_data;
95 97
96/* 98/*
97 * Run a function on all other CPUs. 99 * Run a function on all other CPUs.
100 *
 101 * <mask> cpumask_t of all processors to run the function on.
98 * <func> The function to run. This must be fast and non-blocking. 102 * <func> The function to run. This must be fast and non-blocking.
99 * <info> An arbitrary pointer to pass to the function. 103 * <info> An arbitrary pointer to pass to the function.
100 * <retry> If true, keep retrying until ready. 104 * <retry> If true, keep retrying until ready.
@@ -119,18 +123,20 @@ struct call_data_struct *call_data;
119 * Spin waiting for call_lock 123 * Spin waiting for call_lock
120 * Deadlock Deadlock 124 * Deadlock Deadlock
121 */ 125 */
122int smp_call_function (void (*func) (void *info), void *info, int retry, 126int smp_call_function_mask(cpumask_t mask, void (*func) (void *info),
123 int wait) 127 void *info, int retry, int wait)
124{ 128{
125 struct call_data_struct data; 129 struct call_data_struct data;
126 int i, cpus = num_online_cpus() - 1;
127 int cpu = smp_processor_id(); 130 int cpu = smp_processor_id();
131 int cpus;
128 132
129 /* 133 /*
130 * Can die spectacularly if this CPU isn't yet marked online 134 * Can die spectacularly if this CPU isn't yet marked online
131 */ 135 */
132 BUG_ON(!cpu_online(cpu)); 136 BUG_ON(!cpu_online(cpu));
133 137
138 cpu_clear(cpu, mask);
139 cpus = cpus_weight(mask);
134 if (!cpus) 140 if (!cpus)
135 return 0; 141 return 0;
136 142
@@ -149,9 +155,7 @@ int smp_call_function (void (*func) (void *info), void *info, int retry,
149 smp_mb(); 155 smp_mb();
150 156
151 /* Send a message to all other CPUs and wait for them to respond */ 157 /* Send a message to all other CPUs and wait for them to respond */
152 for_each_online_cpu(i) 158 core_send_ipi_mask(mask, SMP_CALL_FUNCTION);
153 if (i != cpu)
154 core_send_ipi(i, SMP_CALL_FUNCTION);
155 159
156 /* Wait for response */ 160 /* Wait for response */
157 /* FIXME: lock-up detection, backtrace on lock-up */ 161 /* FIXME: lock-up detection, backtrace on lock-up */
@@ -167,6 +171,11 @@ int smp_call_function (void (*func) (void *info), void *info, int retry,
167 return 0; 171 return 0;
168} 172}
169 173
174int smp_call_function(void (*func) (void *info), void *info, int retry,
175 int wait)
176{
177 return smp_call_function_mask(cpu_online_map, func, info, retry, wait);
178}
170 179
171void smp_call_function_interrupt(void) 180void smp_call_function_interrupt(void)
172{ 181{
@@ -197,8 +206,7 @@ void smp_call_function_interrupt(void)
197int smp_call_function_single(int cpu, void (*func) (void *info), void *info, 206int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
198 int retry, int wait) 207 int retry, int wait)
199{ 208{
200 struct call_data_struct data; 209 int ret, me;
201 int me;
202 210
203 /* 211 /*
204 * Can die spectacularly if this CPU isn't yet marked online 212 * Can die spectacularly if this CPU isn't yet marked online
@@ -217,33 +225,8 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
217 return 0; 225 return 0;
218 } 226 }
219 227
220 /* Can deadlock when called with interrupts disabled */ 228 ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, retry,
221 WARN_ON(irqs_disabled()); 229 wait);
222
223 data.func = func;
224 data.info = info;
225 atomic_set(&data.started, 0);
226 data.wait = wait;
227 if (wait)
228 atomic_set(&data.finished, 0);
229
230 spin_lock(&smp_call_lock);
231 call_data = &data;
232 smp_mb();
233
234 /* Send a message to the other CPU */
235 core_send_ipi(cpu, SMP_CALL_FUNCTION);
236
237 /* Wait for response */
238 /* FIXME: lock-up detection, backtrace on lock-up */
239 while (atomic_read(&data.started) != 1)
240 barrier();
241
242 if (wait)
243 while (atomic_read(&data.finished) != 1)
244 barrier();
245 call_data = NULL;
246 spin_unlock(&smp_call_lock);
247 230
248 put_cpu(); 231 put_cpu();
249 return 0; 232 return 0;
@@ -390,12 +373,15 @@ void flush_tlb_mm(struct mm_struct *mm)
390 preempt_disable(); 373 preempt_disable();
391 374
392 if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { 375 if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
393 smp_on_other_tlbs(flush_tlb_mm_ipi, (void *)mm); 376 smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
394 } else { 377 } else {
395 int i; 378 cpumask_t mask = cpu_online_map;
396 for (i = 0; i < num_online_cpus(); i++) 379 unsigned int cpu;
397 if (smp_processor_id() != i) 380
398 cpu_context(i, mm) = 0; 381 cpu_clear(smp_processor_id(), mask);
382 for_each_online_cpu(cpu)
383 if (cpu_context(cpu, mm))
384 cpu_context(cpu, mm) = 0;
399 } 385 }
400 local_flush_tlb_mm(mm); 386 local_flush_tlb_mm(mm);
401 387
@@ -410,7 +396,7 @@ struct flush_tlb_data {
410 396
411static void flush_tlb_range_ipi(void *info) 397static void flush_tlb_range_ipi(void *info)
412{ 398{
413 struct flush_tlb_data *fd = (struct flush_tlb_data *)info; 399 struct flush_tlb_data *fd = info;
414 400
415 local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2); 401 local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
416} 402}
@@ -421,17 +407,21 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned l
421 407
422 preempt_disable(); 408 preempt_disable();
423 if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { 409 if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
424 struct flush_tlb_data fd; 410 struct flush_tlb_data fd = {
411 .vma = vma,
412 .addr1 = start,
413 .addr2 = end,
414 };
425 415
426 fd.vma = vma; 416 smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
427 fd.addr1 = start;
428 fd.addr2 = end;
429 smp_on_other_tlbs(flush_tlb_range_ipi, (void *)&fd);
430 } else { 417 } else {
431 int i; 418 cpumask_t mask = cpu_online_map;
432 for (i = 0; i < num_online_cpus(); i++) 419 unsigned int cpu;
433 if (smp_processor_id() != i) 420
434 cpu_context(i, mm) = 0; 421 cpu_clear(smp_processor_id(), mask);
422 for_each_online_cpu(cpu)
423 if (cpu_context(cpu, mm))
424 cpu_context(cpu, mm) = 0;
435 } 425 }
436 local_flush_tlb_range(vma, start, end); 426 local_flush_tlb_range(vma, start, end);
437 preempt_enable(); 427 preempt_enable();
@@ -439,23 +429,24 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned l
439 429
440static void flush_tlb_kernel_range_ipi(void *info) 430static void flush_tlb_kernel_range_ipi(void *info)
441{ 431{
442 struct flush_tlb_data *fd = (struct flush_tlb_data *)info; 432 struct flush_tlb_data *fd = info;
443 433
444 local_flush_tlb_kernel_range(fd->addr1, fd->addr2); 434 local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
445} 435}
446 436
447void flush_tlb_kernel_range(unsigned long start, unsigned long end) 437void flush_tlb_kernel_range(unsigned long start, unsigned long end)
448{ 438{
449 struct flush_tlb_data fd; 439 struct flush_tlb_data fd = {
440 .addr1 = start,
441 .addr2 = end,
442 };
450 443
451 fd.addr1 = start; 444 on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1, 1);
452 fd.addr2 = end;
453 on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1, 1);
454} 445}
455 446
456static void flush_tlb_page_ipi(void *info) 447static void flush_tlb_page_ipi(void *info)
457{ 448{
458 struct flush_tlb_data *fd = (struct flush_tlb_data *)info; 449 struct flush_tlb_data *fd = info;
459 450
460 local_flush_tlb_page(fd->vma, fd->addr1); 451 local_flush_tlb_page(fd->vma, fd->addr1);
461} 452}
@@ -464,16 +455,20 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
464{ 455{
465 preempt_disable(); 456 preempt_disable();
466 if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) { 457 if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
467 struct flush_tlb_data fd; 458 struct flush_tlb_data fd = {
459 .vma = vma,
460 .addr1 = page,
461 };
468 462
469 fd.vma = vma; 463 smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
470 fd.addr1 = page;
471 smp_on_other_tlbs(flush_tlb_page_ipi, (void *)&fd);
472 } else { 464 } else {
473 int i; 465 cpumask_t mask = cpu_online_map;
474 for (i = 0; i < num_online_cpus(); i++) 466 unsigned int cpu;
475 if (smp_processor_id() != i) 467
476 cpu_context(i, vma->vm_mm) = 0; 468 cpu_clear(smp_processor_id(), mask);
469 for_each_online_cpu(cpu)
470 if (cpu_context(cpu, vma->vm_mm))
471 cpu_context(cpu, vma->vm_mm) = 0;
477 } 472 }
478 local_flush_tlb_page(vma, page); 473 local_flush_tlb_page(vma, page);
479 preempt_enable(); 474 preempt_enable();
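
[Editor's note: the heart of the smp.c change is that one primitive now does
the work of three. A consolidated sketch of the call graph after this patch
(the call_data handshake and locking are elided -- see the hunks above for
the full body -- and core_send_ipi_mask() is platform code not shown here):

    int smp_call_function_mask(cpumask_t mask, void (*func)(void *),
                               void *info, int retry, int wait)
    {
            cpu_clear(smp_processor_id(), mask);    /* never IPI ourselves */
            if (!cpus_weight(mask))
                    return 0;
            /* ... publish call_data under smp_call_lock ... */
            core_send_ipi_mask(mask, SMP_CALL_FUNCTION);
            /* ... spin on data.started (and data.finished if wait) ... */
            return 0;
    }

    int smp_call_function(void (*func)(void *), void *info, int retry, int wait)
    {
            return smp_call_function_mask(cpu_online_map, func, info, retry, wait);
    }

    /* smp_call_function_single() likewise collapses onto the mask variant
     * with cpumask_of_cpu(cpu), dropping its hand-rolled copy of the
     * handshake -- which is where the 25-line deletion above comes from. */

]
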
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index f09404377ef1..a8c1a698d588 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -1,5 +1,6 @@
1/* Copyright (C) 2004 Mips Technologies, Inc */ 1/* Copyright (C) 2004 Mips Technologies, Inc */
2 2
3#include <linux/clockchips.h>
3#include <linux/kernel.h> 4#include <linux/kernel.h>
4#include <linux/sched.h> 5#include <linux/sched.h>
5#include <linux/cpumask.h> 6#include <linux/cpumask.h>
@@ -62,7 +63,7 @@ asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS];
62 * Clock interrupt "latch" buffers, per "CPU" 63 * Clock interrupt "latch" buffers, per "CPU"
63 */ 64 */
64 65
65unsigned int ipi_timer_latch[NR_CPUS]; 66static atomic_t ipi_timer_latch[NR_CPUS];
66 67
67/* 68/*
68 * Number of InterProcessor Interupt (IPI) message buffers to allocate 69 * Number of InterProcessor Interupt (IPI) message buffers to allocate
@@ -179,7 +180,7 @@ void __init sanitize_tlb_entries(void)
179 180
180static void smtc_configure_tlb(void) 181static void smtc_configure_tlb(void)
181{ 182{
182 int i,tlbsiz,vpes; 183 int i, tlbsiz, vpes;
183 unsigned long mvpconf0; 184 unsigned long mvpconf0;
184 unsigned long config1val; 185 unsigned long config1val;
185 186
@@ -296,8 +297,10 @@ int __init mipsmt_build_cpu_map(int start_cpu_slot)
296 __cpu_number_map[i] = i; 297 __cpu_number_map[i] = i;
297 __cpu_logical_map[i] = i; 298 __cpu_logical_map[i] = i;
298 } 299 }
300#ifdef CONFIG_MIPS_MT_FPAFF
299 /* Initialize map of CPUs with FPUs */ 301 /* Initialize map of CPUs with FPUs */
300 cpus_clear(mt_fpu_cpumask); 302 cpus_clear(mt_fpu_cpumask);
303#endif
301 304
302 /* One of those TC's is the one booting, and not a secondary... */ 305 /* One of those TC's is the one booting, and not a secondary... */
303 printk("%i available secondary CPU TC(s)\n", i - 1); 306 printk("%i available secondary CPU TC(s)\n", i - 1);
@@ -359,7 +362,7 @@ void mipsmt_prepare_cpus(void)
359 IPIQ[i].head = IPIQ[i].tail = NULL; 362 IPIQ[i].head = IPIQ[i].tail = NULL;
360 spin_lock_init(&IPIQ[i].lock); 363 spin_lock_init(&IPIQ[i].lock);
361 IPIQ[i].depth = 0; 364 IPIQ[i].depth = 0;
362 ipi_timer_latch[i] = 0; 365 atomic_set(&ipi_timer_latch[i], 0);
363 } 366 }
364 367
365 /* cpu_data index starts at zero */ 368 /* cpu_data index starts at zero */
@@ -369,7 +372,7 @@ void mipsmt_prepare_cpus(void)
369 cpu++; 372 cpu++;
370 373
371 /* Report on boot-time options */ 374 /* Report on boot-time options */
372 mips_mt_set_cpuoptions (); 375 mips_mt_set_cpuoptions();
373 if (vpelimit > 0) 376 if (vpelimit > 0)
374 printk("Limit of %d VPEs set\n", vpelimit); 377 printk("Limit of %d VPEs set\n", vpelimit);
375 if (tclimit > 0) 378 if (tclimit > 0)
@@ -420,7 +423,7 @@ void mipsmt_prepare_cpus(void)
420 * code. Leave it alone! 423 * code. Leave it alone!
421 */ 424 */
422 if (tc != 0) { 425 if (tc != 0) {
423 smtc_tc_setup(vpe,tc, cpu); 426 smtc_tc_setup(vpe, tc, cpu);
424 cpu++; 427 cpu++;
425 } 428 }
426 printk(" %d", tc); 429 printk(" %d", tc);
@@ -428,7 +431,7 @@ void mipsmt_prepare_cpus(void)
428 } 431 }
429 if (slop) { 432 if (slop) {
430 if (tc != 0) { 433 if (tc != 0) {
431 smtc_tc_setup(vpe,tc, cpu); 434 smtc_tc_setup(vpe, tc, cpu);
432 cpu++; 435 cpu++;
433 } 436 }
434 printk(" %d", tc); 437 printk(" %d", tc);
@@ -482,10 +485,12 @@ void mipsmt_prepare_cpus(void)
482 485
483 /* Set up coprocessor affinity CPU mask(s) */ 486 /* Set up coprocessor affinity CPU mask(s) */
484 487
488#ifdef CONFIG_MIPS_MT_FPAFF
485 for (tc = 0; tc < ntc; tc++) { 489 for (tc = 0; tc < ntc; tc++) {
486 if (cpu_data[tc].options & MIPS_CPU_FPU) 490 if (cpu_data[tc].options & MIPS_CPU_FPU)
487 cpu_set(tc, mt_fpu_cpumask); 491 cpu_set(tc, mt_fpu_cpumask);
488 } 492 }
493#endif
489 494
490 /* set up ipi interrupts... */ 495 /* set up ipi interrupts... */
491 496
@@ -567,7 +572,7 @@ void smtc_init_secondary(void)
567 if (((read_c0_tcbind() & TCBIND_CURTC) != 0) && 572 if (((read_c0_tcbind() & TCBIND_CURTC) != 0) &&
568 ((read_c0_tcbind() & TCBIND_CURVPE) 573 ((read_c0_tcbind() & TCBIND_CURVPE)
569 != cpu_data[smp_processor_id() - 1].vpe_id)){ 574 != cpu_data[smp_processor_id() - 1].vpe_id)){
570 write_c0_compare (read_c0_count() + mips_hpt_frequency/HZ); 575 write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ);
571 } 576 }
572 577
573 local_irq_enable(); 578 local_irq_enable();
@@ -606,6 +611,60 @@ int setup_irq_smtc(unsigned int irq, struct irqaction * new,
606 return setup_irq(irq, new); 611 return setup_irq(irq, new);
607} 612}
608 613
614#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
615/*
616 * Support for IRQ affinity to TCs
617 */
618
619void smtc_set_irq_affinity(unsigned int irq, cpumask_t affinity)
620{
621 /*
622 * If a "fast path" cache of quickly decodable affinity state
623 * is maintained, this is where it gets done, on a call up
624 * from the platform affinity code.
625 */
626}
627
628void smtc_forward_irq(unsigned int irq)
629{
630 int target;
631
632 /*
633 * OK wise guy, now figure out how to get the IRQ
634 * to be serviced on an authorized "CPU".
635 *
636 * Ideally, to handle the situation where an IRQ has multiple
637 * eligible CPUS, we would maintain state per IRQ that would
638 * allow a fair distribution of service requests. Since the
639 * expected use model is any-or-only-one, for simplicity
640 * and efficiency, we just pick the easiest one to find.
641 */
642
643 target = first_cpu(irq_desc[irq].affinity);
644
645 /*
646 * We depend on the platform code to have correctly processed
647 * IRQ affinity change requests to ensure that the IRQ affinity
648 * mask has been purged of bits corresponding to nonexistent and
649 * offline "CPUs", and to TCs bound to VPEs other than the VPE
650 * connected to the physical interrupt input for the interrupt
651 * in question. Otherwise we have a nasty problem with interrupt
652 * mask management. This is best handled in non-performance-critical
653 * platform IRQ affinity setting code, to minimize interrupt-time
654 * checks.
655 */
656
657 /* If no one is eligible, service locally */
658 if (target >= NR_CPUS) {
659 do_IRQ_no_affinity(irq);
660 return;
661 }
662
663 smtc_send_ipi(target, IRQ_AFFINITY_IPI, irq);
664}
665
666#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
667
609/* 668/*
610 * IPI model for SMTC is tricky, because interrupts aren't TC-specific. 669 * IPI model for SMTC is tricky, because interrupts aren't TC-specific.
611 * Within a VPE one TC can interrupt another by different approaches. 670 * Within a VPE one TC can interrupt another by different approaches.
@@ -648,7 +707,7 @@ static void smtc_ipi_qdump(void)
648 * be done with the atomic.h primitives). And since this is 707 * be done with the atomic.h primitives). And since this is
649 * MIPS MT, we can assume that we have LL/SC. 708 * MIPS MT, we can assume that we have LL/SC.
650 */ 709 */
651static __inline__ int atomic_postincrement(unsigned int *pv) 710static inline int atomic_postincrement(atomic_t *v)
652{ 711{
653 unsigned long result; 712 unsigned long result;
654 713
@@ -659,9 +718,9 @@ static __inline__ int atomic_postincrement(unsigned int *pv)
659 " addu %1, %0, 1 \n" 718 " addu %1, %0, 1 \n"
660 " sc %1, %2 \n" 719 " sc %1, %2 \n"
661 " beqz %1, 1b \n" 720 " beqz %1, 1b \n"
662 " sync \n" 721 __WEAK_LLSC_MB
663 : "=&r" (result), "=&r" (temp), "=m" (*pv) 722 : "=&r" (result), "=&r" (temp), "=m" (v->counter)
664 : "m" (*pv) 723 : "m" (v->counter)
665 : "memory"); 724 : "memory");
666 725
667 return result; 726 return result;
@@ -689,6 +748,8 @@ void smtc_send_ipi(int cpu, int type, unsigned int action)
689 pipi->arg = (void *)action; 748 pipi->arg = (void *)action;
690 pipi->dest = cpu; 749 pipi->dest = cpu;
691 if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) { 750 if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
751 if (type == SMTC_CLOCK_TICK)
752 atomic_inc(&ipi_timer_latch[cpu]);
692 /* If not on same VPE, enqueue and send cross-VPE interrupt */ 753 /* If not on same VPE, enqueue and send cross-VPE interrupt */
693 smtc_ipi_nq(&IPIQ[cpu], pipi); 754 smtc_ipi_nq(&IPIQ[cpu], pipi);
694 LOCK_CORE_PRA(); 755 LOCK_CORE_PRA();
@@ -730,6 +791,8 @@ void smtc_send_ipi(int cpu, int type, unsigned int action)
730 } 791 }
731 smtc_ipi_nq(&IPIQ[cpu], pipi); 792 smtc_ipi_nq(&IPIQ[cpu], pipi);
732 } else { 793 } else {
794 if (type == SMTC_CLOCK_TICK)
795 atomic_inc(&ipi_timer_latch[cpu]);
733 post_direct_ipi(cpu, pipi); 796 post_direct_ipi(cpu, pipi);
734 write_tc_c0_tchalt(0); 797 write_tc_c0_tchalt(0);
735 UNLOCK_CORE_PRA(); 798 UNLOCK_CORE_PRA();
@@ -747,6 +810,7 @@ static void post_direct_ipi(int cpu, struct smtc_ipi *pipi)
747 unsigned long tcrestart; 810 unsigned long tcrestart;
748 extern u32 kernelsp[NR_CPUS]; 811 extern u32 kernelsp[NR_CPUS];
749 extern void __smtc_ipi_vector(void); 812 extern void __smtc_ipi_vector(void);
813//printk("%s: on %d for %d\n", __func__, smp_processor_id(), cpu);
750 814
751 /* Extract Status, EPC from halted TC */ 815 /* Extract Status, EPC from halted TC */
752 tcstatus = read_tc_c0_tcstatus(); 816 tcstatus = read_tc_c0_tcstatus();
@@ -797,25 +861,31 @@ static void ipi_call_interrupt(void)
797 smp_call_function_interrupt(); 861 smp_call_function_interrupt();
798} 862}
799 863
864DECLARE_PER_CPU(struct clock_event_device, smtc_dummy_clockevent_device);
865
800void ipi_decode(struct smtc_ipi *pipi) 866void ipi_decode(struct smtc_ipi *pipi)
801{ 867{
868 unsigned int cpu = smp_processor_id();
869 struct clock_event_device *cd;
802 void *arg_copy = pipi->arg; 870 void *arg_copy = pipi->arg;
803 int type_copy = pipi->type; 871 int type_copy = pipi->type;
804 int dest_copy = pipi->dest; 872 int ticks;
805 873
806 smtc_ipi_nq(&freeIPIq, pipi); 874 smtc_ipi_nq(&freeIPIq, pipi);
807 switch (type_copy) { 875 switch (type_copy) {
808 case SMTC_CLOCK_TICK: 876 case SMTC_CLOCK_TICK:
809 irq_enter(); 877 irq_enter();
810 kstat_this_cpu.irqs[MIPS_CPU_IRQ_BASE + cp0_compare_irq]++; 878 kstat_this_cpu.irqs[MIPS_CPU_IRQ_BASE + 1]++;
811 /* Invoke Clock "Interrupt" */ 879 cd = &per_cpu(smtc_dummy_clockevent_device, cpu);
812 ipi_timer_latch[dest_copy] = 0; 880 ticks = atomic_read(&ipi_timer_latch[cpu]);
813#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG 881 atomic_sub(ticks, &ipi_timer_latch[cpu]);
814 clock_hang_reported[dest_copy] = 0; 882 while (ticks) {
815#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */ 883 cd->event_handler(cd);
816 local_timer_interrupt(0, NULL); 884 ticks--;
885 }
817 irq_exit(); 886 irq_exit();
818 break; 887 break;
888
819 case LINUX_SMP_IPI: 889 case LINUX_SMP_IPI:
820 switch ((int)arg_copy) { 890 switch ((int)arg_copy) {
821 case SMP_RESCHEDULE_YOURSELF: 891 case SMP_RESCHEDULE_YOURSELF:
@@ -830,6 +900,15 @@ void ipi_decode(struct smtc_ipi *pipi)
830 break; 900 break;
831 } 901 }
832 break; 902 break;
903#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
904 case IRQ_AFFINITY_IPI:
905 /*
906 * Accept a "forwarded" interrupt that was initially
907 * taken by a TC who doesn't have affinity for the IRQ.
908 */
909 do_IRQ_no_affinity((int)arg_copy);
910 break;
911#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
833 default: 912 default:
834 printk("Impossible SMTC IPI Type 0x%x\n", type_copy); 913 printk("Impossible SMTC IPI Type 0x%x\n", type_copy);
835 break; 914 break;
@@ -858,25 +937,6 @@ void deferred_smtc_ipi(void)
858} 937}
859 938
860/* 939/*
861 * Send clock tick to all TCs except the one executing the funtion
862 */
863
864void smtc_timer_broadcast(void)
865{
866 int cpu;
867 int myTC = cpu_data[smp_processor_id()].tc_id;
868 int myVPE = cpu_data[smp_processor_id()].vpe_id;
869
870 smtc_cpu_stats[smp_processor_id()].timerints++;
871
872 for_each_online_cpu(cpu) {
873 if (cpu_data[cpu].vpe_id == myVPE &&
874 cpu_data[cpu].tc_id != myTC)
875 smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
876 }
877}
878
879/*
880 * Cross-VPE interrupts in the SMTC prototype use "software interrupts" 940 * Cross-VPE interrupts in the SMTC prototype use "software interrupts"
881 * set via cross-VPE MTTR manipulation of the Cause register. It would be 941 * set via cross-VPE MTTR manipulation of the Cause register. It would be
882 * in some regards preferable to have external logic for "doorbell" hardware 942 * in some regards preferable to have external logic for "doorbell" hardware
@@ -1117,11 +1177,11 @@ void smtc_idle_loop_hook(void)
1117 for (tc = 0; tc < NR_CPUS; tc++) { 1177 for (tc = 0; tc < NR_CPUS; tc++) {
1118 /* Don't check ourself - we'll dequeue IPIs just below */ 1178 /* Don't check ourself - we'll dequeue IPIs just below */
1119 if ((tc != smp_processor_id()) && 1179 if ((tc != smp_processor_id()) &&
1120 ipi_timer_latch[tc] > timerq_limit) { 1180 atomic_read(&ipi_timer_latch[tc]) > timerq_limit) {
1121 if (clock_hang_reported[tc] == 0) { 1181 if (clock_hang_reported[tc] == 0) {
1122 pdb_msg += sprintf(pdb_msg, 1182 pdb_msg += sprintf(pdb_msg,
1123 "TC %d looks hung with timer latch at %d\n", 1183 "TC %d looks hung with timer latch at %d\n",
1124 tc, ipi_timer_latch[tc]); 1184 tc, atomic_read(&ipi_timer_latch[tc]));
1125 clock_hang_reported[tc]++; 1185 clock_hang_reported[tc]++;
1126 } 1186 }
1127 } 1187 }
@@ -1162,7 +1222,7 @@ void smtc_soft_dump(void)
1162 smtc_ipi_qdump(); 1222 smtc_ipi_qdump();
1163 printk("Timer IPI Backlogs:\n"); 1223 printk("Timer IPI Backlogs:\n");
1164 for (i=0; i < NR_CPUS; i++) { 1224 for (i=0; i < NR_CPUS; i++) {
1165 printk("%d: %d\n", i, ipi_timer_latch[i]); 1225 printk("%d: %d\n", i, atomic_read(&ipi_timer_latch[i]));
1166 } 1226 }
1167 printk("%d Recoveries of \"stolen\" FPU\n", 1227 printk("%d Recoveries of \"stolen\" FPU\n",
1168 atomic_read(&smtc_fpu_recoveries)); 1228 atomic_read(&smtc_fpu_recoveries));
@@ -1204,7 +1264,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
1204 if (cpu_has_vtag_icache) 1264 if (cpu_has_vtag_icache)
1205 flush_icache_all(); 1265 flush_icache_all();
1206 /* Traverse all online CPUs (hack requires contiguous range) */ 1266 /* Traverse all online CPUs (hack requires contiguous range) */
1207 for (i = 0; i < num_online_cpus(); i++) { 1267 for_each_online_cpu(i) {
1208 /* 1268 /*
1209 * We don't need to worry about our own CPU, nor those of 1269 * We don't need to worry about our own CPU, nor those of
1210 * CPUs who don't share our TLB. 1270 * CPUs who don't share our TLB.
@@ -1233,7 +1293,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
1233 /* 1293 /*
1234 * SMTC shares the TLB within VPEs and possibly across all VPEs. 1294 * SMTC shares the TLB within VPEs and possibly across all VPEs.
1235 */ 1295 */
1236 for (i = 0; i < num_online_cpus(); i++) { 1296 for_each_online_cpu(i) {
1237 if ((smtc_status & SMTC_TLB_SHARED) || 1297 if ((smtc_status & SMTC_TLB_SHARED) ||
1238 (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id)) 1298 (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))
1239 cpu_context(i, mm) = asid_cache(i) = asid; 1299 cpu_context(i, mm) = asid_cache(i) = asid;
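
[Editor's note: the ipi_timer_latch[] conversion to atomic_t closes a race
between the tick sender and the consumer in ipi_decode(). The drain logic the
hunk adds, restated with comments (sketch, kernel context assumed):

    ticks = atomic_read(&ipi_timer_latch[cpu]);
    atomic_sub(ticks, &ipi_timer_latch[cpu]);   /* consume only what we saw */
    while (ticks--)
            cd->event_handler(cd);              /* replay each queued tick */

Subtracting the snapshot rather than zeroing the latch means any increments
that land between the read and the sub survive for the next IPI instead of
being silently dropped.]
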
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index 7c800ec3ff55..17c4374d2209 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -245,7 +245,7 @@ asmlinkage int sys_olduname(struct oldold_utsname __user * name)
245 245
246 if (!name) 246 if (!name)
247 return -EFAULT; 247 return -EFAULT;
248 if (!access_ok(VERIFY_WRITE,name,sizeof(struct oldold_utsname))) 248 if (!access_ok(VERIFY_WRITE, name, sizeof(struct oldold_utsname)))
249 return -EFAULT; 249 return -EFAULT;
250 250
251 error = __copy_to_user(&name->sysname, &utsname()->sysname, 251 error = __copy_to_user(&name->sysname, &utsname()->sysname,
@@ -314,8 +314,8 @@ asmlinkage int _sys_sysmips(int cmd, long arg1, int arg2, int arg3)
314 * 314 *
315 * This is really horribly ugly. 315 * This is really horribly ugly.
316 */ 316 */
317asmlinkage int sys_ipc (unsigned int call, int first, int second, 317asmlinkage int sys_ipc(unsigned int call, int first, int second,
318 unsigned long third, void __user *ptr, long fifth) 318 unsigned long third, void __user *ptr, long fifth)
319{ 319{
320 int version, ret; 320 int version, ret;
321 321
@@ -324,26 +324,26 @@ asmlinkage int sys_ipc (unsigned int call, int first, int second,
324 324
325 switch (call) { 325 switch (call) {
326 case SEMOP: 326 case SEMOP:
327 return sys_semtimedop (first, (struct sembuf __user *)ptr, 327 return sys_semtimedop(first, (struct sembuf __user *)ptr,
328 second, NULL); 328 second, NULL);
329 case SEMTIMEDOP: 329 case SEMTIMEDOP:
330 return sys_semtimedop (first, (struct sembuf __user *)ptr, 330 return sys_semtimedop(first, (struct sembuf __user *)ptr,
331 second, 331 second,
332 (const struct timespec __user *)fifth); 332 (const struct timespec __user *)fifth);
333 case SEMGET: 333 case SEMGET:
334 return sys_semget (first, second, third); 334 return sys_semget(first, second, third);
335 case SEMCTL: { 335 case SEMCTL: {
336 union semun fourth; 336 union semun fourth;
337 if (!ptr) 337 if (!ptr)
338 return -EINVAL; 338 return -EINVAL;
339 if (get_user(fourth.__pad, (void __user *__user *) ptr)) 339 if (get_user(fourth.__pad, (void __user *__user *) ptr))
340 return -EFAULT; 340 return -EFAULT;
341 return sys_semctl (first, second, third, fourth); 341 return sys_semctl(first, second, third, fourth);
342 } 342 }
343 343
344 case MSGSND: 344 case MSGSND:
345 return sys_msgsnd (first, (struct msgbuf __user *) ptr, 345 return sys_msgsnd(first, (struct msgbuf __user *) ptr,
346 second, third); 346 second, third);
347 case MSGRCV: 347 case MSGRCV:
348 switch (version) { 348 switch (version) {
349 case 0: { 349 case 0: {
@@ -353,45 +353,45 @@ asmlinkage int sys_ipc (unsigned int call, int first, int second,
353 353
354 if (copy_from_user(&tmp, 354 if (copy_from_user(&tmp,
355 (struct ipc_kludge __user *) ptr, 355 (struct ipc_kludge __user *) ptr,
356 sizeof (tmp))) 356 sizeof(tmp)))
357 return -EFAULT; 357 return -EFAULT;
358 return sys_msgrcv (first, tmp.msgp, second, 358 return sys_msgrcv(first, tmp.msgp, second,
359 tmp.msgtyp, third); 359 tmp.msgtyp, third);
360 } 360 }
361 default: 361 default:
362 return sys_msgrcv (first, 362 return sys_msgrcv(first,
363 (struct msgbuf __user *) ptr, 363 (struct msgbuf __user *) ptr,
364 second, fifth, third); 364 second, fifth, third);
365 } 365 }
366 case MSGGET: 366 case MSGGET:
367 return sys_msgget ((key_t) first, second); 367 return sys_msgget((key_t) first, second);
368 case MSGCTL: 368 case MSGCTL:
369 return sys_msgctl (first, second, 369 return sys_msgctl(first, second,
370 (struct msqid_ds __user *) ptr); 370 (struct msqid_ds __user *) ptr);
371 371
372 case SHMAT: 372 case SHMAT:
373 switch (version) { 373 switch (version) {
374 default: { 374 default: {
375 unsigned long raddr; 375 unsigned long raddr;
376 ret = do_shmat (first, (char __user *) ptr, second, 376 ret = do_shmat(first, (char __user *) ptr, second,
377 &raddr); 377 &raddr);
378 if (ret) 378 if (ret)
379 return ret; 379 return ret;
380 return put_user (raddr, (unsigned long __user *) third); 380 return put_user(raddr, (unsigned long __user *) third);
381 } 381 }
382 case 1: /* iBCS2 emulator entry point */ 382 case 1: /* iBCS2 emulator entry point */
383 if (!segment_eq(get_fs(), get_ds())) 383 if (!segment_eq(get_fs(), get_ds()))
384 return -EINVAL; 384 return -EINVAL;
385 return do_shmat (first, (char __user *) ptr, second, 385 return do_shmat(first, (char __user *) ptr, second,
386 (unsigned long *) third); 386 (unsigned long *) third);
387 } 387 }
388 case SHMDT: 388 case SHMDT:
389 return sys_shmdt ((char __user *)ptr); 389 return sys_shmdt((char __user *)ptr);
390 case SHMGET: 390 case SHMGET:
391 return sys_shmget (first, second, third); 391 return sys_shmget(first, second, third);
392 case SHMCTL: 392 case SHMCTL:
393 return sys_shmctl (first, second, 393 return sys_shmctl(first, second,
394 (struct shmid_ds __user *) ptr); 394 (struct shmid_ds __user *) ptr);
395 default: 395 default:
396 return -ENOSYS; 396 return -ENOSYS;
397 } 397 }
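
[Editor's note: sys_ipc() is the old-style System V IPC multiplexer; the whole
change here is spacing, but the demux contract is easy to miss. The opcode
arrives in the low 16 bits of 'call' and an ABI version in the high 16 (the
masking lines sit just above the first hunk and are assumed here). A
standalone demo of the split:

    #include <stdio.h>

    int main(void)
    {
            unsigned int call = (1u << 16) | 21;   /* version 1, opcode 21 (SHMAT) */
            unsigned int version = call >> 16;

            call &= 0xffff;
            printf("version=%u opcode=%u\n", version, call);
            return 0;
    }

Version 1 of SHMAT is the iBCS2 emulator path, which the code above rejects
unless the caller is already running with a kernel data segment.]
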
diff --git a/arch/mips/kernel/sysirix.c b/arch/mips/kernel/sysirix.c
index 93a148486f88..ee7790d9debe 100644
--- a/arch/mips/kernel/sysirix.c
+++ b/arch/mips/kernel/sysirix.c
@@ -486,10 +486,10 @@ asmlinkage int irix_syssgi(struct pt_regs *regs)
486 486
487 switch (arg1) { 487 switch (arg1) {
488 case SGI_INV_SIZEOF: 488 case SGI_INV_SIZEOF:
489 retval = sizeof (inventory_t); 489 retval = sizeof(inventory_t);
490 break; 490 break;
491 case SGI_INV_READ: 491 case SGI_INV_READ:
492 retval = dump_inventory_to_user (buffer, count); 492 retval = dump_inventory_to_user(buffer, count);
493 break; 493 break;
494 default: 494 default:
495 retval = -EINVAL; 495 retval = -EINVAL;
@@ -778,7 +778,7 @@ asmlinkage int irix_times(struct tms __user *tbuf)
778 int err = 0; 778 int err = 0;
779 779
780 if (tbuf) { 780 if (tbuf) {
781 if (!access_ok(VERIFY_WRITE,tbuf,sizeof *tbuf)) 781 if (!access_ok(VERIFY_WRITE, tbuf, sizeof *tbuf))
782 return -EFAULT; 782 return -EFAULT;
783 783
784 err = __put_user(current->utime, &tbuf->tms_utime); 784 err = __put_user(current->utime, &tbuf->tms_utime);
@@ -1042,9 +1042,9 @@ asmlinkage unsigned long irix_mmap32(unsigned long addr, size_t len, int prot,
1042 long max_size = offset + len; 1042 long max_size = offset + len;
1043 1043
1044 if (max_size > file->f_path.dentry->d_inode->i_size) { 1044 if (max_size > file->f_path.dentry->d_inode->i_size) {
1045 old_pos = sys_lseek (fd, max_size - 1, 0); 1045 old_pos = sys_lseek(fd, max_size - 1, 0);
1046 sys_write (fd, (void __user *) "", 1); 1046 sys_write(fd, (void __user *) "", 1);
1047 sys_lseek (fd, old_pos, 0); 1047 sys_lseek(fd, old_pos, 0);
1048 } 1048 }
1049 } 1049 }
1050 } 1050 }
@@ -1176,7 +1176,7 @@ static int irix_xstat32_xlate(struct kstat *stat, void __user *ubuf)
1176 ub.st_ctime1 = stat->atime.tv_nsec; 1176 ub.st_ctime1 = stat->atime.tv_nsec;
1177 ub.st_blksize = stat->blksize; 1177 ub.st_blksize = stat->blksize;
1178 ub.st_blocks = stat->blocks; 1178 ub.st_blocks = stat->blocks;
1179 strcpy (ub.st_fstype, "efs"); 1179 strcpy(ub.st_fstype, "efs");
1180 1180
1181 return copy_to_user(ubuf, &ub, sizeof(ub)) ? -EFAULT : 0; 1181 return copy_to_user(ubuf, &ub, sizeof(ub)) ? -EFAULT : 0;
1182} 1182}
@@ -1208,7 +1208,7 @@ static int irix_xstat64_xlate(struct kstat *stat, void __user *ubuf)
1208 ks.st_nlink = (u32) stat->nlink; 1208 ks.st_nlink = (u32) stat->nlink;
1209 ks.st_uid = (s32) stat->uid; 1209 ks.st_uid = (s32) stat->uid;
1210 ks.st_gid = (s32) stat->gid; 1210 ks.st_gid = (s32) stat->gid;
1211 ks.st_rdev = sysv_encode_dev (stat->rdev); 1211 ks.st_rdev = sysv_encode_dev(stat->rdev);
1212 ks.st_pad2[0] = ks.st_pad2[1] = 0; 1212 ks.st_pad2[0] = ks.st_pad2[1] = 0;
1213 ks.st_size = (long long) stat->size; 1213 ks.st_size = (long long) stat->size;
1214 ks.st_pad3 = 0; 1214 ks.st_pad3 = 0;
@@ -1527,9 +1527,9 @@ asmlinkage int irix_mmap64(struct pt_regs *regs)
1527 long max_size = off2 + len; 1527 long max_size = off2 + len;
1528 1528
1529 if (max_size > file->f_path.dentry->d_inode->i_size) { 1529 if (max_size > file->f_path.dentry->d_inode->i_size) {
1530 old_pos = sys_lseek (fd, max_size - 1, 0); 1530 old_pos = sys_lseek(fd, max_size - 1, 0);
1531 sys_write (fd, (void __user *) "", 1); 1531 sys_write(fd, (void __user *) "", 1);
1532 sys_lseek (fd, old_pos, 0); 1532 sys_lseek(fd, old_pos, 0);
1533 } 1533 }
1534 } 1534 }
1535 } 1535 }
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c
index 9a5596bf8571..5892491b40eb 100644
--- a/arch/mips/kernel/time.c
+++ b/arch/mips/kernel/time.c
@@ -11,6 +11,7 @@
11 * Free Software Foundation; either version 2 of the License, or (at your 11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version. 12 * option) any later version.
13 */ 13 */
14#include <linux/clockchips.h>
14#include <linux/types.h> 15#include <linux/types.h>
15#include <linux/kernel.h> 16#include <linux/kernel.h>
16#include <linux/init.h> 17#include <linux/init.h>
@@ -24,6 +25,7 @@
24#include <linux/spinlock.h> 25#include <linux/spinlock.h>
25#include <linux/interrupt.h> 26#include <linux/interrupt.h>
26#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/kallsyms.h>
27 29
28#include <asm/bootinfo.h> 30#include <asm/bootinfo.h>
29#include <asm/cache.h> 31#include <asm/cache.h>
@@ -32,8 +34,11 @@
32#include <asm/cpu-features.h> 34#include <asm/cpu-features.h>
33#include <asm/div64.h> 35#include <asm/div64.h>
34#include <asm/sections.h> 36#include <asm/sections.h>
37#include <asm/smtc_ipi.h>
35#include <asm/time.h> 38#include <asm/time.h>
36 39
40#include <irq.h>
41
37/* 42/*
38 * The integer part of the number of usecs per jiffy is taken from tick, 43 * The integer part of the number of usecs per jiffy is taken from tick,
39 * but the fractional part is not recorded, so we calculate it using the 44 * but the fractional part is not recorded, so we calculate it using the
@@ -49,32 +54,27 @@
49 * forward reference 54 * forward reference
50 */ 55 */
51DEFINE_SPINLOCK(rtc_lock); 56DEFINE_SPINLOCK(rtc_lock);
57EXPORT_SYMBOL(rtc_lock);
52 58
53/* 59int __weak rtc_mips_set_time(unsigned long sec)
54 * By default we provide the null RTC ops
55 */
56static unsigned long null_rtc_get_time(void)
57{ 60{
58 return mktime(2000, 1, 1, 0, 0, 0); 61 return 0;
59} 62}
63EXPORT_SYMBOL(rtc_mips_set_time);
60 64
61static int null_rtc_set_time(unsigned long sec) 65int __weak rtc_mips_set_mmss(unsigned long nowtime)
62{ 66{
63 return 0; 67 return rtc_mips_set_time(nowtime);
64} 68}
65 69
66unsigned long (*rtc_mips_get_time)(void) = null_rtc_get_time; 70int update_persistent_clock(struct timespec now)
67int (*rtc_mips_set_time)(unsigned long) = null_rtc_set_time; 71{
68int (*rtc_mips_set_mmss)(unsigned long); 72 return rtc_mips_set_mmss(now.tv_sec);
69 73}
70 74
71/* how many counter cycles in a jiffy */ 75/* how many counter cycles in a jiffy */
72static unsigned long cycles_per_jiffy __read_mostly; 76static unsigned long cycles_per_jiffy __read_mostly;
73 77
74/* expirelo is the count value for next CPU timer interrupt */
75static unsigned int expirelo;
76
77
78/* 78/*
79 * Null timer ack for systems not needing one (e.g. i8254). 79 * Null timer ack for systems not needing one (e.g. i8254).
80 */ 80 */
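
[Editor's note: the function-pointer RTC hooks become __weak functions here. A
platform overrides one simply by defining a strong symbol with the same
signature; the linker prefers it over the weak default (sketch; the board
helper name is hypothetical):

    /* in some board's setup code */
    int rtc_mips_set_time(unsigned long sec)
    {
            return my_board_rtc_write(sec);   /* hypothetical board routine */
    }

The new update_persistent_clock() wrapper is what lets the old ~11-minute RTC
resync block in timer_interrupt() be deleted further down: the generic
timekeeping code calls it on NTP-synced systems instead.]
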
@@ -93,18 +93,7 @@ static cycle_t null_hpt_read(void)
93 */ 93 */
94static void c0_timer_ack(void) 94static void c0_timer_ack(void)
95{ 95{
96 unsigned int count; 96 write_c0_compare(read_c0_compare());
97
98 /* Ack this timer interrupt and set the next one. */
99 expirelo += cycles_per_jiffy;
100 write_c0_compare(expirelo);
101
102 /* Check to see if we have missed any timer interrupts. */
103 while (((count = read_c0_count()) - expirelo) < 0x7fffffff) {
104 /* missed_timer_count++; */
105 expirelo = count + cycles_per_jiffy;
106 write_c0_compare(expirelo);
107 }
108} 97}
109 98
110/* 99/*
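
[Editor's note: c0_timer_ack() shrinks because the expirelo bookkeeping --
advancing Compare a jiffy at a time and catching up on missed interrupts --
moves into the clockevent framework. After this series the division of labour
is (sketch, consolidated from this file's hunks):

    write_c0_compare(read_c0_compare());   /* ack: rewriting Compare clears
                                              the pending timer interrupt */

    /* arming is now per-event, in mips_next_event() near the end of
     * this diff: */
    cnt = read_c0_count() + delta;
    write_c0_compare(cnt);
    if ((long)(read_c0_count() - cnt) > 0)
            return -ETIME;                 /* target already passed; the
                                              clockevent core will retry */

]
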
@@ -115,19 +104,9 @@ static cycle_t c0_hpt_read(void)
115 return read_c0_count(); 104 return read_c0_count();
116} 105}
117 106
118/* For use both as a high precision timer and an interrupt source. */
119static void __init c0_hpt_timer_init(void)
120{
121 expirelo = read_c0_count() + cycles_per_jiffy;
122 write_c0_compare(expirelo);
123}
124
125int (*mips_timer_state)(void); 107int (*mips_timer_state)(void);
126void (*mips_timer_ack)(void); 108void (*mips_timer_ack)(void);
127 109
128/* last time when xtime and rtc are sync'ed up */
129static long last_rtc_update;
130
131/* 110/*
132 * local_timer_interrupt() does profiling and process accounting 111 * local_timer_interrupt() does profiling and process accounting
133 * on a per-CPU basis. 112 * on a per-CPU basis.
@@ -144,60 +123,15 @@ void local_timer_interrupt(int irq, void *dev_id)
144 update_process_times(user_mode(get_irq_regs())); 123 update_process_times(user_mode(get_irq_regs()));
145} 124}
146 125
147/*
148 * High-level timer interrupt service routines. This function
149 * is set as irqaction->handler and is invoked through do_IRQ.
150 */
151irqreturn_t timer_interrupt(int irq, void *dev_id)
152{
153 write_seqlock(&xtime_lock);
154
155 mips_timer_ack();
156
157 /*
158 * call the generic timer interrupt handling
159 */
160 do_timer(1);
161
162 /*
163 * If we have an externally synchronized Linux clock, then update
164 * CMOS clock accordingly every ~11 minutes. rtc_mips_set_time() has to be
165 * called as close as possible to 500 ms before the new second starts.
166 */
167 if (ntp_synced() &&
168 xtime.tv_sec > last_rtc_update + 660 &&
169 (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
170 (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
171 if (rtc_mips_set_mmss(xtime.tv_sec) == 0) {
172 last_rtc_update = xtime.tv_sec;
173 } else {
174 /* do it again in 60 s */
175 last_rtc_update = xtime.tv_sec - 600;
176 }
177 }
178
179 write_sequnlock(&xtime_lock);
180
181 /*
182 * In UP mode, we call local_timer_interrupt() to do profiling
183 * and process accouting.
184 *
185 * In SMP mode, local_timer_interrupt() is invoked by appropriate
186 * low-level local timer interrupt handler.
187 */
188 local_timer_interrupt(irq, dev_id);
189
190 return IRQ_HANDLED;
191}
192
193int null_perf_irq(void) 126int null_perf_irq(void)
194{ 127{
195 return 0; 128 return 0;
196} 129}
197 130
131EXPORT_SYMBOL(null_perf_irq);
132
198int (*perf_irq)(void) = null_perf_irq; 133int (*perf_irq)(void) = null_perf_irq;
199 134
200EXPORT_SYMBOL(null_perf_irq);
201EXPORT_SYMBOL(perf_irq); 135EXPORT_SYMBOL(perf_irq);
202 136
203/* 137/*
@@ -215,7 +149,7 @@ EXPORT_SYMBOL_GPL(cp0_perfcount_irq);
215 * Possibly handle a performance counter interrupt. 149 * Possibly handle a performance counter interrupt.
216 * Return true if the timer interrupt should not be checked 150 * Return true if the timer interrupt should not be checked
217 */ 151 */
218static inline int handle_perf_irq (int r2) 152static inline int handle_perf_irq(int r2)
219{ 153{
220 /* 154 /*
221 * The performance counter overflow interrupt may be shared with the 155 * The performance counter overflow interrupt may be shared with the
@@ -229,63 +163,23 @@ static inline int handle_perf_irq (int r2)
229 !r2; 163 !r2;
230} 164}
231 165
232asmlinkage void ll_timer_interrupt(int irq)
233{
234 int r2 = cpu_has_mips_r2;
235
236 irq_enter();
237 kstat_this_cpu.irqs[irq]++;
238
239 if (handle_perf_irq(r2))
240 goto out;
241
242 if (r2 && ((read_c0_cause() & (1 << 30)) == 0))
243 goto out;
244
245 timer_interrupt(irq, NULL);
246
247out:
248 irq_exit();
249}
250
251asmlinkage void ll_local_timer_interrupt(int irq)
252{
253 irq_enter();
254 if (smp_processor_id() != 0)
255 kstat_this_cpu.irqs[irq]++;
256
257 /* we keep interrupt disabled all the time */
258 local_timer_interrupt(irq, NULL);
259
260 irq_exit();
261}
262
263/* 166/*
264 * time_init() - it does the following things. 167 * time_init() - it does the following things.
265 * 168 *
266 * 1) board_time_init() - 169 * 1) plat_time_init() -
267 * a) (optional) set up RTC routines, 170 * a) (optional) set up RTC routines,
268 * b) (optional) calibrate and set the mips_hpt_frequency 171 * b) (optional) calibrate and set the mips_hpt_frequency
 269 * (only needed if you intend to use the cpu counter as timer interrupt 172 * (only needed if you intend to use the cpu counter as timer interrupt
270 * source) 173 * source)
271 * 2) setup xtime based on rtc_mips_get_time(). 174 * 2) calculate a couple of cached variables for later usage
272 * 3) calculate a couple of cached variables for later usage 175 * 3) plat_timer_setup() -
273 * 4) plat_timer_setup() -
274 * a) (optional) over-write any choices made above by time_init(). 176 * a) (optional) over-write any choices made above by time_init().
275 * b) machine specific code should setup the timer irqaction. 177 * b) machine specific code should setup the timer irqaction.
276 * c) enable the timer interrupt 178 * c) enable the timer interrupt
277 */ 179 */
278 180
279void (*board_time_init)(void);
280
281unsigned int mips_hpt_frequency; 181unsigned int mips_hpt_frequency;
282 182
283static struct irqaction timer_irqaction = {
284 .handler = timer_interrupt,
285 .flags = IRQF_DISABLED | IRQF_PERCPU,
286 .name = "timer",
287};
288
289static unsigned int __init calibrate_hpt(void) 183static unsigned int __init calibrate_hpt(void)
290{ 184{
291 cycle_t frequency, hpt_start, hpt_end, hpt_count, hz; 185 cycle_t frequency, hpt_start, hpt_end, hpt_count, hz;
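The comment block in this hunk now documents plat_time_init() as the per-platform hook for RTC setup and for fixing mips_hpt_frequency; the __weak stub it overrides appears in a later hunk. A sketch of a board-side override, assuming a core whose CP0 count register ticks at half the input clock; MY_BOARD_CPU_HZ is made up for illustration:

#define MY_BOARD_CPU_HZ	200000000	/* assumed 200 MHz core clock */

/* Hypothetical override of the __weak plat_time_init() stub: hardwire
 * mips_hpt_frequency so time_init() never falls back to calibrate_hpt().
 */
void __init plat_time_init(void)
{
	mips_hpt_frequency = MY_BOARD_CPU_HZ / 2;
}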
@@ -334,6 +228,84 @@ struct clocksource clocksource_mips = {
334 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 228 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
335}; 229};
336 230
231static int mips_next_event(unsigned long delta,
232 struct clock_event_device *evt)
233{
234 unsigned int cnt;
235 int res;
236
237#ifdef CONFIG_MIPS_MT_SMTC
238 {
239 unsigned long flags, vpflags;
240 local_irq_save(flags);
241 vpflags = dvpe();
242#endif
243 cnt = read_c0_count();
244 cnt += delta;
245 write_c0_compare(cnt);
 246 res = ((long)(read_c0_count() - cnt) > 0) ? -ETIME : 0;
247#ifdef CONFIG_MIPS_MT_SMTC
248 evpe(vpflags);
249 local_irq_restore(flags);
250 }
251#endif
252 return res;
253}
254
255static void mips_set_mode(enum clock_event_mode mode,
256 struct clock_event_device *evt)
257{
258 /* Nothing to do ... */
259}
260
261static DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
262static int cp0_timer_irq_installed;
263
264static irqreturn_t timer_interrupt(int irq, void *dev_id)
265{
266 const int r2 = cpu_has_mips_r2;
267 struct clock_event_device *cd;
268 int cpu = smp_processor_id();
269
270 /*
271 * Suckage alert:
272 * Before R2 of the architecture there was no way to see if a
273 * performance counter interrupt was pending, so we have to run
274 * the performance counter interrupt handler anyway.
275 */
276 if (handle_perf_irq(r2))
277 goto out;
278
279 /*
280 * The same applies to performance counter interrupts. But with the
281 * above we now know that the reason we got here must be a timer
 282 * interrupt. Being the paranoiacs we are, we check anyway.
283 */
284 if (!r2 || (read_c0_cause() & (1 << 30))) {
285 c0_timer_ack();
286#ifdef CONFIG_MIPS_MT_SMTC
287 if (cpu_data[cpu].vpe_id)
288 goto out;
289 cpu = 0;
290#endif
291 cd = &per_cpu(mips_clockevent_device, cpu);
292 cd->event_handler(cd);
293 }
294
295out:
296 return IRQ_HANDLED;
297}
298
299static struct irqaction timer_irqaction = {
300 .handler = timer_interrupt,
301#ifdef CONFIG_MIPS_MT_SMTC
302 .flags = IRQF_DISABLED,
303#else
304 .flags = IRQF_DISABLED | IRQF_PERCPU,
305#endif
306 .name = "timer",
307};
308
337static void __init init_mips_clocksource(void) 309static void __init init_mips_clocksource(void)
338{ 310{
339 u64 temp; 311 u64 temp;
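mips_next_event() above arms the CP0 compare register at count + delta, then re-reads the count to catch a deadline that has already slipped past, returning -ETIME so the clockevents core can retry with a larger delta. The same guard, reduced to its essentials as a sketch for any free-running 32-bit up-counter; read_counter and write_match are stand-ins, and -ETIME comes from linux/errno.h:

/* After arming the match register, re-read the counter. The signed
 * comparison of the 32-bit difference stays correct across wrap; if
 * the counter already passed the match value the interrupt may never
 * fire, so report -ETIME instead of pretending the timer is armed.
 */
static int arm_oneshot(unsigned int delta,
		       unsigned int (*read_counter)(void),
		       void (*write_match)(unsigned int))
{
	unsigned int match = read_counter() + delta;

	write_match(match);
	return ((int)(read_counter() - match) > 0) ? -ETIME : 0;
}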
@@ -357,19 +329,127 @@ static void __init init_mips_clocksource(void)
357 clocksource_register(&clocksource_mips); 329 clocksource_register(&clocksource_mips);
358} 330}
359 331
360void __init time_init(void) 332void __init __weak plat_time_init(void)
333{
334}
335
336void __init __weak plat_timer_setup(struct irqaction *irq)
337{
338}
339
340#ifdef CONFIG_MIPS_MT_SMTC
341DEFINE_PER_CPU(struct clock_event_device, smtc_dummy_clockevent_device);
342
343static void smtc_set_mode(enum clock_event_mode mode,
344 struct clock_event_device *evt)
345{
346}
347
348int dummycnt[NR_CPUS];
349
350static void mips_broadcast(cpumask_t mask)
351{
352 unsigned int cpu;
353
354 for_each_cpu_mask(cpu, mask)
355 smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
356}
357
358static void setup_smtc_dummy_clockevent_device(void)
359{
 360 //uint64_t mips_freq = mips_hpt_frequency;
361 unsigned int cpu = smp_processor_id();
362 struct clock_event_device *cd;
363
364 cd = &per_cpu(smtc_dummy_clockevent_device, cpu);
365
366 cd->name = "SMTC";
367 cd->features = CLOCK_EVT_FEAT_DUMMY;
368
369 /* Calculate the min / max delta */
370 cd->mult = 0; //div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
371 cd->shift = 0; //32;
372 cd->max_delta_ns = 0; //clockevent_delta2ns(0x7fffffff, cd);
373 cd->min_delta_ns = 0; //clockevent_delta2ns(0x30, cd);
374
375 cd->rating = 200;
376 cd->irq = 17; //-1;
377// if (cpu)
378// cd->cpumask = CPU_MASK_ALL; // cpumask_of_cpu(cpu);
379// else
380 cd->cpumask = cpumask_of_cpu(cpu);
381
382 cd->set_mode = smtc_set_mode;
383
384 cd->broadcast = mips_broadcast;
385
386 clockevents_register_device(cd);
387}
388#endif
389
390static void mips_event_handler(struct clock_event_device *dev)
361{ 391{
362 if (board_time_init) 392}
363 board_time_init();
364 393
365 if (!rtc_mips_set_mmss) 394void __cpuinit mips_clockevent_init(void)
366 rtc_mips_set_mmss = rtc_mips_set_time; 395{
396 uint64_t mips_freq = mips_hpt_frequency;
397 unsigned int cpu = smp_processor_id();
398 struct clock_event_device *cd;
399 unsigned int irq = MIPS_CPU_IRQ_BASE + 7;
367 400
368 xtime.tv_sec = rtc_mips_get_time(); 401 if (!cpu_has_counter)
369 xtime.tv_nsec = 0; 402 return;
370 403
371 set_normalized_timespec(&wall_to_monotonic, 404#ifdef CONFIG_MIPS_MT_SMTC
372 -xtime.tv_sec, -xtime.tv_nsec); 405 setup_smtc_dummy_clockevent_device();
406
407 /*
408 * On SMTC we only register VPE0's compare interrupt as clockevent
409 * device.
410 */
411 if (cpu)
412 return;
413#endif
414
415 cd = &per_cpu(mips_clockevent_device, cpu);
416
417 cd->name = "MIPS";
418 cd->features = CLOCK_EVT_FEAT_ONESHOT;
419
420 /* Calculate the min / max delta */
421 cd->mult = div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
422 cd->shift = 32;
423 cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
424 cd->min_delta_ns = clockevent_delta2ns(0x30, cd);
425
426 cd->rating = 300;
427 cd->irq = irq;
428#ifdef CONFIG_MIPS_MT_SMTC
429 cd->cpumask = CPU_MASK_ALL;
430#else
431 cd->cpumask = cpumask_of_cpu(cpu);
432#endif
433 cd->set_next_event = mips_next_event;
434 cd->set_mode = mips_set_mode;
435 cd->event_handler = mips_event_handler;
436
437 clockevents_register_device(cd);
438
439 if (!cp0_timer_irq_installed) {
440#ifdef CONFIG_MIPS_MT_SMTC
441#define CPUCTR_IMASKBIT (0x100 << cp0_compare_irq)
442 setup_irq_smtc(irq, &timer_irqaction, CPUCTR_IMASKBIT);
443#else
444 setup_irq(irq, &timer_irqaction);
445#endif /* CONFIG_MIPS_MT_SMTC */
446 cp0_timer_irq_installed = 1;
447 }
448}
449
450void __init time_init(void)
451{
452 plat_time_init();
373 453
374 /* Choose appropriate high precision timer routines. */ 454 /* Choose appropriate high precision timer routines. */
375 if (!cpu_has_counter && !clocksource_mips.read) 455 if (!cpu_has_counter && !clocksource_mips.read)
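mips_clockevent_init() in the hunk above derives the clockevents mult/shift pair from mips_hpt_frequency with div_sc(freq, NSEC_PER_SEC, 32), so converting a nanosecond delta to counter ticks needs only a multiply and a shift on the hot path. Worked through for an assumed 100 MHz counter:

/* mult = freq * 2^32 / NSEC_PER_SEC
 *      = 100000000 * 4294967296 / 1000000000
 *      = 429496729 (rounded down)
 */
static inline unsigned long ns_to_ticks(unsigned long ns)
{
	const unsigned long mult = 429496729;	/* assumes a 100 MHz counter */

	return ((unsigned long long)ns * mult) >> 32;
}

With that scale, ns_to_ticks(1000000) yields 99999: one millisecond is about 100000 ticks, as expected at 100 MHz.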
@@ -392,11 +472,6 @@ void __init time_init(void)
392 /* Calculate cache parameters. */ 472 /* Calculate cache parameters. */
393 cycles_per_jiffy = 473 cycles_per_jiffy =
394 (mips_hpt_frequency + HZ / 2) / HZ; 474 (mips_hpt_frequency + HZ / 2) / HZ;
395 /*
396 * This sets up the high precision
397 * timer for the first interrupt.
398 */
399 c0_hpt_timer_init();
400 } 475 }
401 } 476 }
402 if (!mips_hpt_frequency) 477 if (!mips_hpt_frequency)
@@ -406,6 +481,10 @@ void __init time_init(void)
406 printk("Using %u.%03u MHz high precision timer.\n", 481 printk("Using %u.%03u MHz high precision timer.\n",
407 ((mips_hpt_frequency + 500) / 1000) / 1000, 482 ((mips_hpt_frequency + 500) / 1000) / 1000,
408 ((mips_hpt_frequency + 500) / 1000) % 1000); 483 ((mips_hpt_frequency + 500) / 1000) % 1000);
484
485#ifdef CONFIG_IRQ_CPU
486 setup_irq(MIPS_CPU_IRQ_BASE + 7, &timer_irqaction);
487#endif
409 } 488 }
410 489
411 if (!mips_timer_ack) 490 if (!mips_timer_ack)
@@ -426,56 +505,5 @@ void __init time_init(void)
426 plat_timer_setup(&timer_irqaction); 505 plat_timer_setup(&timer_irqaction);
427 506
428 init_mips_clocksource(); 507 init_mips_clocksource();
508 mips_clockevent_init();
429} 509}
430
431#define FEBRUARY 2
432#define STARTOFTIME 1970
433#define SECDAY 86400L
434#define SECYR (SECDAY * 365)
435#define leapyear(y) ((!((y) % 4) && ((y) % 100)) || !((y) % 400))
436#define days_in_year(y) (leapyear(y) ? 366 : 365)
437#define days_in_month(m) (month_days[(m) - 1])
438
439static int month_days[12] = {
440 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
441};
442
443void to_tm(unsigned long tim, struct rtc_time *tm)
444{
445 long hms, day, gday;
446 int i;
447
448 gday = day = tim / SECDAY;
449 hms = tim % SECDAY;
450
451 /* Hours, minutes, seconds are easy */
452 tm->tm_hour = hms / 3600;
453 tm->tm_min = (hms % 3600) / 60;
454 tm->tm_sec = (hms % 3600) % 60;
455
456 /* Number of years in days */
457 for (i = STARTOFTIME; day >= days_in_year(i); i++)
458 day -= days_in_year(i);
459 tm->tm_year = i;
460
461 /* Number of months in days left */
462 if (leapyear(tm->tm_year))
463 days_in_month(FEBRUARY) = 29;
464 for (i = 1; day >= days_in_month(i); i++)
465 day -= days_in_month(i);
466 days_in_month(FEBRUARY) = 28;
467 tm->tm_mon = i - 1; /* tm_mon starts from 0 to 11 */
468
469 /* Days are what is left over (+1) from all that. */
470 tm->tm_mday = day + 1;
471
472 /*
473 * Determine the day of week
474 */
475 tm->tm_wday = (gday + 4) % 7; /* 1970/1/1 was Thursday */
476}
477
478EXPORT_SYMBOL(rtc_lock);
479EXPORT_SYMBOL(to_tm);
480EXPORT_SYMBOL(rtc_mips_set_time);
481EXPORT_SYMBOL(rtc_mips_get_time);
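The block deleted above drops to_tm() and its calendar helpers from time.c. The one non-obvious line was the weekday computation: 1970-01-01 was a Thursday, so (days_since_epoch + 4) % 7 lands on the 0-is-Sunday tm_wday convention. A standalone spot-check of the formula, userspace and for illustration only:

#include <stdio.h>

int main(void)
{
	/* epoch day 0 = 1970-01-01 (Thursday), day 3 = 1970-01-04 (Sunday) */
	printf("%ld %ld\n", (0L + 4) % 7, (3L + 4) % 7);	/* prints: 4 0 */
	return 0;
}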
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 6379003f9d8d..632bce1bf420 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -295,7 +295,8 @@ void show_regs(struct pt_regs *regs)
295 if (1 <= cause && cause <= 5) 295 if (1 <= cause && cause <= 5)
296 printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr); 296 printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr);
297 297
298 printk("PrId : %08x\n", read_c0_prid()); 298 printk("PrId : %08x (%s)\n", read_c0_prid(),
299 cpu_name_string());
299} 300}
300 301
301void show_registers(struct pt_regs *regs) 302void show_registers(struct pt_regs *regs)
@@ -627,7 +628,7 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
627 lose_fpu(1); 628 lose_fpu(1);
628 629
629 /* Run the emulator */ 630 /* Run the emulator */
630 sig = fpu_emulator_cop1Handler (regs, &current->thread.fpu, 1); 631 sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1);
631 632
632 /* 633 /*
633 * We can't allow the emulated instruction to leave any of 634 * We can't allow the emulated instruction to leave any of
@@ -954,7 +955,7 @@ asmlinkage void do_reserved(struct pt_regs *regs)
954 */ 955 */
955static inline void parity_protection_init(void) 956static inline void parity_protection_init(void)
956{ 957{
957 switch (current_cpu_data.cputype) { 958 switch (current_cpu_type()) {
958 case CPU_24K: 959 case CPU_24K:
959 case CPU_34K: 960 case CPU_34K:
960 case CPU_5KC: 961 case CPU_5KC:
@@ -1075,8 +1076,8 @@ void *set_except_vector(int n, void *addr)
1075 1076
1076 exception_handlers[n] = handler; 1077 exception_handlers[n] = handler;
1077 if (n == 0 && cpu_has_divec) { 1078 if (n == 0 && cpu_has_divec) {
1078 *(volatile u32 *)(ebase + 0x200) = 0x08000000 | 1079 *(u32 *)(ebase + 0x200) = 0x08000000 |
1079 (0x03ffffff & (handler >> 2)); 1080 (0x03ffffff & (handler >> 2));
1080 flush_icache_range(ebase + 0x200, ebase + 0x204); 1081 flush_icache_range(ebase + 0x200, ebase + 0x204);
1081 } 1082 }
1082 return (void *)old_handler; 1083 return (void *)old_handler;
@@ -1165,11 +1166,11 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
1165 1166
1166 if (cpu_has_veic) { 1167 if (cpu_has_veic) {
1167 if (board_bind_eic_interrupt) 1168 if (board_bind_eic_interrupt)
1168 board_bind_eic_interrupt (n, srs); 1169 board_bind_eic_interrupt(n, srs);
1169 } else if (cpu_has_vint) { 1170 } else if (cpu_has_vint) {
1170 /* SRSMap is only defined if shadow sets are implemented */ 1171 /* SRSMap is only defined if shadow sets are implemented */
1171 if (mips_srs_max() > 1) 1172 if (mips_srs_max() > 1)
1172 change_c0_srsmap (0xf << n*4, srs << n*4); 1173 change_c0_srsmap(0xf << n*4, srs << n*4);
1173 } 1174 }
1174 1175
1175 if (srs == 0) { 1176 if (srs == 0) {
@@ -1198,10 +1199,10 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
 1198 * Sigh... panicking won't help as the console 1199 * Sigh... panicking won't help as the console
1199 * is probably not configured :( 1200 * is probably not configured :(
1200 */ 1201 */
1201 panic ("VECTORSPACING too small"); 1202 panic("VECTORSPACING too small");
1202 } 1203 }
1203 1204
1204 memcpy (b, &except_vec_vi, handler_len); 1205 memcpy(b, &except_vec_vi, handler_len);
1205#ifdef CONFIG_MIPS_MT_SMTC 1206#ifdef CONFIG_MIPS_MT_SMTC
1206 BUG_ON(n > 7); /* Vector index %d exceeds SMTC maximum. */ 1207 BUG_ON(n > 7); /* Vector index %d exceeds SMTC maximum. */
1207 1208
@@ -1370,9 +1371,9 @@ void __init per_cpu_trap_init(void)
1370#endif /* CONFIG_MIPS_MT_SMTC */ 1371#endif /* CONFIG_MIPS_MT_SMTC */
1371 1372
1372 if (cpu_has_veic || cpu_has_vint) { 1373 if (cpu_has_veic || cpu_has_vint) {
1373 write_c0_ebase (ebase); 1374 write_c0_ebase(ebase);
1374 /* Setting vector spacing enables EI/VI mode */ 1375 /* Setting vector spacing enables EI/VI mode */
1375 change_c0_intctl (0x3e0, VECTORSPACING); 1376 change_c0_intctl(0x3e0, VECTORSPACING);
1376 } 1377 }
1377 if (cpu_has_divec) { 1378 if (cpu_has_divec) {
1378 if (cpu_has_mipsmt) { 1379 if (cpu_has_mipsmt) {
@@ -1390,8 +1391,8 @@ void __init per_cpu_trap_init(void)
1390 * o read IntCtl.IPPCI to determine the performance counter interrupt 1391 * o read IntCtl.IPPCI to determine the performance counter interrupt
1391 */ 1392 */
1392 if (cpu_has_mips_r2) { 1393 if (cpu_has_mips_r2) {
1393 cp0_compare_irq = (read_c0_intctl () >> 29) & 7; 1394 cp0_compare_irq = (read_c0_intctl() >> 29) & 7;
1394 cp0_perfcount_irq = (read_c0_intctl () >> 26) & 7; 1395 cp0_perfcount_irq = (read_c0_intctl() >> 26) & 7;
1395 if (cp0_perfcount_irq == cp0_compare_irq) 1396 if (cp0_perfcount_irq == cp0_compare_irq)
1396 cp0_perfcount_irq = -1; 1397 cp0_perfcount_irq = -1;
1397 } else { 1398 } else {
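On R2 cores the hunk above decodes IntCtl to learn which hardware interrupt lines carry the timer (the IPTI field, bits 31:29) and the performance counter (IPPCI, bits 28:26). A worked decode as a kernel-context sketch; the register value is made up for illustration:

/* Example: intctl = 0xfc000000
 *   IPTI  = (0xfc000000 >> 29) & 7 = 7  -> timer on HW interrupt 7
 *   IPPCI = (0xfc000000 >> 26) & 7 = 7  -> perf counter shares line 7
 * The two fields collide here, which is exactly the case the code
 * above handles by setting cp0_perfcount_irq to -1.
 */
static void decode_intctl(unsigned int intctl)
{
	int ipti  = (intctl >> 29) & 7;
	int ippci = (intctl >> 26) & 7;

	printk(KERN_DEBUG "timer irq %d, perfcount irq %d\n", ipti, ippci);
}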
@@ -1429,14 +1430,17 @@ void __init per_cpu_trap_init(void)
1429} 1430}
1430 1431
1431/* Install CPU exception handler */ 1432/* Install CPU exception handler */
1432void __init set_handler (unsigned long offset, void *addr, unsigned long size) 1433void __init set_handler(unsigned long offset, void *addr, unsigned long size)
1433{ 1434{
1434 memcpy((void *)(ebase + offset), addr, size); 1435 memcpy((void *)(ebase + offset), addr, size);
1435 flush_icache_range(ebase + offset, ebase + offset + size); 1436 flush_icache_range(ebase + offset, ebase + offset + size);
1436} 1437}
1437 1438
1439static char panic_null_cerr[] __initdata =
1440 "Trying to set NULL cache error exception handler";
1441
1438/* Install uncached CPU exception handler */ 1442/* Install uncached CPU exception handler */
1439void __init set_uncached_handler (unsigned long offset, void *addr, unsigned long size) 1443void __init set_uncached_handler(unsigned long offset, void *addr, unsigned long size)
1440{ 1444{
1441#ifdef CONFIG_32BIT 1445#ifdef CONFIG_32BIT
1442 unsigned long uncached_ebase = KSEG1ADDR(ebase); 1446 unsigned long uncached_ebase = KSEG1ADDR(ebase);
@@ -1445,6 +1449,9 @@ void __init set_uncached_handler (unsigned long offset, void *addr, unsigned lon
1445 unsigned long uncached_ebase = TO_UNCAC(ebase); 1449 unsigned long uncached_ebase = TO_UNCAC(ebase);
1446#endif 1450#endif
1447 1451
1452 if (!addr)
1453 panic(panic_null_cerr);
1454
1448 memcpy((void *)(uncached_ebase + offset), addr, size); 1455 memcpy((void *)(uncached_ebase + offset), addr, size);
1449} 1456}
1450 1457
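set_uncached_handler() writes the cache error vector through an uncached alias of ebase: on 32-bit MIPS, KSEG1ADDR() re-bases the same physical memory into the uncached kseg1 window. The address arithmetic behind it, sketched under the standard 32-bit segment map; MY_KSEG1ADDR mirrors the real KSEG1ADDR from asm/addrspace.h:

/* kseg0 (0x80000000, cached) and kseg1 (0xa0000000, uncached) are two
 * views of the same physical memory, so the conversion is pure bit
 * surgery: strip the segment bits, then OR in the kseg1 base.
 */
#define MY_KSEG1ADDR(a)	(((unsigned long)(a) & 0x1fffffff) | 0xa0000000UL)

/* e.g. a cached ebase of 0x80000000 aliases to 0xa0000000 uncached */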
@@ -1464,7 +1471,7 @@ void __init trap_init(void)
1464 unsigned long i; 1471 unsigned long i;
1465 1472
1466 if (cpu_has_veic || cpu_has_vint) 1473 if (cpu_has_veic || cpu_has_vint)
1467 ebase = (unsigned long) alloc_bootmem_low_pages (0x200 + VECTORSPACING*64); 1474 ebase = (unsigned long) alloc_bootmem_low_pages(0x200 + VECTORSPACING*64);
1468 else 1475 else
1469 ebase = CAC_BASE; 1476 ebase = CAC_BASE;
1470 1477
@@ -1490,7 +1497,7 @@ void __init trap_init(void)
1490 * destination. 1497 * destination.
1491 */ 1498 */
1492 if (cpu_has_ejtag && board_ejtag_handler_setup) 1499 if (cpu_has_ejtag && board_ejtag_handler_setup)
1493 board_ejtag_handler_setup (); 1500 board_ejtag_handler_setup();
1494 1501
1495 /* 1502 /*
1496 * Only some CPUs have the watch exceptions. 1503 * Only some CPUs have the watch exceptions.
@@ -1543,8 +1550,8 @@ void __init trap_init(void)
1543 set_except_vector(12, handle_ov); 1550 set_except_vector(12, handle_ov);
1544 set_except_vector(13, handle_tr); 1551 set_except_vector(13, handle_tr);
1545 1552
1546 if (current_cpu_data.cputype == CPU_R6000 || 1553 if (current_cpu_type() == CPU_R6000 ||
1547 current_cpu_data.cputype == CPU_R6000A) { 1554 current_cpu_type() == CPU_R6000A) {
1548 /* 1555 /*
1549 * The R6000 is the only R-series CPU that features a machine 1556 * The R6000 is the only R-series CPU that features a machine
1550 * check exception (similar to the R4000 cache error) and 1557 * check exception (similar to the R4000 cache error) and
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index d34b1fb3665d..c327b21bca81 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -481,7 +481,7 @@ fault:
481 if (fixup_exception(regs)) 481 if (fixup_exception(regs))
482 return; 482 return;
483 483
484 die_if_kernel ("Unhandled kernel unaligned access", regs); 484 die_if_kernel("Unhandled kernel unaligned access", regs);
485 send_sig(SIGSEGV, current, 1); 485 send_sig(SIGSEGV, current, 1);
486 486
487 return; 487 return;
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index 087ab997487d..84f9a4cc6f2f 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -6,163 +6,202 @@
6OUTPUT_ARCH(mips) 6OUTPUT_ARCH(mips)
7ENTRY(kernel_entry) 7ENTRY(kernel_entry)
8jiffies = JIFFIES; 8jiffies = JIFFIES;
9
9SECTIONS 10SECTIONS
10{ 11{
11#ifdef CONFIG_BOOT_ELF64 12#ifdef CONFIG_BOOT_ELF64
12 /* Read-only sections, merged into text segment: */ 13 /* Read-only sections, merged into text segment: */
13 /* . = 0xc000000000000000; */ 14 /* . = 0xc000000000000000; */
14 15
15 /* This is the value for an Origin kernel, taken from an IRIX kernel. */ 16 /* This is the value for an Origin kernel, taken from an IRIX kernel. */
16 /* . = 0xc00000000001c000; */ 17 /* . = 0xc00000000001c000; */
17 18
18 /* Set the vaddr for the text segment to a value 19 /* Set the vaddr for the text segment to a value
 19 >= 0xa800 0000 0001 9000 if no symmon is going to be configured 20 * >= 0xa800 0000 0001 9000 if no symmon is going to be configured
20 >= 0xa800 0000 0030 0000 otherwise */ 21 * >= 0xa800 0000 0030 0000 otherwise
22 */
21 23
22 /* . = 0xa800000000300000; */ 24 /* . = 0xa800000000300000; */
23 /* . = 0xa800000000300000; */ 25 /* . = 0xa800000000300000; */
24 . = 0xffffffff80300000; 26 . = 0xffffffff80300000;
25#endif 27#endif
26 . = LOADADDR; 28 . = LOADADDR;
27 /* read-only */ 29 /* read-only */
28 _text = .; /* Text and read-only data */ 30 _text = .; /* Text and read-only data */
29 .text : { 31 .text : {
30 TEXT_TEXT 32 TEXT_TEXT
31 SCHED_TEXT 33 SCHED_TEXT
32 LOCK_TEXT 34 LOCK_TEXT
33 *(.fixup) 35 *(.fixup)
34 *(.gnu.warning) 36 *(.gnu.warning)
35 } =0 37 } =0
36 38 _etext = .; /* End of text section */
37 _etext = .; /* End of text section */ 39
38 40 /* Exception table */
39 . = ALIGN(16); /* Exception table */ 41 . = ALIGN(16);
40 __start___ex_table = .; 42 __ex_table : {
41 __ex_table : { *(__ex_table) } 43 __start___ex_table = .;
42 __stop___ex_table = .; 44 *(__ex_table)
43 45 __stop___ex_table = .;
44 __start___dbe_table = .; /* Exception table for data bus errors */ 46 }
45 __dbe_table : { *(__dbe_table) } 47
46 __stop___dbe_table = .; 48 /* Exception table for data bus errors */
47 49 __dbe_table : {
48 NOTES 50 __start___dbe_table = .;
49 51 *(__dbe_table)
50 RODATA 52 __stop___dbe_table = .;
51 53 }
52 /* writeable */ 54 RODATA
53 .data : { /* Data */ 55
54 . = . + DATAOFFSET; /* for CONFIG_MAPPED_KERNEL */ 56 /* writeable */
55 /* 57 .data : { /* Data */
 56 * This ALIGN is needed as a workaround for a gcc bug up to 4.1 which 59 /*
 57 * limits the maximum alignment to at most 32kB and results in the following 60 * This ALIGN is needed as a workaround for a gcc bug up to 4.1 which
58 * warning: 60 * This ALIGN is needed as a workaround for a bug a gcc bug upto 4.1 which
59 * 61 * limits the maximum alignment to at most 32kB and results in the following
60 * CC arch/mips/kernel/init_task.o 62 * warning:
61 * arch/mips/kernel/init_task.c:30: warning: alignment of ‘init_thread_union’ 63 *
62 * is greater than maximum object file alignment. Using 32768 64 * CC arch/mips/kernel/init_task.o
63 */ 65 * arch/mips/kernel/init_task.c:30: warning: alignment of ‘init_thread_union’
64 . = ALIGN(_PAGE_SIZE); 66 * is greater than maximum object file alignment. Using 32768
65 *(.data.init_task) 67 */
66 68 . = ALIGN(_PAGE_SIZE);
67 DATA_DATA 69 *(.data.init_task)
68 70
69 CONSTRUCTORS 71 DATA_DATA
70 } 72 CONSTRUCTORS
71 _gp = . + 0x8000; 73 }
72 .lit8 : { *(.lit8) } 74 _gp = . + 0x8000;
73 .lit4 : { *(.lit4) } 75 .lit8 : {
74 /* We want the small data sections together, so single-instruction offsets 76 *(.lit8)
75 can access them all, and initialized data all before uninitialized, so 77 }
76 we can shorten the on-disk segment size. */ 78 .lit4 : {
77 .sdata : { *(.sdata) } 79 *(.lit4)
78 80 }
79 . = ALIGN(_PAGE_SIZE); 81 /* We want the small data sections together, so single-instruction offsets
80 __nosave_begin = .; 82 can access them all, and initialized data all before uninitialized, so
81 .data_nosave : { *(.data.nosave) } 83 we can shorten the on-disk segment size. */
82 . = ALIGN(_PAGE_SIZE); 84 .sdata : {
83 __nosave_end = .; 85 *(.sdata)
84 86 }
85 . = ALIGN(32); 87
86 .data.cacheline_aligned : { *(.data.cacheline_aligned) } 88 . = ALIGN(_PAGE_SIZE);
87 89 .data_nosave : {
88 _edata = .; /* End of data section */ 90 __nosave_begin = .;
89 91 *(.data.nosave)
90 /* will be freed after init */ 92 }
91 . = ALIGN(_PAGE_SIZE); /* Init code and data */ 93 . = ALIGN(_PAGE_SIZE);
92 __init_begin = .; 94 __nosave_end = .;
93 .init.text : { 95
94 _sinittext = .; 96 . = ALIGN(32);
95 *(.init.text) 97 .data.cacheline_aligned : {
96 _einittext = .; 98 *(.data.cacheline_aligned)
97 } 99 }
98 .init.data : { *(.init.data) } 100 _edata = .; /* End of data section */
99 . = ALIGN(16); 101
100 __setup_start = .; 102 /* will be freed after init */
101 .init.setup : { *(.init.setup) } 103 . = ALIGN(_PAGE_SIZE); /* Init code and data */
102 __setup_end = .; 104 __init_begin = .;
103 105 .init.text : {
104 __initcall_start = .; 106 _sinittext = .;
105 .initcall.init : { 107 *(.init.text)
106 INITCALLS 108 _einittext = .;
107 } 109 }
108 __initcall_end = .; 110 .init.data : {
109 111 *(.init.data)
110 __con_initcall_start = .; 112 }
111 .con_initcall.init : { *(.con_initcall.init) } 113 . = ALIGN(16);
112 __con_initcall_end = .; 114 .init.setup : {
113 SECURITY_INIT 115 __setup_start = .;
114 /* .exit.text is discarded at runtime, not link time, to deal with 116 *(.init.setup)
115 references from .rodata */ 117 __setup_end = .;
116 .exit.text : { *(.exit.text) } 118 }
117 .exit.data : { *(.exit.data) } 119
120 .initcall.init : {
121 __initcall_start = .;
122 INITCALLS
123 __initcall_end = .;
124 }
125
126 .con_initcall.init : {
127 __con_initcall_start = .;
128 *(.con_initcall.init)
129 __con_initcall_end = .;
130 }
131 SECURITY_INIT
132
133 /* .exit.text is discarded at runtime, not link time, to deal with
134 * references from .rodata
135 */
136 .exit.text : {
137 *(.exit.text)
138 }
139 .exit.data : {
140 *(.exit.data)
141 }
118#if defined(CONFIG_BLK_DEV_INITRD) 142#if defined(CONFIG_BLK_DEV_INITRD)
119 . = ALIGN(_PAGE_SIZE); 143 . = ALIGN(_PAGE_SIZE);
120 __initramfs_start = .; 144 .init.ramfs : {
121 .init.ramfs : { *(.init.ramfs) } 145 __initramfs_start = .;
122 __initramfs_end = .; 146 *(.init.ramfs)
147 __initramfs_end = .;
148 }
123#endif 149#endif
124 PERCPU(_PAGE_SIZE) 150 PERCPU(_PAGE_SIZE)
125 . = ALIGN(_PAGE_SIZE); 151 . = ALIGN(_PAGE_SIZE);
126 __init_end = .; 152 __init_end = .;
127 /* freed after init ends here */ 153 /* freed after init ends here */
128 154
129 __bss_start = .; /* BSS */ 155 __bss_start = .; /* BSS */
130 .sbss : { 156 .sbss : {
131 *(.sbss) 157 *(.sbss)
132 *(.scommon) 158 *(.scommon)
133 } 159 }
134 .bss : { 160 .bss : {
135 *(.bss) 161 *(.bss)
136 *(COMMON) 162 *(COMMON)
137 } 163 }
138 __bss_stop = .; 164 __bss_stop = .;
139 165
140 _end = . ; 166 _end = . ;
141 167
142 /* Sections to be discarded */ 168 /* Sections to be discarded */
143 /DISCARD/ : { 169 /DISCARD/ : {
144 *(.exitcall.exit) 170 *(.exitcall.exit)
145 171
146 /* ABI crap starts here */ 172 /* ABI crap starts here */
147 *(.MIPS.options) 173 *(.MIPS.options)
148 *(.options) 174 *(.options)
149 *(.pdr) 175 *(.pdr)
150 *(.reginfo) 176 *(.reginfo)
151 } 177 }
152 178
153 /* These mark the ABI of the kernel for debuggers. */ 179 /* These mark the ABI of the kernel for debuggers. */
154 .mdebug.abi32 : { KEEP(*(.mdebug.abi32)) } 180 .mdebug.abi32 : {
155 .mdebug.abi64 : { KEEP(*(.mdebug.abi64)) } 181 KEEP(*(.mdebug.abi32))
156 182 }
157 /* This is the MIPS specific mdebug section. */ 183 .mdebug.abi64 : {
158 .mdebug : { *(.mdebug) } 184 KEEP(*(.mdebug.abi64))
159 185 }
160 STABS_DEBUG 186
161 187 /* This is the MIPS specific mdebug section. */
162 DWARF_DEBUG 188 .mdebug : {
163 189 *(.mdebug)
164 /* These must appear regardless of . */ 190 }
165 .gptab.sdata : { *(.gptab.data) *(.gptab.sdata) } 191
166 .gptab.sbss : { *(.gptab.bss) *(.gptab.sbss) } 192 STABS_DEBUG
167 .note : { *(.note) } 193 DWARF_DEBUG
194
195 /* These must appear regardless of . */
196 .gptab.sdata : {
197 *(.gptab.data)
198 *(.gptab.sdata)
199 }
200 .gptab.sbss : {
201 *(.gptab.bss)
202 *(.gptab.sbss)
203 }
204 .note : {
205 *(.note)
206 }
168} 207}
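The rewritten script defines delimiter symbols such as __initramfs_start and __initramfs_end inside the output sections they bound, so linker-inserted padding can no longer separate a symbol from its section. On the C side these symbols carry no storage, only an address, which is why they are conventionally declared as opaque arrays. A minimal sketch of the consuming side, using the names from the script above:

/* Provided by vmlinux.lds.S; only the addresses are meaningful. */
extern char __initramfs_start[], __initramfs_end[];

static unsigned long initramfs_bytes(void)
{
	return __initramfs_end - __initramfs_start;
}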
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index 3c09b9785f4c..61b729fa0548 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -936,8 +936,18 @@ static int vpe_elfload(struct vpe * v)
936 936
937 } 937 }
938 } else { 938 } else {
939 for (i = 0; i < hdr->e_shnum; i++) { 939 struct elf_phdr *phdr = (struct elf_phdr *) ((char *)hdr + hdr->e_phoff);
940 940
941 for (i = 0; i < hdr->e_phnum; i++) {
942 if (phdr->p_type != PT_LOAD)
943 continue;
944
945 memcpy((void *)phdr->p_vaddr, (char *)hdr + phdr->p_offset, phdr->p_filesz);
946 memset((void *)phdr->p_vaddr + phdr->p_filesz, 0, phdr->p_memsz - phdr->p_filesz);
947 phdr++;
948 }
949
950 for (i = 0; i < hdr->e_shnum; i++) {
941 /* Internal symbols and strings. */ 951 /* Internal symbols and strings. */
942 if (sechdrs[i].sh_type == SHT_SYMTAB) { 952 if (sechdrs[i].sh_type == SHT_SYMTAB) {
943 symindex = i; 953 symindex = i;
@@ -948,39 +958,6 @@ static int vpe_elfload(struct vpe * v)
948 magic symbols */ 958 magic symbols */
949 sechdrs[i].sh_addr = (size_t) hdr + sechdrs[i].sh_offset; 959 sechdrs[i].sh_addr = (size_t) hdr + sechdrs[i].sh_offset;
950 } 960 }
951
 952 /* filter sections we don't want in the final image */
953 if (!(sechdrs[i].sh_flags & SHF_ALLOC) ||
954 (sechdrs[i].sh_type == SHT_MIPS_REGINFO)) {
955 printk( KERN_DEBUG " ignoring section, "
956 "name %s type %x address 0x%x \n",
957 secstrings + sechdrs[i].sh_name,
958 sechdrs[i].sh_type, sechdrs[i].sh_addr);
959 continue;
960 }
961
962 if (sechdrs[i].sh_addr < (unsigned int)v->load_addr) {
963 printk( KERN_WARNING "VPE loader: "
964 "fully linked image has invalid section, "
965 "name %s type %x address 0x%x, before load "
966 "address of 0x%x\n",
967 secstrings + sechdrs[i].sh_name,
968 sechdrs[i].sh_type, sechdrs[i].sh_addr,
969 (unsigned int)v->load_addr);
970 return -ENOEXEC;
971 }
972
973 printk(KERN_DEBUG " copying section sh_name %s, sh_addr 0x%x "
 974 "size 0x%x from 0x%p\n",
975 secstrings + sechdrs[i].sh_name, sechdrs[i].sh_addr,
976 sechdrs[i].sh_size, hdr + sechdrs[i].sh_offset);
977
978 if (sechdrs[i].sh_type != SHT_NOBITS)
979 memcpy((void *)sechdrs[i].sh_addr,
980 (char *)hdr + sechdrs[i].sh_offset,
981 sechdrs[i].sh_size);
982 else
983 memset((void *)sechdrs[i].sh_addr, 0, sechdrs[i].sh_size);
984 } 961 }
985 } 962 }
986 963
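The reimplemented loader above copies each PT_LOAD segment straight from the program headers: p_filesz bytes come from the file image, and the trailing p_memsz - p_filesz bytes are the segment's BSS, which exists only in memory and must be zeroed. The same loop as a generic annotated sketch; address validation is omitted, and note that this version advances phdr on every iteration, whereas the loop in the hunk above only advances it after a PT_LOAD segment:

#include <elf.h>
#include <string.h>

static void load_segments(const Elf32_Ehdr *hdr)
{
	const Elf32_Phdr *phdr =
		(const Elf32_Phdr *)((const char *)hdr + hdr->e_phoff);
	int i;

	for (i = 0; i < hdr->e_phnum; i++, phdr++) {
		if (phdr->p_type != PT_LOAD)
			continue;
		/* file-backed part of the segment */
		memcpy((void *)phdr->p_vaddr,
		       (const char *)hdr + phdr->p_offset, phdr->p_filesz);
		/* zero the BSS tail (p_memsz >= p_filesz) */
		memset((char *)phdr->p_vaddr + phdr->p_filesz, 0,
		       phdr->p_memsz - phdr->p_filesz);
	}
}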
@@ -1044,7 +1021,7 @@ static int getcwd(char *buff, int size)
1044 old_fs = get_fs(); 1021 old_fs = get_fs();
1045 set_fs(KERNEL_DS); 1022 set_fs(KERNEL_DS);
1046 1023
1047 ret = sys_getcwd(buff,size); 1024 ret = sys_getcwd(buff, size);
1048 1025
1049 set_fs(old_fs); 1026 set_fs(old_fs);
1050 1027
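The final hunk tidies the classic set_fs() bracket: sys_getcwd() validates its buffer against the current address limit, so kernel code passing it a kernel pointer must widen the limit first and restore it afterwards without fail. The pattern in isolation, restating what the code above does:

/* Widen the address limit so the syscall's user-pointer checks accept
 * a kernel buffer, then always restore the saved limit.
 */
static int kernel_getcwd(char *buff, int size)
{
	mm_segment_t old_fs = get_fs();
	int ret;

	set_fs(KERNEL_DS);
	ret = sys_getcwd(buff, size);
	set_fs(old_fs);

	return ret;
}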