author	Barry Song <barry.song@analog.com>	2010-08-12 03:56:40 -0400
committer	Mike Frysinger <vapier@gentoo.org>	2010-10-22 03:48:55 -0400
commit	33ded95b1ca5ebd27a762cabaee63f50eb46dcd6 (patch)
tree	fe12da8aab11c04b7062be41efbdf75358bee50d /arch/blackfin
parent	c0ab9387456d7660dead96ce1b5463741f087db1 (diff)
Blackfin: initial preempt support while returning from interrupt
Signed-off-by: Barry Song <barry.song@analog.com>
Signed-off-by: Mike Frysinger <vapier@gentoo.org>
Diffstat (limited to 'arch/blackfin')
-rw-r--r--	arch/blackfin/mach-common/entry.S	112
-rw-r--r--	arch/blackfin/mach-common/interrupt.S	17
2 files changed, 126 insertions(+), 3 deletions(-)
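
What the patch adds, in one sentence: on the path back from an interrupt into kernel code, preempt the interrupted task when (and only when) the thread's preempt count is zero and TIF_NEED_RESCHED is set, by calling preempt_schedule_irq. The snippet below is a minimal userspace C model of just that decision; the names thread_info_model and should_preempt_on_irq_return are made up for illustration. It sketches the rule, not the kernel code -- the real check is the Blackfin assembly in _resume_kernel_from_int further down.

/* Userspace model of the preempt-on-return decision; names are stand-ins. */
#include <stdbool.h>
#include <stdio.h>

struct thread_info_model {
	int preempt_count;	/* non-zero means preemption is currently off */
	bool need_resched;	/* models the TIF_NEED_RESCHED flag */
};

static bool should_preempt_on_irq_return(const struct thread_info_model *ti)
{
	if (ti->preempt_count != 0)
		return false;		/* mirrors: cc = r7 == 0x0; if !cc jump ... */
	return ti->need_resched;	/* mirrors: cc = BITTST(r7, TIF_NEED_RESCHED) */
}

int main(void)
{
	struct thread_info_model ti = { .preempt_count = 0, .need_resched = true };

	if (should_preempt_on_irq_return(&ti))
		printf("would call preempt_schedule_irq() before returning\n");
	else
		printf("would return straight to the interrupted kernel code\n");
	return 0;
}

Built with any C99 compiler this prints the first branch for the example values; flip either field to see the bail-out path.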
diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S
index af1bffa21dc1..16ea779c3a6f 100644
--- a/arch/blackfin/mach-common/entry.S
+++ b/arch/blackfin/mach-common/entry.S
@@ -889,6 +889,66 @@ ENTRY(_ret_from_exception)
 	rts;
 ENDPROC(_ret_from_exception)
 
+#if defined(CONFIG_PREEMPT)
+
+ENTRY(_up_to_irq14)
+#if ANOMALY_05000281 || ANOMALY_05000461
+	r0.l = lo(SAFE_USER_INSTRUCTION);
+	r0.h = hi(SAFE_USER_INSTRUCTION);
+	reti = r0;
+#endif
+
+#ifdef CONFIG_DEBUG_HWERR
+	/* enable irq14 & hwerr interrupt, until we transition to _evt_evt14 */
+	r0 = (EVT_IVG14 | EVT_IVHW | EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
+#else
+	/* Only enable irq14 interrupt, until we transition to _evt_evt14 */
+	r0 = (EVT_IVG14 | EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
+#endif
+	sti r0;
+
+	p0.l = lo(EVT14);
+	p0.h = hi(EVT14);
+	p1.l = _evt_up_evt14;
+	p1.h = _evt_up_evt14;
+	[p0] = p1;
+	csync;
+
+	raise 14;
+1:
+	jump 1b;
+ENDPROC(_up_to_irq14)
+
+ENTRY(_evt_up_evt14)
+#ifdef CONFIG_DEBUG_HWERR
+	r0 = (EVT_IVHW | EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
+	sti r0;
+#else
+	cli r0;
+#endif
+#ifdef CONFIG_TRACE_IRQFLAGS
+	[--sp] = rets;
+	sp += -12;
+	call _trace_hardirqs_off;
+	sp += 12;
+	rets = [sp++];
+#endif
+	[--sp] = RETI;
+	SP += 4;
+
+	/* restore normal evt14 */
+	p0.l = lo(EVT14);
+	p0.h = hi(EVT14);
+	p1.l = _evt_evt14;
+	p1.h = _evt_evt14;
+	[p0] = p1;
+	csync;
+
+	rts;
+ENDPROC(_evt_up_evt14)
+
+#endif
+
 #ifdef CONFIG_IPIPE
 
 _resume_kernel_from_int:
@@ -902,8 +962,54 @@ _resume_kernel_from_int:
 	( r7:4, p5:3 ) = [sp++];
 	rets = [sp++];
 	rts
+#elif defined(CONFIG_PREEMPT)
+
+_resume_kernel_from_int:
+	/* check preempt_count */
+	r7 = sp;
+	r4.l = lo(ALIGN_PAGE_MASK);
+	r4.h = hi(ALIGN_PAGE_MASK);
+	r7 = r7 & r4;
+	p5 = r7;
+	r7 = [p5 + TI_PREEMPT];
+	cc = r7 == 0x0;
+	if !cc jump .Lreturn_to_kernel;
+.Lneed_schedule:
+	r7 = [p5 + TI_FLAGS];
+	r4.l = lo(_TIF_WORK_MASK);
+	r4.h = hi(_TIF_WORK_MASK);
+	r7 = r7 & r4;
+	cc = BITTST(r7, TIF_NEED_RESCHED);
+	if !cc jump .Lreturn_to_kernel;
+	/*
+	 * let schedule done at level 15, otherwise sheduled process will run
+	 * at high level and block low level interrupt
+	 */
+	r6 = reti;  /* save reti */
+	r5.l = .Lkernel_schedule;
+	r5.h = .Lkernel_schedule;
+	reti = r5;
+	rti;
+.Lkernel_schedule:
+	[--sp] = rets;
+	sp += -12;
+	pseudo_long_call _preempt_schedule_irq, p4;
+	sp += 12;
+	rets = [sp++];
+
+	[--sp] = rets;
+	sp += -12;
+	/* up to irq14 so that reti after restore_all can return to irq15(kernel) */
+	pseudo_long_call _up_to_irq14, p4;
+	sp += 12;
+	rets = [sp++];
+
+	reti = r6; /* restore reti so that origin process can return to interrupted point */
+
+	jump .Lneed_schedule;
 #else
-#define _resume_kernel_from_int	2f
+
+#define _resume_kernel_from_int	.Lreturn_to_kernel
 #endif
 
 ENTRY(_return_from_int)
@@ -913,7 +1019,7 @@ ENTRY(_return_from_int)
 	p2.h = hi(ILAT);
 	r0 = [p2];
 	cc = bittst (r0, EVT_IVG15_P);
-	if cc jump 2f;
+	if cc jump .Lreturn_to_kernel;
 
 	/* if not return to user mode, get out */
 	p2.l = lo(IPEND);
@@ -945,7 +1051,7 @@ ENTRY(_return_from_int)
 	STI r0;
 	raise 15;	/* raise evt15 to do signal or reschedule */
 	rti;
-2:
+.Lreturn_to_kernel:
 	rts;
 ENDPROC(_return_from_int)
 
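
A note on the level games in the hunk above, hedged since the commit message does not spell them out: the in-code comment says scheduling has to happen at level 15 (the lowest-priority event level, where normal Blackfin kernel code runs), otherwise the freshly scheduled task would keep running at interrupt priority and block lower-priority interrupts. Redirecting reti to .Lkernel_schedule and executing rti drops to that level before preempt_schedule_irq is called, and _up_to_irq14 then bounces through IRQ14 so the later return lands back in level-15 kernel context. The C fragment below models only the "never schedule above the kernel level" rule; the level constants and function name are illustrative, not a real API.

/* Model of the "schedule only at the kernel event level" rule. */
#include <stdbool.h>
#include <stdio.h>

#define EVT_LEVEL_IRQ		14	/* interrupt priority, higher than the kernel level */
#define EVT_LEVEL_KERNEL	15	/* lowest-priority level, normal kernel code */

static bool may_schedule_at(int level)
{
	/*
	 * Scheduling while still at interrupt priority would let the picked
	 * task run with lower-priority interrupts masked, so allow it only
	 * after dropping to the kernel level (what the rti into
	 * .Lkernel_schedule achieves in the real code).
	 */
	return level == EVT_LEVEL_KERNEL;
}

int main(void)
{
	printf("at level %d: may schedule? %d\n", EVT_LEVEL_IRQ, may_schedule_at(EVT_LEVEL_IRQ));
	printf("at level %d: may schedule? %d\n", EVT_LEVEL_KERNEL, may_schedule_at(EVT_LEVEL_KERNEL));
	return 0;
}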
diff --git a/arch/blackfin/mach-common/interrupt.S b/arch/blackfin/mach-common/interrupt.S
index cee62cf4acd4..2df37db3b49b 100644
--- a/arch/blackfin/mach-common/interrupt.S
+++ b/arch/blackfin/mach-common/interrupt.S
@@ -116,7 +116,24 @@ __common_int_entry:
 	cc = r0 == 0;
 	if cc jump .Lcommon_restore_context;
 #else /* CONFIG_IPIPE */
+
+#ifdef CONFIG_PREEMPT
+	r7 = sp;
+	r4.l = lo(ALIGN_PAGE_MASK);
+	r4.h = hi(ALIGN_PAGE_MASK);
+	r7 = r7 & r4;
+	p5 = r7;
+	r7 = [p5 + TI_PREEMPT]; /* get preempt count */
+	r7 += 1; /* increment it */
+	[p5 + TI_PREEMPT] = r7;
+#endif
 	pseudo_long_call _do_irq, p2;
+
+#ifdef CONFIG_PREEMPT
+	r7 += -1;
+	[p5 + TI_PREEMPT] = r7; /* restore preempt count */
+#endif
+
 	SP += 12;
 #endif /* CONFIG_IPIPE */
 	pseudo_long_call _return_from_int, p2;
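
The interrupt.S change brackets the call into _do_irq with an increment and decrement of the thread_info preempt count (TI_PREEMPT). A plausible reading, hedged because the patch itself only comments "get/increment/restore preempt count", is that the count stays non-zero for the whole duration of the handler, so the new check in _resume_kernel_from_int declines to preempt from a nested interrupt's return path and only the outermost return, where the count is back to zero, can schedule. The sketch below models that bracketing in plain C with stand-in names; it is not the kernel's real preempt_count machinery.

/* Userspace model of the preempt-count bracketing around the IRQ handler. */
#include <stdio.h>

static int preempt_count_model;	/* stands in for the TI_PREEMPT field */

static void do_irq_model(void)
{
	/* While the handler runs the count is non-zero, so a return path that
	 * checks it (as _resume_kernel_from_int now does) will not preempt. */
	printf("handling irq, preempt_count=%d\n", preempt_count_model);
}

static void common_int_entry_model(void)
{
	preempt_count_model += 1;	/* models: r7 = [p5 + TI_PREEMPT]; r7 += 1 */
	do_irq_model();
	preempt_count_model -= 1;	/* models: r7 += -1; [p5 + TI_PREEMPT] = r7 */
}

int main(void)
{
	common_int_entry_model();
	printf("back on the return path, preempt_count=%d\n", preempt_count_model);
	return 0;
}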