author     Steve French <sfrench@hera.kernel.org>  2005-06-06 12:57:33 -0400
committer  Steve French <sfrench@hera.kernel.org>  2005-06-06 12:57:33 -0400
commit     0b68177ccd12866d9f19cafad212b861c9d02a8c (patch)
tree       6fc26aa59fb183fe9c86d44ae14ce423ad7211da /arch
parent     d0d2f2df65ddea9a30ddd117f769bfff65d3fc56 (diff)
parent     7cef5677ef3a8084f2588ce0a129dc95d65161f6 (diff)
Merge with rsync://rsync.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6.git
Diffstat (limited to 'arch')
-rw-r--r--  arch/m68knommu/kernel/process.c        | 17
-rw-r--r--  arch/ppc64/kernel/entry.S              |  9
-rw-r--r--  arch/ppc64/kernel/head.S               | 10
-rw-r--r--  arch/ppc64/kernel/iSeries_setup.c      | 22
-rw-r--r--  arch/ppc64/kernel/idle.c               |  8
-rw-r--r--  arch/ppc64/kernel/process.c            |  3
-rw-r--r--  arch/ppc64/kernel/prom_init.c          |  2
-rw-r--r--  arch/ppc64/kernel/setup.c              | 56
-rw-r--r--  arch/ppc64/kernel/smp.c                |  3
-rw-r--r--  arch/ppc64/kernel/sysfs.c              |  8
-rw-r--r--  arch/s390/appldata/appldata_base.c     | 72
-rw-r--r--  arch/s390/appldata/appldata_mem.c      |  2
-rw-r--r--  arch/s390/appldata/appldata_net_sum.c  |  2
-rw-r--r--  arch/s390/appldata/appldata_os.c       |  4
-rw-r--r--  arch/s390/kernel/ptrace.c              | 55
-rw-r--r--  arch/s390/mm/fault.c                   |  2
16 files changed, 143 insertions(+), 132 deletions(-)
diff --git a/arch/m68knommu/kernel/process.c b/arch/m68knommu/kernel/process.c
index 2b6c9d32b7a6..c4a33f265dc0 100644
--- a/arch/m68knommu/kernel/process.c
+++ b/arch/m68knommu/kernel/process.c
@@ -45,11 +45,13 @@ asmlinkage void ret_from_fork(void);
  */
 void default_idle(void)
 {
-	while(1) {
-		if (need_resched())
-			__asm__("stop #0x2000" : : : "cc");
-		schedule();
+	local_irq_disable();
+	while (!need_resched()) {
+		/* This stop will re-enable interrupts */
+		__asm__("stop #0x2000" : : : "cc");
+		local_irq_disable();
 	}
+	local_irq_enable();
 }
 
 void (*idle)(void) = default_idle;
@@ -63,7 +65,12 @@ void (*idle)(void) = default_idle;
 void cpu_idle(void)
 {
 	/* endless idle loop with no priority at all */
-	idle();
+	while (1) {
+		idle();
+		preempt_enable_no_resched();
+		schedule();
+		preempt_disable();
+	}
 }
 
 void machine_restart(char * __unused)
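The rewritten m68knommu idle path follows the usual 2.6 contract: default_idle() tests need_resched() with interrupts disabled, so a wake-up cannot slip in between the test and the "stop" instruction (which re-enables interrupts itself), while cpu_idle() loops forever and only re-enables preemption around the call to schedule(). A minimal sketch of that contract, with a hypothetical wait_for_interrupt() standing in for the architecture-specific halt:

    /*
     * Sketch only. wait_for_interrupt() is a made-up stand-in for the
     * m68k "stop #0x2000" above, i.e. a halt that re-enables interrupts.
     */
    static void default_idle_sketch(void)
    {
            local_irq_disable();
            while (!need_resched()) {
                    /* IRQs are off: a wake-up cannot race with this test */
                    wait_for_interrupt();   /* re-enables interrupts */
                    local_irq_disable();
            }
            local_irq_enable();
    }

    static void cpu_idle_sketch(void)
    {
            /* the idle thread normally runs with preemption disabled */
            while (1) {
                    default_idle_sketch();
                    preempt_enable_no_resched();
                    schedule();
                    preempt_disable();
            }
    }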
diff --git a/arch/ppc64/kernel/entry.S b/arch/ppc64/kernel/entry.S
index d3604056e1a9..b61572eb2a71 100644
--- a/arch/ppc64/kernel/entry.S
+++ b/arch/ppc64/kernel/entry.S
@@ -436,15 +436,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 	REST_8GPRS(14, r1)
 	REST_10GPRS(22, r1)
 
-#ifdef CONFIG_PPC_ISERIES
-	clrrdi	r7,r1,THREAD_SHIFT	/* get current_thread_info() */
-	ld	r7,TI_FLAGS(r7)		/* Get run light flag */
-	mfspr	r9,CTRLF
-	srdi	r7,r7,TIF_RUN_LIGHT
-	insrdi	r9,r7,1,63		/* Insert run light into CTRL */
-	mtspr	CTRLT,r9
-#endif
-
 	/* convert old thread to its task_struct for return value */
 	addi	r3,r3,-THREAD
 	ld	r7,_NIP(r1)	/* Return to _switch caller in new task */
diff --git a/arch/ppc64/kernel/head.S b/arch/ppc64/kernel/head.S
index 92a744c31ab1..346dbf606b5d 100644
--- a/arch/ppc64/kernel/head.S
+++ b/arch/ppc64/kernel/head.S
@@ -626,10 +626,10 @@ system_reset_iSeries:
 	lhz	r24,PACAPACAINDEX(r13)	/* Get processor # */
 	cmpwi	0,r24,0			/* Are we processor 0? */
 	beq	.__start_initialization_iSeries	/* Start up the first processor */
-	mfspr	r4,CTRLF
-	li	r5,RUNLATCH		/* Turn off the run light */
+	mfspr	r4,SPRN_CTRLF
+	li	r5,CTRL_RUNLATCH	/* Turn off the run light */
 	andc	r4,r4,r5
-	mtspr	CTRLT,r4
+	mtspr	SPRN_CTRLT,r4
 
 1:
 	HMT_LOW
@@ -2082,9 +2082,9 @@ _GLOBAL(hmt_start_secondary)
 	mfspr	r4, HID0
 	ori	r4, r4, 0x1
 	mtspr	HID0, r4
-	mfspr	r4, CTRLF
+	mfspr	r4, SPRN_CTRLF
 	oris	r4, r4, 0x40
-	mtspr	CTRLT, r4
+	mtspr	SPRN_CTRLT, r4
 	blr
 #endif
 
diff --git a/arch/ppc64/kernel/iSeries_setup.c b/arch/ppc64/kernel/iSeries_setup.c
index da20120f2261..6d06eb550a3f 100644
--- a/arch/ppc64/kernel/iSeries_setup.c
+++ b/arch/ppc64/kernel/iSeries_setup.c
@@ -852,6 +852,28 @@ static int __init iSeries_src_init(void)
 
 late_initcall(iSeries_src_init);
 
+static int set_spread_lpevents(char *str)
+{
+	unsigned long i;
+	unsigned long val = simple_strtoul(str, NULL, 0);
+
+	/*
+	 * The parameter is the number of processors to share in processing
+	 * lp events.
+	 */
+	if (( val > 0) && (val <= NR_CPUS)) {
+		for (i = 1; i < val; ++i)
+			paca[i].lpqueue_ptr = paca[0].lpqueue_ptr;
+
+		printk("lpevent processing spread over %ld processors\n", val);
+	} else {
+		printk("invalid spread_lpevents %ld\n", val);
+	}
+
+	return 1;
+}
+__setup("spread_lpevents=", set_spread_lpevents);
+
 void __init iSeries_early_setup(void)
 {
 	iSeries_fixup_klimit();
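This cleaned-up set_spread_lpevents() replaces the older copy removed from setup.c further down, and keeps the 2.6-era __setup() convention: the handler receives the text after the '=' from the kernel command line and returns 1 once the option is consumed. A generic sketch of that pattern (the parameter name and bounds below are made up for illustration):

    #include <linux/init.h>
    #include <linux/kernel.h>

    static unsigned long example_knob = 1;  /* hypothetical tunable */

    static int __init set_example_knob(char *str)
    {
            unsigned long val = simple_strtoul(str, NULL, 0);

            if (val >= 1 && val <= 64)
                    example_knob = val;
            else
                    printk("invalid example_knob %ld\n", val);
            return 1;       /* option handled, don't pass it to init */
    }
    __setup("example_knob=", set_example_knob);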
diff --git a/arch/ppc64/kernel/idle.c b/arch/ppc64/kernel/idle.c
index 6abc621d3ba0..f24ce2b87200 100644
--- a/arch/ppc64/kernel/idle.c
+++ b/arch/ppc64/kernel/idle.c
@@ -75,13 +75,9 @@ static int iSeries_idle(void)
 {
 	struct paca_struct *lpaca;
 	long oldval;
-	unsigned long CTRL;
 
 	/* ensure iSeries run light will be out when idle */
-	clear_thread_flag(TIF_RUN_LIGHT);
-	CTRL = mfspr(CTRLF);
-	CTRL &= ~RUNLATCH;
-	mtspr(CTRLT, CTRL);
+	ppc64_runlatch_off();
 
 	lpaca = get_paca();
 
@@ -111,7 +107,9 @@ static int iSeries_idle(void)
 		}
 	}
 
+	ppc64_runlatch_on();
 	schedule();
+	ppc64_runlatch_off();
 	}
 
 	return 0;
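The open-coded CTRL SPR manipulation gives way to ppc64_runlatch_on()/ppc64_runlatch_off(): the run latch tells the hardware (and, on iSeries, the hypervisor) whether the thread is doing useful work, so it is raised around schedule() and dropped while idling. The helpers themselves are not part of this hunk; judging from the sequences removed here and in sysfs.c later in this diff, they plausibly reduce to something like the sketch below (the CPU_FTR_CTRL feature test is an assumption, not taken from this patch):

    static inline void runlatch_on_sketch(void)
    {
            unsigned long ctrl;

            if (cpu_has_feature(CPU_FTR_CTRL)) {    /* assumed feature bit */
                    ctrl = mfspr(SPRN_CTRLF);
                    ctrl |= CTRL_RUNLATCH;
                    mtspr(SPRN_CTRLT, ctrl);
            }
    }

    static inline void runlatch_off_sketch(void)
    {
            unsigned long ctrl;

            if (cpu_has_feature(CPU_FTR_CTRL)) {
                    ctrl = mfspr(SPRN_CTRLF);
                    ctrl &= ~CTRL_RUNLATCH;
                    mtspr(SPRN_CTRLT, ctrl);
            }
    }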
diff --git a/arch/ppc64/kernel/process.c b/arch/ppc64/kernel/process.c
index 8b0686122738..cdfecbeb331f 100644
--- a/arch/ppc64/kernel/process.c
+++ b/arch/ppc64/kernel/process.c
@@ -378,9 +378,6 @@ copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
 		childregs->gpr[1] = sp + sizeof(struct pt_regs);
 		p->thread.regs = NULL;	/* no user register state */
 		clear_ti_thread_flag(p->thread_info, TIF_32BIT);
-#ifdef CONFIG_PPC_ISERIES
-		set_ti_thread_flag(p->thread_info, TIF_RUN_LIGHT);
-#endif
 	} else {
 		childregs->gpr[1] = usp;
 		p->thread.regs = childregs;
diff --git a/arch/ppc64/kernel/prom_init.c b/arch/ppc64/kernel/prom_init.c
index 1ac531ba7056..b7683abfbe6a 100644
--- a/arch/ppc64/kernel/prom_init.c
+++ b/arch/ppc64/kernel/prom_init.c
@@ -1370,7 +1370,7 @@ static int __init prom_find_machine_type(void)
 	}
 	/* Default to pSeries. We need to know if we are running LPAR */
 	rtas = call_prom("finddevice", 1, 1, ADDR("/rtas"));
-	if (!PHANDLE_VALID(rtas)) {
+	if (PHANDLE_VALID(rtas)) {
 		int x = prom_getproplen(rtas, "ibm,hypertas-functions");
 		if (x != PROM_ERROR) {
 			prom_printf("Hypertas detected, assuming LPAR !\n");
diff --git a/arch/ppc64/kernel/setup.c b/arch/ppc64/kernel/setup.c
index 21c57f539c29..dce198d39328 100644
--- a/arch/ppc64/kernel/setup.c
+++ b/arch/ppc64/kernel/setup.c
@@ -103,11 +103,6 @@ extern void unflatten_device_tree(void);
 
 extern void smp_release_cpus(void);
 
-unsigned long decr_overclock = 1;
-unsigned long decr_overclock_proc0 = 1;
-unsigned long decr_overclock_set = 0;
-unsigned long decr_overclock_proc0_set = 0;
-
 int have_of = 1;
 int boot_cpuid = 0;
 int boot_cpuid_phys = 0;
@@ -1120,64 +1115,15 @@ void ppc64_dump_msg(unsigned int src, const char *msg)
 	printk("[dump]%04x %s\n", src, msg);
 }
 
-int set_spread_lpevents( char * str )
-{
-	/* The parameter is the number of processors to share in processing lp events */
-	unsigned long i;
-	unsigned long val = simple_strtoul( str, NULL, 0 );
-	if ( ( val > 0 ) && ( val <= NR_CPUS ) ) {
-		for ( i=1; i<val; ++i )
-			paca[i].lpqueue_ptr = paca[0].lpqueue_ptr;
-		printk("lpevent processing spread over %ld processors\n", val);
-	}
-	else
-		printk("invalid spreaqd_lpevents %ld\n", val);
-	return 1;
-}
-
 /* This should only be called on processor 0 during calibrate decr */
 void setup_default_decr(void)
 {
 	struct paca_struct *lpaca = get_paca();
 
-	if ( decr_overclock_set && !decr_overclock_proc0_set )
-		decr_overclock_proc0 = decr_overclock;
-
-	lpaca->default_decr = tb_ticks_per_jiffy / decr_overclock_proc0;
+	lpaca->default_decr = tb_ticks_per_jiffy;
 	lpaca->next_jiffy_update_tb = get_tb() + tb_ticks_per_jiffy;
 }
 
-int set_decr_overclock_proc0( char * str )
-{
-	unsigned long val = simple_strtoul( str, NULL, 0 );
-	if ( ( val >= 1 ) && ( val <= 48 ) ) {
-		decr_overclock_proc0_set = 1;
-		decr_overclock_proc0 = val;
-		printk("proc 0 decrementer overclock factor of %ld\n", val);
-	}
-	else
-		printk("invalid proc 0 decrementer overclock factor of %ld\n", val);
-	return 1;
-}
-
-int set_decr_overclock( char * str )
-{
-	unsigned long val = simple_strtoul( str, NULL, 0 );
-	if ( ( val >= 1 ) && ( val <= 48 ) ) {
-		decr_overclock_set = 1;
-		decr_overclock = val;
-		printk("decrementer overclock factor of %ld\n", val);
-	}
-	else
-		printk("invalid decrementer overclock factor of %ld\n", val);
-	return 1;
-
-}
-
-__setup("spread_lpevents=", set_spread_lpevents );
-__setup("decr_overclock_proc0=", set_decr_overclock_proc0 );
-__setup("decr_overclock=", set_decr_overclock );
-
 #ifndef CONFIG_PPC_ISERIES
 /*
  * This function can be used by platforms to "find" legacy serial ports.
diff --git a/arch/ppc64/kernel/smp.c b/arch/ppc64/kernel/smp.c
index 3b906cd94037..9ef5d36d6b25 100644
--- a/arch/ppc64/kernel/smp.c
+++ b/arch/ppc64/kernel/smp.c
@@ -334,7 +334,6 @@ void smp_call_function_interrupt(void)
 	}
 }
 
-extern unsigned long decr_overclock;
 extern struct gettimeofday_struct do_gtod;
 
 struct thread_info *current_set[NR_CPUS];
@@ -491,7 +490,7 @@ int __devinit __cpu_up(unsigned int cpu)
 	if (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu))
 		return -EINVAL;
 
-	paca[cpu].default_decr = tb_ticks_per_jiffy / decr_overclock;
+	paca[cpu].default_decr = tb_ticks_per_jiffy;
 
 	if (!cpu_has_feature(CPU_FTR_SLB)) {
 		void *tmp;
diff --git a/arch/ppc64/kernel/sysfs.c b/arch/ppc64/kernel/sysfs.c
index 0925694c3ce5..c8fa6569b2fd 100644
--- a/arch/ppc64/kernel/sysfs.c
+++ b/arch/ppc64/kernel/sysfs.c
@@ -113,7 +113,6 @@ void ppc64_enable_pmcs(void)
 #ifdef CONFIG_PPC_PSERIES
 	unsigned long set, reset;
 	int ret;
-	unsigned int ctrl;
 #endif /* CONFIG_PPC_PSERIES */
 
 	/* Only need to enable them once */
@@ -167,11 +166,8 @@ void ppc64_enable_pmcs(void)
 	 * On SMT machines we have to set the run latch in the ctrl register
 	 * in order to make PMC6 spin.
 	 */
-	if (cpu_has_feature(CPU_FTR_SMT)) {
-		ctrl = mfspr(CTRLF);
-		ctrl |= RUNLATCH;
-		mtspr(CTRLT, ctrl);
-	}
+	if (cpu_has_feature(CPU_FTR_SMT))
+		ppc64_runlatch_on();
 #endif /* CONFIG_PPC_PSERIES */
 }
 
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index 01ae1964c938..c067435bae45 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -28,6 +28,7 @@
 //#include <linux/kernel_stat.h>
 #include <linux/notifier.h>
 #include <linux/cpu.h>
+#include <linux/workqueue.h>
 
 #include "appldata.h"
 
@@ -133,9 +134,12 @@ static int appldata_interval = APPLDATA_CPU_INTERVAL;
 static int appldata_timer_active;
 
 /*
- * Tasklet
+ * Work queue
  */
-static struct tasklet_struct appldata_tasklet_struct;
+static struct workqueue_struct *appldata_wq;
+static void appldata_work_fn(void *data);
+static DECLARE_WORK(appldata_work, appldata_work_fn, NULL);
+
 
 /*
  * Ops list
@@ -144,11 +148,11 @@ static DEFINE_SPINLOCK(appldata_ops_lock);
 static LIST_HEAD(appldata_ops_list);
 
 
-/************************* timer, tasklet, DIAG ******************************/
+/*************************** timer, work, DIAG *******************************/
 /*
  * appldata_timer_function()
 *
- * schedule tasklet and reschedule timer
+ * schedule work and reschedule timer
 */
 static void appldata_timer_function(unsigned long data, struct pt_regs *regs)
 {
@@ -157,22 +161,22 @@ static void appldata_timer_function(unsigned long data, struct pt_regs *regs)
 		atomic_read(&appldata_expire_count));
 	if (atomic_dec_and_test(&appldata_expire_count)) {
 		atomic_set(&appldata_expire_count, num_online_cpus());
-		tasklet_schedule((struct tasklet_struct *) data);
+		queue_work(appldata_wq, (struct work_struct *) data);
 	}
 }
 
 /*
- * appldata_tasklet_function()
+ * appldata_work_fn()
 *
 * call data gathering function for each (active) module
 */
-static void appldata_tasklet_function(unsigned long data)
+static void appldata_work_fn(void *data)
 {
 	struct list_head *lh;
 	struct appldata_ops *ops;
 	int i;
 
-	P_DEBUG(" -= Tasklet =-\n");
+	P_DEBUG(" -= Work Queue =-\n");
 	i = 0;
 	spin_lock(&appldata_ops_lock);
 	list_for_each(lh, &appldata_ops_list) {
@@ -231,7 +235,7 @@ static int appldata_diag(char record_nr, u16 function, unsigned long buffer,
 		: "=d" (ry) : "d" (&(appldata_parameter_list)) : "cc");
 	return (int) ry;
 }
-/********************** timer, tasklet, DIAG <END> ***************************/
+/************************ timer, work, DIAG <END> ****************************/
 
 
 /****************************** /proc stuff **********************************/
@@ -411,7 +415,7 @@ appldata_generic_handler(ctl_table *ctl, int write, struct file *filp,
 	struct list_head *lh;
 
 	found = 0;
-	spin_lock_bh(&appldata_ops_lock);
+	spin_lock(&appldata_ops_lock);
 	list_for_each(lh, &appldata_ops_list) {
 		tmp_ops = list_entry(lh, struct appldata_ops, list);
 		if (&tmp_ops->ctl_table[2] == ctl) {
@@ -419,15 +423,15 @@ appldata_generic_handler(ctl_table *ctl, int write, struct file *filp,
 		}
 	}
 	if (!found) {
-		spin_unlock_bh(&appldata_ops_lock);
+		spin_unlock(&appldata_ops_lock);
 		return -ENODEV;
 	}
 	ops = ctl->data;
 	if (!try_module_get(ops->owner)) {	// protect this function
-		spin_unlock_bh(&appldata_ops_lock);
+		spin_unlock(&appldata_ops_lock);
 		return -ENODEV;
 	}
-	spin_unlock_bh(&appldata_ops_lock);
+	spin_unlock(&appldata_ops_lock);
 
 	if (!*lenp || *ppos) {
 		*lenp = 0;
@@ -451,10 +455,11 @@ appldata_generic_handler(ctl_table *ctl, int write, struct file *filp,
 		return -EFAULT;
 	}
 
-	spin_lock_bh(&appldata_ops_lock);
+	spin_lock(&appldata_ops_lock);
 	if ((buf[0] == '1') && (ops->active == 0)) {
-		if (!try_module_get(ops->owner)) {	// protect tasklet
-			spin_unlock_bh(&appldata_ops_lock);
+		// protect work queue callback
+		if (!try_module_get(ops->owner)) {
+			spin_unlock(&appldata_ops_lock);
 			module_put(ops->owner);
 			return -ENODEV;
 		}
@@ -485,7 +490,7 @@ appldata_generic_handler(ctl_table *ctl, int write, struct file *filp,
 		}
 		module_put(ops->owner);
 	}
-	spin_unlock_bh(&appldata_ops_lock);
+	spin_unlock(&appldata_ops_lock);
 out:
 	*lenp = len;
 	*ppos += len;
@@ -529,7 +534,7 @@ int appldata_register_ops(struct appldata_ops *ops)
 	}
 	memset(ops->ctl_table, 0, 4*sizeof(struct ctl_table));
 
-	spin_lock_bh(&appldata_ops_lock);
+	spin_lock(&appldata_ops_lock);
 	list_for_each(lh, &appldata_ops_list) {
 		tmp_ops = list_entry(lh, struct appldata_ops, list);
 		P_DEBUG("register_ops loop: %i) name = %s, ctl = %i\n",
@@ -541,18 +546,18 @@ int appldata_register_ops(struct appldata_ops *ops)
 				APPLDATA_PROC_NAME_LENGTH) == 0) {
 			P_ERROR("Name \"%s\" already registered!\n", ops->name);
 			kfree(ops->ctl_table);
-			spin_unlock_bh(&appldata_ops_lock);
+			spin_unlock(&appldata_ops_lock);
 			return -EBUSY;
 		}
 		if (tmp_ops->ctl_nr == ops->ctl_nr) {
 			P_ERROR("ctl_nr %i already registered!\n", ops->ctl_nr);
 			kfree(ops->ctl_table);
-			spin_unlock_bh(&appldata_ops_lock);
+			spin_unlock(&appldata_ops_lock);
 			return -EBUSY;
 		}
 	}
 	list_add(&ops->list, &appldata_ops_list);
-	spin_unlock_bh(&appldata_ops_lock);
+	spin_unlock(&appldata_ops_lock);
 
 	ops->ctl_table[0].ctl_name = CTL_APPLDATA;
 	ops->ctl_table[0].procname = appldata_proc_name;
@@ -583,12 +588,12 @@ int appldata_register_ops(struct appldata_ops *ops)
 */
 void appldata_unregister_ops(struct appldata_ops *ops)
 {
-	spin_lock_bh(&appldata_ops_lock);
+	spin_lock(&appldata_ops_lock);
 	unregister_sysctl_table(ops->sysctl_header);
 	list_del(&ops->list);
 	kfree(ops->ctl_table);
 	ops->ctl_table = NULL;
-	spin_unlock_bh(&appldata_ops_lock);
+	spin_unlock(&appldata_ops_lock);
 	P_INFO("%s-ops unregistered!\n", ops->name);
 }
 /********************** module-ops management <END> **************************/
@@ -602,7 +607,7 @@ appldata_online_cpu(int cpu)
 	init_virt_timer(&per_cpu(appldata_timer, cpu));
 	per_cpu(appldata_timer, cpu).function = appldata_timer_function;
 	per_cpu(appldata_timer, cpu).data = (unsigned long)
-		&appldata_tasklet_struct;
+		&appldata_work;
 	atomic_inc(&appldata_expire_count);
 	spin_lock(&appldata_timer_lock);
 	__appldata_vtimer_setup(APPLDATA_MOD_TIMER);
@@ -615,7 +620,7 @@ appldata_offline_cpu(int cpu)
 	del_virt_timer(&per_cpu(appldata_timer, cpu));
 	if (atomic_dec_and_test(&appldata_expire_count)) {
 		atomic_set(&appldata_expire_count, num_online_cpus());
-		tasklet_schedule(&appldata_tasklet_struct);
+		queue_work(appldata_wq, &appldata_work);
 	}
 	spin_lock(&appldata_timer_lock);
 	__appldata_vtimer_setup(APPLDATA_MOD_TIMER);
@@ -648,7 +653,7 @@ static struct notifier_block __devinitdata appldata_nb = {
 /*
 * appldata_init()
 *
- * init timer and tasklet, register /proc entries
+ * init timer, register /proc entries
 */
 static int __init appldata_init(void)
 {
@@ -657,6 +662,12 @@ static int __init appldata_init(void)
 	P_DEBUG("sizeof(parameter_list) = %lu\n",
 		sizeof(struct appldata_parameter_list));
 
+	appldata_wq = create_singlethread_workqueue("appldata");
+	if (!appldata_wq) {
+		P_ERROR("Could not create work queue\n");
+		return -ENOMEM;
+	}
+
 	for_each_online_cpu(i)
 		appldata_online_cpu(i);
 
@@ -670,7 +681,6 @@ static int __init appldata_init(void)
 	appldata_table[1].de->owner = THIS_MODULE;
 #endif
 
-	tasklet_init(&appldata_tasklet_struct, appldata_tasklet_function, 0);
 	P_DEBUG("Base interface initialized.\n");
 	return 0;
 }
@@ -678,7 +688,7 @@ static int __init appldata_init(void)
 /*
 * appldata_exit()
 *
- * stop timer and tasklet, unregister /proc entries
+ * stop timer, unregister /proc entries
 */
 static void __exit appldata_exit(void)
 {
@@ -690,7 +700,7 @@ static void __exit appldata_exit(void)
 	/*
 	 * ops list should be empty, but just in case something went wrong...
 	 */
-	spin_lock_bh(&appldata_ops_lock);
+	spin_lock(&appldata_ops_lock);
 	list_for_each(lh, &appldata_ops_list) {
 		ops = list_entry(lh, struct appldata_ops, list);
 		rc = appldata_diag(ops->record_nr, APPLDATA_STOP_REC,
@@ -700,7 +710,7 @@ static void __exit appldata_exit(void)
 				"return code: %d\n", ops->name, rc);
 		}
 	}
-	spin_unlock_bh(&appldata_ops_lock);
+	spin_unlock(&appldata_ops_lock);
 
 	for_each_online_cpu(i)
 		appldata_offline_cpu(i);
@@ -709,7 +719,7 @@ static void __exit appldata_exit(void)
 
 	unregister_sysctl_table(appldata_sysctl_header);
 
-	tasklet_kill(&appldata_tasklet_struct);
+	destroy_workqueue(appldata_wq);
 	P_DEBUG("... module unloaded!\n");
 }
 /**************************** init / exit <END> ******************************/
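The data-gathering callback moves from a tasklet (softirq context) to a single-threaded work queue, so it now runs in process context and may sleep; since nothing takes appldata_ops_lock from softirq context any more, that is presumably also why the spin_lock_bh()/spin_unlock_bh() pairs become plain spin_lock()/spin_unlock() in the same patch. A condensed sketch of the 2.6-era work queue API used above (all names below are illustrative only, not part of the patch):

    #include <linux/init.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *example_wq;

    static void example_work_fn(void *data)
    {
            /* runs in process context: sleeping is allowed here */
    }
    static DECLARE_WORK(example_work, example_work_fn, NULL);

    static int __init example_init(void)
    {
            example_wq = create_singlethread_workqueue("example");
            if (!example_wq)
                    return -ENOMEM;
            queue_work(example_wq, &example_work);
            return 0;
    }

    static void __exit example_exit(void)
    {
            destroy_workqueue(example_wq);  /* flushes pending work first */
    }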
diff --git a/arch/s390/appldata/appldata_mem.c b/arch/s390/appldata/appldata_mem.c
index 462ee9a84e76..f0e2fbed3d4c 100644
--- a/arch/s390/appldata/appldata_mem.c
+++ b/arch/s390/appldata/appldata_mem.c
@@ -68,7 +68,7 @@ struct appldata_mem_data {
 	u64 pgmajfault;		/* page faults (major only) */
 // <-- New in 2.6
 
-} appldata_mem_data;
+} __attribute__((packed)) appldata_mem_data;
 
 
 static inline void appldata_debug_print(struct appldata_mem_data *mem_data)
diff --git a/arch/s390/appldata/appldata_net_sum.c b/arch/s390/appldata/appldata_net_sum.c
index dd61638d3027..2a4c7432db4a 100644
--- a/arch/s390/appldata/appldata_net_sum.c
+++ b/arch/s390/appldata/appldata_net_sum.c
@@ -57,7 +57,7 @@ struct appldata_net_sum_data {
 	u64 rx_dropped;		/* no space in linux buffers */
 	u64 tx_dropped;		/* no space available in linux */
 	u64 collisions;		/* collisions while transmitting */
-} appldata_net_sum_data;
+} __attribute__((packed)) appldata_net_sum_data;
 
 
 static inline void appldata_print_debug(struct appldata_net_sum_data *net_data)
diff --git a/arch/s390/appldata/appldata_os.c b/arch/s390/appldata/appldata_os.c
index b83f07484551..e0a476bf4fd6 100644
--- a/arch/s390/appldata/appldata_os.c
+++ b/arch/s390/appldata/appldata_os.c
@@ -49,7 +49,7 @@ struct appldata_os_per_cpu {
 	u32 per_cpu_softirq;	/* ... spent in softirqs */
 	u32 per_cpu_iowait;	/* ... spent while waiting for I/O */
 // <-- New in 2.6
-};
+} __attribute__((packed));
 
 struct appldata_os_data {
 	u64 timestamp;
@@ -75,7 +75,7 @@ struct appldata_os_data {
 
 	/* per cpu data */
 	struct appldata_os_per_cpu os_cpu[0];
-};
+} __attribute__((packed));
 
 static struct appldata_os_data *appldata_os_data;
 
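Marking the appldata record structures __attribute__((packed)) removes any compiler-inserted padding, so the in-memory layout passed on via DIAG matches the record consumed on the z/VM side field for field. A small user-space illustration of the effect (field names are made up):

    #include <stdio.h>
    #include <stdint.h>

    struct with_padding {
            uint32_t count;
            uint64_t bytes;         /* usually preceded by 4 padding bytes */
    };

    struct without_padding {
            uint32_t count;
            uint64_t bytes;
    } __attribute__((packed));

    int main(void)
    {
            printf("padded=%zu packed=%zu\n",
                   sizeof(struct with_padding), sizeof(struct without_padding));
            return 0;
    }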
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 26889366929a..06afa3103ace 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -40,6 +40,7 @@
 #include <asm/pgalloc.h>
 #include <asm/system.h>
 #include <asm/uaccess.h>
+#include <asm/unistd.h>
 
 #ifdef CONFIG_S390_SUPPORT
 #include "compat_ptrace.h"
@@ -130,13 +131,19 @@ static int
 peek_user(struct task_struct *child, addr_t addr, addr_t data)
 {
 	struct user *dummy = NULL;
-	addr_t offset, tmp;
+	addr_t offset, tmp, mask;
 
 	/*
	 * Stupid gdb peeks/pokes the access registers in 64 bit with
	 * an alignment of 4. Programmers from hell...
	 */
-	if ((addr & 3) || addr > sizeof(struct user) - __ADDR_MASK)
+	mask = __ADDR_MASK;
+#ifdef CONFIG_ARCH_S390X
+	if (addr >= (addr_t) &dummy->regs.acrs &&
+	    addr < (addr_t) &dummy->regs.orig_gpr2)
+		mask = 3;
+#endif
+	if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
 		return -EIO;
 
 	if (addr < (addr_t) &dummy->regs.acrs) {
@@ -153,6 +160,16 @@ peek_user(struct task_struct *child, addr_t addr, addr_t data)
		 * access registers are stored in the thread structure
		 */
 		offset = addr - (addr_t) &dummy->regs.acrs;
+#ifdef CONFIG_ARCH_S390X
+		/*
+		 * Very special case: old & broken 64 bit gdb reading
+		 * from acrs[15]. Result is a 64 bit value. Read the
+		 * 32 bit acrs[15] value and shift it by 32. Sick...
+		 */
+		if (addr == (addr_t) &dummy->regs.acrs[15])
+			tmp = ((unsigned long) child->thread.acrs[15]) << 32;
+		else
+#endif
 		tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset);
 
 	} else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
@@ -167,6 +184,9 @@ peek_user(struct task_struct *child, addr_t addr, addr_t data)
		 */
 		offset = addr - (addr_t) &dummy->regs.fp_regs;
 		tmp = *(addr_t *)((addr_t) &child->thread.fp_regs + offset);
+		if (addr == (addr_t) &dummy->regs.fp_regs.fpc)
+			tmp &= (unsigned long) FPC_VALID_MASK
+				<< (BITS_PER_LONG - 32);
 
 	} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
 		/*
@@ -191,13 +211,19 @@ static int
 poke_user(struct task_struct *child, addr_t addr, addr_t data)
 {
 	struct user *dummy = NULL;
-	addr_t offset;
+	addr_t offset, mask;
 
 	/*
	 * Stupid gdb peeks/pokes the access registers in 64 bit with
	 * an alignment of 4. Programmers from hell indeed...
	 */
-	if ((addr & 3) || addr > sizeof(struct user) - __ADDR_MASK)
+	mask = __ADDR_MASK;
+#ifdef CONFIG_ARCH_S390X
+	if (addr >= (addr_t) &dummy->regs.acrs &&
+	    addr < (addr_t) &dummy->regs.orig_gpr2)
+		mask = 3;
+#endif
+	if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
 		return -EIO;
 
 	if (addr < (addr_t) &dummy->regs.acrs) {
@@ -224,6 +250,17 @@ poke_user(struct task_struct *child, addr_t addr, addr_t data)
		 * access registers are stored in the thread structure
		 */
 		offset = addr - (addr_t) &dummy->regs.acrs;
+#ifdef CONFIG_ARCH_S390X
+		/*
+		 * Very special case: old & broken 64 bit gdb writing
+		 * to acrs[15] with a 64 bit value. Ignore the lower
+		 * half of the value and write the upper 32 bit to
+		 * acrs[15]. Sick...
+		 */
+		if (addr == (addr_t) &dummy->regs.acrs[15])
+			child->thread.acrs[15] = (unsigned int) (data >> 32);
+		else
+#endif
 		*(addr_t *)((addr_t) &child->thread.acrs + offset) = data;
 
 	} else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
@@ -237,7 +274,8 @@ poke_user(struct task_struct *child, addr_t addr, addr_t data)
		 * floating point regs. are stored in the thread structure
		 */
 		if (addr == (addr_t) &dummy->regs.fp_regs.fpc &&
-		    (data & ~FPC_VALID_MASK) != 0)
+		    (data & ~((unsigned long) FPC_VALID_MASK
+		    << (BITS_PER_LONG - 32))) != 0)
 			return -EINVAL;
 		offset = addr - (addr_t) &dummy->regs.fp_regs;
 		*(addr_t *)((addr_t) &child->thread.fp_regs + offset) = data;
@@ -723,6 +761,13 @@ syscall_trace(struct pt_regs *regs, int entryexit)
 			? 0x80 : 0));
 
 	/*
+	 * If the debuffer has set an invalid system call number,
+	 * we prepare to skip the system call restart handling.
+	 */
+	if (!entryexit && regs->gprs[2] >= NR_syscalls)
+		regs->trap = -1;
+
+	/*
	 * this isn't the same as continuing with a signal, but it will do
	 * for normal use. strace only continues with a signal if the
	 * stopping signal is not SIGTRAP. -brl
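The peek_user()/poke_user() changes relax the alignment check only inside the access-register window (on 64-bit __ADDR_MASK is 7, but gdb addresses the 32-bit access registers at 4-byte offsets, so the mask drops to 3 there), and acrs[15] gets a special case because old 64-bit gdb transfers it as a full 64-bit word whose upper half carries the 32-bit register value. A condensed restatement of that packing convention, under those assumptions:

    /*
     * Sketch of the 64-bit acrs[15] convention handled above: gdb moves a
     * full 64-bit word; the 32-bit register value lives in its upper half.
     */
    static unsigned long pack_acr15(unsigned int acr15)
    {
            return ((unsigned long) acr15) << 32;           /* peek_user() */
    }

    static unsigned int unpack_acr15(unsigned long word)
    {
            return (unsigned int) (word >> 32);             /* poke_user() */
    }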
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 80306bc8c799..75fde949d125 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -207,7 +207,7 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection)
	 * we are not in an interrupt and that there is a
	 * user context.
	 */
-	if (user_address == 0 || in_interrupt() || !mm)
+	if (user_address == 0 || in_atomic() || !mm)
 		goto no_context;
 
 	/*