Diffstat (limited to 'arch/ia64/kernel')
-rw-r--r--  arch/ia64/kernel/mca.c         |  5
-rw-r--r--  arch/ia64/kernel/time.c        | 19
-rw-r--r--  arch/ia64/kernel/vmlinux.lds.S |  2
3 files changed, 10 insertions(+), 16 deletions(-)
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 1753f6a30d55..80d50b83d419 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -582,6 +582,8 @@ out:
 	/* Get the CPE error record and log it */
 	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CPE);
 
+	local_irq_disable();
+
 	return IRQ_HANDLED;
 }
 
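The two added lines above make the CPE interrupt handler disable interrupts again on its way out. ia64_mca_cpe_int_handler() enables interrupts earlier on so the SAL error record can be processed with interrupts on, and the generic irq layer expects a handler to return with interrupts off, so the handler has to undo its own enable before returning. A minimal sketch of the pattern, using a hypothetical handler name:

#include <linux/interrupt.h>	/* irqreturn_t, IRQ_HANDLED */
#include <linux/irqflags.h>	/* local_irq_enable(), local_irq_disable() */

/*
 * Hypothetical handler illustrating the fix above: a handler that
 * enables interrupts for its own processing must disable them again
 * before returning, so the core irq code sees the state it expects.
 */
static irqreturn_t example_cpe_handler(int irq, void *arg)
{
	local_irq_enable();	/* process the event with irqs on */

	/* ... retrieve and log the error record here ... */

	local_irq_disable();	/* restore the state genirq expects */
	return IRQ_HANDLED;
}
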
@@ -1859,7 +1861,8 @@ ia64_mca_cpu_init(void *cpu_data)
 			data = mca_bootmem();
 			first_time = 0;
 		} else
-			data = __get_free_pages(GFP_KERNEL, get_order(sz));
+			data = (void *)__get_free_pages(GFP_KERNEL,
+							get_order(sz));
 		if (!data)
 			panic("Could not allocate MCA memory for cpu %d\n",
 			      cpu);
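The second mca.c hunk is a type fix: __get_free_pages() returns the page address as an unsigned long, so assigning its result straight to the void *data makes the compiler warn about creating a pointer from an integer. The added (void *) cast (plus a line wrap) silences the warning without changing behaviour. A minimal sketch of the allocation pattern, with a hypothetical helper name:

#include <linux/gfp.h>	/* __get_free_pages(), GFP_KERNEL */
#include <linux/mm.h>	/* get_order() */

/*
 * Hypothetical helper showing the cast: __get_free_pages() hands back
 * an unsigned long, which must be cast explicitly before it can be
 * stored in or used as a pointer.
 */
static void *alloc_cpu_buffer(unsigned long sz)
{
	return (void *)__get_free_pages(GFP_KERNEL, get_order(sz));
}
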
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 9702fa92489e..156ad803d5b7 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -190,19 +190,10 @@ timer_interrupt (int irq, void *dev_id)
 
 		new_itm += local_cpu_data->itm_delta;
 
-		if (smp_processor_id() == time_keeper_id) {
-			/*
-			 * Here we are in the timer irq handler. We have irqs locally
-			 * disabled, but we don't know if the timer_bh is running on
-			 * another CPU. We need to avoid to SMP race by acquiring the
-			 * xtime_lock.
-			 */
-			write_seqlock(&xtime_lock);
-			do_timer(1);
-			local_cpu_data->itm_next = new_itm;
-			write_sequnlock(&xtime_lock);
-		} else
-			local_cpu_data->itm_next = new_itm;
+		if (smp_processor_id() == time_keeper_id)
+			xtime_update(1);
+
+		local_cpu_data->itm_next = new_itm;
 
 		if (time_after(new_itm, ia64_get_itc()))
 			break;
@@ -222,7 +213,7 @@ skip_process_time_accounting:
 		 * comfort, we increase the safety margin by
 		 * intentionally dropping the next tick(s). We do NOT
 		 * update itm.next because that would force us to call
-		 * do_timer() which in turn would let our clock run
+		 * xtime_update() which in turn would let our clock run
 		 * too fast (with the potentially devastating effect
 		 * of losing monotony of time).
 		 */
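The time.c change drops the open-coded xtime_lock sequence from the ia64 timer interrupt: instead of taking write_seqlock(&xtime_lock) and calling do_timer(1) directly, the time-keeper CPU now calls xtime_update(1), which does the same locking inside the core timekeeping code, so the architecture no longer touches xtime_lock at all. Note that itm_next is per-CPU state written with interrupts off, so it never needed the lock and is now updated unconditionally outside the conditional. At the time of this change, xtime_update() was roughly the following (a sketch of the core helper, not ia64 code):

/* kernel timekeeping core, approximately: */
void xtime_update(unsigned long ticks)
{
	write_seqlock(&xtime_lock);
	do_timer(ticks);	/* advance jiffies_64 and the wall clock */
	write_sequnlock(&xtime_lock);
}

The serialization is unchanged; the difference is purely where the locking lives.
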
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 5a4d044dcb1c..787de4a77d82 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -198,7 +198,7 @@ SECTIONS {
 
 	/* Per-cpu data: */
 	. = ALIGN(PERCPU_PAGE_SIZE);
-	PERCPU_VADDR(PERCPU_ADDR, :percpu)
+	PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
 	__phys_per_cpu_start = __per_cpu_load;
 	/*
 	 * ensure percpu data fits
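The linker-script hunk tracks an interface change in the generic percpu section macros: PERCPU_VADDR() grew a leading cacheline argument used to align the read-mostly and default percpu subsections, and ia64 simply passes its cacheline size, SMP_CACHE_BYTES. A simplified sketch of the generic macro from include/asm-generic/vmlinux.lds.h around this time (VMLINUX_SYMBOL() wrappers omitted; treat the exact body as an approximation):

#define PERCPU_VADDR(cacheline, vaddr, phdr)				\
	__per_cpu_load = .;						\
	.data..percpu vaddr : AT(__per_cpu_load - LOAD_OFFSET) {	\
		__per_cpu_start = .;					\
		*(.data..percpu..first)					\
		. = ALIGN(PAGE_SIZE);					\
		*(.data..percpu..page_aligned)				\
		. = ALIGN(cacheline);					\
		*(.data..percpu..readmostly)				\
		. = ALIGN(cacheline);					\
		*(.data..percpu)					\
		*(.data..percpu..shared_aligned)			\
		__per_cpu_end = .;					\
	} phdr								\
	. = __per_cpu_load + SIZEOF(.data..percpu);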