Diffstat (limited to 'arch/ia64/kernel')
 arch/ia64/kernel/gate.S        |  1
 arch/ia64/kernel/mca.c         | 60
 arch/ia64/kernel/mca_asm.S     | 12
 arch/ia64/kernel/mca_drv_asm.S |  6
 arch/ia64/kernel/process.c     |  3
 arch/ia64/kernel/setup.c       |  6
 arch/ia64/kernel/time.c        |  1
 arch/ia64/kernel/unwind.c      | 16
 8 files changed, 72 insertions(+), 33 deletions(-)
diff --git a/arch/ia64/kernel/gate.S b/arch/ia64/kernel/gate.S
index 3274850cf272..74b1ccce4e84 100644
--- a/arch/ia64/kernel/gate.S
+++ b/arch/ia64/kernel/gate.S
@@ -30,6 +30,7 @@
 	.previous
 #define BRL_COND_FSYS_BUBBLE_DOWN(pr)			\
 [1:](pr)brl.cond.sptk 0;				\
+	;;						\
 	.xdata4 ".data.patch.brl_fsys_bubble_down", 1b-.
 
 GLOBAL_ENTRY(__kernel_syscall_via_break)
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 1ead5ea6c5ce..4b5daa3cc0fe 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -57,6 +57,9 @@
  *
  * 2006-09-15 Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
  *	      Add printing support for MCA/INIT.
+ *
+ * 2007-04-27 Russ Anderson <rja@sgi.com>
+ *	      Support multiple cpus going through OS_MCA in the same event.
  */
 #include <linux/types.h>
 #include <linux/init.h>
@@ -96,7 +99,6 @@
96#endif 99#endif
97 100
98/* Used by mca_asm.S */ 101/* Used by mca_asm.S */
99u32 ia64_mca_serialize;
100DEFINE_PER_CPU(u64, ia64_mca_data); /* == __per_cpu_mca[smp_processor_id()] */ 102DEFINE_PER_CPU(u64, ia64_mca_data); /* == __per_cpu_mca[smp_processor_id()] */
101DEFINE_PER_CPU(u64, ia64_mca_per_cpu_pte); /* PTE to map per-CPU area */ 103DEFINE_PER_CPU(u64, ia64_mca_per_cpu_pte); /* PTE to map per-CPU area */
102DEFINE_PER_CPU(u64, ia64_mca_pal_pte); /* PTE to map PAL code */ 104DEFINE_PER_CPU(u64, ia64_mca_pal_pte); /* PTE to map PAL code */
@@ -963,11 +965,12 @@ ia64_mca_modify_original_stack(struct pt_regs *regs,
 		goto no_mod;
 	}
 
+	if (r13 != sos->prev_IA64_KR_CURRENT) {
+		msg = "inconsistent previous current and r13";
+		goto no_mod;
+	}
+
 	if (!mca_recover_range(ms->pmsa_iip)) {
-		if (r13 != sos->prev_IA64_KR_CURRENT) {
-			msg = "inconsistent previous current and r13";
-			goto no_mod;
-		}
 		if ((r12 - r13) >= KERNEL_STACK_SIZE) {
 			msg = "inconsistent r12 and r13";
 			goto no_mod;
@@ -1187,6 +1190,13 @@ all_in:
1187 * further MCA logging is enabled by clearing logs. 1190 * further MCA logging is enabled by clearing logs.
1188 * Monarch also has the duty of sending wakeup-IPIs to pull the 1191 * Monarch also has the duty of sending wakeup-IPIs to pull the
1189 * slave processors out of rendezvous spinloop. 1192 * slave processors out of rendezvous spinloop.
1193 *
1194 * If multiple processors call into OS_MCA, the first will become
1195 * the monarch. Subsequent cpus will be recorded in the mca_cpu
1196 * bitmask. After the first monarch has processed its MCA, it
1197 * will wake up the next cpu in the mca_cpu bitmask and then go
1198 * into the rendezvous loop. When all processors have serviced
1199 * their MCA, the last monarch frees up the rest of the processors.
1190 */ 1200 */
1191void 1201void
1192ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw, 1202ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
@@ -1196,16 +1206,32 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
 	struct task_struct *previous_current;
 	struct ia64_mca_notify_die nd =
 		{ .sos = sos, .monarch_cpu = &monarch_cpu };
+	static atomic_t mca_count;
+	static cpumask_t mca_cpu;
 
+	if (atomic_add_return(1, &mca_count) == 1) {
+		monarch_cpu = cpu;
+		sos->monarch = 1;
+	} else {
+		cpu_set(cpu, mca_cpu);
+		sos->monarch = 0;
+	}
 	mprintk(KERN_INFO "Entered OS MCA handler. PSP=%lx cpu=%d "
 		"monarch=%ld\n", sos->proc_state_param, cpu, sos->monarch);
 
 	previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "MCA");
-	monarch_cpu = cpu;
+
 	if (notify_die(DIE_MCA_MONARCH_ENTER, "MCA", regs, (long)&nd, 0, 0)
 			== NOTIFY_STOP)
 		ia64_mca_spin(__FUNCTION__);
-	ia64_wait_for_slaves(cpu, "MCA");
+	if (sos->monarch) {
+		ia64_wait_for_slaves(cpu, "MCA");
+	} else {
+		ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_CONCURRENT_MCA;
+		while (cpu_isset(cpu, mca_cpu))
+			cpu_relax();	/* spin until monarch wakes us */
+		ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
+	}
 
 	/* Wakeup all the processors which are spinning in the rendezvous loop.
 	 * They will leave SAL, then spin in the OS with interrupts disabled
@@ -1244,6 +1270,26 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
 			== NOTIFY_STOP)
 		ia64_mca_spin(__FUNCTION__);
 
+
+	if (atomic_dec_return(&mca_count) > 0) {
+		int i;
+
+		/* wake up the next monarch cpu,
+		 * and put this cpu in the rendez loop.
+		 */
+		ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_CONCURRENT_MCA;
+		for_each_online_cpu(i) {
+			if (cpu_isset(i, mca_cpu)) {
+				monarch_cpu = i;
+				cpu_clear(i, mca_cpu);	/* wake next cpu */
+				while (monarch_cpu != -1)
+					cpu_relax();	/* spin until last cpu leaves */
+				ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
+				set_curr_task(cpu, previous_current);
+				return;
+			}
+		}
+	}
 	set_curr_task(cpu, previous_current);
 	monarch_cpu = -1;
 }
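
The hunks above replace a global lock with a monarch hand-off: the first cpu into OS_MCA becomes monarch, later arrivals park themselves in mca_cpu, and each finishing monarch wakes the next. As a rough illustration only — not kernel code — here is a user-space C11 analogue of that protocol, with threads standing in for CPUs, an atomic word standing in for cpumask_t, and the SAL rendezvous bookkeeping (imi_rendez_checkin) omitted; all names are invented for the sketch:

/* build: cc -pthread mca_demo.c */
#include <stdatomic.h>
#include <stdio.h>
#include <pthread.h>
#include <sched.h>

#define NCPUS 4

static atomic_int   mca_count;          /* models the static atomic_t  */
static atomic_ulong mca_cpu;            /* models the mca_cpu bitmask  */
static atomic_int   monarch_cpu = -1;

static void handle_mca(int cpu)
{
	int monarch = (atomic_fetch_add(&mca_count, 1) == 0);

	if (monarch) {
		atomic_store(&monarch_cpu, cpu);
	} else {
		/* register, then spin until a monarch clears our bit */
		atomic_fetch_or(&mca_cpu, 1UL << cpu);
		while (atomic_load(&mca_cpu) & (1UL << cpu))
			sched_yield();
	}

	printf("cpu %d servicing its MCA\n", cpu);

	if (atomic_fetch_sub(&mca_count, 1) - 1 > 0) {
		/* another cpu is registered (or about to be): wait for its
		 * bit, hand over monarchy, spin until the last cpu leaves */
		unsigned long m;
		int next = 0;

		while ((m = atomic_load(&mca_cpu)) == 0)
			sched_yield();
		while (!(m & (1UL << next)))
			next++;
		atomic_store(&monarch_cpu, next);
		atomic_fetch_and(&mca_cpu, ~(1UL << next)); /* wake next cpu */
		while (atomic_load(&monarch_cpu) != -1)
			sched_yield();
	} else {
		atomic_store(&monarch_cpu, -1);	/* last cpu frees everyone */
	}
}

static void *cpu_thread(void *arg)
{
	handle_mca((int)(long)arg);
	return NULL;
}

int main(void)
{
	pthread_t t[NCPUS];

	for (long i = 0; i < NCPUS; i++)
		pthread_create(&t[i], NULL, cpu_thread, (void *)i);
	for (int i = 0; i < NCPUS; i++)
		pthread_join(t[i], NULL);
	return 0;
}

The design point the sketch captures is that serialization is preserved — exactly one cpu is in the body at a time — but a second concurrent MCA no longer deadlocks waiting on a lock its holder can never release.
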
diff --git a/arch/ia64/kernel/mca_asm.S b/arch/ia64/kernel/mca_asm.S
index 8c9c26aa6ae0..0f5965fcdf85 100644
--- a/arch/ia64/kernel/mca_asm.S
+++ b/arch/ia64/kernel/mca_asm.S
@@ -133,14 +133,6 @@ ia64_do_tlb_purge:
133//StartMain//////////////////////////////////////////////////////////////////// 133//StartMain////////////////////////////////////////////////////////////////////
134 134
135ia64_os_mca_dispatch: 135ia64_os_mca_dispatch:
136 // Serialize all MCA processing
137 mov r3=1;;
138 LOAD_PHYSICAL(p0,r2,ia64_mca_serialize);;
139ia64_os_mca_spin:
140 xchg4 r4=[r2],r3;;
141 cmp.ne p6,p0=r4,r0
142(p6) br ia64_os_mca_spin
143
144 mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET // use the MCA stack 136 mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET // use the MCA stack
145 LOAD_PHYSICAL(p0,r2,1f) // return address 137 LOAD_PHYSICAL(p0,r2,1f) // return address
146 mov r19=1 // All MCA events are treated as monarch (for now) 138 mov r19=1 // All MCA events are treated as monarch (for now)
@@ -291,10 +283,6 @@ END(ia64_os_mca_virtual_begin)
291 283
292 mov b0=r12 // SAL_CHECK return address 284 mov b0=r12 // SAL_CHECK return address
293 285
294 // release lock
295 LOAD_PHYSICAL(p0,r3,ia64_mca_serialize);;
296 st4.rel [r3]=r0
297
298 br b0 286 br b0
299 287
300//EndMain////////////////////////////////////////////////////////////////////// 288//EndMain//////////////////////////////////////////////////////////////////////
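
For contrast, the serialization deleted here was a bare test-and-set spinlock held across all of OS_MCA: xchg4 atomically swaps 1 into ia64_mca_serialize and the code loops while the old value was nonzero; st4.rel releases it with a store of 0. A C11 sketch of the same idea, with names invented for illustration:

#include <stdatomic.h>

static atomic_uint mca_serialize;	/* 0 = free, 1 = held */

static void mca_lock(void)
{
	/* xchg4 r4=[r2],r3 with r3=1; branch back while r4 != 0 */
	while (atomic_exchange(&mca_serialize, 1) != 0)
		;
}

static void mca_unlock(void)
{
	/* st4.rel [r3]=r0: store 0 with release semantics */
	atomic_store_explicit(&mca_serialize, 0, memory_order_release);
}

With the monarch protocol in mca.c now doing the serialization, there is no longer any need to hold a single global lock for the duration of the handler.
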
diff --git a/arch/ia64/kernel/mca_drv_asm.S b/arch/ia64/kernel/mca_drv_asm.S
index f2d4900751ba..3bccb06c8d21 100644
--- a/arch/ia64/kernel/mca_drv_asm.S
+++ b/arch/ia64/kernel/mca_drv_asm.S
@@ -40,7 +40,11 @@ GLOBAL_ENTRY(mca_handler_bhhook)
 	mov		b6=loc1
 	;;
 	mov		loc1=rp
-	ssm		psr.i | psr.ic
+	ssm		psr.ic
+	;;
+	srlz.i
+	;;
+	ssm		psr.i
 	br.call.sptk.many rp=b6		// does not return ...
 	;;
 	mov		ar.pfs=loc0
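
The reason for the longer sequence is evidently ordering: a write to psr.ic does not take effect until an instruction serialize (srlz.i), so setting psr.i and psr.ic in a single ssm could unmask interrupts before interruption collection is actually live. The new code enables collection first, serializes, and only then sets psr.i.
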
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index af73b8dfde28..fa40cba43350 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -513,7 +513,8 @@ copy_thread (int nr, unsigned long clone_flags,
513static void 513static void
514do_copy_task_regs (struct task_struct *task, struct unw_frame_info *info, void *arg) 514do_copy_task_regs (struct task_struct *task, struct unw_frame_info *info, void *arg)
515{ 515{
516 unsigned long mask, sp, nat_bits = 0, ip, ar_rnat, urbs_end, cfm; 516 unsigned long mask, sp, nat_bits = 0, ar_rnat, urbs_end, cfm;
517 unsigned long uninitialized_var(ip); /* GCC be quiet */
517 elf_greg_t *dst = arg; 518 elf_greg_t *dst = arg;
518 struct pt_regs *pt; 519 struct pt_regs *pt;
519 char nat; 520 char nat;
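
The uninitialized_var() wrapper silences a false "may be used uninitialized" warning on ip alone, without disabling the warning for the rest of the function. At the time the kernel defined it (in include/linux/compiler-gcc.h) roughly as a self-assignment:

#define uninitialized_var(x) x = x

/* so the declaration in the hunk above expands to: */
unsigned long ip = ip;	/* looks initialized to GCC, generates no code */
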
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index eaa6a24bc0b6..188fb73c6845 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -805,7 +805,6 @@ static void __cpuinit
805get_max_cacheline_size (void) 805get_max_cacheline_size (void)
806{ 806{
807 unsigned long line_size, max = 1; 807 unsigned long line_size, max = 1;
808 unsigned int cache_size = 0;
809 u64 l, levels, unique_caches; 808 u64 l, levels, unique_caches;
810 pal_cache_config_info_t cci; 809 pal_cache_config_info_t cci;
811 s64 status; 810 s64 status;
@@ -835,8 +834,6 @@ get_max_cacheline_size (void)
835 line_size = 1 << cci.pcci_line_size; 834 line_size = 1 << cci.pcci_line_size;
836 if (line_size > max) 835 if (line_size > max)
837 max = line_size; 836 max = line_size;
838 if (cache_size < cci.pcci_cache_size)
839 cache_size = cci.pcci_cache_size;
840 if (!cci.pcci_unified) { 837 if (!cci.pcci_unified) {
841 status = ia64_pal_cache_config_info(l, 838 status = ia64_pal_cache_config_info(l,
842 /* cache_type (instruction)= */ 1, 839 /* cache_type (instruction)= */ 1,
@@ -853,9 +850,6 @@ get_max_cacheline_size (void)
853 ia64_i_cache_stride_shift = cci.pcci_stride; 850 ia64_i_cache_stride_shift = cci.pcci_stride;
854 } 851 }
855 out: 852 out:
856#ifdef CONFIG_SMP
857 max_cache_size = max(max_cache_size, cache_size);
858#endif
859 if (max > ia64_max_cacheline_size) 853 if (max > ia64_max_cacheline_size)
860 ia64_max_cacheline_size = max; 854 ia64_max_cacheline_size = max;
861} 855}
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index a06667c7acc0..3486fe7d6e65 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -18,7 +18,6 @@
18#include <linux/time.h> 18#include <linux/time.h>
19#include <linux/interrupt.h> 19#include <linux/interrupt.h>
20#include <linux/efi.h> 20#include <linux/efi.h>
21#include <linux/profile.h>
22#include <linux/timex.h> 21#include <linux/timex.h>
23 22
24#include <asm/machvec.h> 23#include <asm/machvec.h>
diff --git a/arch/ia64/kernel/unwind.c b/arch/ia64/kernel/unwind.c
index b0b08b5f3eca..c1bdb5131814 100644
--- a/arch/ia64/kernel/unwind.c
+++ b/arch/ia64/kernel/unwind.c
@@ -1856,11 +1856,19 @@ find_save_locs (struct unw_frame_info *info)
 	return 0;
 }
 
+static int
+unw_valid(const struct unw_frame_info *info, unsigned long* p)
+{
+	unsigned long loc = (unsigned long)p;
+	return (loc >= info->regstk.limit && loc < info->regstk.top) ||
+	       (loc >= info->memstk.top && loc < info->memstk.limit);
+}
+
 int
 unw_unwind (struct unw_frame_info *info)
 {
 	unsigned long prev_ip, prev_sp, prev_bsp;
-	unsigned long ip, pr, num_regs, rp_loc, pfs_loc;
+	unsigned long ip, pr, num_regs;
 	STAT(unsigned long start, flags;)
 	int retval;
 
@@ -1871,8 +1879,7 @@ unw_unwind (struct unw_frame_info *info)
 	prev_bsp = info->bsp;
 
 	/* validate the return IP pointer */
-	rp_loc = (unsigned long) info->rp_loc;
-	if ((rp_loc < info->regstk.limit) || (rp_loc > info->regstk.top)) {
+	if (!unw_valid(info, info->rp_loc)) {
 		/* FIXME: should really be level 0 but it occurs too often. KAO */
 		UNW_DPRINT(1, "unwind.%s: failed to locate return link (ip=0x%lx)!\n",
 			   __FUNCTION__, info->ip);
@@ -1888,8 +1895,7 @@ unw_unwind (struct unw_frame_info *info)
 	}
 
 	/* validate the previous stack frame pointer */
-	pfs_loc = (unsigned long) info->pfs_loc;
-	if ((pfs_loc < info->regstk.limit) || (pfs_loc > info->regstk.top)) {
+	if (!unw_valid(info, info->pfs_loc)) {
		UNW_DPRINT(0, "unwind.%s: failed to locate ar.pfs!\n", __FUNCTION__);
 		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
 		return -1;
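
Two things change in the validation: the old checks tested only the register backing store, with an inclusive upper bound (rejecting loc > top rather than loc >= top), and they ignored that a saved rp or ar.pfs location can equally well sit on the memory stack. A standalone model of the new check, with made-up addresses (note the memory stack grows down, so its top is the low address and its limit the high one):

#include <assert.h>

struct stk { unsigned long limit, top; };

static int valid(struct stk regstk, struct stk memstk, unsigned long loc)
{
	return (loc >= regstk.limit && loc < regstk.top) ||
	       (loc >= memstk.top  && loc < memstk.limit);
}

int main(void)
{
	struct stk regstk = { 0x1000, 0x2000 };	/* backing store: grows up  */
	struct stk memstk = { 0x8000, 0x7000 };	/* memory stack: grows down */

	assert(valid(regstk, memstk, 0x1800));	/* inside the backing store */
	assert(valid(regstk, memstk, 0x7800));	/* inside the memory stack  */
	assert(!valid(regstk, memstk, 0x2000));	/* top is now exclusive     */
	return 0;
}
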