Diffstat (limited to 'arch/sparc64/kernel')
-rw-r--r--  arch/sparc64/kernel/pci_sun4v.c      |  2
-rw-r--r--  arch/sparc64/kernel/ptrace.c         | 23
-rw-r--r--  arch/sparc64/kernel/setup.c          |  2
-rw-r--r--  arch/sparc64/kernel/smp.c            | 41
-rw-r--r--  arch/sparc64/kernel/sparc64_ksyms.c  |  1
-rw-r--r--  arch/sparc64/kernel/traps.c          |  4
6 files changed, 45 insertions, 28 deletions
diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c
index 9372d4f376d5..9e94db2573a2 100644
--- a/arch/sparc64/kernel/pci_sun4v.c
+++ b/arch/sparc64/kernel/pci_sun4v.c
@@ -1092,7 +1092,7 @@ void sun4v_pci_init(int node, char *model_name)
 		}
 	}
 
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		unsigned long page = get_zeroed_page(GFP_ATOMIC);
 
 		if (!page)
diff --git a/arch/sparc64/kernel/ptrace.c b/arch/sparc64/kernel/ptrace.c
index eb93e9c52846..49e6dedd027d 100644
--- a/arch/sparc64/kernel/ptrace.c
+++ b/arch/sparc64/kernel/ptrace.c
@@ -244,6 +244,13 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
 	}
 
 	switch(request) {
+	case PTRACE_PEEKUSR:
+		if (addr != 0)
+			pt_error_return(regs, EIO);
+		else
+			pt_succ_return(regs, 0);
+		goto out_tsk;
+
 	case PTRACE_PEEKTEXT: /* read word at location addr. */
 	case PTRACE_PEEKDATA: {
 		unsigned long tmp64;
@@ -602,6 +609,22 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
 
 	/* PTRACE_DUMPCORE unsupported... */
 
+	case PTRACE_GETEVENTMSG: {
+		int err;
+
+		if (test_thread_flag(TIF_32BIT))
+			err = put_user(child->ptrace_message,
+				       (unsigned int __user *) data);
+		else
+			err = put_user(child->ptrace_message,
+				       (unsigned long __user *) data);
+		if (err)
+			pt_error_return(regs, -err);
+		else
+			pt_succ_return(regs, 0);
+		break;
+	}
+
 	default: {
 		int err = ptrace_request(child, request, addr, data);
 		if (err)
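
For context, the PTRACE_GETEVENTMSG case added above copies child->ptrace_message back to the tracer, using a 32-bit or 64-bit store to match the caller. The sketch below is a hypothetical userspace tracer fragment (report_forked_child and its flow are invented for illustration, not part of this patch); it assumes PTRACE_O_TRACEFORK was already set on the tracee, so the event message holds the new child's pid:

#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

/* Illustrative sketch only: assumes PTRACE_O_TRACEFORK was enabled earlier
 * and the tracee is about to stop with a fork event. */
static void report_forked_child(pid_t tracee)
{
	unsigned long msg = 0;
	int status;

	waitpid(tracee, &status, 0);                      /* wait for the event stop */
	if (ptrace(PTRACE_GETEVENTMSG, tracee, NULL, &msg) == 0)
		printf("new child pid: %lu\n", msg);      /* event message is the new pid */
	ptrace(PTRACE_CONT, tracee, NULL, NULL);          /* resume the tracee */
}
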
diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c
index 7d0e67c1ce50..005167f82419 100644
--- a/arch/sparc64/kernel/setup.c
+++ b/arch/sparc64/kernel/setup.c
@@ -535,7 +535,7 @@ static int __init topology_init(void)
 	while (!cpu_find_by_instance(ncpus_probed, NULL, NULL))
 		ncpus_probed++;
 
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		struct cpu *p = kzalloc(sizeof(*p), GFP_KERNEL);
 		if (p) {
 			register_cpu(p, i, NULL);
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 8175a6968c6b..90eaca3ec9a6 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -745,12 +745,21 @@ struct call_data_struct {
 	int wait;
 };
 
-static DEFINE_SPINLOCK(call_lock);
+static __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_lock);
 static struct call_data_struct *call_data;
 
 extern unsigned long xcall_call_function;
 
-/*
+/**
+ * smp_call_function(): Run a function on all other CPUs.
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @nonatomic: currently unused.
+ * @wait: If true, wait (atomically) until function has completed on other CPUs.
+ *
+ * Returns 0 on success, else a negative status code. Does not return until
+ * remote CPUs are nearly ready to execute <<func>> or are or have executed.
+ *
  * You must not call this function with disabled interrupts or from a
  * hardware interrupt handler or from a bottom half handler.
  */
@@ -759,7 +768,6 @@ static int smp_call_function_mask(void (*func)(void *info), void *info,
 {
 	struct call_data_struct data;
 	int cpus;
-	long timeout;
 
 	/* Can deadlock when called with interrupts disabled */
 	WARN_ON(irqs_disabled());
@@ -777,31 +785,18 @@ static int smp_call_function_mask(void (*func)(void *info), void *info,
 		goto out_unlock;
 
 	call_data = &data;
+	mb();
 
 	smp_cross_call_masked(&xcall_call_function, 0, 0, 0, mask);
 
-	/*
-	 * Wait for other cpus to complete function or at
-	 * least snap the call data.
-	 */
-	timeout = 1000000;
-	while (atomic_read(&data.finished) != cpus) {
-		if (--timeout <= 0)
-			goto out_timeout;
-		barrier();
-		udelay(1);
-	}
+	/* Wait for response */
+	while (atomic_read(&data.finished) != cpus)
+		cpu_relax();
 
 out_unlock:
 	spin_unlock(&call_lock);
 
 	return 0;
-
-out_timeout:
-	spin_unlock(&call_lock);
-	printk("XCALL: Remote cpus not responding, ncpus=%d finished=%d\n",
-	       cpus, atomic_read(&data.finished));
-	return 0;
 }
 
 int smp_call_function(void (*func)(void *info), void *info,
@@ -1285,7 +1280,7 @@ int setup_profiling_timer(unsigned int multiplier)
 		return -EINVAL;
 
 	spin_lock_irqsave(&prof_setup_lock, flags);
-	for_each_cpu(i)
+	for_each_possible_cpu(i)
 		prof_multiplier(i) = multiplier;
 	current_tick_offset = (timer_tick_offset / multiplier);
 	spin_unlock_irqrestore(&prof_setup_lock, flags);
@@ -1313,12 +1308,12 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 		}
 	}
 
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		if (tlb_type == hypervisor) {
 			int j;
 
 			/* XXX get this mapping from machine description */
-			for_each_cpu(j) {
+			for_each_possible_cpu(j) {
 				if ((j >> 2) == (i >> 2))
 					cpu_set(j, cpu_sibling_map[i]);
 			}
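
Beyond the for_each_possible_cpu() conversions, the smp.c hunks replace the bounded xcall wait with a cpu_relax() spin and add kernel-doc for smp_call_function(). The sketch below is illustrative only (flush_local_state and flush_all_state are invented names, not from the tree); it shows a caller honoring the documented contract: interrupts enabled, a fast non-blocking callback, and wait=1 to block until every other CPU has run it.

#include <linux/kernel.h>
#include <linux/smp.h>

/* Hypothetical callback: runs on each remote CPU from the cross-call handler. */
static void flush_local_state(void *unused)
{
	/* must be fast and non-blocking, per the kernel-doc added above */
}

static void flush_all_state(void)
{
	/* nonatomic is unused; wait=1 spins until data.finished == cpus */
	if (smp_call_function(flush_local_state, NULL, 0, 1))
		printk(KERN_WARNING "flush_all_state: cross call failed\n");
	flush_local_state(NULL);	/* the calling CPU is not included */
}
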
diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c
index f5e8db1de76b..62d8a99271ea 100644
--- a/arch/sparc64/kernel/sparc64_ksyms.c
+++ b/arch/sparc64/kernel/sparc64_ksyms.c
@@ -276,7 +276,6 @@ EXPORT_SYMBOL(__prom_getsibling);
 EXPORT_SYMBOL(strlen);
 EXPORT_SYMBOL(__strlen_user);
 EXPORT_SYMBOL(__strnlen_user);
-EXPORT_SYMBOL(strpbrk);
 
 #ifdef CONFIG_SOLARIS_EMUL_MODULE
 EXPORT_SYMBOL(linux_sparc_syscall);
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
index ff090bb9734b..2793a5d82380 100644
--- a/arch/sparc64/kernel/traps.c
+++ b/arch/sparc64/kernel/traps.c
@@ -1130,9 +1130,9 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
 	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
 	       afsr, afar,
 	       (afsr & CHAFSR_TL1) ? 1 : 0);
-	printk("%s" "ERROR(%d): TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
+	printk("%s" "ERROR(%d): TPC[%lx] TNPC[%lx] O7[%lx] TSTATE[%lx]\n",
 	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
-	       regs->tpc, regs->tnpc, regs->tstate);
+	       regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
 	printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
 	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
 	       (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,