Diffstat (limited to 'arch/mips/kernel/gdb-stub.c')
-rw-r--r--  arch/mips/kernel/gdb-stub.c  23
1 file changed, 14 insertions, 9 deletions
diff --git a/arch/mips/kernel/gdb-stub.c b/arch/mips/kernel/gdb-stub.c
index d3fd1ab14274..96d18c43dca0 100644
--- a/arch/mips/kernel/gdb-stub.c
+++ b/arch/mips/kernel/gdb-stub.c
@@ -176,8 +176,10 @@ int kgdb_enabled;
 /*
  * spin locks for smp case
  */
-static spinlock_t kgdb_lock = SPIN_LOCK_UNLOCKED;
-static spinlock_t kgdb_cpulock[NR_CPUS] = { [0 ... NR_CPUS-1] = SPIN_LOCK_UNLOCKED};
+static DEFINE_SPINLOCK(kgdb_lock);
+static raw_spinlock_t kgdb_cpulock[NR_CPUS] = {
+	[0 ... NR_CPUS-1] = __RAW_SPIN_LOCK_UNLOCKED
+};
 
 /*
  * BUFMAX defines the maximum number of characters in inbound/outbound buffers
@@ -637,29 +639,32 @@ static struct gdb_bp_save async_bp;
  * and only one can be active at a time.
  */
 extern spinlock_t smp_call_lock;
+
 void set_async_breakpoint(unsigned long *epc)
 {
 	/* skip breaking into userland */
 	if ((*epc & 0x80000000) == 0)
 		return;
 
+#ifdef CONFIG_SMP
 	/* avoid deadlock if someone is make IPC */
 	if (spin_is_locked(&smp_call_lock))
 		return;
+#endif
 
 	async_bp.addr = *epc;
 	*epc = (unsigned long)async_breakpoint;
 }
 
-void kgdb_wait(void *arg)
+static void kgdb_wait(void *arg)
 {
 	unsigned flags;
 	int cpu = smp_processor_id();
 
 	local_irq_save(flags);
 
-	spin_lock(&kgdb_cpulock[cpu]);
-	spin_unlock(&kgdb_cpulock[cpu]);
+	__raw_spin_lock(&kgdb_cpulock[cpu]);
+	__raw_spin_unlock(&kgdb_cpulock[cpu]);
 
 	local_irq_restore(flags);
 }
@@ -707,7 +712,7 @@ void handle_exception (struct gdb_regs *regs)
 	 * acquire the CPU spinlocks
 	 */
 	for (i = num_online_cpus()-1; i >= 0; i--)
-		if (spin_trylock(&kgdb_cpulock[i]) == 0)
+		if (__raw_spin_trylock(&kgdb_cpulock[i]) == 0)
 			panic("kgdb: couldn't get cpulock %d\n", i);
 
 	/*
@@ -982,7 +987,7 @@ finish_kgdb:
 exit_kgdb_exception:
 	/* release locks so other CPUs can go */
 	for (i = num_online_cpus()-1; i >= 0; i--)
-		spin_unlock(&kgdb_cpulock[i]);
+		__raw_spin_unlock(&kgdb_cpulock[i]);
 	spin_unlock(&kgdb_lock);
 
 	__flush_cache_all();
@@ -1036,12 +1041,12 @@ void adel(void)
  * malloc is needed by gdb client in "call func()", even a private one
  * will make gdb happy
  */
-static void *malloc(size_t size)
+static void * __attribute_used__ malloc(size_t size)
 {
 	return kmalloc(size, GFP_ATOMIC);
 }
 
-static void free(void *where)
+static void __attribute_used__ free (void *where)
 {
 	kfree(where);
 }
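
For context, the locking pattern this change converges on — one statically initialized global lock plus an array of per-CPU raw spinlocks used as a simple barrier — is sketched below. The sketch is not part of the commit; the example_* names are made up for illustration, it only reuses identifiers visible in the hunks above (DEFINE_SPINLOCK, raw_spinlock_t, __RAW_SPIN_LOCK_UNLOCKED and the __raw_spin_* helpers), and the exact macro signatures depend on the kernel version this tree is based on.

#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/threads.h>	/* NR_CPUS */

/* one global lock for the stub, one raw lock per CPU */
static DEFINE_SPINLOCK(example_lock);
static raw_spinlock_t example_cpulock[NR_CPUS] = {
	[0 ... NR_CPUS-1] = __RAW_SPIN_LOCK_UNLOCKED
};

/* master CPU: serialize against other masters, then pin every online CPU */
static void example_stop_cpus(void)
{
	int i;

	spin_lock(&example_lock);
	for (i = num_online_cpus() - 1; i >= 0; i--)
		if (__raw_spin_trylock(&example_cpulock[i]) == 0)
			panic("example: couldn't get cpulock %d\n", i);
}

/* other CPUs: spin here until the master releases this CPU's lock */
static void example_wait(void)
{
	int cpu = smp_processor_id();

	__raw_spin_lock(&example_cpulock[cpu]);
	__raw_spin_unlock(&example_cpulock[cpu]);
}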