author		Vineet Gupta <vgupta@synopsys.com>	2015-08-19 07:53:58 -0400
committer	Vineet Gupta <vgupta@synopsys.com>	2015-08-20 09:35:49 -0400
commit		090749502ff20d7d9ec244036fe636b6bf0433b6 (patch)
tree		52ce66c6c0a525b4a9eff0ee5917c52f72479ead /arch/arc/include
parent		6de6066c0d24a66df465cf87a4041ef7ef35ba6f (diff)
ARC: add/fix some comments in code - no functional change
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
Diffstat (limited to 'arch/arc/include')
-rw-r--r--	arch/arc/include/asm/cmpxchg.h		22
-rw-r--r--	arch/arc/include/asm/perf_event.h	2
2 files changed, 12 insertions, 12 deletions
diff --git a/arch/arc/include/asm/cmpxchg.h b/arch/arc/include/asm/cmpxchg.h
index 44fd531f4d7b..af7a2db139c9 100644
--- a/arch/arc/include/asm/cmpxchg.h
+++ b/arch/arc/include/asm/cmpxchg.h
@@ -110,18 +110,18 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
 	sizeof(*(ptr))))
 
 /*
- * On ARC700, EX insn is inherently atomic, so by default "vanilla" xchg() need
- * not require any locking. However there's a quirk.
- * ARC lacks native CMPXCHG, thus emulated (see above), using external locking -
- * incidently it "reuses" the same atomic_ops_lock used by atomic APIs.
- * Now, llist code uses cmpxchg() and xchg() on same data, so xchg() needs to
- * abide by same serializing rules, thus ends up using atomic_ops_lock as well.
+ * xchg() maps directly to the ARC EX instruction, which guarantees atomicity.
+ * However, in the !LLSC config it also needs to use the @atomic_ops_lock
+ * spinlock, due to a subtle reason:
+ * - For !LLSC, cmpxchg() needs to use that lock (see above), and a lot of
+ *   kernel code calls xchg()/cmpxchg() on the same data (see llist.h).
+ *   Hence xchg() needs to follow the same locking rules.
  *
- * This however is only relevant if SMP and/or ARC lacks LLSC
- *   if (UP or LLSC)
- *      xchg doesn't need serialization
- *   else <==> !(UP or LLSC) <==> (!UP and !LLSC) <==> (SMP and !LLSC)
- *      xchg needs serialization
+ * Technically the lock is also needed for UP (it boils down to irq
+ * save/restore), but we can cheat a bit: cmpxchg()'s atomic_ops_lock() causes
+ * irqs to be disabled, so it can't possibly be interrupted/preempted/clobbered
+ * by xchg(). The other way around, xchg() is a single instruction anyway, so
+ * it can't be interrupted as such.
  */
 
 #if !defined(CONFIG_ARC_HAS_LLSC) && defined(CONFIG_SMP)
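The reasoning in the new comment is easiest to see in code. Below is a minimal userspace sketch, assuming pthreads: `emu_cmpxchg()`, `emu_xchg()`, and the mutex name are illustrative stand-ins, not the kernel implementation. It shows why, once cmpxchg() is emulated with a lock, xchg() on the same data must take that lock too.

```c
/* Minimal userspace sketch, NOT the kernel code: emulate cmpxchg() under a
 * lock, the way the !LLSC ARC config does, and show why xchg() on the same
 * data must take that lock too. Build with: cc -pthread sketch.c */
#include <pthread.h>

static pthread_mutex_t atomic_ops_lock = PTHREAD_MUTEX_INITIALIZER;

/* cmpxchg emulated as lock + read-modify-write, since there is no native
 * atomic compare-and-exchange instruction to lean on */
static unsigned long emu_cmpxchg(volatile unsigned long *ptr,
				 unsigned long expected, unsigned long new)
{
	unsigned long prev;

	pthread_mutex_lock(&atomic_ops_lock);
	prev = *ptr;
	if (prev == expected)
		*ptr = new;
	pthread_mutex_unlock(&atomic_ops_lock);

	return prev;
}

/* Even though a bare swap could be a single atomic instruction (EX on ARC),
 * xchg() must take the same lock: otherwise its store could land between
 * emu_cmpxchg()'s read and its write, and one update would be silently lost. */
static unsigned long emu_xchg(volatile unsigned long *ptr, unsigned long val)
{
	unsigned long prev;

	pthread_mutex_lock(&atomic_ops_lock);
	prev = *ptr;
	*ptr = val;
	pthread_mutex_unlock(&atomic_ops_lock);

	return prev;
}
```

This also mirrors the UP argument in the comment: holding the lock keeps the cmpxchg critical section from being interleaved with the swap, while a single-instruction swap cannot itself observe a half-done cmpxchg.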
diff --git a/arch/arc/include/asm/perf_event.h b/arch/arc/include/asm/perf_event.h
index 2b8880e953a2..e2eaf6fb0468 100644
--- a/arch/arc/include/asm/perf_event.h
+++ b/arch/arc/include/asm/perf_event.h
@@ -95,7 +95,7 @@ static const char * const arc_pmu_ev_hw_map[] = {
 
 	/* counts condition */
 	[PERF_COUNT_HW_INSTRUCTIONS] = "iall",
-	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmp",
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmp", /* Excludes ZOL jumps */
 	[PERF_COUNT_ARC_BPOK] = "bpok",		/* NP-NT, PT-T, PNT-NT */
 	[PERF_COUNT_HW_BRANCH_MISSES] = "bpfail", /* NP-T, PT-NT, PNT-T */
 
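For context, a table like arc_pmu_ev_hw_map is simply a generic-perf-event-id to hardware-condition-name mapping. The sketch below shows one hypothetical way such a table could be consumed, resolving the name to a hardware condition index by string match; the table contents, `hw_cond_names`, and `find_hw_condition()` are illustrative assumptions, not the actual ARC PMU driver.

```c
/* Hypothetical sketch of consuming a map like arc_pmu_ev_hw_map: resolve a
 * generic perf event id to a hardware condition index by name. All names
 * and table contents here are illustrative, not the ARC driver. */
#include <string.h>

enum { EV_INSTRUCTIONS, EV_BRANCHES, EV_BRANCH_MISSES, EV_MAX };

static const char * const ev_hw_map[EV_MAX] = {
	[EV_INSTRUCTIONS]  = "iall",
	[EV_BRANCHES]      = "ijmp",	/* excludes ZOL jumps */
	[EV_BRANCH_MISSES] = "bpfail",	/* NP-T, PT-NT, PNT-T */
};

/* Names the hardware reports for its countable conditions (illustrative) */
static const char * const hw_cond_names[] = { "iall", "ijmp", "bpok", "bpfail" };

/* Return the hardware condition index backing a generic event, or -1 */
static int find_hw_condition(int ev)
{
	unsigned int i;

	if (ev < 0 || ev >= EV_MAX || !ev_hw_map[ev])
		return -1;

	for (i = 0; i < sizeof(hw_cond_names) / sizeof(hw_cond_names[0]); i++)
		if (!strcmp(hw_cond_names[i], ev_hw_map[ev]))
			return i;

	return -1;
}
```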