author     Chris Metcalf <cmetcalf@tilera.com>  2010-08-13 08:52:19 -0400
committer  Chris Metcalf <cmetcalf@tilera.com>  2010-08-13 08:52:19 -0400
commit     c745a8a11fa1df6078bfc61fc29492ed43f71c2b (patch)
tree       2db1cdf9cd0d0e892f4f92de1fd2700ac319f04a /arch/tile/include
parent     1fcbe027b5d29ec9cd0eeb753c14fb366ae852ac (diff)
arch/tile: Various cleanups.
This change rolls up miscellaneous cleanups that do not represent actual bug fixes.
- Remove a stale CONFIG_ value from the default tile_defconfig
- Remove unused tns_atomic_xxx() family of methods from <asm/atomic.h>
- Optimize get_order() using Tile's "clz" instruction
- Fix a bad hypervisor upcall name (not currently used in Linux anyway)
- Use __copy_in_user_inatomic() name for consistency, and export it
- Export some additional hypervisor driver I/O upcalls and some homecache calls
- Remove the obfuscating MEMCPY_TEST_WH64 support code
- Other stray comment cleanups, #if 0 removal, etc.
Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
Diffstat (limited to 'arch/tile/include')
-rw-r--r--  arch/tile/include/asm/atomic_32.h  | 37
-rw-r--r--  arch/tile/include/asm/page.h       |  6
-rw-r--r--  arch/tile/include/asm/uaccess.h    |  4
-rw-r--r--  arch/tile/include/hv/hypervisor.h  |  8
4 files changed, 11 insertions, 44 deletions
diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h
index 40a5a3a876d9..ed359aee8837 100644
--- a/arch/tile/include/asm/atomic_32.h
+++ b/arch/tile/include/asm/atomic_32.h
@@ -255,43 +255,6 @@ static inline void atomic64_set(atomic64_t *v, u64 n)
 #define smp_mb__after_atomic_dec() do { } while (0)
 #define smp_mb__after_atomic_inc() do { } while (0)
 
-
-/*
- * Support "tns" atomic integers. These are atomic integers that can
- * hold any value but "1". They are more efficient than regular atomic
- * operations because the "lock" (aka acquire) step is a single "tns"
- * in the uncontended case, and the "unlock" (aka release) step is a
- * single "store" without an mf. (However, note that on tilepro the
- * "tns" will evict the local cache line, so it's not all upside.)
- *
- * Note that you can ONLY observe the value stored in the pointer
- * using these operations; a direct read of the value may confusingly
- * return the special value "1".
- */
-
-int __tns_atomic_acquire(atomic_t *);
-void __tns_atomic_release(atomic_t *p, int v);
-
-static inline void tns_atomic_set(atomic_t *v, int i)
-{
-	__tns_atomic_acquire(v);
-	__tns_atomic_release(v, i);
-}
-
-static inline int tns_atomic_cmpxchg(atomic_t *v, int o, int n)
-{
-	int ret = __tns_atomic_acquire(v);
-	__tns_atomic_release(v, (ret == o) ? n : ret);
-	return ret;
-}
-
-static inline int tns_atomic_xchg(atomic_t *v, int n)
-{
-	int ret = __tns_atomic_acquire(v);
-	__tns_atomic_release(v, n);
-	return ret;
-}
-
 #endif /* !__ASSEMBLY__ */
 
 /*
diff --git a/arch/tile/include/asm/page.h b/arch/tile/include/asm/page.h
index f894a9016da6..7d90641cf18d 100644
--- a/arch/tile/include/asm/page.h
+++ b/arch/tile/include/asm/page.h
@@ -129,6 +129,11 @@ static inline u64 pmd_val(pmd_t pmd)
 
 #endif
 
+static inline __attribute_const__ int get_order(unsigned long size)
+{
+	return BITS_PER_LONG - __builtin_clzl((size - 1) >> PAGE_SHIFT);
+}
+
 #endif /* !__ASSEMBLY__ */
 
 #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
@@ -332,7 +337,6 @@ extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
 	(VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
 
 #include <asm-generic/memory_model.h>
-#include <asm-generic/getorder.h>
 
 #endif /* __KERNEL__ */
 
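A note on the get_order() change above: the clz-based expression gives the same result as a straightforward shift-and-count loop (in the spirit of the generic <asm-generic/getorder.h> this hunk stops including) for any size larger than one page. The following is a minimal user-space sketch, not kernel code: the PAGE_SHIFT value of 16, the test sizes, and the helper names get_order_clz()/get_order_loop() are assumptions chosen only for illustration.

/*
 * Illustrative sketch only -- not part of the commit.  It checks that
 * the clz-based get_order() added to <asm/page.h> above agrees with a
 * loop-based reference for sizes larger than one page.
 * PAGE_SHIFT = 16 is an assumed value for this example.
 */
#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT	16
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define BITS_PER_LONG	((int)(8 * sizeof(unsigned long)))

/* clz-based version, mirroring the expression added in this change */
static int get_order_clz(unsigned long size)
{
	return BITS_PER_LONG - __builtin_clzl((size - 1) >> PAGE_SHIFT);
}

/* loop-based reference, in the spirit of the generic implementation */
static int get_order_loop(unsigned long size)
{
	int order = 0;

	size = (size - 1) >> PAGE_SHIFT;
	while (size) {
		size >>= 1;
		order++;
	}
	return order;
}

int main(void)
{
	/*
	 * __builtin_clzl(0) is undefined in portable C, so only sizes
	 * strictly greater than PAGE_SIZE are exercised here; the
	 * in-kernel version relies on the architecture's clz behavior
	 * for the one-page case.
	 */
	unsigned long sizes[] = {
		PAGE_SIZE + 1, 2 * PAGE_SIZE, 4 * PAGE_SIZE,
		4 * PAGE_SIZE + 1, 100 * PAGE_SIZE,
	};
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		assert(get_order_clz(sizes[i]) == get_order_loop(sizes[i]));
		printf("get_order(%lu) = %d\n", sizes[i],
		       get_order_clz(sizes[i]));
	}
	return 0;
}

The payoff of the change is that the loop and the generic header dependency are replaced by a single subtract plus one "clz" instruction on tile.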
diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
index ed17a80ec0ed..ef34d2caa5b1 100644
--- a/arch/tile/include/asm/uaccess.h
+++ b/arch/tile/include/asm/uaccess.h
@@ -389,14 +389,14 @@ static inline unsigned long __must_check copy_from_user(void *to,
  * Returns number of bytes that could not be copied.
  * On success, this will be zero.
  */
-extern unsigned long __copy_in_user_asm(
+extern unsigned long __copy_in_user_inatomic(
 	void __user *to, const void __user *from, unsigned long n);
 
 static inline unsigned long __must_check
 __copy_in_user(void __user *to, const void __user *from, unsigned long n)
 {
 	might_sleep();
-	return __copy_in_user_asm(to, from, n);
+	return __copy_in_user_inatomic(to, from, n);
 }
 
 static inline unsigned long __must_check
diff --git a/arch/tile/include/hv/hypervisor.h b/arch/tile/include/hv/hypervisor.h
index 59b46dc53994..9bd303a141b2 100644
--- a/arch/tile/include/hv/hypervisor.h
+++ b/arch/tile/include/hv/hypervisor.h
@@ -532,11 +532,11 @@ void hv_disable_intr(HV_IntrMask disab_mask);
  */
 void hv_clear_intr(HV_IntrMask clear_mask);
 
-/** Assert a set of device interrupts.
+/** Raise a set of device interrupts.
  *
- * @param assert_mask Bitmap of interrupts to clear.
+ * @param raise_mask Bitmap of interrupts to raise.
  */
-void hv_assert_intr(HV_IntrMask assert_mask);
+void hv_raise_intr(HV_IntrMask raise_mask);
 
 /** Trigger a one-shot interrupt on some tile
  *
@@ -1712,7 +1712,7 @@ typedef struct
  * @param cache_control This argument allows you to specify a length of
  *   physical address space to flush (maximum HV_FLUSH_MAX_CACHE_LEN).
  *   You can "or" in HV_FLUSH_EVICT_L2 to flush the whole L2 cache.
- *   You can "or" in HV_FLUSH_EVICT_LI1 to flush the whole LII cache.
+ *   You can "or" in HV_FLUSH_EVICT_L1I to flush the whole L1I cache.
  *   HV_FLUSH_ALL flushes all caches.
  * @param cache_cpumask Bitmask (in row-major order, supervisor-relative) of
  *   tile indices to perform cache flush on. The low bit of the first