Diffstat (limited to 'include/linux/vmw_vmci_defs.h')
 include/linux/vmw_vmci_defs.h | 43 +++++++++++++++++++++++++++++++++++++++----
 1 file changed, 39 insertions(+), 4 deletions(-)
diff --git a/include/linux/vmw_vmci_defs.h b/include/linux/vmw_vmci_defs.h
index 65ac54c61c18..1bd31a38c51e 100644
--- a/include/linux/vmw_vmci_defs.h
+++ b/include/linux/vmw_vmci_defs.h
@@ -734,6 +734,41 @@ static inline void *vmci_event_data_payload(struct vmci_event_data *ev_data)
 }
 
 /*
+ * Helper to read a value from a head or tail pointer. For X86_32, the
+ * pointer is treated as a 32bit value, since the pointer value
+ * never exceeds a 32bit value in this case. Also, doing an
+ * atomic64_read on X86_32 uniprocessor systems may be implemented
+ * as a non locked cmpxchg8b, that may end up overwriting updates done
+ * by the VMCI device to the memory location. On 32bit SMP, the lock
+ * prefix will be used, so correctness isn't an issue, but using a
+ * 64bit operation still adds unnecessary overhead.
+ */
+static inline u64 vmci_q_read_pointer(atomic64_t *var)
+{
+#if defined(CONFIG_X86_32)
+	return atomic_read((atomic_t *)var);
+#else
+	return atomic64_read(var);
+#endif
+}
+
+/*
+ * Helper to set the value of a head or tail pointer. For X86_32, the
+ * pointer is treated as a 32bit value, since the pointer value
+ * never exceeds a 32bit value in this case. On 32bit SMP, using a
+ * locked cmpxchg8b adds unnecessary overhead.
+ */
+static inline void vmci_q_set_pointer(atomic64_t *var,
+				       u64 new_val)
+{
+#if defined(CONFIG_X86_32)
+	return atomic_set((atomic_t *)var, (u32)new_val);
+#else
+	return atomic64_set(var, new_val);
+#endif
+}
+
+/*
  * Helper to add a given offset to a head or tail pointer. Wraps the
  * value of the pointer around the max size of the queue.
  */
@@ -741,14 +776,14 @@ static inline void vmci_qp_add_pointer(atomic64_t *var,
 				       size_t add,
 				       u64 size)
 {
-	u64 new_val = atomic64_read(var);
+	u64 new_val = vmci_q_read_pointer(var);
 
 	if (new_val >= size - add)
 		new_val -= size;
 
 	new_val += add;
 
-	atomic64_set(var, new_val);
+	vmci_q_set_pointer(var, new_val);
 }
 
 /*
@@ -758,7 +793,7 @@ static inline u64
 vmci_q_header_producer_tail(const struct vmci_queue_header *q_header)
 {
 	struct vmci_queue_header *qh = (struct vmci_queue_header *)q_header;
-	return atomic64_read(&qh->producer_tail);
+	return vmci_q_read_pointer(&qh->producer_tail);
 }
 
 /*
@@ -768,7 +803,7 @@ static inline u64
 vmci_q_header_consumer_head(const struct vmci_queue_header *q_header)
 {
 	struct vmci_queue_header *qh = (struct vmci_queue_header *)q_header;
-	return atomic64_read(&qh->consumer_head);
+	return vmci_q_read_pointer(&qh->consumer_head);
 }
 
 /*
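
For readers wondering why the (atomic_t *) cast in vmci_q_read_pointer() is safe: on little-endian x86 the low 32 bits of a 64-bit counter sit at the counter's own address, so a plain 32-bit load returns the full value whenever that value fits in 32 bits, which is exactly the property the patch comment relies on. The snippet below is only a userspace sketch of that aliasing property; it is not part of the patch and uses memcpy instead of the kernel's atomic accessors.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
        /* A queue pointer that, per the comment in the patch, never
         * exceeds 32 bits when running on X86_32. */
        uint64_t pointer = 0xdeadbeefULL;
        uint32_t low;

        /* Read only the first four bytes; on little-endian x86 these
         * are the low 32 bits, i.e. the whole value in this case. */
        memcpy(&low, &pointer, sizeof(low));

        printf("64-bit value: %#llx, 32-bit read: %#x\n",
               (unsigned long long)pointer, low);
        return 0;
}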
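
The vmci_qp_add_pointer() hunk above wraps the pointer at the queue size; for in-range inputs the result is (val + add) mod size. Below is a minimal standalone sketch of that arithmetic without the atomics; the queue size and starting tail value are made-up example numbers, not anything taken from the driver.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Same arithmetic as the patched vmci_qp_add_pointer(), minus the
 * atomic accessors: advance 'val' by 'add' and wrap it at 'size'. */
static uint64_t add_pointer(uint64_t val, size_t add, uint64_t size)
{
        /* If adding 'add' would reach or pass 'size', subtract 'size'
         * first; the unsigned underflow is harmless because the
         * following addition brings the value back into [0, size). */
        if (val >= size - add)
                val -= size;
        return val + add;
}

int main(void)
{
        uint64_t tail = 4000;                   /* example starting tail */
        const uint64_t queue_size = 4096;       /* example queue size */

        tail = add_pointer(tail, 128, queue_size);
        printf("tail = %llu\n", (unsigned long long)tail);  /* prints 32 */
        return 0;
}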