diff options
Diffstat (limited to 'include/linux/hyperv.h')
-rw-r--r-- | include/linux/hyperv.h | 128 |
1 files changed, 63 insertions, 65 deletions
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index 183efde54269..62bbf3c1aa4a 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h | |||
@@ -32,11 +32,10 @@ | |||
32 | #include <linux/scatterlist.h> | 32 | #include <linux/scatterlist.h> |
33 | #include <linux/list.h> | 33 | #include <linux/list.h> |
34 | #include <linux/timer.h> | 34 | #include <linux/timer.h> |
35 | #include <linux/workqueue.h> | ||
36 | #include <linux/completion.h> | 35 | #include <linux/completion.h> |
37 | #include <linux/device.h> | 36 | #include <linux/device.h> |
38 | #include <linux/mod_devicetable.h> | 37 | #include <linux/mod_devicetable.h> |
39 | 38 | #include <linux/interrupt.h> | |
40 | 39 | ||
41 | #define MAX_PAGE_BUFFER_COUNT 32 | 40 | #define MAX_PAGE_BUFFER_COUNT 32 |
42 | #define MAX_MULTIPAGE_BUFFER_COUNT 32 /* 128K */ | 41 | #define MAX_MULTIPAGE_BUFFER_COUNT 32 /* 128K */ |
@@ -139,8 +138,8 @@ struct hv_ring_buffer_info { | |||
139 | * for the specified ring buffer | 138 | * for the specified ring buffer |
140 | */ | 139 | */ |
141 | static inline void | 140 | static inline void |
142 | hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi, | 141 | hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi, |
143 | u32 *read, u32 *write) | 142 | u32 *read, u32 *write) |
144 | { | 143 | { |
145 | u32 read_loc, write_loc, dsize; | 144 | u32 read_loc, write_loc, dsize; |
146 | 145 | ||
@@ -154,7 +153,7 @@ hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi, | |||
154 | *read = dsize - *write; | 153 | *read = dsize - *write; |
155 | } | 154 | } |
156 | 155 | ||
157 | static inline u32 hv_get_bytes_to_read(struct hv_ring_buffer_info *rbi) | 156 | static inline u32 hv_get_bytes_to_read(const struct hv_ring_buffer_info *rbi) |
158 | { | 157 | { |
159 | u32 read_loc, write_loc, dsize, read; | 158 | u32 read_loc, write_loc, dsize, read; |
160 | 159 | ||
@@ -168,7 +167,7 @@ static inline u32 hv_get_bytes_to_read(struct hv_ring_buffer_info *rbi) | |||
168 | return read; | 167 | return read; |
169 | } | 168 | } |
170 | 169 | ||
171 | static inline u32 hv_get_bytes_to_write(struct hv_ring_buffer_info *rbi) | 170 | static inline u32 hv_get_bytes_to_write(const struct hv_ring_buffer_info *rbi) |
172 | { | 171 | { |
173 | u32 read_loc, write_loc, dsize, write; | 172 | u32 read_loc, write_loc, dsize, write; |
174 | 173 | ||
@@ -641,6 +640,7 @@ struct vmbus_channel_msginfo { | |||
641 | 640 | ||
642 | /* Synchronize the request/response if needed */ | 641 | /* Synchronize the request/response if needed */ |
643 | struct completion waitevent; | 642 | struct completion waitevent; |
643 | struct vmbus_channel *waiting_channel; | ||
644 | union { | 644 | union { |
645 | struct vmbus_channel_version_supported version_supported; | 645 | struct vmbus_channel_version_supported version_supported; |
646 | struct vmbus_channel_open_result open_result; | 646 | struct vmbus_channel_open_result open_result; |
@@ -683,11 +683,6 @@ struct hv_input_signal_event_buffer { | |||
683 | struct hv_input_signal_event event; | 683 | struct hv_input_signal_event event; |
684 | }; | 684 | }; |
685 | 685 | ||
686 | enum hv_signal_policy { | ||
687 | HV_SIGNAL_POLICY_DEFAULT = 0, | ||
688 | HV_SIGNAL_POLICY_EXPLICIT, | ||
689 | }; | ||
690 | |||
691 | enum hv_numa_policy { | 686 | enum hv_numa_policy { |
692 | HV_BALANCED = 0, | 687 | HV_BALANCED = 0, |
693 | HV_LOCALIZED, | 688 | HV_LOCALIZED, |
@@ -747,26 +742,27 @@ struct vmbus_channel { | |||
747 | 742 | ||
748 | struct vmbus_close_msg close_msg; | 743 | struct vmbus_close_msg close_msg; |
749 | 744 | ||
750 | /* Channel callback are invoked in this workqueue context */ | 745 | /* Channel callback's invoked in softirq context */ |
751 | /* HANDLE dataWorkQueue; */ | 746 | struct tasklet_struct callback_event; |
752 | |||
753 | void (*onchannel_callback)(void *context); | 747 | void (*onchannel_callback)(void *context); |
754 | void *channel_callback_context; | 748 | void *channel_callback_context; |
755 | 749 | ||
756 | /* | 750 | /* |
757 | * A channel can be marked for efficient (batched) | 751 | * A channel can be marked for one of three modes of reading: |
758 | * reading: | 752 | * BATCHED - callback called from tasklet and should read |
759 | * If batched_reading is set to "true", we read until the | 753 | * channel until empty. Interrupts from the host |
760 | * channel is empty and hold off interrupts from the host | 754 | * are masked while read is in progress (default). |
761 | * during the entire read process. | 755 | * DIRECT - callback called from tasklet (softirq). |
762 | * If batched_reading is set to "false", the client is not | 756 | * ISR - callback called in interrupt context and must |
763 | * going to perform batched reading. | 757 | * invoke its own deferred processing. |
764 | * | 758 | * Host interrupts are disabled and must be re-enabled |
765 | * By default we will enable batched reading; specific | 759 | * when ring is empty. |
766 | * drivers that don't want this behavior can turn it off. | ||
767 | */ | 760 | */ |
768 | 761 | enum hv_callback_mode { | |
769 | bool batched_reading; | 762 | HV_CALL_BATCHED, |
763 | HV_CALL_DIRECT, | ||
764 | HV_CALL_ISR | ||
765 | } callback_mode; | ||
770 | 766 | ||
771 | bool is_dedicated_interrupt; | 767 | bool is_dedicated_interrupt; |
772 | struct hv_input_signal_event_buffer sig_buf; | 768 | struct hv_input_signal_event_buffer sig_buf; |
@@ -850,23 +846,6 @@ struct vmbus_channel { | |||
850 | */ | 846 | */ |
851 | struct list_head percpu_list; | 847 | struct list_head percpu_list; |
852 | /* | 848 | /* |
853 | * Host signaling policy: The default policy will be | ||
854 | * based on the ring buffer state. We will also support | ||
855 | * a policy where the client driver can have explicit | ||
856 | * signaling control. | ||
857 | */ | ||
858 | enum hv_signal_policy signal_policy; | ||
859 | /* | ||
860 | * On the channel send side, many of the VMBUS | ||
861 | * device drivers explicity serialize access to the | ||
862 | * outgoing ring buffer. Give more control to the | ||
863 | * VMBUS device drivers in terms how to serialize | ||
864 | * accesss to the outgoing ring buffer. | ||
865 | * The default behavior will be to aquire the | ||
866 | * ring lock to preserve the current behavior. | ||
867 | */ | ||
868 | bool acquire_ring_lock; | ||
869 | /* | ||
870 | * For performance critical channels (storage, networking | 849 | * For performance critical channels (storage, networking |
871 | * etc,), Hyper-V has a mechanism to enhance the throughput | 850 | * etc,), Hyper-V has a mechanism to enhance the throughput |
872 | * at the expense of latency: | 851 | * at the expense of latency: |
@@ -906,32 +885,22 @@ struct vmbus_channel { | |||
906 | 885 | ||
907 | }; | 886 | }; |
908 | 887 | ||
909 | static inline void set_channel_lock_state(struct vmbus_channel *c, bool state) | ||
910 | { | ||
911 | c->acquire_ring_lock = state; | ||
912 | } | ||
913 | |||
914 | static inline bool is_hvsock_channel(const struct vmbus_channel *c) | 888 | static inline bool is_hvsock_channel(const struct vmbus_channel *c) |
915 | { | 889 | { |
916 | return !!(c->offermsg.offer.chn_flags & | 890 | return !!(c->offermsg.offer.chn_flags & |
917 | VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER); | 891 | VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER); |
918 | } | 892 | } |
919 | 893 | ||
920 | static inline void set_channel_signal_state(struct vmbus_channel *c, | ||
921 | enum hv_signal_policy policy) | ||
922 | { | ||
923 | c->signal_policy = policy; | ||
924 | } | ||
925 | |||
926 | static inline void set_channel_affinity_state(struct vmbus_channel *c, | 894 | static inline void set_channel_affinity_state(struct vmbus_channel *c, |
927 | enum hv_numa_policy policy) | 895 | enum hv_numa_policy policy) |
928 | { | 896 | { |
929 | c->affinity_policy = policy; | 897 | c->affinity_policy = policy; |
930 | } | 898 | } |
931 | 899 | ||
932 | static inline void set_channel_read_state(struct vmbus_channel *c, bool state) | 900 | static inline void set_channel_read_mode(struct vmbus_channel *c, |
901 | enum hv_callback_mode mode) | ||
933 | { | 902 | { |
934 | c->batched_reading = state; | 903 | c->callback_mode = mode; |
935 | } | 904 | } |
936 | 905 | ||
937 | static inline void set_per_channel_state(struct vmbus_channel *c, void *s) | 906 | static inline void set_per_channel_state(struct vmbus_channel *c, void *s) |
@@ -1054,8 +1023,7 @@ extern int vmbus_sendpacket_ctl(struct vmbus_channel *channel, | |||
1054 | u32 bufferLen, | 1023 | u32 bufferLen, |
1055 | u64 requestid, | 1024 | u64 requestid, |
1056 | enum vmbus_packet_type type, | 1025 | enum vmbus_packet_type type, |
1057 | u32 flags, | 1026 | u32 flags); |
1058 | bool kick_q); | ||
1059 | 1027 | ||
1060 | extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel, | 1028 | extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel, |
1061 | struct hv_page_buffer pagebuffers[], | 1029 | struct hv_page_buffer pagebuffers[], |
@@ -1070,8 +1038,7 @@ extern int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel, | |||
1070 | void *buffer, | 1038 | void *buffer, |
1071 | u32 bufferlen, | 1039 | u32 bufferlen, |
1072 | u64 requestid, | 1040 | u64 requestid, |
1073 | u32 flags, | 1041 | u32 flags); |
1074 | bool kick_q); | ||
1075 | 1042 | ||
1076 | extern int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel, | 1043 | extern int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel, |
1077 | struct hv_multipage_buffer *mpb, | 1044 | struct hv_multipage_buffer *mpb, |
@@ -1458,9 +1425,10 @@ struct hyperv_service_callback { | |||
1458 | }; | 1425 | }; |
1459 | 1426 | ||
1460 | #define MAX_SRV_VER 0x7ffffff | 1427 | #define MAX_SRV_VER 0x7ffffff |
1461 | extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *, | 1428 | extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf, |
1462 | struct icmsg_negotiate *, u8 *, int, | 1429 | const int *fw_version, int fw_vercnt, |
1463 | int); | 1430 | const int *srv_version, int srv_vercnt, |
1431 | int *nego_fw_version, int *nego_srv_version); | ||
1464 | 1432 | ||
1465 | void hv_event_tasklet_disable(struct vmbus_channel *channel); | 1433 | void hv_event_tasklet_disable(struct vmbus_channel *channel); |
1466 | void hv_event_tasklet_enable(struct vmbus_channel *channel); | 1434 | void hv_event_tasklet_enable(struct vmbus_channel *channel); |
@@ -1480,9 +1448,9 @@ void vmbus_set_event(struct vmbus_channel *channel); | |||
1480 | 1448 | ||
1481 | /* Get the start of the ring buffer. */ | 1449 | /* Get the start of the ring buffer. */ |
1482 | static inline void * | 1450 | static inline void * |
1483 | hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info) | 1451 | hv_get_ring_buffer(const struct hv_ring_buffer_info *ring_info) |
1484 | { | 1452 | { |
1485 | return (void *)ring_info->ring_buffer->buffer; | 1453 | return ring_info->ring_buffer->buffer; |
1486 | } | 1454 | } |
1487 | 1455 | ||
1488 | /* | 1456 | /* |
@@ -1545,6 +1513,36 @@ init_cached_read_index(struct vmbus_channel *channel) | |||
1545 | } | 1513 | } |
1546 | 1514 | ||
1547 | /* | 1515 | /* |
1516 | * Mask off host interrupt callback notifications | ||
1517 | */ | ||
1518 | static inline void hv_begin_read(struct hv_ring_buffer_info *rbi) | ||
1519 | { | ||
1520 | rbi->ring_buffer->interrupt_mask = 1; | ||
1521 | |||
1522 | /* make sure mask update is not reordered */ | ||
1523 | virt_mb(); | ||
1524 | } | ||
1525 | |||
1526 | /* | ||
1527 | * Re-enable host callback and return number of outstanding bytes | ||
1528 | */ | ||
1529 | static inline u32 hv_end_read(struct hv_ring_buffer_info *rbi) | ||
1530 | { | ||
1531 | |||
1532 | rbi->ring_buffer->interrupt_mask = 0; | ||
1533 | |||
1534 | /* make sure mask update is not reordered */ | ||
1535 | virt_mb(); | ||
1536 | |||
1537 | /* | ||
1538 | * Now check to see if the ring buffer is still empty. | ||
1539 | * If it is not, we raced and we need to process new | ||
1540 | * incoming messages. | ||
1541 | */ | ||
1542 | return hv_get_bytes_to_read(rbi); | ||
1543 | } | ||
1544 | |||
1545 | /* | ||
1548 | * An API to support in-place processing of incoming VMBUS packets. | 1546 | * An API to support in-place processing of incoming VMBUS packets. |
1549 | */ | 1547 | */ |
1550 | #define VMBUS_PKT_TRAILER 8 | 1548 | #define VMBUS_PKT_TRAILER 8 |