author	Linus Torvalds <torvalds@linux-foundation.org>	2017-05-02 19:40:27 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-05-02 19:40:27 -0400
commit	8d65b08debc7e62b2c6032d7fe7389d895b92cbc (patch)
tree	0c3141b60c3a03cc32742b5750c5e763b9dae489 /drivers/hv
parent	5a0387a8a8efb90ae7fea1e2e5c62de3efa74691 (diff)
parent	5d15af6778b8e4ed1fd41b040283af278e7a9a72 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:
 "Here are some highlights from the 2065 networking commits that
  happened this development cycle:

   1) XDP support for IXGBE (John Fastabend) and thunderx (Sunil Kowuri)

   2) Add a generic XDP driver, so that anyone can test XDP even if they
      lack a networking device whose driver has explicit XDP support (me).

   3) Sparc64 now has an eBPF JIT too (me)

   4) Add a BPF program testing framework via BPF_PROG_TEST_RUN (Alexei
      Starovoitov)

   5) Make netfilter network namespace teardown less expensive (Florian
      Westphal)

   6) Add symmetric hashing support to nft_hash (Laura Garcia Liebana)

   7) Implement NAPI and GRO in netvsc driver (Stephen Hemminger)

   8) Support TC flower offload statistics in mlxsw (Arkadi Sharshevsky)

   9) Multiqueue support in stmmac driver (Joao Pinto)

  10) Remove TCP timewait recycling, it never really could possibly work
      well in the real world and timestamp randomization really zaps any
      hint of usability this feature had (Soheil Hassas Yeganeh)

  11) Support level3 vs level4 ECMP route hashing in ipv4 (Nikolay
      Aleksandrov)

  12) Add socket busy poll support to epoll (Sridhar Samudrala)

  13) Netlink extended ACK support (Johannes Berg, Pablo Neira Ayuso,
      and several others)

  14) IPSEC hw offload infrastructure (Steffen Klassert)"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (2065 commits)
  tipc: refactor function tipc_sk_recv_stream()
  tipc: refactor function tipc_sk_recvmsg()
  net: thunderx: Optimize page recycling for XDP
  net: thunderx: Support for XDP header adjustment
  net: thunderx: Add support for XDP_TX
  net: thunderx: Add support for XDP_DROP
  net: thunderx: Add basic XDP support
  net: thunderx: Cleanup receive buffer allocation
  net: thunderx: Optimize CQE_TX handling
  net: thunderx: Optimize RBDR descriptor handling
  net: thunderx: Support for page recycling
  ipx: call ipxitf_put() in ioctl error path
  net: sched: add helpers to handle extended actions
  qed*: Fix issues in the ptp filter config implementation.
  qede: Fix concurrency issue in PTP Tx path processing.
  stmmac: Add support for SIMATIC IOT2000 platform
  net: hns: fix ethtool_get_strings overflow in hns driver
  tcp: fix wraparound issue in tcp_lp
  bpf, arm64: fix jit branch offset related to ldimm64
  bpf, arm64: implement jiting of BPF_XADD
  ...
Diffstat (limited to 'drivers/hv')
-rw-r--r--	drivers/hv/ring_buffer.c	94
1 file changed, 93 insertions(+), 1 deletion(-)
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 87799e81af97..c3f1a9e33cef 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -32,6 +32,8 @@
 
 #include "hyperv_vmbus.h"
 
+#define VMBUS_PKT_TRAILER	8
+
 /*
  * When we write to the ring buffer, check if the host needs to
  * be signaled. Here is the details of this protocol:
@@ -336,6 +338,12 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
 	return 0;
 }
 
+static inline void
+init_cached_read_index(struct hv_ring_buffer_info *rbi)
+{
+	rbi->cached_read_index = rbi->ring_buffer->read_index;
+}
+
 int hv_ringbuffer_read(struct vmbus_channel *channel,
 		       void *buffer, u32 buflen, u32 *buffer_actual_len,
 		       u64 *requestid, bool raw)
@@ -366,7 +374,8 @@ int hv_ringbuffer_read(struct vmbus_channel *channel,
 		return ret;
 	}
 
-	init_cached_read_index(channel);
+	init_cached_read_index(inring_info);
+
 	next_read_location = hv_get_next_read_location(inring_info);
 	next_read_location = hv_copyfrom_ringbuffer(inring_info, &desc,
 						    sizeof(desc),
@@ -410,3 +419,86 @@ int hv_ringbuffer_read(struct vmbus_channel *channel,
 
 	return ret;
 }
+
+/*
+ * Determine number of bytes available in ring buffer after
+ * the current iterator (priv_read_index) location.
+ *
+ * This is similar to hv_get_bytes_to_read but with private
+ * read index instead.
+ */
+static u32 hv_pkt_iter_avail(const struct hv_ring_buffer_info *rbi)
+{
+	u32 priv_read_loc = rbi->priv_read_index;
+	u32 write_loc = READ_ONCE(rbi->ring_buffer->write_index);
+
+	if (write_loc >= priv_read_loc)
+		return write_loc - priv_read_loc;
+	else
+		return (rbi->ring_datasize - priv_read_loc) + write_loc;
+}
+
+/*
+ * Get first vmbus packet from ring buffer after read_index
+ *
+ * If ring buffer is empty, returns NULL and no other action needed.
+ */
+struct vmpacket_descriptor *hv_pkt_iter_first(struct vmbus_channel *channel)
+{
+	struct hv_ring_buffer_info *rbi = &channel->inbound;
+
+	/* set state for later hv_signal_on_read() */
+	init_cached_read_index(rbi);
+
+	if (hv_pkt_iter_avail(rbi) < sizeof(struct vmpacket_descriptor))
+		return NULL;
+
+	return hv_get_ring_buffer(rbi) + rbi->priv_read_index;
+}
+EXPORT_SYMBOL_GPL(hv_pkt_iter_first);
+
+/*
+ * Get next vmbus packet from ring buffer.
+ *
+ * Advances the current location (priv_read_index) and checks for more
+ * data. If the end of the ring buffer is reached, then return NULL.
+ */
+struct vmpacket_descriptor *
+__hv_pkt_iter_next(struct vmbus_channel *channel,
+		   const struct vmpacket_descriptor *desc)
+{
+	struct hv_ring_buffer_info *rbi = &channel->inbound;
+	u32 packetlen = desc->len8 << 3;
+	u32 dsize = rbi->ring_datasize;
+
+	/* bump offset to next potential packet */
+	rbi->priv_read_index += packetlen + VMBUS_PKT_TRAILER;
+	if (rbi->priv_read_index >= dsize)
+		rbi->priv_read_index -= dsize;
+
+	/* more data? */
+	if (hv_pkt_iter_avail(rbi) < sizeof(struct vmpacket_descriptor))
+		return NULL;
+	else
+		return hv_get_ring_buffer(rbi) + rbi->priv_read_index;
+}
+EXPORT_SYMBOL_GPL(__hv_pkt_iter_next);
+
+/*
+ * Update host ring buffer after iterating over packets.
+ */
+void hv_pkt_iter_close(struct vmbus_channel *channel)
+{
+	struct hv_ring_buffer_info *rbi = &channel->inbound;
+
+	/*
+	 * Make sure all reads are done before we update the read index since
+	 * the writer may start writing to the read area once the read index
+	 * is updated.
+	 */
+	virt_rmb();
+	rbi->ring_buffer->read_index = rbi->priv_read_index;
+
+	hv_signal_on_read(channel);
+}
+EXPORT_SYMBOL_GPL(hv_pkt_iter_close);
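
For context, a minimal sketch (not part of the patch above) of how a driver's receive path might consume packets with the iterator API this diff adds: begin with hv_pkt_iter_first(), advance with __hv_pkt_iter_next(), and finish with hv_pkt_iter_close() to publish the read index and signal the host. The function names example_drain_channel() and process_example_packet() are hypothetical, and the sketch assumes the iterator prototypes are available via linux/hyperv.h; in-tree consumers such as the netvsc NAPI work mentioned in highlight 7 wrap this pattern in their own poll logic.

/*
 * Illustrative sketch only -- not part of the patch above.
 * Assumes the hv_pkt_iter_* prototypes are declared in linux/hyperv.h.
 */
#include <linux/hyperv.h>

/* Hypothetical per-packet handler; stands in for real driver logic. */
static void process_example_packet(struct vmbus_channel *channel,
				   const struct vmpacket_descriptor *desc,
				   u32 total_len)
{
	/* driver-specific handling of one packet would go here */
}

static void example_drain_channel(struct vmbus_channel *channel)
{
	struct vmpacket_descriptor *desc;

	/* Start iteration; NULL means the inbound ring is empty. */
	for (desc = hv_pkt_iter_first(channel);
	     desc;
	     desc = __hv_pkt_iter_next(channel, desc)) {
		/* len8 is the descriptor length in 8-byte units. */
		process_example_packet(channel, desc, desc->len8 << 3);
	}

	/* Publish priv_read_index to read_index and signal the host. */
	hv_pkt_iter_close(channel);
}

One design point visible in the diff itself: the iterator advances a private read index (priv_read_index) while reading, and only hv_pkt_iter_close() writes it back to ring_buffer->read_index after a virt_rmb(), so the host sees the buffer space reclaimed once, after the whole batch has been consumed.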