path: root/drivers/net/virtio_net.c
Diffstat (limited to 'drivers/net/virtio_net.c')
-rw-r--r--  drivers/net/virtio_net.c  25
1 file changed, 21 insertions(+), 4 deletions(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 4a105006ca63..765c2d6358da 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -48,8 +48,16 @@ module_param(gso, bool, 0444);
  */
 DECLARE_EWMA(pkt_len, 1, 64)
 
+/* With mergeable buffers we align buffer address and use the low bits to
+ * encode its true size. Buffer size is up to 1 page so we need to align to
+ * square root of page size to ensure we reserve enough bits to encode the true
+ * size.
+ */
+#define MERGEABLE_BUFFER_MIN_ALIGN_SHIFT ((PAGE_SHIFT + 1) / 2)
+
 /* Minimum alignment for mergeable packet buffers. */
-#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256)
+#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, \
+                                   1 << MERGEABLE_BUFFER_MIN_ALIGN_SHIFT)
 
 #define VIRTNET_DRIVER_VERSION "1.0.0"
 
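The new comment carries the reasoning for this hunk: if every mergeable buffer address is aligned to 2^k bytes, its low k bits are always zero and can encode the buffer's true size in multiples of that alignment, and picking k = (PAGE_SHIFT + 1) / 2 means 2^k values of 2^k bytes each cover a whole page, so any size up to PAGE_SIZE fits. A self-contained sketch of that encoding follows; it is illustrative only (buf_to_ctx, ctx_to_truesize and DEMO_PAGE_SHIFT = 12 are assumed names and values, not the driver's helpers).

/*
 * Illustrative sketch of the "size in the low bits" scheme described in the
 * comment above, assuming 4 KiB pages.  Buffers are aligned to 2^k bytes and
 * (truesize / alignment - 1) is stored in the otherwise-zero low k bits.
 */
#include <assert.h>
#include <stdint.h>

#define DEMO_PAGE_SHIFT  12                        /* assume 4 KiB pages */
#define DEMO_PAGE_SIZE   (1UL << DEMO_PAGE_SHIFT)
#define DEMO_ALIGN_SHIFT ((DEMO_PAGE_SHIFT + 1) / 2)
#define DEMO_ALIGN       (1UL << DEMO_ALIGN_SHIFT)

static uintptr_t buf_to_ctx(void *buf, unsigned int truesize)
{
        /* buf must be DEMO_ALIGN aligned, truesize a multiple of DEMO_ALIGN */
        return (uintptr_t)buf | (truesize / DEMO_ALIGN - 1);
}

static void *ctx_to_buf(uintptr_t ctx)
{
        return (void *)(ctx & ~(DEMO_ALIGN - 1));
}

static unsigned int ctx_to_truesize(uintptr_t ctx)
{
        return ((ctx & (DEMO_ALIGN - 1)) + 1) * DEMO_ALIGN;
}

int main(void)
{
        static _Alignas(DEMO_ALIGN) char page[DEMO_PAGE_SIZE];
        uintptr_t ctx = buf_to_ctx(page, DEMO_PAGE_SIZE);

        assert(ctx_to_buf(ctx) == page);
        assert(ctx_to_truesize(ctx) == DEMO_PAGE_SIZE);
        return 0;
}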
@@ -1104,7 +1112,7 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
         hdr = skb_vnet_hdr(skb);
 
         if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
-                                     virtio_is_little_endian(vi->vdev)))
+                                     virtio_is_little_endian(vi->vdev), false))
                 BUG();
 
         if (vi->mergeable_rx_bufs)
@@ -1707,6 +1715,11 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog)
         u16 xdp_qp = 0, curr_qp;
         int i, err;
 
+        if (prog && prog->xdp_adjust_head) {
+                netdev_warn(dev, "Does not support bpf_xdp_adjust_head()\n");
+                return -EOPNOTSUPP;
+        }
+
         if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
             virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
             virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
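For context on the new guard: a BPF program that calls the bpf_xdp_adjust_head() helper to grow or shrink packet headroom is marked with the xdp_adjust_head flag, and since this driver does not reserve headroom for XDP here, such programs are refused at attach time with -EOPNOTSUPP. A hedged example of an XDP program that would trip the guard (assumes a libbpf-style build; the section and function names are illustrative):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_push_headroom(struct xdp_md *ctx)
{
        /* Asking for 16 extra bytes of headroom uses bpf_xdp_adjust_head(),
         * which the check in virtnet_xdp_set() above rejects. */
        if (bpf_xdp_adjust_head(ctx, -16))
                return XDP_ABORTED;
        return XDP_PASS;
}

char _license[] SEC("license") = "GPL";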
@@ -1890,8 +1903,12 @@ static void free_receive_page_frags(struct virtnet_info *vi)
                         put_page(vi->rq[i].alloc_frag.page);
 }
 
-static bool is_xdp_queue(struct virtnet_info *vi, int q)
+static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
 {
+        /* For small receive mode always use kfree_skb variants */
+        if (!vi->mergeable_rx_bufs)
+                return false;
+
         if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
                 return false;
         else if (q < vi->curr_queue_pairs)
@@ -1908,7 +1925,7 @@ static void free_unused_bufs(struct virtnet_info *vi)
         for (i = 0; i < vi->max_queue_pairs; i++) {
                 struct virtqueue *vq = vi->sq[i].vq;
                 while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
-                        if (!is_xdp_queue(vi, i))
+                        if (!is_xdp_raw_buffer_queue(vi, i))
                                 dev_kfree_skb(buf);
                         else
                                 put_page(virt_to_head_page(buf));
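Taken together, the last two hunks change how leftover TX entries are reclaimed: in mergeable receive mode the trailing xdp_queue_pairs active TX queues hold raw page buffers posted by XDP and must be released with put_page(), while every other queue, and everything in small receive mode, holds skbs and goes through dev_kfree_skb(). A minimal standalone sketch of that decision (hypothetical name and flat parameters in place of struct virtnet_info):

#include <stdbool.h>

/*
 * Mirrors the logic of is_xdp_raw_buffer_queue() above: raw XDP buffers only
 * exist in mergeable receive mode, and only on the last xdp_queue_pairs of
 * the currently active TX queues; everything else is freed as an skb.
 */
static bool txq_holds_raw_xdp_buffers(bool mergeable_rx_bufs,
                                      int curr_queue_pairs,
                                      int xdp_queue_pairs, int q)
{
        if (!mergeable_rx_bufs)
                return false;
        return q >= curr_queue_pairs - xdp_queue_pairs && q < curr_queue_pairs;
}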