author: Jason Wang <jasowang@redhat.com> 2013-11-13 01:00:39 -0500
committer: David S. Miller <davem@davemloft.net> 2013-11-14 16:05:27 -0500
commit: 96f8d9ecf227638c89f98ccdcdd50b569891976c
tree: 8c6c163cd3914426873843c0fdb1c850dd5fe053 /drivers/net/tun.c
parent: 6115c11fe1a5a636ac99fc823b00df4ff3c0674e
tuntap: limit head length of skb allocated
We currently use hdr_len as a hint of the head length, which is advertised by the guest. But when the guest advertises a very big value, it can lead to a 64K+ kmalloc() allocation, which has a very high chance of failing when host memory is fragmented or under heavy stress. A huge hdr_len also reduces the effectiveness of zerocopy, or even disables it entirely if a GSO skb is linearized in the guest.

To solve those issues, this patch introduces an upper limit (PAGE_SIZE) on the head, which guarantees an order-0 allocation each time.

Cc: Stefan Hajnoczi <stefanha@redhat.com>
Cc: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
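As an illustration only (not part of the commit), the clamp described above amounts to capping the guest-supplied hdr_len before it is used as the linear head size. The userspace sketch below mimics that logic; GOOD_LINEAR stands in for SKB_MAX_HEAD(align) and the PAGE_SIZE and GOODCOPY_LEN values are illustrative assumptions, not taken from the kernel build.

/*
 * Illustrative userspace sketch of the clamp added by this patch.
 * GOOD_LINEAR stands in for SKB_MAX_HEAD(align); keeping the head
 * within roughly one page is what keeps the allocation at order 0.
 */
#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE    4096u      /* illustrative value */
#define GOOD_LINEAR  PAGE_SIZE  /* stand-in for SKB_MAX_HEAD(align) */
#define GOODCOPY_LEN 128u       /* stand-in for tun.c's copy hint */

/* Hypothetical helper mirroring the patch: never trust hdr_len beyond GOOD_LINEAR. */
static size_t clamp_copylen(size_t guest_hdr_len)
{
	size_t copylen = guest_hdr_len ? guest_hdr_len : GOODCOPY_LEN;

	if (copylen > GOOD_LINEAR)
		copylen = GOOD_LINEAR;
	return copylen;
}

int main(void)
{
	/* A modest guest hint passes through unchanged... */
	printf("hdr_len=256   -> copylen=%zu\n", clamp_copylen(256));
	/* ...while a 64K+ hint is capped, avoiding a high-order allocation. */
	printf("hdr_len=65562 -> copylen=%zu\n", clamp_copylen(65562));
	return 0;
}

The same bound is applied on both paths in the patch below: the zerocopy path caps copylen, and the non-zerocopy path caps linear.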
Diffstat (limited to 'drivers/net/tun.c')
-rw-r--r--  drivers/net/tun.c  10
1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 7cb105c103fe..782e38bfc1ee 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -981,6 +981,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 	struct sk_buff *skb;
 	size_t len = total_len, align = NET_SKB_PAD, linear;
 	struct virtio_net_hdr gso = { 0 };
+	int good_linear;
 	int offset = 0;
 	int copylen;
 	bool zerocopy = false;
@@ -1021,12 +1022,16 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 			return -EINVAL;
 	}
 
+	good_linear = SKB_MAX_HEAD(align);
+
 	if (msg_control) {
 		/* There are 256 bytes to be copied in skb, so there is
 		 * enough room for skb expand head in case it is used.
 		 * The rest of the buffer is mapped from userspace.
 		 */
 		copylen = gso.hdr_len ? gso.hdr_len : GOODCOPY_LEN;
+		if (copylen > good_linear)
+			copylen = good_linear;
 		linear = copylen;
 		if (iov_pages(iv, offset + copylen, count) <= MAX_SKB_FRAGS)
 			zerocopy = true;
@@ -1034,7 +1039,10 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 
 	if (!zerocopy) {
 		copylen = len;
-		linear = gso.hdr_len;
+		if (gso.hdr_len > good_linear)
+			linear = good_linear;
+		else
+			linear = gso.hdr_len;
 	}
 
 	skb = tun_alloc_skb(tfile, align, copylen, linear, noblock);