about summary refs log tree commit diff stats
path: root/net/packet
diff options
context:
space:
mode:
author	Willem de Bruijn <willemb@google.com>	2015-05-12 11:56:49 -0400
committer	David S. Miller <davem@davemloft.net>	2015-05-13 15:43:00 -0400
commit	3b3a5b0aab5b9ad345d4beb9a364a7dd02c23d40 (patch)
tree	07714d34021e3505276c82d8c84c39324c4ddc97 /net/packet
parent	2ccdbaa6d55b0656244ba57c4b56765a0af76c0a (diff)
packet: rollover huge flows before small flows
Migrate flows from a socket to another socket in the fanout group not
only when the socket is full. Start migrating huge flows early, to
divert possible 4-tuple attacks without affecting normal traffic.

Introduce fanout_flow_is_huge(). This detects huge flows, which are
defined as taking up more than half the load. It does so cheaply, by
storing the rxhashes of the N most recent packets. If over half of
these are the same rxhash as the current packet, then drop it. This
only protects against 4-tuple attacks. N is chosen to fit all data in
a single cache line.

Tested:
  Ran bench_rollover for 10 sec with 1.5 Mpps of single flow input.

  lpbb5:/export/hda3/willemb# ./bench_rollover -l 1000 -r -s
  cpu          rx       rx.k     drop.k   rollover     r.huge   r.failed
    0          14         14          0          0          0          0
    1          20         20          0          0          0          0
    2          16         16          0          0          0          0
    3     6168824    6168824          0    4867721    4867721          0
    4     4867741    4867741          0          0          0          0
    5          12         12          0          0          0          0
    6          15         15          0          0          0          0
    7          17         17          0          0          0          0

Signed-off-by: Willem de Bruijn <willemb@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/packet')
-rw-r--r--net/packet/af_packet.c25
-rw-r--r--net/packet/internal.h2
2 files changed, 24 insertions, 3 deletions
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 3a383fd72f82..8f0156b10f8d 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1341,6 +1341,20 @@ static int fanout_rr_next(struct packet_fanout *f, unsigned int num)
1341 return x; 1341 return x;
1342} 1342}
1343 1343
1344static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb)
1345{
1346 u32 rxhash;
1347 int i, count = 0;
1348
1349 rxhash = skb_get_hash(skb);
1350 for (i = 0; i < ROLLOVER_HLEN; i++)
1351 if (po->rollover->history[i] == rxhash)
1352 count++;
1353
1354 po->rollover->history[prandom_u32() % ROLLOVER_HLEN] = rxhash;
1355 return count > (ROLLOVER_HLEN >> 1);
1356}
1357
1344static unsigned int fanout_demux_hash(struct packet_fanout *f, 1358static unsigned int fanout_demux_hash(struct packet_fanout *f,
1345 struct sk_buff *skb, 1359 struct sk_buff *skb,
1346 unsigned int num) 1360 unsigned int num)
@@ -1381,11 +1395,16 @@ static unsigned int fanout_demux_rollover(struct packet_fanout *f,
1381 unsigned int num) 1395 unsigned int num)
1382{ 1396{
1383 struct packet_sock *po, *po_next; 1397 struct packet_sock *po, *po_next;
1384 unsigned int i, j; 1398 unsigned int i, j, room;
1385 1399
1386 po = pkt_sk(f->arr[idx]); 1400 po = pkt_sk(f->arr[idx]);
1387 if (try_self && packet_rcv_has_room(po, skb) != ROOM_NONE) 1401
1388 return idx; 1402 if (try_self) {
1403 room = packet_rcv_has_room(po, skb);
1404 if (room == ROOM_NORMAL ||
1405 (room == ROOM_LOW && !fanout_flow_is_huge(po, skb)))
1406 return idx;
1407 }
1389 1408
1390 i = j = min_t(int, po->rollover->sock, num - 1); 1409 i = j = min_t(int, po->rollover->sock, num - 1);
1391 do { 1410 do {
diff --git a/net/packet/internal.h b/net/packet/internal.h
index 22d7d778c5b7..a9d30a17c714 100644
--- a/net/packet/internal.h
+++ b/net/packet/internal.h
@@ -89,6 +89,8 @@ struct packet_fanout {
89 89
90struct packet_rollover { 90struct packet_rollover {
91 int sock; 91 int sock;
92#define ROLLOVER_HLEN (L1_CACHE_BYTES / sizeof(u32))
93 u32 history[ROLLOVER_HLEN] ____cacheline_aligned;
92} ____cacheline_aligned_in_smp; 94} ____cacheline_aligned_in_smp;
93 95
94struct packet_sock { 96struct packet_sock {