author    Jesper Dangaard Brouer <brouer@redhat.com>  2019-02-19 13:53:02 -0500
committer Daniel Borkmann <daniel@iogearbox.net>      2019-02-19 15:56:05 -0500
commit    74e31ca850c1cddeca03503171dd145b6ce293b6
tree      069ba2607739490c6dcd75cb47da15b7105995b4
parent    568f196756ad9fe2b49c46bbf6a9de1b190438b4
bpf: add skb->queue_mapping write access from tc clsact
The skb->queue_mapping field already has read access, via __sk_buff->queue_mapping.
This patch allows BPF programs on the tc qdisc clsact hook to also write the
queue_mapping field, by extending tc_cls_act_is_valid_access. It also handles
that the value NO_QUEUE_MAPPING is not allowed to be written.
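In a clsact BPF program this means an assignment like the following sketch (not
taken from the patch) is now possible for tc programs:

  skb->queue_mapping = 5; /* values >= NO_QUEUE_MAPPING are discarded at run time */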
It is already possible to change queue_mapping via the TC filter action skbedit,
see tc-skbedit(8). Due to the lack of TC examples, let's show one:
# tc qdisc add dev ixgbe1 clsact
# tc filter add dev ixgbe1 ingress matchall action skbedit queue_mapping 5
# tc filter list dev ixgbe1 ingress
The most common pitfall is that XPS (Transmit Packet Steering) takes precedence
over the skb->queue_mapping set here. XPS is configured per DEVICE via
/sys/class/net/DEVICE/queues/tx-*/xps_cpus, using a CPU hex mask. To disable
XPS, set the mask to 00.
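For example, one way (not from the original patch) to clear the XPS mask on
every TX queue of the device used above:

 # for f in /sys/class/net/ixgbe1/queues/tx-*/xps_cpus; do echo 00 > $f; done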
The purpose of changing skb->queue_mapping is to influence the selection of the
net_device "txq" (struct netdev_queue), which in turn influences the selection
of the qdisc "root_lock" (via txq->qdisc->q.lock) and of txq->_xmit_lock. When
using the MQ qdisc, each txq->qdisc points to a different qdisc with its own
lock and its own HARD_TX_LOCK (txq->_xmit_lock), allowing for CPU scalability.
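For reference (commands not part of this patch), the per-txq qdiscs behind MQ
can be set up and inspected like this, assuming ixgbe2 is a multiqueue device:

 # tc qdisc replace dev ixgbe2 root mq
 # tc qdisc show dev ixgbe2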
Due to the lack of TC examples, let's also show how to attach clsact BPF programs:
# tc qdisc add dev ixgbe2 clsact
# tc filter add dev ixgbe2 egress bpf da obj XXX_kern.o sec tc_qmap2cpu
# tc filter list dev ixgbe2 egress
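A minimal sketch (not part of this patch) of what the XXX_kern.c behind the
tc_qmap2cpu section could look like; the helper declaration follows the classic
iproute2 tc BPF example style, and the cpu + 1 policy is only an assumption,
chosen because a queue_mapping of zero means "no queue recorded" elsewhere in
the stack (skb_rx_queue_recorded()):

  #include <linux/bpf.h>
  #include <linux/pkt_cls.h>

  #ifndef __section
  # define __section(NAME) __attribute__((section(NAME), used))
  #endif

  /* Declare the helper by its function id (iproute2 tc example style) */
  static unsigned int (*bpf_get_smp_processor_id)(void) =
          (void *) BPF_FUNC_get_smp_processor_id;

  __section("tc_qmap2cpu")
  int tc_qmap2cpu_prog(struct __sk_buff *skb)
  {
          /* Steer the packet to a TX queue derived from the current CPU */
          skb->queue_mapping = bpf_get_smp_processor_id() + 1;
          return TC_ACT_OK;
  }

  char _license[] __section("license") = "GPL";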
Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
 net/core/filter.c | 16 +++++++++++++---
 1 file changed, 13 insertions(+), 3 deletions(-)
diff --git a/net/core/filter.c b/net/core/filter.c
index b584cb42a803..85749f6ec789 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -6279,6 +6279,7 @@ static bool tc_cls_act_is_valid_access(int off, int size,
 	case bpf_ctx_range(struct __sk_buff, tc_classid):
 	case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
 	case bpf_ctx_range(struct __sk_buff, tstamp):
+	case bpf_ctx_range(struct __sk_buff, queue_mapping):
 		break;
 	default:
 		return false;
@@ -6683,9 +6684,18 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
 		break;
 
 	case offsetof(struct __sk_buff, queue_mapping):
-		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
-				      bpf_target_off(struct sk_buff, queue_mapping, 2,
-						     target_size));
+		if (type == BPF_WRITE) {
+			*insn++ = BPF_JMP_IMM(BPF_JGE, si->src_reg, NO_QUEUE_MAPPING, 1);
+			*insn++ = BPF_STX_MEM(BPF_H, si->dst_reg, si->src_reg,
+					      bpf_target_off(struct sk_buff,
+							     queue_mapping,
+							     2, target_size));
+		} else {
+			*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
+					      bpf_target_off(struct sk_buff,
+							     queue_mapping,
+							     2, target_size));
+		}
 		break;
 
 	case offsetof(struct __sk_buff, vlan_present):
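Paraphrasing the new BPF_WRITE branch (an explanatory sketch, not code from the
patch): the emitted BPF_JGE instruction jumps over the following store when the
source value is out of range, so at run time the converted write behaves roughly
like:

  if (new_queue_mapping < NO_QUEUE_MAPPING)
          skb->queue_mapping = new_queue_mapping; /* 2-byte (BPF_H) store */
  /* otherwise the write is silently dropped */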