diff options
| author | Björn Töpel <bjorn.topel@intel.com> | 2018-06-04 07:57:14 -0400 |
|---|---|---|
| committer | Daniel Borkmann <daniel@iogearbox.net> | 2018-06-04 11:21:02 -0400 |
| commit | a412ef54fc2eb81bb55428dcdcdaa2e38ae9bba5 (patch) | |
| tree | 9ce42e40e4f53677387d85d4ddc97f07b327f3b2 /samples | |
| parent | bbff2f321a864ee07c9d3d1245af498023146951 (diff) | |
samples/bpf: adapted to new uapi
Here, the xdpsock sample application is adjusted to the new descriptor
format.
Signed-off-by: Björn Töpel <bjorn.topel@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Diffstat (limited to 'samples')
| -rw-r--r-- | samples/bpf/xdpsock_user.c | 84 |
1 file changed, 36 insertions, 48 deletions
diff --git a/samples/bpf/xdpsock_user.c b/samples/bpf/xdpsock_user.c index e379eac034ac..b71a342b9082 100644 --- a/samples/bpf/xdpsock_user.c +++ b/samples/bpf/xdpsock_user.c | |||
| @@ -46,6 +46,7 @@ | |||
| 46 | 46 | ||
| 47 | #define NUM_FRAMES 131072 | 47 | #define NUM_FRAMES 131072 |
| 48 | #define FRAME_HEADROOM 0 | 48 | #define FRAME_HEADROOM 0 |
| 49 | #define FRAME_SHIFT 11 | ||
| 49 | #define FRAME_SIZE 2048 | 50 | #define FRAME_SIZE 2048 |
| 50 | #define NUM_DESCS 1024 | 51 | #define NUM_DESCS 1024 |
| 51 | #define BATCH_SIZE 16 | 52 | #define BATCH_SIZE 16 |
| @@ -55,6 +56,7 @@ | |||
| 55 | 56 | ||
| 56 | #define DEBUG_HEXDUMP 0 | 57 | #define DEBUG_HEXDUMP 0 |
| 57 | 58 | ||
| 59 | typedef __u64 u64; | ||
| 58 | typedef __u32 u32; | 60 | typedef __u32 u32; |
| 59 | 61 | ||
| 60 | static unsigned long prev_time; | 62 | static unsigned long prev_time; |
| @@ -81,12 +83,12 @@ struct xdp_umem_uqueue { | |||
| 81 | u32 size; | 83 | u32 size; |
| 82 | u32 *producer; | 84 | u32 *producer; |
| 83 | u32 *consumer; | 85 | u32 *consumer; |
| 84 | u32 *ring; | 86 | u64 *ring; |
| 85 | void *map; | 87 | void *map; |
| 86 | }; | 88 | }; |
| 87 | 89 | ||
| 88 | struct xdp_umem { | 90 | struct xdp_umem { |
| 89 | char (*frames)[FRAME_SIZE]; | 91 | char *frames; |
| 90 | struct xdp_umem_uqueue fq; | 92 | struct xdp_umem_uqueue fq; |
| 91 | struct xdp_umem_uqueue cq; | 93 | struct xdp_umem_uqueue cq; |
| 92 | int fd; | 94 | int fd; |
| @@ -214,7 +216,7 @@ static inline int umem_fill_to_kernel_ex(struct xdp_umem_uqueue *fq, | |||
| 214 | for (i = 0; i < nb; i++) { | 216 | for (i = 0; i < nb; i++) { |
| 215 | u32 idx = fq->cached_prod++ & fq->mask; | 217 | u32 idx = fq->cached_prod++ & fq->mask; |
| 216 | 218 | ||
| 217 | fq->ring[idx] = d[i].idx; | 219 | fq->ring[idx] = d[i].addr; |
| 218 | } | 220 | } |
| 219 | 221 | ||
| 220 | u_smp_wmb(); | 222 | u_smp_wmb(); |
| @@ -224,7 +226,7 @@ static inline int umem_fill_to_kernel_ex(struct xdp_umem_uqueue *fq, | |||
| 224 | return 0; | 226 | return 0; |
| 225 | } | 227 | } |
| 226 | 228 | ||
| 227 | static inline int umem_fill_to_kernel(struct xdp_umem_uqueue *fq, u32 *d, | 229 | static inline int umem_fill_to_kernel(struct xdp_umem_uqueue *fq, u64 *d, |
| 228 | size_t nb) | 230 | size_t nb) |
| 229 | { | 231 | { |
| 230 | u32 i; | 232 | u32 i; |
| @@ -246,7 +248,7 @@ static inline int umem_fill_to_kernel(struct xdp_umem_uqueue *fq, u32 *d, | |||
| 246 | } | 248 | } |
| 247 | 249 | ||
| 248 | static inline size_t umem_complete_from_kernel(struct xdp_umem_uqueue *cq, | 250 | static inline size_t umem_complete_from_kernel(struct xdp_umem_uqueue *cq, |
| 249 | u32 *d, size_t nb) | 251 | u64 *d, size_t nb) |
| 250 | { | 252 | { |
| 251 | u32 idx, i, entries = umem_nb_avail(cq, nb); | 253 | u32 idx, i, entries = umem_nb_avail(cq, nb); |
| 252 | 254 | ||
| @@ -266,10 +268,9 @@ static inline size_t umem_complete_from_kernel(struct xdp_umem_uqueue *cq, | |||
| 266 | return entries; | 268 | return entries; |
| 267 | } | 269 | } |
| 268 | 270 | ||
| 269 | static inline void *xq_get_data(struct xdpsock *xsk, __u32 idx, __u32 off) | 271 | static inline void *xq_get_data(struct xdpsock *xsk, u64 addr) |
| 270 | { | 272 | { |
| 271 | lassert(idx < NUM_FRAMES); | 273 | return &xsk->umem->frames[addr]; |
| 272 | return &xsk->umem->frames[idx][off]; | ||
| 273 | } | 274 | } |
| 274 | 275 | ||
| 275 | static inline int xq_enq(struct xdp_uqueue *uq, | 276 | static inline int xq_enq(struct xdp_uqueue *uq, |
| @@ -285,9 +286,8 @@ static inline int xq_enq(struct xdp_uqueue *uq, | |||
| 285 | for (i = 0; i < ndescs; i++) { | 286 | for (i = 0; i < ndescs; i++) { |
| 286 | u32 idx = uq->cached_prod++ & uq->mask; | 287 | u32 idx = uq->cached_prod++ & uq->mask; |
| 287 | 288 | ||
| 288 | r[idx].idx = descs[i].idx; | 289 | r[idx].addr = descs[i].addr; |
| 289 | r[idx].len = descs[i].len; | 290 | r[idx].len = descs[i].len; |
| 290 | r[idx].offset = descs[i].offset; | ||
| 291 | } | 291 | } |
| 292 | 292 | ||
| 293 | u_smp_wmb(); | 293 | u_smp_wmb(); |
| @@ -297,7 +297,7 @@ static inline int xq_enq(struct xdp_uqueue *uq, | |||
| 297 | } | 297 | } |
| 298 | 298 | ||
| 299 | static inline int xq_enq_tx_only(struct xdp_uqueue *uq, | 299 | static inline int xq_enq_tx_only(struct xdp_uqueue *uq, |
| 300 | __u32 idx, unsigned int ndescs) | 300 | unsigned int id, unsigned int ndescs) |
| 301 | { | 301 | { |
| 302 | struct xdp_desc *r = uq->ring; | 302 | struct xdp_desc *r = uq->ring; |
| 303 | unsigned int i; | 303 | unsigned int i; |
| @@ -308,9 +308,8 @@ static inline int xq_enq_tx_only(struct xdp_uqueue *uq, | |||
| 308 | for (i = 0; i < ndescs; i++) { | 308 | for (i = 0; i < ndescs; i++) { |
| 309 | u32 idx = uq->cached_prod++ & uq->mask; | 309 | u32 idx = uq->cached_prod++ & uq->mask; |
| 310 | 310 | ||
| 311 | r[idx].idx = idx + i; | 311 | r[idx].addr = (id + i) << FRAME_SHIFT; |
| 312 | r[idx].len = sizeof(pkt_data) - 1; | 312 | r[idx].len = sizeof(pkt_data) - 1; |
| 313 | r[idx].offset = 0; | ||
| 314 | } | 313 | } |
| 315 | 314 | ||
| 316 | u_smp_wmb(); | 315 | u_smp_wmb(); |
| @@ -357,17 +356,21 @@ static void swap_mac_addresses(void *data) | |||
| 357 | *dst_addr = tmp; | 356 | *dst_addr = tmp; |
| 358 | } | 357 | } |
| 359 | 358 | ||
| 360 | #if DEBUG_HEXDUMP | 359 | static void hex_dump(void *pkt, size_t length, u64 addr) |
| 361 | static void hex_dump(void *pkt, size_t length, const char *prefix) | ||
| 362 | { | 360 | { |
| 363 | int i = 0; | ||
| 364 | const unsigned char *address = (unsigned char *)pkt; | 361 | const unsigned char *address = (unsigned char *)pkt; |
| 365 | const unsigned char *line = address; | 362 | const unsigned char *line = address; |
| 366 | size_t line_size = 32; | 363 | size_t line_size = 32; |
| 367 | unsigned char c; | 364 | unsigned char c; |
| 365 | char buf[32]; | ||
| 366 | int i = 0; | ||
| 368 | 367 | ||
| 368 | if (!DEBUG_HEXDUMP) | ||
| 369 | return; | ||
| 370 | |||
| 371 | sprintf(buf, "addr=%llu", addr); | ||
| 369 | printf("length = %zu\n", length); | 372 | printf("length = %zu\n", length); |
| 370 | printf("%s | ", prefix); | 373 | printf("%s | ", buf); |
| 371 | while (length-- > 0) { | 374 | while (length-- > 0) { |
| 372 | printf("%02X ", *address++); | 375 | printf("%02X ", *address++); |
| 373 | if (!(++i % line_size) || (length == 0 && i % line_size)) { | 376 | if (!(++i % line_size) || (length == 0 && i % line_size)) { |
| @@ -382,12 +385,11 @@ static void hex_dump(void *pkt, size_t length, const char *prefix) | |||
| 382 | } | 385 | } |
| 383 | printf("\n"); | 386 | printf("\n"); |
| 384 | if (length > 0) | 387 | if (length > 0) |
| 385 | printf("%s | ", prefix); | 388 | printf("%s | ", buf); |
| 386 | } | 389 | } |
| 387 | } | 390 | } |
| 388 | printf("\n"); | 391 | printf("\n"); |
| 389 | } | 392 | } |
| 390 | #endif | ||
| 391 | 393 | ||
| 392 | static size_t gen_eth_frame(char *frame) | 394 | static size_t gen_eth_frame(char *frame) |
| 393 | { | 395 | { |
| @@ -412,8 +414,8 @@ static struct xdp_umem *xdp_umem_configure(int sfd) | |||
| 412 | 414 | ||
| 413 | mr.addr = (__u64)bufs; | 415 | mr.addr = (__u64)bufs; |
| 414 | mr.len = NUM_FRAMES * FRAME_SIZE; | 416 | mr.len = NUM_FRAMES * FRAME_SIZE; |
| 415 | mr.frame_size = FRAME_SIZE; | 417 | mr.chunk_size = FRAME_SIZE; |
| 416 | mr.frame_headroom = FRAME_HEADROOM; | 418 | mr.headroom = FRAME_HEADROOM; |
| 417 | 419 | ||
| 418 | lassert(setsockopt(sfd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr)) == 0); | 420 | lassert(setsockopt(sfd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr)) == 0); |
| 419 | lassert(setsockopt(sfd, SOL_XDP, XDP_UMEM_FILL_RING, &fq_size, | 421 | lassert(setsockopt(sfd, SOL_XDP, XDP_UMEM_FILL_RING, &fq_size, |
| @@ -426,7 +428,7 @@ static struct xdp_umem *xdp_umem_configure(int sfd) | |||
| 426 | &optlen) == 0); | 428 | &optlen) == 0); |
| 427 | 429 | ||
| 428 | umem->fq.map = mmap(0, off.fr.desc + | 430 | umem->fq.map = mmap(0, off.fr.desc + |
| 429 | FQ_NUM_DESCS * sizeof(u32), | 431 | FQ_NUM_DESCS * sizeof(u64), |
| 430 | PROT_READ | PROT_WRITE, | 432 | PROT_READ | PROT_WRITE, |
| 431 | MAP_SHARED | MAP_POPULATE, sfd, | 433 | MAP_SHARED | MAP_POPULATE, sfd, |
| 432 | XDP_UMEM_PGOFF_FILL_RING); | 434 | XDP_UMEM_PGOFF_FILL_RING); |
| @@ -439,7 +441,7 @@ static struct xdp_umem *xdp_umem_configure(int sfd) | |||
| 439 | umem->fq.ring = umem->fq.map + off.fr.desc; | 441 | umem->fq.ring = umem->fq.map + off.fr.desc; |
| 440 | 442 | ||
| 441 | umem->cq.map = mmap(0, off.cr.desc + | 443 | umem->cq.map = mmap(0, off.cr.desc + |
| 442 | CQ_NUM_DESCS * sizeof(u32), | 444 | CQ_NUM_DESCS * sizeof(u64), |
| 443 | PROT_READ | PROT_WRITE, | 445 | PROT_READ | PROT_WRITE, |
| 444 | MAP_SHARED | MAP_POPULATE, sfd, | 446 | MAP_SHARED | MAP_POPULATE, sfd, |
| 445 | XDP_UMEM_PGOFF_COMPLETION_RING); | 447 | XDP_UMEM_PGOFF_COMPLETION_RING); |
| @@ -451,14 +453,14 @@ static struct xdp_umem *xdp_umem_configure(int sfd) | |||
| 451 | umem->cq.consumer = umem->cq.map + off.cr.consumer; | 453 | umem->cq.consumer = umem->cq.map + off.cr.consumer; |
| 452 | umem->cq.ring = umem->cq.map + off.cr.desc; | 454 | umem->cq.ring = umem->cq.map + off.cr.desc; |
| 453 | 455 | ||
| 454 | umem->frames = (char (*)[FRAME_SIZE])bufs; | 456 | umem->frames = bufs; |
| 455 | umem->fd = sfd; | 457 | umem->fd = sfd; |
| 456 | 458 | ||
| 457 | if (opt_bench == BENCH_TXONLY) { | 459 | if (opt_bench == BENCH_TXONLY) { |
| 458 | int i; | 460 | int i; |
| 459 | 461 | ||
| 460 | for (i = 0; i < NUM_FRAMES; i++) | 462 | for (i = 0; i < NUM_FRAMES * FRAME_SIZE; i += FRAME_SIZE) |
| 461 | (void)gen_eth_frame(&umem->frames[i][0]); | 463 | (void)gen_eth_frame(&umem->frames[i]); |
| 462 | } | 464 | } |
| 463 | 465 | ||
| 464 | return umem; | 466 | return umem; |
| @@ -472,7 +474,7 @@ static struct xdpsock *xsk_configure(struct xdp_umem *umem) | |||
| 472 | struct xdpsock *xsk; | 474 | struct xdpsock *xsk; |
| 473 | bool shared = true; | 475 | bool shared = true; |
| 474 | socklen_t optlen; | 476 | socklen_t optlen; |
| 475 | u32 i; | 477 | u64 i; |
| 476 | 478 | ||
| 477 | sfd = socket(PF_XDP, SOCK_RAW, 0); | 479 | sfd = socket(PF_XDP, SOCK_RAW, 0); |
| 478 | lassert(sfd >= 0); | 480 | lassert(sfd >= 0); |
| @@ -508,7 +510,7 @@ static struct xdpsock *xsk_configure(struct xdp_umem *umem) | |||
| 508 | lassert(xsk->rx.map != MAP_FAILED); | 510 | lassert(xsk->rx.map != MAP_FAILED); |
| 509 | 511 | ||
| 510 | if (!shared) { | 512 | if (!shared) { |
| 511 | for (i = 0; i < NUM_DESCS / 2; i++) | 513 | for (i = 0; i < NUM_DESCS * FRAME_SIZE; i += FRAME_SIZE) |
| 512 | lassert(umem_fill_to_kernel(&xsk->umem->fq, &i, 1) | 514 | lassert(umem_fill_to_kernel(&xsk->umem->fq, &i, 1) |
| 513 | == 0); | 515 | == 0); |
| 514 | } | 516 | } |
| @@ -727,7 +729,7 @@ static void kick_tx(int fd) | |||
| 727 | 729 | ||
| 728 | static inline void complete_tx_l2fwd(struct xdpsock *xsk) | 730 | static inline void complete_tx_l2fwd(struct xdpsock *xsk) |
| 729 | { | 731 | { |
| 730 | u32 descs[BATCH_SIZE]; | 732 | u64 descs[BATCH_SIZE]; |
| 731 | unsigned int rcvd; | 733 | unsigned int rcvd; |
| 732 | size_t ndescs; | 734 | size_t ndescs; |
| 733 | 735 | ||
| @@ -749,7 +751,7 @@ static inline void complete_tx_l2fwd(struct xdpsock *xsk) | |||
| 749 | 751 | ||
| 750 | static inline void complete_tx_only(struct xdpsock *xsk) | 752 | static inline void complete_tx_only(struct xdpsock *xsk) |
| 751 | { | 753 | { |
| 752 | u32 descs[BATCH_SIZE]; | 754 | u64 descs[BATCH_SIZE]; |
| 753 | unsigned int rcvd; | 755 | unsigned int rcvd; |
| 754 | 756 | ||
| 755 | if (!xsk->outstanding_tx) | 757 | if (!xsk->outstanding_tx) |
| @@ -774,17 +776,9 @@ static void rx_drop(struct xdpsock *xsk) | |||
| 774 | return; | 776 | return; |
| 775 | 777 | ||
| 776 | for (i = 0; i < rcvd; i++) { | 778 | for (i = 0; i < rcvd; i++) { |
| 777 | u32 idx = descs[i].idx; | 779 | char *pkt = xq_get_data(xsk, descs[i].addr); |
| 778 | |||
| 779 | lassert(idx < NUM_FRAMES); | ||
| 780 | #if DEBUG_HEXDUMP | ||
| 781 | char *pkt; | ||
| 782 | char buf[32]; | ||
| 783 | 780 | ||
| 784 | pkt = xq_get_data(xsk, idx, descs[i].offset); | 781 | hex_dump(pkt, descs[i].len, descs[i].addr); |
| 785 | sprintf(buf, "idx=%d", idx); | ||
| 786 | hex_dump(pkt, descs[i].len, buf); | ||
| 787 | #endif | ||
| 788 | } | 782 | } |
| 789 | 783 | ||
| 790 | xsk->rx_npkts += rcvd; | 784 | xsk->rx_npkts += rcvd; |
| @@ -867,17 +861,11 @@ static void l2fwd(struct xdpsock *xsk) | |||
| 867 | } | 861 | } |
| 868 | 862 | ||
| 869 | for (i = 0; i < rcvd; i++) { | 863 | for (i = 0; i < rcvd; i++) { |
| 870 | char *pkt = xq_get_data(xsk, descs[i].idx, | 864 | char *pkt = xq_get_data(xsk, descs[i].addr); |
| 871 | descs[i].offset); | ||
| 872 | 865 | ||
| 873 | swap_mac_addresses(pkt); | 866 | swap_mac_addresses(pkt); |
| 874 | #if DEBUG_HEXDUMP | ||
| 875 | char buf[32]; | ||
| 876 | u32 idx = descs[i].idx; | ||
| 877 | 867 | ||
| 878 | sprintf(buf, "idx=%d", idx); | 868 | hex_dump(pkt, descs[i].len, descs[i].addr); |
| 879 | hex_dump(pkt, descs[i].len, buf); | ||
| 880 | #endif | ||
| 881 | } | 869 | } |
| 882 | 870 | ||
| 883 | xsk->rx_npkts += rcvd; | 871 | xsk->rx_npkts += rcvd; |
