83 files changed, 1225 insertions(+), 368 deletions(-)
diff --git a/Documentation/ABI/testing/sysfs-class-net-dsa b/Documentation/ABI/testing/sysfs-class-net-dsa
index f240221e071e..985d84c585c6 100644
--- a/Documentation/ABI/testing/sysfs-class-net-dsa
+++ b/Documentation/ABI/testing/sysfs-class-net-dsa
@@ -1,4 +1,4 @@
-What:		/sys/class/net/<iface>/tagging
+What:		/sys/class/net/<iface>/dsa/tagging
 Date:		August 2018
 KernelVersion:	4.20
 Contact:	netdev@vger.kernel.org
diff --git a/MAINTAINERS b/MAINTAINERS
index f3ea157b7fc3..8119141a926f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -13890,6 +13890,13 @@ F:	drivers/md/raid*
 F:	include/linux/raid/
 F:	include/uapi/linux/raid/
 
+SOCIONEXT (SNI) AVE NETWORK DRIVER
+M:	Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
+L:	netdev@vger.kernel.org
+S:	Maintained
+F:	drivers/net/ethernet/socionext/sni_ave.c
+F:	Documentation/devicetree/bindings/net/socionext,uniphier-ave4.txt
+
 SOCIONEXT (SNI) NETSEC NETWORK DRIVER
 M:	Jassi Brar <jaswinder.singh@linaro.org>
 L:	netdev@vger.kernel.org
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index 17482f5de3e2..9393e231cbc2 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -891,6 +891,55 @@ cond_branch:
 	return 0;
 }
 
+/* Fix the branch target addresses for subprog calls */
+static int bpf_jit_fixup_subprog_calls(struct bpf_prog *fp, u32 *image,
+				       struct codegen_context *ctx, u32 *addrs)
+{
+	const struct bpf_insn *insn = fp->insnsi;
+	bool func_addr_fixed;
+	u64 func_addr;
+	u32 tmp_idx;
+	int i, ret;
+
+	for (i = 0; i < fp->len; i++) {
+		/*
+		 * During the extra pass, only the branch target addresses for
+		 * the subprog calls need to be fixed. All other instructions
+		 * can left untouched.
+		 *
+		 * The JITed image length does not change because we already
+		 * ensure that the JITed instruction sequence for these calls
+		 * are of fixed length by padding them with NOPs.
+		 */
+		if (insn[i].code == (BPF_JMP | BPF_CALL) &&
+		    insn[i].src_reg == BPF_PSEUDO_CALL) {
+			ret = bpf_jit_get_func_addr(fp, &insn[i], true,
+						    &func_addr,
+						    &func_addr_fixed);
+			if (ret < 0)
+				return ret;
+
+			/*
+			 * Save ctx->idx as this would currently point to the
+			 * end of the JITed image and set it to the offset of
+			 * the instruction sequence corresponding to the
+			 * subprog call temporarily.
+			 */
+			tmp_idx = ctx->idx;
+			ctx->idx = addrs[i] / 4;
+			bpf_jit_emit_func_call_rel(image, ctx, func_addr);
+
+			/*
+			 * Restore ctx->idx here. This is safe as the length
+			 * of the JITed sequence remains unchanged.
+			 */
+			ctx->idx = tmp_idx;
+		}
+	}
+
+	return 0;
+}
+
 struct powerpc64_jit_data {
 	struct bpf_binary_header *header;
 	u32 *addrs;
@@ -989,6 +1038,22 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
 skip_init_ctx:
 	code_base = (u32 *)(image + FUNCTION_DESCR_SIZE);
 
+	if (extra_pass) {
+		/*
+		 * Do not touch the prologue and epilogue as they will remain
+		 * unchanged. Only fix the branch target address for subprog
+		 * calls in the body.
+		 *
+		 * This does not change the offsets and lengths of the subprog
+		 * call instruction sequences and hence, the size of the JITed
+		 * image as well.
+		 */
+		bpf_jit_fixup_subprog_calls(fp, code_base, &cgctx, addrs);
+
+		/* There is no need to perform the usual passes. */
+		goto skip_codegen_passes;
+	}
+
 	/* Code generation passes 1-2 */
 	for (pass = 1; pass < 3; pass++) {
 		/* Now build the prologue, body code & epilogue for real. */
@@ -1002,6 +1067,7 @@ skip_init_ctx:
 			proglen - (cgctx.idx * 4), cgctx.seen);
 	}
 
+skip_codegen_passes:
 	if (bpf_jit_enable > 1)
 		/*
 		 * Note that we output the base address of the code_base
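
Note: the comments in bpf_jit_fixup_subprog_calls() above rest on one invariant: every subprog call sequence was padded with NOPs to a fixed length in the earlier passes, so the extra pass can re-emit just those sequences in place without shifting anything else. A rough standalone sketch of that save-index/re-emit/restore-index pattern follows; the context struct, helper names and the fake instruction encoding are invented for illustration and are not the kernel's.

/* Illustrative sketch (not kernel code) of re-emitting a fixed-length call
 * sequence in place by temporarily pointing the emitter at its recorded
 * offset, then restoring the emitter index.
 */
#include <stdint.h>
#include <stdio.h>

struct emit_ctx {
	uint32_t *image;   /* JITed instruction buffer */
	unsigned int idx;  /* next slot to emit into (32-bit words) */
};

static void emit(struct emit_ctx *ctx, uint32_t insn)
{
	if (ctx->image)
		ctx->image[ctx->idx] = insn;
	ctx->idx++;
}

/* Each call site occupies the same number of words, so re-emitting it with a
 * corrected target never moves the surrounding code.
 */
static void fixup_call(struct emit_ctx *ctx, const uint32_t *addrs, int i,
		       uint32_t new_target)
{
	unsigned int saved_idx = ctx->idx;	/* currently: end of image */

	ctx->idx = addrs[i] / 4;		/* offset of this call sequence */
	emit(ctx, 0x48000001u | new_target);	/* fake "call" encoding for the demo */
	ctx->idx = saved_idx;			/* length unchanged, safe to restore */
}

int main(void)
{
	uint32_t image[8] = { 0 };
	uint32_t addrs[2] = { 0, 8 };		/* byte offsets of two insns */
	struct emit_ctx ctx = { image, 4 };	/* pretend 4 words were emitted */

	fixup_call(&ctx, addrs, 1, 0x100);
	printf("patched word 2: 0x%08x, idx still %u\n", image[2], ctx.idx);
	return 0;
}
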
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index f43fb2f958a5..93dfcef8afc4 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -2086,6 +2086,9 @@ void bond_3ad_unbind_slave(struct slave *slave)
 			    aggregator->aggregator_identifier);
 
 	/* Tell the partner that this port is not suitable for aggregation */
+	port->actor_oper_port_state &= ~AD_STATE_SYNCHRONIZATION;
+	port->actor_oper_port_state &= ~AD_STATE_COLLECTING;
+	port->actor_oper_port_state &= ~AD_STATE_DISTRIBUTING;
 	port->actor_oper_port_state &= ~AD_STATE_AGGREGATION;
 	__update_lacpdu_from_port(port);
 	ad_lacpdu_send(port);
diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
index 65f10fec25b3..0b3e51f248c2 100644
--- a/drivers/net/dsa/mv88e6060.c
+++ b/drivers/net/dsa/mv88e6060.c
@@ -116,8 +116,7 @@ static int mv88e6060_switch_reset(struct dsa_switch *ds)
 	/* Reset the switch. */
 	REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL,
 		  GLOBAL_ATU_CONTROL_SWRESET |
-		  GLOBAL_ATU_CONTROL_ATUSIZE_1024 |
-		  GLOBAL_ATU_CONTROL_ATE_AGE_5MIN);
+		  GLOBAL_ATU_CONTROL_LEARNDIS);
 
 	/* Wait up to one second for reset to complete. */
 	timeout = jiffies + 1 * HZ;
@@ -142,13 +141,10 @@ static int mv88e6060_setup_global(struct dsa_switch *ds)
 	 */
 	REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL, GLOBAL_CONTROL_MAX_FRAME_1536);
 
-	/* Enable automatic address learning, set the address
-	 * database size to 1024 entries, and set the default aging
-	 * time to 5 minutes.
+	/* Disable automatic address learning.
 	 */
 	REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL,
-		  GLOBAL_ATU_CONTROL_ATUSIZE_1024 |
-		  GLOBAL_ATU_CONTROL_ATE_AGE_5MIN);
+		  GLOBAL_ATU_CONTROL_LEARNDIS);
 
 	return 0;
 }
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index f02592f43fe3..a7e853fa43c2 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -674,7 +674,7 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
 
 		rx_stat = (0x0000003CU & rxd_wb->status) >> 2;
 
-		is_rx_check_sum_enabled = (rxd_wb->type) & (0x3U << 19);
+		is_rx_check_sum_enabled = (rxd_wb->type >> 19) & 0x3U;
 
 		pkt_type = 0xFFU & (rxd_wb->type >> 4);
 
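
Note: the one-line aquantia change above turns a mask-in-place test into a proper field extraction, so the two checksum-enable bits can be used as low-order flags afterwards. A minimal standalone sketch of the difference, assuming only that the field sits at bits 19..20 of the descriptor word as the diff implies:

/* Extracting a 2-bit field: shift down first, then mask. */
#include <stdio.h>

int main(void)
{
	unsigned int type = 0x3u << 19;			/* both checksum bits set */

	unsigned int old_way = type & (0x3u << 19);	/* 0x180000: still shifted up */
	unsigned int new_way = (type >> 19) & 0x3u;	/* 3: usable as flag bits */

	printf("old=0x%x new=0x%x\n", old_way, new_way);
	return 0;
}
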
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index d4c300117529..5d21c14853ac 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -5162,6 +5162,7 @@ static int bnxt_hwrm_get_rings(struct bnxt *bp)
 		cp = le16_to_cpu(resp->alloc_cmpl_rings);
 		stats = le16_to_cpu(resp->alloc_stat_ctx);
 		cp = min_t(u16, cp, stats);
+		hw_resc->resv_irqs = cp;
 		if (bp->flags & BNXT_FLAG_CHIP_P5) {
 			int rx = hw_resc->resv_rx_rings;
 			int tx = hw_resc->resv_tx_rings;
@@ -5175,7 +5176,7 @@ static int bnxt_hwrm_get_rings(struct bnxt *bp)
 			hw_resc->resv_rx_rings = rx;
 			hw_resc->resv_tx_rings = tx;
 		}
-		cp = le16_to_cpu(resp->alloc_msix);
+		hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
 		hw_resc->resv_hw_ring_grps = rx;
 	}
 	hw_resc->resv_cp_rings = cp;
@@ -5353,7 +5354,7 @@ static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
 	return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, vnic);
 }
 
-static int bnxt_cp_rings_in_use(struct bnxt *bp)
+static int bnxt_nq_rings_in_use(struct bnxt *bp)
 {
 	int cp = bp->cp_nr_rings;
 	int ulp_msix, ulp_base;
@@ -5368,10 +5369,22 @@ static int bnxt_cp_rings_in_use(struct bnxt *bp)
 	return cp;
 }
 
+static int bnxt_cp_rings_in_use(struct bnxt *bp)
+{
+	int cp;
+
+	if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+		return bnxt_nq_rings_in_use(bp);
+
+	cp = bp->tx_nr_rings + bp->rx_nr_rings;
+	return cp;
+}
+
 static bool bnxt_need_reserve_rings(struct bnxt *bp)
 {
 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
 	int cp = bnxt_cp_rings_in_use(bp);
+	int nq = bnxt_nq_rings_in_use(bp);
 	int rx = bp->rx_nr_rings;
 	int vnic = 1, grp = rx;
 
@@ -5387,7 +5400,7 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp)
 		rx <<= 1;
 	if (BNXT_NEW_RM(bp) &&
 	    (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
-	     hw_resc->resv_vnics != vnic ||
+	     hw_resc->resv_irqs < nq || hw_resc->resv_vnics != vnic ||
 	     (hw_resc->resv_hw_ring_grps != grp &&
 	      !(bp->flags & BNXT_FLAG_CHIP_P5))))
 		return true;
@@ -5397,7 +5410,7 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp)
 static int __bnxt_reserve_rings(struct bnxt *bp)
 {
 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
-	int cp = bnxt_cp_rings_in_use(bp);
+	int cp = bnxt_nq_rings_in_use(bp);
 	int tx = bp->tx_nr_rings;
 	int rx = bp->rx_nr_rings;
 	int grp, rx_rings, rc;
@@ -5422,7 +5435,7 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
 	tx = hw_resc->resv_tx_rings;
 	if (BNXT_NEW_RM(bp)) {
 		rx = hw_resc->resv_rx_rings;
-		cp = hw_resc->resv_cp_rings;
+		cp = hw_resc->resv_irqs;
 		grp = hw_resc->resv_hw_ring_grps;
 		vnic = hw_resc->resv_vnics;
 	}
@@ -6292,6 +6305,8 @@ hwrm_func_qcaps_exit:
 	return rc;
 }
 
+static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
+
 static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
 {
 	int rc;
@@ -6299,6 +6314,11 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
 	rc = __bnxt_hwrm_func_qcaps(bp);
 	if (rc)
 		return rc;
+	rc = bnxt_hwrm_queue_qportcfg(bp);
+	if (rc) {
+		netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
+		return rc;
+	}
 	if (bp->hwrm_spec_code >= 0x10803) {
 		rc = bnxt_alloc_ctx_mem(bp);
 		if (rc)
@@ -7026,7 +7046,12 @@ unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
 
 unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
 {
-	return bp->hw_resc.max_cp_rings - bnxt_get_ulp_msix_num(bp);
+	unsigned int cp = bp->hw_resc.max_cp_rings;
+
+	if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+		cp -= bnxt_get_ulp_msix_num(bp);
+
+	return cp;
 }
 
 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
@@ -7048,7 +7073,9 @@ int bnxt_get_avail_msix(struct bnxt *bp, int num)
 	int total_req = bp->cp_nr_rings + num;
 	int max_idx, avail_msix;
 
-	max_idx = min_t(int, bp->total_irqs, max_cp);
+	max_idx = bp->total_irqs;
+	if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+		max_idx = min_t(int, bp->total_irqs, max_cp);
 	avail_msix = max_idx - bp->cp_nr_rings;
 	if (!BNXT_NEW_RM(bp) || avail_msix >= num)
 		return avail_msix;
@@ -7066,7 +7093,7 @@ static int bnxt_get_num_msix(struct bnxt *bp)
 	if (!BNXT_NEW_RM(bp))
 		return bnxt_get_max_func_irqs(bp);
 
-	return bnxt_cp_rings_in_use(bp);
+	return bnxt_nq_rings_in_use(bp);
 }
 
 static int bnxt_init_msix(struct bnxt *bp)
@@ -7794,6 +7821,7 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
 
 	rc = bnxt_hwrm_func_resc_qcaps(bp, true);
 	hw_resc->resv_cp_rings = 0;
+	hw_resc->resv_irqs = 0;
 	hw_resc->resv_tx_rings = 0;
 	hw_resc->resv_rx_rings = 0;
 	hw_resc->resv_hw_ring_grps = 0;
@@ -9799,13 +9827,16 @@ static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
 				int *max_cp)
 {
 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
-	int max_ring_grps = 0;
+	int max_ring_grps = 0, max_irq;
 
 	*max_tx = hw_resc->max_tx_rings;
 	*max_rx = hw_resc->max_rx_rings;
-	*max_cp = min_t(int, bnxt_get_max_func_cp_rings_for_en(bp),
-			hw_resc->max_irqs - bnxt_get_ulp_msix_num(bp));
-	*max_cp = min_t(int, *max_cp, hw_resc->max_stat_ctxs);
+	*max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
+	max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
+			bnxt_get_ulp_msix_num(bp),
+			bnxt_get_max_func_stat_ctxs(bp));
+	if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+		*max_cp = min_t(int, *max_cp, max_irq);
 	max_ring_grps = hw_resc->max_hw_ring_grps;
 	if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
 		*max_cp -= 1;
@@ -9813,6 +9844,11 @@ static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
 	}
 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
 		*max_rx >>= 1;
+	if (bp->flags & BNXT_FLAG_CHIP_P5) {
+		bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
+		/* On P5 chips, max_cp output param should be available NQs */
+		*max_cp = max_irq;
+	}
 	*max_rx = min_t(int, *max_rx, max_ring_grps);
 }
 
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 9e99d4ab3e06..3030931ccaf8 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -928,6 +928,7 @@ struct bnxt_hw_resc {
 	u16	min_stat_ctxs;
 	u16	max_stat_ctxs;
 	u16	max_irqs;
+	u16	resv_irqs;
 };
 
 #if defined(CONFIG_BNXT_SRIOV)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index b59b382d34f9..0a3097baafde 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -168,7 +168,7 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
 	if (BNXT_NEW_RM(bp)) {
 		struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
 
-		avail_msix = hw_resc->resv_cp_rings - bp->cp_nr_rings;
+		avail_msix = hw_resc->resv_irqs - bp->cp_nr_rings;
 		edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
 	}
 	bnxt_fill_msix_vecs(bp, ent);
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
index 4c3925af53bc..abe5d0dac851 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
@@ -111,7 +111,7 @@ static const char oct_stats_strings[][ETH_GSTRING_LEN] = {
 	"mac_tx_one_collision",
 	"mac_tx_multi_collision",
 	"mac_tx_max_collision_fail",
-	"mac_tx_max_deferal_fail",
+	"mac_tx_max_deferral_fail",
 	"mac_tx_fifo_err",
 	"mac_tx_runts",
 
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
index ea9859e028d4..de61060721c4 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
@@ -349,13 +349,15 @@ lio_vf_rep_packet_sent_callback(struct octeon_device *oct,
 	struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
 	struct sk_buff *skb = sc->ctxptr;
 	struct net_device *ndev = skb->dev;
+	u32 iq_no;
 
 	dma_unmap_single(&oct->pci_dev->dev, sc->dmadptr,
 			 sc->datasize, DMA_TO_DEVICE);
 	dev_kfree_skb_any(skb);
+	iq_no = sc->iq_no;
 	octeon_free_soft_command(oct, sc);
 
-	if (octnet_iq_is_full(oct, sc->iq_no))
+	if (octnet_iq_is_full(oct, iq_no))
 		return;
 
 	if (netif_queue_stopped(ndev))
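
Note: the liquidio change above is the usual copy-before-free pattern: sc->iq_no is read into a local before octeon_free_soft_command() releases sc, so the later queue-full check no longer touches freed memory. A tiny standalone illustration of the same idea (the struct and the queue check are made up for the example):

/* Copy what you still need out of an object before freeing it. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct soft_cmd {
	uint32_t iq_no;
};

static int queue_is_full(uint32_t iq_no)
{
	return iq_no > 7;	/* stand-in for the real check */
}

int main(void)
{
	struct soft_cmd *sc = malloc(sizeof(*sc));
	uint32_t iq_no;

	if (!sc)
		return 1;
	sc->iq_no = 3;

	iq_no = sc->iq_no;	/* take the copy first ... */
	free(sc);		/* ... so this free cannot invalidate it */

	printf("queue %u full: %d\n", iq_no, queue_is_full(iq_no));
	return 0;
}
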
diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
index c415ac67cb7b..e80fedb27cee 100644
--- a/drivers/net/ethernet/freescale/fman/fman.c
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -2786,7 +2786,7 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
 	if (!muram_node) {
 		dev_err(&of_dev->dev, "%s: could not find MURAM node\n",
 			__func__);
-		goto fman_node_put;
+		goto fman_free;
 	}
 
 	err = of_address_to_resource(muram_node, 0,
@@ -2795,11 +2795,10 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
 		of_node_put(muram_node);
 		dev_err(&of_dev->dev, "%s: of_address_to_resource() = %d\n",
 			__func__, err);
-		goto fman_node_put;
+		goto fman_free;
 	}
 
 	of_node_put(muram_node);
-	of_node_put(fm_node);
 
 	err = devm_request_irq(&of_dev->dev, irq, fman_irq, IRQF_SHARED,
 			       "fman", fman);
diff --git a/drivers/net/ethernet/ibm/emac/emac.h b/drivers/net/ethernet/ibm/emac/emac.h
index e2f80cca9bed..0d2de6f67676 100644
--- a/drivers/net/ethernet/ibm/emac/emac.h
+++ b/drivers/net/ethernet/ibm/emac/emac.h
@@ -231,7 +231,7 @@ struct emac_regs {
 #define EMAC_STACR_PHYE			0x00004000
 #define EMAC_STACR_STAC_MASK		0x00003000
 #define EMAC_STACR_STAC_READ		0x00001000
-#define EMAC_STACR_STAC_WRITE		0x00000800
+#define EMAC_STACR_STAC_WRITE		0x00002000
 #define EMAC_STACR_OPBC_MASK		0x00000C00
 #define EMAC_STACR_OPBC_50		0x00000000
 #define EMAC_STACR_OPBC_66		0x00000400
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index c0203a0d5e3b..ed50b8dee44f 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1859,7 +1859,7 @@ static int do_reset(struct ibmvnic_adapter *adapter,
 
 	if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
 	    adapter->reset_reason != VNIC_RESET_CHANGE_PARAM)
-		netdev_notify_peers(netdev);
+		call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev);
 
 	netif_carrier_on(netdev);
 
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 7a37a37e3fb3..125ea99418df 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -4375,8 +4375,27 @@ static void mvpp2_phylink_validate(struct net_device *dev,
 				   unsigned long *supported,
 				   struct phylink_link_state *state)
 {
+	struct mvpp2_port *port = netdev_priv(dev);
 	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
 
+	/* Invalid combinations */
+	switch (state->interface) {
+	case PHY_INTERFACE_MODE_10GKR:
+	case PHY_INTERFACE_MODE_XAUI:
+		if (port->gop_id != 0)
+			goto empty_set;
+		break;
+	case PHY_INTERFACE_MODE_RGMII:
+	case PHY_INTERFACE_MODE_RGMII_ID:
+	case PHY_INTERFACE_MODE_RGMII_RXID:
+	case PHY_INTERFACE_MODE_RGMII_TXID:
+		if (port->gop_id == 0)
+			goto empty_set;
+		break;
+	default:
+		break;
+	}
+
 	phylink_set(mask, Autoneg);
 	phylink_set_port_modes(mask);
 	phylink_set(mask, Pause);
@@ -4384,6 +4403,8 @@ static void mvpp2_phylink_validate(struct net_device *dev,
 
 	switch (state->interface) {
 	case PHY_INTERFACE_MODE_10GKR:
+	case PHY_INTERFACE_MODE_XAUI:
+	case PHY_INTERFACE_MODE_NA:
 		phylink_set(mask, 10000baseCR_Full);
 		phylink_set(mask, 10000baseSR_Full);
 		phylink_set(mask, 10000baseLR_Full);
@@ -4391,7 +4412,11 @@ static void mvpp2_phylink_validate(struct net_device *dev,
 		phylink_set(mask, 10000baseER_Full);
 		phylink_set(mask, 10000baseKR_Full);
 		/* Fall-through */
-	default:
+	case PHY_INTERFACE_MODE_RGMII:
+	case PHY_INTERFACE_MODE_RGMII_ID:
+	case PHY_INTERFACE_MODE_RGMII_RXID:
+	case PHY_INTERFACE_MODE_RGMII_TXID:
+	case PHY_INTERFACE_MODE_SGMII:
 		phylink_set(mask, 10baseT_Half);
 		phylink_set(mask, 10baseT_Full);
 		phylink_set(mask, 100baseT_Half);
@@ -4403,11 +4428,18 @@ static void mvpp2_phylink_validate(struct net_device *dev,
 		phylink_set(mask, 1000baseT_Full);
 		phylink_set(mask, 1000baseX_Full);
 		phylink_set(mask, 2500baseX_Full);
+		break;
+	default:
+		goto empty_set;
 	}
 
 	bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
 	bitmap_and(state->advertising, state->advertising, mask,
 		   __ETHTOOL_LINK_MODE_MASK_NBITS);
+	return;
+
+empty_set:
+	bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
 }
 
 static void mvpp22_xlg_link_state(struct mvpp2_port *port,
diff --git a/drivers/net/ethernet/mellanox/mlx4/Kconfig b/drivers/net/ethernet/mellanox/mlx4/Kconfig
index 36054e6fb9d3..f200b8c420d5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx4/Kconfig
@@ -5,7 +5,7 @@
 config MLX4_EN
 	tristate "Mellanox Technologies 1/10/40Gbit Ethernet support"
 	depends on MAY_USE_DEVLINK
-	depends on PCI
+	depends on PCI && NETDEVICES && ETHERNET && INET
 	select MLX4_CORE
 	imply PTP_1588_CLOCK
 	---help---
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index f11b45001cad..d290f0787dfb 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -1084,8 +1084,8 @@ static int mlx4_en_set_pauseparam(struct net_device *dev,
 
 	tx_pause = !!(pause->tx_pause);
 	rx_pause = !!(pause->rx_pause);
-	rx_ppp = priv->prof->rx_ppp && !(tx_pause || rx_pause);
-	tx_ppp = priv->prof->tx_ppp && !(tx_pause || rx_pause);
+	rx_ppp = (tx_pause || rx_pause) ? 0 : priv->prof->rx_ppp;
+	tx_ppp = (tx_pause || rx_pause) ? 0 : priv->prof->tx_ppp;
 
 	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
 				    priv->rx_skb_size + ETH_FCS_LEN,
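
Note: whatever the driver-level motivation, the C-level difference in the mlx4 change above is that `a && !b` collapses any non-zero setting to 1, while the ternary keeps the stored rx_ppp/tx_ppp value whenever global pause is off. A small standalone check of that difference (the value 8 is arbitrary):

/* "a && !b" yields 0/1; the ternary preserves the configured value. */
#include <stdio.h>

int main(void)
{
	int prof_rx_ppp = 8;	/* configured per-priority pause setting */
	int tx_pause = 0, rx_pause = 0;

	int old_way = prof_rx_ppp && !(tx_pause || rx_pause);		/* 1 */
	int new_way = (tx_pause || rx_pause) ? 0 : prof_rx_ppp;		/* 8 */

	printf("old=%d new=%d\n", old_way, new_way);
	return 0;
}
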
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index b744cd49a785..6b88881b8e35 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -3493,8 +3493,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 		dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
 	}
 
-	/* MTU range: 46 - hw-specific max */
-	dev->min_mtu = MLX4_EN_MIN_MTU;
+	/* MTU range: 68 - hw-specific max */
+	dev->min_mtu = ETH_MIN_MTU;
 	dev->max_mtu = priv->max_mtu;
 
 	mdev->pndev[port] = dev;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 485d856546c6..8137454e2534 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -161,7 +161,6 @@
 #define MLX4_SELFTEST_LB_MIN_MTU (MLX4_LOOPBACK_TEST_PAYLOAD + NET_IP_ALIGN + \
 				  ETH_HLEN + PREAMBLE_LEN)
 
-#define MLX4_EN_MIN_MTU		46
 /* VLAN_HLEN is added twice,to support skb vlan tagged with multiple
  * headers. (For example: ETH_P_8021Q and ETH_P_8021AD).
  */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 16985ca3248d..624eed345b5d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -724,9 +724,9 @@ static u32 mlx5e_get_fcs(const struct sk_buff *skb)
 	return __get_unaligned_cpu32(fcs_bytes);
 }
 
-static u8 get_ip_proto(struct sk_buff *skb, __be16 proto)
+static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto)
 {
-	void *ip_p = skb->data + sizeof(struct ethhdr);
+	void *ip_p = skb->data + network_depth;
 
 	return (proto == htons(ETH_P_IP)) ? ((struct iphdr *)ip_p)->protocol :
 					    ((struct ipv6hdr *)ip_p)->nexthdr;
@@ -755,7 +755,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
 		goto csum_unnecessary;
 
 	if (likely(is_last_ethertype_ip(skb, &network_depth, &proto))) {
-		if (unlikely(get_ip_proto(skb, proto) == IPPROTO_SCTP))
+		if (unlikely(get_ip_proto(skb, network_depth, proto) == IPPROTO_SCTP))
 			goto csum_unnecessary;
 
 		skb->ip_summed = CHECKSUM_COMPLETE;
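
Note: the mlx5e change above stops assuming the IP header sits right after a bare Ethernet header; is_last_ethertype_ip() already reports the real network_depth, which grows with each VLAN tag. A standalone sketch of the offsets involved (14 and 4 are the standard Ethernet and 802.1Q header sizes; no kernel headers are used):

/* The L3 header offset depends on how many VLAN tags precede it. */
#include <stdio.h>

#define ETH_HLEN	14
#define VLAN_HLEN	4

int main(void)
{
	int vlan_tags;

	for (vlan_tags = 0; vlan_tags <= 2; vlan_tags++) {
		int network_depth = ETH_HLEN + vlan_tags * VLAN_HLEN;

		/* The protocol byte must be read at this offset; a fixed
		 * ETH_HLEN is only correct when vlan_tags == 0.
		 */
		printf("%d VLAN tag(s): L3 header at offset %d\n",
		       vlan_tags, network_depth);
	}
	return 0;
}
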
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
index ad06d9969bc1..5c13674439f1 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
@@ -560,7 +560,7 @@ static void mlxsw_sp_nve_mc_list_ip_del(struct mlxsw_sp *mlxsw_sp,
 
 	mc_record = mlxsw_sp_nve_mc_record_find(mc_list, proto, addr,
 						&mc_entry);
-	if (WARN_ON(!mc_record))
+	if (!mc_record)
 		return;
 
 	mlxsw_sp_nve_mc_record_entry_del(mc_record, mc_entry);
@@ -647,7 +647,7 @@ void mlxsw_sp_nve_flood_ip_del(struct mlxsw_sp *mlxsw_sp,
 
 	key.fid_index = mlxsw_sp_fid_index(fid);
 	mc_list = mlxsw_sp_nve_mc_list_find(mlxsw_sp, &key);
-	if (WARN_ON(!mc_list))
+	if (!mc_list)
 		return;
 
 	mlxsw_sp_nve_fid_flood_index_clear(fid, mc_list);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 9e9bb57134f2..6ebf99cc3154 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -1275,15 +1275,12 @@ mlxsw_sp_ipip_entry_matches_decap(struct mlxsw_sp *mlxsw_sp,
 {
 	u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
 	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
-	struct net_device *ipip_ul_dev;
 
 	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
 		return false;
 
-	ipip_ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev);
 	return mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, ul_dip,
-						 ul_tb_id, ipip_entry) &&
-	       (!ipip_ul_dev || ipip_ul_dev == ul_dev);
+						 ul_tb_id, ipip_entry);
 }
 
 /* Given decap parameters, find the corresponding IPIP entry. */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 739a51f0a366..50080c60a279 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -296,7 +296,13 @@ static bool
 mlxsw_sp_bridge_port_should_destroy(const struct mlxsw_sp_bridge_port *
 				    bridge_port)
 {
-	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_port->dev);
+	struct net_device *dev = bridge_port->dev;
+	struct mlxsw_sp *mlxsw_sp;
+
+	if (is_vlan_dev(dev))
+		mlxsw_sp = mlxsw_sp_lower_get(vlan_dev_real_dev(dev));
+	else
+		mlxsw_sp = mlxsw_sp_lower_get(dev);
 
 	/* In case ports were pulled from out of a bridged LAG, then
 	 * it's possible the reference count isn't zero, yet the bridge
@@ -2109,7 +2115,7 @@ mlxsw_sp_bridge_8021d_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
 
 	vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : 1;
 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
-	if (WARN_ON(!mlxsw_sp_port_vlan))
+	if (!mlxsw_sp_port_vlan)
 		return;
 
 	mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
@@ -2134,8 +2140,10 @@ mlxsw_sp_bridge_8021d_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
 	if (!fid)
 		return -EINVAL;
 
-	if (mlxsw_sp_fid_vni_is_set(fid))
-		return -EINVAL;
+	if (mlxsw_sp_fid_vni_is_set(fid)) {
+		err = -EINVAL;
+		goto err_vni_exists;
+	}
 
 	err = mlxsw_sp_nve_fid_enable(mlxsw_sp, fid, &params, extack);
 	if (err)
@@ -2149,6 +2157,7 @@ mlxsw_sp_bridge_8021d_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
 	return 0;
 
 err_nve_fid_enable:
+err_vni_exists:
 	mlxsw_sp_fid_put(fid);
 	return err;
 }
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 29c95423ab64..2f49eb75f3cc 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -476,16 +476,16 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
 	if (err)
 		goto err_destroy_flow;
 
-	err = nfp_flower_xmit_flow(netdev, flow_pay,
-				   NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
-	if (err)
-		goto err_destroy_flow;
-
 	flow_pay->tc_flower_cookie = flow->cookie;
 	err = rhashtable_insert_fast(&priv->flow_table, &flow_pay->fl_node,
 				     nfp_flower_table_params);
 	if (err)
-		goto err_destroy_flow;
+		goto err_release_metadata;
+
+	err = nfp_flower_xmit_flow(netdev, flow_pay,
+				   NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
+	if (err)
+		goto err_remove_rhash;
 
 	port->tc_offload_cnt++;
 
@@ -494,6 +494,12 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
 
 	return 0;
 
+err_remove_rhash:
+	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
+					    &flow_pay->fl_node,
+					    nfp_flower_table_params));
+err_release_metadata:
+	nfp_modify_flow_metadata(app, flow_pay);
 err_destroy_flow:
 	kfree(flow_pay->action_data);
 	kfree(flow_pay->mask_data);
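
Note: the nfp reordering above also extends the error unwinding: once the flow sits in the rhashtable, a later transmit failure has to remove it again and release the metadata before the common free path runs, which is why the labels appear in reverse order of the completed steps. A stripped-down sketch of that label ordering, with every helper invented for the example:

/* Goto-based unwind: labels run in reverse order of completed steps. */
#include <stdio.h>

static int step_metadata(void)  { return 0; }
static int step_insert(void)    { return 0; }
static int step_xmit(void)      { return -1; }	/* pretend the last step fails */
static void undo_insert(void)   { puts("undo: remove from table"); }
static void undo_metadata(void) { puts("undo: release metadata"); }

static int add_offload(void)
{
	int err;

	err = step_metadata();
	if (err)
		goto err_destroy;
	err = step_insert();
	if (err)
		goto err_release_metadata;
	err = step_xmit();
	if (err)
		goto err_remove_insert;
	return 0;

err_remove_insert:
	undo_insert();
err_release_metadata:
	undo_metadata();
err_destroy:
	puts("undo: free flow");
	return err;
}

int main(void)
{
	printf("add_offload() = %d\n", add_offload());
	return 0;
}
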
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 81045dfa1cd8..44f6e4873aad 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -571,6 +571,7 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance)
 	struct cp_private *cp;
 	int handled = 0;
 	u16 status;
+	u16 mask;
 
 	if (unlikely(dev == NULL))
 		return IRQ_NONE;
@@ -578,6 +579,10 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance)
 
 	spin_lock(&cp->lock);
 
+	mask = cpr16(IntrMask);
+	if (!mask)
+		goto out_unlock;
+
 	status = cpr16(IntrStatus);
 	if (!status || (status == 0xFFFF))
 		goto out_unlock;
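
Note: the 8139cp change above guards a shared interrupt line: when this device currently has every interrupt source masked, the handler bails out without claiming the interrupt or touching further hardware state. A userspace sketch of the same guard with simulated registers:

/* Shared-IRQ guard: if our device has everything masked, it is not ours. */
#include <stdint.h>
#include <stdio.h>

static uint16_t intr_mask;	/* simulated IntrMask register */
static uint16_t intr_status;	/* simulated IntrStatus register */

static int irq_handler(void)
{
	uint16_t mask = intr_mask;

	if (!mask)			/* interrupts disabled: not ours */
		return 0;

	if (!intr_status || intr_status == 0xFFFF)
		return 0;		/* nothing pending / device gone */

	printf("handling status 0x%04x\n", intr_status);
	return 1;			/* "handled" */
}

int main(void)
{
	intr_mask = 0;			/* device has masked everything */
	intr_status = 0x0001;
	printf("handled: %d\n", irq_handler());

	intr_mask = 0xFFFF;
	printf("handled: %d\n", irq_handler());
	return 0;
}
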
diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c
index 6732f5cbde08..7c7cd9d94bcc 100644
--- a/drivers/net/ethernet/socionext/sni_ave.c
+++ b/drivers/net/ethernet/socionext/sni_ave.c
@@ -185,8 +185,8 @@
 			 NETIF_MSG_TX_ERR)
 
 /* Parameter for descriptor */
-#define AVE_NR_TXDESC	32	/* Tx descriptor */
-#define AVE_NR_RXDESC	64	/* Rx descriptor */
+#define AVE_NR_TXDESC	64	/* Tx descriptor */
+#define AVE_NR_RXDESC	256	/* Rx descriptor */
 
 #define AVE_DESC_OFS_CMDSTS	0
 #define AVE_DESC_OFS_ADDRL	4
@@ -194,6 +194,7 @@
 
 /* Parameter for ethernet frame */
 #define AVE_MAX_ETHFRAME	1518
+#define AVE_FRAME_HEADROOM	2
 
 /* Parameter for interrupt */
 #define AVE_INTM_COUNT		20
@@ -576,12 +577,13 @@ static int ave_rxdesc_prepare(struct net_device *ndev, int entry)
 
 	skb = priv->rx.desc[entry].skbs;
 	if (!skb) {
-		skb = netdev_alloc_skb_ip_align(ndev,
-						AVE_MAX_ETHFRAME);
+		skb = netdev_alloc_skb(ndev, AVE_MAX_ETHFRAME);
 		if (!skb) {
 			netdev_err(ndev, "can't allocate skb for Rx\n");
 			return -ENOMEM;
 		}
+		skb->data += AVE_FRAME_HEADROOM;
+		skb->tail += AVE_FRAME_HEADROOM;
 	}
 
 	/* set disable to cmdsts */
@@ -594,12 +596,12 @@ static int ave_rxdesc_prepare(struct net_device *ndev, int entry)
 	 * - Rx buffer begins with 2 byte headroom, and data will be put from
 	 *   (buffer + 2).
 	 * To satisfy this, specify the address to put back the buffer
-	 * pointer advanced by NET_IP_ALIGN by netdev_alloc_skb_ip_align(),
-	 * and expand the map size by NET_IP_ALIGN.
+	 * pointer advanced by AVE_FRAME_HEADROOM, and expand the map size
+	 * by AVE_FRAME_HEADROOM.
 	 */
 	ret = ave_dma_map(ndev, &priv->rx.desc[entry],
-			  skb->data - NET_IP_ALIGN,
-			  AVE_MAX_ETHFRAME + NET_IP_ALIGN,
+			  skb->data - AVE_FRAME_HEADROOM,
+			  AVE_MAX_ETHFRAME + AVE_FRAME_HEADROOM,
 			  DMA_FROM_DEVICE, &paddr);
 	if (ret) {
 		netdev_err(ndev, "can't map skb for Rx\n");
@@ -1689,9 +1691,10 @@ static int ave_probe(struct platform_device *pdev)
 		 pdev->name, pdev->id);
 
 	/* Register as a NAPI supported driver */
-	netif_napi_add(ndev, &priv->napi_rx, ave_napi_poll_rx, priv->rx.ndesc);
+	netif_napi_add(ndev, &priv->napi_rx, ave_napi_poll_rx,
+		       NAPI_POLL_WEIGHT);
 	netif_tx_napi_add(ndev, &priv->napi_tx, ave_napi_poll_tx,
-			  priv->tx.ndesc);
+			  NAPI_POLL_WEIGHT);
 
 	platform_set_drvdata(pdev, ndev);
 
@@ -1913,5 +1916,6 @@ static struct platform_driver ave_driver = {
 };
 module_platform_driver(ave_driver);
 
+MODULE_AUTHOR("Kunihiko Hayashi <hayashi.kunihiko@socionext.com>");
 MODULE_DESCRIPTION("Socionext UniPhier AVE ethernet driver");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 076a8be18d67..5551fead8f66 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2550,12 +2550,6 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
 			netdev_warn(priv->dev, "PTP init failed\n");
 	}
 
-#ifdef CONFIG_DEBUG_FS
-	ret = stmmac_init_fs(dev);
-	if (ret < 0)
-		netdev_warn(priv->dev, "%s: failed debugFS registration\n",
-			    __func__);
-#endif
 	priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
 
 	if (priv->use_riwt) {
@@ -2756,10 +2750,6 @@ static int stmmac_release(struct net_device *dev)
 
 	netif_carrier_off(dev);
 
-#ifdef CONFIG_DEBUG_FS
-	stmmac_exit_fs(dev);
-#endif
-
 	stmmac_release_ptp(priv);
 
 	return 0;
@@ -3899,6 +3889,9 @@ static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
 	u32 tx_count = priv->plat->tx_queues_to_use;
 	u32 queue;
 
+	if ((dev->flags & IFF_UP) == 0)
+		return 0;
+
 	for (queue = 0; queue < rx_count; queue++) {
 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
 
@@ -4397,6 +4390,13 @@ int stmmac_dvr_probe(struct device *device,
 		goto error_netdev_register;
 	}
 
+#ifdef CONFIG_DEBUG_FS
+	ret = stmmac_init_fs(ndev);
+	if (ret < 0)
+		netdev_warn(priv->dev, "%s: failed debugFS registration\n",
+			    __func__);
+#endif
+
 	return ret;
 
 error_netdev_register:
@@ -4432,6 +4432,9 @@ int stmmac_dvr_remove(struct device *dev)
 
 	netdev_info(priv->dev, "%s: removing driver", __func__);
 
+#ifdef CONFIG_DEBUG_FS
+	stmmac_exit_fs(ndev);
+#endif
 	stmmac_stop_all_dma(priv);
 
 	stmmac_mac_set(priv, priv->ioaddr, false);
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index fc8d5f1ee1ad..0da3d36b283b 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -608,7 +608,7 @@ static int macvlan_open(struct net_device *dev)
 		goto hash_add;
 	}
 
-	err = -EBUSY;
+	err = -EADDRINUSE;
 	if (macvlan_addr_busy(vlan->port, dev->dev_addr))
 		goto out;
 
@@ -706,7 +706,7 @@ static int macvlan_sync_address(struct net_device *dev, unsigned char *addr)
 	} else {
 		/* Rehash and update the device filters */
 		if (macvlan_addr_busy(vlan->port, addr))
-			return -EBUSY;
+			return -EADDRINUSE;
 
 		if (!macvlan_passthru(port)) {
 			err = dev_uc_add(lowerdev, addr);
@@ -747,6 +747,9 @@ static int macvlan_set_mac_address(struct net_device *dev, void *p)
 		return dev_set_mac_address(vlan->lowerdev, addr);
 	}
 
+	if (macvlan_addr_busy(vlan->port, addr->sa_data))
+		return -EADDRINUSE;
+
 	return macvlan_sync_address(dev, addr->sa_data);
 }
 
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 23ee3967c166..18e92c19c5ab 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1880,20 +1880,17 @@ EXPORT_SYMBOL(genphy_loopback);
 
 static int __set_phy_supported(struct phy_device *phydev, u32 max_speed)
 {
-	phydev->supported &= ~(PHY_1000BT_FEATURES | PHY_100BT_FEATURES |
-			       PHY_10BT_FEATURES);
-
 	switch (max_speed) {
-	default:
-		return -ENOTSUPP;
-	case SPEED_1000:
-		phydev->supported |= PHY_1000BT_FEATURES;
+	case SPEED_10:
+		phydev->supported &= ~PHY_100BT_FEATURES;
 		/* fall through */
 	case SPEED_100:
-		phydev->supported |= PHY_100BT_FEATURES;
-		/* fall through */
-	case SPEED_10:
-		phydev->supported |= PHY_10BT_FEATURES;
+		phydev->supported &= ~PHY_1000BT_FEATURES;
+		break;
+	case SPEED_1000:
+		break;
+	default:
+		return -ENOTSUPP;
 	}
 
 	return 0;
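
Note: the rewritten __set_phy_supported() above no longer wipes and re-adds feature bits; it only clears the groups that exceed the requested max_speed and rejects unknown speeds. A compact standalone version of the same switch logic (the bit values are placeholders, not the kernel's PHY_*_FEATURES masks):

/* Cap the supported-feature mask at a maximum speed. */
#include <stdio.h>

#define F_10BT		0x1
#define F_100BT		0x2
#define F_1000BT	0x4

static int limit_speed(unsigned int *supported, int max_speed)
{
	switch (max_speed) {
	case 10:
		*supported &= ~F_100BT;
		/* fall through */
	case 100:
		*supported &= ~F_1000BT;
		break;
	case 1000:
		break;
	default:
		return -1;	/* unknown speed */
	}
	return 0;
}

int main(void)
{
	unsigned int supported = F_10BT | F_100BT | F_1000BT;

	limit_speed(&supported, 100);
	printf("capped at 100: 0x%x\n", supported);	/* 10/100 remain */
	return 0;
}
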
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
index 83060fb349f4..ad9db652874d 100644
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -162,7 +162,7 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
 	/* 1000Base-PX or 1000Base-BX10 */
 	if ((id->base.e_base_px || id->base.e_base_bx10) &&
 	    br_min <= 1300 && br_max >= 1200)
-		phylink_set(support, 1000baseX_Full);
+		phylink_set(modes, 1000baseX_Full);
 
 	/* For active or passive cables, select the link modes
 	 * based on the bit rates and the cable compliance bytes.
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index e244f5d7512a..005020042be9 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
| @@ -2293,9 +2293,9 @@ static void tun_setup(struct net_device *dev) | |||
| 2293 | static int tun_validate(struct nlattr *tb[], struct nlattr *data[], | 2293 | static int tun_validate(struct nlattr *tb[], struct nlattr *data[], |
| 2294 | struct netlink_ext_ack *extack) | 2294 | struct netlink_ext_ack *extack) |
| 2295 | { | 2295 | { |
| 2296 | if (!data) | 2296 | NL_SET_ERR_MSG(extack, |
| 2297 | return 0; | 2297 | "tun/tap creation via rtnetlink is not supported."); |
| 2298 | return -EINVAL; | 2298 | return -EOPNOTSUPP; |
| 2299 | } | 2299 | } |
| 2300 | 2300 | ||
| 2301 | static size_t tun_get_size(const struct net_device *dev) | 2301 | static size_t tun_get_size(const struct net_device *dev) |
| @@ -2385,6 +2385,7 @@ static int tun_xdp_one(struct tun_struct *tun, | |||
| 2385 | struct tun_file *tfile, | 2385 | struct tun_file *tfile, |
| 2386 | struct xdp_buff *xdp, int *flush) | 2386 | struct xdp_buff *xdp, int *flush) |
| 2387 | { | 2387 | { |
| 2388 | unsigned int datasize = xdp->data_end - xdp->data; | ||
| 2388 | struct tun_xdp_hdr *hdr = xdp->data_hard_start; | 2389 | struct tun_xdp_hdr *hdr = xdp->data_hard_start; |
| 2389 | struct virtio_net_hdr *gso = &hdr->gso; | 2390 | struct virtio_net_hdr *gso = &hdr->gso; |
| 2390 | struct tun_pcpu_stats *stats; | 2391 | struct tun_pcpu_stats *stats; |
| @@ -2461,7 +2462,7 @@ build: | |||
| 2461 | stats = get_cpu_ptr(tun->pcpu_stats); | 2462 | stats = get_cpu_ptr(tun->pcpu_stats); |
| 2462 | u64_stats_update_begin(&stats->syncp); | 2463 | u64_stats_update_begin(&stats->syncp); |
| 2463 | stats->rx_packets++; | 2464 | stats->rx_packets++; |
| 2464 | stats->rx_bytes += skb->len; | 2465 | stats->rx_bytes += datasize; |
| 2465 | u64_stats_update_end(&stats->syncp); | 2466 | u64_stats_update_end(&stats->syncp); |
| 2466 | put_cpu_ptr(stats); | 2467 | put_cpu_ptr(stats); |
| 2467 | 2468 | ||
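Two independent fixes sit in this file: tun_validate() now refuses rtnetlink-based creation with an explicit extack message, and tun_xdp_one() snapshots the payload length before the skb is built so the byte counter no longer touches skb->len after the skb may have been handed off. A minimal sketch of the latter idea, with a made-up xdp struct:

    struct fake_xdp { unsigned char *data, *data_end; };

    /* Capture the length up front; the skb may be gone by accounting time. */
    static void account_rx_bytes(const struct fake_xdp *xdp,
                                 unsigned long long *rx_bytes)
    {
            unsigned int datasize = xdp->data_end - xdp->data;

            /* ... build the skb and hand it to the stack here ... */

            *rx_bytes += datasize;          /* not skb->len */
    }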
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index cecfd77c9f3c..ea672145f6a6 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c | |||
| @@ -365,7 +365,8 @@ static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx) | |||
| 365 | static struct sk_buff *page_to_skb(struct virtnet_info *vi, | 365 | static struct sk_buff *page_to_skb(struct virtnet_info *vi, |
| 366 | struct receive_queue *rq, | 366 | struct receive_queue *rq, |
| 367 | struct page *page, unsigned int offset, | 367 | struct page *page, unsigned int offset, |
| 368 | unsigned int len, unsigned int truesize) | 368 | unsigned int len, unsigned int truesize, |
| 369 | bool hdr_valid) | ||
| 369 | { | 370 | { |
| 370 | struct sk_buff *skb; | 371 | struct sk_buff *skb; |
| 371 | struct virtio_net_hdr_mrg_rxbuf *hdr; | 372 | struct virtio_net_hdr_mrg_rxbuf *hdr; |
| @@ -387,7 +388,8 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi, | |||
| 387 | else | 388 | else |
| 388 | hdr_padded_len = sizeof(struct padded_vnet_hdr); | 389 | hdr_padded_len = sizeof(struct padded_vnet_hdr); |
| 389 | 390 | ||
| 390 | memcpy(hdr, p, hdr_len); | 391 | if (hdr_valid) |
| 392 | memcpy(hdr, p, hdr_len); | ||
| 391 | 393 | ||
| 392 | len -= hdr_len; | 394 | len -= hdr_len; |
| 393 | offset += hdr_padded_len; | 395 | offset += hdr_padded_len; |
| @@ -739,7 +741,8 @@ static struct sk_buff *receive_big(struct net_device *dev, | |||
| 739 | struct virtnet_rq_stats *stats) | 741 | struct virtnet_rq_stats *stats) |
| 740 | { | 742 | { |
| 741 | struct page *page = buf; | 743 | struct page *page = buf; |
| 742 | struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE); | 744 | struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, |
| 745 | PAGE_SIZE, true); | ||
| 743 | 746 | ||
| 744 | stats->bytes += len - vi->hdr_len; | 747 | stats->bytes += len - vi->hdr_len; |
| 745 | if (unlikely(!skb)) | 748 | if (unlikely(!skb)) |
| @@ -842,7 +845,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, | |||
| 842 | rcu_read_unlock(); | 845 | rcu_read_unlock(); |
| 843 | put_page(page); | 846 | put_page(page); |
| 844 | head_skb = page_to_skb(vi, rq, xdp_page, | 847 | head_skb = page_to_skb(vi, rq, xdp_page, |
| 845 | offset, len, PAGE_SIZE); | 848 | offset, len, |
| 849 | PAGE_SIZE, false); | ||
| 846 | return head_skb; | 850 | return head_skb; |
| 847 | } | 851 | } |
| 848 | break; | 852 | break; |
| @@ -898,7 +902,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, | |||
| 898 | goto err_skb; | 902 | goto err_skb; |
| 899 | } | 903 | } |
| 900 | 904 | ||
| 901 | head_skb = page_to_skb(vi, rq, page, offset, len, truesize); | 905 | head_skb = page_to_skb(vi, rq, page, offset, len, truesize, !xdp_prog); |
| 902 | curr_skb = head_skb; | 906 | curr_skb = head_skb; |
| 903 | 907 | ||
| 904 | if (unlikely(!curr_skb)) | 908 | if (unlikely(!curr_skb)) |
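page_to_skb() grows an hdr_valid flag: once an XDP program has run over the buffer, the virtio-net header that used to sit at the front can no longer be trusted, so it is not copied into the new skb. A hedged sketch of that contract with simplified types:

    #include <stdbool.h>
    #include <string.h>

    struct vnet_hdr { unsigned char raw[12]; }; /* stand-in for virtio_net_hdr_mrg_rxbuf */

    static void fill_skb_hdr(struct vnet_hdr *dst, const void *buf, bool xdp_ran)
    {
            bool hdr_valid = !xdp_ran;      /* mirrors page_to_skb(..., !xdp_prog) */

            if (hdr_valid)
                    memcpy(dst, buf, sizeof(*dst));
            else
                    memset(dst, 0, sizeof(*dst)); /* keep a benign zeroed header */
    }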
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index aa8058264d5b..d1464e3e1be2 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c | |||
| @@ -2884,6 +2884,10 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, | |||
| 2884 | 2884 | ||
| 2885 | wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); | 2885 | wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); |
| 2886 | 2886 | ||
| 2887 | tasklet_hrtimer_init(&data->beacon_timer, | ||
| 2888 | mac80211_hwsim_beacon, | ||
| 2889 | CLOCK_MONOTONIC, HRTIMER_MODE_ABS); | ||
| 2890 | |||
| 2887 | err = ieee80211_register_hw(hw); | 2891 | err = ieee80211_register_hw(hw); |
| 2888 | if (err < 0) { | 2892 | if (err < 0) { |
| 2889 | pr_debug("mac80211_hwsim: ieee80211_register_hw failed (%d)\n", | 2893 | pr_debug("mac80211_hwsim: ieee80211_register_hw failed (%d)\n", |
| @@ -2908,10 +2912,6 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, | |||
| 2908 | data->debugfs, | 2912 | data->debugfs, |
| 2909 | data, &hwsim_simulate_radar); | 2913 | data, &hwsim_simulate_radar); |
| 2910 | 2914 | ||
| 2911 | tasklet_hrtimer_init(&data->beacon_timer, | ||
| 2912 | mac80211_hwsim_beacon, | ||
| 2913 | CLOCK_MONOTONIC, HRTIMER_MODE_ABS); | ||
| 2914 | |||
| 2915 | spin_lock_bh(&hwsim_radio_lock); | 2915 | spin_lock_bh(&hwsim_radio_lock); |
| 2916 | err = rhashtable_insert_fast(&hwsim_radios_rht, &data->rht, | 2916 | err = rhashtable_insert_fast(&hwsim_radios_rht, &data->rht, |
| 2917 | hwsim_rht_params); | 2917 | hwsim_rht_params); |
| @@ -3703,16 +3703,16 @@ static int __init init_mac80211_hwsim(void) | |||
| 3703 | if (err) | 3703 | if (err) |
| 3704 | goto out_unregister_pernet; | 3704 | goto out_unregister_pernet; |
| 3705 | 3705 | ||
| 3706 | err = hwsim_init_netlink(); | ||
| 3707 | if (err) | ||
| 3708 | goto out_unregister_driver; | ||
| 3709 | |||
| 3706 | hwsim_class = class_create(THIS_MODULE, "mac80211_hwsim"); | 3710 | hwsim_class = class_create(THIS_MODULE, "mac80211_hwsim"); |
| 3707 | if (IS_ERR(hwsim_class)) { | 3711 | if (IS_ERR(hwsim_class)) { |
| 3708 | err = PTR_ERR(hwsim_class); | 3712 | err = PTR_ERR(hwsim_class); |
| 3709 | goto out_unregister_driver; | 3713 | goto out_exit_netlink; |
| 3710 | } | 3714 | } |
| 3711 | 3715 | ||
| 3712 | err = hwsim_init_netlink(); | ||
| 3713 | if (err < 0) | ||
| 3714 | goto out_unregister_driver; | ||
| 3715 | |||
| 3716 | for (i = 0; i < radios; i++) { | 3716 | for (i = 0; i < radios; i++) { |
| 3717 | struct hwsim_new_radio_params param = { 0 }; | 3717 | struct hwsim_new_radio_params param = { 0 }; |
| 3718 | 3718 | ||
| @@ -3818,6 +3818,8 @@ out_free_mon: | |||
| 3818 | free_netdev(hwsim_mon); | 3818 | free_netdev(hwsim_mon); |
| 3819 | out_free_radios: | 3819 | out_free_radios: |
| 3820 | mac80211_hwsim_free(); | 3820 | mac80211_hwsim_free(); |
| 3821 | out_exit_netlink: | ||
| 3822 | hwsim_exit_netlink(); | ||
| 3821 | out_unregister_driver: | 3823 | out_unregister_driver: |
| 3822 | platform_driver_unregister(&mac80211_hwsim_driver); | 3824 | platform_driver_unregister(&mac80211_hwsim_driver); |
| 3823 | out_unregister_pernet: | 3825 | out_unregister_pernet: |
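Both hunks above are ordering fixes: the beacon hrtimer is initialised before ieee80211_register_hw() can cause it to fire, and hwsim_init_netlink() is moved ahead of class_create() so the new out_exit_netlink label can unwind it on later failures. A sketch of the unwind ladder with hypothetical helper names, one teardown label per successful step, run in reverse on failure:

    int  register_driver(void);         /* hypothetical stand-ins */
    int  init_netlink(void);
    int  create_class(void);
    void exit_netlink(void);
    void unregister_driver(void);

    static int init_sketch(void)
    {
            int err;

            err = register_driver();        /* step 0 */
            if (err)
                    return err;

            err = init_netlink();           /* step 1 */
            if (err)
                    goto out_unregister_driver;

            err = create_class();           /* step 2 */
            if (err)
                    goto out_exit_netlink;   /* undo step 1, then step 0 */

            return 0;

    out_exit_netlink:
            exit_netlink();
    out_unregister_driver:
            unregister_driver();
            return err;
    }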
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 3a5f81a66d34..6b98d8e3a5bf 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c | |||
| @@ -944,10 +944,7 @@ static void vhost_iotlb_notify_vq(struct vhost_dev *d, | |||
| 944 | if (msg->iova <= vq_msg->iova && | 944 | if (msg->iova <= vq_msg->iova && |
| 945 | msg->iova + msg->size - 1 >= vq_msg->iova && | 945 | msg->iova + msg->size - 1 >= vq_msg->iova && |
| 946 | vq_msg->type == VHOST_IOTLB_MISS) { | 946 | vq_msg->type == VHOST_IOTLB_MISS) { |
| 947 | mutex_lock(&node->vq->mutex); | ||
| 948 | vhost_poll_queue(&node->vq->poll); | 947 | vhost_poll_queue(&node->vq->poll); |
| 949 | mutex_unlock(&node->vq->mutex); | ||
| 950 | |||
| 951 | list_del(&node->node); | 948 | list_del(&node->node); |
| 952 | kfree(node); | 949 | kfree(node); |
| 953 | } | 950 | } |
diff --git a/include/linux/filter.h b/include/linux/filter.h index 448dcc448f1f..795ff0b869bb 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h | |||
| @@ -449,6 +449,13 @@ struct sock_reuseport; | |||
| 449 | offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1 | 449 | offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1 |
| 450 | #define bpf_ctx_range_till(TYPE, MEMBER1, MEMBER2) \ | 450 | #define bpf_ctx_range_till(TYPE, MEMBER1, MEMBER2) \ |
| 451 | offsetof(TYPE, MEMBER1) ... offsetofend(TYPE, MEMBER2) - 1 | 451 | offsetof(TYPE, MEMBER1) ... offsetofend(TYPE, MEMBER2) - 1 |
| 452 | #if BITS_PER_LONG == 64 | ||
| 453 | # define bpf_ctx_range_ptr(TYPE, MEMBER) \ | ||
| 454 | offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1 | ||
| 455 | #else | ||
| 456 | # define bpf_ctx_range_ptr(TYPE, MEMBER) \ | ||
| 457 | offsetof(TYPE, MEMBER) ... offsetof(TYPE, MEMBER) + 8 - 1 | ||
| 458 | #endif /* BITS_PER_LONG == 64 */ | ||
| 452 | 459 | ||
| 453 | #define bpf_target_off(TYPE, MEMBER, SIZE, PTR_SIZE) \ | 460 | #define bpf_target_off(TYPE, MEMBER, SIZE, PTR_SIZE) \ |
| 454 | ({ \ | 461 | ({ \ |
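bpf_ctx_range_ptr() exists because offsetofend() of a pointer member reaches only 4 bytes past its start on a 32-bit kernel, while the corresponding context field is always 8 bytes wide. A small illustration of the arithmetic with a made-up context struct:

    #include <stddef.h>
    #include <stdint.h>

    struct demo_ctx {
            uint32_t len;
            uint64_t flow_keys;     /* the uapi field is always 8 bytes */
    } __attribute__((aligned(8)));

    /* The covered range must be offsetof(flow_keys) .. offsetof(flow_keys) + 7
     * on every build; deriving the upper bound from sizeof(void *) would
     * shrink it to +3 on a 32-bit kernel. */
    #define DEMO_START offsetof(struct demo_ctx, flow_keys)
    #define DEMO_END   (offsetof(struct demo_ctx, flow_keys) + 8 - 1)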
diff --git a/include/linux/sfp.h b/include/linux/sfp.h index d37518e89db2..d9d9de3fcf8e 100644 --- a/include/linux/sfp.h +++ b/include/linux/sfp.h | |||
| @@ -224,7 +224,7 @@ struct sfp_eeprom_ext { | |||
| 224 | * | 224 | * |
| 225 | * See the SFF-8472 specification and related documents for the definition | 225 | * See the SFF-8472 specification and related documents for the definition |
| 226 | * of these structure members. This can be obtained from | 226 | * of these structure members. This can be obtained from |
| 227 | * ftp://ftp.seagate.com/sff | 227 | * https://www.snia.org/technology-communities/sff/specifications |
| 228 | */ | 228 | */ |
| 229 | struct sfp_eeprom_id { | 229 | struct sfp_eeprom_id { |
| 230 | struct sfp_eeprom_base base; | 230 | struct sfp_eeprom_base base; |
diff --git a/include/net/neighbour.h b/include/net/neighbour.h index f58b384aa6c9..665990c7dec8 100644 --- a/include/net/neighbour.h +++ b/include/net/neighbour.h | |||
| @@ -454,6 +454,7 @@ static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb) | |||
| 454 | 454 | ||
| 455 | static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb) | 455 | static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb) |
| 456 | { | 456 | { |
| 457 | unsigned int hh_alen = 0; | ||
| 457 | unsigned int seq; | 458 | unsigned int seq; |
| 458 | unsigned int hh_len; | 459 | unsigned int hh_len; |
| 459 | 460 | ||
| @@ -461,16 +462,33 @@ static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb | |||
| 461 | seq = read_seqbegin(&hh->hh_lock); | 462 | seq = read_seqbegin(&hh->hh_lock); |
| 462 | hh_len = hh->hh_len; | 463 | hh_len = hh->hh_len; |
| 463 | if (likely(hh_len <= HH_DATA_MOD)) { | 464 | if (likely(hh_len <= HH_DATA_MOD)) { |
| 464 | /* this is inlined by gcc */ | 465 | hh_alen = HH_DATA_MOD; |
| 465 | memcpy(skb->data - HH_DATA_MOD, hh->hh_data, HH_DATA_MOD); | 466 | |
| 467 | /* skb_push() would proceed silently if we have room for | ||
| 468 | * the unaligned size but not for the aligned size: | ||
| 469 | * check headroom explicitly. | ||
| 470 | */ | ||
| 471 | if (likely(skb_headroom(skb) >= HH_DATA_MOD)) { | ||
| 472 | /* this is inlined by gcc */ | ||
| 473 | memcpy(skb->data - HH_DATA_MOD, hh->hh_data, | ||
| 474 | HH_DATA_MOD); | ||
| 475 | } | ||
| 466 | } else { | 476 | } else { |
| 467 | unsigned int hh_alen = HH_DATA_ALIGN(hh_len); | 477 | hh_alen = HH_DATA_ALIGN(hh_len); |
| 468 | 478 | ||
| 469 | memcpy(skb->data - hh_alen, hh->hh_data, hh_alen); | 479 | if (likely(skb_headroom(skb) >= hh_alen)) { |
| 480 | memcpy(skb->data - hh_alen, hh->hh_data, | ||
| 481 | hh_alen); | ||
| 482 | } | ||
| 470 | } | 483 | } |
| 471 | } while (read_seqretry(&hh->hh_lock, seq)); | 484 | } while (read_seqretry(&hh->hh_lock, seq)); |
| 472 | 485 | ||
| 473 | skb_push(skb, hh_len); | 486 | if (WARN_ON_ONCE(skb_headroom(skb) < hh_alen)) { |
| 487 | kfree_skb(skb); | ||
| 488 | return NET_XMIT_DROP; | ||
| 489 | } | ||
| 490 | |||
| 491 | __skb_push(skb, hh_len); | ||
| 474 | return dev_queue_xmit(skb); | 492 | return dev_queue_xmit(skb); |
| 475 | } | 493 | } |
| 476 | 494 | ||
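The rewritten neigh_hh_output() refuses to copy the cached hardware header unless there is headroom for the aligned copy, and drops the packet (with a one-time warning) instead of letting __skb_push() underflow the headroom. A hedged sketch of the guard, stripped of the seqlock retry loop:

    #include <string.h>

    #define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((unsigned int)(a) - 1))

    /* Returns 0 on success, -1 when the caller should drop (NET_XMIT_DROP). */
    static int push_cached_header(unsigned char *data, unsigned int headroom,
                                  const unsigned char *hh_data,
                                  unsigned int hh_len)
    {
            unsigned int hh_alen = ALIGN_UP(hh_len, 16);    /* HH_DATA_MOD is 16 */

            if (headroom < hh_alen)
                    return -1;              /* too short even for the aligned copy */

            memcpy(data - hh_alen, hh_data, hh_alen);
            return 0;                       /* caller then does __skb_push(hh_len) */
    }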
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index ab9242e51d9e..2abbc15824af 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h | |||
| @@ -620,4 +620,9 @@ static inline bool sctp_transport_pmtu_check(struct sctp_transport *t) | |||
| 620 | return false; | 620 | return false; |
| 621 | } | 621 | } |
| 622 | 622 | ||
| 623 | static inline __u32 sctp_min_frag_point(struct sctp_sock *sp, __u16 datasize) | ||
| 624 | { | ||
| 625 | return sctp_mtu_payload(sp, SCTP_DEFAULT_MINSEGMENT, datasize); | ||
| 626 | } | ||
| 627 | |||
| 623 | #endif /* __net_sctp_h__ */ | 628 | #endif /* __net_sctp_h__ */ |
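sctp_min_frag_point() gives the smallest size an association will fragment down to: the payload that still fits the 512-byte SCTP_DEFAULT_MINSEGMENT once the headers are taken off. Rough, hedged arithmetic for plain IPv4 DATA chunks (ignoring AUTH, IP options and padding, which sctp_mtu_payload() also accounts for):

    #include <stdio.h>

    int main(void)
    {
            unsigned int min_pmtu  = 512;   /* SCTP_DEFAULT_MINSEGMENT */
            unsigned int ipv4_hdr  = 20;
            unsigned int sctp_hdr  = 12;    /* common header */
            unsigned int chunk_hdr = 16;    /* DATA chunk header */

            printf("min frag point is roughly %u bytes of user data\n",
                   min_pmtu - ipv4_hdr - sctp_hdr - chunk_hdr);
            return 0;
    }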
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index a11f93790476..feada358d872 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h | |||
| @@ -2075,6 +2075,8 @@ struct sctp_association { | |||
| 2075 | 2075 | ||
| 2076 | __u64 abandoned_unsent[SCTP_PR_INDEX(MAX) + 1]; | 2076 | __u64 abandoned_unsent[SCTP_PR_INDEX(MAX) + 1]; |
| 2077 | __u64 abandoned_sent[SCTP_PR_INDEX(MAX) + 1]; | 2077 | __u64 abandoned_sent[SCTP_PR_INDEX(MAX) + 1]; |
| 2078 | |||
| 2079 | struct rcu_head rcu; | ||
| 2078 | }; | 2080 | }; |
| 2079 | 2081 | ||
| 2080 | 2082 | ||
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 852dc17ab47a..72c453a8bf50 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h | |||
| @@ -2170,7 +2170,7 @@ union bpf_attr { | |||
| 2170 | * Return | 2170 | * Return |
| 2171 | * 0 on success, or a negative error in case of failure. | 2171 | * 0 on success, or a negative error in case of failure. |
| 2172 | * | 2172 | * |
| 2173 | * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u32 netns, u64 flags) | 2173 | * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags) |
| 2174 | * Description | 2174 | * Description |
| 2175 | * Look for TCP socket matching *tuple*, optionally in a child | 2175 | * Look for TCP socket matching *tuple*, optionally in a child |
| 2176 | * network namespace *netns*. The return value must be checked, | 2176 | * network namespace *netns*. The return value must be checked, |
| @@ -2187,12 +2187,14 @@ union bpf_attr { | |||
| 2187 | * **sizeof**\ (*tuple*\ **->ipv6**) | 2187 | * **sizeof**\ (*tuple*\ **->ipv6**) |
| 2188 | * Look for an IPv6 socket. | 2188 | * Look for an IPv6 socket. |
| 2189 | * | 2189 | * |
| 2190 | * If the *netns* is zero, then the socket lookup table in the | 2190 | * If the *netns* is a negative signed 32-bit integer, then the |
| 2191 | * netns associated with the *ctx* will be used. For the TC hooks, | 2191 | * socket lookup table in the netns associated with the *ctx* will |
| 2192 | * this in the netns of the socket. If *netns* is non-zero, then | 2192 | * be used. For the TC hooks, this is the netns of the device |
| 2193 | * this in the netns of the socket. If *netns* is non-zero, then | 2193 | * in the skb. For socket hooks, this is the netns of the socket. |
| 2194 | * it specifies the ID of the netns relative to the netns | 2194 | * If *netns* is any other signed 32-bit value greater than or |
| 2195 | * associated with the *ctx*. | 2195 | * equal to zero then it specifies the ID of the netns relative to |
| 2196 | * the netns associated with the *ctx*. *netns* values beyond the | ||
| 2197 | * range of 32-bit integers are reserved for future use. | ||
| 2196 | * | 2198 | * |
| 2197 | * All values for *flags* are reserved for future usage, and must | 2199 | * All values for *flags* are reserved for future usage, and must |
| 2198 | * be left at zero. | 2200 | * be left at zero. |
| @@ -2201,8 +2203,10 @@ union bpf_attr { | |||
| 2201 | * **CONFIG_NET** configuration option. | 2203 | * **CONFIG_NET** configuration option. |
| 2202 | * Return | 2204 | * Return |
| 2203 | * Pointer to *struct bpf_sock*, or NULL in case of failure. | 2205 | * Pointer to *struct bpf_sock*, or NULL in case of failure. |
| 2206 | * For sockets with reuseport option, the *struct bpf_sock* | ||
| 2207 | * result is from reuse->socks[] using the hash of the tuple. | ||
| 2204 | * | 2208 | * |
| 2205 | * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u32 netns, u64 flags) | 2209 | * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags) |
| 2206 | * Description | 2210 | * Description |
| 2207 | * Look for UDP socket matching *tuple*, optionally in a child | 2211 | * Look for UDP socket matching *tuple*, optionally in a child |
| 2208 | * network namespace *netns*. The return value must be checked, | 2212 | * network namespace *netns*. The return value must be checked, |
| @@ -2219,12 +2223,14 @@ union bpf_attr { | |||
| 2219 | * **sizeof**\ (*tuple*\ **->ipv6**) | 2223 | * **sizeof**\ (*tuple*\ **->ipv6**) |
| 2220 | * Look for an IPv6 socket. | 2224 | * Look for an IPv6 socket. |
| 2221 | * | 2225 | * |
| 2222 | * If the *netns* is zero, then the socket lookup table in the | 2226 | * If the *netns* is a negative signed 32-bit integer, then the |
| 2223 | * netns associated with the *ctx* will be used. For the TC hooks, | 2227 | * socket lookup table in the netns associated with the *ctx* will |
| 2224 | * this in the netns of the device in the skb. For socket hooks, | 2228 | * be used. For the TC hooks, this is the netns of the device |
| 2225 | * this in the netns of the socket. If *netns* is non-zero, then | 2229 | * in the skb. For socket hooks, this is the netns of the socket. |
| 2226 | * it specifies the ID of the netns relative to the netns | 2230 | * If *netns* is any other signed 32-bit value greater than or |
| 2227 | * associated with the *ctx*. | 2231 | * equal to zero then it specifies the ID of the netns relative to |
| 2232 | * the netns associated with the *ctx*. *netns* values beyond the | ||
| 2233 | * range of 32-bit integers are reserved for future use. | ||
| 2228 | * | 2234 | * |
| 2229 | * All values for *flags* are reserved for future usage, and must | 2235 | * All values for *flags* are reserved for future usage, and must |
| 2230 | * be left at zero. | 2236 | * be left at zero. |
| @@ -2233,6 +2239,8 @@ union bpf_attr { | |||
| 2233 | * **CONFIG_NET** configuration option. | 2239 | * **CONFIG_NET** configuration option. |
| 2234 | * Return | 2240 | * Return |
| 2235 | * Pointer to *struct bpf_sock*, or NULL in case of failure. | 2241 | * Pointer to *struct bpf_sock*, or NULL in case of failure. |
| 2242 | * For sockets with reuseport option, the *struct bpf_sock* | ||
| 2243 | * result is from reuse->socks[] using the hash of the tuple. | ||
| 2236 | * | 2244 | * |
| 2237 | * int bpf_sk_release(struct bpf_sock *sk) | 2245 | * int bpf_sk_release(struct bpf_sock *sk) |
| 2238 | * Description | 2246 | * Description |
| @@ -2405,6 +2413,9 @@ enum bpf_func_id { | |||
| 2405 | /* BPF_FUNC_perf_event_output for sk_buff input context. */ | 2413 | /* BPF_FUNC_perf_event_output for sk_buff input context. */ |
| 2406 | #define BPF_F_CTXLEN_MASK (0xfffffULL << 32) | 2414 | #define BPF_F_CTXLEN_MASK (0xfffffULL << 32) |
| 2407 | 2415 | ||
| 2416 | /* Current network namespace */ | ||
| 2417 | #define BPF_F_CURRENT_NETNS (-1L) | ||
| 2418 | |||
| 2408 | /* Mode for BPF_FUNC_skb_adjust_room helper. */ | 2419 | /* Mode for BPF_FUNC_skb_adjust_room helper. */ |
| 2409 | enum bpf_adj_room_mode { | 2420 | enum bpf_adj_room_mode { |
| 2410 | BPF_ADJ_ROOM_NET, | 2421 | BPF_ADJ_ROOM_NET, |
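Taken together with the helper documentation above, the way to search the caller's own namespace is now to pass the new BPF_F_CURRENT_NETNS constant instead of 0. A hedged BPF-side sketch of a tc classifier doing that; the section name and the libbpf bpf_helpers.h include are assumptions, and the tuple literals presume a little-endian host:

    #include <linux/bpf.h>
    #include <linux/pkt_cls.h>
    #include <bpf/bpf_helpers.h>    /* assumed to provide SEC() and helper decls */

    SEC("classifier")
    int lookup_local(struct __sk_buff *skb)
    {
            struct bpf_sock_tuple tuple = {
                    .ipv4.daddr = 0x0100007f,       /* 127.0.0.1 in network order */
                    .ipv4.dport = 0x5000,           /* port 80 in network order */
            };
            struct bpf_sock *sk;

            sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4),
                                   BPF_F_CURRENT_NETNS, 0);
            if (sk)
                    bpf_sk_release(sk);
            return TC_ACT_OK;
    }

    char _license[] SEC("license") = "GPL";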
| @@ -2422,6 +2433,12 @@ enum bpf_lwt_encap_mode { | |||
| 2422 | BPF_LWT_ENCAP_SEG6_INLINE | 2433 | BPF_LWT_ENCAP_SEG6_INLINE |
| 2423 | }; | 2434 | }; |
| 2424 | 2435 | ||
| 2436 | #define __bpf_md_ptr(type, name) \ | ||
| 2437 | union { \ | ||
| 2438 | type name; \ | ||
| 2439 | __u64 :64; \ | ||
| 2440 | } __attribute__((aligned(8))) | ||
| 2441 | |||
| 2425 | /* user accessible mirror of in-kernel sk_buff. | 2442 | /* user accessible mirror of in-kernel sk_buff. |
| 2426 | * new fields can only be added to the end of this structure | 2443 | * new fields can only be added to the end of this structure |
| 2427 | */ | 2444 | */ |
| @@ -2456,7 +2473,7 @@ struct __sk_buff { | |||
| 2456 | /* ... here. */ | 2473 | /* ... here. */ |
| 2457 | 2474 | ||
| 2458 | __u32 data_meta; | 2475 | __u32 data_meta; |
| 2459 | struct bpf_flow_keys *flow_keys; | 2476 | __bpf_md_ptr(struct bpf_flow_keys *, flow_keys); |
| 2460 | }; | 2477 | }; |
| 2461 | 2478 | ||
| 2462 | struct bpf_tunnel_key { | 2479 | struct bpf_tunnel_key { |
| @@ -2572,8 +2589,8 @@ enum sk_action { | |||
| 2572 | * be added to the end of this structure | 2589 | * be added to the end of this structure |
| 2573 | */ | 2590 | */ |
| 2574 | struct sk_msg_md { | 2591 | struct sk_msg_md { |
| 2575 | void *data; | 2592 | __bpf_md_ptr(void *, data); |
| 2576 | void *data_end; | 2593 | __bpf_md_ptr(void *, data_end); |
| 2577 | 2594 | ||
| 2578 | __u32 family; | 2595 | __u32 family; |
| 2579 | __u32 remote_ip4; /* Stored in network byte order */ | 2596 | __u32 remote_ip4; /* Stored in network byte order */ |
| @@ -2589,8 +2606,9 @@ struct sk_reuseport_md { | |||
| 2589 | * Start of directly accessible data. It begins from | 2606 | * Start of directly accessible data. It begins from |
| 2590 | * the tcp/udp header. | 2607 | * the tcp/udp header. |
| 2591 | */ | 2608 | */ |
| 2592 | void *data; | 2609 | __bpf_md_ptr(void *, data); |
| 2593 | void *data_end; /* End of directly accessible data */ | 2610 | /* End of directly accessible data */ |
| 2611 | __bpf_md_ptr(void *, data_end); | ||
| 2594 | /* | 2612 | /* |
| 2595 | * Total length of packet (starting from the tcp/udp header). | 2613 | * Total length of packet (starting from the tcp/udp header). |
| 2596 | * Note that the directly accessible bytes (data_end - data) | 2614 | * Note that the directly accessible bytes (data_end - data) |
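The __bpf_md_ptr() wrapper and the converted data/data_end/flow_keys fields keep every pointer-carrying context member 8 bytes wide and 8-byte aligned, so 32-bit user space sees the same struct layout the 64-bit kernel does. A small, hedged demonstration of the layout difference:

    #include <stddef.h>
    #include <stdio.h>

    struct md_bare { void *data; unsigned int family; };

    struct md_padded {
            union { void *data; unsigned long long pad; }
                    __attribute__((aligned(8)));
            unsigned int family;
    };

    int main(void)
    {
            /* On a 32-bit build the bare layout puts family at offset 4,
             * the padded one keeps it at offset 8, matching 64-bit. */
            printf("bare: %zu, padded: %zu\n",
                   offsetof(struct md_bare, family),
                   offsetof(struct md_padded, family));
            return 0;
    }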
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index ee4c82667d65..4da543d6bea2 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c | |||
| @@ -5,6 +5,7 @@ | |||
| 5 | #include <uapi/linux/types.h> | 5 | #include <uapi/linux/types.h> |
| 6 | #include <linux/seq_file.h> | 6 | #include <linux/seq_file.h> |
| 7 | #include <linux/compiler.h> | 7 | #include <linux/compiler.h> |
| 8 | #include <linux/ctype.h> | ||
| 8 | #include <linux/errno.h> | 9 | #include <linux/errno.h> |
| 9 | #include <linux/slab.h> | 10 | #include <linux/slab.h> |
| 10 | #include <linux/anon_inodes.h> | 11 | #include <linux/anon_inodes.h> |
| @@ -426,6 +427,30 @@ static bool btf_name_offset_valid(const struct btf *btf, u32 offset) | |||
| 426 | offset < btf->hdr.str_len; | 427 | offset < btf->hdr.str_len; |
| 427 | } | 428 | } |
| 428 | 429 | ||
| 430 | /* Only C-style identifier is permitted. This can be relaxed if | ||
| 431 | * necessary. | ||
| 432 | */ | ||
| 433 | static bool btf_name_valid_identifier(const struct btf *btf, u32 offset) | ||
| 434 | { | ||
| 435 | /* offset must be valid */ | ||
| 436 | const char *src = &btf->strings[offset]; | ||
| 437 | const char *src_limit; | ||
| 438 | |||
| 439 | if (!isalpha(*src) && *src != '_') | ||
| 440 | return false; | ||
| 441 | |||
| 442 | /* set a limit on identifier length */ | ||
| 443 | src_limit = src + KSYM_NAME_LEN; | ||
| 444 | src++; | ||
| 445 | while (*src && src < src_limit) { | ||
| 446 | if (!isalnum(*src) && *src != '_') | ||
| 447 | return false; | ||
| 448 | src++; | ||
| 449 | } | ||
| 450 | |||
| 451 | return !*src; | ||
| 452 | } | ||
| 453 | |||
| 429 | static const char *btf_name_by_offset(const struct btf *btf, u32 offset) | 454 | static const char *btf_name_by_offset(const struct btf *btf, u32 offset) |
| 430 | { | 455 | { |
| 431 | if (!offset) | 456 | if (!offset) |
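btf_name_valid_identifier() enforces plain C identifier rules on BTF names. A hedged userspace rendering of the same check, handy for eyeballing which names pass:

    #include <ctype.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    static bool name_valid(const char *s, size_t max_len)
    {
            const char *limit = s + max_len;        /* KSYM_NAME_LEN in the kernel */

            if (!isalpha((unsigned char)*s) && *s != '_')
                    return false;
            for (s++; *s && s < limit; s++)
                    if (!isalnum((unsigned char)*s) && *s != '_')
                            return false;
            return !*s;     /* also rejects names that hit the length cap */
    }

    int main(void)
    {
            /* prints: 1 1 0 */
            printf("%d %d %d\n", name_valid("task_struct", 128),
                   name_valid("_anon42", 128), name_valid("1bad", 128));
            return 0;
    }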
| @@ -1143,6 +1168,22 @@ static int btf_ref_type_check_meta(struct btf_verifier_env *env, | |||
| 1143 | return -EINVAL; | 1168 | return -EINVAL; |
| 1144 | } | 1169 | } |
| 1145 | 1170 | ||
| 1171 | /* typedef type must have a valid name, and other ref types, | ||
| 1172 | * volatile, const, restrict, should have a null name. | ||
| 1173 | */ | ||
| 1174 | if (BTF_INFO_KIND(t->info) == BTF_KIND_TYPEDEF) { | ||
| 1175 | if (!t->name_off || | ||
| 1176 | !btf_name_valid_identifier(env->btf, t->name_off)) { | ||
| 1177 | btf_verifier_log_type(env, t, "Invalid name"); | ||
| 1178 | return -EINVAL; | ||
| 1179 | } | ||
| 1180 | } else { | ||
| 1181 | if (t->name_off) { | ||
| 1182 | btf_verifier_log_type(env, t, "Invalid name"); | ||
| 1183 | return -EINVAL; | ||
| 1184 | } | ||
| 1185 | } | ||
| 1186 | |||
| 1146 | btf_verifier_log_type(env, t, NULL); | 1187 | btf_verifier_log_type(env, t, NULL); |
| 1147 | 1188 | ||
| 1148 | return 0; | 1189 | return 0; |
| @@ -1300,6 +1341,13 @@ static s32 btf_fwd_check_meta(struct btf_verifier_env *env, | |||
| 1300 | return -EINVAL; | 1341 | return -EINVAL; |
| 1301 | } | 1342 | } |
| 1302 | 1343 | ||
| 1344 | /* fwd type must have a valid name */ | ||
| 1345 | if (!t->name_off || | ||
| 1346 | !btf_name_valid_identifier(env->btf, t->name_off)) { | ||
| 1347 | btf_verifier_log_type(env, t, "Invalid name"); | ||
| 1348 | return -EINVAL; | ||
| 1349 | } | ||
| 1350 | |||
| 1303 | btf_verifier_log_type(env, t, NULL); | 1351 | btf_verifier_log_type(env, t, NULL); |
| 1304 | 1352 | ||
| 1305 | return 0; | 1353 | return 0; |
| @@ -1356,6 +1404,12 @@ static s32 btf_array_check_meta(struct btf_verifier_env *env, | |||
| 1356 | return -EINVAL; | 1404 | return -EINVAL; |
| 1357 | } | 1405 | } |
| 1358 | 1406 | ||
| 1407 | /* array type should not have a name */ | ||
| 1408 | if (t->name_off) { | ||
| 1409 | btf_verifier_log_type(env, t, "Invalid name"); | ||
| 1410 | return -EINVAL; | ||
| 1411 | } | ||
| 1412 | |||
| 1359 | if (btf_type_vlen(t)) { | 1413 | if (btf_type_vlen(t)) { |
| 1360 | btf_verifier_log_type(env, t, "vlen != 0"); | 1414 | btf_verifier_log_type(env, t, "vlen != 0"); |
| 1361 | return -EINVAL; | 1415 | return -EINVAL; |
| @@ -1532,6 +1586,13 @@ static s32 btf_struct_check_meta(struct btf_verifier_env *env, | |||
| 1532 | return -EINVAL; | 1586 | return -EINVAL; |
| 1533 | } | 1587 | } |
| 1534 | 1588 | ||
| 1589 | /* struct type either no name or a valid one */ | ||
| 1590 | if (t->name_off && | ||
| 1591 | !btf_name_valid_identifier(env->btf, t->name_off)) { | ||
| 1592 | btf_verifier_log_type(env, t, "Invalid name"); | ||
| 1593 | return -EINVAL; | ||
| 1594 | } | ||
| 1595 | |||
| 1535 | btf_verifier_log_type(env, t, NULL); | 1596 | btf_verifier_log_type(env, t, NULL); |
| 1536 | 1597 | ||
| 1537 | last_offset = 0; | 1598 | last_offset = 0; |
| @@ -1543,6 +1604,12 @@ static s32 btf_struct_check_meta(struct btf_verifier_env *env, | |||
| 1543 | return -EINVAL; | 1604 | return -EINVAL; |
| 1544 | } | 1605 | } |
| 1545 | 1606 | ||
| 1607 | /* struct member either no name or a valid one */ | ||
| 1608 | if (member->name_off && | ||
| 1609 | !btf_name_valid_identifier(btf, member->name_off)) { | ||
| 1610 | btf_verifier_log_member(env, t, member, "Invalid name"); | ||
| 1611 | return -EINVAL; | ||
| 1612 | } | ||
| 1546 | /* A member cannot be in type void */ | 1613 | /* A member cannot be in type void */ |
| 1547 | if (!member->type || !BTF_TYPE_ID_VALID(member->type)) { | 1614 | if (!member->type || !BTF_TYPE_ID_VALID(member->type)) { |
| 1548 | btf_verifier_log_member(env, t, member, | 1615 | btf_verifier_log_member(env, t, member, |
| @@ -1730,6 +1797,13 @@ static s32 btf_enum_check_meta(struct btf_verifier_env *env, | |||
| 1730 | return -EINVAL; | 1797 | return -EINVAL; |
| 1731 | } | 1798 | } |
| 1732 | 1799 | ||
| 1800 | /* enum type either no name or a valid one */ | ||
| 1801 | if (t->name_off && | ||
| 1802 | !btf_name_valid_identifier(env->btf, t->name_off)) { | ||
| 1803 | btf_verifier_log_type(env, t, "Invalid name"); | ||
| 1804 | return -EINVAL; | ||
| 1805 | } | ||
| 1806 | |||
| 1733 | btf_verifier_log_type(env, t, NULL); | 1807 | btf_verifier_log_type(env, t, NULL); |
| 1734 | 1808 | ||
| 1735 | for (i = 0; i < nr_enums; i++) { | 1809 | for (i = 0; i < nr_enums; i++) { |
| @@ -1739,6 +1813,14 @@ static s32 btf_enum_check_meta(struct btf_verifier_env *env, | |||
| 1739 | return -EINVAL; | 1813 | return -EINVAL; |
| 1740 | } | 1814 | } |
| 1741 | 1815 | ||
| 1816 | /* enum member must have a valid name */ | ||
| 1817 | if (!enums[i].name_off || | ||
| 1818 | !btf_name_valid_identifier(btf, enums[i].name_off)) { | ||
| 1819 | btf_verifier_log_type(env, t, "Invalid name"); | ||
| 1820 | return -EINVAL; | ||
| 1821 | } | ||
| 1822 | |||
| 1823 | |||
| 1742 | btf_verifier_log(env, "\t%s val=%d\n", | 1824 | btf_verifier_log(env, "\t%s val=%d\n", |
| 1743 | btf_name_by_offset(btf, enums[i].name_off), | 1825 | btf_name_by_offset(btf, enums[i].name_off), |
| 1744 | enums[i].val); | 1826 | enums[i].val); |
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 6dd419550aba..fc760d00a38c 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c | |||
| @@ -175,6 +175,7 @@ struct bpf_verifier_stack_elem { | |||
| 175 | 175 | ||
| 176 | #define BPF_COMPLEXITY_LIMIT_INSNS 131072 | 176 | #define BPF_COMPLEXITY_LIMIT_INSNS 131072 |
| 177 | #define BPF_COMPLEXITY_LIMIT_STACK 1024 | 177 | #define BPF_COMPLEXITY_LIMIT_STACK 1024 |
| 178 | #define BPF_COMPLEXITY_LIMIT_STATES 64 | ||
| 178 | 179 | ||
| 179 | #define BPF_MAP_PTR_UNPRIV 1UL | 180 | #define BPF_MAP_PTR_UNPRIV 1UL |
| 180 | #define BPF_MAP_PTR_POISON ((void *)((0xeB9FUL << 1) + \ | 181 | #define BPF_MAP_PTR_POISON ((void *)((0xeB9FUL << 1) + \ |
| @@ -3751,6 +3752,79 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *vstate, | |||
| 3751 | } | 3752 | } |
| 3752 | } | 3753 | } |
| 3753 | 3754 | ||
| 3755 | /* compute branch direction of the expression "if (reg opcode val) goto target;" | ||
| 3756 | * and return: | ||
| 3757 | * 1 - branch will be taken and "goto target" will be executed | ||
| 3758 | * 0 - branch will not be taken and fall-through to next insn | ||
| 3759 | * -1 - unknown. Example: "if (reg < 5)" is unknown when register value range [0,10] | ||
| 3760 | */ | ||
| 3761 | static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode) | ||
| 3762 | { | ||
| 3763 | if (__is_pointer_value(false, reg)) | ||
| 3764 | return -1; | ||
| 3765 | |||
| 3766 | switch (opcode) { | ||
| 3767 | case BPF_JEQ: | ||
| 3768 | if (tnum_is_const(reg->var_off)) | ||
| 3769 | return !!tnum_equals_const(reg->var_off, val); | ||
| 3770 | break; | ||
| 3771 | case BPF_JNE: | ||
| 3772 | if (tnum_is_const(reg->var_off)) | ||
| 3773 | return !tnum_equals_const(reg->var_off, val); | ||
| 3774 | break; | ||
| 3775 | case BPF_JGT: | ||
| 3776 | if (reg->umin_value > val) | ||
| 3777 | return 1; | ||
| 3778 | else if (reg->umax_value <= val) | ||
| 3779 | return 0; | ||
| 3780 | break; | ||
| 3781 | case BPF_JSGT: | ||
| 3782 | if (reg->smin_value > (s64)val) | ||
| 3783 | return 1; | ||
| 3784 | else if (reg->smax_value < (s64)val) | ||
| 3785 | return 0; | ||
| 3786 | break; | ||
| 3787 | case BPF_JLT: | ||
| 3788 | if (reg->umax_value < val) | ||
| 3789 | return 1; | ||
| 3790 | else if (reg->umin_value >= val) | ||
| 3791 | return 0; | ||
| 3792 | break; | ||
| 3793 | case BPF_JSLT: | ||
| 3794 | if (reg->smax_value < (s64)val) | ||
| 3795 | return 1; | ||
| 3796 | else if (reg->smin_value >= (s64)val) | ||
| 3797 | return 0; | ||
| 3798 | break; | ||
| 3799 | case BPF_JGE: | ||
| 3800 | if (reg->umin_value >= val) | ||
| 3801 | return 1; | ||
| 3802 | else if (reg->umax_value < val) | ||
| 3803 | return 0; | ||
| 3804 | break; | ||
| 3805 | case BPF_JSGE: | ||
| 3806 | if (reg->smin_value >= (s64)val) | ||
| 3807 | return 1; | ||
| 3808 | else if (reg->smax_value < (s64)val) | ||
| 3809 | return 0; | ||
| 3810 | break; | ||
| 3811 | case BPF_JLE: | ||
| 3812 | if (reg->umax_value <= val) | ||
| 3813 | return 1; | ||
| 3814 | else if (reg->umin_value > val) | ||
| 3815 | return 0; | ||
| 3816 | break; | ||
| 3817 | case BPF_JSLE: | ||
| 3818 | if (reg->smax_value <= (s64)val) | ||
| 3819 | return 1; | ||
| 3820 | else if (reg->smin_value > (s64)val) | ||
| 3821 | return 0; | ||
| 3822 | break; | ||
| 3823 | } | ||
| 3824 | |||
| 3825 | return -1; | ||
| 3826 | } | ||
| 3827 | |||
| 3754 | /* Adjusts the register min/max values in the case that the dst_reg is the | 3828 | /* Adjusts the register min/max values in the case that the dst_reg is the |
| 3755 | * variable register that we are working on, and src_reg is a constant or we're | 3829 | * variable register that we are working on, and src_reg is a constant or we're |
| 3756 | * simply doing a BPF_K check. | 3830 | * simply doing a BPF_K check. |
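is_branch_taken() lets the verifier skip provably dead branches using the tracked min/max bounds, not only exact constants as before. A worked example for the BPF_JGT case with a simplified range type:

    struct urange { unsigned long long umin, umax; };

    /* 1: always taken, 0: never taken, -1: could go either way. */
    static int branch_jgt(struct urange r, unsigned long long val)
    {
            if (r.umin > val)
                    return 1;
            if (r.umax <= val)
                    return 0;
            return -1;
    }

    /* branch_jgt((struct urange){0, 10}, 20) == 0   -- never taken
     * branch_jgt((struct urange){6, 10}, 5)  == 1   -- always taken
     * branch_jgt((struct urange){0, 10}, 5)  == -1  -- explore both paths */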
| @@ -4152,21 +4226,15 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env, | |||
| 4152 | 4226 | ||
| 4153 | dst_reg = ®s[insn->dst_reg]; | 4227 | dst_reg = ®s[insn->dst_reg]; |
| 4154 | 4228 | ||
| 4155 | /* detect if R == 0 where R was initialized to zero earlier */ | 4229 | if (BPF_SRC(insn->code) == BPF_K) { |
| 4156 | if (BPF_SRC(insn->code) == BPF_K && | 4230 | int pred = is_branch_taken(dst_reg, insn->imm, opcode); |
| 4157 | (opcode == BPF_JEQ || opcode == BPF_JNE) && | 4231 | |
| 4158 | dst_reg->type == SCALAR_VALUE && | 4232 | if (pred == 1) { |
| 4159 | tnum_is_const(dst_reg->var_off)) { | 4233 | /* only follow the goto, ignore fall-through */ |
| 4160 | if ((opcode == BPF_JEQ && dst_reg->var_off.value == insn->imm) || | ||
| 4161 | (opcode == BPF_JNE && dst_reg->var_off.value != insn->imm)) { | ||
| 4162 | /* if (imm == imm) goto pc+off; | ||
| 4163 | * only follow the goto, ignore fall-through | ||
| 4164 | */ | ||
| 4165 | *insn_idx += insn->off; | 4234 | *insn_idx += insn->off; |
| 4166 | return 0; | 4235 | return 0; |
| 4167 | } else { | 4236 | } else if (pred == 0) { |
| 4168 | /* if (imm != imm) goto pc+off; | 4237 | /* only follow fall-through branch, since |
| 4169 | * only follow fall-through branch, since | ||
| 4170 | * that's where the program will go | 4238 | * that's where the program will go |
| 4171 | */ | 4239 | */ |
| 4172 | return 0; | 4240 | return 0; |
| @@ -4980,7 +5048,7 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) | |||
| 4980 | struct bpf_verifier_state_list *new_sl; | 5048 | struct bpf_verifier_state_list *new_sl; |
| 4981 | struct bpf_verifier_state_list *sl; | 5049 | struct bpf_verifier_state_list *sl; |
| 4982 | struct bpf_verifier_state *cur = env->cur_state, *new; | 5050 | struct bpf_verifier_state *cur = env->cur_state, *new; |
| 4983 | int i, j, err; | 5051 | int i, j, err, states_cnt = 0; |
| 4984 | 5052 | ||
| 4985 | sl = env->explored_states[insn_idx]; | 5053 | sl = env->explored_states[insn_idx]; |
| 4986 | if (!sl) | 5054 | if (!sl) |
| @@ -5007,8 +5075,12 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) | |||
| 5007 | return 1; | 5075 | return 1; |
| 5008 | } | 5076 | } |
| 5009 | sl = sl->next; | 5077 | sl = sl->next; |
| 5078 | states_cnt++; | ||
| 5010 | } | 5079 | } |
| 5011 | 5080 | ||
| 5081 | if (!env->allow_ptr_leaks && states_cnt > BPF_COMPLEXITY_LIMIT_STATES) | ||
| 5082 | return 0; | ||
| 5083 | |||
| 5012 | /* there were no equivalent states, remember current one. | 5084 | /* there were no equivalent states, remember current one. |
| 5013 | * technically the current state is not proven to be safe yet, | 5085 | * technically the current state is not proven to be safe yet, |
| 5014 | * but it will either reach outer most bpf_exit (which means it's safe) | 5086 | * but it will either reach outer most bpf_exit (which means it's safe) |
| @@ -5148,6 +5220,9 @@ static int do_check(struct bpf_verifier_env *env) | |||
| 5148 | goto process_bpf_exit; | 5220 | goto process_bpf_exit; |
| 5149 | } | 5221 | } |
| 5150 | 5222 | ||
| 5223 | if (signal_pending(current)) | ||
| 5224 | return -EAGAIN; | ||
| 5225 | |||
| 5151 | if (need_resched()) | 5226 | if (need_resched()) |
| 5152 | cond_resched(); | 5227 | cond_resched(); |
| 5153 | 5228 | ||
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index c89c22c49015..25001913d03b 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c | |||
| @@ -28,12 +28,13 @@ static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx, | |||
| 28 | return ret; | 28 | return ret; |
| 29 | } | 29 | } |
| 30 | 30 | ||
| 31 | static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time) | 31 | static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *ret, |
| 32 | u32 *time) | ||
| 32 | { | 33 | { |
| 33 | struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { 0 }; | 34 | struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { 0 }; |
| 34 | enum bpf_cgroup_storage_type stype; | 35 | enum bpf_cgroup_storage_type stype; |
| 35 | u64 time_start, time_spent = 0; | 36 | u64 time_start, time_spent = 0; |
| 36 | u32 ret = 0, i; | 37 | u32 i; |
| 37 | 38 | ||
| 38 | for_each_cgroup_storage_type(stype) { | 39 | for_each_cgroup_storage_type(stype) { |
| 39 | storage[stype] = bpf_cgroup_storage_alloc(prog, stype); | 40 | storage[stype] = bpf_cgroup_storage_alloc(prog, stype); |
| @@ -49,7 +50,7 @@ static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time) | |||
| 49 | repeat = 1; | 50 | repeat = 1; |
| 50 | time_start = ktime_get_ns(); | 51 | time_start = ktime_get_ns(); |
| 51 | for (i = 0; i < repeat; i++) { | 52 | for (i = 0; i < repeat; i++) { |
| 52 | ret = bpf_test_run_one(prog, ctx, storage); | 53 | *ret = bpf_test_run_one(prog, ctx, storage); |
| 53 | if (need_resched()) { | 54 | if (need_resched()) { |
| 54 | if (signal_pending(current)) | 55 | if (signal_pending(current)) |
| 55 | break; | 56 | break; |
| @@ -65,7 +66,7 @@ static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time) | |||
| 65 | for_each_cgroup_storage_type(stype) | 66 | for_each_cgroup_storage_type(stype) |
| 66 | bpf_cgroup_storage_free(storage[stype]); | 67 | bpf_cgroup_storage_free(storage[stype]); |
| 67 | 68 | ||
| 68 | return ret; | 69 | return 0; |
| 69 | } | 70 | } |
| 70 | 71 | ||
| 71 | static int bpf_test_finish(const union bpf_attr *kattr, | 72 | static int bpf_test_finish(const union bpf_attr *kattr, |
| @@ -165,7 +166,12 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr, | |||
| 165 | __skb_push(skb, hh_len); | 166 | __skb_push(skb, hh_len); |
| 166 | if (is_direct_pkt_access) | 167 | if (is_direct_pkt_access) |
| 167 | bpf_compute_data_pointers(skb); | 168 | bpf_compute_data_pointers(skb); |
| 168 | retval = bpf_test_run(prog, skb, repeat, &duration); | 169 | ret = bpf_test_run(prog, skb, repeat, &retval, &duration); |
| 170 | if (ret) { | ||
| 171 | kfree_skb(skb); | ||
| 172 | kfree(sk); | ||
| 173 | return ret; | ||
| 174 | } | ||
| 169 | if (!is_l2) { | 175 | if (!is_l2) { |
| 170 | if (skb_headroom(skb) < hh_len) { | 176 | if (skb_headroom(skb) < hh_len) { |
| 171 | int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb)); | 177 | int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb)); |
| @@ -212,11 +218,14 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr, | |||
| 212 | rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0); | 218 | rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0); |
| 213 | xdp.rxq = &rxqueue->xdp_rxq; | 219 | xdp.rxq = &rxqueue->xdp_rxq; |
| 214 | 220 | ||
| 215 | retval = bpf_test_run(prog, &xdp, repeat, &duration); | 221 | ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration); |
| 222 | if (ret) | ||
| 223 | goto out; | ||
| 216 | if (xdp.data != data + XDP_PACKET_HEADROOM + NET_IP_ALIGN || | 224 | if (xdp.data != data + XDP_PACKET_HEADROOM + NET_IP_ALIGN || |
| 217 | xdp.data_end != xdp.data + size) | 225 | xdp.data_end != xdp.data + size) |
| 218 | size = xdp.data_end - xdp.data; | 226 | size = xdp.data_end - xdp.data; |
| 219 | ret = bpf_test_finish(kattr, uattr, xdp.data, size, retval, duration); | 227 | ret = bpf_test_finish(kattr, uattr, xdp.data, size, retval, duration); |
| 228 | out: | ||
| 220 | kfree(data); | 229 | kfree(data); |
| 221 | return ret; | 230 | return ret; |
| 222 | } | 231 | } |
diff --git a/net/core/dev.c b/net/core/dev.c index ddc551f24ba2..722d50dbf8a4 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
| @@ -2175,6 +2175,20 @@ static bool remove_xps_queue_cpu(struct net_device *dev, | |||
| 2175 | return active; | 2175 | return active; |
| 2176 | } | 2176 | } |
| 2177 | 2177 | ||
| 2178 | static void reset_xps_maps(struct net_device *dev, | ||
| 2179 | struct xps_dev_maps *dev_maps, | ||
| 2180 | bool is_rxqs_map) | ||
| 2181 | { | ||
| 2182 | if (is_rxqs_map) { | ||
| 2183 | static_key_slow_dec_cpuslocked(&xps_rxqs_needed); | ||
| 2184 | RCU_INIT_POINTER(dev->xps_rxqs_map, NULL); | ||
| 2185 | } else { | ||
| 2186 | RCU_INIT_POINTER(dev->xps_cpus_map, NULL); | ||
| 2187 | } | ||
| 2188 | static_key_slow_dec_cpuslocked(&xps_needed); | ||
| 2189 | kfree_rcu(dev_maps, rcu); | ||
| 2190 | } | ||
| 2191 | |||
| 2178 | static void clean_xps_maps(struct net_device *dev, const unsigned long *mask, | 2192 | static void clean_xps_maps(struct net_device *dev, const unsigned long *mask, |
| 2179 | struct xps_dev_maps *dev_maps, unsigned int nr_ids, | 2193 | struct xps_dev_maps *dev_maps, unsigned int nr_ids, |
| 2180 | u16 offset, u16 count, bool is_rxqs_map) | 2194 | u16 offset, u16 count, bool is_rxqs_map) |
| @@ -2186,18 +2200,15 @@ static void clean_xps_maps(struct net_device *dev, const unsigned long *mask, | |||
| 2186 | j < nr_ids;) | 2200 | j < nr_ids;) |
| 2187 | active |= remove_xps_queue_cpu(dev, dev_maps, j, offset, | 2201 | active |= remove_xps_queue_cpu(dev, dev_maps, j, offset, |
| 2188 | count); | 2202 | count); |
| 2189 | if (!active) { | 2203 | if (!active) |
| 2190 | if (is_rxqs_map) { | 2204 | reset_xps_maps(dev, dev_maps, is_rxqs_map); |
| 2191 | RCU_INIT_POINTER(dev->xps_rxqs_map, NULL); | ||
| 2192 | } else { | ||
| 2193 | RCU_INIT_POINTER(dev->xps_cpus_map, NULL); | ||
| 2194 | 2205 | ||
| 2195 | for (i = offset + (count - 1); count--; i--) | 2206 | if (!is_rxqs_map) { |
| 2196 | netdev_queue_numa_node_write( | 2207 | for (i = offset + (count - 1); count--; i--) { |
| 2197 | netdev_get_tx_queue(dev, i), | 2208 | netdev_queue_numa_node_write( |
| 2198 | NUMA_NO_NODE); | 2209 | netdev_get_tx_queue(dev, i), |
| 2210 | NUMA_NO_NODE); | ||
| 2199 | } | 2211 | } |
| 2200 | kfree_rcu(dev_maps, rcu); | ||
| 2201 | } | 2212 | } |
| 2202 | } | 2213 | } |
| 2203 | 2214 | ||
| @@ -2234,10 +2245,6 @@ static void netif_reset_xps_queues(struct net_device *dev, u16 offset, | |||
| 2234 | false); | 2245 | false); |
| 2235 | 2246 | ||
| 2236 | out_no_maps: | 2247 | out_no_maps: |
| 2237 | if (static_key_enabled(&xps_rxqs_needed)) | ||
| 2238 | static_key_slow_dec_cpuslocked(&xps_rxqs_needed); | ||
| 2239 | |||
| 2240 | static_key_slow_dec_cpuslocked(&xps_needed); | ||
| 2241 | mutex_unlock(&xps_map_mutex); | 2248 | mutex_unlock(&xps_map_mutex); |
| 2242 | cpus_read_unlock(); | 2249 | cpus_read_unlock(); |
| 2243 | } | 2250 | } |
| @@ -2355,9 +2362,12 @@ int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask, | |||
| 2355 | if (!new_dev_maps) | 2362 | if (!new_dev_maps) |
| 2356 | goto out_no_new_maps; | 2363 | goto out_no_new_maps; |
| 2357 | 2364 | ||
| 2358 | static_key_slow_inc_cpuslocked(&xps_needed); | 2365 | if (!dev_maps) { |
| 2359 | if (is_rxqs_map) | 2366 | /* Increment static keys at most once per type */ |
| 2360 | static_key_slow_inc_cpuslocked(&xps_rxqs_needed); | 2367 | static_key_slow_inc_cpuslocked(&xps_needed); |
| 2368 | if (is_rxqs_map) | ||
| 2369 | static_key_slow_inc_cpuslocked(&xps_rxqs_needed); | ||
| 2370 | } | ||
| 2361 | 2371 | ||
| 2362 | for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids), | 2372 | for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids), |
| 2363 | j < nr_ids;) { | 2373 | j < nr_ids;) { |
| @@ -2455,13 +2465,8 @@ out_no_new_maps: | |||
| 2455 | } | 2465 | } |
| 2456 | 2466 | ||
| 2457 | /* free map if not active */ | 2467 | /* free map if not active */ |
| 2458 | if (!active) { | 2468 | if (!active) |
| 2459 | if (is_rxqs_map) | 2469 | reset_xps_maps(dev, dev_maps, is_rxqs_map); |
| 2460 | RCU_INIT_POINTER(dev->xps_rxqs_map, NULL); | ||
| 2461 | else | ||
| 2462 | RCU_INIT_POINTER(dev->xps_cpus_map, NULL); | ||
| 2463 | kfree_rcu(dev_maps, rcu); | ||
| 2464 | } | ||
| 2465 | 2470 | ||
| 2466 | out_no_maps: | 2471 | out_no_maps: |
| 2467 | mutex_unlock(&xps_map_mutex); | 2472 | mutex_unlock(&xps_map_mutex); |
| @@ -5009,7 +5014,7 @@ static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemallo | |||
| 5009 | struct net_device *orig_dev = skb->dev; | 5014 | struct net_device *orig_dev = skb->dev; |
| 5010 | struct packet_type *pt_prev = NULL; | 5015 | struct packet_type *pt_prev = NULL; |
| 5011 | 5016 | ||
| 5012 | list_del(&skb->list); | 5017 | skb_list_del_init(skb); |
| 5013 | __netif_receive_skb_core(skb, pfmemalloc, &pt_prev); | 5018 | __netif_receive_skb_core(skb, pfmemalloc, &pt_prev); |
| 5014 | if (!pt_prev) | 5019 | if (!pt_prev) |
| 5015 | continue; | 5020 | continue; |
| @@ -5165,7 +5170,7 @@ static void netif_receive_skb_list_internal(struct list_head *head) | |||
| 5165 | INIT_LIST_HEAD(&sublist); | 5170 | INIT_LIST_HEAD(&sublist); |
| 5166 | list_for_each_entry_safe(skb, next, head, list) { | 5171 | list_for_each_entry_safe(skb, next, head, list) { |
| 5167 | net_timestamp_check(netdev_tstamp_prequeue, skb); | 5172 | net_timestamp_check(netdev_tstamp_prequeue, skb); |
| 5168 | list_del(&skb->list); | 5173 | skb_list_del_init(skb); |
| 5169 | if (!skb_defer_rx_timestamp(skb)) | 5174 | if (!skb_defer_rx_timestamp(skb)) |
| 5170 | list_add_tail(&skb->list, &sublist); | 5175 | list_add_tail(&skb->list, &sublist); |
| 5171 | } | 5176 | } |
| @@ -5176,7 +5181,7 @@ static void netif_receive_skb_list_internal(struct list_head *head) | |||
| 5176 | rcu_read_lock(); | 5181 | rcu_read_lock(); |
| 5177 | list_for_each_entry_safe(skb, next, head, list) { | 5182 | list_for_each_entry_safe(skb, next, head, list) { |
| 5178 | xdp_prog = rcu_dereference(skb->dev->xdp_prog); | 5183 | xdp_prog = rcu_dereference(skb->dev->xdp_prog); |
| 5179 | list_del(&skb->list); | 5184 | skb_list_del_init(skb); |
| 5180 | if (do_xdp_generic(xdp_prog, skb) == XDP_PASS) | 5185 | if (do_xdp_generic(xdp_prog, skb) == XDP_PASS) |
| 5181 | list_add_tail(&skb->list, &sublist); | 5186 | list_add_tail(&skb->list, &sublist); |
| 5182 | } | 5187 | } |
| @@ -5195,7 +5200,7 @@ static void netif_receive_skb_list_internal(struct list_head *head) | |||
| 5195 | 5200 | ||
| 5196 | if (cpu >= 0) { | 5201 | if (cpu >= 0) { |
| 5197 | /* Will be handled, remove from list */ | 5202 | /* Will be handled, remove from list */ |
| 5198 | list_del(&skb->list); | 5203 | skb_list_del_init(skb); |
| 5199 | enqueue_to_backlog(skb, cpu, &rflow->last_qtail); | 5204 | enqueue_to_backlog(skb, cpu, &rflow->last_qtail); |
| 5200 | } | 5205 | } |
| 5201 | } | 5206 | } |
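These hunks switch the receive-list walkers from list_del() to skb_list_del_init(): a plain delete leaves the node's pointers set to the list-debug poison values, which later skb handling can stumble over, while delete-and-reinit leaves the skb looking like a clean standalone node. A hedged, minimal model of the difference:

    struct node { struct node *next, *prev; };

    static void del_poisoned(struct node *n)        /* models list_del() */
    {
            n->prev->next = n->next;
            n->next->prev = n->prev;
            n->next = (struct node *)0x100;         /* stand-ins for LIST_POISON1/2 */
            n->prev = (struct node *)0x200;
    }

    static void del_and_init(struct node *n)        /* models skb_list_del_init() */
    {
            n->prev->next = n->next;
            n->next->prev = n->prev;
            n->next = n;                            /* node is now an empty list */
            n->prev = n;
    }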
| @@ -6204,8 +6209,8 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi, | |||
| 6204 | napi->skb = NULL; | 6209 | napi->skb = NULL; |
| 6205 | napi->poll = poll; | 6210 | napi->poll = poll; |
| 6206 | if (weight > NAPI_POLL_WEIGHT) | 6211 | if (weight > NAPI_POLL_WEIGHT) |
| 6207 | pr_err_once("netif_napi_add() called with weight %d on device %s\n", | 6212 | netdev_err_once(dev, "%s() called with weight %d\n", __func__, |
| 6208 | weight, dev->name); | 6213 | weight); |
| 6209 | napi->weight = weight; | 6214 | napi->weight = weight; |
| 6210 | list_add(&napi->dev_list, &dev->napi_list); | 6215 | list_add(&napi->dev_list, &dev->napi_list); |
| 6211 | napi->dev = dev; | 6216 | napi->dev = dev; |
diff --git a/net/core/filter.c b/net/core/filter.c index 9a1327eb25fa..8d2c629501e2 100644 --- a/net/core/filter.c +++ b/net/core/filter.c | |||
| @@ -4890,22 +4890,23 @@ bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len, | |||
| 4890 | struct net *net; | 4890 | struct net *net; |
| 4891 | 4891 | ||
| 4892 | family = len == sizeof(tuple->ipv4) ? AF_INET : AF_INET6; | 4892 | family = len == sizeof(tuple->ipv4) ? AF_INET : AF_INET6; |
| 4893 | if (unlikely(family == AF_UNSPEC || netns_id > U32_MAX || flags)) | 4893 | if (unlikely(family == AF_UNSPEC || flags || |
| 4894 | !((s32)netns_id < 0 || netns_id <= S32_MAX))) | ||
| 4894 | goto out; | 4895 | goto out; |
| 4895 | 4896 | ||
| 4896 | if (skb->dev) | 4897 | if (skb->dev) |
| 4897 | caller_net = dev_net(skb->dev); | 4898 | caller_net = dev_net(skb->dev); |
| 4898 | else | 4899 | else |
| 4899 | caller_net = sock_net(skb->sk); | 4900 | caller_net = sock_net(skb->sk); |
| 4900 | if (netns_id) { | 4901 | if ((s32)netns_id < 0) { |
| 4902 | net = caller_net; | ||
| 4903 | sk = sk_lookup(net, tuple, skb, family, proto); | ||
| 4904 | } else { | ||
| 4901 | net = get_net_ns_by_id(caller_net, netns_id); | 4905 | net = get_net_ns_by_id(caller_net, netns_id); |
| 4902 | if (unlikely(!net)) | 4906 | if (unlikely(!net)) |
| 4903 | goto out; | 4907 | goto out; |
| 4904 | sk = sk_lookup(net, tuple, skb, family, proto); | 4908 | sk = sk_lookup(net, tuple, skb, family, proto); |
| 4905 | put_net(net); | 4909 | put_net(net); |
| 4906 | } else { | ||
| 4907 | net = caller_net; | ||
| 4908 | sk = sk_lookup(net, tuple, skb, family, proto); | ||
| 4909 | } | 4910 | } |
| 4910 | 4911 | ||
| 4911 | if (sk) | 4912 | if (sk) |
| @@ -5435,8 +5436,8 @@ static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type | |||
| 5435 | if (size != size_default) | 5436 | if (size != size_default) |
| 5436 | return false; | 5437 | return false; |
| 5437 | break; | 5438 | break; |
| 5438 | case bpf_ctx_range(struct __sk_buff, flow_keys): | 5439 | case bpf_ctx_range_ptr(struct __sk_buff, flow_keys): |
| 5439 | if (size != sizeof(struct bpf_flow_keys *)) | 5440 | if (size != sizeof(__u64)) |
| 5440 | return false; | 5441 | return false; |
| 5441 | break; | 5442 | break; |
| 5442 | default: | 5443 | default: |
| @@ -5464,7 +5465,7 @@ static bool sk_filter_is_valid_access(int off, int size, | |||
| 5464 | case bpf_ctx_range(struct __sk_buff, data): | 5465 | case bpf_ctx_range(struct __sk_buff, data): |
| 5465 | case bpf_ctx_range(struct __sk_buff, data_meta): | 5466 | case bpf_ctx_range(struct __sk_buff, data_meta): |
| 5466 | case bpf_ctx_range(struct __sk_buff, data_end): | 5467 | case bpf_ctx_range(struct __sk_buff, data_end): |
| 5467 | case bpf_ctx_range(struct __sk_buff, flow_keys): | 5468 | case bpf_ctx_range_ptr(struct __sk_buff, flow_keys): |
| 5468 | case bpf_ctx_range_till(struct __sk_buff, family, local_port): | 5469 | case bpf_ctx_range_till(struct __sk_buff, family, local_port): |
| 5469 | return false; | 5470 | return false; |
| 5470 | } | 5471 | } |
| @@ -5489,7 +5490,7 @@ static bool cg_skb_is_valid_access(int off, int size, | |||
| 5489 | switch (off) { | 5490 | switch (off) { |
| 5490 | case bpf_ctx_range(struct __sk_buff, tc_classid): | 5491 | case bpf_ctx_range(struct __sk_buff, tc_classid): |
| 5491 | case bpf_ctx_range(struct __sk_buff, data_meta): | 5492 | case bpf_ctx_range(struct __sk_buff, data_meta): |
| 5492 | case bpf_ctx_range(struct __sk_buff, flow_keys): | 5493 | case bpf_ctx_range_ptr(struct __sk_buff, flow_keys): |
| 5493 | return false; | 5494 | return false; |
| 5494 | case bpf_ctx_range(struct __sk_buff, data): | 5495 | case bpf_ctx_range(struct __sk_buff, data): |
| 5495 | case bpf_ctx_range(struct __sk_buff, data_end): | 5496 | case bpf_ctx_range(struct __sk_buff, data_end): |
| @@ -5530,7 +5531,7 @@ static bool lwt_is_valid_access(int off, int size, | |||
| 5530 | case bpf_ctx_range(struct __sk_buff, tc_classid): | 5531 | case bpf_ctx_range(struct __sk_buff, tc_classid): |
| 5531 | case bpf_ctx_range_till(struct __sk_buff, family, local_port): | 5532 | case bpf_ctx_range_till(struct __sk_buff, family, local_port): |
| 5532 | case bpf_ctx_range(struct __sk_buff, data_meta): | 5533 | case bpf_ctx_range(struct __sk_buff, data_meta): |
| 5533 | case bpf_ctx_range(struct __sk_buff, flow_keys): | 5534 | case bpf_ctx_range_ptr(struct __sk_buff, flow_keys): |
| 5534 | return false; | 5535 | return false; |
| 5535 | } | 5536 | } |
| 5536 | 5537 | ||
| @@ -5756,7 +5757,7 @@ static bool tc_cls_act_is_valid_access(int off, int size, | |||
| 5756 | case bpf_ctx_range(struct __sk_buff, data_end): | 5757 | case bpf_ctx_range(struct __sk_buff, data_end): |
| 5757 | info->reg_type = PTR_TO_PACKET_END; | 5758 | info->reg_type = PTR_TO_PACKET_END; |
| 5758 | break; | 5759 | break; |
| 5759 | case bpf_ctx_range(struct __sk_buff, flow_keys): | 5760 | case bpf_ctx_range_ptr(struct __sk_buff, flow_keys): |
| 5760 | case bpf_ctx_range_till(struct __sk_buff, family, local_port): | 5761 | case bpf_ctx_range_till(struct __sk_buff, family, local_port): |
| 5761 | return false; | 5762 | return false; |
| 5762 | } | 5763 | } |
| @@ -5958,7 +5959,7 @@ static bool sk_skb_is_valid_access(int off, int size, | |||
| 5958 | switch (off) { | 5959 | switch (off) { |
| 5959 | case bpf_ctx_range(struct __sk_buff, tc_classid): | 5960 | case bpf_ctx_range(struct __sk_buff, tc_classid): |
| 5960 | case bpf_ctx_range(struct __sk_buff, data_meta): | 5961 | case bpf_ctx_range(struct __sk_buff, data_meta): |
| 5961 | case bpf_ctx_range(struct __sk_buff, flow_keys): | 5962 | case bpf_ctx_range_ptr(struct __sk_buff, flow_keys): |
| 5962 | return false; | 5963 | return false; |
| 5963 | } | 5964 | } |
| 5964 | 5965 | ||
| @@ -6039,7 +6040,7 @@ static bool flow_dissector_is_valid_access(int off, int size, | |||
| 6039 | case bpf_ctx_range(struct __sk_buff, data_end): | 6040 | case bpf_ctx_range(struct __sk_buff, data_end): |
| 6040 | info->reg_type = PTR_TO_PACKET_END; | 6041 | info->reg_type = PTR_TO_PACKET_END; |
| 6041 | break; | 6042 | break; |
| 6042 | case bpf_ctx_range(struct __sk_buff, flow_keys): | 6043 | case bpf_ctx_range_ptr(struct __sk_buff, flow_keys): |
| 6043 | info->reg_type = PTR_TO_FLOW_KEYS; | 6044 | info->reg_type = PTR_TO_FLOW_KEYS; |
| 6044 | break; | 6045 | break; |
| 6045 | case bpf_ctx_range(struct __sk_buff, tc_classid): | 6046 | case bpf_ctx_range(struct __sk_buff, tc_classid): |
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 33d9227a8b80..7819f7804eeb 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c | |||
| @@ -3800,6 +3800,9 @@ int ndo_dflt_fdb_dump(struct sk_buff *skb, | |||
| 3800 | { | 3800 | { |
| 3801 | int err; | 3801 | int err; |
| 3802 | 3802 | ||
| 3803 | if (dev->type != ARPHRD_ETHER) | ||
| 3804 | return -EINVAL; | ||
| 3805 | |||
| 3803 | netif_addr_lock_bh(dev); | 3806 | netif_addr_lock_bh(dev); |
| 3804 | err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->uc); | 3807 | err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->uc); |
| 3805 | if (err) | 3808 | if (err) |
diff --git a/net/dsa/master.c b/net/dsa/master.c index c90ee3227dea..5e8c9bef78bd 100644 --- a/net/dsa/master.c +++ b/net/dsa/master.c | |||
| @@ -158,8 +158,31 @@ static void dsa_master_ethtool_teardown(struct net_device *dev) | |||
| 158 | cpu_dp->orig_ethtool_ops = NULL; | 158 | cpu_dp->orig_ethtool_ops = NULL; |
| 159 | } | 159 | } |
| 160 | 160 | ||
| 161 | static ssize_t tagging_show(struct device *d, struct device_attribute *attr, | ||
| 162 | char *buf) | ||
| 163 | { | ||
| 164 | struct net_device *dev = to_net_dev(d); | ||
| 165 | struct dsa_port *cpu_dp = dev->dsa_ptr; | ||
| 166 | |||
| 167 | return sprintf(buf, "%s\n", | ||
| 168 | dsa_tag_protocol_to_str(cpu_dp->tag_ops)); | ||
| 169 | } | ||
| 170 | static DEVICE_ATTR_RO(tagging); | ||
| 171 | |||
| 172 | static struct attribute *dsa_slave_attrs[] = { | ||
| 173 | &dev_attr_tagging.attr, | ||
| 174 | NULL | ||
| 175 | }; | ||
| 176 | |||
| 177 | static const struct attribute_group dsa_group = { | ||
| 178 | .name = "dsa", | ||
| 179 | .attrs = dsa_slave_attrs, | ||
| 180 | }; | ||
| 181 | |||
| 161 | int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp) | 182 | int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp) |
| 162 | { | 183 | { |
| 184 | int ret; | ||
| 185 | |||
| 163 | /* If we use a tagging format that doesn't have an ethertype | 186 | /* If we use a tagging format that doesn't have an ethertype |
| 164 | * field, make sure that all packets from this point on get | 187 | * field, make sure that all packets from this point on get |
| 165 | * sent to the tag format's receive function. | 188 | * sent to the tag format's receive function. |
| @@ -168,11 +191,20 @@ int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp) | |||
| 168 | 191 | ||
| 169 | dev->dsa_ptr = cpu_dp; | 192 | dev->dsa_ptr = cpu_dp; |
| 170 | 193 | ||
| 171 | return dsa_master_ethtool_setup(dev); | 194 | ret = dsa_master_ethtool_setup(dev); |
| 195 | if (ret) | ||
| 196 | return ret; | ||
| 197 | |||
| 198 | ret = sysfs_create_group(&dev->dev.kobj, &dsa_group); | ||
| 199 | if (ret) | ||
| 200 | dsa_master_ethtool_teardown(dev); | ||
| 201 | |||
| 202 | return ret; | ||
| 172 | } | 203 | } |
| 173 | 204 | ||
| 174 | void dsa_master_teardown(struct net_device *dev) | 205 | void dsa_master_teardown(struct net_device *dev) |
| 175 | { | 206 | { |
| 207 | sysfs_remove_group(&dev->dev.kobj, &dsa_group); | ||
| 176 | dsa_master_ethtool_teardown(dev); | 208 | dsa_master_ethtool_teardown(dev); |
| 177 | 209 | ||
| 178 | dev->dsa_ptr = NULL; | 210 | dev->dsa_ptr = NULL; |
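With the attribute group now registered on the DSA master device, the tagging protocol is reported once per CPU port instead of once per slave interface. A minimal userspace check, assuming a master interface named eth0 (the interface name is only an example):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/net/eth0/dsa/tagging", "r");
	char proto[32];

	if (!f) {
		perror("dsa/tagging");
		return 1;
	}
	if (fgets(proto, sizeof(proto), f))
		printf("CPU port tagging protocol: %s", proto);
	fclose(f);
	return 0;
}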
diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 7d0c19e7edcf..aec78f5aca72 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c | |||
| @@ -1058,27 +1058,6 @@ static struct device_type dsa_type = { | |||
| 1058 | .name = "dsa", | 1058 | .name = "dsa", |
| 1059 | }; | 1059 | }; |
| 1060 | 1060 | ||
| 1061 | static ssize_t tagging_show(struct device *d, struct device_attribute *attr, | ||
| 1062 | char *buf) | ||
| 1063 | { | ||
| 1064 | struct net_device *dev = to_net_dev(d); | ||
| 1065 | struct dsa_port *dp = dsa_slave_to_port(dev); | ||
| 1066 | |||
| 1067 | return sprintf(buf, "%s\n", | ||
| 1068 | dsa_tag_protocol_to_str(dp->cpu_dp->tag_ops)); | ||
| 1069 | } | ||
| 1070 | static DEVICE_ATTR_RO(tagging); | ||
| 1071 | |||
| 1072 | static struct attribute *dsa_slave_attrs[] = { | ||
| 1073 | &dev_attr_tagging.attr, | ||
| 1074 | NULL | ||
| 1075 | }; | ||
| 1076 | |||
| 1077 | static const struct attribute_group dsa_group = { | ||
| 1078 | .name = "dsa", | ||
| 1079 | .attrs = dsa_slave_attrs, | ||
| 1080 | }; | ||
| 1081 | |||
| 1082 | static void dsa_slave_phylink_validate(struct net_device *dev, | 1061 | static void dsa_slave_phylink_validate(struct net_device *dev, |
| 1083 | unsigned long *supported, | 1062 | unsigned long *supported, |
| 1084 | struct phylink_link_state *state) | 1063 | struct phylink_link_state *state) |
| @@ -1374,14 +1353,8 @@ int dsa_slave_create(struct dsa_port *port) | |||
| 1374 | goto out_phy; | 1353 | goto out_phy; |
| 1375 | } | 1354 | } |
| 1376 | 1355 | ||
| 1377 | ret = sysfs_create_group(&slave_dev->dev.kobj, &dsa_group); | ||
| 1378 | if (ret) | ||
| 1379 | goto out_unreg; | ||
| 1380 | |||
| 1381 | return 0; | 1356 | return 0; |
| 1382 | 1357 | ||
| 1383 | out_unreg: | ||
| 1384 | unregister_netdev(slave_dev); | ||
| 1385 | out_phy: | 1358 | out_phy: |
| 1386 | rtnl_lock(); | 1359 | rtnl_lock(); |
| 1387 | phylink_disconnect_phy(p->dp->pl); | 1360 | phylink_disconnect_phy(p->dp->pl); |
| @@ -1405,7 +1378,6 @@ void dsa_slave_destroy(struct net_device *slave_dev) | |||
| 1405 | rtnl_unlock(); | 1378 | rtnl_unlock(); |
| 1406 | 1379 | ||
| 1407 | dsa_slave_notify(slave_dev, DSA_PORT_UNREGISTER); | 1380 | dsa_slave_notify(slave_dev, DSA_PORT_UNREGISTER); |
| 1408 | sysfs_remove_group(&slave_dev->dev.kobj, &dsa_group); | ||
| 1409 | unregister_netdev(slave_dev); | 1381 | unregister_netdev(slave_dev); |
| 1410 | phylink_destroy(dp->pl); | 1382 | phylink_destroy(dp->pl); |
| 1411 | free_percpu(p->stats64); | 1383 | free_percpu(p->stats64); |
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index d6ee343fdb86..aa0b22697998 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c | |||
| @@ -515,6 +515,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb, | |||
| 515 | struct rb_node *rbn; | 515 | struct rb_node *rbn; |
| 516 | int len; | 516 | int len; |
| 517 | int ihlen; | 517 | int ihlen; |
| 518 | int delta; | ||
| 518 | int err; | 519 | int err; |
| 519 | u8 ecn; | 520 | u8 ecn; |
| 520 | 521 | ||
| @@ -556,10 +557,16 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb, | |||
| 556 | if (len > 65535) | 557 | if (len > 65535) |
| 557 | goto out_oversize; | 558 | goto out_oversize; |
| 558 | 559 | ||
| 560 | delta = - head->truesize; | ||
| 561 | |||
| 559 | /* Head of list must not be cloned. */ | 562 | /* Head of list must not be cloned. */ |
| 560 | if (skb_unclone(head, GFP_ATOMIC)) | 563 | if (skb_unclone(head, GFP_ATOMIC)) |
| 561 | goto out_nomem; | 564 | goto out_nomem; |
| 562 | 565 | ||
| 566 | delta += head->truesize; | ||
| 567 | if (delta) | ||
| 568 | add_frag_mem_limit(qp->q.net, delta); | ||
| 569 | |||
| 563 | /* If the first fragment is fragmented itself, we split | 570 | /* If the first fragment is fragmented itself, we split |
| 564 | * it to two chunks: the first with data and paged part | 571 | * it to two chunks: the first with data and paged part |
| 565 | * and the second, holding only fragments. */ | 572 | * and the second, holding only fragments. */ |
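The same truesize-delta pattern appears again in the nf_conntrack and IPv6 reassembly hunks below. The point is that skb_unclone() can reallocate the head skb's data and grow head->truesize, and that growth was previously invisible to the fragment memory accounting. A hypothetical helper capturing the idiom (name and factoring are mine, not part of the patch):

/* Uncloning may replace head's data area; charge only the growth so the
 * per-netns fragment memory counter keeps matching reality. */
static int frag_unclone_and_charge(struct sk_buff *head,
				   struct netns_frags *nf)
{
	int delta = -head->truesize;

	if (skb_unclone(head, GFP_ATOMIC))
		return -ENOMEM;

	delta += head->truesize;
	if (delta)
		add_frag_mem_limit(nf, delta);

	return 0;
}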
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index 35a786c0aaa0..e609b08c9df4 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c | |||
| @@ -547,7 +547,7 @@ static void ip_list_rcv_finish(struct net *net, struct sock *sk, | |||
| 547 | list_for_each_entry_safe(skb, next, head, list) { | 547 | list_for_each_entry_safe(skb, next, head, list) { |
| 548 | struct dst_entry *dst; | 548 | struct dst_entry *dst; |
| 549 | 549 | ||
| 550 | list_del(&skb->list); | 550 | skb_list_del_init(skb); |
| 551 | /* if ingress device is enslaved to an L3 master device pass the | 551 | /* if ingress device is enslaved to an L3 master device pass the |
| 552 | * skb to its handler for processing | 552 | * skb to its handler for processing |
| 553 | */ | 553 | */ |
| @@ -594,7 +594,7 @@ void ip_list_rcv(struct list_head *head, struct packet_type *pt, | |||
| 594 | struct net_device *dev = skb->dev; | 594 | struct net_device *dev = skb->dev; |
| 595 | struct net *net = dev_net(dev); | 595 | struct net *net = dev_net(dev); |
| 596 | 596 | ||
| 597 | list_del(&skb->list); | 597 | skb_list_del_init(skb); |
| 598 | skb = ip_rcv_core(skb, net); | 598 | skb = ip_rcv_core(skb, net); |
| 599 | if (skb == NULL) | 599 | if (skb == NULL) |
| 600 | continue; | 600 | continue; |
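The list_del() to skb_list_del_init() conversions matter because an skb peeled off the per-CPU receive sublist may still hit code that treats a non-NULL skb->next as list membership (for example a later kfree_skb_list()-style free); clearing the pointer on unlink avoids walking into packets that are still queued. Roughly what the helper does, sketched from the include/linux/skbuff.h definition:

static inline void skb_list_del_init(struct sk_buff *skb)
{
	__list_del_entry(&skb->list);	/* unlink from the list_head chain */
	skb_mark_not_on_list(skb);	/* i.e. skb->next = NULL */
}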
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 3f510cad0b3e..d1676d8a6ed7 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
| @@ -1904,7 +1904,9 @@ static int tso_fragment(struct sock *sk, enum tcp_queue tcp_queue, | |||
| 1904 | * This algorithm is from John Heffner. | 1904 | * This algorithm is from John Heffner. |
| 1905 | */ | 1905 | */ |
| 1906 | static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb, | 1906 | static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb, |
| 1907 | bool *is_cwnd_limited, u32 max_segs) | 1907 | bool *is_cwnd_limited, |
| 1908 | bool *is_rwnd_limited, | ||
| 1909 | u32 max_segs) | ||
| 1908 | { | 1910 | { |
| 1909 | const struct inet_connection_sock *icsk = inet_csk(sk); | 1911 | const struct inet_connection_sock *icsk = inet_csk(sk); |
| 1910 | u32 age, send_win, cong_win, limit, in_flight; | 1912 | u32 age, send_win, cong_win, limit, in_flight; |
| @@ -1912,9 +1914,6 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb, | |||
| 1912 | struct sk_buff *head; | 1914 | struct sk_buff *head; |
| 1913 | int win_divisor; | 1915 | int win_divisor; |
| 1914 | 1916 | ||
| 1915 | if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) | ||
| 1916 | goto send_now; | ||
| 1917 | |||
| 1918 | if (icsk->icsk_ca_state >= TCP_CA_Recovery) | 1917 | if (icsk->icsk_ca_state >= TCP_CA_Recovery) |
| 1919 | goto send_now; | 1918 | goto send_now; |
| 1920 | 1919 | ||
| @@ -1973,10 +1972,27 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb, | |||
| 1973 | if (age < (tp->srtt_us >> 4)) | 1972 | if (age < (tp->srtt_us >> 4)) |
| 1974 | goto send_now; | 1973 | goto send_now; |
| 1975 | 1974 | ||
| 1976 | /* Ok, it looks like it is advisable to defer. */ | 1975 | /* Ok, it looks like it is advisable to defer. |
| 1976 | * Three cases are tracked : | ||
| 1977 | * 1) We are cwnd-limited | ||
| 1978 | * 2) We are rwnd-limited | ||
| 1979 | * 3) We are application limited. | ||
| 1980 | */ | ||
| 1981 | if (cong_win < send_win) { | ||
| 1982 | if (cong_win <= skb->len) { | ||
| 1983 | *is_cwnd_limited = true; | ||
| 1984 | return true; | ||
| 1985 | } | ||
| 1986 | } else { | ||
| 1987 | if (send_win <= skb->len) { | ||
| 1988 | *is_rwnd_limited = true; | ||
| 1989 | return true; | ||
| 1990 | } | ||
| 1991 | } | ||
| 1977 | 1992 | ||
| 1978 | if (cong_win < send_win && cong_win <= skb->len) | 1993 | /* If this packet won't get more data, do not wait. */ |
| 1979 | *is_cwnd_limited = true; | 1994 | if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) |
| 1995 | goto send_now; | ||
| 1980 | 1996 | ||
| 1981 | return true; | 1997 | return true; |
| 1982 | 1998 | ||
| @@ -2356,7 +2372,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, | |||
| 2356 | } else { | 2372 | } else { |
| 2357 | if (!push_one && | 2373 | if (!push_one && |
| 2358 | tcp_tso_should_defer(sk, skb, &is_cwnd_limited, | 2374 | tcp_tso_should_defer(sk, skb, &is_cwnd_limited, |
| 2359 | max_segs)) | 2375 | &is_rwnd_limited, max_segs)) |
| 2360 | break; | 2376 | break; |
| 2361 | } | 2377 | } |
| 2362 | 2378 | ||
| @@ -2494,15 +2510,18 @@ void tcp_send_loss_probe(struct sock *sk) | |||
| 2494 | goto rearm_timer; | 2510 | goto rearm_timer; |
| 2495 | } | 2511 | } |
| 2496 | skb = skb_rb_last(&sk->tcp_rtx_queue); | 2512 | skb = skb_rb_last(&sk->tcp_rtx_queue); |
| 2513 | if (unlikely(!skb)) { | ||
| 2514 | WARN_ONCE(tp->packets_out, | ||
| 2515 | "invalid inflight: %u state %u cwnd %u mss %d\n", | ||
| 2516 | tp->packets_out, sk->sk_state, tp->snd_cwnd, mss); | ||
| 2517 | inet_csk(sk)->icsk_pending = 0; | ||
| 2518 | return; | ||
| 2519 | } | ||
| 2497 | 2520 | ||
| 2498 | /* At most one outstanding TLP retransmission. */ | 2521 | /* At most one outstanding TLP retransmission. */ |
| 2499 | if (tp->tlp_high_seq) | 2522 | if (tp->tlp_high_seq) |
| 2500 | goto rearm_timer; | 2523 | goto rearm_timer; |
| 2501 | 2524 | ||
| 2502 | /* Retransmit last segment. */ | ||
| 2503 | if (WARN_ON(!skb)) | ||
| 2504 | goto rearm_timer; | ||
| 2505 | |||
| 2506 | if (skb_still_in_host_queue(sk, skb)) | 2525 | if (skb_still_in_host_queue(sk, skb)) |
| 2507 | goto rearm_timer; | 2526 | goto rearm_timer; |
| 2508 | 2527 | ||
| @@ -2920,7 +2939,7 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs) | |||
| 2920 | TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS; | 2939 | TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS; |
| 2921 | trace_tcp_retransmit_skb(sk, skb); | 2940 | trace_tcp_retransmit_skb(sk, skb); |
| 2922 | } else if (err != -EBUSY) { | 2941 | } else if (err != -EBUSY) { |
| 2923 | NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL); | 2942 | NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL, segs); |
| 2924 | } | 2943 | } |
| 2925 | return err; | 2944 | return err; |
| 2926 | } | 2945 | } |
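The tail of tcp_tso_should_defer() now classifies the deferral before taking the FIN shortcut: a send blocked by the congestion window marks *is_cwnd_limited, one blocked by the receiver window marks the new *is_rwnd_limited, and only the remaining application-limited case checks whether the skb can still grow. A condensed sketch of that decision (comments are mine; the flags feed tcp_write_xmit()'s existing cwnd/rwnd-limited accounting):

	if (cong_win < send_win) {
		if (cong_win <= skb->len) {
			*is_cwnd_limited = true;	/* congestion window is the limit */
			return true;			/* defer */
		}
	} else {
		if (send_win <= skb->len) {
			*is_rwnd_limited = true;	/* receiver window is the limit */
			return true;			/* defer */
		}
	}

	/* Application limited: if this skb cannot grow further (FIN set),
	 * send it now instead of deferring. */
	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
		goto send_now;

	return true;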
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index 091c53925e4d..f87dbc78b6bc 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c | |||
| @@ -378,7 +378,7 @@ static void tcp_probe_timer(struct sock *sk) | |||
| 378 | return; | 378 | return; |
| 379 | } | 379 | } |
| 380 | 380 | ||
| 381 | if (icsk->icsk_probes_out > max_probes) { | 381 | if (icsk->icsk_probes_out >= max_probes) { |
| 382 | abort: tcp_write_err(sk); | 382 | abort: tcp_write_err(sk); |
| 383 | } else { | 383 | } else { |
| 384 | /* Only send another probe if we didn't close things up. */ | 384 | /* Only send another probe if we didn't close things up. */ |
| @@ -484,11 +484,12 @@ void tcp_retransmit_timer(struct sock *sk) | |||
| 484 | goto out_reset_timer; | 484 | goto out_reset_timer; |
| 485 | } | 485 | } |
| 486 | 486 | ||
| 487 | __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEOUTS); | ||
| 487 | if (tcp_write_timeout(sk)) | 488 | if (tcp_write_timeout(sk)) |
| 488 | goto out; | 489 | goto out; |
| 489 | 490 | ||
| 490 | if (icsk->icsk_retransmits == 0) { | 491 | if (icsk->icsk_retransmits == 0) { |
| 491 | int mib_idx; | 492 | int mib_idx = 0; |
| 492 | 493 | ||
| 493 | if (icsk->icsk_ca_state == TCP_CA_Recovery) { | 494 | if (icsk->icsk_ca_state == TCP_CA_Recovery) { |
| 494 | if (tcp_is_sack(tp)) | 495 | if (tcp_is_sack(tp)) |
| @@ -503,10 +504,9 @@ void tcp_retransmit_timer(struct sock *sk) | |||
| 503 | mib_idx = LINUX_MIB_TCPSACKFAILURES; | 504 | mib_idx = LINUX_MIB_TCPSACKFAILURES; |
| 504 | else | 505 | else |
| 505 | mib_idx = LINUX_MIB_TCPRENOFAILURES; | 506 | mib_idx = LINUX_MIB_TCPRENOFAILURES; |
| 506 | } else { | ||
| 507 | mib_idx = LINUX_MIB_TCPTIMEOUTS; | ||
| 508 | } | 507 | } |
| 509 | __NET_INC_STATS(sock_net(sk), mib_idx); | 508 | if (mib_idx) |
| 509 | __NET_INC_STATS(sock_net(sk), mib_idx); | ||
| 510 | } | 510 | } |
| 511 | 511 | ||
| 512 | tcp_enter_loss(sk); | 512 | tcp_enter_loss(sk); |
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c index 96577e742afd..c1d85830c906 100644 --- a/net/ipv6/ip6_input.c +++ b/net/ipv6/ip6_input.c | |||
| @@ -95,7 +95,7 @@ static void ip6_list_rcv_finish(struct net *net, struct sock *sk, | |||
| 95 | list_for_each_entry_safe(skb, next, head, list) { | 95 | list_for_each_entry_safe(skb, next, head, list) { |
| 96 | struct dst_entry *dst; | 96 | struct dst_entry *dst; |
| 97 | 97 | ||
| 98 | list_del(&skb->list); | 98 | skb_list_del_init(skb); |
| 99 | /* if ingress device is enslaved to an L3 master device pass the | 99 | /* if ingress device is enslaved to an L3 master device pass the |
| 100 | * skb to its handler for processing | 100 | * skb to its handler for processing |
| 101 | */ | 101 | */ |
| @@ -296,7 +296,7 @@ void ipv6_list_rcv(struct list_head *head, struct packet_type *pt, | |||
| 296 | struct net_device *dev = skb->dev; | 296 | struct net_device *dev = skb->dev; |
| 297 | struct net *net = dev_net(dev); | 297 | struct net *net = dev_net(dev); |
| 298 | 298 | ||
| 299 | list_del(&skb->list); | 299 | skb_list_del_init(skb); |
| 300 | skb = ip6_rcv_core(skb, dev, net); | 300 | skb = ip6_rcv_core(skb, dev, net); |
| 301 | if (skb == NULL) | 301 | if (skb == NULL) |
| 302 | continue; | 302 | continue; |
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 827a3f5ff3bb..fcd3c66ded16 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c | |||
| @@ -195,37 +195,37 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6, | |||
| 195 | const struct ipv6_pinfo *np = inet6_sk(sk); | 195 | const struct ipv6_pinfo *np = inet6_sk(sk); |
| 196 | struct in6_addr *first_hop = &fl6->daddr; | 196 | struct in6_addr *first_hop = &fl6->daddr; |
| 197 | struct dst_entry *dst = skb_dst(skb); | 197 | struct dst_entry *dst = skb_dst(skb); |
| 198 | unsigned int head_room; | ||
| 198 | struct ipv6hdr *hdr; | 199 | struct ipv6hdr *hdr; |
| 199 | u8 proto = fl6->flowi6_proto; | 200 | u8 proto = fl6->flowi6_proto; |
| 200 | int seg_len = skb->len; | 201 | int seg_len = skb->len; |
| 201 | int hlimit = -1; | 202 | int hlimit = -1; |
| 202 | u32 mtu; | 203 | u32 mtu; |
| 203 | 204 | ||
| 204 | if (opt) { | 205 | head_room = sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev); |
| 205 | unsigned int head_room; | 206 | if (opt) |
| 207 | head_room += opt->opt_nflen + opt->opt_flen; | ||
| 206 | 208 | ||
| 207 | /* First: exthdrs may take lots of space (~8K for now) | 209 | if (unlikely(skb_headroom(skb) < head_room)) { |
| 208 | MAX_HEADER is not enough. | 210 | struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room); |
| 209 | */ | 211 | if (!skb2) { |
| 210 | head_room = opt->opt_nflen + opt->opt_flen; | 212 | IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), |
| 211 | seg_len += head_room; | 213 | IPSTATS_MIB_OUTDISCARDS); |
| 212 | head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev); | 214 | kfree_skb(skb); |
| 213 | 215 | return -ENOBUFS; | |
| 214 | if (skb_headroom(skb) < head_room) { | ||
| 215 | struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room); | ||
| 216 | if (!skb2) { | ||
| 217 | IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), | ||
| 218 | IPSTATS_MIB_OUTDISCARDS); | ||
| 219 | kfree_skb(skb); | ||
| 220 | return -ENOBUFS; | ||
| 221 | } | ||
| 222 | if (skb->sk) | ||
| 223 | skb_set_owner_w(skb2, skb->sk); | ||
| 224 | consume_skb(skb); | ||
| 225 | skb = skb2; | ||
| 226 | } | 216 | } |
| 217 | if (skb->sk) | ||
| 218 | skb_set_owner_w(skb2, skb->sk); | ||
| 219 | consume_skb(skb); | ||
| 220 | skb = skb2; | ||
| 221 | } | ||
| 222 | |||
| 223 | if (opt) { | ||
| 224 | seg_len += opt->opt_nflen + opt->opt_flen; | ||
| 225 | |||
| 227 | if (opt->opt_flen) | 226 | if (opt->opt_flen) |
| 228 | ipv6_push_frag_opts(skb, opt, &proto); | 227 | ipv6_push_frag_opts(skb, opt, &proto); |
| 228 | |||
| 229 | if (opt->opt_nflen) | 229 | if (opt->opt_nflen) |
| 230 | ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop, | 230 | ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop, |
| 231 | &fl6->saddr); | 231 | &fl6->saddr); |
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index d219979c3e52..181da2c40f9a 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c | |||
| @@ -341,7 +341,7 @@ static bool | |||
| 341 | nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_device *dev) | 341 | nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_device *dev) |
| 342 | { | 342 | { |
| 343 | struct sk_buff *fp, *head = fq->q.fragments; | 343 | struct sk_buff *fp, *head = fq->q.fragments; |
| 344 | int payload_len; | 344 | int payload_len, delta; |
| 345 | u8 ecn; | 345 | u8 ecn; |
| 346 | 346 | ||
| 347 | inet_frag_kill(&fq->q); | 347 | inet_frag_kill(&fq->q); |
| @@ -363,10 +363,16 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_devic | |||
| 363 | return false; | 363 | return false; |
| 364 | } | 364 | } |
| 365 | 365 | ||
| 366 | delta = - head->truesize; | ||
| 367 | |||
| 366 | /* Head of list must not be cloned. */ | 368 | /* Head of list must not be cloned. */ |
| 367 | if (skb_unclone(head, GFP_ATOMIC)) | 369 | if (skb_unclone(head, GFP_ATOMIC)) |
| 368 | return false; | 370 | return false; |
| 369 | 371 | ||
| 372 | delta += head->truesize; | ||
| 373 | if (delta) | ||
| 374 | add_frag_mem_limit(fq->q.net, delta); | ||
| 375 | |||
| 370 | /* If the first fragment is fragmented itself, we split | 376 | /* If the first fragment is fragmented itself, we split |
| 371 | * it to two chunks: the first with data and paged part | 377 | * it to two chunks: the first with data and paged part |
| 372 | * and the second, holding only fragments. */ | 378 | * and the second, holding only fragments. */ |
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index 5c3c92713096..aa26c45486d9 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c | |||
| @@ -281,7 +281,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev, | |||
| 281 | { | 281 | { |
| 282 | struct net *net = container_of(fq->q.net, struct net, ipv6.frags); | 282 | struct net *net = container_of(fq->q.net, struct net, ipv6.frags); |
| 283 | struct sk_buff *fp, *head = fq->q.fragments; | 283 | struct sk_buff *fp, *head = fq->q.fragments; |
| 284 | int payload_len; | 284 | int payload_len, delta; |
| 285 | unsigned int nhoff; | 285 | unsigned int nhoff; |
| 286 | int sum_truesize; | 286 | int sum_truesize; |
| 287 | u8 ecn; | 287 | u8 ecn; |
| @@ -322,10 +322,16 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev, | |||
| 322 | if (payload_len > IPV6_MAXPLEN) | 322 | if (payload_len > IPV6_MAXPLEN) |
| 323 | goto out_oversize; | 323 | goto out_oversize; |
| 324 | 324 | ||
| 325 | delta = - head->truesize; | ||
| 326 | |||
| 325 | /* Head of list must not be cloned. */ | 327 | /* Head of list must not be cloned. */ |
| 326 | if (skb_unclone(head, GFP_ATOMIC)) | 328 | if (skb_unclone(head, GFP_ATOMIC)) |
| 327 | goto out_oom; | 329 | goto out_oom; |
| 328 | 330 | ||
| 331 | delta += head->truesize; | ||
| 332 | if (delta) | ||
| 333 | add_frag_mem_limit(fq->q.net, delta); | ||
| 334 | |||
| 329 | /* If the first fragment is fragmented itself, we split | 335 | /* If the first fragment is fragmented itself, we split |
| 330 | * it to two chunks: the first with data and paged part | 336 | * it to two chunks: the first with data and paged part |
| 331 | * and the second, holding only fragments. */ | 337 | * and the second, holding only fragments. */ |
diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c index a8854dd3e9c5..8181ee7e1e27 100644 --- a/net/ipv6/seg6_iptunnel.c +++ b/net/ipv6/seg6_iptunnel.c | |||
| @@ -347,6 +347,7 @@ static int seg6_output(struct net *net, struct sock *sk, struct sk_buff *skb) | |||
| 347 | struct ipv6hdr *hdr = ipv6_hdr(skb); | 347 | struct ipv6hdr *hdr = ipv6_hdr(skb); |
| 348 | struct flowi6 fl6; | 348 | struct flowi6 fl6; |
| 349 | 349 | ||
| 350 | memset(&fl6, 0, sizeof(fl6)); | ||
| 350 | fl6.daddr = hdr->daddr; | 351 | fl6.daddr = hdr->daddr; |
| 351 | fl6.saddr = hdr->saddr; | 352 | fl6.saddr = hdr->saddr; |
| 352 | fl6.flowlabel = ip6_flowinfo(hdr); | 353 | fl6.flowlabel = ip6_flowinfo(hdr); |
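The added memset() matters because fl6 lives on the stack and only a handful of its fields are filled in by hand; any stale bytes in the rest (flowi6_oif, flowi6_mark, uid, ...) would feed garbage into the route lookup. A sketch of the surrounding code in seg6_output(), reconstructed from memory:

	struct flowi6 fl6;

	memset(&fl6, 0, sizeof(fl6));	/* no stale oif/mark bits */
	fl6.daddr = hdr->daddr;
	fl6.saddr = hdr->saddr;
	fl6.flowlabel = ip6_flowinfo(hdr);
	fl6.flowi6_mark = skb->mark;
	fl6.flowi6_proto = hdr->nexthdr;

	dst = ip6_route_output(net, NULL, &fl6);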
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 51622333d460..818aa0060349 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c | |||
| @@ -2891,7 +2891,7 @@ cfg80211_beacon_dup(struct cfg80211_beacon_data *beacon) | |||
| 2891 | 2891 | ||
| 2892 | len = beacon->head_len + beacon->tail_len + beacon->beacon_ies_len + | 2892 | len = beacon->head_len + beacon->tail_len + beacon->beacon_ies_len + |
| 2893 | beacon->proberesp_ies_len + beacon->assocresp_ies_len + | 2893 | beacon->proberesp_ies_len + beacon->assocresp_ies_len + |
| 2894 | beacon->probe_resp_len; | 2894 | beacon->probe_resp_len + beacon->lci_len + beacon->civicloc_len; |
| 2895 | 2895 | ||
| 2896 | new_beacon = kzalloc(sizeof(*new_beacon) + len, GFP_KERNEL); | 2896 | new_beacon = kzalloc(sizeof(*new_beacon) + len, GFP_KERNEL); |
| 2897 | if (!new_beacon) | 2897 | if (!new_beacon) |
| @@ -2934,8 +2934,9 @@ cfg80211_beacon_dup(struct cfg80211_beacon_data *beacon) | |||
| 2934 | memcpy(pos, beacon->probe_resp, beacon->probe_resp_len); | 2934 | memcpy(pos, beacon->probe_resp, beacon->probe_resp_len); |
| 2935 | pos += beacon->probe_resp_len; | 2935 | pos += beacon->probe_resp_len; |
| 2936 | } | 2936 | } |
| 2937 | if (beacon->ftm_responder) | 2937 | |
| 2938 | new_beacon->ftm_responder = beacon->ftm_responder; | 2938 | /* might copy -1, meaning no changes requested */ |
| 2939 | new_beacon->ftm_responder = beacon->ftm_responder; | ||
| 2939 | if (beacon->lci) { | 2940 | if (beacon->lci) { |
| 2940 | new_beacon->lci_len = beacon->lci_len; | 2941 | new_beacon->lci_len = beacon->lci_len; |
| 2941 | new_beacon->lci = pos; | 2942 | new_beacon->lci = pos; |
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 5836ddeac9e3..5f3c81e705c7 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c | |||
| @@ -1015,6 +1015,8 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, | |||
| 1015 | if (local->open_count == 0) | 1015 | if (local->open_count == 0) |
| 1016 | ieee80211_clear_tx_pending(local); | 1016 | ieee80211_clear_tx_pending(local); |
| 1017 | 1017 | ||
| 1018 | sdata->vif.bss_conf.beacon_int = 0; | ||
| 1019 | |||
| 1018 | /* | 1020 | /* |
| 1019 | * If the interface goes down while suspended, presumably because | 1021 | * If the interface goes down while suspended, presumably because |
| 1020 | * the device was unplugged and that happens before our resume, | 1022 | * the device was unplugged and that happens before our resume, |
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index d2bc8d57c87e..bcf5ffc1567a 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c | |||
| @@ -2766,6 +2766,7 @@ static bool ieee80211_mark_sta_auth(struct ieee80211_sub_if_data *sdata, | |||
| 2766 | { | 2766 | { |
| 2767 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; | 2767 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; |
| 2768 | struct sta_info *sta; | 2768 | struct sta_info *sta; |
| 2769 | bool result = true; | ||
| 2769 | 2770 | ||
| 2770 | sdata_info(sdata, "authenticated\n"); | 2771 | sdata_info(sdata, "authenticated\n"); |
| 2771 | ifmgd->auth_data->done = true; | 2772 | ifmgd->auth_data->done = true; |
| @@ -2778,15 +2779,18 @@ static bool ieee80211_mark_sta_auth(struct ieee80211_sub_if_data *sdata, | |||
| 2778 | sta = sta_info_get(sdata, bssid); | 2779 | sta = sta_info_get(sdata, bssid); |
| 2779 | if (!sta) { | 2780 | if (!sta) { |
| 2780 | WARN_ONCE(1, "%s: STA %pM not found", sdata->name, bssid); | 2781 | WARN_ONCE(1, "%s: STA %pM not found", sdata->name, bssid); |
| 2781 | return false; | 2782 | result = false; |
| 2783 | goto out; | ||
| 2782 | } | 2784 | } |
| 2783 | if (sta_info_move_state(sta, IEEE80211_STA_AUTH)) { | 2785 | if (sta_info_move_state(sta, IEEE80211_STA_AUTH)) { |
| 2784 | sdata_info(sdata, "failed moving %pM to auth\n", bssid); | 2786 | sdata_info(sdata, "failed moving %pM to auth\n", bssid); |
| 2785 | return false; | 2787 | result = false; |
| 2788 | goto out; | ||
| 2786 | } | 2789 | } |
| 2787 | mutex_unlock(&sdata->local->sta_mtx); | ||
| 2788 | 2790 | ||
| 2789 | return true; | 2791 | out: |
| 2792 | mutex_unlock(&sdata->local->sta_mtx); | ||
| 2793 | return result; | ||
| 2790 | } | 2794 | } |
| 2791 | 2795 | ||
| 2792 | static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata, | 2796 | static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata, |
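The ieee80211_mark_sta_auth() change is a lock-balance fix: the old early returns for a missing STA or a failed state transition left sta_mtx held. A sketch of the resulting shape (the mutex is taken just before the lookup in the real function; messages and details elided):

	bool result = true;

	mutex_lock(&sdata->local->sta_mtx);

	sta = sta_info_get(sdata, bssid);
	if (!sta) {
		result = false;
		goto out;
	}
	if (sta_info_move_state(sta, IEEE80211_STA_AUTH)) {
		result = false;
		goto out;
	}
out:
	mutex_unlock(&sdata->local->sta_mtx);	/* single unlock site */
	return result;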
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 3bd3b5769797..428f7ad5f9b5 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c | |||
| @@ -1403,6 +1403,7 @@ ieee80211_rx_h_check_dup(struct ieee80211_rx_data *rx) | |||
| 1403 | return RX_CONTINUE; | 1403 | return RX_CONTINUE; |
| 1404 | 1404 | ||
| 1405 | if (ieee80211_is_ctl(hdr->frame_control) || | 1405 | if (ieee80211_is_ctl(hdr->frame_control) || |
| 1406 | ieee80211_is_nullfunc(hdr->frame_control) || | ||
| 1406 | ieee80211_is_qos_nullfunc(hdr->frame_control) || | 1407 | ieee80211_is_qos_nullfunc(hdr->frame_control) || |
| 1407 | is_multicast_ether_addr(hdr->addr1)) | 1408 | is_multicast_ether_addr(hdr->addr1)) |
| 1408 | return RX_CONTINUE; | 1409 | return RX_CONTINUE; |
| @@ -3063,7 +3064,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) | |||
| 3063 | cfg80211_sta_opmode_change_notify(sdata->dev, | 3064 | cfg80211_sta_opmode_change_notify(sdata->dev, |
| 3064 | rx->sta->addr, | 3065 | rx->sta->addr, |
| 3065 | &sta_opmode, | 3066 | &sta_opmode, |
| 3066 | GFP_KERNEL); | 3067 | GFP_ATOMIC); |
| 3067 | goto handled; | 3068 | goto handled; |
| 3068 | } | 3069 | } |
| 3069 | case WLAN_HT_ACTION_NOTIFY_CHANWIDTH: { | 3070 | case WLAN_HT_ACTION_NOTIFY_CHANWIDTH: { |
| @@ -3100,7 +3101,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) | |||
| 3100 | cfg80211_sta_opmode_change_notify(sdata->dev, | 3101 | cfg80211_sta_opmode_change_notify(sdata->dev, |
| 3101 | rx->sta->addr, | 3102 | rx->sta->addr, |
| 3102 | &sta_opmode, | 3103 | &sta_opmode, |
| 3103 | GFP_KERNEL); | 3104 | GFP_ATOMIC); |
| 3104 | goto handled; | 3105 | goto handled; |
| 3105 | } | 3106 | } |
| 3106 | default: | 3107 | default: |
diff --git a/net/mac80211/status.c b/net/mac80211/status.c index aa4afbf0abaf..a794ca729000 100644 --- a/net/mac80211/status.c +++ b/net/mac80211/status.c | |||
| @@ -964,6 +964,8 @@ void ieee80211_tx_status_ext(struct ieee80211_hw *hw, | |||
| 964 | /* Track when last TDLS packet was ACKed */ | 964 | /* Track when last TDLS packet was ACKed */ |
| 965 | if (test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH)) | 965 | if (test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH)) |
| 966 | sta->status_stats.last_tdls_pkt_time = jiffies; | 966 | sta->status_stats.last_tdls_pkt_time = jiffies; |
| 967 | } else if (test_sta_flag(sta, WLAN_STA_PS_STA)) { | ||
| 968 | return; | ||
| 967 | } else { | 969 | } else { |
| 968 | ieee80211_lost_packet(sta, info); | 970 | ieee80211_lost_packet(sta, info); |
| 969 | } | 971 | } |
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index e0ccee23fbcd..1f536ba573b4 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c | |||
| @@ -439,8 +439,8 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx) | |||
| 439 | if (ieee80211_hw_check(&tx->local->hw, QUEUE_CONTROL)) | 439 | if (ieee80211_hw_check(&tx->local->hw, QUEUE_CONTROL)) |
| 440 | info->hw_queue = tx->sdata->vif.cab_queue; | 440 | info->hw_queue = tx->sdata->vif.cab_queue; |
| 441 | 441 | ||
| 442 | /* no stations in PS mode */ | 442 | /* no stations in PS mode and no buffered packets */ |
| 443 | if (!atomic_read(&ps->num_sta_ps)) | 443 | if (!atomic_read(&ps->num_sta_ps) && skb_queue_empty(&ps->bc_buf)) |
| 444 | return TX_CONTINUE; | 444 | return TX_CONTINUE; |
| 445 | 445 | ||
| 446 | info->flags |= IEEE80211_TX_CTL_SEND_AFTER_DTIM; | 446 | info->flags |= IEEE80211_TX_CTL_SEND_AFTER_DTIM; |
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index a4660c48ff01..cd94f925495a 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c | |||
| @@ -1166,7 +1166,7 @@ static int ovs_ct_commit(struct net *net, struct sw_flow_key *key, | |||
| 1166 | &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); | 1166 | &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); |
| 1167 | if (err) { | 1167 | if (err) { |
| 1168 | net_warn_ratelimited("openvswitch: zone: %u " | 1168 | net_warn_ratelimited("openvswitch: zone: %u " |
| 1169 | "execeeds conntrack limit\n", | 1169 | "exceeds conntrack limit\n", |
| 1170 | info->zone.id); | 1170 | info->zone.id); |
| 1171 | return err; | 1171 | return err; |
| 1172 | } | 1172 | } |
diff --git a/net/sched/act_police.c b/net/sched/act_police.c index 37c9b8f0e10f..ec8ec55e0fe8 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c | |||
| @@ -85,7 +85,7 @@ static int tcf_police_init(struct net *net, struct nlattr *nla, | |||
| 85 | int ovr, int bind, bool rtnl_held, | 85 | int ovr, int bind, bool rtnl_held, |
| 86 | struct netlink_ext_ack *extack) | 86 | struct netlink_ext_ack *extack) |
| 87 | { | 87 | { |
| 88 | int ret = 0, err; | 88 | int ret = 0, tcfp_result = TC_ACT_OK, err, size; |
| 89 | struct nlattr *tb[TCA_POLICE_MAX + 1]; | 89 | struct nlattr *tb[TCA_POLICE_MAX + 1]; |
| 90 | struct tc_police *parm; | 90 | struct tc_police *parm; |
| 91 | struct tcf_police *police; | 91 | struct tcf_police *police; |
| @@ -93,7 +93,6 @@ static int tcf_police_init(struct net *net, struct nlattr *nla, | |||
| 93 | struct tc_action_net *tn = net_generic(net, police_net_id); | 93 | struct tc_action_net *tn = net_generic(net, police_net_id); |
| 94 | struct tcf_police_params *new; | 94 | struct tcf_police_params *new; |
| 95 | bool exists = false; | 95 | bool exists = false; |
| 96 | int size; | ||
| 97 | 96 | ||
| 98 | if (nla == NULL) | 97 | if (nla == NULL) |
| 99 | return -EINVAL; | 98 | return -EINVAL; |
| @@ -160,6 +159,16 @@ static int tcf_police_init(struct net *net, struct nlattr *nla, | |||
| 160 | goto failure; | 159 | goto failure; |
| 161 | } | 160 | } |
| 162 | 161 | ||
| 162 | if (tb[TCA_POLICE_RESULT]) { | ||
| 163 | tcfp_result = nla_get_u32(tb[TCA_POLICE_RESULT]); | ||
| 164 | if (TC_ACT_EXT_CMP(tcfp_result, TC_ACT_GOTO_CHAIN)) { | ||
| 165 | NL_SET_ERR_MSG(extack, | ||
| 166 | "goto chain not allowed on fallback"); | ||
| 167 | err = -EINVAL; | ||
| 168 | goto failure; | ||
| 169 | } | ||
| 170 | } | ||
| 171 | |||
| 163 | new = kzalloc(sizeof(*new), GFP_KERNEL); | 172 | new = kzalloc(sizeof(*new), GFP_KERNEL); |
| 164 | if (unlikely(!new)) { | 173 | if (unlikely(!new)) { |
| 165 | err = -ENOMEM; | 174 | err = -ENOMEM; |
| @@ -167,6 +176,7 @@ static int tcf_police_init(struct net *net, struct nlattr *nla, | |||
| 167 | } | 176 | } |
| 168 | 177 | ||
| 169 | /* No failure allowed after this point */ | 178 | /* No failure allowed after this point */ |
| 179 | new->tcfp_result = tcfp_result; | ||
| 170 | new->tcfp_mtu = parm->mtu; | 180 | new->tcfp_mtu = parm->mtu; |
| 171 | if (!new->tcfp_mtu) { | 181 | if (!new->tcfp_mtu) { |
| 172 | new->tcfp_mtu = ~0; | 182 | new->tcfp_mtu = ~0; |
| @@ -196,16 +206,6 @@ static int tcf_police_init(struct net *net, struct nlattr *nla, | |||
| 196 | if (tb[TCA_POLICE_AVRATE]) | 206 | if (tb[TCA_POLICE_AVRATE]) |
| 197 | new->tcfp_ewma_rate = nla_get_u32(tb[TCA_POLICE_AVRATE]); | 207 | new->tcfp_ewma_rate = nla_get_u32(tb[TCA_POLICE_AVRATE]); |
| 198 | 208 | ||
| 199 | if (tb[TCA_POLICE_RESULT]) { | ||
| 200 | new->tcfp_result = nla_get_u32(tb[TCA_POLICE_RESULT]); | ||
| 201 | if (TC_ACT_EXT_CMP(new->tcfp_result, TC_ACT_GOTO_CHAIN)) { | ||
| 202 | NL_SET_ERR_MSG(extack, | ||
| 203 | "goto chain not allowed on fallback"); | ||
| 204 | err = -EINVAL; | ||
| 205 | goto failure; | ||
| 206 | } | ||
| 207 | } | ||
| 208 | |||
| 209 | spin_lock_bh(&police->tcf_lock); | 209 | spin_lock_bh(&police->tcf_lock); |
| 210 | spin_lock_bh(&police->tcfp_lock); | 210 | spin_lock_bh(&police->tcfp_lock); |
| 211 | police->tcfp_t_c = ktime_get_ns(); | 211 | police->tcfp_t_c = ktime_get_ns(); |
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index c6c327874abc..71312d7bd8f4 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c | |||
| @@ -1238,18 +1238,16 @@ static int fl_change(struct net *net, struct sk_buff *in_skb, | |||
| 1238 | if (err) | 1238 | if (err) |
| 1239 | goto errout_idr; | 1239 | goto errout_idr; |
| 1240 | 1240 | ||
| 1241 | if (!tc_skip_sw(fnew->flags)) { | 1241 | if (!fold && fl_lookup(fnew->mask, &fnew->mkey)) { |
| 1242 | if (!fold && fl_lookup(fnew->mask, &fnew->mkey)) { | 1242 | err = -EEXIST; |
| 1243 | err = -EEXIST; | 1243 | goto errout_mask; |
| 1244 | goto errout_mask; | ||
| 1245 | } | ||
| 1246 | |||
| 1247 | err = rhashtable_insert_fast(&fnew->mask->ht, &fnew->ht_node, | ||
| 1248 | fnew->mask->filter_ht_params); | ||
| 1249 | if (err) | ||
| 1250 | goto errout_mask; | ||
| 1251 | } | 1244 | } |
| 1252 | 1245 | ||
| 1246 | err = rhashtable_insert_fast(&fnew->mask->ht, &fnew->ht_node, | ||
| 1247 | fnew->mask->filter_ht_params); | ||
| 1248 | if (err) | ||
| 1249 | goto errout_mask; | ||
| 1250 | |||
| 1253 | if (!tc_skip_hw(fnew->flags)) { | 1251 | if (!tc_skip_hw(fnew->flags)) { |
| 1254 | err = fl_hw_replace_filter(tp, fnew, extack); | 1252 | err = fl_hw_replace_filter(tp, fnew, extack); |
| 1255 | if (err) | 1253 | if (err) |
| @@ -1303,9 +1301,8 @@ static int fl_delete(struct tcf_proto *tp, void *arg, bool *last, | |||
| 1303 | struct cls_fl_head *head = rtnl_dereference(tp->root); | 1301 | struct cls_fl_head *head = rtnl_dereference(tp->root); |
| 1304 | struct cls_fl_filter *f = arg; | 1302 | struct cls_fl_filter *f = arg; |
| 1305 | 1303 | ||
| 1306 | if (!tc_skip_sw(f->flags)) | 1304 | rhashtable_remove_fast(&f->mask->ht, &f->ht_node, |
| 1307 | rhashtable_remove_fast(&f->mask->ht, &f->ht_node, | 1305 | f->mask->filter_ht_params); |
| 1308 | f->mask->filter_ht_params); | ||
| 1309 | __fl_delete(tp, f, extack); | 1306 | __fl_delete(tp, f, extack); |
| 1310 | *last = list_empty(&head->masks); | 1307 | *last = list_empty(&head->masks); |
| 1311 | return 0; | 1308 | return 0; |
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index 2c38e3d07924..22cd46a60057 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c | |||
| @@ -431,6 +431,9 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch, | |||
| 431 | int count = 1; | 431 | int count = 1; |
| 432 | int rc = NET_XMIT_SUCCESS; | 432 | int rc = NET_XMIT_SUCCESS; |
| 433 | 433 | ||
| 434 | /* Do not fool qdisc_drop_all() */ | ||
| 435 | skb->prev = NULL; | ||
| 436 | |||
| 434 | /* Random duplication */ | 437 | /* Random duplication */ |
| 435 | if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor)) | 438 | if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor)) |
| 436 | ++count; | 439 | ++count; |
diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 6a28b96e779e..914750b819b2 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c | |||
| @@ -118,9 +118,6 @@ static struct sctp_association *sctp_association_init( | |||
| 118 | asoc->flowlabel = sp->flowlabel; | 118 | asoc->flowlabel = sp->flowlabel; |
| 119 | asoc->dscp = sp->dscp; | 119 | asoc->dscp = sp->dscp; |
| 120 | 120 | ||
| 121 | /* Initialize default path MTU. */ | ||
| 122 | asoc->pathmtu = sp->pathmtu; | ||
| 123 | |||
| 124 | /* Set association default SACK delay */ | 121 | /* Set association default SACK delay */ |
| 125 | asoc->sackdelay = msecs_to_jiffies(sp->sackdelay); | 122 | asoc->sackdelay = msecs_to_jiffies(sp->sackdelay); |
| 126 | asoc->sackfreq = sp->sackfreq; | 123 | asoc->sackfreq = sp->sackfreq; |
| @@ -252,6 +249,10 @@ static struct sctp_association *sctp_association_init( | |||
| 252 | 0, gfp)) | 249 | 0, gfp)) |
| 253 | goto fail_init; | 250 | goto fail_init; |
| 254 | 251 | ||
| 252 | /* Initialize default path MTU. */ | ||
| 253 | asoc->pathmtu = sp->pathmtu; | ||
| 254 | sctp_assoc_update_frag_point(asoc); | ||
| 255 | |||
| 255 | /* Assume that peer would support both address types unless we are | 256 | /* Assume that peer would support both address types unless we are |
| 256 | * told otherwise. | 257 | * told otherwise. |
| 257 | */ | 258 | */ |
| @@ -434,7 +435,7 @@ static void sctp_association_destroy(struct sctp_association *asoc) | |||
| 434 | 435 | ||
| 435 | WARN_ON(atomic_read(&asoc->rmem_alloc)); | 436 | WARN_ON(atomic_read(&asoc->rmem_alloc)); |
| 436 | 437 | ||
| 437 | kfree(asoc); | 438 | kfree_rcu(asoc, rcu); |
| 438 | SCTP_DBG_OBJCNT_DEC(assoc); | 439 | SCTP_DBG_OBJCNT_DEC(assoc); |
| 439 | } | 440 | } |
| 440 | 441 | ||
diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c index ce8087846f05..d2048de86e7c 100644 --- a/net/sctp/chunk.c +++ b/net/sctp/chunk.c | |||
| @@ -191,6 +191,12 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc, | |||
| 191 | * the packet | 191 | * the packet |
| 192 | */ | 192 | */ |
| 193 | max_data = asoc->frag_point; | 193 | max_data = asoc->frag_point; |
| 194 | if (unlikely(!max_data)) { | ||
| 195 | max_data = sctp_min_frag_point(sctp_sk(asoc->base.sk), | ||
| 196 | sctp_datachk_len(&asoc->stream)); | ||
| 197 | pr_warn_ratelimited("%s: asoc:%p frag_point is zero, forcing max_data to default minimum (%Zu)", | ||
| 198 | __func__, asoc, max_data); | ||
| 199 | } | ||
| 194 | 200 | ||
| 195 | /* If the peer requested that we authenticate DATA chunks | 201 | /* If the peer requested that we authenticate DATA chunks |
| 196 | * we need to account for bundling of the AUTH chunks along with | 202 | * we need to account for bundling of the AUTH chunks along with |
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 4a4fd1971255..f4ac6c592e13 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c | |||
| @@ -2462,6 +2462,9 @@ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk, | |||
| 2462 | asoc->c.sinit_max_instreams, gfp)) | 2462 | asoc->c.sinit_max_instreams, gfp)) |
| 2463 | goto clean_up; | 2463 | goto clean_up; |
| 2464 | 2464 | ||
| 2465 | /* Update frag_point when stream_interleave may get changed. */ | ||
| 2466 | sctp_assoc_update_frag_point(asoc); | ||
| 2467 | |||
| 2465 | if (!asoc->temp && sctp_assoc_set_id(asoc, gfp)) | 2468 | if (!asoc->temp && sctp_assoc_set_id(asoc, gfp)) |
| 2466 | goto clean_up; | 2469 | goto clean_up; |
| 2467 | 2470 | ||
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index bf618d1b41fd..b8cebd5a87e5 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
| @@ -3324,8 +3324,7 @@ static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned | |||
| 3324 | __u16 datasize = asoc ? sctp_datachk_len(&asoc->stream) : | 3324 | __u16 datasize = asoc ? sctp_datachk_len(&asoc->stream) : |
| 3325 | sizeof(struct sctp_data_chunk); | 3325 | sizeof(struct sctp_data_chunk); |
| 3326 | 3326 | ||
| 3327 | min_len = sctp_mtu_payload(sp, SCTP_DEFAULT_MINSEGMENT, | 3327 | min_len = sctp_min_frag_point(sp, datasize); |
| 3328 | datasize); | ||
| 3329 | max_len = SCTP_MAX_CHUNK_LEN - datasize; | 3328 | max_len = SCTP_MAX_CHUNK_LEN - datasize; |
| 3330 | 3329 | ||
| 3331 | if (val < min_len || val > max_len) | 3330 | if (val < min_len || val > max_len) |
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c index 12b3edf70a7b..1615e503f8e3 100644 --- a/net/wireless/mlme.c +++ b/net/wireless/mlme.c | |||
| @@ -272,11 +272,11 @@ void cfg80211_oper_and_ht_capa(struct ieee80211_ht_cap *ht_capa, | |||
| 272 | 272 | ||
| 273 | p1 = (u8*)(ht_capa); | 273 | p1 = (u8*)(ht_capa); |
| 274 | p2 = (u8*)(ht_capa_mask); | 274 | p2 = (u8*)(ht_capa_mask); |
| 275 | for (i = 0; i<sizeof(*ht_capa); i++) | 275 | for (i = 0; i < sizeof(*ht_capa); i++) |
| 276 | p1[i] &= p2[i]; | 276 | p1[i] &= p2[i]; |
| 277 | } | 277 | } |
| 278 | 278 | ||
| 279 | /* Do a logical ht_capa &= ht_capa_mask. */ | 279 | /* Do a logical vht_capa &= vht_capa_mask. */ |
| 280 | void cfg80211_oper_and_vht_capa(struct ieee80211_vht_cap *vht_capa, | 280 | void cfg80211_oper_and_vht_capa(struct ieee80211_vht_cap *vht_capa, |
| 281 | const struct ieee80211_vht_cap *vht_capa_mask) | 281 | const struct ieee80211_vht_cap *vht_capa_mask) |
| 282 | { | 282 | { |
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 744b5851bbf9..8d763725498c 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
| @@ -7870,6 +7870,7 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info) | |||
| 7870 | } | 7870 | } |
| 7871 | 7871 | ||
| 7872 | memset(¶ms, 0, sizeof(params)); | 7872 | memset(¶ms, 0, sizeof(params)); |
| 7873 | params.beacon_csa.ftm_responder = -1; | ||
| 7873 | 7874 | ||
| 7874 | if (!info->attrs[NL80211_ATTR_WIPHY_FREQ] || | 7875 | if (!info->attrs[NL80211_ATTR_WIPHY_FREQ] || |
| 7875 | !info->attrs[NL80211_ATTR_CH_SWITCH_COUNT]) | 7876 | !info->attrs[NL80211_ATTR_CH_SWITCH_COUNT]) |
diff --git a/net/wireless/sme.c b/net/wireless/sme.c index d536b07582f8..f741d8376a46 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c | |||
| @@ -642,11 +642,15 @@ static bool cfg80211_is_all_idle(void) | |||
| 642 | * All devices must be idle as otherwise if you are actively | 642 | * All devices must be idle as otherwise if you are actively |
| 643 | * scanning some new beacon hints could be learned and would | 643 | * scanning some new beacon hints could be learned and would |
| 644 | * count as new regulatory hints. | 644 | * count as new regulatory hints. |
| 645 | * Also if there is any other active beaconing interface we | ||
| 646 | * need not issue a disconnect hint and reset any info such | ||
| 647 | * as chan dfs state, etc. | ||
| 645 | */ | 648 | */ |
| 646 | list_for_each_entry(rdev, &cfg80211_rdev_list, list) { | 649 | list_for_each_entry(rdev, &cfg80211_rdev_list, list) { |
| 647 | list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) { | 650 | list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) { |
| 648 | wdev_lock(wdev); | 651 | wdev_lock(wdev); |
| 649 | if (wdev->conn || wdev->current_bss) | 652 | if (wdev->conn || wdev->current_bss || |
| 653 | cfg80211_beaconing_iface_active(wdev)) | ||
| 650 | is_all_idle = false; | 654 | is_all_idle = false; |
| 651 | wdev_unlock(wdev); | 655 | wdev_unlock(wdev); |
| 652 | } | 656 | } |
| @@ -1171,6 +1175,8 @@ int cfg80211_connect(struct cfg80211_registered_device *rdev, | |||
| 1171 | 1175 | ||
| 1172 | cfg80211_oper_and_ht_capa(&connect->ht_capa_mask, | 1176 | cfg80211_oper_and_ht_capa(&connect->ht_capa_mask, |
| 1173 | rdev->wiphy.ht_capa_mod_mask); | 1177 | rdev->wiphy.ht_capa_mod_mask); |
| 1178 | cfg80211_oper_and_vht_capa(&connect->vht_capa_mask, | ||
| 1179 | rdev->wiphy.vht_capa_mod_mask); | ||
| 1174 | 1180 | ||
| 1175 | if (connkeys && connkeys->def >= 0) { | 1181 | if (connkeys && connkeys->def >= 0) { |
| 1176 | int idx; | 1182 | int idx; |
diff --git a/net/wireless/util.c b/net/wireless/util.c index ef14d80ca03e..d473bd135da8 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c | |||
| @@ -1421,6 +1421,8 @@ size_t ieee80211_ie_split_ric(const u8 *ies, size_t ielen, | |||
| 1421 | ies[pos + ext], | 1421 | ies[pos + ext], |
| 1422 | ext == 2)) | 1422 | ext == 2)) |
| 1423 | pos = skip_ie(ies, ielen, pos); | 1423 | pos = skip_ie(ies, ielen, pos); |
| 1424 | else | ||
| 1425 | break; | ||
| 1424 | } | 1426 | } |
| 1425 | } else { | 1427 | } else { |
| 1426 | pos = skip_ie(ies, ielen, pos); | 1428 | pos = skip_ie(ies, ielen, pos); |
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c index d49aa79b7997..5121729b8b63 100644 --- a/net/x25/af_x25.c +++ b/net/x25/af_x25.c | |||
| @@ -100,7 +100,7 @@ int x25_parse_address_block(struct sk_buff *skb, | |||
| 100 | } | 100 | } |
| 101 | 101 | ||
| 102 | len = *skb->data; | 102 | len = *skb->data; |
| 103 | needed = 1 + (len >> 4) + (len & 0x0f); | 103 | needed = 1 + ((len >> 4) + (len & 0x0f) + 1) / 2; |
| 104 | 104 | ||
| 105 | if (!pskb_may_pull(skb, needed)) { | 105 | if (!pskb_may_pull(skb, needed)) { |
| 106 | /* packet is too short to hold the addresses it claims | 106 | /* packet is too short to hold the addresses it claims |
| @@ -288,7 +288,7 @@ static struct sock *x25_find_listener(struct x25_address *addr, | |||
| 288 | sk_for_each(s, &x25_list) | 288 | sk_for_each(s, &x25_list) |
| 289 | if ((!strcmp(addr->x25_addr, | 289 | if ((!strcmp(addr->x25_addr, |
| 290 | x25_sk(s)->source_addr.x25_addr) || | 290 | x25_sk(s)->source_addr.x25_addr) || |
| 291 | !strcmp(addr->x25_addr, | 291 | !strcmp(x25_sk(s)->source_addr.x25_addr, |
| 292 | null_x25_address.x25_addr)) && | 292 | null_x25_address.x25_addr)) && |
| 293 | s->sk_state == TCP_LISTEN) { | 293 | s->sk_state == TCP_LISTEN) { |
| 294 | /* | 294 | /* |
| @@ -688,11 +688,15 @@ static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | |||
| 688 | goto out; | 688 | goto out; |
| 689 | } | 689 | } |
| 690 | 690 | ||
| 691 | len = strlen(addr->sx25_addr.x25_addr); | 691 | /* check for the null_x25_address */ |
| 692 | for (i = 0; i < len; i++) { | 692 | if (strcmp(addr->sx25_addr.x25_addr, null_x25_address.x25_addr)) { |
| 693 | if (!isdigit(addr->sx25_addr.x25_addr[i])) { | 693 | |
| 694 | rc = -EINVAL; | 694 | len = strlen(addr->sx25_addr.x25_addr); |
| 695 | goto out; | 695 | for (i = 0; i < len; i++) { |
| 696 | if (!isdigit(addr->sx25_addr.x25_addr[i])) { | ||
| 697 | rc = -EINVAL; | ||
| 698 | goto out; | ||
| 699 | } | ||
| 696 | } | 700 | } |
| 697 | } | 701 | } |
| 698 | 702 | ||
diff --git a/net/x25/x25_in.c b/net/x25/x25_in.c index 3c12cae32001..afb26221d8a8 100644 --- a/net/x25/x25_in.c +++ b/net/x25/x25_in.c | |||
| @@ -142,6 +142,15 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp | |||
| 142 | sk->sk_state_change(sk); | 142 | sk->sk_state_change(sk); |
| 143 | break; | 143 | break; |
| 144 | } | 144 | } |
| 145 | case X25_CALL_REQUEST: | ||
| 146 | /* call collision */ | ||
| 147 | x25->causediag.cause = 0x01; | ||
| 148 | x25->causediag.diagnostic = 0x48; | ||
| 149 | |||
| 150 | x25_write_internal(sk, X25_CLEAR_REQUEST); | ||
| 151 | x25_disconnect(sk, EISCONN, 0x01, 0x48); | ||
| 152 | break; | ||
| 153 | |||
| 145 | case X25_CLEAR_REQUEST: | 154 | case X25_CLEAR_REQUEST: |
| 146 | if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2)) | 155 | if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2)) |
| 147 | goto out_clear; | 156 | goto out_clear; |
diff --git a/tools/bpf/bpftool/btf_dumper.c b/tools/bpf/bpftool/btf_dumper.c index 55bc512a1831..e4e6e2b3fd84 100644 --- a/tools/bpf/bpftool/btf_dumper.c +++ b/tools/bpf/bpftool/btf_dumper.c | |||
| @@ -32,7 +32,7 @@ static void btf_dumper_ptr(const void *data, json_writer_t *jw, | |||
| 32 | } | 32 | } |
| 33 | 33 | ||
| 34 | static int btf_dumper_modifier(const struct btf_dumper *d, __u32 type_id, | 34 | static int btf_dumper_modifier(const struct btf_dumper *d, __u32 type_id, |
| 35 | const void *data) | 35 | __u8 bit_offset, const void *data) |
| 36 | { | 36 | { |
| 37 | int actual_type_id; | 37 | int actual_type_id; |
| 38 | 38 | ||
| @@ -40,7 +40,7 @@ static int btf_dumper_modifier(const struct btf_dumper *d, __u32 type_id, | |||
| 40 | if (actual_type_id < 0) | 40 | if (actual_type_id < 0) |
| 41 | return actual_type_id; | 41 | return actual_type_id; |
| 42 | 42 | ||
| 43 | return btf_dumper_do_type(d, actual_type_id, 0, data); | 43 | return btf_dumper_do_type(d, actual_type_id, bit_offset, data); |
| 44 | } | 44 | } |
| 45 | 45 | ||
| 46 | static void btf_dumper_enum(const void *data, json_writer_t *jw) | 46 | static void btf_dumper_enum(const void *data, json_writer_t *jw) |
| @@ -237,7 +237,7 @@ static int btf_dumper_do_type(const struct btf_dumper *d, __u32 type_id, | |||
| 237 | case BTF_KIND_VOLATILE: | 237 | case BTF_KIND_VOLATILE: |
| 238 | case BTF_KIND_CONST: | 238 | case BTF_KIND_CONST: |
| 239 | case BTF_KIND_RESTRICT: | 239 | case BTF_KIND_RESTRICT: |
| 240 | return btf_dumper_modifier(d, type_id, data); | 240 | return btf_dumper_modifier(d, type_id, bit_offset, data); |
| 241 | default: | 241 | default: |
| 242 | jsonw_printf(d->jw, "(unsupported-kind"); | 242 | jsonw_printf(d->jw, "(unsupported-kind"); |
| 243 | return -EINVAL; | 243 | return -EINVAL; |
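An illustrative (hypothetical) case the bit_offset propagation appears to fix: a bitfield member whose type sits behind a CV modifier. With the old code, btf_dumper_modifier() re-entered the dumper with bit_offset 0, so 'b' below would be decoded from bit 0 and report the value of 'a' instead:

struct sample {
	unsigned int	   a:4;		/* bits 0..3 */
	const unsigned int b:4;		/* bits 4..7; type is CONST -> INT in BTF */
};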
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 852dc17ab47a..72c453a8bf50 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h | |||
| @@ -2170,7 +2170,7 @@ union bpf_attr { | |||
| 2170 | * Return | 2170 | * Return |
| 2171 | * 0 on success, or a negative error in case of failure. | 2171 | * 0 on success, or a negative error in case of failure. |
| 2172 | * | 2172 | * |
| 2173 | * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u32 netns, u64 flags) | 2173 | * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags) |
| 2174 | * Description | 2174 | * Description |
| 2175 | * Look for TCP socket matching *tuple*, optionally in a child | 2175 | * Look for TCP socket matching *tuple*, optionally in a child |
| 2176 | * network namespace *netns*. The return value must be checked, | 2176 | * network namespace *netns*. The return value must be checked, |
| @@ -2187,12 +2187,14 @@ union bpf_attr { | |||
| 2187 | * **sizeof**\ (*tuple*\ **->ipv6**) | 2187 | * **sizeof**\ (*tuple*\ **->ipv6**) |
| 2188 | * Look for an IPv6 socket. | 2188 | * Look for an IPv6 socket. |
| 2189 | * | 2189 | * |
| 2190 | * If the *netns* is zero, then the socket lookup table in the | 2190 | * If the *netns* is a negative signed 32-bit integer, then the |
| 2191 | * netns associated with the *ctx* will be used. For the TC hooks, | 2191 | * socket lookup table in the netns associated with the *ctx* will |
| 2192 | * this in the netns of the device in the skb. For socket hooks, | 2192 | * will be used. For the TC hooks, this is the netns of the device |
| 2193 | * this in the netns of the socket. If *netns* is non-zero, then | 2193 | * in the skb. For socket hooks, this is the netns of the socket. |
| 2194 | * it specifies the ID of the netns relative to the netns | 2194 | * If *netns* is any other signed 32-bit value greater than or |
| 2195 | * associated with the *ctx*. | 2195 | * equal to zero then it specifies the ID of the netns relative to |
| 2196 | * the netns associated with the *ctx*. *netns* values beyond the | ||
| 2197 | * range of 32-bit integers are reserved for future use. | ||
| 2196 | * | 2198 | * |
| 2197 | * All values for *flags* are reserved for future usage, and must | 2199 | * All values for *flags* are reserved for future usage, and must |
| 2198 | * be left at zero. | 2200 | * be left at zero. |
| @@ -2201,8 +2203,10 @@ union bpf_attr { | |||
| 2201 | * **CONFIG_NET** configuration option. | 2203 | * **CONFIG_NET** configuration option. |
| 2202 | * Return | 2204 | * Return |
| 2203 | * Pointer to *struct bpf_sock*, or NULL in case of failure. | 2205 | * Pointer to *struct bpf_sock*, or NULL in case of failure. |
| 2206 | * For sockets with reuseport option, the *struct bpf_sock* | ||
| 2207 | * result is from reuse->socks[] using the hash of the tuple. | ||
| 2204 | * | 2208 | * |
| 2205 | * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u32 netns, u64 flags) | 2209 | * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags) |
| 2206 | * Description | 2210 | * Description |
| 2207 | * Look for UDP socket matching *tuple*, optionally in a child | 2211 | * Look for UDP socket matching *tuple*, optionally in a child |
| 2208 | * network namespace *netns*. The return value must be checked, | 2212 | * network namespace *netns*. The return value must be checked, |
| @@ -2219,12 +2223,14 @@ union bpf_attr { | |||
| 2219 | * **sizeof**\ (*tuple*\ **->ipv6**) | 2223 | * **sizeof**\ (*tuple*\ **->ipv6**) |
| 2220 | * Look for an IPv6 socket. | 2224 | * Look for an IPv6 socket. |
| 2221 | * | 2225 | * |
| 2222 | * If the *netns* is zero, then the socket lookup table in the | 2226 | * If the *netns* is a negative signed 32-bit integer, then the |
| 2223 | * netns associated with the *ctx* will be used. For the TC hooks, | 2227 | * socket lookup table in the netns associated with the *ctx* will |
| 2224 | * this in the netns of the device in the skb. For socket hooks, | 2228 | * will be used. For the TC hooks, this is the netns of the device |
| 2225 | * this in the netns of the socket. If *netns* is non-zero, then | 2229 | * in the skb. For socket hooks, this is the netns of the socket. |
| 2226 | * it specifies the ID of the netns relative to the netns | 2230 | * If *netns* is any other signed 32-bit value greater than or |
| 2227 | * associated with the *ctx*. | 2231 | * equal to zero then it specifies the ID of the netns relative to |
| 2232 | * the netns associated with the *ctx*. *netns* values beyond the | ||
| 2233 | * range of 32-bit integers are reserved for future use. | ||
| 2228 | * | 2234 | * |
| 2229 | * All values for *flags* are reserved for future usage, and must | 2235 | * All values for *flags* are reserved for future usage, and must |
| 2230 | * be left at zero. | 2236 | * be left at zero. |
| @@ -2233,6 +2239,8 @@ union bpf_attr { | |||
| 2233 | * **CONFIG_NET** configuration option. | 2239 | * **CONFIG_NET** configuration option. |
| 2234 | * Return | 2240 | * Return |
| 2235 | * Pointer to *struct bpf_sock*, or NULL in case of failure. | 2241 | * Pointer to *struct bpf_sock*, or NULL in case of failure. |
| 2242 | * For sockets with reuseport option, the *struct bpf_sock* | ||
| 2243 | * result is from reuse->socks[] using the hash of the tuple. | ||
| 2236 | * | 2244 | * |
| 2237 | * int bpf_sk_release(struct bpf_sock *sk) | 2245 | * int bpf_sk_release(struct bpf_sock *sk) |
| 2238 | * Description | 2246 | * Description |
| @@ -2405,6 +2413,9 @@ enum bpf_func_id { | |||
| 2405 | /* BPF_FUNC_perf_event_output for sk_buff input context. */ | 2413 | /* BPF_FUNC_perf_event_output for sk_buff input context. */ |
| 2406 | #define BPF_F_CTXLEN_MASK (0xfffffULL << 32) | 2414 | #define BPF_F_CTXLEN_MASK (0xfffffULL << 32) |
| 2407 | 2415 | ||
| 2416 | /* Current network namespace */ | ||
| 2417 | #define BPF_F_CURRENT_NETNS (-1L) | ||
| 2418 | |||
| 2408 | /* Mode for BPF_FUNC_skb_adjust_room helper. */ | 2419 | /* Mode for BPF_FUNC_skb_adjust_room helper. */ |
| 2409 | enum bpf_adj_room_mode { | 2420 | enum bpf_adj_room_mode { |
| 2410 | BPF_ADJ_ROOM_NET, | 2421 | BPF_ADJ_ROOM_NET, |
| @@ -2422,6 +2433,12 @@ enum bpf_lwt_encap_mode { | |||
| 2422 | BPF_LWT_ENCAP_SEG6_INLINE | 2433 | BPF_LWT_ENCAP_SEG6_INLINE |
| 2423 | }; | 2434 | }; |
| 2424 | 2435 | ||
| 2436 | #define __bpf_md_ptr(type, name) \ | ||
| 2437 | union { \ | ||
| 2438 | type name; \ | ||
| 2439 | __u64 :64; \ | ||
| 2440 | } __attribute__((aligned(8))) | ||
| 2441 | |||
| 2425 | /* user accessible mirror of in-kernel sk_buff. | 2442 | /* user accessible mirror of in-kernel sk_buff. |
| 2426 | * new fields can only be added to the end of this structure | 2443 | * new fields can only be added to the end of this structure |
| 2427 | */ | 2444 | */ |
| @@ -2456,7 +2473,7 @@ struct __sk_buff { | |||
| 2456 | /* ... here. */ | 2473 | /* ... here. */ |
| 2457 | 2474 | ||
| 2458 | __u32 data_meta; | 2475 | __u32 data_meta; |
| 2459 | struct bpf_flow_keys *flow_keys; | 2476 | __bpf_md_ptr(struct bpf_flow_keys *, flow_keys); |
| 2460 | }; | 2477 | }; |
| 2461 | 2478 | ||
| 2462 | struct bpf_tunnel_key { | 2479 | struct bpf_tunnel_key { |
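The __bpf_md_ptr() wrapper introduced above, and applied to flow_keys in this hunk, pads pointer-typed fields of the user-visible context structs to a full 64 bits and forces 8-byte alignment, presumably so that 32-bit and 64-bit programs see identical field offsets. As an illustration (my own expansion of the macro, not text from the patch), the flow_keys member now reads:

        /* __bpf_md_ptr(struct bpf_flow_keys *, flow_keys) expands to: */
        union {
                struct bpf_flow_keys *flow_keys;
                __u64 :64;      /* unnamed 64-bit bitfield keeps the union 8 bytes wide on 32-bit targets */
        } __attribute__((aligned(8)));

Because the union is anonymous, programs keep accessing the field as skb->flow_keys.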
| @@ -2572,8 +2589,8 @@ enum sk_action { | |||
| 2572 | * be added to the end of this structure | 2589 | * be added to the end of this structure |
| 2573 | */ | 2590 | */ |
| 2574 | struct sk_msg_md { | 2591 | struct sk_msg_md { |
| 2575 | void *data; | 2592 | __bpf_md_ptr(void *, data); |
| 2576 | void *data_end; | 2593 | __bpf_md_ptr(void *, data_end); |
| 2577 | 2594 | ||
| 2578 | __u32 family; | 2595 | __u32 family; |
| 2579 | __u32 remote_ip4; /* Stored in network byte order */ | 2596 | __u32 remote_ip4; /* Stored in network byte order */ |
| @@ -2589,8 +2606,9 @@ struct sk_reuseport_md { | |||
| 2589 | * Start of directly accessible data. It begins from | 2606 | * Start of directly accessible data. It begins from |
| 2590 | * the tcp/udp header. | 2607 | * the tcp/udp header. |
| 2591 | */ | 2608 | */ |
| 2592 | void *data; | 2609 | __bpf_md_ptr(void *, data); |
| 2593 | void *data_end; /* End of directly accessible data */ | 2610 | /* End of directly accessible data */ |
| 2611 | __bpf_md_ptr(void *, data_end); | ||
| 2594 | /* | 2612 | /* |
| 2595 | * Total length of packet (starting from the tcp/udp header). | 2613 | * Total length of packet (starting from the tcp/udp header). |
| 2596 | * Note that the directly accessible bytes (data_end - data) | 2614 | * Note that the directly accessible bytes (data_end - data) |
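The documentation hunks above widen *netns* from u32 to u64 and add BPF_F_CURRENT_NETNS (-1) as the sentinel meaning "use the netns already associated with the *ctx*". A minimal sketch of a TC classifier following that convention, assuming the helper declarations from the selftests' bpf_helpers.h shown in the next hunk (program name and section are illustrative only):

        #include <linux/bpf.h>
        #include <linux/pkt_cls.h>
        #include "bpf_helpers.h"

        SEC("classifier/current_netns_lookup")
        int lookup_in_current_netns(struct __sk_buff *skb)
        {
                struct bpf_sock_tuple tuple = {};       /* a real program would parse this from the packet */
                struct bpf_sock *sk;

                /* netns = BPF_F_CURRENT_NETNS: look up in the netns of the skb's device */
                sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4),
                                       BPF_F_CURRENT_NETNS, 0);
                if (sk)
                        bpf_sk_release(sk);             /* an acquired socket reference must always be released */
                return sk ? TC_ACT_OK : TC_ACT_UNSPEC;
        }

        char _license[] SEC("license") = "GPL";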
diff --git a/tools/testing/selftests/bpf/bpf_helpers.h b/tools/testing/selftests/bpf/bpf_helpers.h index 686e57ce40f4..efb6c13ab0de 100644 --- a/tools/testing/selftests/bpf/bpf_helpers.h +++ b/tools/testing/selftests/bpf/bpf_helpers.h | |||
| @@ -154,12 +154,12 @@ static unsigned long long (*bpf_skb_ancestor_cgroup_id)(void *ctx, int level) = | |||
| 154 | (void *) BPF_FUNC_skb_ancestor_cgroup_id; | 154 | (void *) BPF_FUNC_skb_ancestor_cgroup_id; |
| 155 | static struct bpf_sock *(*bpf_sk_lookup_tcp)(void *ctx, | 155 | static struct bpf_sock *(*bpf_sk_lookup_tcp)(void *ctx, |
| 156 | struct bpf_sock_tuple *tuple, | 156 | struct bpf_sock_tuple *tuple, |
| 157 | int size, unsigned int netns_id, | 157 | int size, unsigned long long netns_id, |
| 158 | unsigned long long flags) = | 158 | unsigned long long flags) = |
| 159 | (void *) BPF_FUNC_sk_lookup_tcp; | 159 | (void *) BPF_FUNC_sk_lookup_tcp; |
| 160 | static struct bpf_sock *(*bpf_sk_lookup_udp)(void *ctx, | 160 | static struct bpf_sock *(*bpf_sk_lookup_udp)(void *ctx, |
| 161 | struct bpf_sock_tuple *tuple, | 161 | struct bpf_sock_tuple *tuple, |
| 162 | int size, unsigned int netns_id, | 162 | int size, unsigned long long netns_id, |
| 163 | unsigned long long flags) = | 163 | unsigned long long flags) = |
| 164 | (void *) BPF_FUNC_sk_lookup_udp; | 164 | (void *) BPF_FUNC_sk_lookup_udp; |
| 165 | static int (*bpf_sk_release)(struct bpf_sock *sk) = | 165 | static int (*bpf_sk_release)(struct bpf_sock *sk) = |
diff --git a/tools/testing/selftests/bpf/test_btf.c b/tools/testing/selftests/bpf/test_btf.c index f42b3396d622..38e1cbaaffdb 100644 --- a/tools/testing/selftests/bpf/test_btf.c +++ b/tools/testing/selftests/bpf/test_btf.c | |||
| @@ -432,11 +432,11 @@ static struct btf_raw_test raw_tests[] = { | |||
| 432 | /* const void* */ /* [3] */ | 432 | /* const void* */ /* [3] */ |
| 433 | BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 2), | 433 | BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 2), |
| 434 | /* typedef const void * const_void_ptr */ | 434 | /* typedef const void * const_void_ptr */ |
| 435 | BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 3), | 435 | BTF_TYPEDEF_ENC(NAME_TBD, 3), /* [4] */ |
| 436 | /* struct A { */ /* [4] */ | 436 | /* struct A { */ /* [5] */ |
| 437 | BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), sizeof(void *)), | 437 | BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), sizeof(void *)), |
| 438 | /* const_void_ptr m; */ | 438 | /* const_void_ptr m; */ |
| 439 | BTF_MEMBER_ENC(NAME_TBD, 3, 0), | 439 | BTF_MEMBER_ENC(NAME_TBD, 4, 0), |
| 440 | /* } */ | 440 | /* } */ |
| 441 | BTF_END_RAW, | 441 | BTF_END_RAW, |
| 442 | }, | 442 | }, |
| @@ -494,10 +494,10 @@ static struct btf_raw_test raw_tests[] = { | |||
| 494 | BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 0), | 494 | BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 0), |
| 495 | /* const void* */ /* [3] */ | 495 | /* const void* */ /* [3] */ |
| 496 | BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 2), | 496 | BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 2), |
| 497 | /* typedef const void * const_void_ptr */ /* [4] */ | 497 | /* typedef const void * const_void_ptr */ |
| 498 | BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 3), | 498 | BTF_TYPEDEF_ENC(NAME_TBD, 3), /* [4] */ |
| 499 | /* const_void_ptr[4] */ /* [5] */ | 499 | /* const_void_ptr[4] */ |
| 500 | BTF_TYPE_ARRAY_ENC(3, 1, 4), | 500 | BTF_TYPE_ARRAY_ENC(4, 1, 4), /* [5] */ |
| 501 | BTF_END_RAW, | 501 | BTF_END_RAW, |
| 502 | }, | 502 | }, |
| 503 | .str_sec = "\0const_void_ptr", | 503 | .str_sec = "\0const_void_ptr", |
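The two fixes above stop encoding the const_void_ptr typedef as a BTF_KIND_PTR entry and switch it to BTF_TYPEDEF_ENC, then point the struct member (type id 4 instead of 3) and the array element type at the typedef. BTF_TYPEDEF_ENC is presumably a thin wrapper of roughly this shape (hypothetical reconstruction, not quoted from test_btf.c):

        #define BTF_TYPEDEF_ENC(name, type) \
                BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0), type)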
| @@ -1293,6 +1293,367 @@ static struct btf_raw_test raw_tests[] = { | |||
| 1293 | }, | 1293 | }, |
| 1294 | 1294 | ||
| 1295 | { | 1295 | { |
| 1296 | .descr = "typedef (invalid name, name_off = 0)", | ||
| 1297 | .raw_types = { | ||
| 1298 | BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ | ||
| 1299 | BTF_TYPEDEF_ENC(0, 1), /* [2] */ | ||
| 1300 | BTF_END_RAW, | ||
| 1301 | }, | ||
| 1302 | .str_sec = "\0__int", | ||
| 1303 | .str_sec_size = sizeof("\0__int"), | ||
| 1304 | .map_type = BPF_MAP_TYPE_ARRAY, | ||
| 1305 | .map_name = "typedef_check_btf", | ||
| 1306 | .key_size = sizeof(int), | ||
| 1307 | .value_size = sizeof(int), | ||
| 1308 | .key_type_id = 1, | ||
| 1309 | .value_type_id = 1, | ||
| 1310 | .max_entries = 4, | ||
| 1311 | .btf_load_err = true, | ||
| 1312 | .err_str = "Invalid name", | ||
| 1313 | }, | ||
| 1314 | |||
| 1315 | { | ||
| 1316 | .descr = "typedef (invalid name, invalid identifier)", | ||
| 1317 | .raw_types = { | ||
| 1318 | BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ | ||
| 1319 | BTF_TYPEDEF_ENC(NAME_TBD, 1), /* [2] */ | ||
| 1320 | BTF_END_RAW, | ||
| 1321 | }, | ||
| 1322 | .str_sec = "\0__!int", | ||
| 1323 | .str_sec_size = sizeof("\0__!int"), | ||
| 1324 | .map_type = BPF_MAP_TYPE_ARRAY, | ||
| 1325 | .map_name = "typedef_check_btf", | ||
| 1326 | .key_size = sizeof(int), | ||
| 1327 | .value_size = sizeof(int), | ||
| 1328 | .key_type_id = 1, | ||
| 1329 | .value_type_id = 1, | ||
| 1330 | .max_entries = 4, | ||
| 1331 | .btf_load_err = true, | ||
| 1332 | .err_str = "Invalid name", | ||
| 1333 | }, | ||
| 1334 | |||
| 1335 | { | ||
| 1336 | .descr = "ptr type (invalid name, name_off <> 0)", | ||
| 1337 | .raw_types = { | ||
| 1338 | BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ | ||
| 1339 | BTF_TYPE_ENC(NAME_TBD, | ||
| 1340 | BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 1), /* [2] */ | ||
| 1341 | BTF_END_RAW, | ||
| 1342 | }, | ||
| 1343 | .str_sec = "\0__int", | ||
| 1344 | .str_sec_size = sizeof("\0__int"), | ||
| 1345 | .map_type = BPF_MAP_TYPE_ARRAY, | ||
| 1346 | .map_name = "ptr_type_check_btf", | ||
| 1347 | .key_size = sizeof(int), | ||
| 1348 | .value_size = sizeof(int), | ||
| 1349 | .key_type_id = 1, | ||
| 1350 | .value_type_id = 1, | ||
| 1351 | .max_entries = 4, | ||
| 1352 | .btf_load_err = true, | ||
| 1353 | .err_str = "Invalid name", | ||
| 1354 | }, | ||
| 1355 | |||
| 1356 | { | ||
| 1357 | .descr = "volatile type (invalid name, name_off <> 0)", | ||
| 1358 | .raw_types = { | ||
| 1359 | BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ | ||
| 1360 | BTF_TYPE_ENC(NAME_TBD, | ||
| 1361 | BTF_INFO_ENC(BTF_KIND_VOLATILE, 0, 0), 1), /* [2] */ | ||
| 1362 | BTF_END_RAW, | ||
| 1363 | }, | ||
| 1364 | .str_sec = "\0__int", | ||
| 1365 | .str_sec_size = sizeof("\0__int"), | ||
| 1366 | .map_type = BPF_MAP_TYPE_ARRAY, | ||
| 1367 | .map_name = "volatile_type_check_btf", | ||
| 1368 | .key_size = sizeof(int), | ||
| 1369 | .value_size = sizeof(int), | ||
| 1370 | .key_type_id = 1, | ||
| 1371 | .value_type_id = 1, | ||
| 1372 | .max_entries = 4, | ||
| 1373 | .btf_load_err = true, | ||
| 1374 | .err_str = "Invalid name", | ||
| 1375 | }, | ||
| 1376 | |||
| 1377 | { | ||
| 1378 | .descr = "const type (invalid name, name_off <> 0)", | ||
| 1379 | .raw_types = { | ||
| 1380 | BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ | ||
| 1381 | BTF_TYPE_ENC(NAME_TBD, | ||
| 1382 | BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 1), /* [2] */ | ||
| 1383 | BTF_END_RAW, | ||
| 1384 | }, | ||
| 1385 | .str_sec = "\0__int", | ||
| 1386 | .str_sec_size = sizeof("\0__int"), | ||
| 1387 | .map_type = BPF_MAP_TYPE_ARRAY, | ||
| 1388 | .map_name = "const_type_check_btf", | ||
| 1389 | .key_size = sizeof(int), | ||
| 1390 | .value_size = sizeof(int), | ||
| 1391 | .key_type_id = 1, | ||
| 1392 | .value_type_id = 1, | ||
| 1393 | .max_entries = 4, | ||
| 1394 | .btf_load_err = true, | ||
| 1395 | .err_str = "Invalid name", | ||
| 1396 | }, | ||
| 1397 | |||
| 1398 | { | ||
| 1399 | .descr = "restrict type (invalid name, name_off <> 0)", | ||
| 1400 | .raw_types = { | ||
| 1401 | BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ | ||
| 1402 | BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 1), /* [2] */ | ||
| 1403 | BTF_TYPE_ENC(NAME_TBD, | ||
| 1404 | BTF_INFO_ENC(BTF_KIND_RESTRICT, 0, 0), 2), /* [3] */ | ||
| 1405 | BTF_END_RAW, | ||
| 1406 | }, | ||
| 1407 | .str_sec = "\0__int", | ||
| 1408 | .str_sec_size = sizeof("\0__int"), | ||
| 1409 | .map_type = BPF_MAP_TYPE_ARRAY, | ||
| 1410 | .map_name = "restrict_type_check_btf", | ||
| 1411 | .key_size = sizeof(int), | ||
| 1412 | .value_size = sizeof(int), | ||
| 1413 | .key_type_id = 1, | ||
| 1414 | .value_type_id = 1, | ||
| 1415 | .max_entries = 4, | ||
| 1416 | .btf_load_err = true, | ||
| 1417 | .err_str = "Invalid name", | ||
| 1418 | }, | ||
| 1419 | |||
| 1420 | { | ||
| 1421 | .descr = "fwd type (invalid name, name_off = 0)", | ||
| 1422 | .raw_types = { | ||
| 1423 | BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ | ||
| 1424 | BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FWD, 0, 0), 0), /* [2] */ | ||
| 1425 | BTF_END_RAW, | ||
| 1426 | }, | ||
| 1427 | .str_sec = "\0__skb", | ||
| 1428 | .str_sec_size = sizeof("\0__skb"), | ||
| 1429 | .map_type = BPF_MAP_TYPE_ARRAY, | ||
| 1430 | .map_name = "fwd_type_check_btf", | ||
| 1431 | .key_size = sizeof(int), | ||
| 1432 | .value_size = sizeof(int), | ||
| 1433 | .key_type_id = 1, | ||
| 1434 | .value_type_id = 1, | ||
| 1435 | .max_entries = 4, | ||
| 1436 | .btf_load_err = true, | ||
| 1437 | .err_str = "Invalid name", | ||
| 1438 | }, | ||
| 1439 | |||
| 1440 | { | ||
| 1441 | .descr = "fwd type (invalid name, invalid identifier)", | ||
| 1442 | .raw_types = { | ||
| 1443 | BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ | ||
| 1444 | BTF_TYPE_ENC(NAME_TBD, | ||
| 1445 | BTF_INFO_ENC(BTF_KIND_FWD, 0, 0), 0), /* [2] */ | ||
| 1446 | BTF_END_RAW, | ||
| 1447 | }, | ||
| 1448 | .str_sec = "\0__!skb", | ||
| 1449 | .str_sec_size = sizeof("\0__!skb"), | ||
| 1450 | .map_type = BPF_MAP_TYPE_ARRAY, | ||
| 1451 | .map_name = "fwd_type_check_btf", | ||
| 1452 | .key_size = sizeof(int), | ||
| 1453 | .value_size = sizeof(int), | ||
| 1454 | .key_type_id = 1, | ||
| 1455 | .value_type_id = 1, | ||
| 1456 | .max_entries = 4, | ||
| 1457 | .btf_load_err = true, | ||
| 1458 | .err_str = "Invalid name", | ||
| 1459 | }, | ||
| 1460 | |||
| 1461 | { | ||
| 1462 | .descr = "array type (invalid name, name_off <> 0)", | ||
| 1463 | .raw_types = { | ||
| 1464 | BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ | ||
| 1465 | BTF_TYPE_ENC(NAME_TBD, | ||
| 1466 | BTF_INFO_ENC(BTF_KIND_ARRAY, 0, 0), 0), /* [2] */ | ||
| 1467 | BTF_ARRAY_ENC(1, 1, 4), | ||
| 1468 | BTF_END_RAW, | ||
| 1469 | }, | ||
| 1470 | .str_sec = "\0__skb", | ||
| 1471 | .str_sec_size = sizeof("\0__skb"), | ||
| 1472 | .map_type = BPF_MAP_TYPE_ARRAY, | ||
| 1473 | .map_name = "array_type_check_btf", | ||
| 1474 | .key_size = sizeof(int), | ||
| 1475 | .value_size = sizeof(int), | ||
| 1476 | .key_type_id = 1, | ||
| 1477 | .value_type_id = 1, | ||
| 1478 | .max_entries = 4, | ||
| 1479 | .btf_load_err = true, | ||
| 1480 | .err_str = "Invalid name", | ||
| 1481 | }, | ||
| 1482 | |||
| 1483 | { | ||
| 1484 | .descr = "struct type (name_off = 0)", | ||
| 1485 | .raw_types = { | ||
| 1486 | BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ | ||
| 1487 | BTF_TYPE_ENC(0, | ||
| 1488 | BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4), /* [2] */ | ||
| 1489 | BTF_MEMBER_ENC(NAME_TBD, 1, 0), | ||
| 1490 | BTF_END_RAW, | ||
| 1491 | }, | ||
| 1492 | .str_sec = "\0A", | ||
| 1493 | .str_sec_size = sizeof("\0A"), | ||
| 1494 | .map_type = BPF_MAP_TYPE_ARRAY, | ||
| 1495 | .map_name = "struct_type_check_btf", | ||
| 1496 | .key_size = sizeof(int), | ||
| 1497 | .value_size = sizeof(int), | ||
| 1498 | .key_type_id = 1, | ||
| 1499 | .value_type_id = 1, | ||
| 1500 | .max_entries = 4, | ||
| 1501 | }, | ||
| 1502 | |||
| 1503 | { | ||
| 1504 | .descr = "struct type (invalid name, invalid identifier)", | ||
| 1505 | .raw_types = { | ||
| 1506 | BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ | ||
| 1507 | BTF_TYPE_ENC(NAME_TBD, | ||
| 1508 | BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4), /* [2] */ | ||
| 1509 | BTF_MEMBER_ENC(NAME_TBD, 1, 0), | ||
| 1510 | BTF_END_RAW, | ||
| 1511 | }, | ||
| 1512 | .str_sec = "\0A!\0B", | ||
| 1513 | .str_sec_size = sizeof("\0A!\0B"), | ||
| 1514 | .map_type = BPF_MAP_TYPE_ARRAY, | ||
| 1515 | .map_name = "struct_type_check_btf", | ||
| 1516 | .key_size = sizeof(int), | ||
| 1517 | .value_size = sizeof(int), | ||
| 1518 | .key_type_id = 1, | ||
| 1519 | .value_type_id = 1, | ||
| 1520 | .max_entries = 4, | ||
| 1521 | .btf_load_err = true, | ||
| 1522 | .err_str = "Invalid name", | ||
| 1523 | }, | ||
| 1524 | |||
| 1525 | { | ||
| 1526 | .descr = "struct member (name_off = 0)", | ||
| 1527 | .raw_types = { | ||
| 1528 | BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ | ||
| 1529 | BTF_TYPE_ENC(0, | ||
| 1530 | BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4), /* [2] */ | ||
| 1531 | BTF_MEMBER_ENC(NAME_TBD, 1, 0), | ||
| 1532 | BTF_END_RAW, | ||
| 1533 | }, | ||
| 1534 | .str_sec = "\0A", | ||
| 1535 | .str_sec_size = sizeof("\0A"), | ||
| 1536 | .map_type = BPF_MAP_TYPE_ARRAY, | ||
| 1537 | .map_name = "struct_type_check_btf", | ||
| 1538 | .key_size = sizeof(int), | ||
| 1539 | .value_size = sizeof(int), | ||
| 1540 | .key_type_id = 1, | ||
| 1541 | .value_type_id = 1, | ||
| 1542 | .max_entries = 4, | ||
| 1543 | }, | ||
| 1544 | |||
| 1545 | { | ||
| 1546 | .descr = "struct member (invalid name, invalid identifier)", | ||
| 1547 | .raw_types = { | ||
| 1548 | BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ | ||
| 1549 | BTF_TYPE_ENC(NAME_TBD, | ||
| 1550 | BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4), /* [2] */ | ||
| 1551 | BTF_MEMBER_ENC(NAME_TBD, 1, 0), | ||
| 1552 | BTF_END_RAW, | ||
| 1553 | }, | ||
| 1554 | .str_sec = "\0A\0B*", | ||
| 1555 | .str_sec_size = sizeof("\0A\0B*"), | ||
| 1556 | .map_type = BPF_MAP_TYPE_ARRAY, | ||
| 1557 | .map_name = "struct_type_check_btf", | ||
| 1558 | .key_size = sizeof(int), | ||
| 1559 | .value_size = sizeof(int), | ||
| 1560 | .key_type_id = 1, | ||
| 1561 | .value_type_id = 1, | ||
| 1562 | .max_entries = 4, | ||
| 1563 | .btf_load_err = true, | ||
| 1564 | .err_str = "Invalid name", | ||
| 1565 | }, | ||
| 1566 | |||
| 1567 | { | ||
| 1568 | .descr = "enum type (name_off = 0)", | ||
| 1569 | .raw_types = { | ||
| 1570 | BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ | ||
| 1571 | BTF_TYPE_ENC(0, | ||
| 1572 | BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), | ||
| 1573 | sizeof(int)), /* [2] */ | ||
| 1574 | BTF_ENUM_ENC(NAME_TBD, 0), | ||
| 1575 | BTF_END_RAW, | ||
| 1576 | }, | ||
| 1577 | .str_sec = "\0A\0B", | ||
| 1578 | .str_sec_size = sizeof("\0A\0B"), | ||
| 1579 | .map_type = BPF_MAP_TYPE_ARRAY, | ||
| 1580 | .map_name = "enum_type_check_btf", | ||
| 1581 | .key_size = sizeof(int), | ||
| 1582 | .value_size = sizeof(int), | ||
| 1583 | .key_type_id = 1, | ||
| 1584 | .value_type_id = 1, | ||
| 1585 | .max_entries = 4, | ||
| 1586 | }, | ||
| 1587 | |||
| 1588 | { | ||
| 1589 | .descr = "enum type (invalid name, invalid identifier)", | ||
| 1590 | .raw_types = { | ||
| 1591 | BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ | ||
| 1592 | BTF_TYPE_ENC(NAME_TBD, | ||
| 1593 | BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), | ||
| 1594 | sizeof(int)), /* [2] */ | ||
| 1595 | BTF_ENUM_ENC(NAME_TBD, 0), | ||
| 1596 | BTF_END_RAW, | ||
| 1597 | }, | ||
| 1598 | .str_sec = "\0A!\0B", | ||
| 1599 | .str_sec_size = sizeof("\0A!\0B"), | ||
| 1600 | .map_type = BPF_MAP_TYPE_ARRAY, | ||
| 1601 | .map_name = "enum_type_check_btf", | ||
| 1602 | .key_size = sizeof(int), | ||
| 1603 | .value_size = sizeof(int), | ||
| 1604 | .key_type_id = 1, | ||
| 1605 | .value_type_id = 1, | ||
| 1606 | .max_entries = 4, | ||
| 1607 | .btf_load_err = true, | ||
| 1608 | .err_str = "Invalid name", | ||
| 1609 | }, | ||
| 1610 | |||
| 1611 | { | ||
| 1612 | .descr = "enum member (invalid name, name_off = 0)", | ||
| 1613 | .raw_types = { | ||
| 1614 | BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ | ||
| 1615 | BTF_TYPE_ENC(0, | ||
| 1616 | BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), | ||
| 1617 | sizeof(int)), /* [2] */ | ||
| 1618 | BTF_ENUM_ENC(0, 0), | ||
| 1619 | BTF_END_RAW, | ||
| 1620 | }, | ||
| 1621 | .str_sec = "", | ||
| 1622 | .str_sec_size = sizeof(""), | ||
| 1623 | .map_type = BPF_MAP_TYPE_ARRAY, | ||
| 1624 | .map_name = "enum_type_check_btf", | ||
| 1625 | .key_size = sizeof(int), | ||
| 1626 | .value_size = sizeof(int), | ||
| 1627 | .key_type_id = 1, | ||
| 1628 | .value_type_id = 1, | ||
| 1629 | .max_entries = 4, | ||
| 1630 | .btf_load_err = true, | ||
| 1631 | .err_str = "Invalid name", | ||
| 1632 | }, | ||
| 1633 | |||
| 1634 | { | ||
| 1635 | .descr = "enum member (invalid name, invalid identifier)", | ||
| 1636 | .raw_types = { | ||
| 1637 | BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ | ||
| 1638 | BTF_TYPE_ENC(0, | ||
| 1639 | BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), | ||
| 1640 | sizeof(int)), /* [2] */ | ||
| 1641 | BTF_ENUM_ENC(NAME_TBD, 0), | ||
| 1642 | BTF_END_RAW, | ||
| 1643 | }, | ||
| 1644 | .str_sec = "\0A!", | ||
| 1645 | .str_sec_size = sizeof("\0A!"), | ||
| 1646 | .map_type = BPF_MAP_TYPE_ARRAY, | ||
| 1647 | .map_name = "enum_type_check_btf", | ||
| 1648 | .key_size = sizeof(int), | ||
| 1649 | .value_size = sizeof(int), | ||
| 1650 | .key_type_id = 1, | ||
| 1651 | .value_type_id = 1, | ||
| 1652 | .max_entries = 4, | ||
| 1653 | .btf_load_err = true, | ||
| 1654 | .err_str = "Invalid name", | ||
| 1655 | }, | ||
| 1656 | { | ||
| 1296 | .descr = "arraymap invalid btf key (a bit field)", | 1657 | .descr = "arraymap invalid btf key (a bit field)", |
| 1297 | .raw_types = { | 1658 | .raw_types = { |
| 1298 | /* int */ /* [1] */ | 1659 | /* int */ /* [1] */ |
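The block of new tests above expects BTF loading to fail with "Invalid name" whenever a kind that must be anonymous (ptr, volatile, const, restrict, array) carries a name, a kind that must be named (typedef, fwd) is anonymous, or a name is not a valid C identifier; anonymous structs, enums and struct members are accepted. A rough, hypothetical sketch of the identifier rule these tests exercise (the kernel's real check lives in its BTF verifier and may differ in detail, e.g. in length limits):

        #include <ctype.h>
        #include <stdbool.h>

        /* Valid: non-empty, starts with a letter or '_',
         * remaining characters are letters, digits or '_'.
         */
        static bool btf_name_looks_like_identifier(const char *name)
        {
                if (!name || !*name)
                        return false;
                if (!isalpha((unsigned char)*name) && *name != '_')
                        return false;
                for (name++; *name; name++)
                        if (!isalnum((unsigned char)*name) && *name != '_')
                                return false;
                return true;
        }

Under that rule "__int" and "A" pass, while "__!int", "A!" and "B*" from the string sections above are rejected.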
diff --git a/tools/testing/selftests/bpf/test_sk_lookup_kern.c b/tools/testing/selftests/bpf/test_sk_lookup_kern.c index b745bdc08c2b..e21cd736c196 100644 --- a/tools/testing/selftests/bpf/test_sk_lookup_kern.c +++ b/tools/testing/selftests/bpf/test_sk_lookup_kern.c | |||
| @@ -72,7 +72,7 @@ int bpf_sk_lookup_test0(struct __sk_buff *skb) | |||
| 72 | return TC_ACT_SHOT; | 72 | return TC_ACT_SHOT; |
| 73 | 73 | ||
| 74 | tuple_len = ipv4 ? sizeof(tuple->ipv4) : sizeof(tuple->ipv6); | 74 | tuple_len = ipv4 ? sizeof(tuple->ipv4) : sizeof(tuple->ipv6); |
| 75 | sk = bpf_sk_lookup_tcp(skb, tuple, tuple_len, 0, 0); | 75 | sk = bpf_sk_lookup_tcp(skb, tuple, tuple_len, BPF_F_CURRENT_NETNS, 0); |
| 76 | if (sk) | 76 | if (sk) |
| 77 | bpf_sk_release(sk); | 77 | bpf_sk_release(sk); |
| 78 | return sk ? TC_ACT_OK : TC_ACT_UNSPEC; | 78 | return sk ? TC_ACT_OK : TC_ACT_UNSPEC; |
| @@ -84,7 +84,7 @@ int bpf_sk_lookup_test1(struct __sk_buff *skb) | |||
| 84 | struct bpf_sock_tuple tuple = {}; | 84 | struct bpf_sock_tuple tuple = {}; |
| 85 | struct bpf_sock *sk; | 85 | struct bpf_sock *sk; |
| 86 | 86 | ||
| 87 | sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0); | 87 | sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0); |
| 88 | if (sk) | 88 | if (sk) |
| 89 | bpf_sk_release(sk); | 89 | bpf_sk_release(sk); |
| 90 | return 0; | 90 | return 0; |
| @@ -97,7 +97,7 @@ int bpf_sk_lookup_uaf(struct __sk_buff *skb) | |||
| 97 | struct bpf_sock *sk; | 97 | struct bpf_sock *sk; |
| 98 | __u32 family = 0; | 98 | __u32 family = 0; |
| 99 | 99 | ||
| 100 | sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0); | 100 | sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0); |
| 101 | if (sk) { | 101 | if (sk) { |
| 102 | bpf_sk_release(sk); | 102 | bpf_sk_release(sk); |
| 103 | family = sk->family; | 103 | family = sk->family; |
| @@ -112,7 +112,7 @@ int bpf_sk_lookup_modptr(struct __sk_buff *skb) | |||
| 112 | struct bpf_sock *sk; | 112 | struct bpf_sock *sk; |
| 113 | __u32 family; | 113 | __u32 family; |
| 114 | 114 | ||
| 115 | sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0); | 115 | sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0); |
| 116 | if (sk) { | 116 | if (sk) { |
| 117 | sk += 1; | 117 | sk += 1; |
| 118 | bpf_sk_release(sk); | 118 | bpf_sk_release(sk); |
| @@ -127,7 +127,7 @@ int bpf_sk_lookup_modptr_or_null(struct __sk_buff *skb) | |||
| 127 | struct bpf_sock *sk; | 127 | struct bpf_sock *sk; |
| 128 | __u32 family; | 128 | __u32 family; |
| 129 | 129 | ||
| 130 | sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0); | 130 | sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0); |
| 131 | sk += 1; | 131 | sk += 1; |
| 132 | if (sk) | 132 | if (sk) |
| 133 | bpf_sk_release(sk); | 133 | bpf_sk_release(sk); |
| @@ -139,7 +139,7 @@ int bpf_sk_lookup_test2(struct __sk_buff *skb) | |||
| 139 | { | 139 | { |
| 140 | struct bpf_sock_tuple tuple = {}; | 140 | struct bpf_sock_tuple tuple = {}; |
| 141 | 141 | ||
| 142 | bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0); | 142 | bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0); |
| 143 | return 0; | 143 | return 0; |
| 144 | } | 144 | } |
| 145 | 145 | ||
| @@ -149,7 +149,7 @@ int bpf_sk_lookup_test3(struct __sk_buff *skb) | |||
| 149 | struct bpf_sock_tuple tuple = {}; | 149 | struct bpf_sock_tuple tuple = {}; |
| 150 | struct bpf_sock *sk; | 150 | struct bpf_sock *sk; |
| 151 | 151 | ||
| 152 | sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0); | 152 | sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0); |
| 153 | bpf_sk_release(sk); | 153 | bpf_sk_release(sk); |
| 154 | bpf_sk_release(sk); | 154 | bpf_sk_release(sk); |
| 155 | return 0; | 155 | return 0; |
| @@ -161,7 +161,7 @@ int bpf_sk_lookup_test4(struct __sk_buff *skb) | |||
| 161 | struct bpf_sock_tuple tuple = {}; | 161 | struct bpf_sock_tuple tuple = {}; |
| 162 | struct bpf_sock *sk; | 162 | struct bpf_sock *sk; |
| 163 | 163 | ||
| 164 | sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0); | 164 | sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0); |
| 165 | bpf_sk_release(sk); | 165 | bpf_sk_release(sk); |
| 166 | return 0; | 166 | return 0; |
| 167 | } | 167 | } |
| @@ -169,7 +169,7 @@ int bpf_sk_lookup_test4(struct __sk_buff *skb) | |||
| 169 | void lookup_no_release(struct __sk_buff *skb) | 169 | void lookup_no_release(struct __sk_buff *skb) |
| 170 | { | 170 | { |
| 171 | struct bpf_sock_tuple tuple = {}; | 171 | struct bpf_sock_tuple tuple = {}; |
| 172 | bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0); | 172 | bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0); |
| 173 | } | 173 | } |
| 174 | 174 | ||
| 175 | SEC("fail_no_release_subcall") | 175 | SEC("fail_no_release_subcall") |
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index 550b7e46bf4a..df6f751cc1e8 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c | |||
| @@ -8576,7 +8576,7 @@ static struct bpf_test tests[] = { | |||
| 8576 | BPF_JMP_IMM(BPF_JA, 0, 0, -7), | 8576 | BPF_JMP_IMM(BPF_JA, 0, 0, -7), |
| 8577 | }, | 8577 | }, |
| 8578 | .fixup_map_hash_8b = { 4 }, | 8578 | .fixup_map_hash_8b = { 4 }, |
| 8579 | .errstr = "R0 invalid mem access 'inv'", | 8579 | .errstr = "unbounded min value", |
| 8580 | .result = REJECT, | 8580 | .result = REJECT, |
| 8581 | }, | 8581 | }, |
| 8582 | { | 8582 | { |
| @@ -10547,7 +10547,7 @@ static struct bpf_test tests[] = { | |||
| 10547 | "check deducing bounds from const, 5", | 10547 | "check deducing bounds from const, 5", |
| 10548 | .insns = { | 10548 | .insns = { |
| 10549 | BPF_MOV64_IMM(BPF_REG_0, 0), | 10549 | BPF_MOV64_IMM(BPF_REG_0, 0), |
| 10550 | BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1), | 10550 | BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1), |
| 10551 | BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), | 10551 | BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), |
| 10552 | BPF_EXIT_INSN(), | 10552 | BPF_EXIT_INSN(), |
| 10553 | }, | 10553 | }, |
| @@ -14230,7 +14230,7 @@ static void do_test_single(struct bpf_test *test, bool unpriv, | |||
| 14230 | 14230 | ||
| 14231 | reject_from_alignment = fd_prog < 0 && | 14231 | reject_from_alignment = fd_prog < 0 && |
| 14232 | (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) && | 14232 | (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) && |
| 14233 | strstr(bpf_vlog, "Unknown alignment."); | 14233 | strstr(bpf_vlog, "misaligned"); |
| 14234 | #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS | 14234 | #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS |
| 14235 | if (reject_from_alignment) { | 14235 | if (reject_from_alignment) { |
| 14236 | printf("FAIL\nFailed due to alignment despite having efficient unaligned access: '%s'!\n", | 14236 | printf("FAIL\nFailed due to alignment despite having efficient unaligned access: '%s'!\n", |
