aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2017-02-16 11:37:18 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2017-02-16 11:37:18 -0500
commit3c7a9f32f9392c9dfce24f33bdc6799852903e27 (patch)
tree15b5365c2f82d2bd041e202fdec4d2d3342e8559
parent747ae0a96f1a78b35c5a3d93ad37a16655e16340 (diff)
parentbf3f14d6342cfb37eab8f0cddd0e4d4063fd9fc9 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller: 1) In order to avoid problems in the future, make cgroup bpf overriding explicit using BPF_F_ALLOW_OVERRIDE. From Alexei Starovoitov. 2) LLC sets skb->sk without proper skb->destructor and this explodes, fix from Eric Dumazet. 3) Make sure when we have an ipv4 mapped source address, the destination is either also an ipv4 mapped address or ipv6_addr_any(). Fix from Jonathan T. Leighton. 4) Avoid packet loss in fec driver by programming the multicast filter more intelligently. From Rui Sousa. 5) Handle multiple threads invoking fanout_add(), fix from Eric Dumazet. 6) Since we can invoke the TCP input path in process context, without BH being disabled, we have to accommodate that in the locking of the TCP probe. Also from Eric Dumazet. 7) Fix erroneous emission of NETEVENT_DELAY_PROBE_TIME_UPDATE when we aren't even updating that sysctl value. From Marcus Huewe. 8) Fix endian bugs in ibmvnic driver, from Thomas Falcon. [ This is the second version of the pull that reverts the nested rhashtable changes that looked a bit too scary for this late in the release - Linus ] * git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (27 commits) rhashtable: Revert nested table changes. ibmvnic: Fix endian errors in error reporting output ibmvnic: Fix endian error when requesting device capabilities net: neigh: Fix netevent NETEVENT_DELAY_PROBE_TIME_UPDATE notification net: xilinx_emaclite: fix freezes due to unordered I/O net: xilinx_emaclite: fix receive buffer overflow bpf: kernel header files need to be copied into the tools directory tcp: tcp_probe: use spin_lock_bh() uapi: fix linux/if_pppol2tp.h userspace compilation errors packet: fix races in fanout_add() ibmvnic: Fix initial MTU settings net: ethernet: ti: cpsw: fix cpsw assignment in resume kcm: fix a null pointer dereference in kcm_sendmsg() net: fec: fix multicast filtering hardware setup ipv6: Handle IPv4-mapped src to in6addr_any dst. 
ipv6: Inhibit IPv4-mapped src address on the wire. net/mlx5e: Disable preemption when doing TC statistics upcall rhashtable: Add nested tables tipc: Fix tipc_sk_reinit race conditions gfs2: Use rhashtable walk interface in glock_hash_walk ...
-rw-r--r--CREDITS5
-rw-r--r--MAINTAINERS15
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c23
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c43
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c4
-rw-r--r--drivers/net/ethernet/ti/cpsw.c2
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c126
-rw-r--r--drivers/net/xen-netback/common.h8
-rw-r--r--drivers/net/xen-netback/interface.c8
-rw-r--r--include/linux/bpf-cgroup.h13
-rw-r--r--include/uapi/linux/bpf.h7
-rw-r--r--include/uapi/linux/l2tp.h7
-rw-r--r--kernel/bpf/cgroup.c59
-rw-r--r--kernel/bpf/syscall.c20
-rw-r--r--kernel/cgroup.c9
-rw-r--r--net/core/neighbour.c3
-rw-r--r--net/ipv4/arp.c12
-rw-r--r--net/ipv4/tcp_probe.c4
-rw-r--r--net/ipv6/datagram.c14
-rw-r--r--net/ipv6/ip6_output.c3
-rw-r--r--net/ipv6/tcp_ipv6.c11
-rw-r--r--net/ipv6/udp.c4
-rw-r--r--net/kcm/kcmsock.c6
-rw-r--r--net/llc/llc_conn.c3
-rw-r--r--net/llc/llc_sap.c3
-rw-r--r--net/packet/af_packet.c55
-rw-r--r--samples/bpf/test_cgrp2_attach.c2
-rw-r--r--samples/bpf/test_cgrp2_attach2.c68
-rw-r--r--samples/bpf/test_cgrp2_sock.c2
-rw-r--r--samples/bpf/test_cgrp2_sock2.c2
-rw-r--r--tools/include/uapi/linux/bpf.h7
-rw-r--r--tools/lib/bpf/bpf.c4
-rw-r--r--tools/lib/bpf/bpf.h3
33 files changed, 355 insertions, 200 deletions
diff --git a/CREDITS b/CREDITS
index c58560701d13..c5626bf06264 100644
--- a/CREDITS
+++ b/CREDITS
@@ -2478,12 +2478,11 @@ S: D-90453 Nuernberg
2478S: Germany 2478S: Germany
2479 2479
2480N: Arnaldo Carvalho de Melo 2480N: Arnaldo Carvalho de Melo
2481E: acme@ghostprotocols.net 2481E: acme@kernel.org
2482E: arnaldo.melo@gmail.com 2482E: arnaldo.melo@gmail.com
2483E: acme@redhat.com 2483E: acme@redhat.com
2484W: http://oops.ghostprotocols.net:81/blog/
2485P: 1024D/9224DF01 D5DF E3BB E3C8 BCBB F8AD 841A B6AB 4681 9224 DF01 2484P: 1024D/9224DF01 D5DF E3BB E3C8 BCBB F8AD 841A B6AB 4681 9224 DF01
2486D: IPX, LLC, DCCP, cyc2x, wl3501_cs, net/ hacks 2485D: tools/, IPX, LLC, DCCP, cyc2x, wl3501_cs, net/ hacks
2487S: Brazil 2486S: Brazil
2488 2487
2489N: Karsten Merker 2488N: Karsten Merker
diff --git a/MAINTAINERS b/MAINTAINERS
index 107c10e8f2d2..527d13759ecc 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -877,8 +877,8 @@ S: Odd fixes
877F: drivers/hwmon/applesmc.c 877F: drivers/hwmon/applesmc.c
878 878
879APPLETALK NETWORK LAYER 879APPLETALK NETWORK LAYER
880M: Arnaldo Carvalho de Melo <acme@ghostprotocols.net> 880L: netdev@vger.kernel.org
881S: Maintained 881S: Odd fixes
882F: drivers/net/appletalk/ 882F: drivers/net/appletalk/
883F: net/appletalk/ 883F: net/appletalk/
884 884
@@ -6727,9 +6727,8 @@ S: Odd Fixes
6727F: drivers/tty/ipwireless/ 6727F: drivers/tty/ipwireless/
6728 6728
6729IPX NETWORK LAYER 6729IPX NETWORK LAYER
6730M: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
6731L: netdev@vger.kernel.org 6730L: netdev@vger.kernel.org
6732S: Maintained 6731S: Odd fixes
6733F: include/net/ipx.h 6732F: include/net/ipx.h
6734F: include/uapi/linux/ipx.h 6733F: include/uapi/linux/ipx.h
6735F: net/ipx/ 6734F: net/ipx/
@@ -7501,8 +7500,8 @@ S: Maintained
7501F: drivers/misc/lkdtm* 7500F: drivers/misc/lkdtm*
7502 7501
7503LLC (802.2) 7502LLC (802.2)
7504M: Arnaldo Carvalho de Melo <acme@ghostprotocols.net> 7503L: netdev@vger.kernel.org
7505S: Maintained 7504S: Odd fixes
7506F: include/linux/llc.h 7505F: include/linux/llc.h
7507F: include/uapi/linux/llc.h 7506F: include/uapi/linux/llc.h
7508F: include/net/llc* 7507F: include/net/llc*
@@ -13373,10 +13372,8 @@ S: Maintained
13373F: drivers/input/misc/wistron_btns.c 13372F: drivers/input/misc/wistron_btns.c
13374 13373
13375WL3501 WIRELESS PCMCIA CARD DRIVER 13374WL3501 WIRELESS PCMCIA CARD DRIVER
13376M: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
13377L: linux-wireless@vger.kernel.org 13375L: linux-wireless@vger.kernel.org
13378W: http://oops.ghostprotocols.net:81/blog 13376S: Odd fixes
13379S: Maintained
13380F: drivers/net/wireless/wl3501* 13377F: drivers/net/wireless/wl3501*
13381 13378
13382WOLFSON MICROELECTRONICS DRIVERS 13379WOLFSON MICROELECTRONICS DRIVERS
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 38160c2bebcb..8be7034b2e7b 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -2910,6 +2910,7 @@ static void set_multicast_list(struct net_device *ndev)
2910 struct netdev_hw_addr *ha; 2910 struct netdev_hw_addr *ha;
2911 unsigned int i, bit, data, crc, tmp; 2911 unsigned int i, bit, data, crc, tmp;
2912 unsigned char hash; 2912 unsigned char hash;
2913 unsigned int hash_high = 0, hash_low = 0;
2913 2914
2914 if (ndev->flags & IFF_PROMISC) { 2915 if (ndev->flags & IFF_PROMISC) {
2915 tmp = readl(fep->hwp + FEC_R_CNTRL); 2916 tmp = readl(fep->hwp + FEC_R_CNTRL);
@@ -2932,11 +2933,7 @@ static void set_multicast_list(struct net_device *ndev)
2932 return; 2933 return;
2933 } 2934 }
2934 2935
2935 /* Clear filter and add the addresses in hash register 2936 /* Add the addresses in hash register */
2936 */
2937 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
2938 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
2939
2940 netdev_for_each_mc_addr(ha, ndev) { 2937 netdev_for_each_mc_addr(ha, ndev) {
2941 /* calculate crc32 value of mac address */ 2938 /* calculate crc32 value of mac address */
2942 crc = 0xffffffff; 2939 crc = 0xffffffff;
@@ -2954,16 +2951,14 @@ static void set_multicast_list(struct net_device *ndev)
2954 */ 2951 */
2955 hash = (crc >> (32 - FEC_HASH_BITS)) & 0x3f; 2952 hash = (crc >> (32 - FEC_HASH_BITS)) & 0x3f;
2956 2953
2957 if (hash > 31) { 2954 if (hash > 31)
2958 tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_HIGH); 2955 hash_high |= 1 << (hash - 32);
2959 tmp |= 1 << (hash - 32); 2956 else
2960 writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); 2957 hash_low |= 1 << hash;
2961 } else {
2962 tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_LOW);
2963 tmp |= 1 << hash;
2964 writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
2965 }
2966 } 2958 }
2959
2960 writel(hash_high, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
2961 writel(hash_low, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
2967} 2962}
2968 2963
2969/* Set a MAC change in hardware. */ 2964/* Set a MAC change in hardware. */
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index c12596676bbb..a07b8d79174c 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -189,9 +189,10 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
189 } 189 }
190 ltb->map_id = adapter->map_id; 190 ltb->map_id = adapter->map_id;
191 adapter->map_id++; 191 adapter->map_id++;
192
193 init_completion(&adapter->fw_done);
192 send_request_map(adapter, ltb->addr, 194 send_request_map(adapter, ltb->addr,
193 ltb->size, ltb->map_id); 195 ltb->size, ltb->map_id);
194 init_completion(&adapter->fw_done);
195 wait_for_completion(&adapter->fw_done); 196 wait_for_completion(&adapter->fw_done);
196 return 0; 197 return 0;
197} 198}
@@ -505,7 +506,7 @@ rx_pool_alloc_failed:
505 adapter->rx_pool = NULL; 506 adapter->rx_pool = NULL;
506rx_pool_arr_alloc_failed: 507rx_pool_arr_alloc_failed:
507 for (i = 0; i < adapter->req_rx_queues; i++) 508 for (i = 0; i < adapter->req_rx_queues; i++)
508 napi_enable(&adapter->napi[i]); 509 napi_disable(&adapter->napi[i]);
509alloc_napi_failed: 510alloc_napi_failed:
510 return -ENOMEM; 511 return -ENOMEM;
511} 512}
@@ -1121,10 +1122,10 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev,
1121 crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token); 1122 crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
1122 crq.request_statistics.len = 1123 crq.request_statistics.len =
1123 cpu_to_be32(sizeof(struct ibmvnic_statistics)); 1124 cpu_to_be32(sizeof(struct ibmvnic_statistics));
1124 ibmvnic_send_crq(adapter, &crq);
1125 1125
1126 /* Wait for data to be written */ 1126 /* Wait for data to be written */
1127 init_completion(&adapter->stats_done); 1127 init_completion(&adapter->stats_done);
1128 ibmvnic_send_crq(adapter, &crq);
1128 wait_for_completion(&adapter->stats_done); 1129 wait_for_completion(&adapter->stats_done);
1129 1130
1130 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++) 1131 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
@@ -1496,7 +1497,7 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
1496 adapter->req_rx_queues = adapter->opt_rx_comp_queues; 1497 adapter->req_rx_queues = adapter->opt_rx_comp_queues;
1497 adapter->req_rx_add_queues = adapter->max_rx_add_queues; 1498 adapter->req_rx_add_queues = adapter->max_rx_add_queues;
1498 1499
1499 adapter->req_mtu = adapter->max_mtu; 1500 adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
1500 } 1501 }
1501 1502
1502 total_queues = adapter->req_tx_queues + adapter->req_rx_queues; 1503 total_queues = adapter->req_tx_queues + adapter->req_rx_queues;
@@ -2185,12 +2186,12 @@ static void handle_error_info_rsp(union ibmvnic_crq *crq,
2185 2186
2186 if (!found) { 2187 if (!found) {
2187 dev_err(dev, "Couldn't find error id %x\n", 2188 dev_err(dev, "Couldn't find error id %x\n",
2188 crq->request_error_rsp.error_id); 2189 be32_to_cpu(crq->request_error_rsp.error_id));
2189 return; 2190 return;
2190 } 2191 }
2191 2192
2192 dev_err(dev, "Detailed info for error id %x:", 2193 dev_err(dev, "Detailed info for error id %x:",
2193 crq->request_error_rsp.error_id); 2194 be32_to_cpu(crq->request_error_rsp.error_id));
2194 2195
2195 for (i = 0; i < error_buff->len; i++) { 2196 for (i = 0; i < error_buff->len; i++) {
2196 pr_cont("%02x", (int)error_buff->buff[i]); 2197 pr_cont("%02x", (int)error_buff->buff[i]);
@@ -2269,8 +2270,8 @@ static void handle_error_indication(union ibmvnic_crq *crq,
2269 dev_err(dev, "Firmware reports %serror id %x, cause %d\n", 2270 dev_err(dev, "Firmware reports %serror id %x, cause %d\n",
2270 crq->error_indication. 2271 crq->error_indication.
2271 flags & IBMVNIC_FATAL_ERROR ? "FATAL " : "", 2272 flags & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
2272 crq->error_indication.error_id, 2273 be32_to_cpu(crq->error_indication.error_id),
2273 crq->error_indication.error_cause); 2274 be16_to_cpu(crq->error_indication.error_cause));
2274 2275
2275 error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC); 2276 error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC);
2276 if (!error_buff) 2277 if (!error_buff)
@@ -2388,10 +2389,10 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
2388 case PARTIALSUCCESS: 2389 case PARTIALSUCCESS:
2389 dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n", 2390 dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
2390 *req_value, 2391 *req_value,
2391 (long int)be32_to_cpu(crq->request_capability_rsp. 2392 (long int)be64_to_cpu(crq->request_capability_rsp.
2392 number), name); 2393 number), name);
2393 release_sub_crqs_no_irqs(adapter); 2394 release_sub_crqs_no_irqs(adapter);
2394 *req_value = be32_to_cpu(crq->request_capability_rsp.number); 2395 *req_value = be64_to_cpu(crq->request_capability_rsp.number);
2395 init_sub_crqs(adapter, 1); 2396 init_sub_crqs(adapter, 1);
2396 return; 2397 return;
2397 default: 2398 default:
@@ -2626,12 +2627,12 @@ static void handle_query_cap_rsp(union ibmvnic_crq *crq,
2626 break; 2627 break;
2627 case MIN_MTU: 2628 case MIN_MTU:
2628 adapter->min_mtu = be64_to_cpu(crq->query_capability.number); 2629 adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
2629 netdev->min_mtu = adapter->min_mtu; 2630 netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
2630 netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu); 2631 netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
2631 break; 2632 break;
2632 case MAX_MTU: 2633 case MAX_MTU:
2633 adapter->max_mtu = be64_to_cpu(crq->query_capability.number); 2634 adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
2634 netdev->max_mtu = adapter->max_mtu; 2635 netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
2635 netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu); 2636 netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
2636 break; 2637 break;
2637 case MAX_MULTICAST_FILTERS: 2638 case MAX_MULTICAST_FILTERS:
@@ -2799,9 +2800,9 @@ static ssize_t trace_read(struct file *file, char __user *user_buf, size_t len,
2799 crq.collect_fw_trace.correlator = adapter->ras_comps[num].correlator; 2800 crq.collect_fw_trace.correlator = adapter->ras_comps[num].correlator;
2800 crq.collect_fw_trace.ioba = cpu_to_be32(trace_tok); 2801 crq.collect_fw_trace.ioba = cpu_to_be32(trace_tok);
2801 crq.collect_fw_trace.len = adapter->ras_comps[num].trace_buff_size; 2802 crq.collect_fw_trace.len = adapter->ras_comps[num].trace_buff_size;
2802 ibmvnic_send_crq(adapter, &crq);
2803 2803
2804 init_completion(&adapter->fw_done); 2804 init_completion(&adapter->fw_done);
2805 ibmvnic_send_crq(adapter, &crq);
2805 wait_for_completion(&adapter->fw_done); 2806 wait_for_completion(&adapter->fw_done);
2806 2807
2807 if (*ppos + len > be32_to_cpu(adapter->ras_comps[num].trace_buff_size)) 2808 if (*ppos + len > be32_to_cpu(adapter->ras_comps[num].trace_buff_size))
@@ -3581,9 +3582,9 @@ static int ibmvnic_dump_show(struct seq_file *seq, void *v)
3581 memset(&crq, 0, sizeof(crq)); 3582 memset(&crq, 0, sizeof(crq));
3582 crq.request_dump_size.first = IBMVNIC_CRQ_CMD; 3583 crq.request_dump_size.first = IBMVNIC_CRQ_CMD;
3583 crq.request_dump_size.cmd = REQUEST_DUMP_SIZE; 3584 crq.request_dump_size.cmd = REQUEST_DUMP_SIZE;
3584 ibmvnic_send_crq(adapter, &crq);
3585 3585
3586 init_completion(&adapter->fw_done); 3586 init_completion(&adapter->fw_done);
3587 ibmvnic_send_crq(adapter, &crq);
3587 wait_for_completion(&adapter->fw_done); 3588 wait_for_completion(&adapter->fw_done);
3588 3589
3589 seq_write(seq, adapter->dump_data, adapter->dump_data_size); 3590 seq_write(seq, adapter->dump_data, adapter->dump_data_size);
@@ -3629,8 +3630,8 @@ static void handle_crq_init_rsp(struct work_struct *work)
3629 } 3630 }
3630 } 3631 }
3631 3632
3632 send_version_xchg(adapter);
3633 reinit_completion(&adapter->init_done); 3633 reinit_completion(&adapter->init_done);
3634 send_version_xchg(adapter);
3634 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { 3635 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
3635 dev_err(dev, "Passive init timeout\n"); 3636 dev_err(dev, "Passive init timeout\n");
3636 goto task_failed; 3637 goto task_failed;
@@ -3640,9 +3641,9 @@ static void handle_crq_init_rsp(struct work_struct *work)
3640 if (adapter->renegotiate) { 3641 if (adapter->renegotiate) {
3641 adapter->renegotiate = false; 3642 adapter->renegotiate = false;
3642 release_sub_crqs_no_irqs(adapter); 3643 release_sub_crqs_no_irqs(adapter);
3643 send_cap_queries(adapter);
3644 3644
3645 reinit_completion(&adapter->init_done); 3645 reinit_completion(&adapter->init_done);
3646 send_cap_queries(adapter);
3646 if (!wait_for_completion_timeout(&adapter->init_done, 3647 if (!wait_for_completion_timeout(&adapter->init_done,
3647 timeout)) { 3648 timeout)) {
3648 dev_err(dev, "Passive init timeout\n"); 3649 dev_err(dev, "Passive init timeout\n");
@@ -3656,9 +3657,7 @@ static void handle_crq_init_rsp(struct work_struct *work)
3656 goto task_failed; 3657 goto task_failed;
3657 3658
3658 netdev->real_num_tx_queues = adapter->req_tx_queues; 3659 netdev->real_num_tx_queues = adapter->req_tx_queues;
3659 netdev->mtu = adapter->req_mtu; 3660 netdev->mtu = adapter->req_mtu - ETH_HLEN;
3660 netdev->min_mtu = adapter->min_mtu;
3661 netdev->max_mtu = adapter->max_mtu;
3662 3661
3663 if (adapter->failover) { 3662 if (adapter->failover) {
3664 adapter->failover = false; 3663 adapter->failover = false;
@@ -3772,9 +3771,9 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
3772 adapter->debugfs_dump = ent; 3771 adapter->debugfs_dump = ent;
3773 } 3772 }
3774 } 3773 }
3775 ibmvnic_send_crq_init(adapter);
3776 3774
3777 init_completion(&adapter->init_done); 3775 init_completion(&adapter->init_done);
3776 ibmvnic_send_crq_init(adapter);
3778 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) 3777 if (!wait_for_completion_timeout(&adapter->init_done, timeout))
3779 return 0; 3778 return 0;
3780 3779
@@ -3782,9 +3781,9 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
3782 if (adapter->renegotiate) { 3781 if (adapter->renegotiate) {
3783 adapter->renegotiate = false; 3782 adapter->renegotiate = false;
3784 release_sub_crqs_no_irqs(adapter); 3783 release_sub_crqs_no_irqs(adapter);
3785 send_cap_queries(adapter);
3786 3784
3787 reinit_completion(&adapter->init_done); 3785 reinit_completion(&adapter->init_done);
3786 send_cap_queries(adapter);
3788 if (!wait_for_completion_timeout(&adapter->init_done, 3787 if (!wait_for_completion_timeout(&adapter->init_done,
3789 timeout)) 3788 timeout))
3790 return 0; 3789 return 0;
@@ -3798,7 +3797,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
3798 } 3797 }
3799 3798
3800 netdev->real_num_tx_queues = adapter->req_tx_queues; 3799 netdev->real_num_tx_queues = adapter->req_tx_queues;
3801 netdev->mtu = adapter->req_mtu; 3800 netdev->mtu = adapter->req_mtu - ETH_HLEN;
3802 3801
3803 rc = register_netdev(netdev); 3802 rc = register_netdev(netdev);
3804 if (rc) { 3803 if (rc) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index c5282b6aba8b..2ebbe80d8126 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1087,10 +1087,14 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv,
1087 1087
1088 mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse); 1088 mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
1089 1089
1090 preempt_disable();
1091
1090 tcf_exts_to_list(f->exts, &actions); 1092 tcf_exts_to_list(f->exts, &actions);
1091 list_for_each_entry(a, &actions, list) 1093 list_for_each_entry(a, &actions, list)
1092 tcf_action_stats_update(a, bytes, packets, lastuse); 1094 tcf_action_stats_update(a, bytes, packets, lastuse);
1093 1095
1096 preempt_enable();
1097
1094 return 0; 1098 return 0;
1095} 1099}
1096 1100
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index b203143647e6..65088224c207 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -3160,7 +3160,7 @@ static int cpsw_resume(struct device *dev)
3160{ 3160{
3161 struct platform_device *pdev = to_platform_device(dev); 3161 struct platform_device *pdev = to_platform_device(dev);
3162 struct net_device *ndev = platform_get_drvdata(pdev); 3162 struct net_device *ndev = platform_get_drvdata(pdev);
3163 struct cpsw_common *cpsw = netdev_priv(ndev); 3163 struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
3164 3164
3165 /* Select default pin state */ 3165 /* Select default pin state */
3166 pinctrl_pm_select_default_state(dev); 3166 pinctrl_pm_select_default_state(dev);
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 93dc10b10c09..aa02a03a6d8d 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -100,6 +100,14 @@
100/* BUFFER_ALIGN(adr) calculates the number of bytes to the next alignment. */ 100/* BUFFER_ALIGN(adr) calculates the number of bytes to the next alignment. */
101#define BUFFER_ALIGN(adr) ((ALIGNMENT - ((u32) adr)) % ALIGNMENT) 101#define BUFFER_ALIGN(adr) ((ALIGNMENT - ((u32) adr)) % ALIGNMENT)
102 102
103#ifdef __BIG_ENDIAN
104#define xemaclite_readl ioread32be
105#define xemaclite_writel iowrite32be
106#else
107#define xemaclite_readl ioread32
108#define xemaclite_writel iowrite32
109#endif
110
103/** 111/**
104 * struct net_local - Our private per device data 112 * struct net_local - Our private per device data
105 * @ndev: instance of the network device 113 * @ndev: instance of the network device
@@ -156,15 +164,15 @@ static void xemaclite_enable_interrupts(struct net_local *drvdata)
156 u32 reg_data; 164 u32 reg_data;
157 165
158 /* Enable the Tx interrupts for the first Buffer */ 166 /* Enable the Tx interrupts for the first Buffer */
159 reg_data = __raw_readl(drvdata->base_addr + XEL_TSR_OFFSET); 167 reg_data = xemaclite_readl(drvdata->base_addr + XEL_TSR_OFFSET);
160 __raw_writel(reg_data | XEL_TSR_XMIT_IE_MASK, 168 xemaclite_writel(reg_data | XEL_TSR_XMIT_IE_MASK,
161 drvdata->base_addr + XEL_TSR_OFFSET); 169 drvdata->base_addr + XEL_TSR_OFFSET);
162 170
163 /* Enable the Rx interrupts for the first buffer */ 171 /* Enable the Rx interrupts for the first buffer */
164 __raw_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr + XEL_RSR_OFFSET); 172 xemaclite_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr + XEL_RSR_OFFSET);
165 173
166 /* Enable the Global Interrupt Enable */ 174 /* Enable the Global Interrupt Enable */
167 __raw_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET); 175 xemaclite_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET);
168} 176}
169 177
170/** 178/**
@@ -179,17 +187,17 @@ static void xemaclite_disable_interrupts(struct net_local *drvdata)
179 u32 reg_data; 187 u32 reg_data;
180 188
181 /* Disable the Global Interrupt Enable */ 189 /* Disable the Global Interrupt Enable */
182 __raw_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET); 190 xemaclite_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET);
183 191
184 /* Disable the Tx interrupts for the first buffer */ 192 /* Disable the Tx interrupts for the first buffer */
185 reg_data = __raw_readl(drvdata->base_addr + XEL_TSR_OFFSET); 193 reg_data = xemaclite_readl(drvdata->base_addr + XEL_TSR_OFFSET);
186 __raw_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK), 194 xemaclite_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK),
187 drvdata->base_addr + XEL_TSR_OFFSET); 195 drvdata->base_addr + XEL_TSR_OFFSET);
188 196
189 /* Disable the Rx interrupts for the first buffer */ 197 /* Disable the Rx interrupts for the first buffer */
190 reg_data = __raw_readl(drvdata->base_addr + XEL_RSR_OFFSET); 198 reg_data = xemaclite_readl(drvdata->base_addr + XEL_RSR_OFFSET);
191 __raw_writel(reg_data & (~XEL_RSR_RECV_IE_MASK), 199 xemaclite_writel(reg_data & (~XEL_RSR_RECV_IE_MASK),
192 drvdata->base_addr + XEL_RSR_OFFSET); 200 drvdata->base_addr + XEL_RSR_OFFSET);
193} 201}
194 202
195/** 203/**
@@ -321,7 +329,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
321 byte_count = ETH_FRAME_LEN; 329 byte_count = ETH_FRAME_LEN;
322 330
323 /* Check if the expected buffer is available */ 331 /* Check if the expected buffer is available */
324 reg_data = __raw_readl(addr + XEL_TSR_OFFSET); 332 reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
325 if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK | 333 if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK |
326 XEL_TSR_XMIT_ACTIVE_MASK)) == 0) { 334 XEL_TSR_XMIT_ACTIVE_MASK)) == 0) {
327 335
@@ -334,7 +342,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
334 342
335 addr = (void __iomem __force *)((u32 __force)addr ^ 343 addr = (void __iomem __force *)((u32 __force)addr ^
336 XEL_BUFFER_OFFSET); 344 XEL_BUFFER_OFFSET);
337 reg_data = __raw_readl(addr + XEL_TSR_OFFSET); 345 reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
338 346
339 if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK | 347 if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK |
340 XEL_TSR_XMIT_ACTIVE_MASK)) != 0) 348 XEL_TSR_XMIT_ACTIVE_MASK)) != 0)
@@ -345,16 +353,16 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
345 /* Write the frame to the buffer */ 353 /* Write the frame to the buffer */
346 xemaclite_aligned_write(data, (u32 __force *) addr, byte_count); 354 xemaclite_aligned_write(data, (u32 __force *) addr, byte_count);
347 355
348 __raw_writel((byte_count & XEL_TPLR_LENGTH_MASK), 356 xemaclite_writel((byte_count & XEL_TPLR_LENGTH_MASK),
349 addr + XEL_TPLR_OFFSET); 357 addr + XEL_TPLR_OFFSET);
350 358
351 /* Update the Tx Status Register to indicate that there is a 359 /* Update the Tx Status Register to indicate that there is a
352 * frame to send. Set the XEL_TSR_XMIT_ACTIVE_MASK flag which 360 * frame to send. Set the XEL_TSR_XMIT_ACTIVE_MASK flag which
353 * is used by the interrupt handler to check whether a frame 361 * is used by the interrupt handler to check whether a frame
354 * has been transmitted */ 362 * has been transmitted */
355 reg_data = __raw_readl(addr + XEL_TSR_OFFSET); 363 reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
356 reg_data |= (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_XMIT_ACTIVE_MASK); 364 reg_data |= (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_XMIT_ACTIVE_MASK);
357 __raw_writel(reg_data, addr + XEL_TSR_OFFSET); 365 xemaclite_writel(reg_data, addr + XEL_TSR_OFFSET);
358 366
359 return 0; 367 return 0;
360} 368}
@@ -369,7 +377,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
369 * 377 *
370 * Return: Total number of bytes received 378 * Return: Total number of bytes received
371 */ 379 */
372static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data) 380static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen)
373{ 381{
374 void __iomem *addr; 382 void __iomem *addr;
375 u16 length, proto_type; 383 u16 length, proto_type;
@@ -379,7 +387,7 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
379 addr = (drvdata->base_addr + drvdata->next_rx_buf_to_use); 387 addr = (drvdata->base_addr + drvdata->next_rx_buf_to_use);
380 388
381 /* Verify which buffer has valid data */ 389 /* Verify which buffer has valid data */
382 reg_data = __raw_readl(addr + XEL_RSR_OFFSET); 390 reg_data = xemaclite_readl(addr + XEL_RSR_OFFSET);
383 391
384 if ((reg_data & XEL_RSR_RECV_DONE_MASK) == XEL_RSR_RECV_DONE_MASK) { 392 if ((reg_data & XEL_RSR_RECV_DONE_MASK) == XEL_RSR_RECV_DONE_MASK) {
385 if (drvdata->rx_ping_pong != 0) 393 if (drvdata->rx_ping_pong != 0)
@@ -396,27 +404,28 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
396 return 0; /* No data was available */ 404 return 0; /* No data was available */
397 405
398 /* Verify that buffer has valid data */ 406 /* Verify that buffer has valid data */
399 reg_data = __raw_readl(addr + XEL_RSR_OFFSET); 407 reg_data = xemaclite_readl(addr + XEL_RSR_OFFSET);
400 if ((reg_data & XEL_RSR_RECV_DONE_MASK) != 408 if ((reg_data & XEL_RSR_RECV_DONE_MASK) !=
401 XEL_RSR_RECV_DONE_MASK) 409 XEL_RSR_RECV_DONE_MASK)
402 return 0; /* No data was available */ 410 return 0; /* No data was available */
403 } 411 }
404 412
405 /* Get the protocol type of the ethernet frame that arrived */ 413 /* Get the protocol type of the ethernet frame that arrived */
406 proto_type = ((ntohl(__raw_readl(addr + XEL_HEADER_OFFSET + 414 proto_type = ((ntohl(xemaclite_readl(addr + XEL_HEADER_OFFSET +
407 XEL_RXBUFF_OFFSET)) >> XEL_HEADER_SHIFT) & 415 XEL_RXBUFF_OFFSET)) >> XEL_HEADER_SHIFT) &
408 XEL_RPLR_LENGTH_MASK); 416 XEL_RPLR_LENGTH_MASK);
409 417
410 /* Check if received ethernet frame is a raw ethernet frame 418 /* Check if received ethernet frame is a raw ethernet frame
411 * or an IP packet or an ARP packet */ 419 * or an IP packet or an ARP packet */
412 if (proto_type > (ETH_FRAME_LEN + ETH_FCS_LEN)) { 420 if (proto_type > ETH_DATA_LEN) {
413 421
414 if (proto_type == ETH_P_IP) { 422 if (proto_type == ETH_P_IP) {
415 length = ((ntohl(__raw_readl(addr + 423 length = ((ntohl(xemaclite_readl(addr +
416 XEL_HEADER_IP_LENGTH_OFFSET + 424 XEL_HEADER_IP_LENGTH_OFFSET +
417 XEL_RXBUFF_OFFSET)) >> 425 XEL_RXBUFF_OFFSET)) >>
418 XEL_HEADER_SHIFT) & 426 XEL_HEADER_SHIFT) &
419 XEL_RPLR_LENGTH_MASK); 427 XEL_RPLR_LENGTH_MASK);
428 length = min_t(u16, length, ETH_DATA_LEN);
420 length += ETH_HLEN + ETH_FCS_LEN; 429 length += ETH_HLEN + ETH_FCS_LEN;
421 430
422 } else if (proto_type == ETH_P_ARP) 431 } else if (proto_type == ETH_P_ARP)
@@ -429,14 +438,17 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
429 /* Use the length in the frame, plus the header and trailer */ 438 /* Use the length in the frame, plus the header and trailer */
430 length = proto_type + ETH_HLEN + ETH_FCS_LEN; 439 length = proto_type + ETH_HLEN + ETH_FCS_LEN;
431 440
441 if (WARN_ON(length > maxlen))
442 length = maxlen;
443
432 /* Read from the EmacLite device */ 444 /* Read from the EmacLite device */
433 xemaclite_aligned_read((u32 __force *) (addr + XEL_RXBUFF_OFFSET), 445 xemaclite_aligned_read((u32 __force *) (addr + XEL_RXBUFF_OFFSET),
434 data, length); 446 data, length);
435 447
436 /* Acknowledge the frame */ 448 /* Acknowledge the frame */
437 reg_data = __raw_readl(addr + XEL_RSR_OFFSET); 449 reg_data = xemaclite_readl(addr + XEL_RSR_OFFSET);
438 reg_data &= ~XEL_RSR_RECV_DONE_MASK; 450 reg_data &= ~XEL_RSR_RECV_DONE_MASK;
439 __raw_writel(reg_data, addr + XEL_RSR_OFFSET); 451 xemaclite_writel(reg_data, addr + XEL_RSR_OFFSET);
440 452
441 return length; 453 return length;
442} 454}
@@ -463,14 +475,14 @@ static void xemaclite_update_address(struct net_local *drvdata,
463 475
464 xemaclite_aligned_write(address_ptr, (u32 __force *) addr, ETH_ALEN); 476 xemaclite_aligned_write(address_ptr, (u32 __force *) addr, ETH_ALEN);
465 477
466 __raw_writel(ETH_ALEN, addr + XEL_TPLR_OFFSET); 478 xemaclite_writel(ETH_ALEN, addr + XEL_TPLR_OFFSET);
467 479
468 /* Update the MAC address in the EmacLite */ 480 /* Update the MAC address in the EmacLite */
469 reg_data = __raw_readl(addr + XEL_TSR_OFFSET); 481 reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
470 __raw_writel(reg_data | XEL_TSR_PROG_MAC_ADDR, addr + XEL_TSR_OFFSET); 482 xemaclite_writel(reg_data | XEL_TSR_PROG_MAC_ADDR, addr + XEL_TSR_OFFSET);
471 483
472 /* Wait for EmacLite to finish with the MAC address update */ 484 /* Wait for EmacLite to finish with the MAC address update */
473 while ((__raw_readl(addr + XEL_TSR_OFFSET) & 485 while ((xemaclite_readl(addr + XEL_TSR_OFFSET) &
474 XEL_TSR_PROG_MAC_ADDR) != 0) 486 XEL_TSR_PROG_MAC_ADDR) != 0)
475 ; 487 ;
476} 488}
@@ -603,7 +615,7 @@ static void xemaclite_rx_handler(struct net_device *dev)
603 615
604 skb_reserve(skb, 2); 616 skb_reserve(skb, 2);
605 617
606 len = xemaclite_recv_data(lp, (u8 *) skb->data); 618 len = xemaclite_recv_data(lp, (u8 *) skb->data, len);
607 619
608 if (!len) { 620 if (!len) {
609 dev->stats.rx_errors++; 621 dev->stats.rx_errors++;
@@ -640,32 +652,32 @@ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
640 u32 tx_status; 652 u32 tx_status;
641 653
642 /* Check if there is Rx Data available */ 654 /* Check if there is Rx Data available */
643 if ((__raw_readl(base_addr + XEL_RSR_OFFSET) & 655 if ((xemaclite_readl(base_addr + XEL_RSR_OFFSET) &
644 XEL_RSR_RECV_DONE_MASK) || 656 XEL_RSR_RECV_DONE_MASK) ||
645 (__raw_readl(base_addr + XEL_BUFFER_OFFSET + XEL_RSR_OFFSET) 657 (xemaclite_readl(base_addr + XEL_BUFFER_OFFSET + XEL_RSR_OFFSET)
646 & XEL_RSR_RECV_DONE_MASK)) 658 & XEL_RSR_RECV_DONE_MASK))
647 659
648 xemaclite_rx_handler(dev); 660 xemaclite_rx_handler(dev);
649 661
650 /* Check if the Transmission for the first buffer is completed */ 662 /* Check if the Transmission for the first buffer is completed */
651 tx_status = __raw_readl(base_addr + XEL_TSR_OFFSET); 663 tx_status = xemaclite_readl(base_addr + XEL_TSR_OFFSET);
652 if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) && 664 if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) &&
653 (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) { 665 (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {
654 666
655 tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK; 667 tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK;
656 __raw_writel(tx_status, base_addr + XEL_TSR_OFFSET); 668 xemaclite_writel(tx_status, base_addr + XEL_TSR_OFFSET);
657 669
658 tx_complete = true; 670 tx_complete = true;
659 } 671 }
660 672
661 /* Check if the Transmission for the second buffer is completed */ 673 /* Check if the Transmission for the second buffer is completed */
662 tx_status = __raw_readl(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET); 674 tx_status = xemaclite_readl(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
663 if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) && 675 if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) &&
664 (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) { 676 (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {
665 677
666 tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK; 678 tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK;
667 __raw_writel(tx_status, base_addr + XEL_BUFFER_OFFSET + 679 xemaclite_writel(tx_status, base_addr + XEL_BUFFER_OFFSET +
668 XEL_TSR_OFFSET); 680 XEL_TSR_OFFSET);
669 681
670 tx_complete = true; 682 tx_complete = true;
671 } 683 }
@@ -698,7 +710,7 @@ static int xemaclite_mdio_wait(struct net_local *lp)
698 /* wait for the MDIO interface to not be busy or timeout 710 /* wait for the MDIO interface to not be busy or timeout
699 after some time. 711 after some time.
700 */ 712 */
701 while (__raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET) & 713 while (xemaclite_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET) &
702 XEL_MDIOCTRL_MDIOSTS_MASK) { 714 XEL_MDIOCTRL_MDIOSTS_MASK) {
703 if (time_before_eq(end, jiffies)) { 715 if (time_before_eq(end, jiffies)) {
704 WARN_ON(1); 716 WARN_ON(1);
@@ -734,17 +746,17 @@ static int xemaclite_mdio_read(struct mii_bus *bus, int phy_id, int reg)
734 * MDIO Address register. Set the Status bit in the MDIO Control 746 * MDIO Address register. Set the Status bit in the MDIO Control
735 * register to start a MDIO read transaction. 747 * register to start a MDIO read transaction.
736 */ 748 */
737 ctrl_reg = __raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET); 749 ctrl_reg = xemaclite_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET);
738 __raw_writel(XEL_MDIOADDR_OP_MASK | 750 xemaclite_writel(XEL_MDIOADDR_OP_MASK |
739 ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg), 751 ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg),
740 lp->base_addr + XEL_MDIOADDR_OFFSET); 752 lp->base_addr + XEL_MDIOADDR_OFFSET);
741 __raw_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK, 753 xemaclite_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK,
742 lp->base_addr + XEL_MDIOCTRL_OFFSET); 754 lp->base_addr + XEL_MDIOCTRL_OFFSET);
743 755
744 if (xemaclite_mdio_wait(lp)) 756 if (xemaclite_mdio_wait(lp))
745 return -ETIMEDOUT; 757 return -ETIMEDOUT;
746 758
747 rc = __raw_readl(lp->base_addr + XEL_MDIORD_OFFSET); 759 rc = xemaclite_readl(lp->base_addr + XEL_MDIORD_OFFSET);
748 760
749 dev_dbg(&lp->ndev->dev, 761 dev_dbg(&lp->ndev->dev,
750 "xemaclite_mdio_read(phy_id=%i, reg=%x) == %x\n", 762 "xemaclite_mdio_read(phy_id=%i, reg=%x) == %x\n",
@@ -781,13 +793,13 @@ static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg,
781 * Data register. Finally, set the Status bit in the MDIO Control 793 * Data register. Finally, set the Status bit in the MDIO Control
782 * register to start a MDIO write transaction. 794 * register to start a MDIO write transaction.
783 */ 795 */
784 ctrl_reg = __raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET); 796 ctrl_reg = xemaclite_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET);
785 __raw_writel(~XEL_MDIOADDR_OP_MASK & 797 xemaclite_writel(~XEL_MDIOADDR_OP_MASK &
786 ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg), 798 ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg),
787 lp->base_addr + XEL_MDIOADDR_OFFSET); 799 lp->base_addr + XEL_MDIOADDR_OFFSET);
788 __raw_writel(val, lp->base_addr + XEL_MDIOWR_OFFSET); 800 xemaclite_writel(val, lp->base_addr + XEL_MDIOWR_OFFSET);
789 __raw_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK, 801 xemaclite_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK,
790 lp->base_addr + XEL_MDIOCTRL_OFFSET); 802 lp->base_addr + XEL_MDIOCTRL_OFFSET);
791 803
792 return 0; 804 return 0;
793} 805}
@@ -834,8 +846,8 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
834 /* Enable the MDIO bus by asserting the enable bit in MDIO Control 846 /* Enable the MDIO bus by asserting the enable bit in MDIO Control
835 * register. 847 * register.
836 */ 848 */
837 __raw_writel(XEL_MDIOCTRL_MDIOEN_MASK, 849 xemaclite_writel(XEL_MDIOCTRL_MDIOEN_MASK,
838 lp->base_addr + XEL_MDIOCTRL_OFFSET); 850 lp->base_addr + XEL_MDIOCTRL_OFFSET);
839 851
840 bus = mdiobus_alloc(); 852 bus = mdiobus_alloc();
841 if (!bus) { 853 if (!bus) {
@@ -1140,8 +1152,8 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
1140 } 1152 }
1141 1153
1142 /* Clear the Tx CSR's in case this is a restart */ 1154 /* Clear the Tx CSR's in case this is a restart */
1143 __raw_writel(0, lp->base_addr + XEL_TSR_OFFSET); 1155 xemaclite_writel(0, lp->base_addr + XEL_TSR_OFFSET);
1144 __raw_writel(0, lp->base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET); 1156 xemaclite_writel(0, lp->base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
1145 1157
1146 /* Set the MAC address in the EmacLite device */ 1158 /* Set the MAC address in the EmacLite device */
1147 xemaclite_update_address(lp, ndev->dev_addr); 1159 xemaclite_update_address(lp, ndev->dev_addr);
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 3ce1f7da8647..530586be05b4 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -113,10 +113,10 @@ struct xenvif_stats {
113 * A subset of struct net_device_stats that contains only the 113 * A subset of struct net_device_stats that contains only the
114 * fields that are updated in netback.c for each queue. 114 * fields that are updated in netback.c for each queue.
115 */ 115 */
116 unsigned int rx_bytes; 116 u64 rx_bytes;
117 unsigned int rx_packets; 117 u64 rx_packets;
118 unsigned int tx_bytes; 118 u64 tx_bytes;
119 unsigned int tx_packets; 119 u64 tx_packets;
120 120
121 /* Additional stats used by xenvif */ 121 /* Additional stats used by xenvif */
122 unsigned long rx_gso_checksum_fixup; 122 unsigned long rx_gso_checksum_fixup;
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 579521327b03..50fa1692d985 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -221,10 +221,10 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
221{ 221{
222 struct xenvif *vif = netdev_priv(dev); 222 struct xenvif *vif = netdev_priv(dev);
223 struct xenvif_queue *queue = NULL; 223 struct xenvif_queue *queue = NULL;
224 unsigned long rx_bytes = 0; 224 u64 rx_bytes = 0;
225 unsigned long rx_packets = 0; 225 u64 rx_packets = 0;
226 unsigned long tx_bytes = 0; 226 u64 tx_bytes = 0;
227 unsigned long tx_packets = 0; 227 u64 tx_packets = 0;
228 unsigned int index; 228 unsigned int index;
229 229
230 spin_lock(&vif->lock); 230 spin_lock(&vif->lock);
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index 92bc89ae7e20..c970a25d2a49 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -21,20 +21,19 @@ struct cgroup_bpf {
21 */ 21 */
22 struct bpf_prog *prog[MAX_BPF_ATTACH_TYPE]; 22 struct bpf_prog *prog[MAX_BPF_ATTACH_TYPE];
23 struct bpf_prog __rcu *effective[MAX_BPF_ATTACH_TYPE]; 23 struct bpf_prog __rcu *effective[MAX_BPF_ATTACH_TYPE];
24 bool disallow_override[MAX_BPF_ATTACH_TYPE];
24}; 25};
25 26
26void cgroup_bpf_put(struct cgroup *cgrp); 27void cgroup_bpf_put(struct cgroup *cgrp);
27void cgroup_bpf_inherit(struct cgroup *cgrp, struct cgroup *parent); 28void cgroup_bpf_inherit(struct cgroup *cgrp, struct cgroup *parent);
28 29
29void __cgroup_bpf_update(struct cgroup *cgrp, 30int __cgroup_bpf_update(struct cgroup *cgrp, struct cgroup *parent,
30 struct cgroup *parent, 31 struct bpf_prog *prog, enum bpf_attach_type type,
31 struct bpf_prog *prog, 32 bool overridable);
32 enum bpf_attach_type type);
33 33
34/* Wrapper for __cgroup_bpf_update() protected by cgroup_mutex */ 34/* Wrapper for __cgroup_bpf_update() protected by cgroup_mutex */
35void cgroup_bpf_update(struct cgroup *cgrp, 35int cgroup_bpf_update(struct cgroup *cgrp, struct bpf_prog *prog,
36 struct bpf_prog *prog, 36 enum bpf_attach_type type, bool overridable);
37 enum bpf_attach_type type);
38 37
39int __cgroup_bpf_run_filter_skb(struct sock *sk, 38int __cgroup_bpf_run_filter_skb(struct sock *sk,
40 struct sk_buff *skb, 39 struct sk_buff *skb,
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 0eb0e87dbe9f..d2b0ac799d03 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -116,6 +116,12 @@ enum bpf_attach_type {
116 116
117#define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE 117#define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE
118 118
119/* If BPF_F_ALLOW_OVERRIDE flag is used in BPF_PROG_ATTACH command
120 * to the given target_fd cgroup the descendent cgroup will be able to
121 * override effective bpf program that was inherited from this cgroup
122 */
123#define BPF_F_ALLOW_OVERRIDE (1U << 0)
124
119#define BPF_PSEUDO_MAP_FD 1 125#define BPF_PSEUDO_MAP_FD 1
120 126
121/* flags for BPF_MAP_UPDATE_ELEM command */ 127/* flags for BPF_MAP_UPDATE_ELEM command */
@@ -171,6 +177,7 @@ union bpf_attr {
171 __u32 target_fd; /* container object to attach to */ 177 __u32 target_fd; /* container object to attach to */
172 __u32 attach_bpf_fd; /* eBPF program to attach */ 178 __u32 attach_bpf_fd; /* eBPF program to attach */
173 __u32 attach_type; 179 __u32 attach_type;
180 __u32 attach_flags;
174 }; 181 };
175} __attribute__((aligned(8))); 182} __attribute__((aligned(8)));
176 183
diff --git a/include/uapi/linux/l2tp.h b/include/uapi/linux/l2tp.h
index 85ddb74fcd1c..b23c1914a182 100644
--- a/include/uapi/linux/l2tp.h
+++ b/include/uapi/linux/l2tp.h
@@ -9,9 +9,8 @@
9 9
10#include <linux/types.h> 10#include <linux/types.h>
11#include <linux/socket.h> 11#include <linux/socket.h>
12#ifndef __KERNEL__ 12#include <linux/in.h>
13#include <netinet/in.h> 13#include <linux/in6.h>
14#endif
15 14
16#define IPPROTO_L2TP 115 15#define IPPROTO_L2TP 115
17 16
@@ -31,7 +30,7 @@ struct sockaddr_l2tpip {
31 __u32 l2tp_conn_id; /* Connection ID of tunnel */ 30 __u32 l2tp_conn_id; /* Connection ID of tunnel */
32 31
33 /* Pad to size of `struct sockaddr'. */ 32 /* Pad to size of `struct sockaddr'. */
34 unsigned char __pad[sizeof(struct sockaddr) - 33 unsigned char __pad[__SOCK_SIZE__ -
35 sizeof(__kernel_sa_family_t) - 34 sizeof(__kernel_sa_family_t) -
36 sizeof(__be16) - sizeof(struct in_addr) - 35 sizeof(__be16) - sizeof(struct in_addr) -
37 sizeof(__u32)]; 36 sizeof(__u32)];
diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
index a515f7b007c6..da0f53690295 100644
--- a/kernel/bpf/cgroup.c
+++ b/kernel/bpf/cgroup.c
@@ -52,6 +52,7 @@ void cgroup_bpf_inherit(struct cgroup *cgrp, struct cgroup *parent)
52 e = rcu_dereference_protected(parent->bpf.effective[type], 52 e = rcu_dereference_protected(parent->bpf.effective[type],
53 lockdep_is_held(&cgroup_mutex)); 53 lockdep_is_held(&cgroup_mutex));
54 rcu_assign_pointer(cgrp->bpf.effective[type], e); 54 rcu_assign_pointer(cgrp->bpf.effective[type], e);
55 cgrp->bpf.disallow_override[type] = parent->bpf.disallow_override[type];
55 } 56 }
56} 57}
57 58
@@ -82,30 +83,63 @@ void cgroup_bpf_inherit(struct cgroup *cgrp, struct cgroup *parent)
82 * 83 *
83 * Must be called with cgroup_mutex held. 84 * Must be called with cgroup_mutex held.
84 */ 85 */
85void __cgroup_bpf_update(struct cgroup *cgrp, 86int __cgroup_bpf_update(struct cgroup *cgrp, struct cgroup *parent,
86 struct cgroup *parent, 87 struct bpf_prog *prog, enum bpf_attach_type type,
87 struct bpf_prog *prog, 88 bool new_overridable)
88 enum bpf_attach_type type)
89{ 89{
90 struct bpf_prog *old_prog, *effective; 90 struct bpf_prog *old_prog, *effective = NULL;
91 struct cgroup_subsys_state *pos; 91 struct cgroup_subsys_state *pos;
92 bool overridable = true;
92 93
93 old_prog = xchg(cgrp->bpf.prog + type, prog); 94 if (parent) {
95 overridable = !parent->bpf.disallow_override[type];
96 effective = rcu_dereference_protected(parent->bpf.effective[type],
97 lockdep_is_held(&cgroup_mutex));
98 }
99
100 if (prog && effective && !overridable)
101 /* if parent has non-overridable prog attached, disallow
102 * attaching new programs to descendent cgroup
103 */
104 return -EPERM;
105
106 if (prog && effective && overridable != new_overridable)
107 /* if parent has overridable prog attached, only
108 * allow overridable programs in descendent cgroup
109 */
110 return -EPERM;
94 111
95 effective = (!prog && parent) ? 112 old_prog = cgrp->bpf.prog[type];
96 rcu_dereference_protected(parent->bpf.effective[type], 113
97 lockdep_is_held(&cgroup_mutex)) : 114 if (prog) {
98 prog; 115 overridable = new_overridable;
116 effective = prog;
117 if (old_prog &&
118 cgrp->bpf.disallow_override[type] == new_overridable)
119 /* disallow attaching non-overridable on top
120 * of existing overridable in this cgroup
121 * and vice versa
122 */
123 return -EPERM;
124 }
125
126 if (!prog && !old_prog)
127 /* report error when trying to detach and nothing is attached */
128 return -ENOENT;
129
130 cgrp->bpf.prog[type] = prog;
99 131
100 css_for_each_descendant_pre(pos, &cgrp->self) { 132 css_for_each_descendant_pre(pos, &cgrp->self) {
101 struct cgroup *desc = container_of(pos, struct cgroup, self); 133 struct cgroup *desc = container_of(pos, struct cgroup, self);
102 134
103 /* skip the subtree if the descendant has its own program */ 135 /* skip the subtree if the descendant has its own program */
104 if (desc->bpf.prog[type] && desc != cgrp) 136 if (desc->bpf.prog[type] && desc != cgrp) {
105 pos = css_rightmost_descendant(pos); 137 pos = css_rightmost_descendant(pos);
106 else 138 } else {
107 rcu_assign_pointer(desc->bpf.effective[type], 139 rcu_assign_pointer(desc->bpf.effective[type],
108 effective); 140 effective);
141 desc->bpf.disallow_override[type] = !overridable;
142 }
109 } 143 }
110 144
111 if (prog) 145 if (prog)
@@ -115,6 +149,7 @@ void __cgroup_bpf_update(struct cgroup *cgrp,
115 bpf_prog_put(old_prog); 149 bpf_prog_put(old_prog);
116 static_branch_dec(&cgroup_bpf_enabled_key); 150 static_branch_dec(&cgroup_bpf_enabled_key);
117 } 151 }
152 return 0;
118} 153}
119 154
120/** 155/**
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 19b6129eab23..bbb016adbaeb 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -920,13 +920,14 @@ static int bpf_obj_get(const union bpf_attr *attr)
920 920
921#ifdef CONFIG_CGROUP_BPF 921#ifdef CONFIG_CGROUP_BPF
922 922
923#define BPF_PROG_ATTACH_LAST_FIELD attach_type 923#define BPF_PROG_ATTACH_LAST_FIELD attach_flags
924 924
925static int bpf_prog_attach(const union bpf_attr *attr) 925static int bpf_prog_attach(const union bpf_attr *attr)
926{ 926{
927 enum bpf_prog_type ptype;
927 struct bpf_prog *prog; 928 struct bpf_prog *prog;
928 struct cgroup *cgrp; 929 struct cgroup *cgrp;
929 enum bpf_prog_type ptype; 930 int ret;
930 931
931 if (!capable(CAP_NET_ADMIN)) 932 if (!capable(CAP_NET_ADMIN))
932 return -EPERM; 933 return -EPERM;
@@ -934,6 +935,9 @@ static int bpf_prog_attach(const union bpf_attr *attr)
934 if (CHECK_ATTR(BPF_PROG_ATTACH)) 935 if (CHECK_ATTR(BPF_PROG_ATTACH))
935 return -EINVAL; 936 return -EINVAL;
936 937
938 if (attr->attach_flags & ~BPF_F_ALLOW_OVERRIDE)
939 return -EINVAL;
940
937 switch (attr->attach_type) { 941 switch (attr->attach_type) {
938 case BPF_CGROUP_INET_INGRESS: 942 case BPF_CGROUP_INET_INGRESS:
939 case BPF_CGROUP_INET_EGRESS: 943 case BPF_CGROUP_INET_EGRESS:
@@ -956,10 +960,13 @@ static int bpf_prog_attach(const union bpf_attr *attr)
956 return PTR_ERR(cgrp); 960 return PTR_ERR(cgrp);
957 } 961 }
958 962
959 cgroup_bpf_update(cgrp, prog, attr->attach_type); 963 ret = cgroup_bpf_update(cgrp, prog, attr->attach_type,
964 attr->attach_flags & BPF_F_ALLOW_OVERRIDE);
965 if (ret)
966 bpf_prog_put(prog);
960 cgroup_put(cgrp); 967 cgroup_put(cgrp);
961 968
962 return 0; 969 return ret;
963} 970}
964 971
965#define BPF_PROG_DETACH_LAST_FIELD attach_type 972#define BPF_PROG_DETACH_LAST_FIELD attach_type
@@ -967,6 +974,7 @@ static int bpf_prog_attach(const union bpf_attr *attr)
967static int bpf_prog_detach(const union bpf_attr *attr) 974static int bpf_prog_detach(const union bpf_attr *attr)
968{ 975{
969 struct cgroup *cgrp; 976 struct cgroup *cgrp;
977 int ret;
970 978
971 if (!capable(CAP_NET_ADMIN)) 979 if (!capable(CAP_NET_ADMIN))
972 return -EPERM; 980 return -EPERM;
@@ -982,7 +990,7 @@ static int bpf_prog_detach(const union bpf_attr *attr)
982 if (IS_ERR(cgrp)) 990 if (IS_ERR(cgrp))
983 return PTR_ERR(cgrp); 991 return PTR_ERR(cgrp);
984 992
985 cgroup_bpf_update(cgrp, NULL, attr->attach_type); 993 ret = cgroup_bpf_update(cgrp, NULL, attr->attach_type, false);
986 cgroup_put(cgrp); 994 cgroup_put(cgrp);
987 break; 995 break;
988 996
@@ -990,7 +998,7 @@ static int bpf_prog_detach(const union bpf_attr *attr)
990 return -EINVAL; 998 return -EINVAL;
991 } 999 }
992 1000
993 return 0; 1001 return ret;
994} 1002}
995#endif /* CONFIG_CGROUP_BPF */ 1003#endif /* CONFIG_CGROUP_BPF */
996 1004
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 688dd02af985..53bbca7c4859 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -6498,15 +6498,16 @@ static __init int cgroup_namespaces_init(void)
6498subsys_initcall(cgroup_namespaces_init); 6498subsys_initcall(cgroup_namespaces_init);
6499 6499
6500#ifdef CONFIG_CGROUP_BPF 6500#ifdef CONFIG_CGROUP_BPF
6501void cgroup_bpf_update(struct cgroup *cgrp, 6501int cgroup_bpf_update(struct cgroup *cgrp, struct bpf_prog *prog,
6502 struct bpf_prog *prog, 6502 enum bpf_attach_type type, bool overridable)
6503 enum bpf_attach_type type)
6504{ 6503{
6505 struct cgroup *parent = cgroup_parent(cgrp); 6504 struct cgroup *parent = cgroup_parent(cgrp);
6505 int ret;
6506 6506
6507 mutex_lock(&cgroup_mutex); 6507 mutex_lock(&cgroup_mutex);
6508 __cgroup_bpf_update(cgrp, parent, prog, type); 6508 ret = __cgroup_bpf_update(cgrp, parent, prog, type, overridable);
6509 mutex_unlock(&cgroup_mutex); 6509 mutex_unlock(&cgroup_mutex);
6510 return ret;
6510} 6511}
6511#endif /* CONFIG_CGROUP_BPF */ 6512#endif /* CONFIG_CGROUP_BPF */
6512 6513
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 7bb12e07ffef..e7c12caa20c8 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -2923,7 +2923,8 @@ static void neigh_proc_update(struct ctl_table *ctl, int write)
2923 return; 2923 return;
2924 2924
2925 set_bit(index, p->data_state); 2925 set_bit(index, p->data_state);
2926 call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p); 2926 if (index == NEIGH_VAR_DELAY_PROBE_TIME)
2927 call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
2927 if (!dev) /* NULL dev means this is default value */ 2928 if (!dev) /* NULL dev means this is default value */
2928 neigh_copy_dflt_parms(net, p, index); 2929 neigh_copy_dflt_parms(net, p, index);
2929} 2930}
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 89a8cac4726a..51b27ae09fbd 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -1263,7 +1263,7 @@ void __init arp_init(void)
1263/* 1263/*
1264 * ax25 -> ASCII conversion 1264 * ax25 -> ASCII conversion
1265 */ 1265 */
1266static char *ax2asc2(ax25_address *a, char *buf) 1266static void ax2asc2(ax25_address *a, char *buf)
1267{ 1267{
1268 char c, *s; 1268 char c, *s;
1269 int n; 1269 int n;
@@ -1285,10 +1285,10 @@ static char *ax2asc2(ax25_address *a, char *buf)
1285 *s++ = n + '0'; 1285 *s++ = n + '0';
1286 *s++ = '\0'; 1286 *s++ = '\0';
1287 1287
1288 if (*buf == '\0' || *buf == '-') 1288 if (*buf == '\0' || *buf == '-') {
1289 return "*"; 1289 buf[0] = '*';
1290 1290 buf[1] = '\0';
1291 return buf; 1291 }
1292} 1292}
1293#endif /* CONFIG_AX25 */ 1293#endif /* CONFIG_AX25 */
1294 1294
@@ -1322,7 +1322,7 @@ static void arp_format_neigh_entry(struct seq_file *seq,
1322 } 1322 }
1323#endif 1323#endif
1324 sprintf(tbuf, "%pI4", n->primary_key); 1324 sprintf(tbuf, "%pI4", n->primary_key);
1325 seq_printf(seq, "%-16s 0x%-10x0x%-10x%s * %s\n", 1325 seq_printf(seq, "%-16s 0x%-10x0x%-10x%-17s * %s\n",
1326 tbuf, hatype, arp_state_to_flags(n), hbuffer, dev->name); 1326 tbuf, hatype, arp_state_to_flags(n), hbuffer, dev->name);
1327 read_unlock(&n->lock); 1327 read_unlock(&n->lock);
1328} 1328}
diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
index f6c50af24a64..3d063eb37848 100644
--- a/net/ipv4/tcp_probe.c
+++ b/net/ipv4/tcp_probe.c
@@ -117,7 +117,7 @@ static void jtcp_rcv_established(struct sock *sk, struct sk_buff *skb,
117 (fwmark > 0 && skb->mark == fwmark)) && 117 (fwmark > 0 && skb->mark == fwmark)) &&
118 (full || tp->snd_cwnd != tcp_probe.lastcwnd)) { 118 (full || tp->snd_cwnd != tcp_probe.lastcwnd)) {
119 119
120 spin_lock(&tcp_probe.lock); 120 spin_lock_bh(&tcp_probe.lock);
121 /* If log fills, just silently drop */ 121 /* If log fills, just silently drop */
122 if (tcp_probe_avail() > 1) { 122 if (tcp_probe_avail() > 1) {
123 struct tcp_log *p = tcp_probe.log + tcp_probe.head; 123 struct tcp_log *p = tcp_probe.log + tcp_probe.head;
@@ -157,7 +157,7 @@ static void jtcp_rcv_established(struct sock *sk, struct sk_buff *skb,
157 tcp_probe.head = (tcp_probe.head + 1) & (bufsize - 1); 157 tcp_probe.head = (tcp_probe.head + 1) & (bufsize - 1);
158 } 158 }
159 tcp_probe.lastcwnd = tp->snd_cwnd; 159 tcp_probe.lastcwnd = tp->snd_cwnd;
160 spin_unlock(&tcp_probe.lock); 160 spin_unlock_bh(&tcp_probe.lock);
161 161
162 wake_up(&tcp_probe.wait); 162 wake_up(&tcp_probe.wait);
163 } 163 }
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index a3eaafd87100..eec27f87efac 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -167,18 +167,22 @@ int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr,
167 if (np->sndflow) 167 if (np->sndflow)
168 fl6_flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK; 168 fl6_flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
169 169
170 addr_type = ipv6_addr_type(&usin->sin6_addr); 170 if (ipv6_addr_any(&usin->sin6_addr)) {
171
172 if (addr_type == IPV6_ADDR_ANY) {
173 /* 171 /*
174 * connect to self 172 * connect to self
175 */ 173 */
176 usin->sin6_addr.s6_addr[15] = 0x01; 174 if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
175 ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
176 &usin->sin6_addr);
177 else
178 usin->sin6_addr = in6addr_loopback;
177 } 179 }
178 180
181 addr_type = ipv6_addr_type(&usin->sin6_addr);
182
179 daddr = &usin->sin6_addr; 183 daddr = &usin->sin6_addr;
180 184
181 if (addr_type == IPV6_ADDR_MAPPED) { 185 if (addr_type & IPV6_ADDR_MAPPED) {
182 struct sockaddr_in sin; 186 struct sockaddr_in sin;
183 187
184 if (__ipv6_only_sock(sk)) { 188 if (__ipv6_only_sock(sk)) {
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index b6a94ff0bbd0..e164684456df 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1021,6 +1021,9 @@ static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
1021 } 1021 }
1022 } 1022 }
1023#endif 1023#endif
1024 if (ipv6_addr_v4mapped(&fl6->saddr) &&
1025 !(ipv6_addr_v4mapped(&fl6->daddr) || ipv6_addr_any(&fl6->daddr)))
1026 return -EAFNOSUPPORT;
1024 1027
1025 return 0; 1028 return 0;
1026 1029
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index eaad72c3d746..4c60c6f71cd3 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -148,8 +148,13 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
148 * connect() to INADDR_ANY means loopback (BSD'ism). 148 * connect() to INADDR_ANY means loopback (BSD'ism).
149 */ 149 */
150 150
151 if (ipv6_addr_any(&usin->sin6_addr)) 151 if (ipv6_addr_any(&usin->sin6_addr)) {
152 usin->sin6_addr.s6_addr[15] = 0x1; 152 if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
153 ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
154 &usin->sin6_addr);
155 else
156 usin->sin6_addr = in6addr_loopback;
157 }
153 158
154 addr_type = ipv6_addr_type(&usin->sin6_addr); 159 addr_type = ipv6_addr_type(&usin->sin6_addr);
155 160
@@ -188,7 +193,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
188 * TCP over IPv4 193 * TCP over IPv4
189 */ 194 */
190 195
191 if (addr_type == IPV6_ADDR_MAPPED) { 196 if (addr_type & IPV6_ADDR_MAPPED) {
192 u32 exthdrlen = icsk->icsk_ext_hdr_len; 197 u32 exthdrlen = icsk->icsk_ext_hdr_len;
193 struct sockaddr_in sin; 198 struct sockaddr_in sin;
194 199
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 8990856f5101..221825a9407a 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1033,6 +1033,10 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
1033 if (addr_len < SIN6_LEN_RFC2133) 1033 if (addr_len < SIN6_LEN_RFC2133)
1034 return -EINVAL; 1034 return -EINVAL;
1035 daddr = &sin6->sin6_addr; 1035 daddr = &sin6->sin6_addr;
1036 if (ipv6_addr_any(daddr) &&
1037 ipv6_addr_v4mapped(&np->saddr))
1038 ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
1039 daddr);
1036 break; 1040 break;
1037 case AF_INET: 1041 case AF_INET:
1038 goto do_udp_sendmsg; 1042 goto do_udp_sendmsg;
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index 64f0e8531af0..a646f3481240 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -1044,8 +1044,10 @@ wait_for_memory:
1044 } else { 1044 } else {
1045 /* Message not complete, save state */ 1045 /* Message not complete, save state */
1046partial_message: 1046partial_message:
1047 kcm->seq_skb = head; 1047 if (head) {
1048 kcm_tx_msg(head)->last_skb = skb; 1048 kcm->seq_skb = head;
1049 kcm_tx_msg(head)->last_skb = skb;
1050 }
1049 } 1051 }
1050 1052
1051 KCM_STATS_ADD(kcm->stats.tx_bytes, copied); 1053 KCM_STATS_ADD(kcm->stats.tx_bytes, copied);
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
index 3e821daf9dd4..8bc5a1bd2d45 100644
--- a/net/llc/llc_conn.c
+++ b/net/llc/llc_conn.c
@@ -821,7 +821,10 @@ void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb)
821 * another trick required to cope with how the PROCOM state 821 * another trick required to cope with how the PROCOM state
822 * machine works. -acme 822 * machine works. -acme
823 */ 823 */
824 skb_orphan(skb);
825 sock_hold(sk);
824 skb->sk = sk; 826 skb->sk = sk;
827 skb->destructor = sock_efree;
825 } 828 }
826 if (!sock_owned_by_user(sk)) 829 if (!sock_owned_by_user(sk))
827 llc_conn_rcv(sk, skb); 830 llc_conn_rcv(sk, skb);
diff --git a/net/llc/llc_sap.c b/net/llc/llc_sap.c
index d0e1e804ebd7..5404d0d195cc 100644
--- a/net/llc/llc_sap.c
+++ b/net/llc/llc_sap.c
@@ -290,7 +290,10 @@ static void llc_sap_rcv(struct llc_sap *sap, struct sk_buff *skb,
290 290
291 ev->type = LLC_SAP_EV_TYPE_PDU; 291 ev->type = LLC_SAP_EV_TYPE_PDU;
292 ev->reason = 0; 292 ev->reason = 0;
293 skb_orphan(skb);
294 sock_hold(sk);
293 skb->sk = sk; 295 skb->sk = sk;
296 skb->destructor = sock_efree;
294 llc_sap_state_process(sap, skb); 297 llc_sap_state_process(sap, skb);
295} 298}
296 299
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index d56ee46b11fc..0f03f6a53b4d 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1619,6 +1619,7 @@ static void fanout_release_data(struct packet_fanout *f)
1619 1619
1620static int fanout_add(struct sock *sk, u16 id, u16 type_flags) 1620static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
1621{ 1621{
1622 struct packet_rollover *rollover = NULL;
1622 struct packet_sock *po = pkt_sk(sk); 1623 struct packet_sock *po = pkt_sk(sk);
1623 struct packet_fanout *f, *match; 1624 struct packet_fanout *f, *match;
1624 u8 type = type_flags & 0xff; 1625 u8 type = type_flags & 0xff;
@@ -1641,23 +1642,28 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
1641 return -EINVAL; 1642 return -EINVAL;
1642 } 1643 }
1643 1644
1645 mutex_lock(&fanout_mutex);
1646
1647 err = -EINVAL;
1644 if (!po->running) 1648 if (!po->running)
1645 return -EINVAL; 1649 goto out;
1646 1650
1651 err = -EALREADY;
1647 if (po->fanout) 1652 if (po->fanout)
1648 return -EALREADY; 1653 goto out;
1649 1654
1650 if (type == PACKET_FANOUT_ROLLOVER || 1655 if (type == PACKET_FANOUT_ROLLOVER ||
1651 (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) { 1656 (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) {
1652 po->rollover = kzalloc(sizeof(*po->rollover), GFP_KERNEL); 1657 err = -ENOMEM;
1653 if (!po->rollover) 1658 rollover = kzalloc(sizeof(*rollover), GFP_KERNEL);
1654 return -ENOMEM; 1659 if (!rollover)
1655 atomic_long_set(&po->rollover->num, 0); 1660 goto out;
1656 atomic_long_set(&po->rollover->num_huge, 0); 1661 atomic_long_set(&rollover->num, 0);
1657 atomic_long_set(&po->rollover->num_failed, 0); 1662 atomic_long_set(&rollover->num_huge, 0);
1663 atomic_long_set(&rollover->num_failed, 0);
1664 po->rollover = rollover;
1658 } 1665 }
1659 1666
1660 mutex_lock(&fanout_mutex);
1661 match = NULL; 1667 match = NULL;
1662 list_for_each_entry(f, &fanout_list, list) { 1668 list_for_each_entry(f, &fanout_list, list) {
1663 if (f->id == id && 1669 if (f->id == id &&
@@ -1704,11 +1710,11 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
1704 } 1710 }
1705 } 1711 }
1706out: 1712out:
1707 mutex_unlock(&fanout_mutex); 1713 if (err && rollover) {
1708 if (err) { 1714 kfree(rollover);
1709 kfree(po->rollover);
1710 po->rollover = NULL; 1715 po->rollover = NULL;
1711 } 1716 }
1717 mutex_unlock(&fanout_mutex);
1712 return err; 1718 return err;
1713} 1719}
1714 1720
@@ -1717,23 +1723,22 @@ static void fanout_release(struct sock *sk)
1717 struct packet_sock *po = pkt_sk(sk); 1723 struct packet_sock *po = pkt_sk(sk);
1718 struct packet_fanout *f; 1724 struct packet_fanout *f;
1719 1725
1720 f = po->fanout;
1721 if (!f)
1722 return;
1723
1724 mutex_lock(&fanout_mutex); 1726 mutex_lock(&fanout_mutex);
1725 po->fanout = NULL; 1727 f = po->fanout;
1728 if (f) {
1729 po->fanout = NULL;
1730
1731 if (atomic_dec_and_test(&f->sk_ref)) {
1732 list_del(&f->list);
1733 dev_remove_pack(&f->prot_hook);
1734 fanout_release_data(f);
1735 kfree(f);
1736 }
1726 1737
1727 if (atomic_dec_and_test(&f->sk_ref)) { 1738 if (po->rollover)
1728 list_del(&f->list); 1739 kfree_rcu(po->rollover, rcu);
1729 dev_remove_pack(&f->prot_hook);
1730 fanout_release_data(f);
1731 kfree(f);
1732 } 1740 }
1733 mutex_unlock(&fanout_mutex); 1741 mutex_unlock(&fanout_mutex);
1734
1735 if (po->rollover)
1736 kfree_rcu(po->rollover, rcu);
1737} 1742}
1738 1743
1739static bool packet_extra_vlan_len_allowed(const struct net_device *dev, 1744static bool packet_extra_vlan_len_allowed(const struct net_device *dev,
diff --git a/samples/bpf/test_cgrp2_attach.c b/samples/bpf/test_cgrp2_attach.c
index 504058631ffc..4bfcaf93fcf3 100644
--- a/samples/bpf/test_cgrp2_attach.c
+++ b/samples/bpf/test_cgrp2_attach.c
@@ -104,7 +104,7 @@ static int attach_filter(int cg_fd, int type, int verdict)
104 return EXIT_FAILURE; 104 return EXIT_FAILURE;
105 } 105 }
106 106
107 ret = bpf_prog_attach(prog_fd, cg_fd, type); 107 ret = bpf_prog_attach(prog_fd, cg_fd, type, 0);
108 if (ret < 0) { 108 if (ret < 0) {
109 printf("Failed to attach prog to cgroup: '%s'\n", 109 printf("Failed to attach prog to cgroup: '%s'\n",
110 strerror(errno)); 110 strerror(errno));
diff --git a/samples/bpf/test_cgrp2_attach2.c b/samples/bpf/test_cgrp2_attach2.c
index 6e69be37f87f..3049b1f26267 100644
--- a/samples/bpf/test_cgrp2_attach2.c
+++ b/samples/bpf/test_cgrp2_attach2.c
@@ -79,11 +79,12 @@ int main(int argc, char **argv)
79 if (join_cgroup(FOO)) 79 if (join_cgroup(FOO))
80 goto err; 80 goto err;
81 81
82 if (bpf_prog_attach(drop_prog, foo, BPF_CGROUP_INET_EGRESS)) { 82 if (bpf_prog_attach(drop_prog, foo, BPF_CGROUP_INET_EGRESS, 1)) {
83 log_err("Attaching prog to /foo"); 83 log_err("Attaching prog to /foo");
84 goto err; 84 goto err;
85 } 85 }
86 86
87 printf("Attached DROP prog. This ping in cgroup /foo should fail...\n");
87 assert(system(PING_CMD) != 0); 88 assert(system(PING_CMD) != 0);
88 89
89 /* Create cgroup /foo/bar, get fd, and join it */ 90 /* Create cgroup /foo/bar, get fd, and join it */
@@ -94,24 +95,27 @@ int main(int argc, char **argv)
94 if (join_cgroup(BAR)) 95 if (join_cgroup(BAR))
95 goto err; 96 goto err;
96 97
98 printf("Attached DROP prog. This ping in cgroup /foo/bar should fail...\n");
97 assert(system(PING_CMD) != 0); 99 assert(system(PING_CMD) != 0);
98 100
99 if (bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS)) { 101 if (bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 1)) {
100 log_err("Attaching prog to /foo/bar"); 102 log_err("Attaching prog to /foo/bar");
101 goto err; 103 goto err;
102 } 104 }
103 105
106 printf("Attached PASS prog. This ping in cgroup /foo/bar should pass...\n");
104 assert(system(PING_CMD) == 0); 107 assert(system(PING_CMD) == 0);
105 108
106
107 if (bpf_prog_detach(bar, BPF_CGROUP_INET_EGRESS)) { 109 if (bpf_prog_detach(bar, BPF_CGROUP_INET_EGRESS)) {
108 log_err("Detaching program from /foo/bar"); 110 log_err("Detaching program from /foo/bar");
109 goto err; 111 goto err;
110 } 112 }
111 113
114 printf("Detached PASS from /foo/bar while DROP is attached to /foo.\n"
115 "This ping in cgroup /foo/bar should fail...\n");
112 assert(system(PING_CMD) != 0); 116 assert(system(PING_CMD) != 0);
113 117
114 if (bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS)) { 118 if (bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 1)) {
115 log_err("Attaching prog to /foo/bar"); 119 log_err("Attaching prog to /foo/bar");
116 goto err; 120 goto err;
117 } 121 }
@@ -121,8 +125,60 @@ int main(int argc, char **argv)
121 goto err; 125 goto err;
122 } 126 }
123 127
128 printf("Attached PASS from /foo/bar and detached DROP from /foo.\n"
129 "This ping in cgroup /foo/bar should pass...\n");
124 assert(system(PING_CMD) == 0); 130 assert(system(PING_CMD) == 0);
125 131
132 if (bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 1)) {
133 log_err("Attaching prog to /foo/bar");
134 goto err;
135 }
136
137 if (!bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 0)) {
138 errno = 0;
139 log_err("Unexpected success attaching prog to /foo/bar");
140 goto err;
141 }
142
143 if (bpf_prog_detach(bar, BPF_CGROUP_INET_EGRESS)) {
144 log_err("Detaching program from /foo/bar");
145 goto err;
146 }
147
148 if (!bpf_prog_detach(foo, BPF_CGROUP_INET_EGRESS)) {
149 errno = 0;
150 log_err("Unexpected success in double detach from /foo");
151 goto err;
152 }
153
154 if (bpf_prog_attach(allow_prog, foo, BPF_CGROUP_INET_EGRESS, 0)) {
155 log_err("Attaching non-overridable prog to /foo");
156 goto err;
157 }
158
159 if (!bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 0)) {
160 errno = 0;
161 log_err("Unexpected success attaching non-overridable prog to /foo/bar");
162 goto err;
163 }
164
165 if (!bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 1)) {
166 errno = 0;
167 log_err("Unexpected success attaching overridable prog to /foo/bar");
168 goto err;
169 }
170
171 if (!bpf_prog_attach(allow_prog, foo, BPF_CGROUP_INET_EGRESS, 1)) {
172 errno = 0;
173 log_err("Unexpected success attaching overridable prog to /foo");
174 goto err;
175 }
176
177 if (bpf_prog_attach(drop_prog, foo, BPF_CGROUP_INET_EGRESS, 0)) {
178 log_err("Attaching different non-overridable prog to /foo");
179 goto err;
180 }
181
126 goto out; 182 goto out;
127 183
128err: 184err:
@@ -132,5 +188,9 @@ out:
132 close(foo); 188 close(foo);
133 close(bar); 189 close(bar);
134 cleanup_cgroup_environment(); 190 cleanup_cgroup_environment();
191 if (!rc)
192 printf("PASS\n");
193 else
194 printf("FAIL\n");
135 return rc; 195 return rc;
136} 196}
diff --git a/samples/bpf/test_cgrp2_sock.c b/samples/bpf/test_cgrp2_sock.c
index 0791b949cbe4..c3cfb23e23b5 100644
--- a/samples/bpf/test_cgrp2_sock.c
+++ b/samples/bpf/test_cgrp2_sock.c
@@ -75,7 +75,7 @@ int main(int argc, char **argv)
75 return EXIT_FAILURE; 75 return EXIT_FAILURE;
76 } 76 }
77 77
78 ret = bpf_prog_attach(prog_fd, cg_fd, BPF_CGROUP_INET_SOCK_CREATE); 78 ret = bpf_prog_attach(prog_fd, cg_fd, BPF_CGROUP_INET_SOCK_CREATE, 0);
79 if (ret < 0) { 79 if (ret < 0) {
80 printf("Failed to attach prog to cgroup: '%s'\n", 80 printf("Failed to attach prog to cgroup: '%s'\n",
81 strerror(errno)); 81 strerror(errno));
diff --git a/samples/bpf/test_cgrp2_sock2.c b/samples/bpf/test_cgrp2_sock2.c
index 455ef0d06e93..db036077b644 100644
--- a/samples/bpf/test_cgrp2_sock2.c
+++ b/samples/bpf/test_cgrp2_sock2.c
@@ -55,7 +55,7 @@ int main(int argc, char **argv)
55 } 55 }
56 56
57 ret = bpf_prog_attach(prog_fd[filter_id], cg_fd, 57 ret = bpf_prog_attach(prog_fd[filter_id], cg_fd,
58 BPF_CGROUP_INET_SOCK_CREATE); 58 BPF_CGROUP_INET_SOCK_CREATE, 0);
59 if (ret < 0) { 59 if (ret < 0) {
60 printf("Failed to attach prog to cgroup: '%s'\n", 60 printf("Failed to attach prog to cgroup: '%s'\n",
61 strerror(errno)); 61 strerror(errno));
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 0eb0e87dbe9f..d2b0ac799d03 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -116,6 +116,12 @@ enum bpf_attach_type {
116 116
117#define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE 117#define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE
118 118
119/* If BPF_F_ALLOW_OVERRIDE flag is used in BPF_PROG_ATTACH command
120 * to the given target_fd cgroup the descendent cgroup will be able to
121 * override effective bpf program that was inherited from this cgroup
122 */
123#define BPF_F_ALLOW_OVERRIDE (1U << 0)
124
119#define BPF_PSEUDO_MAP_FD 1 125#define BPF_PSEUDO_MAP_FD 1
120 126
121/* flags for BPF_MAP_UPDATE_ELEM command */ 127/* flags for BPF_MAP_UPDATE_ELEM command */
@@ -171,6 +177,7 @@ union bpf_attr {
171 __u32 target_fd; /* container object to attach to */ 177 __u32 target_fd; /* container object to attach to */
172 __u32 attach_bpf_fd; /* eBPF program to attach */ 178 __u32 attach_bpf_fd; /* eBPF program to attach */
173 __u32 attach_type; 179 __u32 attach_type;
180 __u32 attach_flags;
174 }; 181 };
175} __attribute__((aligned(8))); 182} __attribute__((aligned(8)));
176 183
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index 3ddb58a36d3c..ae752fa4eaa7 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -168,7 +168,8 @@ int bpf_obj_get(const char *pathname)
168 return sys_bpf(BPF_OBJ_GET, &attr, sizeof(attr)); 168 return sys_bpf(BPF_OBJ_GET, &attr, sizeof(attr));
169} 169}
170 170
171int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type) 171int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type,
172 unsigned int flags)
172{ 173{
173 union bpf_attr attr; 174 union bpf_attr attr;
174 175
@@ -176,6 +177,7 @@ int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type)
176 attr.target_fd = target_fd; 177 attr.target_fd = target_fd;
177 attr.attach_bpf_fd = prog_fd; 178 attr.attach_bpf_fd = prog_fd;
178 attr.attach_type = type; 179 attr.attach_type = type;
180 attr.attach_flags = flags;
179 181
180 return sys_bpf(BPF_PROG_ATTACH, &attr, sizeof(attr)); 182 return sys_bpf(BPF_PROG_ATTACH, &attr, sizeof(attr));
181} 183}
diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
index a2f9853dd882..4ac6c4b84100 100644
--- a/tools/lib/bpf/bpf.h
+++ b/tools/lib/bpf/bpf.h
@@ -41,7 +41,8 @@ int bpf_map_delete_elem(int fd, void *key);
41int bpf_map_get_next_key(int fd, void *key, void *next_key); 41int bpf_map_get_next_key(int fd, void *key, void *next_key);
42int bpf_obj_pin(int fd, const char *pathname); 42int bpf_obj_pin(int fd, const char *pathname);
43int bpf_obj_get(const char *pathname); 43int bpf_obj_get(const char *pathname);
44int bpf_prog_attach(int prog_fd, int attachable_fd, enum bpf_attach_type type); 44int bpf_prog_attach(int prog_fd, int attachable_fd, enum bpf_attach_type type,
45 unsigned int flags);
45int bpf_prog_detach(int attachable_fd, enum bpf_attach_type type); 46int bpf_prog_detach(int attachable_fd, enum bpf_attach_type type);
46 47
47 48