110 files changed, 1149 insertions, 438 deletions
diff --git a/Documentation/bpf/bpf_design_QA.rst b/Documentation/bpf/bpf_design_QA.rst
index 6780a6d81745..7cc9e368c1e9 100644
--- a/Documentation/bpf/bpf_design_QA.rst
+++ b/Documentation/bpf/bpf_design_QA.rst
@@ -157,12 +157,11 @@ Q: Does BPF have a stable ABI? | |||
157 | ------------------------------ | 157 | ------------------------------ |
158 | A: YES. BPF instructions, arguments to BPF programs, set of helper | 158 | A: YES. BPF instructions, arguments to BPF programs, set of helper |
159 | functions and their arguments, recognized return codes are all part | 159 | functions and their arguments, recognized return codes are all part |
160 | of ABI. However when tracing programs are using bpf_probe_read() helper | 160 | of ABI. However there is one specific exception to tracing programs |
161 | to walk kernel internal datastructures and compile with kernel | 161 | which are using helpers like bpf_probe_read() to walk kernel internal |
162 | internal headers these accesses can and will break with newer | 162 | data structures and compile with kernel internal headers. Both of these |
163 | kernels. The union bpf_attr -> kern_version is checked at load time | 163 | kernel internals are subject to change and can break with newer kernels |
164 | to prevent accidentally loading kprobe-based bpf programs written | 164 | such that the program needs to be adapted accordingly. |
165 | for a different kernel. Networking programs don't do kern_version check. | ||
166 | 165 | ||
167 | Q: How much stack space a BPF program uses? | 166 | Q: How much stack space a BPF program uses? |
168 | ------------------------------------------- | 167 | ------------------------------------------- |
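The reworded answer above concerns tracing programs that read kernel-internal data through helpers such as bpf_probe_read(). For illustration only (not part of the patch; a libbpf-style build and the probe target are assumptions), a minimal kprobe program of that kind -- the address it dereferences comes from kernel-internal layout, which is exactly what the stable ABI does not cover:

#include <linux/types.h>
#include <linux/ptrace.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("kprobe/tcp_v4_connect")
int trace_connect(struct pt_regs *ctx)
{
	const void *sk = (const void *)PT_REGS_PARM1(ctx);
	__u64 word = 0;

	/* Reads kernel memory at an address taken from a kernel-internal
	 * pointer; real tools add field offsets computed from kernel
	 * headers, and those offsets can change between kernel versions.
	 */
	bpf_probe_read(&word, sizeof(word), sk);
	return 0;
}

char _license[] SEC("license") = "GPL";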
diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c
index 6d05946b445e..124ff530da82 100644
--- a/drivers/isdn/hardware/mISDN/hfcsusb.c
+++ b/drivers/isdn/hardware/mISDN/hfcsusb.c
@@ -262,8 +262,7 @@ hfcsusb_ph_info(struct hfcsusb *hw) | |||
262 | struct dchannel *dch = &hw->dch; | 262 | struct dchannel *dch = &hw->dch; |
263 | int i; | 263 | int i; |
264 | 264 | ||
265 | phi = kzalloc(sizeof(struct ph_info) + | 265 | phi = kzalloc(struct_size(phi, bch, dch->dev.nrbchan), GFP_ATOMIC); |
266 | dch->dev.nrbchan * sizeof(struct ph_info_ch), GFP_ATOMIC); | ||
267 | phi->dch.ch.protocol = hw->protocol; | 266 | phi->dch.ch.protocol = hw->protocol; |
268 | phi->dch.ch.Flags = dch->Flags; | 267 | phi->dch.ch.Flags = dch->Flags; |
269 | phi->dch.state = dch->state; | 268 | phi->dch.state = dch->state; |
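The hfcsusb change replaces an open-coded size calculation with struct_size() from <linux/overflow.h>. A minimal sketch (stand-in type names, not the driver's) of what the helper computes: the size of a struct plus a trailing array of n elements, saturating to SIZE_MAX on overflow instead of wrapping, so the allocation fails cleanly:

#include <linux/overflow.h>
#include <linux/slab.h>

/* stand-in types, loosely modelled on struct ph_info / struct ph_info_ch */
struct ph_info_ch_ex { u32 protocol; u32 flags; };

struct ph_info_ex {
	u32 protocol;
	struct ph_info_ch_ex bch[];	/* flexible array, sized at runtime */
};

static struct ph_info_ex *alloc_ph_info(unsigned int nrbchan)
{
	struct ph_info_ex *phi;

	/* struct_size(phi, bch, nrbchan) ==
	 *     sizeof(*phi) + nrbchan * sizeof(phi->bch[0]),
	 * except the multiplication and addition are overflow-checked.
	 */
	phi = kzalloc(struct_size(phi, bch, nrbchan), GFP_ATOMIC);
	return phi;
}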
diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
index 1b2239c1d569..dc1cded716c1 100644
--- a/drivers/isdn/i4l/isdn_tty.c
+++ b/drivers/isdn/i4l/isdn_tty.c
@@ -1437,15 +1437,19 @@ isdn_tty_set_termios(struct tty_struct *tty, struct ktermios *old_termios) | |||
1437 | { | 1437 | { |
1438 | modem_info *info = (modem_info *) tty->driver_data; | 1438 | modem_info *info = (modem_info *) tty->driver_data; |
1439 | 1439 | ||
1440 | mutex_lock(&modem_info_mutex); | ||
1440 | if (!old_termios) | 1441 | if (!old_termios) |
1441 | isdn_tty_change_speed(info); | 1442 | isdn_tty_change_speed(info); |
1442 | else { | 1443 | else { |
1443 | if (tty->termios.c_cflag == old_termios->c_cflag && | 1444 | if (tty->termios.c_cflag == old_termios->c_cflag && |
1444 | tty->termios.c_ispeed == old_termios->c_ispeed && | 1445 | tty->termios.c_ispeed == old_termios->c_ispeed && |
1445 | tty->termios.c_ospeed == old_termios->c_ospeed) | 1446 | tty->termios.c_ospeed == old_termios->c_ospeed) { |
1447 | mutex_unlock(&modem_info_mutex); | ||
1446 | return; | 1448 | return; |
1449 | } | ||
1447 | isdn_tty_change_speed(info); | 1450 | isdn_tty_change_speed(info); |
1448 | } | 1451 | } |
1452 | mutex_unlock(&modem_info_mutex); | ||
1449 | } | 1453 | } |
1450 | 1454 | ||
1451 | /* | 1455 | /* |
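The isdn_tty change takes modem_info_mutex for the whole of isdn_tty_set_termios() and releases it on every exit path, including the early return. A generic sketch of that pattern under assumed names (termios_unchanged() and do_speed_change() are hypothetical helpers standing in for the driver's logic):

#include <linux/mutex.h>
#include <linux/tty.h>

static DEFINE_MUTEX(example_mutex);

static bool termios_unchanged(struct tty_struct *tty,
			      const struct ktermios *old);	/* hypothetical */
static void do_speed_change(struct tty_struct *tty);		/* hypothetical */

static void example_set_termios(struct tty_struct *tty,
				struct ktermios *old_termios)
{
	mutex_lock(&example_mutex);

	if (old_termios && termios_unchanged(tty, old_termios)) {
		mutex_unlock(&example_mutex);	/* unlock before the early return */
		return;
	}

	do_speed_change(tty);			/* work done under the lock */
	mutex_unlock(&example_mutex);
}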
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index a9d597f28023..485462d3087f 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1963,6 +1963,9 @@ static int __bond_release_one(struct net_device *bond_dev, | |||
1963 | if (!bond_has_slaves(bond)) { | 1963 | if (!bond_has_slaves(bond)) { |
1964 | bond_set_carrier(bond); | 1964 | bond_set_carrier(bond); |
1965 | eth_hw_addr_random(bond_dev); | 1965 | eth_hw_addr_random(bond_dev); |
1966 | bond->nest_level = SINGLE_DEPTH_NESTING; | ||
1967 | } else { | ||
1968 | bond->nest_level = dev_get_nest_level(bond_dev) + 1; | ||
1966 | } | 1969 | } |
1967 | 1970 | ||
1968 | unblock_netpoll_tx(); | 1971 | unblock_netpoll_tx(); |
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 3b12e2dcff31..8a5111f9414c 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -7,7 +7,6 @@ | |||
7 | 7 | ||
8 | #include <linux/delay.h> | 8 | #include <linux/delay.h> |
9 | #include <linux/export.h> | 9 | #include <linux/export.h> |
10 | #include <linux/gpio.h> | ||
11 | #include <linux/gpio/consumer.h> | 10 | #include <linux/gpio/consumer.h> |
12 | #include <linux/kernel.h> | 11 | #include <linux/kernel.h> |
13 | #include <linux/module.h> | 12 | #include <linux/module.h> |
@@ -15,7 +14,6 @@ | |||
15 | #include <linux/phy.h> | 14 | #include <linux/phy.h> |
16 | #include <linux/etherdevice.h> | 15 | #include <linux/etherdevice.h> |
17 | #include <linux/if_bridge.h> | 16 | #include <linux/if_bridge.h> |
18 | #include <linux/of_gpio.h> | ||
19 | #include <linux/of_net.h> | 17 | #include <linux/of_net.h> |
20 | #include <net/dsa.h> | 18 | #include <net/dsa.h> |
21 | #include <net/switchdev.h> | 19 | #include <net/switchdev.h> |
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 74547f43b938..a8a2c728afba 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -18,7 +18,6 @@ | |||
18 | #include <linux/mfd/syscon.h> | 18 | #include <linux/mfd/syscon.h> |
19 | #include <linux/module.h> | 19 | #include <linux/module.h> |
20 | #include <linux/netdevice.h> | 20 | #include <linux/netdevice.h> |
21 | #include <linux/of_gpio.h> | ||
22 | #include <linux/of_mdio.h> | 21 | #include <linux/of_mdio.h> |
23 | #include <linux/of_net.h> | 22 | #include <linux/of_net.h> |
24 | #include <linux/of_platform.h> | 23 | #include <linux/of_platform.h> |
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 8a517d8fb9d1..8dca2c949e73 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -2403,6 +2403,107 @@ static int mv88e6xxx_stats_setup(struct mv88e6xxx_chip *chip) | |||
2403 | return mv88e6xxx_g1_stats_clear(chip); | 2403 | return mv88e6xxx_g1_stats_clear(chip); |
2404 | } | 2404 | } |
2405 | 2405 | ||
2406 | /* The mv88e6390 has some hidden registers used for debug and | ||
2407 | * development. The errata also makes use of them. | ||
2408 | */ | ||
2409 | static int mv88e6390_hidden_write(struct mv88e6xxx_chip *chip, int port, | ||
2410 | int reg, u16 val) | ||
2411 | { | ||
2412 | u16 ctrl; | ||
2413 | int err; | ||
2414 | |||
2415 | err = mv88e6xxx_port_write(chip, PORT_RESERVED_1A_DATA_PORT, | ||
2416 | PORT_RESERVED_1A, val); | ||
2417 | if (err) | ||
2418 | return err; | ||
2419 | |||
2420 | ctrl = PORT_RESERVED_1A_BUSY | PORT_RESERVED_1A_WRITE | | ||
2421 | PORT_RESERVED_1A_BLOCK | port << PORT_RESERVED_1A_PORT_SHIFT | | ||
2422 | reg; | ||
2423 | |||
2424 | return mv88e6xxx_port_write(chip, PORT_RESERVED_1A_CTRL_PORT, | ||
2425 | PORT_RESERVED_1A, ctrl); | ||
2426 | } | ||
2427 | |||
2428 | static int mv88e6390_hidden_wait(struct mv88e6xxx_chip *chip) | ||
2429 | { | ||
2430 | return mv88e6xxx_wait(chip, PORT_RESERVED_1A_CTRL_PORT, | ||
2431 | PORT_RESERVED_1A, PORT_RESERVED_1A_BUSY); | ||
2432 | } | ||
2433 | |||
2434 | |||
2435 | static int mv88e6390_hidden_read(struct mv88e6xxx_chip *chip, int port, | ||
2436 | int reg, u16 *val) | ||
2437 | { | ||
2438 | u16 ctrl; | ||
2439 | int err; | ||
2440 | |||
2441 | ctrl = PORT_RESERVED_1A_BUSY | PORT_RESERVED_1A_READ | | ||
2442 | PORT_RESERVED_1A_BLOCK | port << PORT_RESERVED_1A_PORT_SHIFT | | ||
2443 | reg; | ||
2444 | |||
2445 | err = mv88e6xxx_port_write(chip, PORT_RESERVED_1A_CTRL_PORT, | ||
2446 | PORT_RESERVED_1A, ctrl); | ||
2447 | if (err) | ||
2448 | return err; | ||
2449 | |||
2450 | err = mv88e6390_hidden_wait(chip); | ||
2451 | if (err) | ||
2452 | return err; | ||
2453 | |||
2454 | return mv88e6xxx_port_read(chip, PORT_RESERVED_1A_DATA_PORT, | ||
2455 | PORT_RESERVED_1A, val); | ||
2456 | } | ||
2457 | |||
2458 | /* Check if the errata has already been applied. */ | ||
2459 | static bool mv88e6390_setup_errata_applied(struct mv88e6xxx_chip *chip) | ||
2460 | { | ||
2461 | int port; | ||
2462 | int err; | ||
2463 | u16 val; | ||
2464 | |||
2465 | for (port = 0; port < mv88e6xxx_num_ports(chip); port++) { | ||
2466 | err = mv88e6390_hidden_read(chip, port, 0, &val); | ||
2467 | if (err) { | ||
2468 | dev_err(chip->dev, | ||
2469 | "Error reading hidden register: %d\n", err); | ||
2470 | return false; | ||
2471 | } | ||
2472 | if (val != 0x01c0) | ||
2473 | return false; | ||
2474 | } | ||
2475 | |||
2476 | return true; | ||
2477 | } | ||
2478 | |||
2479 | /* The 6390 copper ports have an errata which require poking magic | ||
2480 | * values into undocumented hidden registers and then performing a | ||
2481 | * software reset. | ||
2482 | */ | ||
2483 | static int mv88e6390_setup_errata(struct mv88e6xxx_chip *chip) | ||
2484 | { | ||
2485 | int port; | ||
2486 | int err; | ||
2487 | |||
2488 | if (mv88e6390_setup_errata_applied(chip)) | ||
2489 | return 0; | ||
2490 | |||
2491 | /* Set the ports into blocking mode */ | ||
2492 | for (port = 0; port < mv88e6xxx_num_ports(chip); port++) { | ||
2493 | err = mv88e6xxx_port_set_state(chip, port, BR_STATE_DISABLED); | ||
2494 | if (err) | ||
2495 | return err; | ||
2496 | } | ||
2497 | |||
2498 | for (port = 0; port < mv88e6xxx_num_ports(chip); port++) { | ||
2499 | err = mv88e6390_hidden_write(chip, port, 0, 0x01c0); | ||
2500 | if (err) | ||
2501 | return err; | ||
2502 | } | ||
2503 | |||
2504 | return mv88e6xxx_software_reset(chip); | ||
2505 | } | ||
2506 | |||
2406 | static int mv88e6xxx_setup(struct dsa_switch *ds) | 2507 | static int mv88e6xxx_setup(struct dsa_switch *ds) |
2407 | { | 2508 | { |
2408 | struct mv88e6xxx_chip *chip = ds->priv; | 2509 | struct mv88e6xxx_chip *chip = ds->priv; |
@@ -2415,6 +2516,12 @@ static int mv88e6xxx_setup(struct dsa_switch *ds) | |||
2415 | 2516 | ||
2416 | mutex_lock(&chip->reg_lock); | 2517 | mutex_lock(&chip->reg_lock); |
2417 | 2518 | ||
2519 | if (chip->info->ops->setup_errata) { | ||
2520 | err = chip->info->ops->setup_errata(chip); | ||
2521 | if (err) | ||
2522 | goto unlock; | ||
2523 | } | ||
2524 | |||
2418 | /* Cache the cmode of each port. */ | 2525 | /* Cache the cmode of each port. */ |
2419 | for (i = 0; i < mv88e6xxx_num_ports(chip); i++) { | 2526 | for (i = 0; i < mv88e6xxx_num_ports(chip); i++) { |
2420 | if (chip->info->ops->port_get_cmode) { | 2527 | if (chip->info->ops->port_get_cmode) { |
@@ -3226,6 +3333,7 @@ static const struct mv88e6xxx_ops mv88e6185_ops = { | |||
3226 | 3333 | ||
3227 | static const struct mv88e6xxx_ops mv88e6190_ops = { | 3334 | static const struct mv88e6xxx_ops mv88e6190_ops = { |
3228 | /* MV88E6XXX_FAMILY_6390 */ | 3335 | /* MV88E6XXX_FAMILY_6390 */ |
3336 | .setup_errata = mv88e6390_setup_errata, | ||
3229 | .irl_init_all = mv88e6390_g2_irl_init_all, | 3337 | .irl_init_all = mv88e6390_g2_irl_init_all, |
3230 | .get_eeprom = mv88e6xxx_g2_get_eeprom8, | 3338 | .get_eeprom = mv88e6xxx_g2_get_eeprom8, |
3231 | .set_eeprom = mv88e6xxx_g2_set_eeprom8, | 3339 | .set_eeprom = mv88e6xxx_g2_set_eeprom8, |
@@ -3269,6 +3377,7 @@ static const struct mv88e6xxx_ops mv88e6190_ops = { | |||
3269 | 3377 | ||
3270 | static const struct mv88e6xxx_ops mv88e6190x_ops = { | 3378 | static const struct mv88e6xxx_ops mv88e6190x_ops = { |
3271 | /* MV88E6XXX_FAMILY_6390 */ | 3379 | /* MV88E6XXX_FAMILY_6390 */ |
3380 | .setup_errata = mv88e6390_setup_errata, | ||
3272 | .irl_init_all = mv88e6390_g2_irl_init_all, | 3381 | .irl_init_all = mv88e6390_g2_irl_init_all, |
3273 | .get_eeprom = mv88e6xxx_g2_get_eeprom8, | 3382 | .get_eeprom = mv88e6xxx_g2_get_eeprom8, |
3274 | .set_eeprom = mv88e6xxx_g2_set_eeprom8, | 3383 | .set_eeprom = mv88e6xxx_g2_set_eeprom8, |
@@ -3312,6 +3421,7 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = { | |||
3312 | 3421 | ||
3313 | static const struct mv88e6xxx_ops mv88e6191_ops = { | 3422 | static const struct mv88e6xxx_ops mv88e6191_ops = { |
3314 | /* MV88E6XXX_FAMILY_6390 */ | 3423 | /* MV88E6XXX_FAMILY_6390 */ |
3424 | .setup_errata = mv88e6390_setup_errata, | ||
3315 | .irl_init_all = mv88e6390_g2_irl_init_all, | 3425 | .irl_init_all = mv88e6390_g2_irl_init_all, |
3316 | .get_eeprom = mv88e6xxx_g2_get_eeprom8, | 3426 | .get_eeprom = mv88e6xxx_g2_get_eeprom8, |
3317 | .set_eeprom = mv88e6xxx_g2_set_eeprom8, | 3427 | .set_eeprom = mv88e6xxx_g2_set_eeprom8, |
@@ -3404,6 +3514,7 @@ static const struct mv88e6xxx_ops mv88e6240_ops = { | |||
3404 | 3514 | ||
3405 | static const struct mv88e6xxx_ops mv88e6290_ops = { | 3515 | static const struct mv88e6xxx_ops mv88e6290_ops = { |
3406 | /* MV88E6XXX_FAMILY_6390 */ | 3516 | /* MV88E6XXX_FAMILY_6390 */ |
3517 | .setup_errata = mv88e6390_setup_errata, | ||
3407 | .irl_init_all = mv88e6390_g2_irl_init_all, | 3518 | .irl_init_all = mv88e6390_g2_irl_init_all, |
3408 | .get_eeprom = mv88e6xxx_g2_get_eeprom8, | 3519 | .get_eeprom = mv88e6xxx_g2_get_eeprom8, |
3409 | .set_eeprom = mv88e6xxx_g2_set_eeprom8, | 3520 | .set_eeprom = mv88e6xxx_g2_set_eeprom8, |
@@ -3709,6 +3820,7 @@ static const struct mv88e6xxx_ops mv88e6352_ops = { | |||
3709 | 3820 | ||
3710 | static const struct mv88e6xxx_ops mv88e6390_ops = { | 3821 | static const struct mv88e6xxx_ops mv88e6390_ops = { |
3711 | /* MV88E6XXX_FAMILY_6390 */ | 3822 | /* MV88E6XXX_FAMILY_6390 */ |
3823 | .setup_errata = mv88e6390_setup_errata, | ||
3712 | .irl_init_all = mv88e6390_g2_irl_init_all, | 3824 | .irl_init_all = mv88e6390_g2_irl_init_all, |
3713 | .get_eeprom = mv88e6xxx_g2_get_eeprom8, | 3825 | .get_eeprom = mv88e6xxx_g2_get_eeprom8, |
3714 | .set_eeprom = mv88e6xxx_g2_set_eeprom8, | 3826 | .set_eeprom = mv88e6xxx_g2_set_eeprom8, |
@@ -3756,6 +3868,7 @@ static const struct mv88e6xxx_ops mv88e6390_ops = { | |||
3756 | 3868 | ||
3757 | static const struct mv88e6xxx_ops mv88e6390x_ops = { | 3869 | static const struct mv88e6xxx_ops mv88e6390x_ops = { |
3758 | /* MV88E6XXX_FAMILY_6390 */ | 3870 | /* MV88E6XXX_FAMILY_6390 */ |
3871 | .setup_errata = mv88e6390_setup_errata, | ||
3759 | .irl_init_all = mv88e6390_g2_irl_init_all, | 3872 | .irl_init_all = mv88e6390_g2_irl_init_all, |
3760 | .get_eeprom = mv88e6xxx_g2_get_eeprom8, | 3873 | .get_eeprom = mv88e6xxx_g2_get_eeprom8, |
3761 | .set_eeprom = mv88e6xxx_g2_set_eeprom8, | 3874 | .set_eeprom = mv88e6xxx_g2_set_eeprom8, |
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index f9ecb7872d32..546651d8c3e1 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -300,6 +300,11 @@ struct mv88e6xxx_mdio_bus { | |||
300 | }; | 300 | }; |
301 | 301 | ||
302 | struct mv88e6xxx_ops { | 302 | struct mv88e6xxx_ops { |
303 | /* Switch Setup Errata, called early in the switch setup to | ||
304 | * allow any errata actions to be performed | ||
305 | */ | ||
306 | int (*setup_errata)(struct mv88e6xxx_chip *chip); | ||
307 | |||
303 | int (*ieee_pri_map)(struct mv88e6xxx_chip *chip); | 308 | int (*ieee_pri_map)(struct mv88e6xxx_chip *chip); |
304 | int (*ip_pri_map)(struct mv88e6xxx_chip *chip); | 309 | int (*ip_pri_map)(struct mv88e6xxx_chip *chip); |
305 | 310 | ||
diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
index 0d81866d0e4a..e583641de758 100644
--- a/drivers/net/dsa/mv88e6xxx/port.h
+++ b/drivers/net/dsa/mv88e6xxx/port.h
@@ -251,6 +251,16 @@ | |||
251 | /* Offset 0x19: Port IEEE Priority Remapping Registers (4-7) */ | 251 | /* Offset 0x19: Port IEEE Priority Remapping Registers (4-7) */ |
252 | #define MV88E6095_PORT_IEEE_PRIO_REMAP_4567 0x19 | 252 | #define MV88E6095_PORT_IEEE_PRIO_REMAP_4567 0x19 |
253 | 253 | ||
254 | /* Offset 0x1a: Magic undocumented errata register */ | ||
255 | #define PORT_RESERVED_1A 0x1a | ||
256 | #define PORT_RESERVED_1A_BUSY BIT(15) | ||
257 | #define PORT_RESERVED_1A_WRITE BIT(14) | ||
258 | #define PORT_RESERVED_1A_READ 0 | ||
259 | #define PORT_RESERVED_1A_PORT_SHIFT 5 | ||
260 | #define PORT_RESERVED_1A_BLOCK (0xf << 10) | ||
261 | #define PORT_RESERVED_1A_CTRL_PORT 4 | ||
262 | #define PORT_RESERVED_1A_DATA_PORT 5 | ||
263 | |||
254 | int mv88e6xxx_port_read(struct mv88e6xxx_chip *chip, int port, int reg, | 264 | int mv88e6xxx_port_read(struct mv88e6xxx_chip *chip, int port, int reg, |
255 | u16 *val); | 265 | u16 *val); |
256 | int mv88e6xxx_port_write(struct mv88e6xxx_chip *chip, int port, int reg, | 266 | int mv88e6xxx_port_write(struct mv88e6xxx_chip *chip, int port, int reg, |
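The hidden-register protocol in the chip.c hunk above is: write the value to register 0x1a on the data pseudo-port (5), then write a control word with the BUSY bit set to the control pseudo-port (4); reads issue the control word and poll until BUSY clears before fetching the data register. A rough sketch of such a poll loop, using the definitions above (the driver's real mv88e6390_hidden_wait() goes through mv88e6xxx_wait() and may differ in timing and bounds):

#include <linux/delay.h>
#include <linux/errno.h>

static int example_wait_busy_clear(struct mv88e6xxx_chip *chip)
{
	u16 val;
	int err, i;

	for (i = 0; i < 16; i++) {
		err = mv88e6xxx_port_read(chip, PORT_RESERVED_1A_CTRL_PORT,
					  PORT_RESERVED_1A, &val);
		if (err)
			return err;
		if (!(val & PORT_RESERVED_1A_BUSY))
			return 0;	/* previous hidden access completed */
		usleep_range(1000, 2000);
	}

	return -ETIMEDOUT;
}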
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 4ab6eb3baefc..6a512871176b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -5601,7 +5601,8 @@ static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, | |||
5601 | FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST | | 5601 | FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST | |
5602 | FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST; | 5602 | FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST; |
5603 | if (bp->flags & BNXT_FLAG_CHIP_P5) | 5603 | if (bp->flags & BNXT_FLAG_CHIP_P5) |
5604 | flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST; | 5604 | flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST | |
5605 | FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST; | ||
5605 | else | 5606 | else |
5606 | flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST; | 5607 | flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST; |
5607 | } | 5608 | } |
@@ -6221,9 +6222,12 @@ static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp, | |||
6221 | rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i]; | 6222 | rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i]; |
6222 | rmem->depth = 1; | 6223 | rmem->depth = 1; |
6223 | rmem->nr_pages = MAX_CTX_PAGES; | 6224 | rmem->nr_pages = MAX_CTX_PAGES; |
6224 | if (i == (nr_tbls - 1)) | 6225 | if (i == (nr_tbls - 1)) { |
6225 | rmem->nr_pages = ctx_pg->nr_pages % | 6226 | int rem = ctx_pg->nr_pages % MAX_CTX_PAGES; |
6226 | MAX_CTX_PAGES; | 6227 | |
6228 | if (rem) | ||
6229 | rmem->nr_pages = rem; | ||
6230 | } | ||
6227 | rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl); | 6231 | rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl); |
6228 | if (rc) | 6232 | if (rc) |
6229 | break; | 6233 | break; |
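The bnxt_alloc_ctx_pg_tbls() fix handles the case where ctx_pg->nr_pages is an exact multiple of MAX_CTX_PAGES: the old code unconditionally set the last table's page count to the remainder, which is 0 in that case. A minimal sketch of the corrected last-chunk arithmetic (helper name is illustrative):

/* Pages held by the last of the page tables, where every table but the
 * last holds max_per_tbl pages.  When nr_pages divides evenly, the
 * remainder is 0 and the last table must stay full, not become empty.
 */
static int last_tbl_nr_pages(int nr_pages, int max_per_tbl)
{
	int rem = nr_pages % max_per_tbl;

	return rem ? rem : max_per_tbl;
}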
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index f1aaac8e6268..0a0995894ddb 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -386,8 +386,8 @@ struct hwrm_err_output { | |||
386 | #define HWRM_VERSION_MAJOR 1 | 386 | #define HWRM_VERSION_MAJOR 1 |
387 | #define HWRM_VERSION_MINOR 10 | 387 | #define HWRM_VERSION_MINOR 10 |
388 | #define HWRM_VERSION_UPDATE 0 | 388 | #define HWRM_VERSION_UPDATE 0 |
389 | #define HWRM_VERSION_RSVD 33 | 389 | #define HWRM_VERSION_RSVD 35 |
390 | #define HWRM_VERSION_STR "1.10.0.33" | 390 | #define HWRM_VERSION_STR "1.10.0.35" |
391 | 391 | ||
392 | /* hwrm_ver_get_input (size:192b/24B) */ | 392 | /* hwrm_ver_get_input (size:192b/24B) */ |
393 | struct hwrm_ver_get_input { | 393 | struct hwrm_ver_get_input { |
@@ -1184,6 +1184,7 @@ struct hwrm_func_cfg_input { | |||
1184 | #define FUNC_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x100000UL | 1184 | #define FUNC_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x100000UL |
1185 | #define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE 0x200000UL | 1185 | #define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE 0x200000UL |
1186 | #define FUNC_CFG_REQ_FLAGS_DYNAMIC_TX_RING_ALLOC 0x400000UL | 1186 | #define FUNC_CFG_REQ_FLAGS_DYNAMIC_TX_RING_ALLOC 0x400000UL |
1187 | #define FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST 0x800000UL | ||
1187 | __le32 enables; | 1188 | __le32 enables; |
1188 | #define FUNC_CFG_REQ_ENABLES_MTU 0x1UL | 1189 | #define FUNC_CFG_REQ_ENABLES_MTU 0x1UL |
1189 | #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL | 1190 | #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL |
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index b126926ef7f5..66cc7927061a 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -1738,12 +1738,8 @@ static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev) | |||
1738 | *skb = nskb; | 1738 | *skb = nskb; |
1739 | } | 1739 | } |
1740 | 1740 | ||
1741 | if (padlen) { | 1741 | if (padlen > ETH_FCS_LEN) |
1742 | if (padlen >= ETH_FCS_LEN) | 1742 | skb_put_zero(*skb, padlen - ETH_FCS_LEN); |
1743 | skb_put_zero(*skb, padlen - ETH_FCS_LEN); | ||
1744 | else | ||
1745 | skb_trim(*skb, ETH_FCS_LEN - padlen); | ||
1746 | } | ||
1747 | 1743 | ||
1748 | add_fcs: | 1744 | add_fcs: |
1749 | /* set FCS to packet */ | 1745 | /* set FCS to packet */ |
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index 85f22c286680..89db739b7819 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -2381,7 +2381,7 @@ no_mem: | |||
2381 | lro_add_page(adap, qs, fl, | 2381 | lro_add_page(adap, qs, fl, |
2382 | G_RSPD_LEN(len), | 2382 | G_RSPD_LEN(len), |
2383 | flags & F_RSPD_EOP); | 2383 | flags & F_RSPD_EOP); |
2384 | goto next_fl; | 2384 | goto next_fl; |
2385 | } | 2385 | } |
2386 | 2386 | ||
2387 | skb = get_packet_pg(adap, fl, q, | 2387 | skb = get_packet_pg(adap, fl, q, |
@@ -3214,11 +3214,13 @@ void t3_start_sge_timers(struct adapter *adap) | |||
3214 | for (i = 0; i < SGE_QSETS; ++i) { | 3214 | for (i = 0; i < SGE_QSETS; ++i) { |
3215 | struct sge_qset *q = &adap->sge.qs[i]; | 3215 | struct sge_qset *q = &adap->sge.qs[i]; |
3216 | 3216 | ||
3217 | if (q->tx_reclaim_timer.function) | 3217 | if (q->tx_reclaim_timer.function) |
3218 | mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD); | 3218 | mod_timer(&q->tx_reclaim_timer, |
3219 | jiffies + TX_RECLAIM_PERIOD); | ||
3219 | 3220 | ||
3220 | if (q->rx_reclaim_timer.function) | 3221 | if (q->rx_reclaim_timer.function) |
3221 | mod_timer(&q->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD); | 3222 | mod_timer(&q->rx_reclaim_timer, |
3223 | jiffies + RX_RECLAIM_PERIOD); | ||
3222 | } | 3224 | } |
3223 | } | 3225 | } |
3224 | 3226 | ||
diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
index 080918af773c..0a9f2c596624 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
@@ -1082,7 +1082,7 @@ int t3_check_fw_version(struct adapter *adapter) | |||
1082 | CH_WARN(adapter, "found newer FW version(%u.%u), " | 1082 | CH_WARN(adapter, "found newer FW version(%u.%u), " |
1083 | "driver compiled for version %u.%u\n", major, minor, | 1083 | "driver compiled for version %u.%u\n", major, minor, |
1084 | FW_VERSION_MAJOR, FW_VERSION_MINOR); | 1084 | FW_VERSION_MAJOR, FW_VERSION_MINOR); |
1085 | return 0; | 1085 | return 0; |
1086 | } | 1086 | } |
1087 | return -EINVAL; | 1087 | return -EINVAL; |
1088 | } | 1088 | } |
@@ -3619,7 +3619,7 @@ int t3_reset_adapter(struct adapter *adapter) | |||
3619 | 3619 | ||
3620 | static int init_parity(struct adapter *adap) | 3620 | static int init_parity(struct adapter *adap) |
3621 | { | 3621 | { |
3622 | int i, err, addr; | 3622 | int i, err, addr; |
3623 | 3623 | ||
3624 | if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY) | 3624 | if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY) |
3625 | return -EBUSY; | 3625 | return -EBUSY; |
@@ -3806,6 +3806,6 @@ int t3_replay_prep_adapter(struct adapter *adapter) | |||
3806 | p->phy.ops->power_down(&p->phy, 1); | 3806 | p->phy.ops->power_down(&p->phy, 1); |
3807 | } | 3807 | } |
3808 | 3808 | ||
3809 | return 0; | 3809 | return 0; |
3810 | } | 3810 | } |
3811 | 3811 | ||
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
index 9f9d6cae39d5..58a039c3224a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
@@ -378,10 +378,10 @@ static void cxgb4_init_ptp_timer(struct adapter *adapter) | |||
378 | int err; | 378 | int err; |
379 | 379 | ||
380 | memset(&c, 0, sizeof(c)); | 380 | memset(&c, 0, sizeof(c)); |
381 | c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PTP_CMD) | | 381 | c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PTP_CMD) | |
382 | FW_CMD_REQUEST_F | | 382 | FW_CMD_REQUEST_F | |
383 | FW_CMD_WRITE_F | | 383 | FW_CMD_WRITE_F | |
384 | FW_PTP_CMD_PORTID_V(0)); | 384 | FW_PTP_CMD_PORTID_V(0)); |
385 | c.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(sizeof(c) / 16)); | 385 | c.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(sizeof(c) / 16)); |
386 | c.u.scmd.sc = FW_PTP_SC_INIT_TIMER; | 386 | c.u.scmd.sc = FW_PTP_SC_INIT_TIMER; |
387 | 387 | ||
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
index 9a6065a3fa46..c041f44324db 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -78,7 +78,7 @@ static void free_msix_idx_in_bmap(struct adapter *adap, unsigned int msix_idx) | |||
78 | unsigned long flags; | 78 | unsigned long flags; |
79 | 79 | ||
80 | spin_lock_irqsave(&bmap->lock, flags); | 80 | spin_lock_irqsave(&bmap->lock, flags); |
81 | __clear_bit(msix_idx, bmap->msix_bmap); | 81 | __clear_bit(msix_idx, bmap->msix_bmap); |
82 | spin_unlock_irqrestore(&bmap->lock, flags); | 82 | spin_unlock_irqrestore(&bmap->lock, flags); |
83 | } | 83 | } |
84 | 84 | ||
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index e8c34292a0ec..2b03f6187a24 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -3794,7 +3794,7 @@ int t4_load_phy_fw(struct adapter *adap, | |||
3794 | /* If we have version number support, then check to see if the adapter | 3794 | /* If we have version number support, then check to see if the adapter |
3795 | * already has up-to-date PHY firmware loaded. | 3795 | * already has up-to-date PHY firmware loaded. |
3796 | */ | 3796 | */ |
3797 | if (phy_fw_version) { | 3797 | if (phy_fw_version) { |
3798 | new_phy_fw_vers = phy_fw_version(phy_fw_data, phy_fw_size); | 3798 | new_phy_fw_vers = phy_fw_version(phy_fw_data, phy_fw_size); |
3799 | ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver); | 3799 | ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver); |
3800 | if (ret < 0) | 3800 | if (ret < 0) |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
index ad1779fc410e..a78bfafd212c 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
@@ -147,12 +147,10 @@ static void hns_ae_put_handle(struct hnae_handle *handle) | |||
147 | struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle); | 147 | struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle); |
148 | int i; | 148 | int i; |
149 | 149 | ||
150 | vf_cb->mac_cb = NULL; | ||
151 | |||
152 | kfree(vf_cb); | ||
153 | |||
154 | for (i = 0; i < handle->q_num; i++) | 150 | for (i = 0; i < handle->q_num; i++) |
155 | hns_ae_get_ring_pair(handle->qs[i])->used_by_vf = 0; | 151 | hns_ae_get_ring_pair(handle->qs[i])->used_by_vf = 0; |
152 | |||
153 | kfree(vf_cb); | ||
156 | } | 154 | } |
157 | 155 | ||
158 | static int hns_ae_wait_flow_down(struct hnae_handle *handle) | 156 | static int hns_ae_wait_flow_down(struct hnae_handle *handle) |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 5748d3f722f6..5b33238c6680 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -1170,6 +1170,13 @@ int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h) | |||
1170 | if (!h->phy_dev) | 1170 | if (!h->phy_dev) |
1171 | return 0; | 1171 | return 0; |
1172 | 1172 | ||
1173 | ethtool_convert_legacy_u32_to_link_mode(supported, h->if_support); | ||
1174 | linkmode_and(phy_dev->supported, phy_dev->supported, supported); | ||
1175 | linkmode_copy(phy_dev->advertising, phy_dev->supported); | ||
1176 | |||
1177 | if (h->phy_if == PHY_INTERFACE_MODE_XGMII) | ||
1178 | phy_dev->autoneg = false; | ||
1179 | |||
1173 | if (h->phy_if != PHY_INTERFACE_MODE_XGMII) { | 1180 | if (h->phy_if != PHY_INTERFACE_MODE_XGMII) { |
1174 | phy_dev->dev_flags = 0; | 1181 | phy_dev->dev_flags = 0; |
1175 | 1182 | ||
@@ -1181,16 +1188,6 @@ int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h) | |||
1181 | if (unlikely(ret)) | 1188 | if (unlikely(ret)) |
1182 | return -ENODEV; | 1189 | return -ENODEV; |
1183 | 1190 | ||
1184 | ethtool_convert_legacy_u32_to_link_mode(supported, h->if_support); | ||
1185 | linkmode_and(phy_dev->supported, phy_dev->supported, supported); | ||
1186 | linkmode_copy(phy_dev->advertising, phy_dev->supported); | ||
1187 | |||
1188 | if (h->phy_if == PHY_INTERFACE_MODE_XGMII) | ||
1189 | phy_dev->autoneg = false; | ||
1190 | |||
1191 | if (h->phy_if == PHY_INTERFACE_MODE_SGMII) | ||
1192 | phy_stop(phy_dev); | ||
1193 | |||
1194 | return 0; | 1191 | return 0; |
1195 | } | 1192 | } |
1196 | 1193 | ||
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 31fb76ee9d82..a1246e89aad4 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -159,7 +159,7 @@ config IXGBE | |||
159 | tristate "Intel(R) 10GbE PCI Express adapters support" | 159 | tristate "Intel(R) 10GbE PCI Express adapters support" |
160 | depends on PCI | 160 | depends on PCI |
161 | select MDIO | 161 | select MDIO |
162 | select MDIO_DEVICE | 162 | select PHYLIB |
163 | imply PTP_1588_CLOCK | 163 | imply PTP_1588_CLOCK |
164 | ---help--- | 164 | ---help--- |
165 | This driver supports Intel(R) 10GbE PCI Express family of | 165 | This driver supports Intel(R) 10GbE PCI Express family of |
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 742f0c1f60df..6d55e3d0b7ea 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -825,7 +825,7 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
825 | if (!cgx->cgx_cmd_workq) { | 825 | if (!cgx->cgx_cmd_workq) { |
826 | dev_err(dev, "alloc workqueue failed for cgx cmd"); | 826 | dev_err(dev, "alloc workqueue failed for cgx cmd"); |
827 | err = -ENOMEM; | 827 | err = -ENOMEM; |
828 | goto err_release_regions; | 828 | goto err_free_irq_vectors; |
829 | } | 829 | } |
830 | 830 | ||
831 | list_add(&cgx->cgx_list, &cgx_list); | 831 | list_add(&cgx->cgx_list, &cgx_list); |
@@ -841,6 +841,8 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
841 | err_release_lmac: | 841 | err_release_lmac: |
842 | cgx_lmac_exit(cgx); | 842 | cgx_lmac_exit(cgx); |
843 | list_del(&cgx->cgx_list); | 843 | list_del(&cgx->cgx_list); |
844 | err_free_irq_vectors: | ||
845 | pci_free_irq_vectors(pdev); | ||
844 | err_release_regions: | 846 | err_release_regions: |
845 | pci_release_regions(pdev); | 847 | pci_release_regions(pdev); |
846 | err_disable_device: | 848 | err_disable_device: |
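The cgx_probe() fix retargets the failure path so that IRQ vectors allocated earlier are also released when workqueue creation fails. A generic sketch of the goto-unwind ordering (names are illustrative, not the driver's; a real driver would keep wq and destroy it on remove):

#include <linux/pci.h>
#include <linux/workqueue.h>

static int example_probe(struct pci_dev *pdev)
{
	struct workqueue_struct *wq;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	err = pci_request_regions(pdev, "example");
	if (err)
		goto err_disable_device;

	err = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX);
	if (err < 0)
		goto err_release_regions;

	wq = alloc_workqueue("example_wq", 0, 0);
	if (!wq) {
		err = -ENOMEM;
		goto err_free_irq_vectors;	/* undo the vector allocation too */
	}

	return 0;

err_free_irq_vectors:
	pci_free_irq_vectors(pdev);
err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
	return err;
}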
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index fe9653fa8aea..49f926b7a91c 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -258,11 +258,6 @@ static void mtk_phy_link_adjust(struct net_device *dev) | |||
258 | 258 | ||
259 | mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id)); | 259 | mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id)); |
260 | 260 | ||
261 | if (dev->phydev->link) | ||
262 | netif_carrier_on(dev); | ||
263 | else | ||
264 | netif_carrier_off(dev); | ||
265 | |||
266 | if (!of_phy_is_fixed_link(mac->of_node)) | 261 | if (!of_phy_is_fixed_link(mac->of_node)) |
267 | phy_print_status(dev->phydev); | 262 | phy_print_status(dev->phydev); |
268 | } | 263 | } |
@@ -347,17 +342,6 @@ static int mtk_phy_connect(struct net_device *dev) | |||
347 | if (mtk_phy_connect_node(eth, mac, np)) | 342 | if (mtk_phy_connect_node(eth, mac, np)) |
348 | goto err_phy; | 343 | goto err_phy; |
349 | 344 | ||
350 | dev->phydev->autoneg = AUTONEG_ENABLE; | ||
351 | dev->phydev->speed = 0; | ||
352 | dev->phydev->duplex = 0; | ||
353 | |||
354 | phy_set_max_speed(dev->phydev, SPEED_1000); | ||
355 | phy_support_asym_pause(dev->phydev); | ||
356 | linkmode_copy(dev->phydev->advertising, dev->phydev->supported); | ||
357 | linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, | ||
358 | dev->phydev->advertising); | ||
359 | phy_start_aneg(dev->phydev); | ||
360 | |||
361 | of_node_put(np); | 345 | of_node_put(np); |
362 | 346 | ||
363 | return 0; | 347 | return 0; |
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c
index 4b4351141b94..d89a3da89e5a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/icm.c
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.c
@@ -57,12 +57,12 @@ static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chu | |||
57 | int i; | 57 | int i; |
58 | 58 | ||
59 | if (chunk->nsg > 0) | 59 | if (chunk->nsg > 0) |
60 | pci_unmap_sg(dev->persist->pdev, chunk->mem, chunk->npages, | 60 | dma_unmap_sg(&dev->persist->pdev->dev, chunk->sg, chunk->npages, |
61 | PCI_DMA_BIDIRECTIONAL); | 61 | DMA_BIDIRECTIONAL); |
62 | 62 | ||
63 | for (i = 0; i < chunk->npages; ++i) | 63 | for (i = 0; i < chunk->npages; ++i) |
64 | __free_pages(sg_page(&chunk->mem[i]), | 64 | __free_pages(sg_page(&chunk->sg[i]), |
65 | get_order(chunk->mem[i].length)); | 65 | get_order(chunk->sg[i].length)); |
66 | } | 66 | } |
67 | 67 | ||
68 | static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk) | 68 | static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk) |
@@ -71,9 +71,9 @@ static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk * | |||
71 | 71 | ||
72 | for (i = 0; i < chunk->npages; ++i) | 72 | for (i = 0; i < chunk->npages; ++i) |
73 | dma_free_coherent(&dev->persist->pdev->dev, | 73 | dma_free_coherent(&dev->persist->pdev->dev, |
74 | chunk->mem[i].length, | 74 | chunk->buf[i].size, |
75 | lowmem_page_address(sg_page(&chunk->mem[i])), | 75 | chunk->buf[i].addr, |
76 | sg_dma_address(&chunk->mem[i])); | 76 | chunk->buf[i].dma_addr); |
77 | } | 77 | } |
78 | 78 | ||
79 | void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent) | 79 | void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent) |
@@ -111,22 +111,21 @@ static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order, | |||
111 | return 0; | 111 | return 0; |
112 | } | 112 | } |
113 | 113 | ||
114 | static int mlx4_alloc_icm_coherent(struct device *dev, struct scatterlist *mem, | 114 | static int mlx4_alloc_icm_coherent(struct device *dev, struct mlx4_icm_buf *buf, |
115 | int order, gfp_t gfp_mask) | 115 | int order, gfp_t gfp_mask) |
116 | { | 116 | { |
117 | void *buf = dma_alloc_coherent(dev, PAGE_SIZE << order, | 117 | buf->addr = dma_alloc_coherent(dev, PAGE_SIZE << order, |
118 | &sg_dma_address(mem), gfp_mask); | 118 | &buf->dma_addr, gfp_mask); |
119 | if (!buf) | 119 | if (!buf->addr) |
120 | return -ENOMEM; | 120 | return -ENOMEM; |
121 | 121 | ||
122 | if (offset_in_page(buf)) { | 122 | if (offset_in_page(buf->addr)) { |
123 | dma_free_coherent(dev, PAGE_SIZE << order, | 123 | dma_free_coherent(dev, PAGE_SIZE << order, buf->addr, |
124 | buf, sg_dma_address(mem)); | 124 | buf->dma_addr); |
125 | return -ENOMEM; | 125 | return -ENOMEM; |
126 | } | 126 | } |
127 | 127 | ||
128 | sg_set_buf(mem, buf, PAGE_SIZE << order); | 128 | buf->size = PAGE_SIZE << order; |
129 | sg_dma_len(mem) = PAGE_SIZE << order; | ||
130 | return 0; | 129 | return 0; |
131 | } | 130 | } |
132 | 131 | ||
@@ -159,21 +158,21 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages, | |||
159 | 158 | ||
160 | while (npages > 0) { | 159 | while (npages > 0) { |
161 | if (!chunk) { | 160 | if (!chunk) { |
162 | chunk = kmalloc_node(sizeof(*chunk), | 161 | chunk = kzalloc_node(sizeof(*chunk), |
163 | gfp_mask & ~(__GFP_HIGHMEM | | 162 | gfp_mask & ~(__GFP_HIGHMEM | |
164 | __GFP_NOWARN), | 163 | __GFP_NOWARN), |
165 | dev->numa_node); | 164 | dev->numa_node); |
166 | if (!chunk) { | 165 | if (!chunk) { |
167 | chunk = kmalloc(sizeof(*chunk), | 166 | chunk = kzalloc(sizeof(*chunk), |
168 | gfp_mask & ~(__GFP_HIGHMEM | | 167 | gfp_mask & ~(__GFP_HIGHMEM | |
169 | __GFP_NOWARN)); | 168 | __GFP_NOWARN)); |
170 | if (!chunk) | 169 | if (!chunk) |
171 | goto fail; | 170 | goto fail; |
172 | } | 171 | } |
172 | chunk->coherent = coherent; | ||
173 | 173 | ||
174 | sg_init_table(chunk->mem, MLX4_ICM_CHUNK_LEN); | 174 | if (!coherent) |
175 | chunk->npages = 0; | 175 | sg_init_table(chunk->sg, MLX4_ICM_CHUNK_LEN); |
176 | chunk->nsg = 0; | ||
177 | list_add_tail(&chunk->list, &icm->chunk_list); | 176 | list_add_tail(&chunk->list, &icm->chunk_list); |
178 | } | 177 | } |
179 | 178 | ||
@@ -186,10 +185,10 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages, | |||
186 | 185 | ||
187 | if (coherent) | 186 | if (coherent) |
188 | ret = mlx4_alloc_icm_coherent(&dev->persist->pdev->dev, | 187 | ret = mlx4_alloc_icm_coherent(&dev->persist->pdev->dev, |
189 | &chunk->mem[chunk->npages], | 188 | &chunk->buf[chunk->npages], |
190 | cur_order, mask); | 189 | cur_order, mask); |
191 | else | 190 | else |
192 | ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages], | 191 | ret = mlx4_alloc_icm_pages(&chunk->sg[chunk->npages], |
193 | cur_order, mask, | 192 | cur_order, mask, |
194 | dev->numa_node); | 193 | dev->numa_node); |
195 | 194 | ||
@@ -205,9 +204,9 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages, | |||
205 | if (coherent) | 204 | if (coherent) |
206 | ++chunk->nsg; | 205 | ++chunk->nsg; |
207 | else if (chunk->npages == MLX4_ICM_CHUNK_LEN) { | 206 | else if (chunk->npages == MLX4_ICM_CHUNK_LEN) { |
208 | chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->mem, | 207 | chunk->nsg = dma_map_sg(&dev->persist->pdev->dev, |
209 | chunk->npages, | 208 | chunk->sg, chunk->npages, |
210 | PCI_DMA_BIDIRECTIONAL); | 209 | DMA_BIDIRECTIONAL); |
211 | 210 | ||
212 | if (chunk->nsg <= 0) | 211 | if (chunk->nsg <= 0) |
213 | goto fail; | 212 | goto fail; |
@@ -220,9 +219,8 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages, | |||
220 | } | 219 | } |
221 | 220 | ||
222 | if (!coherent && chunk) { | 221 | if (!coherent && chunk) { |
223 | chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->mem, | 222 | chunk->nsg = dma_map_sg(&dev->persist->pdev->dev, chunk->sg, |
224 | chunk->npages, | 223 | chunk->npages, DMA_BIDIRECTIONAL); |
225 | PCI_DMA_BIDIRECTIONAL); | ||
226 | 224 | ||
227 | if (chunk->nsg <= 0) | 225 | if (chunk->nsg <= 0) |
228 | goto fail; | 226 | goto fail; |
@@ -320,7 +318,7 @@ void *mlx4_table_find(struct mlx4_icm_table *table, u32 obj, | |||
320 | u64 idx; | 318 | u64 idx; |
321 | struct mlx4_icm_chunk *chunk; | 319 | struct mlx4_icm_chunk *chunk; |
322 | struct mlx4_icm *icm; | 320 | struct mlx4_icm *icm; |
323 | struct page *page = NULL; | 321 | void *addr = NULL; |
324 | 322 | ||
325 | if (!table->lowmem) | 323 | if (!table->lowmem) |
326 | return NULL; | 324 | return NULL; |
@@ -336,28 +334,49 @@ void *mlx4_table_find(struct mlx4_icm_table *table, u32 obj, | |||
336 | 334 | ||
337 | list_for_each_entry(chunk, &icm->chunk_list, list) { | 335 | list_for_each_entry(chunk, &icm->chunk_list, list) { |
338 | for (i = 0; i < chunk->npages; ++i) { | 336 | for (i = 0; i < chunk->npages; ++i) { |
337 | dma_addr_t dma_addr; | ||
338 | size_t len; | ||
339 | |||
340 | if (table->coherent) { | ||
341 | len = chunk->buf[i].size; | ||
342 | dma_addr = chunk->buf[i].dma_addr; | ||
343 | addr = chunk->buf[i].addr; | ||
344 | } else { | ||
345 | struct page *page; | ||
346 | |||
347 | len = sg_dma_len(&chunk->sg[i]); | ||
348 | dma_addr = sg_dma_address(&chunk->sg[i]); | ||
349 | |||
350 | /* XXX: we should never do this for highmem | ||
351 | * allocation. This function either needs | ||
352 | * to be split, or the kernel virtual address | ||
353 | * return needs to be made optional. | ||
354 | */ | ||
355 | page = sg_page(&chunk->sg[i]); | ||
356 | addr = lowmem_page_address(page); | ||
357 | } | ||
358 | |||
339 | if (dma_handle && dma_offset >= 0) { | 359 | if (dma_handle && dma_offset >= 0) { |
340 | if (sg_dma_len(&chunk->mem[i]) > dma_offset) | 360 | if (len > dma_offset) |
341 | *dma_handle = sg_dma_address(&chunk->mem[i]) + | 361 | *dma_handle = dma_addr + dma_offset; |
342 | dma_offset; | 362 | dma_offset -= len; |
343 | dma_offset -= sg_dma_len(&chunk->mem[i]); | ||
344 | } | 363 | } |
364 | |||
345 | /* | 365 | /* |
346 | * DMA mapping can merge pages but not split them, | 366 | * DMA mapping can merge pages but not split them, |
347 | * so if we found the page, dma_handle has already | 367 | * so if we found the page, dma_handle has already |
348 | * been assigned to. | 368 | * been assigned to. |
349 | */ | 369 | */ |
350 | if (chunk->mem[i].length > offset) { | 370 | if (len > offset) |
351 | page = sg_page(&chunk->mem[i]); | ||
352 | goto out; | 371 | goto out; |
353 | } | 372 | offset -= len; |
354 | offset -= chunk->mem[i].length; | ||
355 | } | 373 | } |
356 | } | 374 | } |
357 | 375 | ||
376 | addr = NULL; | ||
358 | out: | 377 | out: |
359 | mutex_unlock(&table->mutex); | 378 | mutex_unlock(&table->mutex); |
360 | return page ? lowmem_page_address(page) + offset : NULL; | 379 | return addr ? addr + offset : NULL; |
361 | } | 380 | } |
362 | 381 | ||
363 | int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table, | 382 | int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table, |
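The icm.c changes swap the deprecated pci_map_sg()/pci_unmap_sg() wrappers for the generic DMA API and keep coherent allocations out of the scatterlist entirely. A minimal sketch of the streaming-mapping pairing the non-coherent path now uses (the old wrappers simply forwarded to these with &pdev->dev):

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>

static int example_map_chunk(struct pci_dev *pdev, struct scatterlist *sg,
			     int npages)
{
	int nsg = dma_map_sg(&pdev->dev, sg, npages, DMA_BIDIRECTIONAL);

	if (nsg <= 0)
		return -ENOMEM;

	/* ... program the device using sg_dma_address()/sg_dma_len() ... */

	dma_unmap_sg(&pdev->dev, sg, npages, DMA_BIDIRECTIONAL);
	return 0;
}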
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.h b/drivers/net/ethernet/mellanox/mlx4/icm.h
index c9169a490557..d199874b1c07 100644
--- a/drivers/net/ethernet/mellanox/mlx4/icm.h
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.h
@@ -47,11 +47,21 @@ enum { | |||
47 | MLX4_ICM_PAGE_SIZE = 1 << MLX4_ICM_PAGE_SHIFT, | 47 | MLX4_ICM_PAGE_SIZE = 1 << MLX4_ICM_PAGE_SHIFT, |
48 | }; | 48 | }; |
49 | 49 | ||
50 | struct mlx4_icm_buf { | ||
51 | void *addr; | ||
52 | size_t size; | ||
53 | dma_addr_t dma_addr; | ||
54 | }; | ||
55 | |||
50 | struct mlx4_icm_chunk { | 56 | struct mlx4_icm_chunk { |
51 | struct list_head list; | 57 | struct list_head list; |
52 | int npages; | 58 | int npages; |
53 | int nsg; | 59 | int nsg; |
54 | struct scatterlist mem[MLX4_ICM_CHUNK_LEN]; | 60 | bool coherent; |
61 | union { | ||
62 | struct scatterlist sg[MLX4_ICM_CHUNK_LEN]; | ||
63 | struct mlx4_icm_buf buf[MLX4_ICM_CHUNK_LEN]; | ||
64 | }; | ||
55 | }; | 65 | }; |
56 | 66 | ||
57 | struct mlx4_icm { | 67 | struct mlx4_icm { |
@@ -114,12 +124,18 @@ static inline void mlx4_icm_next(struct mlx4_icm_iter *iter) | |||
114 | 124 | ||
115 | static inline dma_addr_t mlx4_icm_addr(struct mlx4_icm_iter *iter) | 125 | static inline dma_addr_t mlx4_icm_addr(struct mlx4_icm_iter *iter) |
116 | { | 126 | { |
117 | return sg_dma_address(&iter->chunk->mem[iter->page_idx]); | 127 | if (iter->chunk->coherent) |
128 | return iter->chunk->buf[iter->page_idx].dma_addr; | ||
129 | else | ||
130 | return sg_dma_address(&iter->chunk->sg[iter->page_idx]); | ||
118 | } | 131 | } |
119 | 132 | ||
120 | static inline unsigned long mlx4_icm_size(struct mlx4_icm_iter *iter) | 133 | static inline unsigned long mlx4_icm_size(struct mlx4_icm_iter *iter) |
121 | { | 134 | { |
122 | return sg_dma_len(&iter->chunk->mem[iter->page_idx]); | 135 | if (iter->chunk->coherent) |
136 | return iter->chunk->buf[iter->page_idx].size; | ||
137 | else | ||
138 | return sg_dma_len(&iter->chunk->sg[iter->page_idx]); | ||
123 | } | 139 | } |
124 | 140 | ||
125 | int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm); | 141 | int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm); |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
index 080ddd1942ec..b9a25aed5d11 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
@@ -78,6 +78,7 @@ config MLXSW_SPECTRUM | |||
78 | depends on IPV6 || IPV6=n | 78 | depends on IPV6 || IPV6=n |
79 | depends on NET_IPGRE || NET_IPGRE=n | 79 | depends on NET_IPGRE || NET_IPGRE=n |
80 | depends on IPV6_GRE || IPV6_GRE=n | 80 | depends on IPV6_GRE || IPV6_GRE=n |
81 | depends on VXLAN || VXLAN=n | ||
81 | select GENERIC_ALLOCATOR | 82 | select GENERIC_ALLOCATOR |
82 | select PARMAN | 83 | select PARMAN |
83 | select OBJAGG | 84 | select OBJAGG |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index eed1045e4d96..32519c93df17 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -5005,12 +5005,15 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev, | |||
5005 | lower_dev, | 5005 | lower_dev, |
5006 | upper_dev); | 5006 | upper_dev); |
5007 | } else if (netif_is_lag_master(upper_dev)) { | 5007 | } else if (netif_is_lag_master(upper_dev)) { |
5008 | if (info->linking) | 5008 | if (info->linking) { |
5009 | err = mlxsw_sp_port_lag_join(mlxsw_sp_port, | 5009 | err = mlxsw_sp_port_lag_join(mlxsw_sp_port, |
5010 | upper_dev); | 5010 | upper_dev); |
5011 | else | 5011 | } else { |
5012 | mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, | ||
5013 | false); | ||
5012 | mlxsw_sp_port_lag_leave(mlxsw_sp_port, | 5014 | mlxsw_sp_port_lag_leave(mlxsw_sp_port, |
5013 | upper_dev); | 5015 | upper_dev); |
5016 | } | ||
5014 | } else if (netif_is_ovs_master(upper_dev)) { | 5017 | } else if (netif_is_ovs_master(upper_dev)) { |
5015 | if (info->linking) | 5018 | if (info->linking) |
5016 | err = mlxsw_sp_port_ovs_join(mlxsw_sp_port); | 5019 | err = mlxsw_sp_port_ovs_join(mlxsw_sp_port); |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
index b0f2d8e8ded0..ac222833a5cf 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
@@ -72,7 +72,15 @@ mlxsw_sp_acl_ctcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp, | |||
72 | act_set = mlxsw_afa_block_first_set(rulei->act_block); | 72 | act_set = mlxsw_afa_block_first_set(rulei->act_block); |
73 | mlxsw_reg_ptce2_flex_action_set_memcpy_to(ptce2_pl, act_set); | 73 | mlxsw_reg_ptce2_flex_action_set_memcpy_to(ptce2_pl, act_set); |
74 | 74 | ||
75 | return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl); | 75 | err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl); |
76 | if (err) | ||
77 | goto err_ptce2_write; | ||
78 | |||
79 | return 0; | ||
80 | |||
81 | err_ptce2_write: | ||
82 | cregion->ops->entry_remove(cregion, centry); | ||
83 | return err; | ||
76 | } | 84 | } |
77 | 85 | ||
78 | static void | 86 | static void |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
index 1c19feefa5f2..2941967e1cc5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
@@ -1022,7 +1022,6 @@ void mlxsw_sp_acl_erp_mask_put(struct mlxsw_sp_acl_atcam_region *aregion, | |||
1022 | { | 1022 | { |
1023 | struct objagg_obj *objagg_obj = (struct objagg_obj *) erp_mask; | 1023 | struct objagg_obj *objagg_obj = (struct objagg_obj *) erp_mask; |
1024 | 1024 | ||
1025 | ASSERT_RTNL(); | ||
1026 | objagg_obj_put(aregion->erp_table->objagg, objagg_obj); | 1025 | objagg_obj_put(aregion->erp_table->objagg, objagg_obj); |
1027 | } | 1026 | } |
1028 | 1027 | ||
@@ -1054,7 +1053,6 @@ void mlxsw_sp_acl_erp_bf_remove(struct mlxsw_sp *mlxsw_sp, | |||
1054 | const struct mlxsw_sp_acl_erp *erp = objagg_obj_root_priv(objagg_obj); | 1053 | const struct mlxsw_sp_acl_erp *erp = objagg_obj_root_priv(objagg_obj); |
1055 | unsigned int erp_bank; | 1054 | unsigned int erp_bank; |
1056 | 1055 | ||
1057 | ASSERT_RTNL(); | ||
1058 | if (!mlxsw_sp_acl_erp_table_is_used(erp->erp_table)) | 1056 | if (!mlxsw_sp_acl_erp_table_is_used(erp->erp_table)) |
1059 | return; | 1057 | return; |
1060 | 1058 | ||
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
index 0a31fff2516e..fb1c48c698f2 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
@@ -816,14 +816,14 @@ int mlxsw_sp_nve_fid_enable(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *fid, | |||
816 | ops = nve->nve_ops_arr[params->type]; | 816 | ops = nve->nve_ops_arr[params->type]; |
817 | 817 | ||
818 | if (!ops->can_offload(nve, params->dev, extack)) | 818 | if (!ops->can_offload(nve, params->dev, extack)) |
819 | return -EOPNOTSUPP; | 819 | return -EINVAL; |
820 | 820 | ||
821 | memset(&config, 0, sizeof(config)); | 821 | memset(&config, 0, sizeof(config)); |
822 | ops->nve_config(nve, params->dev, &config); | 822 | ops->nve_config(nve, params->dev, &config); |
823 | if (nve->num_nve_tunnels && | 823 | if (nve->num_nve_tunnels && |
824 | memcmp(&config, &nve->config, sizeof(config))) { | 824 | memcmp(&config, &nve->config, sizeof(config))) { |
825 | NL_SET_ERR_MSG_MOD(extack, "Conflicting NVE tunnels configuration"); | 825 | NL_SET_ERR_MSG_MOD(extack, "Conflicting NVE tunnels configuration"); |
826 | return -EOPNOTSUPP; | 826 | return -EINVAL; |
827 | } | 827 | } |
828 | 828 | ||
829 | err = mlxsw_sp_nve_tunnel_init(mlxsw_sp, &config); | 829 | err = mlxsw_sp_nve_tunnel_init(mlxsw_sp, &config); |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 1bd2c6e15f8d..0abbaa0fbf14 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -1078,8 +1078,7 @@ static int | |||
1078 | mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port, | 1078 | mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port, |
1079 | struct mlxsw_sp_bridge_port *bridge_port, | 1079 | struct mlxsw_sp_bridge_port *bridge_port, |
1080 | u16 vid, bool is_untagged, bool is_pvid, | 1080 | u16 vid, bool is_untagged, bool is_pvid, |
1081 | struct netlink_ext_ack *extack, | 1081 | struct netlink_ext_ack *extack) |
1082 | struct switchdev_trans *trans) | ||
1083 | { | 1082 | { |
1084 | u16 pvid = mlxsw_sp_port_pvid_determine(mlxsw_sp_port, vid, is_pvid); | 1083 | u16 pvid = mlxsw_sp_port_pvid_determine(mlxsw_sp_port, vid, is_pvid); |
1085 | struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; | 1084 | struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; |
@@ -1095,9 +1094,6 @@ mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port, | |||
1095 | mlxsw_sp_port_vlan->bridge_port != bridge_port) | 1094 | mlxsw_sp_port_vlan->bridge_port != bridge_port) |
1096 | return -EEXIST; | 1095 | return -EEXIST; |
1097 | 1096 | ||
1098 | if (switchdev_trans_ph_prepare(trans)) | ||
1099 | return 0; | ||
1100 | |||
1101 | if (!mlxsw_sp_port_vlan) { | 1097 | if (!mlxsw_sp_port_vlan) { |
1102 | mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port, | 1098 | mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port, |
1103 | vid); | 1099 | vid); |
@@ -1188,6 +1184,9 @@ static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port, | |||
1188 | return err; | 1184 | return err; |
1189 | } | 1185 | } |
1190 | 1186 | ||
1187 | if (switchdev_trans_ph_commit(trans)) | ||
1188 | return 0; | ||
1189 | |||
1191 | bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev); | 1190 | bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev); |
1192 | if (WARN_ON(!bridge_port)) | 1191 | if (WARN_ON(!bridge_port)) |
1193 | return -EINVAL; | 1192 | return -EINVAL; |
@@ -1200,7 +1199,7 @@ static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port, | |||
1200 | 1199 | ||
1201 | err = mlxsw_sp_bridge_port_vlan_add(mlxsw_sp_port, bridge_port, | 1200 | err = mlxsw_sp_bridge_port_vlan_add(mlxsw_sp_port, bridge_port, |
1202 | vid, flag_untagged, | 1201 | vid, flag_untagged, |
1203 | flag_pvid, extack, trans); | 1202 | flag_pvid, extack); |
1204 | if (err) | 1203 | if (err) |
1205 | return err; | 1204 | return err; |
1206 | } | 1205 | } |
@@ -1808,7 +1807,7 @@ static void | |||
1808 | mlxsw_sp_bridge_port_vlan_del(struct mlxsw_sp_port *mlxsw_sp_port, | 1807 | mlxsw_sp_bridge_port_vlan_del(struct mlxsw_sp_port *mlxsw_sp_port, |
1809 | struct mlxsw_sp_bridge_port *bridge_port, u16 vid) | 1808 | struct mlxsw_sp_bridge_port *bridge_port, u16 vid) |
1810 | { | 1809 | { |
1811 | u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : vid; | 1810 | u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : mlxsw_sp_port->pvid; |
1812 | struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; | 1811 | struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; |
1813 | 1812 | ||
1814 | mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); | 1813 | mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); |
@@ -3207,7 +3206,6 @@ mlxsw_sp_switchdev_vxlan_vlan_add(struct mlxsw_sp *mlxsw_sp, | |||
3207 | struct mlxsw_sp_bridge_device *bridge_device, | 3206 | struct mlxsw_sp_bridge_device *bridge_device, |
3208 | const struct net_device *vxlan_dev, u16 vid, | 3207 | const struct net_device *vxlan_dev, u16 vid, |
3209 | bool flag_untagged, bool flag_pvid, | 3208 | bool flag_untagged, bool flag_pvid, |
3210 | struct switchdev_trans *trans, | ||
3211 | struct netlink_ext_ack *extack) | 3209 | struct netlink_ext_ack *extack) |
3212 | { | 3210 | { |
3213 | struct vxlan_dev *vxlan = netdev_priv(vxlan_dev); | 3211 | struct vxlan_dev *vxlan = netdev_priv(vxlan_dev); |
@@ -3225,9 +3223,6 @@ mlxsw_sp_switchdev_vxlan_vlan_add(struct mlxsw_sp *mlxsw_sp, | |||
3225 | mlxsw_sp_bridge_8021q_vxlan_dev_find(bridge_device->dev, vid)) | 3223 | mlxsw_sp_bridge_8021q_vxlan_dev_find(bridge_device->dev, vid)) |
3226 | return -EINVAL; | 3224 | return -EINVAL; |
3227 | 3225 | ||
3228 | if (switchdev_trans_ph_prepare(trans)) | ||
3229 | return 0; | ||
3230 | |||
3231 | if (!netif_running(vxlan_dev)) | 3226 | if (!netif_running(vxlan_dev)) |
3232 | return 0; | 3227 | return 0; |
3233 | 3228 | ||
@@ -3345,6 +3340,9 @@ mlxsw_sp_switchdev_vxlan_vlans_add(struct net_device *vxlan_dev, | |||
3345 | 3340 | ||
3346 | port_obj_info->handled = true; | 3341 | port_obj_info->handled = true; |
3347 | 3342 | ||
3343 | if (switchdev_trans_ph_commit(trans)) | ||
3344 | return 0; | ||
3345 | |||
3348 | bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev); | 3346 | bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev); |
3349 | if (!bridge_device) | 3347 | if (!bridge_device) |
3350 | return -EINVAL; | 3348 | return -EINVAL; |
@@ -3358,8 +3356,7 @@ mlxsw_sp_switchdev_vxlan_vlans_add(struct net_device *vxlan_dev, | |||
3358 | err = mlxsw_sp_switchdev_vxlan_vlan_add(mlxsw_sp, bridge_device, | 3356 | err = mlxsw_sp_switchdev_vxlan_vlan_add(mlxsw_sp, bridge_device, |
3359 | vxlan_dev, vid, | 3357 | vxlan_dev, vid, |
3360 | flag_untagged, | 3358 | flag_untagged, |
3361 | flag_pvid, trans, | 3359 | flag_pvid, extack); |
3362 | extack); | ||
3363 | if (err) | 3360 | if (err) |
3364 | return err; | 3361 | return err; |
3365 | } | 3362 | } |
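The switchdev hunks above move the VLAN work into the transaction's prepare phase, where errors can still be reported, and turn the commit phase into a no-op (the callers now bail out early on switchdev_trans_ph_commit()). A rough sketch of that shape under assumed names (example_apply() is hypothetical):

#include <linux/netdevice.h>
#include <net/switchdev.h>

static int example_apply(struct net_device *dev,
			 const struct switchdev_obj *obj);	/* hypothetical */

static int example_port_obj_add(struct net_device *dev,
				const struct switchdev_obj *obj,
				struct switchdev_trans *trans)
{
	if (switchdev_trans_ph_commit(trans))
		return 0;	/* everything was already done in prepare */

	return example_apply(dev, obj);	/* may fail; error is propagated */
}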
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index 20c9377e99cb..310807ef328b 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -962,13 +962,10 @@ static void lan743x_phy_link_status_change(struct net_device *netdev) | |||
962 | 962 | ||
963 | memset(&ksettings, 0, sizeof(ksettings)); | 963 | memset(&ksettings, 0, sizeof(ksettings)); |
964 | phy_ethtool_get_link_ksettings(netdev, &ksettings); | 964 | phy_ethtool_get_link_ksettings(netdev, &ksettings); |
965 | local_advertisement = phy_read(phydev, MII_ADVERTISE); | 965 | local_advertisement = |
966 | if (local_advertisement < 0) | 966 | linkmode_adv_to_mii_adv_t(phydev->advertising); |
967 | return; | 967 | remote_advertisement = |
968 | 968 | linkmode_adv_to_mii_adv_t(phydev->lp_advertising); | |
969 | remote_advertisement = phy_read(phydev, MII_LPA); | ||
970 | if (remote_advertisement < 0) | ||
971 | return; | ||
972 | 969 | ||
973 | lan743x_phy_update_flowcontrol(adapter, | 970 | lan743x_phy_update_flowcontrol(adapter, |
974 | ksettings.base.duplex, | 971 | ksettings.base.duplex, |
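The lan743x hunk stops re-reading MII_ADVERTISE/MII_LPA over MDIO (reads that could fail and forced the removed early returns) and instead derives both words from the linkmode bitmaps phylib already caches in phydev. A hedged kernel-style sketch of that conversion plus one possible pause resolution on top of it; lan743x keeps its own flow-control helper, and mii_resolve_flowctrl_fdx() is shown here only as a generic alternative:

#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/printk.h>

/* Sketch: derive the MII advertisement words from the cached
 * linkmodes and resolve pause, with no MDIO reads on this path.
 */
static void example_update_pause(struct phy_device *phydev)
{
        u16 lcladv = linkmode_adv_to_mii_adv_t(phydev->advertising);
        u16 rmtadv = linkmode_adv_to_mii_adv_t(phydev->lp_advertising);
        u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);

        if (cap & FLOW_CTRL_TX)
                pr_debug("MAC should enable TX pause\n");
        if (cap & FLOW_CTRL_RX)
                pr_debug("MAC should enable RX pause\n");
}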
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index 90afd514ffe1..d9237c65a838 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c | |||
@@ -1619,6 +1619,10 @@ static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn, | |||
1619 | cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain); | 1619 | cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain); |
1620 | rx_prod.bd_prod = cpu_to_le16(bd_prod); | 1620 | rx_prod.bd_prod = cpu_to_le16(bd_prod); |
1621 | rx_prod.cqe_prod = cpu_to_le16(cq_prod); | 1621 | rx_prod.cqe_prod = cpu_to_le16(cq_prod); |
1622 | |||
1623 | /* Make sure chain element is updated before ringing the doorbell */ | ||
1624 | dma_wmb(); | ||
1625 | |||
1622 | DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod)); | 1626 | DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod)); |
1623 | } | 1627 | } |
1624 | 1628 | ||
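The qed_ll2 hunk adds dma_wmb() so the producer element written to coherent DMA memory is observable by the device before the doorbell write that tells it to go look. A generic kernel-style sketch of that ordering pattern; the struct and register names are illustrative:

#include <asm/barrier.h>
#include <linux/io.h>

struct ex_rx_ring {
        u16 *prod_entry;         /* producer slot in coherent DMA memory */
        void __iomem *doorbell;  /* device doorbell register */
};

static void ex_post_rx_buffer(struct ex_rx_ring *ring, u16 prod)
{
        /* 1. publish the new producer value in DMA memory */
        *ring->prod_entry = prod;

        /* 2. order the DMA store before the MMIO doorbell write */
        dma_wmb();

        /* 3. ring the doorbell; the device may now fetch the update */
        writel(prod, ring->doorbell);
}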
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 298930d39b79..abb94c543aa2 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c | |||
@@ -205,6 +205,8 @@ enum cfg_version { | |||
205 | }; | 205 | }; |
206 | 206 | ||
207 | static const struct pci_device_id rtl8169_pci_tbl[] = { | 207 | static const struct pci_device_id rtl8169_pci_tbl[] = { |
208 | { PCI_VDEVICE(REALTEK, 0x2502), RTL_CFG_1 }, | ||
209 | { PCI_VDEVICE(REALTEK, 0x2600), RTL_CFG_1 }, | ||
208 | { PCI_VDEVICE(REALTEK, 0x8129), RTL_CFG_0 }, | 210 | { PCI_VDEVICE(REALTEK, 0x8129), RTL_CFG_0 }, |
209 | { PCI_VDEVICE(REALTEK, 0x8136), RTL_CFG_2 }, | 211 | { PCI_VDEVICE(REALTEK, 0x8136), RTL_CFG_2 }, |
210 | { PCI_VDEVICE(REALTEK, 0x8161), RTL_CFG_1 }, | 212 | { PCI_VDEVICE(REALTEK, 0x8161), RTL_CFG_1 }, |
@@ -706,6 +708,7 @@ module_param(use_dac, int, 0); | |||
706 | MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot."); | 708 | MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot."); |
707 | module_param_named(debug, debug.msg_enable, int, 0); | 709 | module_param_named(debug, debug.msg_enable, int, 0); |
708 | MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)"); | 710 | MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)"); |
711 | MODULE_SOFTDEP("pre: realtek"); | ||
709 | MODULE_LICENSE("GPL"); | 712 | MODULE_LICENSE("GPL"); |
710 | MODULE_FIRMWARE(FIRMWARE_8168D_1); | 713 | MODULE_FIRMWARE(FIRMWARE_8168D_1); |
711 | MODULE_FIRMWARE(FIRMWARE_8168D_2); | 714 | MODULE_FIRMWARE(FIRMWARE_8168D_2); |
@@ -1679,11 +1682,13 @@ static bool rtl8169_reset_counters(struct rtl8169_private *tp) | |||
1679 | 1682 | ||
1680 | static bool rtl8169_update_counters(struct rtl8169_private *tp) | 1683 | static bool rtl8169_update_counters(struct rtl8169_private *tp) |
1681 | { | 1684 | { |
1685 | u8 val = RTL_R8(tp, ChipCmd); | ||
1686 | |||
1682 | /* | 1687 | /* |
1683 | * Some chips are unable to dump tally counters when the receiver | 1688 | * Some chips are unable to dump tally counters when the receiver |
1684 | * is disabled. | 1689 | * is disabled. If 0xff, the chip may be in a PCI power-save state. |
1685 | */ | 1690 | */ |
1686 | if ((RTL_R8(tp, ChipCmd) & CmdRxEnb) == 0) | 1691 | if (!(val & CmdRxEnb) || val == 0xff) |
1687 | return true; | 1692 | return true; |
1688 | 1693 | ||
1689 | return rtl8169_do_counters(tp, CounterDump); | 1694 | return rtl8169_do_counters(tp, CounterDump); |
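The rtl8169_update_counters hunk reads ChipCmd once and also treats an all-ones value as "chip not reachable": a PCI read from a device in a power-save state (or one that has gone away) returns 0xff. A hedged sketch of that guard; the register offset and bit below are made up for illustration:

#include <linux/io.h>
#include <linux/types.h>

#define EX_CHIP_CMD     0x37    /* illustrative register offset */
#define EX_RX_ENABLE    0x08    /* illustrative RX-enable bit   */

/* True when asking the chip to dump counters is pointless: RX is
 * disabled, or the register reads back as all-ones, which usually
 * means the device is in a PCI power-save state or gone.
 */
static bool ex_counters_unavailable(void __iomem *regs)
{
        u8 val = readb(regs + EX_CHIP_CMD);

        return !(val & EX_RX_ENABLE) || val == 0xff;
}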
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c index 6c5092e7771c..c5e25580a43f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c | |||
@@ -263,6 +263,7 @@ static int dwxgmac2_dma_interrupt(void __iomem *ioaddr, | |||
263 | struct stmmac_extra_stats *x, u32 chan) | 263 | struct stmmac_extra_stats *x, u32 chan) |
264 | { | 264 | { |
265 | u32 intr_status = readl(ioaddr + XGMAC_DMA_CH_STATUS(chan)); | 265 | u32 intr_status = readl(ioaddr + XGMAC_DMA_CH_STATUS(chan)); |
266 | u32 intr_en = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan)); | ||
266 | int ret = 0; | 267 | int ret = 0; |
267 | 268 | ||
268 | /* ABNORMAL interrupts */ | 269 | /* ABNORMAL interrupts */ |
@@ -282,8 +283,7 @@ static int dwxgmac2_dma_interrupt(void __iomem *ioaddr, | |||
282 | x->normal_irq_n++; | 283 | x->normal_irq_n++; |
283 | 284 | ||
284 | if (likely(intr_status & XGMAC_RI)) { | 285 | if (likely(intr_status & XGMAC_RI)) { |
285 | u32 value = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan)); | 286 | if (likely(intr_en & XGMAC_RIE)) { |
286 | if (likely(value & XGMAC_RIE)) { | ||
287 | x->rx_normal_irq_n++; | 287 | x->rx_normal_irq_n++; |
288 | ret |= handle_rx; | 288 | ret |= handle_rx; |
289 | } | 289 | } |
@@ -295,7 +295,7 @@ static int dwxgmac2_dma_interrupt(void __iomem *ioaddr, | |||
295 | } | 295 | } |
296 | 296 | ||
297 | /* Clear interrupts */ | 297 | /* Clear interrupts */ |
298 | writel(~0x0, ioaddr + XGMAC_DMA_CH_STATUS(chan)); | 298 | writel(intr_en & intr_status, ioaddr + XGMAC_DMA_CH_STATUS(chan)); |
299 | 299 | ||
300 | return ret; | 300 | return ret; |
301 | } | 301 | } |
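Two things change in dwxgmac2_dma_interrupt(): the enable mask is read once up front, and the final write-to-clear acknowledges only bits that are both pending and enabled, instead of ~0x0, so masked events are not silently discarded. A small kernel-style sketch of that ack pattern; the register handles are placeholders:

#include <linux/io.h>

/* Acknowledge only interrupts this handler owns: bits raised in
 * STATUS and enabled in INT_EN.  Writing ~0 would also clear events
 * that some other path still needs to observe.
 */
static u32 ex_ack_owned_irqs(void __iomem *status_reg, void __iomem *inten_reg)
{
        u32 status = readl(status_reg);
        u32 enabled = readl(inten_reg);
        u32 owned = status & enabled;

        writel(owned, status_reg);  /* status register is write-1-to-clear */
        return owned;
}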
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 0c4ab3444cc3..5afba69981cf 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
@@ -3517,27 +3517,28 @@ static int stmmac_napi_poll(struct napi_struct *napi, int budget) | |||
3517 | struct stmmac_channel *ch = | 3517 | struct stmmac_channel *ch = |
3518 | container_of(napi, struct stmmac_channel, napi); | 3518 | container_of(napi, struct stmmac_channel, napi); |
3519 | struct stmmac_priv *priv = ch->priv_data; | 3519 | struct stmmac_priv *priv = ch->priv_data; |
3520 | int work_done = 0, work_rem = budget; | 3520 | int work_done, rx_done = 0, tx_done = 0; |
3521 | u32 chan = ch->index; | 3521 | u32 chan = ch->index; |
3522 | 3522 | ||
3523 | priv->xstats.napi_poll++; | 3523 | priv->xstats.napi_poll++; |
3524 | 3524 | ||
3525 | if (ch->has_tx) { | 3525 | if (ch->has_tx) |
3526 | int done = stmmac_tx_clean(priv, work_rem, chan); | 3526 | tx_done = stmmac_tx_clean(priv, budget, chan); |
3527 | if (ch->has_rx) | ||
3528 | rx_done = stmmac_rx(priv, budget, chan); | ||
3527 | 3529 | ||
3528 | work_done += done; | 3530 | work_done = max(rx_done, tx_done); |
3529 | work_rem -= done; | 3531 | work_done = min(work_done, budget); |
3530 | } | ||
3531 | |||
3532 | if (ch->has_rx) { | ||
3533 | int done = stmmac_rx(priv, work_rem, chan); | ||
3534 | 3532 | ||
3535 | work_done += done; | 3533 | if (work_done < budget && napi_complete_done(napi, work_done)) { |
3536 | work_rem -= done; | 3534 | int stat; |
3537 | } | ||
3538 | 3535 | ||
3539 | if (work_done < budget && napi_complete_done(napi, work_done)) | ||
3540 | stmmac_enable_dma_irq(priv, priv->ioaddr, chan); | 3536 | stmmac_enable_dma_irq(priv, priv->ioaddr, chan); |
3537 | stat = stmmac_dma_interrupt_status(priv, priv->ioaddr, | ||
3538 | &priv->xstats, chan); | ||
3539 | if (stat && napi_reschedule(napi)) | ||
3540 | stmmac_disable_dma_irq(priv, priv->ioaddr, chan); | ||
3541 | } | ||
3541 | 3542 | ||
3542 | return work_done; | 3543 | return work_done; |
3543 | } | 3544 | } |
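The reworked stmmac_napi_poll() gives both cleaners the full budget, reports min(max(rx, tx), budget) so NAPI stays scheduled while either direction is still busy, and after napi_complete_done() it re-checks the DMA status so an event that raced with the interrupt re-enable gets the poll rescheduled rather than lost. A hedged kernel-style sketch of that shape; the example_* helpers stand in for the driver's own rx/tx cleaners and IRQ controls:

#include <linux/kernel.h>
#include <linux/netdevice.h>

/* driver-specific hooks, assumed to exist elsewhere */
int example_tx_clean(struct napi_struct *napi, int budget);
int example_rx_clean(struct napi_struct *napi, int budget);
void example_irq_enable(struct napi_struct *napi);
void example_irq_disable(struct napi_struct *napi);
bool example_irq_pending(struct napi_struct *napi);

static int example_napi_poll(struct napi_struct *napi, int budget)
{
        int tx_done = example_tx_clean(napi, budget);
        int rx_done = example_rx_clean(napi, budget);
        int work_done = min(max(rx_done, tx_done), budget);

        if (work_done < budget && napi_complete_done(napi, work_done)) {
                example_irq_enable(napi);
                /* close the race: an event raised between the last
                 * clean and the re-enable must not be lost
                 */
                if (example_irq_pending(napi) && napi_reschedule(napi))
                        example_irq_disable(napi);
        }

        return work_done;
}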
@@ -4160,6 +4161,18 @@ static int stmmac_hw_init(struct stmmac_priv *priv) | |||
4160 | return ret; | 4161 | return ret; |
4161 | } | 4162 | } |
4162 | 4163 | ||
4164 | /* Rx Watchdog is available in the COREs newer than the 3.40. | ||
4165 | * In some cases, for example on buggy HW, this feature | ||
4166 | * has to be disabled; this can be done by passing the | ||
4167 | * riwt_off field from the platform. | ||
4168 | */ | ||
4169 | if (((priv->synopsys_id >= DWMAC_CORE_3_50) || | ||
4170 | (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) { | ||
4171 | priv->use_riwt = 1; | ||
4172 | dev_info(priv->device, | ||
4173 | "Enable RX Mitigation via HW Watchdog Timer\n"); | ||
4174 | } | ||
4175 | |||
4163 | return 0; | 4176 | return 0; |
4164 | } | 4177 | } |
4165 | 4178 | ||
@@ -4292,18 +4305,6 @@ int stmmac_dvr_probe(struct device *device, | |||
4292 | if (flow_ctrl) | 4305 | if (flow_ctrl) |
4293 | priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */ | 4306 | priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */ |
4294 | 4307 | ||
4295 | /* Rx Watchdog is available in the COREs newer than the 3.40. | ||
4296 | * In some cases, for example on buggy HW, this feature | ||
4297 | * has to be disabled; this can be done by passing the | ||
4298 | * riwt_off field from the platform. | ||
4299 | */ | ||
4300 | if (((priv->synopsys_id >= DWMAC_CORE_3_50) || | ||
4301 | (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) { | ||
4302 | priv->use_riwt = 1; | ||
4303 | dev_info(priv->device, | ||
4304 | "Enable RX Mitigation via HW Watchdog Timer\n"); | ||
4305 | } | ||
4306 | |||
4307 | /* Setup channels NAPI */ | 4308 | /* Setup channels NAPI */ |
4308 | maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use); | 4309 | maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use); |
4309 | 4310 | ||
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c index c54a50dbd5ac..d819e8eaba12 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c | |||
@@ -299,7 +299,17 @@ static int stmmac_pci_probe(struct pci_dev *pdev, | |||
299 | */ | 299 | */ |
300 | static void stmmac_pci_remove(struct pci_dev *pdev) | 300 | static void stmmac_pci_remove(struct pci_dev *pdev) |
301 | { | 301 | { |
302 | int i; | ||
303 | |||
302 | stmmac_dvr_remove(&pdev->dev); | 304 | stmmac_dvr_remove(&pdev->dev); |
305 | |||
306 | for (i = 0; i <= PCI_STD_RESOURCE_END; i++) { | ||
307 | if (pci_resource_len(pdev, i) == 0) | ||
308 | continue; | ||
309 | pcim_iounmap_regions(pdev, BIT(i)); | ||
310 | break; | ||
311 | } | ||
312 | |||
303 | pci_disable_device(pdev); | 313 | pci_disable_device(pdev); |
304 | } | 314 | } |
305 | 315 | ||
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c index 531294f4978b..58ea18af9813 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c | |||
@@ -301,6 +301,8 @@ static int tc_setup_cbs(struct stmmac_priv *priv, | |||
301 | /* Queue 0 is not AVB capable */ | 301 | /* Queue 0 is not AVB capable */ |
302 | if (queue <= 0 || queue >= tx_queues_count) | 302 | if (queue <= 0 || queue >= tx_queues_count) |
303 | return -EINVAL; | 303 | return -EINVAL; |
304 | if (!priv->dma_cap.av) | ||
305 | return -EOPNOTSUPP; | ||
304 | if (priv->speed != SPEED_100 && priv->speed != SPEED_1000) | 306 | if (priv->speed != SPEED_100 && priv->speed != SPEED_1000) |
305 | return -EOPNOTSUPP; | 307 | return -EOPNOTSUPP; |
306 | 308 | ||
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index a4fdad475594..18656c4094b3 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
@@ -856,10 +856,6 @@ static int tun_attach(struct tun_struct *tun, struct file *file, | |||
856 | err = 0; | 856 | err = 0; |
857 | } | 857 | } |
858 | 858 | ||
859 | rcu_assign_pointer(tfile->tun, tun); | ||
860 | rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile); | ||
861 | tun->numqueues++; | ||
862 | |||
863 | if (tfile->detached) { | 859 | if (tfile->detached) { |
864 | tun_enable_queue(tfile); | 860 | tun_enable_queue(tfile); |
865 | } else { | 861 | } else { |
@@ -876,6 +872,13 @@ static int tun_attach(struct tun_struct *tun, struct file *file, | |||
876 | * refcnt. | 872 | * refcnt. |
877 | */ | 873 | */ |
878 | 874 | ||
875 | /* Publish tfile->tun and tun->tfiles only after we've fully | ||
876 | * initialized tfile; otherwise we risk using a | ||
877 | * half-initialized object. | ||
878 | */ | ||
879 | rcu_assign_pointer(tfile->tun, tun); | ||
880 | rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile); | ||
881 | tun->numqueues++; | ||
879 | out: | 882 | out: |
880 | return err; | 883 | return err; |
881 | } | 884 | } |
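The tun_attach() change moves the rcu_assign_pointer() publication below the remaining initialization, so an RCU reader that dereferences tfile->tun or tun->tfiles[] can never observe a half-initialized object. A generic kernel-style sketch of the publish-after-init ordering; the struct and functions are illustrative:

#include <linux/errno.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct ex_item {
        int a, b;
};

static struct ex_item __rcu *ex_global;

static int ex_publish(void)
{
        struct ex_item *it = kzalloc(sizeof(*it), GFP_KERNEL);

        if (!it)
                return -ENOMEM;

        /* fully initialize first ... */
        it->a = 1;
        it->b = 2;

        /* ... then publish: rcu_assign_pointer() orders the stores
         * above before the pointer becomes visible to readers
         */
        rcu_assign_pointer(ex_global, it);
        return 0;
}

static int ex_read(void)
{
        struct ex_item *it;
        int val = 0;

        rcu_read_lock();
        it = rcu_dereference(ex_global);
        if (it)
                val = it->a + it->b;  /* never a half-built item */
        rcu_read_unlock();
        return val;
}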
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index b3b3c05903a1..3305f23793c7 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c | |||
@@ -179,10 +179,8 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf) | |||
179 | * probed with) and a slave/data interface; union | 179 | * probed with) and a slave/data interface; union |
180 | * descriptors sort this all out. | 180 | * descriptors sort this all out. |
181 | */ | 181 | */ |
182 | info->control = usb_ifnum_to_if(dev->udev, | 182 | info->control = usb_ifnum_to_if(dev->udev, info->u->bMasterInterface0); |
183 | info->u->bMasterInterface0); | 183 | info->data = usb_ifnum_to_if(dev->udev, info->u->bSlaveInterface0); |
184 | info->data = usb_ifnum_to_if(dev->udev, | ||
185 | info->u->bSlaveInterface0); | ||
186 | if (!info->control || !info->data) { | 184 | if (!info->control || !info->data) { |
187 | dev_dbg(&intf->dev, | 185 | dev_dbg(&intf->dev, |
188 | "master #%u/%p slave #%u/%p\n", | 186 | "master #%u/%p slave #%u/%p\n", |
@@ -216,18 +214,16 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf) | |||
216 | /* a data interface altsetting does the real i/o */ | 214 | /* a data interface altsetting does the real i/o */ |
217 | d = &info->data->cur_altsetting->desc; | 215 | d = &info->data->cur_altsetting->desc; |
218 | if (d->bInterfaceClass != USB_CLASS_CDC_DATA) { | 216 | if (d->bInterfaceClass != USB_CLASS_CDC_DATA) { |
219 | dev_dbg(&intf->dev, "slave class %u\n", | 217 | dev_dbg(&intf->dev, "slave class %u\n", d->bInterfaceClass); |
220 | d->bInterfaceClass); | ||
221 | goto bad_desc; | 218 | goto bad_desc; |
222 | } | 219 | } |
223 | skip: | 220 | skip: |
224 | if ( rndis && | 221 | if (rndis && header.usb_cdc_acm_descriptor && |
225 | header.usb_cdc_acm_descriptor && | 222 | header.usb_cdc_acm_descriptor->bmCapabilities) { |
226 | header.usb_cdc_acm_descriptor->bmCapabilities) { | 223 | dev_dbg(&intf->dev, |
227 | dev_dbg(&intf->dev, | 224 | "ACM capabilities %02x, not really RNDIS?\n", |
228 | "ACM capabilities %02x, not really RNDIS?\n", | 225 | header.usb_cdc_acm_descriptor->bmCapabilities); |
229 | header.usb_cdc_acm_descriptor->bmCapabilities); | 226 | goto bad_desc; |
230 | goto bad_desc; | ||
231 | } | 227 | } |
232 | 228 | ||
233 | if (header.usb_cdc_ether_desc && info->ether->wMaxSegmentSize) { | 229 | if (header.usb_cdc_ether_desc && info->ether->wMaxSegmentSize) { |
@@ -238,7 +234,7 @@ skip: | |||
238 | } | 234 | } |
239 | 235 | ||
240 | if (header.usb_cdc_mdlm_desc && | 236 | if (header.usb_cdc_mdlm_desc && |
241 | memcmp(header.usb_cdc_mdlm_desc->bGUID, mbm_guid, 16)) { | 237 | memcmp(header.usb_cdc_mdlm_desc->bGUID, mbm_guid, 16)) { |
242 | dev_dbg(&intf->dev, "GUID doesn't match\n"); | 238 | dev_dbg(&intf->dev, "GUID doesn't match\n"); |
243 | goto bad_desc; | 239 | goto bad_desc; |
244 | } | 240 | } |
@@ -302,7 +298,7 @@ skip: | |||
302 | if (info->control->cur_altsetting->desc.bNumEndpoints == 1) { | 298 | if (info->control->cur_altsetting->desc.bNumEndpoints == 1) { |
303 | struct usb_endpoint_descriptor *desc; | 299 | struct usb_endpoint_descriptor *desc; |
304 | 300 | ||
305 | dev->status = &info->control->cur_altsetting->endpoint [0]; | 301 | dev->status = &info->control->cur_altsetting->endpoint[0]; |
306 | desc = &dev->status->desc; | 302 | desc = &dev->status->desc; |
307 | if (!usb_endpoint_is_int_in(desc) || | 303 | if (!usb_endpoint_is_int_in(desc) || |
308 | (le16_to_cpu(desc->wMaxPacketSize) | 304 | (le16_to_cpu(desc->wMaxPacketSize) |
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 774e1ff01c9a..735ad838e2ba 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c | |||
@@ -123,6 +123,7 @@ static void qmimux_setup(struct net_device *dev) | |||
123 | dev->addr_len = 0; | 123 | dev->addr_len = 0; |
124 | dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; | 124 | dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; |
125 | dev->netdev_ops = &qmimux_netdev_ops; | 125 | dev->netdev_ops = &qmimux_netdev_ops; |
126 | dev->mtu = 1500; | ||
126 | dev->needs_free_netdev = true; | 127 | dev->needs_free_netdev = true; |
127 | } | 128 | } |
128 | 129 | ||
diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c index be6485428198..66d889d54e58 100644 --- a/drivers/net/wan/fsl_ucc_hdlc.c +++ b/drivers/net/wan/fsl_ucc_hdlc.c | |||
@@ -1056,6 +1056,54 @@ static const struct net_device_ops uhdlc_ops = { | |||
1056 | .ndo_tx_timeout = uhdlc_tx_timeout, | 1056 | .ndo_tx_timeout = uhdlc_tx_timeout, |
1057 | }; | 1057 | }; |
1058 | 1058 | ||
1059 | static int hdlc_map_iomem(char *name, int init_flag, void __iomem **ptr) | ||
1060 | { | ||
1061 | struct device_node *np; | ||
1062 | struct platform_device *pdev; | ||
1063 | struct resource *res; | ||
1064 | static int siram_init_flag; | ||
1065 | int ret = 0; | ||
1066 | |||
1067 | np = of_find_compatible_node(NULL, NULL, name); | ||
1068 | if (!np) | ||
1069 | return -EINVAL; | ||
1070 | |||
1071 | pdev = of_find_device_by_node(np); | ||
1072 | if (!pdev) { | ||
1073 | pr_err("%pOFn: failed to lookup pdev\n", np); | ||
1074 | of_node_put(np); | ||
1075 | return -EINVAL; | ||
1076 | } | ||
1077 | |||
1078 | of_node_put(np); | ||
1079 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1080 | if (!res) { | ||
1081 | ret = -EINVAL; | ||
1082 | goto error_put_device; | ||
1083 | } | ||
1084 | *ptr = ioremap(res->start, resource_size(res)); | ||
1085 | if (!*ptr) { | ||
1086 | ret = -ENOMEM; | ||
1087 | goto error_put_device; | ||
1088 | } | ||
1089 | |||
1090 | /* We've remapped the addresses, and we don't need the device any | ||
1091 | * more, so we should release it. | ||
1092 | */ | ||
1093 | put_device(&pdev->dev); | ||
1094 | |||
1095 | if (init_flag && siram_init_flag == 0) { | ||
1096 | memset_io(*ptr, 0, resource_size(res)); | ||
1097 | siram_init_flag = 1; | ||
1098 | } | ||
1099 | return 0; | ||
1100 | |||
1101 | error_put_device: | ||
1102 | put_device(&pdev->dev); | ||
1103 | |||
1104 | return ret; | ||
1105 | } | ||
1106 | |||
1059 | static int ucc_hdlc_probe(struct platform_device *pdev) | 1107 | static int ucc_hdlc_probe(struct platform_device *pdev) |
1060 | { | 1108 | { |
1061 | struct device_node *np = pdev->dev.of_node; | 1109 | struct device_node *np = pdev->dev.of_node; |
@@ -1150,6 +1198,15 @@ static int ucc_hdlc_probe(struct platform_device *pdev) | |||
1150 | ret = ucc_of_parse_tdm(np, utdm, ut_info); | 1198 | ret = ucc_of_parse_tdm(np, utdm, ut_info); |
1151 | if (ret) | 1199 | if (ret) |
1152 | goto free_utdm; | 1200 | goto free_utdm; |
1201 | |||
1202 | ret = hdlc_map_iomem("fsl,t1040-qe-si", 0, | ||
1203 | (void __iomem **)&utdm->si_regs); | ||
1204 | if (ret) | ||
1205 | goto free_utdm; | ||
1206 | ret = hdlc_map_iomem("fsl,t1040-qe-siram", 1, | ||
1207 | (void __iomem **)&utdm->siram); | ||
1208 | if (ret) | ||
1209 | goto unmap_si_regs; | ||
1153 | } | 1210 | } |
1154 | 1211 | ||
1155 | if (of_property_read_u16(np, "fsl,hmask", &uhdlc_priv->hmask)) | 1212 | if (of_property_read_u16(np, "fsl,hmask", &uhdlc_priv->hmask)) |
@@ -1158,7 +1215,7 @@ static int ucc_hdlc_probe(struct platform_device *pdev) | |||
1158 | ret = uhdlc_init(uhdlc_priv); | 1215 | ret = uhdlc_init(uhdlc_priv); |
1159 | if (ret) { | 1216 | if (ret) { |
1160 | dev_err(&pdev->dev, "Failed to init uhdlc\n"); | 1217 | dev_err(&pdev->dev, "Failed to init uhdlc\n"); |
1161 | goto free_utdm; | 1218 | goto undo_uhdlc_init; |
1162 | } | 1219 | } |
1163 | 1220 | ||
1164 | dev = alloc_hdlcdev(uhdlc_priv); | 1221 | dev = alloc_hdlcdev(uhdlc_priv); |
@@ -1187,6 +1244,9 @@ static int ucc_hdlc_probe(struct platform_device *pdev) | |||
1187 | free_dev: | 1244 | free_dev: |
1188 | free_netdev(dev); | 1245 | free_netdev(dev); |
1189 | undo_uhdlc_init: | 1246 | undo_uhdlc_init: |
1247 | iounmap(utdm->siram); | ||
1248 | unmap_si_regs: | ||
1249 | iounmap(utdm->si_regs); | ||
1190 | free_utdm: | 1250 | free_utdm: |
1191 | if (uhdlc_priv->tsa) | 1251 | if (uhdlc_priv->tsa) |
1192 | kfree(utdm); | 1252 | kfree(utdm); |
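hdlc_map_iomem() bundles the usual lookup chain (compatible node, its platform device, resource 0, ioremap) and drops the device reference once the mapping is held; the probe hunk then unwinds with iounmap() in reverse order through the new unmap_si_regs label. A caller-side sketch of how the two mappings compose, assuming the helper from the hunk above is visible in the same file:

#include <linux/io.h>

int hdlc_map_iomem(char *name, int init_flag, void __iomem **ptr);
/* static in fsl_ucc_hdlc.c; declared here only for the sketch */

static int example_map_tdm_regs(void __iomem **si_regs, void __iomem **siram)
{
        int ret;

        ret = hdlc_map_iomem("fsl,t1040-qe-si", 0, si_regs);
        if (ret)
                return ret;

        ret = hdlc_map_iomem("fsl,t1040-qe-siram", 1, siram);
        if (ret) {
                iounmap(*si_regs);  /* unwind in reverse order */
                return ret;
        }
        return 0;
}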
diff --git a/drivers/phy/ti/Kconfig b/drivers/phy/ti/Kconfig index f137e0107764..c4709ed7fb0e 100644 --- a/drivers/phy/ti/Kconfig +++ b/drivers/phy/ti/Kconfig | |||
@@ -82,6 +82,7 @@ config PHY_TI_GMII_SEL | |||
82 | default y if TI_CPSW=y | 82 | default y if TI_CPSW=y |
83 | depends on TI_CPSW || COMPILE_TEST | 83 | depends on TI_CPSW || COMPILE_TEST |
84 | select GENERIC_PHY | 84 | select GENERIC_PHY |
85 | select REGMAP | ||
85 | default m | 86 | default m |
86 | help | 87 | help |
87 | This driver supports configuring of the TI CPSW Port mode depending on | 88 | This driver supports configuring of the TI CPSW Port mode depending on |
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c index 797fab33bb98..7cbea796652a 100644 --- a/drivers/ptp/ptp_chardev.c +++ b/drivers/ptp/ptp_chardev.c | |||
@@ -224,7 +224,8 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg) | |||
224 | extoff = NULL; | 224 | extoff = NULL; |
225 | break; | 225 | break; |
226 | } | 226 | } |
227 | if (extoff->n_samples > PTP_MAX_SAMPLES) { | 227 | if (extoff->n_samples > PTP_MAX_SAMPLES |
228 | || extoff->rsv[0] || extoff->rsv[1] || extoff->rsv[2]) { | ||
228 | err = -EINVAL; | 229 | err = -EINVAL; |
229 | break; | 230 | break; |
230 | } | 231 | } |
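The PTP_SYS_OFFSET_EXTENDED hunk rejects requests whose reserved words are non-zero, so those words can later be given a meaning without guessing what older user space intended. A minimal, runnable sketch of that validation style for a hypothetical request struct:

#include <errno.h>

struct ex_request {
        unsigned int n_samples;
        unsigned int rsv[3];    /* reserved, must be zero for now */
};

#define EX_MAX_SAMPLES 25

/* Reject out-of-range or "future" requests: a non-zero reserved word
 * means the caller asked for something this kernel does not know.
 */
static int ex_validate(const struct ex_request *req)
{
        if (req->n_samples > EX_MAX_SAMPLES ||
            req->rsv[0] || req->rsv[1] || req->rsv[2])
                return -EINVAL;
        return 0;
}

int main(void)
{
        struct ex_request ok = { .n_samples = 5 };
        struct ex_request bad = { .n_samples = 5, .rsv = { 0, 1, 0 } };

        return ex_validate(&ok) == 0 && ex_validate(&bad) == -EINVAL ? 0 : 1;
}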
diff --git a/drivers/soc/fsl/qe/qe_tdm.c b/drivers/soc/fsl/qe/qe_tdm.c index f78c34647ca2..76480df195a8 100644 --- a/drivers/soc/fsl/qe/qe_tdm.c +++ b/drivers/soc/fsl/qe/qe_tdm.c | |||
@@ -44,10 +44,6 @@ int ucc_of_parse_tdm(struct device_node *np, struct ucc_tdm *utdm, | |||
44 | const char *sprop; | 44 | const char *sprop; |
45 | int ret = 0; | 45 | int ret = 0; |
46 | u32 val; | 46 | u32 val; |
47 | struct resource *res; | ||
48 | struct device_node *np2; | ||
49 | static int siram_init_flag; | ||
50 | struct platform_device *pdev; | ||
51 | 47 | ||
52 | sprop = of_get_property(np, "fsl,rx-sync-clock", NULL); | 48 | sprop = of_get_property(np, "fsl,rx-sync-clock", NULL); |
53 | if (sprop) { | 49 | if (sprop) { |
@@ -124,57 +120,6 @@ int ucc_of_parse_tdm(struct device_node *np, struct ucc_tdm *utdm, | |||
124 | utdm->siram_entry_id = val; | 120 | utdm->siram_entry_id = val; |
125 | 121 | ||
126 | set_si_param(utdm, ut_info); | 122 | set_si_param(utdm, ut_info); |
127 | |||
128 | np2 = of_find_compatible_node(NULL, NULL, "fsl,t1040-qe-si"); | ||
129 | if (!np2) | ||
130 | return -EINVAL; | ||
131 | |||
132 | pdev = of_find_device_by_node(np2); | ||
133 | if (!pdev) { | ||
134 | pr_err("%pOFn: failed to lookup pdev\n", np2); | ||
135 | of_node_put(np2); | ||
136 | return -EINVAL; | ||
137 | } | ||
138 | |||
139 | of_node_put(np2); | ||
140 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
141 | utdm->si_regs = devm_ioremap_resource(&pdev->dev, res); | ||
142 | if (IS_ERR(utdm->si_regs)) { | ||
143 | ret = PTR_ERR(utdm->si_regs); | ||
144 | goto err_miss_siram_property; | ||
145 | } | ||
146 | |||
147 | np2 = of_find_compatible_node(NULL, NULL, "fsl,t1040-qe-siram"); | ||
148 | if (!np2) { | ||
149 | ret = -EINVAL; | ||
150 | goto err_miss_siram_property; | ||
151 | } | ||
152 | |||
153 | pdev = of_find_device_by_node(np2); | ||
154 | if (!pdev) { | ||
155 | ret = -EINVAL; | ||
156 | pr_err("%pOFn: failed to lookup pdev\n", np2); | ||
157 | of_node_put(np2); | ||
158 | goto err_miss_siram_property; | ||
159 | } | ||
160 | |||
161 | of_node_put(np2); | ||
162 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
163 | utdm->siram = devm_ioremap_resource(&pdev->dev, res); | ||
164 | if (IS_ERR(utdm->siram)) { | ||
165 | ret = PTR_ERR(utdm->siram); | ||
166 | goto err_miss_siram_property; | ||
167 | } | ||
168 | |||
169 | if (siram_init_flag == 0) { | ||
170 | memset_io(utdm->siram, 0, resource_size(res)); | ||
171 | siram_init_flag = 1; | ||
172 | } | ||
173 | |||
174 | return ret; | ||
175 | |||
176 | err_miss_siram_property: | ||
177 | devm_iounmap(&pdev->dev, utdm->si_regs); | ||
178 | return ret; | 123 | return ret; |
179 | } | 124 | } |
180 | EXPORT_SYMBOL(ucc_of_parse_tdm); | 125 | EXPORT_SYMBOL(ucc_of_parse_tdm); |
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c index bc42d38ae031..3fbc068eaa9b 100644 --- a/drivers/vhost/vsock.c +++ b/drivers/vhost/vsock.c | |||
@@ -642,7 +642,7 @@ static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid) | |||
642 | hash_del_rcu(&vsock->hash); | 642 | hash_del_rcu(&vsock->hash); |
643 | 643 | ||
644 | vsock->guest_cid = guest_cid; | 644 | vsock->guest_cid = guest_cid; |
645 | hash_add_rcu(vhost_vsock_hash, &vsock->hash, guest_cid); | 645 | hash_add_rcu(vhost_vsock_hash, &vsock->hash, vsock->guest_cid); |
646 | mutex_unlock(&vhost_vsock_mutex); | 646 | mutex_unlock(&vhost_vsock_mutex); |
647 | 647 | ||
648 | return 0; | 648 | return 0; |
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 27b74947cd2b..573cca00a0e6 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h | |||
@@ -172,6 +172,7 @@ struct bpf_verifier_state_list { | |||
172 | #define BPF_ALU_SANITIZE_SRC 1U | 172 | #define BPF_ALU_SANITIZE_SRC 1U |
173 | #define BPF_ALU_SANITIZE_DST 2U | 173 | #define BPF_ALU_SANITIZE_DST 2U |
174 | #define BPF_ALU_NEG_VALUE (1U << 2) | 174 | #define BPF_ALU_NEG_VALUE (1U << 2) |
175 | #define BPF_ALU_NON_POINTER (1U << 3) | ||
175 | #define BPF_ALU_SANITIZE (BPF_ALU_SANITIZE_SRC | \ | 176 | #define BPF_ALU_SANITIZE (BPF_ALU_SANITIZE_SRC | \ |
176 | BPF_ALU_SANITIZE_DST) | 177 | BPF_ALU_SANITIZE_DST) |
177 | 178 | ||
diff --git a/include/linux/bpfilter.h b/include/linux/bpfilter.h index f02cee0225d4..d815622cd31e 100644 --- a/include/linux/bpfilter.h +++ b/include/linux/bpfilter.h | |||
@@ -3,13 +3,22 @@ | |||
3 | #define _LINUX_BPFILTER_H | 3 | #define _LINUX_BPFILTER_H |
4 | 4 | ||
5 | #include <uapi/linux/bpfilter.h> | 5 | #include <uapi/linux/bpfilter.h> |
6 | #include <linux/umh.h> | ||
6 | 7 | ||
7 | struct sock; | 8 | struct sock; |
8 | int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char __user *optval, | 9 | int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char __user *optval, |
9 | unsigned int optlen); | 10 | unsigned int optlen); |
10 | int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char __user *optval, | 11 | int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char __user *optval, |
11 | int __user *optlen); | 12 | int __user *optlen); |
12 | extern int (*bpfilter_process_sockopt)(struct sock *sk, int optname, | 13 | struct bpfilter_umh_ops { |
13 | char __user *optval, | 14 | struct umh_info info; |
14 | unsigned int optlen, bool is_set); | 15 | /* since ip_getsockopt() can run in parallel, serialize access to umh */ |
16 | struct mutex lock; | ||
17 | int (*sockopt)(struct sock *sk, int optname, | ||
18 | char __user *optval, | ||
19 | unsigned int optlen, bool is_set); | ||
20 | int (*start)(void); | ||
21 | bool stop; | ||
22 | }; | ||
23 | extern struct bpfilter_umh_ops bpfilter_ops; | ||
15 | #endif | 24 | #endif |
diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h index 59ddf9af909e..2dd0a9ed5b36 100644 --- a/include/linux/qed/qed_chain.h +++ b/include/linux/qed/qed_chain.h | |||
@@ -663,6 +663,37 @@ out: | |||
663 | static inline void qed_chain_set_prod(struct qed_chain *p_chain, | 663 | static inline void qed_chain_set_prod(struct qed_chain *p_chain, |
664 | u32 prod_idx, void *p_prod_elem) | 664 | u32 prod_idx, void *p_prod_elem) |
665 | { | 665 | { |
666 | if (p_chain->mode == QED_CHAIN_MODE_PBL) { | ||
667 | u32 cur_prod, page_mask, page_cnt, page_diff; | ||
668 | |||
669 | cur_prod = is_chain_u16(p_chain) ? p_chain->u.chain16.prod_idx : | ||
670 | p_chain->u.chain32.prod_idx; | ||
671 | |||
672 | /* Assume that number of elements in a page is power of 2 */ | ||
673 | page_mask = ~p_chain->elem_per_page_mask; | ||
674 | |||
675 | /* Use "cur_prod - 1" and "prod_idx - 1" since producer index | ||
676 | * reaches the first element of next page before the page index | ||
677 | * is incremented. See qed_chain_produce(). | ||
678 | * Index wrap around is not a problem because the difference | ||
679 | * between current and given producer indices is always | ||
680 | * positive and lower than the chain's capacity. | ||
681 | */ | ||
682 | page_diff = (((cur_prod - 1) & page_mask) - | ||
683 | ((prod_idx - 1) & page_mask)) / | ||
684 | p_chain->elem_per_page; | ||
685 | |||
686 | page_cnt = qed_chain_get_page_cnt(p_chain); | ||
687 | if (is_chain_u16(p_chain)) | ||
688 | p_chain->pbl.c.u16.prod_page_idx = | ||
689 | (p_chain->pbl.c.u16.prod_page_idx - | ||
690 | page_diff + page_cnt) % page_cnt; | ||
691 | else | ||
692 | p_chain->pbl.c.u32.prod_page_idx = | ||
693 | (p_chain->pbl.c.u32.prod_page_idx - | ||
694 | page_diff + page_cnt) % page_cnt; | ||
695 | } | ||
696 | |||
666 | if (is_chain_u16(p_chain)) | 697 | if (is_chain_u16(p_chain)) |
667 | p_chain->u.chain16.prod_idx = (u16) prod_idx; | 698 | p_chain->u.chain16.prod_idx = (u16) prod_idx; |
668 | else | 699 | else |
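The new PBL branch in qed_chain_set_prod() rewinds the producer page index by the number of whole pages between the current and the requested producer. With elements per page a power of two, (idx - 1) & ~elem_per_page_mask gives the page-aligned element index, and the difference divided by elem_per_page is the page step, taken modulo the page count. A runnable demonstration of that arithmetic with toy numbers:

#include <stdio.h>

int main(void)
{
        unsigned int elem_per_page = 8;                 /* power of two */
        unsigned int page_mask = ~(elem_per_page - 1);  /* ~elem_per_page_mask */
        unsigned int page_cnt = 4;

        unsigned int cur_prod = 17;      /* current producer element index */
        unsigned int prod_idx = 9;       /* producer being rewound to here */
        unsigned int prod_page_idx = 2;  /* current producer page index */

        /* whole pages between the two producers; the "- 1" matches the
         * hunk's comment: the element index steps onto the next page
         * before the page index is incremented
         */
        unsigned int page_diff = (((cur_prod - 1) & page_mask) -
                                  ((prod_idx - 1) & page_mask)) / elem_per_page;

        prod_page_idx = (prod_page_idx - page_diff + page_cnt) % page_cnt;

        printf("page_diff=%u new prod_page_idx=%u\n", page_diff, prod_page_idx);
        /* prints: page_diff=1 new prod_page_idx=1 */
        return 0;
}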
diff --git a/include/linux/sched.h b/include/linux/sched.h index 224666226e87..d2f90fa92468 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -1406,6 +1406,7 @@ extern struct pid *cad_pid; | |||
1406 | #define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */ | 1406 | #define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */ |
1407 | #define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ | 1407 | #define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ |
1408 | #define PF_MEMSTALL 0x01000000 /* Stalled due to lack of memory */ | 1408 | #define PF_MEMSTALL 0x01000000 /* Stalled due to lack of memory */ |
1409 | #define PF_UMH 0x02000000 /* I'm an Usermodehelper process */ | ||
1409 | #define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */ | 1410 | #define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */ |
1410 | #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ | 1411 | #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ |
1411 | #define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ | 1412 | #define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ |
@@ -1904,6 +1905,14 @@ static inline void rseq_execve(struct task_struct *t) | |||
1904 | 1905 | ||
1905 | #endif | 1906 | #endif |
1906 | 1907 | ||
1908 | void __exit_umh(struct task_struct *tsk); | ||
1909 | |||
1910 | static inline void exit_umh(struct task_struct *tsk) | ||
1911 | { | ||
1912 | if (unlikely(tsk->flags & PF_UMH)) | ||
1913 | __exit_umh(tsk); | ||
1914 | } | ||
1915 | |||
1907 | #ifdef CONFIG_DEBUG_RSEQ | 1916 | #ifdef CONFIG_DEBUG_RSEQ |
1908 | 1917 | ||
1909 | void rseq_syscall(struct pt_regs *regs); | 1918 | void rseq_syscall(struct pt_regs *regs); |
diff --git a/include/linux/umh.h b/include/linux/umh.h index 235f51b62c71..0c08de356d0d 100644 --- a/include/linux/umh.h +++ b/include/linux/umh.h | |||
@@ -47,6 +47,8 @@ struct umh_info { | |||
47 | const char *cmdline; | 47 | const char *cmdline; |
48 | struct file *pipe_to_umh; | 48 | struct file *pipe_to_umh; |
49 | struct file *pipe_from_umh; | 49 | struct file *pipe_from_umh; |
50 | struct list_head list; | ||
51 | void (*cleanup)(struct umh_info *info); | ||
50 | pid_t pid; | 52 | pid_t pid; |
51 | }; | 53 | }; |
52 | int fork_usermode_blob(void *data, size_t len, struct umh_info *info); | 54 | int fork_usermode_blob(void *data, size_t len, struct umh_info *info); |
diff --git a/include/uapi/linux/ptp_clock.h b/include/uapi/linux/ptp_clock.h index d73d83950265..1bc794ad957a 100644 --- a/include/uapi/linux/ptp_clock.h +++ b/include/uapi/linux/ptp_clock.h | |||
@@ -147,7 +147,7 @@ struct ptp_pin_desc { | |||
147 | #define PTP_SYS_OFFSET_PRECISE \ | 147 | #define PTP_SYS_OFFSET_PRECISE \ |
148 | _IOWR(PTP_CLK_MAGIC, 8, struct ptp_sys_offset_precise) | 148 | _IOWR(PTP_CLK_MAGIC, 8, struct ptp_sys_offset_precise) |
149 | #define PTP_SYS_OFFSET_EXTENDED \ | 149 | #define PTP_SYS_OFFSET_EXTENDED \ |
150 | _IOW(PTP_CLK_MAGIC, 9, struct ptp_sys_offset_extended) | 150 | _IOWR(PTP_CLK_MAGIC, 9, struct ptp_sys_offset_extended) |
151 | 151 | ||
152 | struct ptp_extts_event { | 152 | struct ptp_extts_event { |
153 | struct ptp_clock_time t; /* Time event occured. */ | 153 | struct ptp_clock_time t; /* Time event occured. */ |
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 715f9fcf4712..a2f53642592b 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c | |||
@@ -1219,8 +1219,6 @@ static void btf_bitfield_seq_show(void *data, u8 bits_offset, | |||
1219 | u8 nr_copy_bits; | 1219 | u8 nr_copy_bits; |
1220 | u64 print_num; | 1220 | u64 print_num; |
1221 | 1221 | ||
1222 | data += BITS_ROUNDDOWN_BYTES(bits_offset); | ||
1223 | bits_offset = BITS_PER_BYTE_MASKED(bits_offset); | ||
1224 | nr_copy_bits = nr_bits + bits_offset; | 1222 | nr_copy_bits = nr_bits + bits_offset; |
1225 | nr_copy_bytes = BITS_ROUNDUP_BYTES(nr_copy_bits); | 1223 | nr_copy_bytes = BITS_ROUNDUP_BYTES(nr_copy_bits); |
1226 | 1224 | ||
@@ -1255,7 +1253,9 @@ static void btf_int_bits_seq_show(const struct btf *btf, | |||
1255 | * BTF_INT_OFFSET() cannot exceed 64 bits. | 1253 | * BTF_INT_OFFSET() cannot exceed 64 bits. |
1256 | */ | 1254 | */ |
1257 | total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data); | 1255 | total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data); |
1258 | btf_bitfield_seq_show(data, total_bits_offset, nr_bits, m); | 1256 | data += BITS_ROUNDDOWN_BYTES(total_bits_offset); |
1257 | bits_offset = BITS_PER_BYTE_MASKED(total_bits_offset); | ||
1258 | btf_bitfield_seq_show(data, bits_offset, nr_bits, m); | ||
1259 | } | 1259 | } |
1260 | 1260 | ||
1261 | static void btf_int_seq_show(const struct btf *btf, const struct btf_type *t, | 1261 | static void btf_int_seq_show(const struct btf *btf, const struct btf_type *t, |
@@ -2001,12 +2001,12 @@ static void btf_struct_seq_show(const struct btf *btf, const struct btf_type *t, | |||
2001 | 2001 | ||
2002 | member_offset = btf_member_bit_offset(t, member); | 2002 | member_offset = btf_member_bit_offset(t, member); |
2003 | bitfield_size = btf_member_bitfield_size(t, member); | 2003 | bitfield_size = btf_member_bitfield_size(t, member); |
2004 | bytes_offset = BITS_ROUNDDOWN_BYTES(member_offset); | ||
2005 | bits8_offset = BITS_PER_BYTE_MASKED(member_offset); | ||
2004 | if (bitfield_size) { | 2006 | if (bitfield_size) { |
2005 | btf_bitfield_seq_show(data, member_offset, | 2007 | btf_bitfield_seq_show(data + bytes_offset, bits8_offset, |
2006 | bitfield_size, m); | 2008 | bitfield_size, m); |
2007 | } else { | 2009 | } else { |
2008 | bytes_offset = BITS_ROUNDDOWN_BYTES(member_offset); | ||
2009 | bits8_offset = BITS_PER_BYTE_MASKED(member_offset); | ||
2010 | ops = btf_type_ops(member_type); | 2010 | ops = btf_type_ops(member_type); |
2011 | ops->seq_show(btf, member_type, member->type, | 2011 | ops->seq_show(btf, member_type, member->type, |
2012 | data + bytes_offset, bits8_offset, m); | 2012 | data + bytes_offset, bits8_offset, m); |
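The btf.c change makes the callers split a bit offset into a byte adjustment for the data pointer plus a residual in-byte offset before calling btf_bitfield_seq_show(), so the caller that already combines two offsets (int offset plus member offset) rounds exactly once. The two macros are simply bits >> 3 and bits & 0x7; a tiny runnable check of the split:

#include <assert.h>

#define BITS_ROUNDDOWN_BYTES(bits)  ((bits) >> 3)
#define BITS_PER_BYTE_MASKED(bits)  ((bits) & 0x7)

int main(void)
{
        unsigned int total_bits_offset = 35;  /* e.g. int offset + member offset */

        /* advance the data pointer by whole bytes, keep only the
         * residual 0..7 bit offset for the show routine
         */
        assert(BITS_ROUNDDOWN_BYTES(total_bits_offset) == 4);
        assert(BITS_PER_BYTE_MASKED(total_bits_offset) == 3);
        return 0;
}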
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c index 90daf285de03..d9e2483669d0 100644 --- a/kernel/bpf/stackmap.c +++ b/kernel/bpf/stackmap.c | |||
@@ -260,7 +260,7 @@ static int stack_map_get_build_id(struct vm_area_struct *vma, | |||
260 | return -EFAULT; /* page not mapped */ | 260 | return -EFAULT; /* page not mapped */ |
261 | 261 | ||
262 | ret = -EINVAL; | 262 | ret = -EINVAL; |
263 | page_addr = page_address(page); | 263 | page_addr = kmap_atomic(page); |
264 | ehdr = (Elf32_Ehdr *)page_addr; | 264 | ehdr = (Elf32_Ehdr *)page_addr; |
265 | 265 | ||
266 | /* compare magic x7f "ELF" */ | 266 | /* compare magic x7f "ELF" */ |
@@ -276,6 +276,7 @@ static int stack_map_get_build_id(struct vm_area_struct *vma, | |||
276 | else if (ehdr->e_ident[EI_CLASS] == ELFCLASS64) | 276 | else if (ehdr->e_ident[EI_CLASS] == ELFCLASS64) |
277 | ret = stack_map_get_build_id_64(page_addr, build_id); | 277 | ret = stack_map_get_build_id_64(page_addr, build_id); |
278 | out: | 278 | out: |
279 | kunmap_atomic(page_addr); | ||
279 | put_page(page); | 280 | put_page(page); |
280 | return ret; | 281 | return ret; |
281 | } | 282 | } |
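stack_map_get_build_id() switches from page_address() to kmap_atomic() so the page holding the ELF header is also readable when it lives in highmem, and pairs it with kunmap_atomic() on the shared exit path. A generic kernel-style sketch of the pattern:

#include <linux/highmem.h>

/* Read the first byte of a page that may live in highmem: kmap_atomic()
 * always provides a temporary kernel mapping, where page_address()
 * could return NULL for an unmapped highmem page.
 */
static u8 ex_read_first_byte(struct page *page)
{
        void *addr = kmap_atomic(page);
        u8 first = *(u8 *)addr;

        kunmap_atomic(addr);
        return first;
}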
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index f6bc62a9ee8e..56674a7c3778 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c | |||
@@ -3103,6 +3103,40 @@ static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg, | |||
3103 | } | 3103 | } |
3104 | } | 3104 | } |
3105 | 3105 | ||
3106 | static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env, | ||
3107 | const struct bpf_insn *insn) | ||
3108 | { | ||
3109 | return env->allow_ptr_leaks || BPF_SRC(insn->code) == BPF_K; | ||
3110 | } | ||
3111 | |||
3112 | static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux, | ||
3113 | u32 alu_state, u32 alu_limit) | ||
3114 | { | ||
3115 | /* If we arrived here from different branches with different | ||
3116 | * state or limits to sanitize, then this won't work. | ||
3117 | */ | ||
3118 | if (aux->alu_state && | ||
3119 | (aux->alu_state != alu_state || | ||
3120 | aux->alu_limit != alu_limit)) | ||
3121 | return -EACCES; | ||
3122 | |||
3123 | /* Corresponding fixup done in fixup_bpf_calls(). */ | ||
3124 | aux->alu_state = alu_state; | ||
3125 | aux->alu_limit = alu_limit; | ||
3126 | return 0; | ||
3127 | } | ||
3128 | |||
3129 | static int sanitize_val_alu(struct bpf_verifier_env *env, | ||
3130 | struct bpf_insn *insn) | ||
3131 | { | ||
3132 | struct bpf_insn_aux_data *aux = cur_aux(env); | ||
3133 | |||
3134 | if (can_skip_alu_sanitation(env, insn)) | ||
3135 | return 0; | ||
3136 | |||
3137 | return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0); | ||
3138 | } | ||
3139 | |||
3106 | static int sanitize_ptr_alu(struct bpf_verifier_env *env, | 3140 | static int sanitize_ptr_alu(struct bpf_verifier_env *env, |
3107 | struct bpf_insn *insn, | 3141 | struct bpf_insn *insn, |
3108 | const struct bpf_reg_state *ptr_reg, | 3142 | const struct bpf_reg_state *ptr_reg, |
@@ -3117,7 +3151,7 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env, | |||
3117 | struct bpf_reg_state tmp; | 3151 | struct bpf_reg_state tmp; |
3118 | bool ret; | 3152 | bool ret; |
3119 | 3153 | ||
3120 | if (env->allow_ptr_leaks || BPF_SRC(insn->code) == BPF_K) | 3154 | if (can_skip_alu_sanitation(env, insn)) |
3121 | return 0; | 3155 | return 0; |
3122 | 3156 | ||
3123 | /* We already marked aux for masking from non-speculative | 3157 | /* We already marked aux for masking from non-speculative |
@@ -3133,19 +3167,8 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env, | |||
3133 | 3167 | ||
3134 | if (retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg)) | 3168 | if (retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg)) |
3135 | return 0; | 3169 | return 0; |
3136 | 3170 | if (update_alu_sanitation_state(aux, alu_state, alu_limit)) | |
3137 | /* If we arrived here from different branches with different | ||
3138 | * limits to sanitize, then this won't work. | ||
3139 | */ | ||
3140 | if (aux->alu_state && | ||
3141 | (aux->alu_state != alu_state || | ||
3142 | aux->alu_limit != alu_limit)) | ||
3143 | return -EACCES; | 3171 | return -EACCES; |
3144 | |||
3145 | /* Corresponding fixup done in fixup_bpf_calls(). */ | ||
3146 | aux->alu_state = alu_state; | ||
3147 | aux->alu_limit = alu_limit; | ||
3148 | |||
3149 | do_sim: | 3172 | do_sim: |
3150 | /* Simulate and find potential out-of-bounds access under | 3173 | /* Simulate and find potential out-of-bounds access under |
3151 | * speculative execution from truncation as a result of | 3174 | * speculative execution from truncation as a result of |
@@ -3418,6 +3441,8 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, | |||
3418 | s64 smin_val, smax_val; | 3441 | s64 smin_val, smax_val; |
3419 | u64 umin_val, umax_val; | 3442 | u64 umin_val, umax_val; |
3420 | u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32; | 3443 | u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32; |
3444 | u32 dst = insn->dst_reg; | ||
3445 | int ret; | ||
3421 | 3446 | ||
3422 | if (insn_bitness == 32) { | 3447 | if (insn_bitness == 32) { |
3423 | /* Relevant for 32-bit RSH: Information can propagate towards | 3448 | /* Relevant for 32-bit RSH: Information can propagate towards |
@@ -3452,6 +3477,11 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, | |||
3452 | 3477 | ||
3453 | switch (opcode) { | 3478 | switch (opcode) { |
3454 | case BPF_ADD: | 3479 | case BPF_ADD: |
3480 | ret = sanitize_val_alu(env, insn); | ||
3481 | if (ret < 0) { | ||
3482 | verbose(env, "R%d tried to add from different pointers or scalars\n", dst); | ||
3483 | return ret; | ||
3484 | } | ||
3455 | if (signed_add_overflows(dst_reg->smin_value, smin_val) || | 3485 | if (signed_add_overflows(dst_reg->smin_value, smin_val) || |
3456 | signed_add_overflows(dst_reg->smax_value, smax_val)) { | 3486 | signed_add_overflows(dst_reg->smax_value, smax_val)) { |
3457 | dst_reg->smin_value = S64_MIN; | 3487 | dst_reg->smin_value = S64_MIN; |
@@ -3471,6 +3501,11 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, | |||
3471 | dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off); | 3501 | dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off); |
3472 | break; | 3502 | break; |
3473 | case BPF_SUB: | 3503 | case BPF_SUB: |
3504 | ret = sanitize_val_alu(env, insn); | ||
3505 | if (ret < 0) { | ||
3506 | verbose(env, "R%d tried to sub from different pointers or scalars\n", dst); | ||
3507 | return ret; | ||
3508 | } | ||
3474 | if (signed_sub_overflows(dst_reg->smin_value, smax_val) || | 3509 | if (signed_sub_overflows(dst_reg->smin_value, smax_val) || |
3475 | signed_sub_overflows(dst_reg->smax_value, smin_val)) { | 3510 | signed_sub_overflows(dst_reg->smax_value, smin_val)) { |
3476 | /* Overflow possible, we know nothing */ | 3511 | /* Overflow possible, we know nothing */ |
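The verifier refactor factors the "same instruction reached from different paths must agree" check into update_alu_sanitation_state() and reuses it for the new scalar case (BPF_ALU_NON_POINTER). The core is a per-instruction latch: the first path records (state, limit), and any later path wanting a different pair fails with -EACCES. A runnable sketch of that latch, independent of the BPF specifics:

#include <assert.h>
#include <errno.h>
#include <stdint.h>

struct ex_insn_aux {
        uint32_t alu_state;  /* 0 means "not recorded yet" */
        uint32_t alu_limit;
};

/* First caller latches (state, limit); later callers must match
 * exactly, otherwise the instruction cannot be rewritten consistently.
 */
static int ex_update_sanitation(struct ex_insn_aux *aux,
                                uint32_t state, uint32_t limit)
{
        if (aux->alu_state &&
            (aux->alu_state != state || aux->alu_limit != limit))
                return -EACCES;

        aux->alu_state = state;
        aux->alu_limit = limit;
        return 0;
}

int main(void)
{
        struct ex_insn_aux aux = { 0, 0 };

        assert(ex_update_sanitation(&aux, 1, 16) == 0);        /* first path  */
        assert(ex_update_sanitation(&aux, 1, 16) == 0);        /* same again  */
        assert(ex_update_sanitation(&aux, 2, 16) == -EACCES);  /* conflict    */
        return 0;
}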
diff --git a/kernel/exit.c b/kernel/exit.c index 2d14979577ee..284f2fe9a293 100644 --- a/kernel/exit.c +++ b/kernel/exit.c | |||
@@ -866,6 +866,7 @@ void __noreturn do_exit(long code) | |||
866 | exit_task_namespaces(tsk); | 866 | exit_task_namespaces(tsk); |
867 | exit_task_work(tsk); | 867 | exit_task_work(tsk); |
868 | exit_thread(tsk); | 868 | exit_thread(tsk); |
869 | exit_umh(tsk); | ||
869 | 870 | ||
870 | /* | 871 | /* |
871 | * Flush inherited counters to the parent - before the parent | 872 | * Flush inherited counters to the parent - before the parent |
diff --git a/kernel/umh.c b/kernel/umh.c index 0baa672e023c..d937cbad903a 100644 --- a/kernel/umh.c +++ b/kernel/umh.c | |||
@@ -37,6 +37,8 @@ static kernel_cap_t usermodehelper_bset = CAP_FULL_SET; | |||
37 | static kernel_cap_t usermodehelper_inheritable = CAP_FULL_SET; | 37 | static kernel_cap_t usermodehelper_inheritable = CAP_FULL_SET; |
38 | static DEFINE_SPINLOCK(umh_sysctl_lock); | 38 | static DEFINE_SPINLOCK(umh_sysctl_lock); |
39 | static DECLARE_RWSEM(umhelper_sem); | 39 | static DECLARE_RWSEM(umhelper_sem); |
40 | static LIST_HEAD(umh_list); | ||
41 | static DEFINE_MUTEX(umh_list_lock); | ||
40 | 42 | ||
41 | static void call_usermodehelper_freeinfo(struct subprocess_info *info) | 43 | static void call_usermodehelper_freeinfo(struct subprocess_info *info) |
42 | { | 44 | { |
@@ -100,10 +102,12 @@ static int call_usermodehelper_exec_async(void *data) | |||
100 | commit_creds(new); | 102 | commit_creds(new); |
101 | 103 | ||
102 | sub_info->pid = task_pid_nr(current); | 104 | sub_info->pid = task_pid_nr(current); |
103 | if (sub_info->file) | 105 | if (sub_info->file) { |
104 | retval = do_execve_file(sub_info->file, | 106 | retval = do_execve_file(sub_info->file, |
105 | sub_info->argv, sub_info->envp); | 107 | sub_info->argv, sub_info->envp); |
106 | else | 108 | if (!retval) |
109 | current->flags |= PF_UMH; | ||
110 | } else | ||
107 | retval = do_execve(getname_kernel(sub_info->path), | 111 | retval = do_execve(getname_kernel(sub_info->path), |
108 | (const char __user *const __user *)sub_info->argv, | 112 | (const char __user *const __user *)sub_info->argv, |
109 | (const char __user *const __user *)sub_info->envp); | 113 | (const char __user *const __user *)sub_info->envp); |
@@ -517,6 +521,11 @@ int fork_usermode_blob(void *data, size_t len, struct umh_info *info) | |||
517 | goto out; | 521 | goto out; |
518 | 522 | ||
519 | err = call_usermodehelper_exec(sub_info, UMH_WAIT_EXEC); | 523 | err = call_usermodehelper_exec(sub_info, UMH_WAIT_EXEC); |
524 | if (!err) { | ||
525 | mutex_lock(&umh_list_lock); | ||
526 | list_add(&info->list, &umh_list); | ||
527 | mutex_unlock(&umh_list_lock); | ||
528 | } | ||
520 | out: | 529 | out: |
521 | fput(file); | 530 | fput(file); |
522 | return err; | 531 | return err; |
@@ -679,6 +688,26 @@ static int proc_cap_handler(struct ctl_table *table, int write, | |||
679 | return 0; | 688 | return 0; |
680 | } | 689 | } |
681 | 690 | ||
691 | void __exit_umh(struct task_struct *tsk) | ||
692 | { | ||
693 | struct umh_info *info; | ||
694 | pid_t pid = tsk->pid; | ||
695 | |||
696 | mutex_lock(&umh_list_lock); | ||
697 | list_for_each_entry(info, &umh_list, list) { | ||
698 | if (info->pid == pid) { | ||
699 | list_del(&info->list); | ||
700 | mutex_unlock(&umh_list_lock); | ||
701 | goto out; | ||
702 | } | ||
703 | } | ||
704 | mutex_unlock(&umh_list_lock); | ||
705 | return; | ||
706 | out: | ||
707 | if (info->cleanup) | ||
708 | info->cleanup(info); | ||
709 | } | ||
710 | |||
682 | struct ctl_table usermodehelper_table[] = { | 711 | struct ctl_table usermodehelper_table[] = { |
683 | { | 712 | { |
684 | .procname = "bset", | 713 | .procname = "bset", |
diff --git a/net/bpfilter/bpfilter_kern.c b/net/bpfilter/bpfilter_kern.c index 7acfc83087d5..7ee4fea93637 100644 --- a/net/bpfilter/bpfilter_kern.c +++ b/net/bpfilter/bpfilter_kern.c | |||
@@ -13,39 +13,24 @@ | |||
13 | extern char bpfilter_umh_start; | 13 | extern char bpfilter_umh_start; |
14 | extern char bpfilter_umh_end; | 14 | extern char bpfilter_umh_end; |
15 | 15 | ||
16 | static struct umh_info info; | 16 | static void shutdown_umh(void) |
17 | /* since ip_getsockopt() can run in parallel, serialize access to umh */ | ||
18 | static DEFINE_MUTEX(bpfilter_lock); | ||
19 | |||
20 | static void shutdown_umh(struct umh_info *info) | ||
21 | { | 17 | { |
22 | struct task_struct *tsk; | 18 | struct task_struct *tsk; |
23 | 19 | ||
24 | if (!info->pid) | 20 | if (bpfilter_ops.stop) |
25 | return; | 21 | return; |
26 | tsk = get_pid_task(find_vpid(info->pid), PIDTYPE_PID); | 22 | |
23 | tsk = get_pid_task(find_vpid(bpfilter_ops.info.pid), PIDTYPE_PID); | ||
27 | if (tsk) { | 24 | if (tsk) { |
28 | force_sig(SIGKILL, tsk); | 25 | force_sig(SIGKILL, tsk); |
29 | put_task_struct(tsk); | 26 | put_task_struct(tsk); |
30 | } | 27 | } |
31 | fput(info->pipe_to_umh); | ||
32 | fput(info->pipe_from_umh); | ||
33 | info->pid = 0; | ||
34 | } | 28 | } |
35 | 29 | ||
36 | static void __stop_umh(void) | 30 | static void __stop_umh(void) |
37 | { | 31 | { |
38 | if (IS_ENABLED(CONFIG_INET)) { | 32 | if (IS_ENABLED(CONFIG_INET)) |
39 | bpfilter_process_sockopt = NULL; | 33 | shutdown_umh(); |
40 | shutdown_umh(&info); | ||
41 | } | ||
42 | } | ||
43 | |||
44 | static void stop_umh(void) | ||
45 | { | ||
46 | mutex_lock(&bpfilter_lock); | ||
47 | __stop_umh(); | ||
48 | mutex_unlock(&bpfilter_lock); | ||
49 | } | 34 | } |
50 | 35 | ||
51 | static int __bpfilter_process_sockopt(struct sock *sk, int optname, | 36 | static int __bpfilter_process_sockopt(struct sock *sk, int optname, |
@@ -63,10 +48,10 @@ static int __bpfilter_process_sockopt(struct sock *sk, int optname, | |||
63 | req.cmd = optname; | 48 | req.cmd = optname; |
64 | req.addr = (long __force __user)optval; | 49 | req.addr = (long __force __user)optval; |
65 | req.len = optlen; | 50 | req.len = optlen; |
66 | mutex_lock(&bpfilter_lock); | 51 | if (!bpfilter_ops.info.pid) |
67 | if (!info.pid) | ||
68 | goto out; | 52 | goto out; |
69 | n = __kernel_write(info.pipe_to_umh, &req, sizeof(req), &pos); | 53 | n = __kernel_write(bpfilter_ops.info.pipe_to_umh, &req, sizeof(req), |
54 | &pos); | ||
70 | if (n != sizeof(req)) { | 55 | if (n != sizeof(req)) { |
71 | pr_err("write fail %zd\n", n); | 56 | pr_err("write fail %zd\n", n); |
72 | __stop_umh(); | 57 | __stop_umh(); |
@@ -74,7 +59,8 @@ static int __bpfilter_process_sockopt(struct sock *sk, int optname, | |||
74 | goto out; | 59 | goto out; |
75 | } | 60 | } |
76 | pos = 0; | 61 | pos = 0; |
77 | n = kernel_read(info.pipe_from_umh, &reply, sizeof(reply), &pos); | 62 | n = kernel_read(bpfilter_ops.info.pipe_from_umh, &reply, sizeof(reply), |
63 | &pos); | ||
78 | if (n != sizeof(reply)) { | 64 | if (n != sizeof(reply)) { |
79 | pr_err("read fail %zd\n", n); | 65 | pr_err("read fail %zd\n", n); |
80 | __stop_umh(); | 66 | __stop_umh(); |
@@ -83,37 +69,59 @@ static int __bpfilter_process_sockopt(struct sock *sk, int optname, | |||
83 | } | 69 | } |
84 | ret = reply.status; | 70 | ret = reply.status; |
85 | out: | 71 | out: |
86 | mutex_unlock(&bpfilter_lock); | ||
87 | return ret; | 72 | return ret; |
88 | } | 73 | } |
89 | 74 | ||
90 | static int __init load_umh(void) | 75 | static int start_umh(void) |
91 | { | 76 | { |
92 | int err; | 77 | int err; |
93 | 78 | ||
94 | /* fork usermode process */ | 79 | /* fork usermode process */ |
95 | info.cmdline = "bpfilter_umh"; | ||
96 | err = fork_usermode_blob(&bpfilter_umh_start, | 80 | err = fork_usermode_blob(&bpfilter_umh_start, |
97 | &bpfilter_umh_end - &bpfilter_umh_start, | 81 | &bpfilter_umh_end - &bpfilter_umh_start, |
98 | &info); | 82 | &bpfilter_ops.info); |
99 | if (err) | 83 | if (err) |
100 | return err; | 84 | return err; |
101 | pr_info("Loaded bpfilter_umh pid %d\n", info.pid); | 85 | bpfilter_ops.stop = false; |
86 | pr_info("Loaded bpfilter_umh pid %d\n", bpfilter_ops.info.pid); | ||
102 | 87 | ||
103 | /* health check that usermode process started correctly */ | 88 | /* health check that usermode process started correctly */ |
104 | if (__bpfilter_process_sockopt(NULL, 0, NULL, 0, 0) != 0) { | 89 | if (__bpfilter_process_sockopt(NULL, 0, NULL, 0, 0) != 0) { |
105 | stop_umh(); | 90 | shutdown_umh(); |
106 | return -EFAULT; | 91 | return -EFAULT; |
107 | } | 92 | } |
108 | if (IS_ENABLED(CONFIG_INET)) | ||
109 | bpfilter_process_sockopt = &__bpfilter_process_sockopt; | ||
110 | 93 | ||
111 | return 0; | 94 | return 0; |
112 | } | 95 | } |
113 | 96 | ||
97 | static int __init load_umh(void) | ||
98 | { | ||
99 | int err; | ||
100 | |||
101 | mutex_lock(&bpfilter_ops.lock); | ||
102 | if (!bpfilter_ops.stop) { | ||
103 | err = -EFAULT; | ||
104 | goto out; | ||
105 | } | ||
106 | err = start_umh(); | ||
107 | if (!err && IS_ENABLED(CONFIG_INET)) { | ||
108 | bpfilter_ops.sockopt = &__bpfilter_process_sockopt; | ||
109 | bpfilter_ops.start = &start_umh; | ||
110 | } | ||
111 | out: | ||
112 | mutex_unlock(&bpfilter_ops.lock); | ||
113 | return err; | ||
114 | } | ||
115 | |||
114 | static void __exit fini_umh(void) | 116 | static void __exit fini_umh(void) |
115 | { | 117 | { |
116 | stop_umh(); | 118 | mutex_lock(&bpfilter_ops.lock); |
119 | if (IS_ENABLED(CONFIG_INET)) { | ||
120 | shutdown_umh(); | ||
121 | bpfilter_ops.start = NULL; | ||
122 | bpfilter_ops.sockopt = NULL; | ||
123 | } | ||
124 | mutex_unlock(&bpfilter_ops.lock); | ||
117 | } | 125 | } |
118 | module_init(load_umh); | 126 | module_init(load_umh); |
119 | module_exit(fini_umh); | 127 | module_exit(fini_umh); |
diff --git a/net/bpfilter/bpfilter_umh_blob.S b/net/bpfilter/bpfilter_umh_blob.S index 40311d10d2f2..7f1c521dcc2f 100644 --- a/net/bpfilter/bpfilter_umh_blob.S +++ b/net/bpfilter/bpfilter_umh_blob.S | |||
@@ -1,5 +1,5 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | .section .init.rodata, "a" | 2 | .section .bpfilter_umh, "a" |
3 | .global bpfilter_umh_start | 3 | .global bpfilter_umh_start |
4 | bpfilter_umh_start: | 4 | bpfilter_umh_start: |
5 | .incbin "net/bpfilter/bpfilter_umh" | 5 | .incbin "net/bpfilter/bpfilter_umh" |
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c index 5372e2042adf..2cb8da465b98 100644 --- a/net/bridge/br_forward.c +++ b/net/bridge/br_forward.c | |||
@@ -65,6 +65,7 @@ EXPORT_SYMBOL_GPL(br_dev_queue_push_xmit); | |||
65 | 65 | ||
66 | int br_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb) | 66 | int br_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb) |
67 | { | 67 | { |
68 | skb->tstamp = 0; | ||
68 | return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING, | 69 | return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING, |
69 | net, sk, skb, NULL, skb->dev, | 70 | net, sk, skb, NULL, skb->dev, |
70 | br_dev_queue_push_xmit); | 71 | br_dev_queue_push_xmit); |
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c index d21a23698410..c93c35bb73dd 100644 --- a/net/bridge/br_netfilter_hooks.c +++ b/net/bridge/br_netfilter_hooks.c | |||
@@ -265,7 +265,7 @@ int br_nf_pre_routing_finish_bridge(struct net *net, struct sock *sk, struct sk_ | |||
265 | struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb); | 265 | struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb); |
266 | int ret; | 266 | int ret; |
267 | 267 | ||
268 | if (neigh->hh.hh_len) { | 268 | if ((neigh->nud_state & NUD_CONNECTED) && neigh->hh.hh_len) { |
269 | neigh_hh_bridge(&neigh->hh, skb); | 269 | neigh_hh_bridge(&neigh->hh, skb); |
270 | skb->dev = nf_bridge->physindev; | 270 | skb->dev = nf_bridge->physindev; |
271 | ret = br_handle_frame_finish(net, sk, skb); | 271 | ret = br_handle_frame_finish(net, sk, skb); |
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index d240b3e7919f..eabf8bf28a3f 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h | |||
@@ -107,6 +107,7 @@ struct br_tunnel_info { | |||
107 | /* private vlan flags */ | 107 | /* private vlan flags */ |
108 | enum { | 108 | enum { |
109 | BR_VLFLAG_PER_PORT_STATS = BIT(0), | 109 | BR_VLFLAG_PER_PORT_STATS = BIT(0), |
110 | BR_VLFLAG_ADDED_BY_SWITCHDEV = BIT(1), | ||
110 | }; | 111 | }; |
111 | 112 | ||
112 | /** | 113 | /** |
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c index 4a2f31157ef5..96abf8feb9dc 100644 --- a/net/bridge/br_vlan.c +++ b/net/bridge/br_vlan.c | |||
@@ -80,16 +80,18 @@ static bool __vlan_add_flags(struct net_bridge_vlan *v, u16 flags) | |||
80 | } | 80 | } |
81 | 81 | ||
82 | static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br, | 82 | static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br, |
83 | u16 vid, u16 flags, struct netlink_ext_ack *extack) | 83 | struct net_bridge_vlan *v, u16 flags, |
84 | struct netlink_ext_ack *extack) | ||
84 | { | 85 | { |
85 | int err; | 86 | int err; |
86 | 87 | ||
87 | /* Try switchdev op first. In case it is not supported, fallback to | 88 | /* Try switchdev op first. In case it is not supported, fallback to |
88 | * 8021q add. | 89 | * 8021q add. |
89 | */ | 90 | */ |
90 | err = br_switchdev_port_vlan_add(dev, vid, flags, extack); | 91 | err = br_switchdev_port_vlan_add(dev, v->vid, flags, extack); |
91 | if (err == -EOPNOTSUPP) | 92 | if (err == -EOPNOTSUPP) |
92 | return vlan_vid_add(dev, br->vlan_proto, vid); | 93 | return vlan_vid_add(dev, br->vlan_proto, v->vid); |
94 | v->priv_flags |= BR_VLFLAG_ADDED_BY_SWITCHDEV; | ||
93 | return err; | 95 | return err; |
94 | } | 96 | } |
95 | 97 | ||
@@ -121,19 +123,17 @@ static void __vlan_del_list(struct net_bridge_vlan *v) | |||
121 | } | 123 | } |
122 | 124 | ||
123 | static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br, | 125 | static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br, |
124 | u16 vid) | 126 | const struct net_bridge_vlan *v) |
125 | { | 127 | { |
126 | int err; | 128 | int err; |
127 | 129 | ||
128 | /* Try switchdev op first. In case it is not supported, fallback to | 130 | /* Try switchdev op first. In case it is not supported, fallback to |
129 | * 8021q del. | 131 | * 8021q del. |
130 | */ | 132 | */ |
131 | err = br_switchdev_port_vlan_del(dev, vid); | 133 | err = br_switchdev_port_vlan_del(dev, v->vid); |
132 | if (err == -EOPNOTSUPP) { | 134 | if (!(v->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV)) |
133 | vlan_vid_del(dev, br->vlan_proto, vid); | 135 | vlan_vid_del(dev, br->vlan_proto, v->vid); |
134 | return 0; | 136 | return err == -EOPNOTSUPP ? 0 : err; |
135 | } | ||
136 | return err; | ||
137 | } | 137 | } |
138 | 138 | ||
139 | /* Returns a master vlan, if it didn't exist it gets created. In all cases a | 139 | /* Returns a master vlan, if it didn't exist it gets created. In all cases a |
@@ -242,7 +242,7 @@ static int __vlan_add(struct net_bridge_vlan *v, u16 flags, | |||
242 | * This ensures tagged traffic enters the bridge when | 242 | * This ensures tagged traffic enters the bridge when |
243 | * promiscuous mode is disabled by br_manage_promisc(). | 243 | * promiscuous mode is disabled by br_manage_promisc(). |
244 | */ | 244 | */ |
245 | err = __vlan_vid_add(dev, br, v->vid, flags, extack); | 245 | err = __vlan_vid_add(dev, br, v, flags, extack); |
246 | if (err) | 246 | if (err) |
247 | goto out; | 247 | goto out; |
248 | 248 | ||
@@ -305,7 +305,7 @@ out_fdb_insert: | |||
305 | 305 | ||
306 | out_filt: | 306 | out_filt: |
307 | if (p) { | 307 | if (p) { |
308 | __vlan_vid_del(dev, br, v->vid); | 308 | __vlan_vid_del(dev, br, v); |
309 | if (masterv) { | 309 | if (masterv) { |
310 | if (v->stats && masterv->stats != v->stats) | 310 | if (v->stats && masterv->stats != v->stats) |
311 | free_percpu(v->stats); | 311 | free_percpu(v->stats); |
@@ -338,7 +338,7 @@ static int __vlan_del(struct net_bridge_vlan *v) | |||
338 | 338 | ||
339 | __vlan_delete_pvid(vg, v->vid); | 339 | __vlan_delete_pvid(vg, v->vid); |
340 | if (p) { | 340 | if (p) { |
341 | err = __vlan_vid_del(p->dev, p->br, v->vid); | 341 | err = __vlan_vid_del(p->dev, p->br, v); |
342 | if (err) | 342 | if (err) |
343 | goto out; | 343 | goto out; |
344 | } else { | 344 | } else { |
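The br_vlan.c hunks above remember whether switchdev actually accepted the VLAN (BR_VLFLAG_ADDED_BY_SWITCHDEV) so that deletion only falls back to the 8021q filter when the add itself used that fallback. Below is a simplified, stand-alone sketch of that flag handling; the types, the fake switchdev backend, and the omission of the switchdev delete are illustrative assumptions, not the kernel code.

```c
/* Simplified sketch of the "remember which path added the VLAN" logic in
 * the br_vlan.c hunks. Types and the fake switchdev backend are made up;
 * only the flag handling mirrors the patch.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define VLFLAG_ADDED_BY_SWITCHDEV	(1 << 1)

struct demo_vlan {
	unsigned short vid;
	unsigned int priv_flags;
};

static bool hw_offload;			/* pretend switchdev support toggle */

static int switchdev_vlan_add(unsigned short vid)
{
	return hw_offload ? 0 : -EOPNOTSUPP;
}

static int vlan_vid_add(unsigned short vid)
{
	printf("8021q: add vid %u\n", vid);
	return 0;
}

static void vlan_vid_del(unsigned short vid)
{
	printf("8021q: del vid %u\n", vid);
}

static int demo_vid_add(struct demo_vlan *v)
{
	int err = switchdev_vlan_add(v->vid);

	if (err == -EOPNOTSUPP)
		return vlan_vid_add(v->vid);	/* software fallback */
	v->priv_flags |= VLFLAG_ADDED_BY_SWITCHDEV;
	return err;
}

static void demo_vid_del(struct demo_vlan *v)
{
	/* only undo the 8021q fallback if that is what added the VLAN */
	if (!(v->priv_flags & VLFLAG_ADDED_BY_SWITCHDEV))
		vlan_vid_del(v->vid);
}

int main(void)
{
	struct demo_vlan v = { .vid = 10 };

	hw_offload = false;		/* no offload: add and del both hit 8021q */
	demo_vid_add(&v);
	demo_vid_del(&v);
	return 0;
}
```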
diff --git a/net/can/gw.c b/net/can/gw.c index faa3da88a127..53859346dc9a 100644 --- a/net/can/gw.c +++ b/net/can/gw.c | |||
@@ -416,13 +416,29 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data) | |||
416 | while (modidx < MAX_MODFUNCTIONS && gwj->mod.modfunc[modidx]) | 416 | while (modidx < MAX_MODFUNCTIONS && gwj->mod.modfunc[modidx]) |
417 | (*gwj->mod.modfunc[modidx++])(cf, &gwj->mod); | 417 | (*gwj->mod.modfunc[modidx++])(cf, &gwj->mod); |
418 | 418 | ||
419 | /* check for checksum updates when the CAN frame has been modified */ | 419 | /* Has the CAN frame been modified? */ |
420 | if (modidx) { | 420 | if (modidx) { |
421 | if (gwj->mod.csumfunc.crc8) | 421 | /* get available space for the processed CAN frame type */ |
422 | int max_len = nskb->len - offsetof(struct can_frame, data); | ||
423 | |||
424 | /* dlc may have changed, make sure it fits to the CAN frame */ | ||
425 | if (cf->can_dlc > max_len) | ||
426 | goto out_delete; | ||
427 | |||
428 | /* check for checksum updates in classic CAN length only */ | ||
429 | if (gwj->mod.csumfunc.crc8) { | ||
430 | if (cf->can_dlc > 8) | ||
431 | goto out_delete; | ||
432 | |||
422 | (*gwj->mod.csumfunc.crc8)(cf, &gwj->mod.csum.crc8); | 433 | (*gwj->mod.csumfunc.crc8)(cf, &gwj->mod.csum.crc8); |
434 | } | ||
435 | |||
436 | if (gwj->mod.csumfunc.xor) { | ||
437 | if (cf->can_dlc > 8) | ||
438 | goto out_delete; | ||
423 | 439 | ||
424 | if (gwj->mod.csumfunc.xor) | ||
425 | (*gwj->mod.csumfunc.xor)(cf, &gwj->mod.csum.xor); | 440 | (*gwj->mod.csumfunc.xor)(cf, &gwj->mod.csum.xor); |
441 | } | ||
426 | } | 442 | } |
427 | 443 | ||
428 | /* clear the skb timestamp if not configured the other way */ | 444 | /* clear the skb timestamp if not configured the other way */ |
@@ -434,6 +450,14 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data) | |||
434 | gwj->dropped_frames++; | 450 | gwj->dropped_frames++; |
435 | else | 451 | else |
436 | gwj->handled_frames++; | 452 | gwj->handled_frames++; |
453 | |||
454 | return; | ||
455 | |||
456 | out_delete: | ||
457 | /* delete frame due to misconfiguration */ | ||
458 | gwj->deleted_frames++; | ||
459 | kfree_skb(nskb); | ||
460 | return; | ||
437 | } | 461 | } |
438 | 462 | ||
439 | static inline int cgw_register_filter(struct net *net, struct cgw_job *gwj) | 463 | static inline int cgw_register_filter(struct net *net, struct cgw_job *gwj) |
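The can/gw.c hunk bounds-checks the frame after the modification functions have run: the rewritten can_dlc must still fit the frame that was actually allocated, and the crc8/xor checksum helpers only cover classic CAN's 8 data bytes. A hedged, stand-alone sketch of those two checks follows; the struct layout is simplified for illustration and is not the kernel's can_frame/canfd_frame definition.

```c
/* Sketch of the bounds checks added to can_can_gw_rcv(): a modified length
 * must fit the allocated frame, and the classic-CAN checksum helpers only
 * handle up to 8 data bytes. Struct layout here is illustrative only.
 */
#include <stdbool.h>
#include <stdio.h>

struct demo_frame {
	unsigned char can_dlc;	/* payload length after modification */
	unsigned char data[64];	/* CAN FD sized buffer in this sketch */
};

static bool frame_ok(const struct demo_frame *cf, int alloc_data_len,
		     bool wants_csum_update)
{
	if (cf->can_dlc > alloc_data_len)
		return false;		/* would read/write past the frame */
	if (wants_csum_update && cf->can_dlc > 8)
		return false;		/* crc8/xor helpers are classic-CAN only */
	return true;
}

int main(void)
{
	struct demo_frame cf = { .can_dlc = 12 };

	printf("fits CAN FD frame, no csum: %d\n", frame_ok(&cf, 64, false));
	printf("fits classic frame:         %d\n", frame_ok(&cf, 8, false));
	printf("csum update requested:      %d\n", frame_ok(&cf, 64, true));
	return 0;
}
```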
diff --git a/net/core/filter.c b/net/core/filter.c index 447dd1bad31f..2b3b436ef545 100644 --- a/net/core/filter.c +++ b/net/core/filter.c | |||
@@ -4203,7 +4203,7 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock, | |||
4203 | /* Only some options are supported */ | 4203 | /* Only some options are supported */ |
4204 | switch (optname) { | 4204 | switch (optname) { |
4205 | case TCP_BPF_IW: | 4205 | case TCP_BPF_IW: |
4206 | if (val <= 0 || tp->data_segs_out > 0) | 4206 | if (val <= 0 || tp->data_segs_out > tp->syn_data) |
4207 | ret = -EINVAL; | 4207 | ret = -EINVAL; |
4208 | else | 4208 | else |
4209 | tp->snd_cwnd = val; | 4209 | tp->snd_cwnd = val; |
diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 763a7b08df67..3e27a779f288 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | 18 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
19 | 19 | ||
20 | #include <linux/slab.h> | 20 | #include <linux/slab.h> |
21 | #include <linux/kmemleak.h> | ||
21 | #include <linux/types.h> | 22 | #include <linux/types.h> |
22 | #include <linux/kernel.h> | 23 | #include <linux/kernel.h> |
23 | #include <linux/module.h> | 24 | #include <linux/module.h> |
@@ -443,12 +444,14 @@ static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift) | |||
443 | ret = kmalloc(sizeof(*ret), GFP_ATOMIC); | 444 | ret = kmalloc(sizeof(*ret), GFP_ATOMIC); |
444 | if (!ret) | 445 | if (!ret) |
445 | return NULL; | 446 | return NULL; |
446 | if (size <= PAGE_SIZE) | 447 | if (size <= PAGE_SIZE) { |
447 | buckets = kzalloc(size, GFP_ATOMIC); | 448 | buckets = kzalloc(size, GFP_ATOMIC); |
448 | else | 449 | } else { |
449 | buckets = (struct neighbour __rcu **) | 450 | buckets = (struct neighbour __rcu **) |
450 | __get_free_pages(GFP_ATOMIC | __GFP_ZERO, | 451 | __get_free_pages(GFP_ATOMIC | __GFP_ZERO, |
451 | get_order(size)); | 452 | get_order(size)); |
453 | kmemleak_alloc(buckets, size, 0, GFP_ATOMIC); | ||
454 | } | ||
452 | if (!buckets) { | 455 | if (!buckets) { |
453 | kfree(ret); | 456 | kfree(ret); |
454 | return NULL; | 457 | return NULL; |
@@ -468,10 +471,12 @@ static void neigh_hash_free_rcu(struct rcu_head *head) | |||
468 | size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *); | 471 | size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *); |
469 | struct neighbour __rcu **buckets = nht->hash_buckets; | 472 | struct neighbour __rcu **buckets = nht->hash_buckets; |
470 | 473 | ||
471 | if (size <= PAGE_SIZE) | 474 | if (size <= PAGE_SIZE) { |
472 | kfree(buckets); | 475 | kfree(buckets); |
473 | else | 476 | } else { |
477 | kmemleak_free(buckets); | ||
474 | free_pages((unsigned long)buckets, get_order(size)); | 478 | free_pages((unsigned long)buckets, get_order(size)); |
479 | } | ||
475 | kfree(nht); | 480 | kfree(nht); |
476 | } | 481 | } |
477 | 482 | ||
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 37317ffec146..26d848484912 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
@@ -5270,7 +5270,6 @@ struct sk_buff *alloc_skb_with_frags(unsigned long header_len, | |||
5270 | unsigned long chunk; | 5270 | unsigned long chunk; |
5271 | struct sk_buff *skb; | 5271 | struct sk_buff *skb; |
5272 | struct page *page; | 5272 | struct page *page; |
5273 | gfp_t gfp_head; | ||
5274 | int i; | 5273 | int i; |
5275 | 5274 | ||
5276 | *errcode = -EMSGSIZE; | 5275 | *errcode = -EMSGSIZE; |
@@ -5280,12 +5279,8 @@ struct sk_buff *alloc_skb_with_frags(unsigned long header_len, | |||
5280 | if (npages > MAX_SKB_FRAGS) | 5279 | if (npages > MAX_SKB_FRAGS) |
5281 | return NULL; | 5280 | return NULL; |
5282 | 5281 | ||
5283 | gfp_head = gfp_mask; | ||
5284 | if (gfp_head & __GFP_DIRECT_RECLAIM) | ||
5285 | gfp_head |= __GFP_RETRY_MAYFAIL; | ||
5286 | |||
5287 | *errcode = -ENOBUFS; | 5282 | *errcode = -ENOBUFS; |
5288 | skb = alloc_skb(header_len, gfp_head); | 5283 | skb = alloc_skb(header_len, gfp_mask); |
5289 | if (!skb) | 5284 | if (!skb) |
5290 | return NULL; | 5285 | return NULL; |
5291 | 5286 | ||
diff --git a/net/ipv4/bpfilter/sockopt.c b/net/ipv4/bpfilter/sockopt.c index 5e04ed25bc0e..1e976bb93d99 100644 --- a/net/ipv4/bpfilter/sockopt.c +++ b/net/ipv4/bpfilter/sockopt.c | |||
@@ -1,28 +1,54 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | 1 | // SPDX-License-Identifier: GPL-2.0 |
2 | #include <linux/init.h> | ||
3 | #include <linux/module.h> | ||
2 | #include <linux/uaccess.h> | 4 | #include <linux/uaccess.h> |
3 | #include <linux/bpfilter.h> | 5 | #include <linux/bpfilter.h> |
4 | #include <uapi/linux/bpf.h> | 6 | #include <uapi/linux/bpf.h> |
5 | #include <linux/wait.h> | 7 | #include <linux/wait.h> |
6 | #include <linux/kmod.h> | 8 | #include <linux/kmod.h> |
9 | #include <linux/fs.h> | ||
10 | #include <linux/file.h> | ||
7 | 11 | ||
8 | int (*bpfilter_process_sockopt)(struct sock *sk, int optname, | 12 | struct bpfilter_umh_ops bpfilter_ops; |
9 | char __user *optval, | 13 | EXPORT_SYMBOL_GPL(bpfilter_ops); |
10 | unsigned int optlen, bool is_set); | 14 | |
11 | EXPORT_SYMBOL_GPL(bpfilter_process_sockopt); | 15 | static void bpfilter_umh_cleanup(struct umh_info *info) |
16 | { | ||
17 | mutex_lock(&bpfilter_ops.lock); | ||
18 | bpfilter_ops.stop = true; | ||
19 | fput(info->pipe_to_umh); | ||
20 | fput(info->pipe_from_umh); | ||
21 | info->pid = 0; | ||
22 | mutex_unlock(&bpfilter_ops.lock); | ||
23 | } | ||
12 | 24 | ||
13 | static int bpfilter_mbox_request(struct sock *sk, int optname, | 25 | static int bpfilter_mbox_request(struct sock *sk, int optname, |
14 | char __user *optval, | 26 | char __user *optval, |
15 | unsigned int optlen, bool is_set) | 27 | unsigned int optlen, bool is_set) |
16 | { | 28 | { |
17 | if (!bpfilter_process_sockopt) { | 29 | int err; |
18 | int err = request_module("bpfilter"); | 30 | mutex_lock(&bpfilter_ops.lock); |
31 | if (!bpfilter_ops.sockopt) { | ||
32 | mutex_unlock(&bpfilter_ops.lock); | ||
33 | err = request_module("bpfilter"); | ||
34 | mutex_lock(&bpfilter_ops.lock); | ||
19 | 35 | ||
20 | if (err) | 36 | if (err) |
21 | return err; | 37 | goto out; |
22 | if (!bpfilter_process_sockopt) | 38 | if (!bpfilter_ops.sockopt) { |
23 | return -ECHILD; | 39 | err = -ECHILD; |
40 | goto out; | ||
41 | } | ||
42 | } | ||
43 | if (bpfilter_ops.stop) { | ||
44 | err = bpfilter_ops.start(); | ||
45 | if (err) | ||
46 | goto out; | ||
24 | } | 47 | } |
25 | return bpfilter_process_sockopt(sk, optname, optval, optlen, is_set); | 48 | err = bpfilter_ops.sockopt(sk, optname, optval, optlen, is_set); |
49 | out: | ||
50 | mutex_unlock(&bpfilter_ops.lock); | ||
51 | return err; | ||
26 | } | 52 | } |
27 | 53 | ||
28 | int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char __user *optval, | 54 | int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char __user *optval, |
@@ -41,3 +67,15 @@ int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char __user *optval, | |||
41 | 67 | ||
42 | return bpfilter_mbox_request(sk, optname, optval, len, false); | 68 | return bpfilter_mbox_request(sk, optname, optval, len, false); |
43 | } | 69 | } |
70 | |||
71 | static int __init bpfilter_sockopt_init(void) | ||
72 | { | ||
73 | mutex_init(&bpfilter_ops.lock); | ||
74 | bpfilter_ops.stop = true; | ||
75 | bpfilter_ops.info.cmdline = "bpfilter_umh"; | ||
76 | bpfilter_ops.info.cleanup = &bpfilter_umh_cleanup; | ||
77 | |||
78 | return 0; | ||
79 | } | ||
80 | |||
81 | module_init(bpfilter_sockopt_init); | ||
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 04ba321ae5ce..e258a00b4a3d 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c | |||
@@ -1826,7 +1826,7 @@ put_tgt_net: | |||
1826 | if (fillargs.netnsid >= 0) | 1826 | if (fillargs.netnsid >= 0) |
1827 | put_net(tgt_net); | 1827 | put_net(tgt_net); |
1828 | 1828 | ||
1829 | return err < 0 ? err : skb->len; | 1829 | return skb->len ? : err; |
1830 | } | 1830 | } |
1831 | 1831 | ||
1832 | static void rtmsg_ifa(int event, struct in_ifaddr *ifa, struct nlmsghdr *nlh, | 1832 | static void rtmsg_ifa(int event, struct in_ifaddr *ifa, struct nlmsghdr *nlh, |
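The changed dump return statement relies on GCC's conditional with an omitted middle operand: `a ?: b` evaluates to `a` when `a` is non-zero and to `b` otherwise, so a partially filled skb reports its length and an error is propagated only when nothing was dumped. A minimal sketch of that operator (a GNU C extension, so gcc/clang only):

```c
/* Minimal demo of the GNU "a ?: b" extension used by the dump return path:
 * yields a when a is non-zero, otherwise b.
 */
#include <stdio.h>

static int dump_result(int skb_len, int err)
{
	return skb_len ? : err;		/* same shape as "return skb->len ? : err;" */
}

int main(void)
{
	printf("%d\n", dump_result(128, -22));	/* data was dumped: 128 */
	printf("%d\n", dump_result(0, -22));	/* nothing dumped: propagate -22 */
	printf("%d\n", dump_result(0, 0));	/* nothing dumped, no error: 0 */
	return 0;
}
```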
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c index 0c9f171fb085..632863541082 100644 --- a/net/ipv4/fou.c +++ b/net/ipv4/fou.c | |||
@@ -1065,7 +1065,8 @@ static int gue_err(struct sk_buff *skb, u32 info) | |||
1065 | * recursion. Besides, this kind of encapsulation can't even be | 1065 | * recursion. Besides, this kind of encapsulation can't even be |
1066 | * configured currently. Discard this. | 1066 | * configured currently. Discard this. |
1067 | */ | 1067 | */ |
1068 | if (guehdr->proto_ctype == IPPROTO_UDP) | 1068 | if (guehdr->proto_ctype == IPPROTO_UDP || |
1069 | guehdr->proto_ctype == IPPROTO_UDPLITE) | ||
1069 | return -EOPNOTSUPP; | 1070 | return -EOPNOTSUPP; |
1070 | 1071 | ||
1071 | skb_set_transport_header(skb, -(int)sizeof(struct icmphdr)); | 1072 | skb_set_transport_header(skb, -(int)sizeof(struct icmphdr)); |
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index fffcc130900e..82f341e84fae 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c | |||
@@ -148,19 +148,17 @@ static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb) | |||
148 | 148 | ||
149 | static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb) | 149 | static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb) |
150 | { | 150 | { |
151 | __be16 _ports[2], *ports; | ||
151 | struct sockaddr_in sin; | 152 | struct sockaddr_in sin; |
152 | __be16 *ports; | ||
153 | int end; | ||
154 | |||
155 | end = skb_transport_offset(skb) + 4; | ||
156 | if (end > 0 && !pskb_may_pull(skb, end)) | ||
157 | return; | ||
158 | 153 | ||
159 | /* All current transport protocols have the port numbers in the | 154 | /* All current transport protocols have the port numbers in the |
160 | * first four bytes of the transport header and this function is | 155 | * first four bytes of the transport header and this function is |
161 | * written with this assumption in mind. | 156 | * written with this assumption in mind. |
162 | */ | 157 | */ |
163 | ports = (__be16 *)skb_transport_header(skb); | 158 | ports = skb_header_pointer(skb, skb_transport_offset(skb), |
159 | sizeof(_ports), &_ports); | ||
160 | if (!ports) | ||
161 | return; | ||
164 | 162 | ||
165 | sin.sin_family = AF_INET; | 163 | sin.sin_family = AF_INET; |
166 | sin.sin_addr.s_addr = ip_hdr(skb)->daddr; | 164 | sin.sin_addr.s_addr = ip_hdr(skb)->daddr; |
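The ip_cmsg_recv_dstaddr() hunk reads the port bytes through skb_header_pointer(), which returns a pointer into the skb when the requested range is linear and otherwise copies it into the caller-supplied buffer, bailing out if the packet is too short. The following userspace analogue is a sketch of that pattern only; the helper name and the fall-back behaviour here are made up for illustration and are not the kernel implementation.

```c
/* Hypothetical userspace analogue of the skb_header_pointer() pattern:
 * return a direct pointer when the range lies in the linear region,
 * otherwise (in the kernel) copy into the caller's buffer. The copy path
 * is reduced to a failure here to keep the sketch self-contained.
 */
#include <stdint.h>
#include <stdio.h>

static const void *buf_header_pointer(const uint8_t *linear, size_t linear_len,
				      size_t offset, size_t len, void *buffer)
{
	if (offset + len <= linear_len)
		return linear + offset;	/* fast path: bytes are contiguous */
	(void)buffer;			/* kernel would copy via skb_copy_bits() */
	return NULL;
}

int main(void)
{
	uint8_t pkt[8] = { 0x00, 0x35, 0xc0, 0x02 };	/* sport, dport bytes */
	uint16_t _ports[2];
	const uint16_t *ports;

	ports = buf_header_pointer(pkt, sizeof(pkt), 0, sizeof(_ports), _ports);
	if (!ports)
		return 1;	/* too short: give up, as the hunk does */

	printf("dst port bytes (network order): %02x %02x\n", pkt[2], pkt[3]);
	return 0;
}
```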
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index f87dbc78b6bc..71a29e9c0620 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c | |||
@@ -226,7 +226,7 @@ static int tcp_write_timeout(struct sock *sk) | |||
226 | if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { | 226 | if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { |
227 | if (icsk->icsk_retransmits) { | 227 | if (icsk->icsk_retransmits) { |
228 | dst_negative_advice(sk); | 228 | dst_negative_advice(sk); |
229 | } else if (!tp->syn_data && !tp->syn_fastopen) { | 229 | } else { |
230 | sk_rethink_txhash(sk); | 230 | sk_rethink_txhash(sk); |
231 | } | 231 | } |
232 | retry_until = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries; | 232 | retry_until = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries; |
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 8eeec6eb2bd3..93d5ad2b1a69 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -5154,7 +5154,7 @@ put_tgt_net: | |||
5154 | if (fillargs.netnsid >= 0) | 5154 | if (fillargs.netnsid >= 0) |
5155 | put_net(tgt_net); | 5155 | put_net(tgt_net); |
5156 | 5156 | ||
5157 | return err < 0 ? err : skb->len; | 5157 | return skb->len ? : err; |
5158 | } | 5158 | } |
5159 | 5159 | ||
5160 | static int inet6_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb) | 5160 | static int inet6_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb) |
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 0bfb6cc0a30a..d99753b5e39b 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c | |||
@@ -310,6 +310,7 @@ static int __inet6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len, | |||
310 | 310 | ||
311 | /* Check if the address belongs to the host. */ | 311 | /* Check if the address belongs to the host. */ |
312 | if (addr_type == IPV6_ADDR_MAPPED) { | 312 | if (addr_type == IPV6_ADDR_MAPPED) { |
313 | struct net_device *dev = NULL; | ||
313 | int chk_addr_ret; | 314 | int chk_addr_ret; |
314 | 315 | ||
315 | /* Binding to v4-mapped address on a v6-only socket | 316 | /* Binding to v4-mapped address on a v6-only socket |
@@ -320,9 +321,20 @@ static int __inet6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len, | |||
320 | goto out; | 321 | goto out; |
321 | } | 322 | } |
322 | 323 | ||
324 | rcu_read_lock(); | ||
325 | if (sk->sk_bound_dev_if) { | ||
326 | dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if); | ||
327 | if (!dev) { | ||
328 | err = -ENODEV; | ||
329 | goto out_unlock; | ||
330 | } | ||
331 | } | ||
332 | |||
323 | /* Reproduce AF_INET checks to make the bindings consistent */ | 333 | /* Reproduce AF_INET checks to make the bindings consistent */ |
324 | v4addr = addr->sin6_addr.s6_addr32[3]; | 334 | v4addr = addr->sin6_addr.s6_addr32[3]; |
325 | chk_addr_ret = inet_addr_type(net, v4addr); | 335 | chk_addr_ret = inet_addr_type_dev_table(net, dev, v4addr); |
336 | rcu_read_unlock(); | ||
337 | |||
326 | if (!inet_can_nonlocal_bind(net, inet) && | 338 | if (!inet_can_nonlocal_bind(net, inet) && |
327 | v4addr != htonl(INADDR_ANY) && | 339 | v4addr != htonl(INADDR_ANY) && |
328 | chk_addr_ret != RTN_LOCAL && | 340 | chk_addr_ret != RTN_LOCAL && |
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index bde08aa549f3..ee4a4e54d016 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c | |||
@@ -341,6 +341,7 @@ void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info) | |||
341 | skb_reset_network_header(skb); | 341 | skb_reset_network_header(skb); |
342 | iph = ipv6_hdr(skb); | 342 | iph = ipv6_hdr(skb); |
343 | iph->daddr = fl6->daddr; | 343 | iph->daddr = fl6->daddr; |
344 | ip6_flow_hdr(iph, 0, 0); | ||
344 | 345 | ||
345 | serr = SKB_EXT_ERR(skb); | 346 | serr = SKB_EXT_ERR(skb); |
346 | serr->ee.ee_errno = err; | 347 | serr->ee.ee_errno = err; |
@@ -700,17 +701,15 @@ void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg, | |||
700 | } | 701 | } |
701 | if (np->rxopt.bits.rxorigdstaddr) { | 702 | if (np->rxopt.bits.rxorigdstaddr) { |
702 | struct sockaddr_in6 sin6; | 703 | struct sockaddr_in6 sin6; |
703 | __be16 *ports; | 704 | __be16 _ports[2], *ports; |
704 | int end; | ||
705 | 705 | ||
706 | end = skb_transport_offset(skb) + 4; | 706 | ports = skb_header_pointer(skb, skb_transport_offset(skb), |
707 | if (end <= 0 || pskb_may_pull(skb, end)) { | 707 | sizeof(_ports), &_ports); |
708 | if (ports) { | ||
708 | /* All current transport protocols have the port numbers in the | 709 | /* All current transport protocols have the port numbers in the |
709 | * first four bytes of the transport header and this function is | 710 | * first four bytes of the transport header and this function is |
710 | * written with this assumption in mind. | 711 | * written with this assumption in mind. |
711 | */ | 712 | */ |
712 | ports = (__be16 *)skb_transport_header(skb); | ||
713 | |||
714 | sin6.sin6_family = AF_INET6; | 713 | sin6.sin6_family = AF_INET6; |
715 | sin6.sin6_addr = ipv6_hdr(skb)->daddr; | 714 | sin6.sin6_addr = ipv6_hdr(skb)->daddr; |
716 | sin6.sin6_port = ports[1]; | 715 | sin6.sin6_port = ports[1]; |
diff --git a/net/ipv6/fou6.c b/net/ipv6/fou6.c index bd675c61deb1..7da7bf3b7fe3 100644 --- a/net/ipv6/fou6.c +++ b/net/ipv6/fou6.c | |||
@@ -131,6 +131,14 @@ static int gue6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
131 | if (validate_gue_flags(guehdr, optlen)) | 131 | if (validate_gue_flags(guehdr, optlen)) |
132 | return -EINVAL; | 132 | return -EINVAL; |
133 | 133 | ||
134 | /* Handling exceptions for direct UDP encapsulation in GUE would lead to | ||
135 | * recursion. Besides, this kind of encapsulation can't even be | ||
136 | * configured currently. Discard this. | ||
137 | */ | ||
138 | if (guehdr->proto_ctype == IPPROTO_UDP || | ||
139 | guehdr->proto_ctype == IPPROTO_UDPLITE) | ||
140 | return -EOPNOTSUPP; | ||
141 | |||
134 | skb_set_transport_header(skb, -(int)sizeof(struct icmp6hdr)); | 142 | skb_set_transport_header(skb, -(int)sizeof(struct icmp6hdr)); |
135 | ret = gue6_err_proto_handler(guehdr->proto_ctype, skb, | 143 | ret = gue6_err_proto_handler(guehdr->proto_ctype, skb, |
136 | opt, type, code, offset, info); | 144 | opt, type, code, offset, info); |
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index 5d7aa2c2770c..bbcdfd299692 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c | |||
@@ -423,10 +423,10 @@ static int icmp6_iif(const struct sk_buff *skb) | |||
423 | static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, | 423 | static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, |
424 | const struct in6_addr *force_saddr) | 424 | const struct in6_addr *force_saddr) |
425 | { | 425 | { |
426 | struct net *net = dev_net(skb->dev); | ||
427 | struct inet6_dev *idev = NULL; | 426 | struct inet6_dev *idev = NULL; |
428 | struct ipv6hdr *hdr = ipv6_hdr(skb); | 427 | struct ipv6hdr *hdr = ipv6_hdr(skb); |
429 | struct sock *sk; | 428 | struct sock *sk; |
429 | struct net *net; | ||
430 | struct ipv6_pinfo *np; | 430 | struct ipv6_pinfo *np; |
431 | const struct in6_addr *saddr = NULL; | 431 | const struct in6_addr *saddr = NULL; |
432 | struct dst_entry *dst; | 432 | struct dst_entry *dst; |
@@ -437,12 +437,16 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, | |||
437 | int iif = 0; | 437 | int iif = 0; |
438 | int addr_type = 0; | 438 | int addr_type = 0; |
439 | int len; | 439 | int len; |
440 | u32 mark = IP6_REPLY_MARK(net, skb->mark); | 440 | u32 mark; |
441 | 441 | ||
442 | if ((u8 *)hdr < skb->head || | 442 | if ((u8 *)hdr < skb->head || |
443 | (skb_network_header(skb) + sizeof(*hdr)) > skb_tail_pointer(skb)) | 443 | (skb_network_header(skb) + sizeof(*hdr)) > skb_tail_pointer(skb)) |
444 | return; | 444 | return; |
445 | 445 | ||
446 | if (!skb->dev) | ||
447 | return; | ||
448 | net = dev_net(skb->dev); | ||
449 | mark = IP6_REPLY_MARK(net, skb->mark); | ||
446 | /* | 450 | /* |
447 | * Make sure we respect the rules | 451 | * Make sure we respect the rules |
448 | * i.e. RFC 1885 2.4(e) | 452 | * i.e. RFC 1885 2.4(e) |
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 9cbf363172bd..7c3505006f8e 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c | |||
@@ -1390,10 +1390,7 @@ do_udp_sendmsg: | |||
1390 | ipc6.opt = opt; | 1390 | ipc6.opt = opt; |
1391 | 1391 | ||
1392 | fl6.flowi6_proto = sk->sk_protocol; | 1392 | fl6.flowi6_proto = sk->sk_protocol; |
1393 | if (!ipv6_addr_any(daddr)) | 1393 | fl6.daddr = *daddr; |
1394 | fl6.daddr = *daddr; | ||
1395 | else | ||
1396 | fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */ | ||
1397 | if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr)) | 1394 | if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr)) |
1398 | fl6.saddr = np->saddr; | 1395 | fl6.saddr = np->saddr; |
1399 | fl6.fl6_sport = inet->inet_sport; | 1396 | fl6.fl6_sport = inet->inet_sport; |
@@ -1421,6 +1418,9 @@ do_udp_sendmsg: | |||
1421 | } | 1418 | } |
1422 | } | 1419 | } |
1423 | 1420 | ||
1421 | if (ipv6_addr_any(&fl6.daddr)) | ||
1422 | fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */ | ||
1423 | |||
1424 | final_p = fl6_update_dst(&fl6, opt, &final); | 1424 | final_p = fl6_update_dst(&fl6, opt, &final); |
1425 | if (final_p) | 1425 | if (final_p) |
1426 | connected = false; | 1426 | connected = false; |
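Moving the ":: means loopback" rewrite below the cgroup sendmsg6 hook means a destination that is still the unspecified address after any BPF rewrite ends up going to ::1. A small userspace sketch of that BSD'ism check, using only standard socket headers:

```c
/* Userspace sketch of the ":: means loopback" BSD'ism applied late in the
 * udpv6 sendmsg path: if the destination is still unspecified, flip the
 * last byte so it becomes ::1.
 */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>

int main(void)
{
	struct in6_addr daddr = IN6ADDR_ANY_INIT;	/* "::" */
	char buf[INET6_ADDRSTRLEN];

	if (IN6_IS_ADDR_UNSPECIFIED(&daddr))
		daddr.s6_addr[15] = 0x1;		/* :: -> ::1 (loopback) */

	printf("%s\n", inet_ntop(AF_INET6, &daddr, buf, sizeof(buf)));
	return 0;
}
```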
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c index 57e07768c9d1..f54cf17ef7a8 100644 --- a/net/openvswitch/flow.c +++ b/net/openvswitch/flow.c | |||
@@ -276,10 +276,12 @@ static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key) | |||
276 | 276 | ||
277 | nexthdr = ipv6_find_hdr(skb, &payload_ofs, -1, &frag_off, &flags); | 277 | nexthdr = ipv6_find_hdr(skb, &payload_ofs, -1, &frag_off, &flags); |
278 | if (flags & IP6_FH_F_FRAG) { | 278 | if (flags & IP6_FH_F_FRAG) { |
279 | if (frag_off) | 279 | if (frag_off) { |
280 | key->ip.frag = OVS_FRAG_TYPE_LATER; | 280 | key->ip.frag = OVS_FRAG_TYPE_LATER; |
281 | else | 281 | key->ip.proto = nexthdr; |
282 | key->ip.frag = OVS_FRAG_TYPE_FIRST; | 282 | return 0; |
283 | } | ||
284 | key->ip.frag = OVS_FRAG_TYPE_FIRST; | ||
283 | } else { | 285 | } else { |
284 | key->ip.frag = OVS_FRAG_TYPE_NONE; | 286 | key->ip.frag = OVS_FRAG_TYPE_NONE; |
285 | } | 287 | } |
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index eedacdebcd4c..d0945253f43b 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
@@ -2628,7 +2628,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) | |||
2628 | addr = saddr->sll_halen ? saddr->sll_addr : NULL; | 2628 | addr = saddr->sll_halen ? saddr->sll_addr : NULL; |
2629 | dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex); | 2629 | dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex); |
2630 | if (addr && dev && saddr->sll_halen < dev->addr_len) | 2630 | if (addr && dev && saddr->sll_halen < dev->addr_len) |
2631 | goto out; | 2631 | goto out_put; |
2632 | } | 2632 | } |
2633 | 2633 | ||
2634 | err = -ENXIO; | 2634 | err = -ENXIO; |
@@ -2828,7 +2828,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) | |||
2828 | addr = saddr->sll_halen ? saddr->sll_addr : NULL; | 2828 | addr = saddr->sll_halen ? saddr->sll_addr : NULL; |
2829 | dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex); | 2829 | dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex); |
2830 | if (addr && dev && saddr->sll_halen < dev->addr_len) | 2830 | if (addr && dev && saddr->sll_halen < dev->addr_len) |
2831 | goto out; | 2831 | goto out_unlock; |
2832 | } | 2832 | } |
2833 | 2833 | ||
2834 | err = -ENXIO; | 2834 | err = -ENXIO; |
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c index 2dcb555e6350..4e0c36acf866 100644 --- a/net/rds/ib_send.c +++ b/net/rds/ib_send.c | |||
@@ -522,7 +522,7 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm, | |||
522 | if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0) | 522 | if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0) |
523 | i = 1; | 523 | i = 1; |
524 | else | 524 | else |
525 | i = ceil(be32_to_cpu(rm->m_inc.i_hdr.h_len), RDS_FRAG_SIZE); | 525 | i = DIV_ROUND_UP(be32_to_cpu(rm->m_inc.i_hdr.h_len), RDS_FRAG_SIZE); |
526 | 526 | ||
527 | work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos); | 527 | work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos); |
528 | if (work_alloc == 0) { | 528 | if (work_alloc == 0) { |
@@ -879,7 +879,7 @@ int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op) | |||
879 | * Instead of knowing how to return a partial rdma read/write we insist that there | 879 | * Instead of knowing how to return a partial rdma read/write we insist that there |
880 | * be enough work requests to send the entire message. | 880 | * be enough work requests to send the entire message. |
881 | */ | 881 | */ |
882 | i = ceil(op->op_count, max_sge); | 882 | i = DIV_ROUND_UP(op->op_count, max_sge); |
883 | 883 | ||
884 | work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos); | 884 | work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos); |
885 | if (work_alloc != i) { | 885 | if (work_alloc != i) { |
diff --git a/net/rds/message.c b/net/rds/message.c index f139420ba1f6..50f13f1d4ae0 100644 --- a/net/rds/message.c +++ b/net/rds/message.c | |||
@@ -341,7 +341,7 @@ struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned in | |||
341 | { | 341 | { |
342 | struct rds_message *rm; | 342 | struct rds_message *rm; |
343 | unsigned int i; | 343 | unsigned int i; |
344 | int num_sgs = ceil(total_len, PAGE_SIZE); | 344 | int num_sgs = DIV_ROUND_UP(total_len, PAGE_SIZE); |
345 | int extra_bytes = num_sgs * sizeof(struct scatterlist); | 345 | int extra_bytes = num_sgs * sizeof(struct scatterlist); |
346 | int ret; | 346 | int ret; |
347 | 347 | ||
@@ -351,7 +351,7 @@ struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned in | |||
351 | 351 | ||
352 | set_bit(RDS_MSG_PAGEVEC, &rm->m_flags); | 352 | set_bit(RDS_MSG_PAGEVEC, &rm->m_flags); |
353 | rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len); | 353 | rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len); |
354 | rm->data.op_nents = ceil(total_len, PAGE_SIZE); | 354 | rm->data.op_nents = DIV_ROUND_UP(total_len, PAGE_SIZE); |
355 | rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs, &ret); | 355 | rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs, &ret); |
356 | if (!rm->data.op_sg) { | 356 | if (!rm->data.op_sg) { |
357 | rds_message_put(rm); | 357 | rds_message_put(rm); |
diff --git a/net/rds/rds.h b/net/rds/rds.h index 02ec4a3b2799..4ffe100ff5e6 100644 --- a/net/rds/rds.h +++ b/net/rds/rds.h | |||
@@ -48,10 +48,6 @@ void rdsdebug(char *fmt, ...) | |||
48 | } | 48 | } |
49 | #endif | 49 | #endif |
50 | 50 | ||
51 | /* XXX is there one of these somewhere? */ | ||
52 | #define ceil(x, y) \ | ||
53 | ({ unsigned long __x = (x), __y = (y); (__x + __y - 1) / __y; }) | ||
54 | |||
55 | #define RDS_FRAG_SHIFT 12 | 51 | #define RDS_FRAG_SHIFT 12 |
56 | #define RDS_FRAG_SIZE ((unsigned int)(1 << RDS_FRAG_SHIFT)) | 52 | #define RDS_FRAG_SIZE ((unsigned int)(1 << RDS_FRAG_SHIFT)) |
57 | 53 | ||
diff --git a/net/rds/send.c b/net/rds/send.c index 3d822bad7de9..fd8b687d5c05 100644 --- a/net/rds/send.c +++ b/net/rds/send.c | |||
@@ -1107,7 +1107,7 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len) | |||
1107 | size_t total_payload_len = payload_len, rdma_payload_len = 0; | 1107 | size_t total_payload_len = payload_len, rdma_payload_len = 0; |
1108 | bool zcopy = ((msg->msg_flags & MSG_ZEROCOPY) && | 1108 | bool zcopy = ((msg->msg_flags & MSG_ZEROCOPY) && |
1109 | sock_flag(rds_rs_to_sk(rs), SOCK_ZEROCOPY)); | 1109 | sock_flag(rds_rs_to_sk(rs), SOCK_ZEROCOPY)); |
1110 | int num_sgs = ceil(payload_len, PAGE_SIZE); | 1110 | int num_sgs = DIV_ROUND_UP(payload_len, PAGE_SIZE); |
1111 | int namelen; | 1111 | int namelen; |
1112 | struct rds_iov_vector_arr vct; | 1112 | struct rds_iov_vector_arr vct; |
1113 | int ind; | 1113 | int ind; |
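The RDS hunks drop a private `ceil()` macro from net/rds/rds.h in favour of the kernel's DIV_ROUND_UP(). The stand-alone sketch below (gcc/clang, since the removed macro uses a statement expression) is not part of the patch; it just shows that the two computations agree for the fragment/page sizes involved.

```c
/* Compare the removed rds ceil() helper with the DIV_ROUND_UP() form that
 * replaces it. Both round an integer division up.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* the private helper the RDS hunks remove from net/rds/rds.h */
#define ceil(x, y) \
	({ unsigned long __x = (x), __y = (y); (__x + __y - 1) / __y; })

int main(void)
{
	unsigned long lens[] = { 0, 1, 4095, 4096, 4097, 123456 };
	unsigned long frag = 4096;	/* RDS_FRAG_SIZE / PAGE_SIZE in the patch */

	for (unsigned int i = 0; i < sizeof(lens) / sizeof(lens[0]); i++)
		printf("len=%6lu  ceil=%3lu  DIV_ROUND_UP=%3lu\n",
		       lens[i], ceil(lens[i], frag),
		       DIV_ROUND_UP(lens[i], frag));
	return 0;
}
```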
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index c4da4a78d369..c4e56602e0c6 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c | |||
@@ -146,6 +146,9 @@ static int smc_release(struct socket *sock) | |||
146 | sock_set_flag(sk, SOCK_DEAD); | 146 | sock_set_flag(sk, SOCK_DEAD); |
147 | sk->sk_shutdown |= SHUTDOWN_MASK; | 147 | sk->sk_shutdown |= SHUTDOWN_MASK; |
148 | } | 148 | } |
149 | |||
150 | sk->sk_prot->unhash(sk); | ||
151 | |||
149 | if (smc->clcsock) { | 152 | if (smc->clcsock) { |
150 | if (smc->use_fallback && sk->sk_state == SMC_LISTEN) { | 153 | if (smc->use_fallback && sk->sk_state == SMC_LISTEN) { |
151 | /* wake up clcsock accept */ | 154 | /* wake up clcsock accept */ |
@@ -170,7 +173,6 @@ static int smc_release(struct socket *sock) | |||
170 | smc_conn_free(&smc->conn); | 173 | smc_conn_free(&smc->conn); |
171 | release_sock(sk); | 174 | release_sock(sk); |
172 | 175 | ||
173 | sk->sk_prot->unhash(sk); | ||
174 | sock_put(sk); /* final sock_put */ | 176 | sock_put(sk); /* final sock_put */ |
175 | out: | 177 | out: |
176 | return rc; | 178 | return rc; |
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c index 40f5cae623a7..77e4b2418f30 100644 --- a/net/tipc/netlink_compat.c +++ b/net/tipc/netlink_compat.c | |||
@@ -904,8 +904,10 @@ static int tipc_nl_compat_publ_dump(struct tipc_nl_compat_msg *msg, u32 sock) | |||
904 | 904 | ||
905 | hdr = genlmsg_put(args, 0, 0, &tipc_genl_family, NLM_F_MULTI, | 905 | hdr = genlmsg_put(args, 0, 0, &tipc_genl_family, NLM_F_MULTI, |
906 | TIPC_NL_PUBL_GET); | 906 | TIPC_NL_PUBL_GET); |
907 | if (!hdr) | 907 | if (!hdr) { |
908 | kfree_skb(args); | ||
908 | return -EMSGSIZE; | 909 | return -EMSGSIZE; |
910 | } | ||
909 | 911 | ||
910 | nest = nla_nest_start(args, TIPC_NLA_SOCK); | 912 | nest = nla_nest_start(args, TIPC_NLA_SOCK); |
911 | if (!nest) { | 913 | if (!nest) { |
diff --git a/samples/bpf/test_cgrp2_attach2.c b/samples/bpf/test_cgrp2_attach2.c index d7b68ef5ba79..0bb6507256b7 100644 --- a/samples/bpf/test_cgrp2_attach2.c +++ b/samples/bpf/test_cgrp2_attach2.c | |||
@@ -77,7 +77,7 @@ static int test_foo_bar(void) | |||
77 | 77 | ||
78 | /* Create cgroup /foo, get fd, and join it */ | 78 | /* Create cgroup /foo, get fd, and join it */ |
79 | foo = create_and_get_cgroup(FOO); | 79 | foo = create_and_get_cgroup(FOO); |
80 | if (!foo) | 80 | if (foo < 0) |
81 | goto err; | 81 | goto err; |
82 | 82 | ||
83 | if (join_cgroup(FOO)) | 83 | if (join_cgroup(FOO)) |
@@ -94,7 +94,7 @@ static int test_foo_bar(void) | |||
94 | 94 | ||
95 | /* Create cgroup /foo/bar, get fd, and join it */ | 95 | /* Create cgroup /foo/bar, get fd, and join it */ |
96 | bar = create_and_get_cgroup(BAR); | 96 | bar = create_and_get_cgroup(BAR); |
97 | if (!bar) | 97 | if (bar < 0) |
98 | goto err; | 98 | goto err; |
99 | 99 | ||
100 | if (join_cgroup(BAR)) | 100 | if (join_cgroup(BAR)) |
@@ -298,19 +298,19 @@ static int test_multiprog(void) | |||
298 | goto err; | 298 | goto err; |
299 | 299 | ||
300 | cg1 = create_and_get_cgroup("/cg1"); | 300 | cg1 = create_and_get_cgroup("/cg1"); |
301 | if (!cg1) | 301 | if (cg1 < 0) |
302 | goto err; | 302 | goto err; |
303 | cg2 = create_and_get_cgroup("/cg1/cg2"); | 303 | cg2 = create_and_get_cgroup("/cg1/cg2"); |
304 | if (!cg2) | 304 | if (cg2 < 0) |
305 | goto err; | 305 | goto err; |
306 | cg3 = create_and_get_cgroup("/cg1/cg2/cg3"); | 306 | cg3 = create_and_get_cgroup("/cg1/cg2/cg3"); |
307 | if (!cg3) | 307 | if (cg3 < 0) |
308 | goto err; | 308 | goto err; |
309 | cg4 = create_and_get_cgroup("/cg1/cg2/cg3/cg4"); | 309 | cg4 = create_and_get_cgroup("/cg1/cg2/cg3/cg4"); |
310 | if (!cg4) | 310 | if (cg4 < 0) |
311 | goto err; | 311 | goto err; |
312 | cg5 = create_and_get_cgroup("/cg1/cg2/cg3/cg4/cg5"); | 312 | cg5 = create_and_get_cgroup("/cg1/cg2/cg3/cg4/cg5"); |
313 | if (!cg5) | 313 | if (cg5 < 0) |
314 | goto err; | 314 | goto err; |
315 | 315 | ||
316 | if (join_cgroup("/cg1/cg2/cg3/cg4/cg5")) | 316 | if (join_cgroup("/cg1/cg2/cg3/cg4/cg5")) |
diff --git a/samples/bpf/test_current_task_under_cgroup_user.c b/samples/bpf/test_current_task_under_cgroup_user.c index 2259f997a26c..f082d6ac59f0 100644 --- a/samples/bpf/test_current_task_under_cgroup_user.c +++ b/samples/bpf/test_current_task_under_cgroup_user.c | |||
@@ -32,7 +32,7 @@ int main(int argc, char **argv) | |||
32 | 32 | ||
33 | cg2 = create_and_get_cgroup(CGROUP_PATH); | 33 | cg2 = create_and_get_cgroup(CGROUP_PATH); |
34 | 34 | ||
35 | if (!cg2) | 35 | if (cg2 < 0) |
36 | goto err; | 36 | goto err; |
37 | 37 | ||
38 | if (bpf_map_update_elem(map_fd[0], &idx, &cg2, BPF_ANY)) { | 38 | if (bpf_map_update_elem(map_fd[0], &idx, &cg2, BPF_ANY)) { |
diff --git a/samples/bpf/xdp1_user.c b/samples/bpf/xdp1_user.c index 0a197f86ac43..8bfda95c77ad 100644 --- a/samples/bpf/xdp1_user.c +++ b/samples/bpf/xdp1_user.c | |||
@@ -103,7 +103,7 @@ int main(int argc, char **argv) | |||
103 | return 1; | 103 | return 1; |
104 | } | 104 | } |
105 | 105 | ||
106 | ifindex = if_nametoindex(argv[1]); | 106 | ifindex = if_nametoindex(argv[optind]); |
107 | if (!ifindex) { | 107 | if (!ifindex) { |
108 | perror("if_nametoindex"); | 108 | perror("if_nametoindex"); |
109 | return 1; | 109 | return 1; |
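xdp1_user.c parses its flags with getopt() before taking the interface name, so the first non-option argument lives at argv[optind] rather than argv[1]. A minimal sketch of that pattern; the option letters here are illustrative, not xdp1_user's actual set.

```c
/* Why the interface name is read from argv[optind]: after getopt()
 * consumes the option arguments, optind indexes the first non-option one.
 */
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	int opt;

	while ((opt = getopt(argc, argv, "SN")) != -1)
		;	/* option handling elided for brevity */

	if (optind >= argc) {
		fprintf(stderr, "usage: %s [-S] [-N] <ifname>\n", argv[0]);
		return 1;
	}
	printf("interface argument: %s\n", argv[optind]);
	return 0;
}
```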
diff --git a/tools/bpf/bpftool/btf_dumper.c b/tools/bpf/bpftool/btf_dumper.c index 3f0629edbca5..6ba5f567a9d8 100644 --- a/tools/bpf/bpftool/btf_dumper.c +++ b/tools/bpf/bpftool/btf_dumper.c | |||
@@ -82,8 +82,6 @@ static void btf_dumper_bitfield(__u32 nr_bits, __u8 bit_offset, | |||
82 | int bits_to_copy; | 82 | int bits_to_copy; |
83 | __u64 print_num; | 83 | __u64 print_num; |
84 | 84 | ||
85 | data += BITS_ROUNDDOWN_BYTES(bit_offset); | ||
86 | bit_offset = BITS_PER_BYTE_MASKED(bit_offset); | ||
87 | bits_to_copy = bit_offset + nr_bits; | 85 | bits_to_copy = bit_offset + nr_bits; |
88 | bytes_to_copy = BITS_ROUNDUP_BYTES(bits_to_copy); | 86 | bytes_to_copy = BITS_ROUNDUP_BYTES(bits_to_copy); |
89 | 87 | ||
@@ -118,7 +116,9 @@ static void btf_dumper_int_bits(__u32 int_type, __u8 bit_offset, | |||
118 | * BTF_INT_OFFSET() cannot exceed 64 bits. | 116 | * BTF_INT_OFFSET() cannot exceed 64 bits. |
119 | */ | 117 | */ |
120 | total_bits_offset = bit_offset + BTF_INT_OFFSET(int_type); | 118 | total_bits_offset = bit_offset + BTF_INT_OFFSET(int_type); |
121 | btf_dumper_bitfield(nr_bits, total_bits_offset, data, jw, | 119 | data += BITS_ROUNDDOWN_BYTES(total_bits_offset); |
120 | bit_offset = BITS_PER_BYTE_MASKED(total_bits_offset); | ||
121 | btf_dumper_bitfield(nr_bits, bit_offset, data, jw, | ||
122 | is_plain_text); | 122 | is_plain_text); |
123 | } | 123 | } |
124 | 124 | ||
@@ -216,11 +216,12 @@ static int btf_dumper_struct(const struct btf_dumper *d, __u32 type_id, | |||
216 | } | 216 | } |
217 | 217 | ||
218 | jsonw_name(d->jw, btf__name_by_offset(d->btf, m[i].name_off)); | 218 | jsonw_name(d->jw, btf__name_by_offset(d->btf, m[i].name_off)); |
219 | data_off = data + BITS_ROUNDDOWN_BYTES(bit_offset); | ||
219 | if (bitfield_size) { | 220 | if (bitfield_size) { |
220 | btf_dumper_bitfield(bitfield_size, bit_offset, | 221 | btf_dumper_bitfield(bitfield_size, |
221 | data, d->jw, d->is_plain_text); | 222 | BITS_PER_BYTE_MASKED(bit_offset), |
223 | data_off, d->jw, d->is_plain_text); | ||
222 | } else { | 224 | } else { |
223 | data_off = data + BITS_ROUNDDOWN_BYTES(bit_offset); | ||
224 | ret = btf_dumper_do_type(d, m[i].type, | 225 | ret = btf_dumper_do_type(d, m[i].type, |
225 | BITS_PER_BYTE_MASKED(bit_offset), | 226 | BITS_PER_BYTE_MASKED(bit_offset), |
226 | data_off); | 227 | data_off); |
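The btf_dumper fix computes the enclosing byte offset and the residual in-byte bit offset once, from the total bit offset, before handing the data pointer to the bitfield dumper; adjusting in both the caller and the callee shifted the pointer twice. A small stand-alone sketch of that arithmetic, with the macros defined locally to mirror what their names imply (an assumption, not a copy of the bpftool headers):

```c
/* Splitting a total bit offset into a byte offset plus a residual in-byte
 * bit offset, as the btf_dumper fix does. Macros defined here are assumed
 * equivalents of the bpftool helpers.
 */
#include <stdio.h>

#define BITS_PER_BYTE			8
#define BITS_ROUNDDOWN_BYTES(bits)	((bits) / BITS_PER_BYTE)
#define BITS_PER_BYTE_MASKED(bits)	((bits) % BITS_PER_BYTE)

int main(void)
{
	unsigned int total_bits_offset = 126;	/* e.g. a 2-bit field at bit 126 */
	unsigned int byte_off = BITS_ROUNDDOWN_BYTES(total_bits_offset);
	unsigned int bit_off  = BITS_PER_BYTE_MASKED(total_bits_offset);

	/* the data pointer advances by byte_off exactly once; the dumper then
	 * only sees the residual bit_off inside that byte */
	printf("byte offset = %u, in-byte bit offset = %u\n", byte_off, bit_off);
	return 0;
}
```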
diff --git a/tools/lib/bpf/.gitignore b/tools/lib/bpf/.gitignore index f81e549ddfdb..4db74758c674 100644 --- a/tools/lib/bpf/.gitignore +++ b/tools/lib/bpf/.gitignore | |||
@@ -1,2 +1,3 @@ | |||
1 | libbpf_version.h | 1 | libbpf_version.h |
2 | FEATURE-DUMP.libbpf | 2 | FEATURE-DUMP.libbpf |
3 | test_libbpf | ||
diff --git a/tools/lib/bpf/README.rst b/tools/lib/bpf/README.rst index 056f38310722..607aae40f4ed 100644 --- a/tools/lib/bpf/README.rst +++ b/tools/lib/bpf/README.rst | |||
@@ -132,6 +132,20 @@ For example, if current state of ``libbpf.map`` is: | |||
132 | Format of version script and ways to handle ABI changes, including | 132 | Format of version script and ways to handle ABI changes, including |
133 | incompatible ones, described in details in [1]. | 133 | incompatible ones, described in details in [1]. |
134 | 134 | ||
135 | Stand-alone build | ||
136 | ================= | ||
137 | |||
138 | Under https://github.com/libbpf/libbpf there is a (semi-)automated | ||
139 | mirror of the mainline's version of libbpf for a stand-alone build. | ||
140 | |||
141 | However, all changes to libbpf's code base must be upstreamed through | ||
142 | the mainline kernel tree. | ||
143 | |||
144 | License | ||
145 | ======= | ||
146 | |||
147 | libbpf is dual-licensed under LGPL 2.1 and BSD 2-Clause. | ||
148 | |||
135 | Links | 149 | Links |
136 | ===== | 150 | ===== |
137 | 151 | ||
diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore index 4a9785043a39..dd093bd91aa9 100644 --- a/tools/testing/selftests/bpf/.gitignore +++ b/tools/testing/selftests/bpf/.gitignore | |||
@@ -28,3 +28,4 @@ flow_dissector_load | |||
28 | test_netcnt | 28 | test_netcnt |
29 | test_section_names | 29 | test_section_names |
30 | test_tcpnotify_user | 30 | test_tcpnotify_user |
31 | test_libbpf | ||
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 73aa6d8f4a2f..70229de510f5 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile | |||
@@ -55,7 +55,9 @@ TEST_PROGS := test_kmod.sh \ | |||
55 | test_flow_dissector.sh \ | 55 | test_flow_dissector.sh \ |
56 | test_xdp_vlan.sh | 56 | test_xdp_vlan.sh |
57 | 57 | ||
58 | TEST_PROGS_EXTENDED := with_addr.sh | 58 | TEST_PROGS_EXTENDED := with_addr.sh \ |
59 | tcp_client.py \ | ||
60 | tcp_server.py | ||
59 | 61 | ||
60 | # Compile but not part of 'make run_tests' | 62 | # Compile but not part of 'make run_tests' |
61 | TEST_GEN_PROGS_EXTENDED = test_libbpf_open test_sock_addr test_skb_cgroup_id_user \ | 63 | TEST_GEN_PROGS_EXTENDED = test_libbpf_open test_sock_addr test_skb_cgroup_id_user \ |
diff --git a/tools/testing/selftests/bpf/cgroup_helpers.c b/tools/testing/selftests/bpf/cgroup_helpers.c index cf16948aad4a..6692a40a6979 100644 --- a/tools/testing/selftests/bpf/cgroup_helpers.c +++ b/tools/testing/selftests/bpf/cgroup_helpers.c | |||
@@ -155,7 +155,7 @@ void cleanup_cgroup_environment(void) | |||
155 | * This function creates a cgroup under the top level workdir and returns the | 155 | * This function creates a cgroup under the top level workdir and returns the |
156 | * file descriptor. It is idempotent. | 156 | * file descriptor. It is idempotent. |
157 | * | 157 | * |
158 | * On success, it returns the file descriptor. On failure it returns 0. | 158 | * On success, it returns the file descriptor. On failure it returns -1. |
159 | * If there is a failure, it prints the error to stderr. | 159 | * If there is a failure, it prints the error to stderr. |
160 | */ | 160 | */ |
161 | int create_and_get_cgroup(const char *path) | 161 | int create_and_get_cgroup(const char *path) |
@@ -166,13 +166,13 @@ int create_and_get_cgroup(const char *path) | |||
166 | format_cgroup_path(cgroup_path, path); | 166 | format_cgroup_path(cgroup_path, path); |
167 | if (mkdir(cgroup_path, 0777) && errno != EEXIST) { | 167 | if (mkdir(cgroup_path, 0777) && errno != EEXIST) { |
168 | log_err("mkdiring cgroup %s .. %s", path, cgroup_path); | 168 | log_err("mkdiring cgroup %s .. %s", path, cgroup_path); |
169 | return 0; | 169 | return -1; |
170 | } | 170 | } |
171 | 171 | ||
172 | fd = open(cgroup_path, O_RDONLY); | 172 | fd = open(cgroup_path, O_RDONLY); |
173 | if (fd < 0) { | 173 | if (fd < 0) { |
174 | log_err("Opening Cgroup"); | 174 | log_err("Opening Cgroup"); |
175 | return 0; | 175 | return -1; |
176 | } | 176 | } |
177 | 177 | ||
178 | return fd; | 178 | return fd; |
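create_and_get_cgroup() now signals failure with -1 instead of 0, and every caller in the samples and selftests switches from `!fd` to `fd < 0`: 0 is a valid file descriptor, so testing for zero can misreport both success and failure. A minimal sketch of the corrected convention, assuming nothing beyond POSIX open():

```c
/* The fd error-handling convention the selftest fixes adopt: a descriptor
 * of 0 is valid, so failure must be signalled and tested as negative.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int get_fd(const char *path)
{
	int fd = open(path, O_RDONLY);

	if (fd < 0) {
		perror("open");
		return -1;	/* error marker, mirrors create_and_get_cgroup() */
	}
	return fd;		/* may legitimately be 0 */
}

int main(void)
{
	int fd = get_fd("/dev/null");

	if (fd < 0)		/* correct check */
		return 1;
	/* "if (!fd)" would wrongly treat a valid fd 0 as an error */
	close(fd);
	return 0;
}
```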
diff --git a/tools/testing/selftests/bpf/test_btf.c b/tools/testing/selftests/bpf/test_btf.c index 8bcd38010582..a0bd04befe87 100644 --- a/tools/testing/selftests/bpf/test_btf.c +++ b/tools/testing/selftests/bpf/test_btf.c | |||
@@ -3526,6 +3526,8 @@ struct pprint_mapv { | |||
3526 | ENUM_TWO, | 3526 | ENUM_TWO, |
3527 | ENUM_THREE, | 3527 | ENUM_THREE, |
3528 | } aenum; | 3528 | } aenum; |
3529 | uint32_t ui32b; | ||
3530 | uint32_t bits2c:2; | ||
3529 | }; | 3531 | }; |
3530 | 3532 | ||
3531 | static struct btf_raw_test pprint_test_template[] = { | 3533 | static struct btf_raw_test pprint_test_template[] = { |
@@ -3568,7 +3570,7 @@ static struct btf_raw_test pprint_test_template[] = { | |||
3568 | BTF_ENUM_ENC(NAME_TBD, 2), | 3570 | BTF_ENUM_ENC(NAME_TBD, 2), |
3569 | BTF_ENUM_ENC(NAME_TBD, 3), | 3571 | BTF_ENUM_ENC(NAME_TBD, 3), |
3570 | /* struct pprint_mapv */ /* [16] */ | 3572 | /* struct pprint_mapv */ /* [16] */ |
3571 | BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 8), 32), | 3573 | BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 10), 40), |
3572 | BTF_MEMBER_ENC(NAME_TBD, 11, 0), /* uint32_t ui32 */ | 3574 | BTF_MEMBER_ENC(NAME_TBD, 11, 0), /* uint32_t ui32 */ |
3573 | BTF_MEMBER_ENC(NAME_TBD, 10, 32), /* uint16_t ui16 */ | 3575 | BTF_MEMBER_ENC(NAME_TBD, 10, 32), /* uint16_t ui16 */ |
3574 | BTF_MEMBER_ENC(NAME_TBD, 12, 64), /* int32_t si32 */ | 3576 | BTF_MEMBER_ENC(NAME_TBD, 12, 64), /* int32_t si32 */ |
@@ -3577,9 +3579,11 @@ static struct btf_raw_test pprint_test_template[] = { | |||
3577 | BTF_MEMBER_ENC(NAME_TBD, 6, 126), /* unused_bits2b */ | 3579 | BTF_MEMBER_ENC(NAME_TBD, 6, 126), /* unused_bits2b */ |
3578 | BTF_MEMBER_ENC(0, 14, 128), /* union (anon) */ | 3580 | BTF_MEMBER_ENC(0, 14, 128), /* union (anon) */ |
3579 | BTF_MEMBER_ENC(NAME_TBD, 15, 192), /* aenum */ | 3581 | BTF_MEMBER_ENC(NAME_TBD, 15, 192), /* aenum */ |
3582 | BTF_MEMBER_ENC(NAME_TBD, 11, 224), /* uint32_t ui32b */ | ||
3583 | BTF_MEMBER_ENC(NAME_TBD, 6, 256), /* bits2c */ | ||
3580 | BTF_END_RAW, | 3584 | BTF_END_RAW, |
3581 | }, | 3585 | }, |
3582 | BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum"), | 3586 | BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum\0ui32b\0bits2c"), |
3583 | .key_size = sizeof(unsigned int), | 3587 | .key_size = sizeof(unsigned int), |
3584 | .value_size = sizeof(struct pprint_mapv), | 3588 | .value_size = sizeof(struct pprint_mapv), |
3585 | .key_type_id = 3, /* unsigned int */ | 3589 | .key_type_id = 3, /* unsigned int */ |
@@ -3628,7 +3632,7 @@ static struct btf_raw_test pprint_test_template[] = { | |||
3628 | BTF_ENUM_ENC(NAME_TBD, 2), | 3632 | BTF_ENUM_ENC(NAME_TBD, 2), |
3629 | BTF_ENUM_ENC(NAME_TBD, 3), | 3633 | BTF_ENUM_ENC(NAME_TBD, 3), |
3630 | /* struct pprint_mapv */ /* [16] */ | 3634 | /* struct pprint_mapv */ /* [16] */ |
3631 | BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 8), 32), | 3635 | BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 10), 40), |
3632 | BTF_MEMBER_ENC(NAME_TBD, 11, BTF_MEMBER_OFFSET(0, 0)), /* uint32_t ui32 */ | 3636 | BTF_MEMBER_ENC(NAME_TBD, 11, BTF_MEMBER_OFFSET(0, 0)), /* uint32_t ui32 */ |
3633 | BTF_MEMBER_ENC(NAME_TBD, 10, BTF_MEMBER_OFFSET(0, 32)), /* uint16_t ui16 */ | 3637 | BTF_MEMBER_ENC(NAME_TBD, 10, BTF_MEMBER_OFFSET(0, 32)), /* uint16_t ui16 */ |
3634 | BTF_MEMBER_ENC(NAME_TBD, 12, BTF_MEMBER_OFFSET(0, 64)), /* int32_t si32 */ | 3638 | BTF_MEMBER_ENC(NAME_TBD, 12, BTF_MEMBER_OFFSET(0, 64)), /* int32_t si32 */ |
@@ -3637,9 +3641,11 @@ static struct btf_raw_test pprint_test_template[] = { | |||
3637 | BTF_MEMBER_ENC(NAME_TBD, 6, BTF_MEMBER_OFFSET(2, 126)), /* unused_bits2b */ | 3641 | BTF_MEMBER_ENC(NAME_TBD, 6, BTF_MEMBER_OFFSET(2, 126)), /* unused_bits2b */ |
3638 | BTF_MEMBER_ENC(0, 14, BTF_MEMBER_OFFSET(0, 128)), /* union (anon) */ | 3642 | BTF_MEMBER_ENC(0, 14, BTF_MEMBER_OFFSET(0, 128)), /* union (anon) */ |
3639 | BTF_MEMBER_ENC(NAME_TBD, 15, BTF_MEMBER_OFFSET(0, 192)), /* aenum */ | 3643 | BTF_MEMBER_ENC(NAME_TBD, 15, BTF_MEMBER_OFFSET(0, 192)), /* aenum */ |
3644 | BTF_MEMBER_ENC(NAME_TBD, 11, BTF_MEMBER_OFFSET(0, 224)), /* uint32_t ui32b */ | ||
3645 | BTF_MEMBER_ENC(NAME_TBD, 6, BTF_MEMBER_OFFSET(2, 256)), /* bits2c */ | ||
3640 | BTF_END_RAW, | 3646 | BTF_END_RAW, |
3641 | }, | 3647 | }, |
3642 | BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum"), | 3648 | BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum\0ui32b\0bits2c"), |
3643 | .key_size = sizeof(unsigned int), | 3649 | .key_size = sizeof(unsigned int), |
3644 | .value_size = sizeof(struct pprint_mapv), | 3650 | .value_size = sizeof(struct pprint_mapv), |
3645 | .key_type_id = 3, /* unsigned int */ | 3651 | .key_type_id = 3, /* unsigned int */ |
@@ -3690,7 +3696,7 @@ static struct btf_raw_test pprint_test_template[] = { | |||
3690 | BTF_ENUM_ENC(NAME_TBD, 2), | 3696 | BTF_ENUM_ENC(NAME_TBD, 2), |
3691 | BTF_ENUM_ENC(NAME_TBD, 3), | 3697 | BTF_ENUM_ENC(NAME_TBD, 3), |
3692 | /* struct pprint_mapv */ /* [16] */ | 3698 | /* struct pprint_mapv */ /* [16] */ |
3693 | BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 8), 32), | 3699 | BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 10), 40), |
3694 | BTF_MEMBER_ENC(NAME_TBD, 11, BTF_MEMBER_OFFSET(0, 0)), /* uint32_t ui32 */ | 3700 | BTF_MEMBER_ENC(NAME_TBD, 11, BTF_MEMBER_OFFSET(0, 0)), /* uint32_t ui32 */ |
3695 | BTF_MEMBER_ENC(NAME_TBD, 10, BTF_MEMBER_OFFSET(0, 32)), /* uint16_t ui16 */ | 3701 | BTF_MEMBER_ENC(NAME_TBD, 10, BTF_MEMBER_OFFSET(0, 32)), /* uint16_t ui16 */ |
3696 | BTF_MEMBER_ENC(NAME_TBD, 12, BTF_MEMBER_OFFSET(0, 64)), /* int32_t si32 */ | 3702 | BTF_MEMBER_ENC(NAME_TBD, 12, BTF_MEMBER_OFFSET(0, 64)), /* int32_t si32 */ |
@@ -3699,13 +3705,15 @@ static struct btf_raw_test pprint_test_template[] = { | |||
3699 | BTF_MEMBER_ENC(NAME_TBD, 19, BTF_MEMBER_OFFSET(2, 126)),/* unused_bits2b */ | 3705 | BTF_MEMBER_ENC(NAME_TBD, 19, BTF_MEMBER_OFFSET(2, 126)),/* unused_bits2b */ |
3700 | BTF_MEMBER_ENC(0, 14, BTF_MEMBER_OFFSET(0, 128)), /* union (anon) */ | 3706 | BTF_MEMBER_ENC(0, 14, BTF_MEMBER_OFFSET(0, 128)), /* union (anon) */ |
3701 | BTF_MEMBER_ENC(NAME_TBD, 15, BTF_MEMBER_OFFSET(0, 192)), /* aenum */ | 3707 | BTF_MEMBER_ENC(NAME_TBD, 15, BTF_MEMBER_OFFSET(0, 192)), /* aenum */ |
3708 | BTF_MEMBER_ENC(NAME_TBD, 11, BTF_MEMBER_OFFSET(0, 224)), /* uint32_t ui32b */ | ||
3709 | BTF_MEMBER_ENC(NAME_TBD, 17, BTF_MEMBER_OFFSET(2, 256)), /* bits2c */ | ||
3702 | /* typedef unsigned int ___int */ /* [17] */ | 3710 | /* typedef unsigned int ___int */ /* [17] */ |
3703 | BTF_TYPEDEF_ENC(NAME_TBD, 18), | 3711 | BTF_TYPEDEF_ENC(NAME_TBD, 18), |
3704 | BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_VOLATILE, 0, 0), 6), /* [18] */ | 3712 | BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_VOLATILE, 0, 0), 6), /* [18] */ |
3705 | BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 15), /* [19] */ | 3713 | BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 15), /* [19] */ |
3706 | BTF_END_RAW, | 3714 | BTF_END_RAW, |
3707 | }, | 3715 | }, |
3708 | BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum\0___int"), | 3716 | BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum\0ui32b\0bits2c\0___int"), |
3709 | .key_size = sizeof(unsigned int), | 3717 | .key_size = sizeof(unsigned int), |
3710 | .value_size = sizeof(struct pprint_mapv), | 3718 | .value_size = sizeof(struct pprint_mapv), |
3711 | .key_type_id = 3, /* unsigned int */ | 3719 | .key_type_id = 3, /* unsigned int */ |
@@ -3793,6 +3801,8 @@ static void set_pprint_mapv(struct pprint_mapv *v, uint32_t i, | |||
3793 | v->unused_bits2b = 3; | 3801 | v->unused_bits2b = 3; |
3794 | v->ui64 = i; | 3802 | v->ui64 = i; |
3795 | v->aenum = i & 0x03; | 3803 | v->aenum = i & 0x03; |
3804 | v->ui32b = 4; | ||
3805 | v->bits2c = 1; | ||
3796 | v = (void *)v + rounded_value_size; | 3806 | v = (void *)v + rounded_value_size; |
3797 | } | 3807 | } |
3798 | } | 3808 | } |
@@ -3955,7 +3965,8 @@ static int do_test_pprint(int test_num) | |||
3955 | 3965 | ||
3956 | nexpected_line = snprintf(expected_line, sizeof(expected_line), | 3966 | nexpected_line = snprintf(expected_line, sizeof(expected_line), |
3957 | "%s%u: {%u,0,%d,0x%x,0x%x,0x%x," | 3967 | "%s%u: {%u,0,%d,0x%x,0x%x,0x%x," |
3958 | "{%lu|[%u,%u,%u,%u,%u,%u,%u,%u]},%s}\n", | 3968 | "{%lu|[%u,%u,%u,%u,%u,%u,%u,%u]},%s," |
3969 | "%u,0x%x}\n", | ||
3959 | percpu_map ? "\tcpu" : "", | 3970 | percpu_map ? "\tcpu" : "", |
3960 | percpu_map ? cpu : next_key, | 3971 | percpu_map ? cpu : next_key, |
3961 | cmapv->ui32, cmapv->si32, | 3972 | cmapv->ui32, cmapv->si32, |
@@ -3967,7 +3978,9 @@ static int do_test_pprint(int test_num) | |||
3967 | cmapv->ui8a[2], cmapv->ui8a[3], | 3978 | cmapv->ui8a[2], cmapv->ui8a[3], |
3968 | cmapv->ui8a[4], cmapv->ui8a[5], | 3979 | cmapv->ui8a[4], cmapv->ui8a[5], |
3969 | cmapv->ui8a[6], cmapv->ui8a[7], | 3980 | cmapv->ui8a[6], cmapv->ui8a[7], |
3970 | pprint_enum_str[cmapv->aenum]); | 3981 | pprint_enum_str[cmapv->aenum], |
3982 | cmapv->ui32b, | ||
3983 | cmapv->bits2c); | ||
3971 | 3984 | ||
3972 | err = check_line(expected_line, nexpected_line, | 3985 | err = check_line(expected_line, nexpected_line, |
3973 | sizeof(expected_line), line); | 3986 | sizeof(expected_line), line); |
diff --git a/tools/testing/selftests/bpf/test_cgroup_storage.c b/tools/testing/selftests/bpf/test_cgroup_storage.c index f44834155f25..2fc4625c1a15 100644 --- a/tools/testing/selftests/bpf/test_cgroup_storage.c +++ b/tools/testing/selftests/bpf/test_cgroup_storage.c | |||
@@ -81,7 +81,7 @@ int main(int argc, char **argv) | |||
81 | 81 | ||
82 | /* Create a cgroup, get fd, and join it */ | 82 | /* Create a cgroup, get fd, and join it */ |
83 | cgroup_fd = create_and_get_cgroup(TEST_CGROUP); | 83 | cgroup_fd = create_and_get_cgroup(TEST_CGROUP); |
84 | if (!cgroup_fd) { | 84 | if (cgroup_fd < 0) { |
85 | printf("Failed to create test cgroup\n"); | 85 | printf("Failed to create test cgroup\n"); |
86 | goto err; | 86 | goto err; |
87 | } | 87 | } |
diff --git a/tools/testing/selftests/bpf/test_dev_cgroup.c b/tools/testing/selftests/bpf/test_dev_cgroup.c index 9c8b50bac7e0..76e4993b7c16 100644 --- a/tools/testing/selftests/bpf/test_dev_cgroup.c +++ b/tools/testing/selftests/bpf/test_dev_cgroup.c | |||
@@ -43,7 +43,7 @@ int main(int argc, char **argv) | |||
43 | 43 | ||
44 | /* Create a cgroup, get fd, and join it */ | 44 | /* Create a cgroup, get fd, and join it */ |
45 | cgroup_fd = create_and_get_cgroup(TEST_CGROUP); | 45 | cgroup_fd = create_and_get_cgroup(TEST_CGROUP); |
46 | if (!cgroup_fd) { | 46 | if (cgroup_fd < 0) { |
47 | printf("Failed to create test cgroup\n"); | 47 | printf("Failed to create test cgroup\n"); |
48 | goto err; | 48 | goto err; |
49 | } | 49 | } |
diff --git a/tools/testing/selftests/bpf/test_netcnt.c b/tools/testing/selftests/bpf/test_netcnt.c index 44ed7f29f8ab..c1da5404454a 100644 --- a/tools/testing/selftests/bpf/test_netcnt.c +++ b/tools/testing/selftests/bpf/test_netcnt.c | |||
@@ -65,7 +65,7 @@ int main(int argc, char **argv) | |||
65 | 65 | ||
66 | /* Create a cgroup, get fd, and join it */ | 66 | /* Create a cgroup, get fd, and join it */ |
67 | cgroup_fd = create_and_get_cgroup(TEST_CGROUP); | 67 | cgroup_fd = create_and_get_cgroup(TEST_CGROUP); |
68 | if (!cgroup_fd) { | 68 | if (cgroup_fd < 0) { |
69 | printf("Failed to create test cgroup\n"); | 69 | printf("Failed to create test cgroup\n"); |
70 | goto err; | 70 | goto err; |
71 | } | 71 | } |
diff --git a/tools/testing/selftests/bpf/test_skb_cgroup_id_user.c b/tools/testing/selftests/bpf/test_skb_cgroup_id_user.c index c121cc59f314..9220747c069d 100644 --- a/tools/testing/selftests/bpf/test_skb_cgroup_id_user.c +++ b/tools/testing/selftests/bpf/test_skb_cgroup_id_user.c | |||
@@ -164,7 +164,7 @@ int main(int argc, char **argv) | |||
164 | goto err; | 164 | goto err; |
165 | 165 | ||
166 | cgfd = create_and_get_cgroup(CGROUP_PATH); | 166 | cgfd = create_and_get_cgroup(CGROUP_PATH); |
167 | if (!cgfd) | 167 | if (cgfd < 0) |
168 | goto err; | 168 | goto err; |
169 | 169 | ||
170 | if (join_cgroup(CGROUP_PATH)) | 170 | if (join_cgroup(CGROUP_PATH)) |
diff --git a/tools/testing/selftests/bpf/test_sock.c b/tools/testing/selftests/bpf/test_sock.c index b8ebe2f58074..561ffb6d6433 100644 --- a/tools/testing/selftests/bpf/test_sock.c +++ b/tools/testing/selftests/bpf/test_sock.c | |||
@@ -458,7 +458,7 @@ int main(int argc, char **argv) | |||
458 | goto err; | 458 | goto err; |
459 | 459 | ||
460 | cgfd = create_and_get_cgroup(CG_PATH); | 460 | cgfd = create_and_get_cgroup(CG_PATH); |
461 | if (!cgfd) | 461 | if (cgfd < 0) |
462 | goto err; | 462 | goto err; |
463 | 463 | ||
464 | if (join_cgroup(CG_PATH)) | 464 | if (join_cgroup(CG_PATH)) |
diff --git a/tools/testing/selftests/bpf/test_sock_addr.c b/tools/testing/selftests/bpf/test_sock_addr.c index 73b7493d4120..3f110eaaf29c 100644 --- a/tools/testing/selftests/bpf/test_sock_addr.c +++ b/tools/testing/selftests/bpf/test_sock_addr.c | |||
@@ -44,6 +44,7 @@ | |||
44 | #define SERV6_V4MAPPED_IP "::ffff:192.168.0.4" | 44 | #define SERV6_V4MAPPED_IP "::ffff:192.168.0.4" |
45 | #define SRC6_IP "::1" | 45 | #define SRC6_IP "::1" |
46 | #define SRC6_REWRITE_IP "::6" | 46 | #define SRC6_REWRITE_IP "::6" |
47 | #define WILDCARD6_IP "::" | ||
47 | #define SERV6_PORT 6060 | 48 | #define SERV6_PORT 6060 |
48 | #define SERV6_REWRITE_PORT 6666 | 49 | #define SERV6_REWRITE_PORT 6666 |
49 | 50 | ||
@@ -85,12 +86,14 @@ static int bind4_prog_load(const struct sock_addr_test *test); | |||
85 | static int bind6_prog_load(const struct sock_addr_test *test); | 86 | static int bind6_prog_load(const struct sock_addr_test *test); |
86 | static int connect4_prog_load(const struct sock_addr_test *test); | 87 | static int connect4_prog_load(const struct sock_addr_test *test); |
87 | static int connect6_prog_load(const struct sock_addr_test *test); | 88 | static int connect6_prog_load(const struct sock_addr_test *test); |
89 | static int sendmsg_allow_prog_load(const struct sock_addr_test *test); | ||
88 | static int sendmsg_deny_prog_load(const struct sock_addr_test *test); | 90 | static int sendmsg_deny_prog_load(const struct sock_addr_test *test); |
89 | static int sendmsg4_rw_asm_prog_load(const struct sock_addr_test *test); | 91 | static int sendmsg4_rw_asm_prog_load(const struct sock_addr_test *test); |
90 | static int sendmsg4_rw_c_prog_load(const struct sock_addr_test *test); | 92 | static int sendmsg4_rw_c_prog_load(const struct sock_addr_test *test); |
91 | static int sendmsg6_rw_asm_prog_load(const struct sock_addr_test *test); | 93 | static int sendmsg6_rw_asm_prog_load(const struct sock_addr_test *test); |
92 | static int sendmsg6_rw_c_prog_load(const struct sock_addr_test *test); | 94 | static int sendmsg6_rw_c_prog_load(const struct sock_addr_test *test); |
93 | static int sendmsg6_rw_v4mapped_prog_load(const struct sock_addr_test *test); | 95 | static int sendmsg6_rw_v4mapped_prog_load(const struct sock_addr_test *test); |
96 | static int sendmsg6_rw_wildcard_prog_load(const struct sock_addr_test *test); | ||
94 | 97 | ||
95 | static struct sock_addr_test tests[] = { | 98 | static struct sock_addr_test tests[] = { |
96 | /* bind */ | 99 | /* bind */ |
@@ -463,6 +466,34 @@ static struct sock_addr_test tests[] = { | |||
463 | SYSCALL_ENOTSUPP, | 466 | SYSCALL_ENOTSUPP, |
464 | }, | 467 | }, |
465 | { | 468 | { |
469 | "sendmsg6: set dst IP = [::] (BSD'ism)", | ||
470 | sendmsg6_rw_wildcard_prog_load, | ||
471 | BPF_CGROUP_UDP6_SENDMSG, | ||
472 | BPF_CGROUP_UDP6_SENDMSG, | ||
473 | AF_INET6, | ||
474 | SOCK_DGRAM, | ||
475 | SERV6_IP, | ||
476 | SERV6_PORT, | ||
477 | SERV6_REWRITE_IP, | ||
478 | SERV6_REWRITE_PORT, | ||
479 | SRC6_REWRITE_IP, | ||
480 | SUCCESS, | ||
481 | }, | ||
482 | { | ||
483 | "sendmsg6: preserve dst IP = [::] (BSD'ism)", | ||
484 | sendmsg_allow_prog_load, | ||
485 | BPF_CGROUP_UDP6_SENDMSG, | ||
486 | BPF_CGROUP_UDP6_SENDMSG, | ||
487 | AF_INET6, | ||
488 | SOCK_DGRAM, | ||
489 | WILDCARD6_IP, | ||
490 | SERV6_PORT, | ||
491 | SERV6_REWRITE_IP, | ||
492 | SERV6_PORT, | ||
493 | SRC6_IP, | ||
494 | SUCCESS, | ||
495 | }, | ||
496 | { | ||
466 | "sendmsg6: deny call", | 497 | "sendmsg6: deny call", |
467 | sendmsg_deny_prog_load, | 498 | sendmsg_deny_prog_load, |
468 | BPF_CGROUP_UDP6_SENDMSG, | 499 | BPF_CGROUP_UDP6_SENDMSG, |
@@ -734,16 +765,27 @@ static int connect6_prog_load(const struct sock_addr_test *test) | |||
734 | return load_path(test, CONNECT6_PROG_PATH); | 765 | return load_path(test, CONNECT6_PROG_PATH); |
735 | } | 766 | } |
736 | 767 | ||
737 | static int sendmsg_deny_prog_load(const struct sock_addr_test *test) | 768 | static int sendmsg_ret_only_prog_load(const struct sock_addr_test *test, |
769 | int32_t rc) | ||
738 | { | 770 | { |
739 | struct bpf_insn insns[] = { | 771 | struct bpf_insn insns[] = { |
740 | /* return 0 */ | 772 | /* return rc */ |
741 | BPF_MOV64_IMM(BPF_REG_0, 0), | 773 | BPF_MOV64_IMM(BPF_REG_0, rc), |
742 | BPF_EXIT_INSN(), | 774 | BPF_EXIT_INSN(), |
743 | }; | 775 | }; |
744 | return load_insns(test, insns, sizeof(insns) / sizeof(struct bpf_insn)); | 776 | return load_insns(test, insns, sizeof(insns) / sizeof(struct bpf_insn)); |
745 | } | 777 | } |
746 | 778 | ||
779 | static int sendmsg_allow_prog_load(const struct sock_addr_test *test) | ||
780 | { | ||
781 | return sendmsg_ret_only_prog_load(test, /*rc*/ 1); | ||
782 | } | ||
783 | |||
784 | static int sendmsg_deny_prog_load(const struct sock_addr_test *test) | ||
785 | { | ||
786 | return sendmsg_ret_only_prog_load(test, /*rc*/ 0); | ||
787 | } | ||
788 | |||
747 | static int sendmsg4_rw_asm_prog_load(const struct sock_addr_test *test) | 789 | static int sendmsg4_rw_asm_prog_load(const struct sock_addr_test *test) |
748 | { | 790 | { |
749 | struct sockaddr_in dst4_rw_addr; | 791 | struct sockaddr_in dst4_rw_addr; |
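
For reference, the ret-only programs built above are just two instructions: set R0 and exit. A hedged restricted-C equivalent is sketched below; the section name, context type, and "bpf_helpers.h" header are assumptions based on the attach type used by these tests, not part of the patch.

/* Rough C equivalent of sendmsg_allow_prog_load()/sendmsg_deny_prog_load():
 * returning 1 allows the sendmsg call, returning 0 rejects it.
 */
#include <linux/bpf.h>
#include "bpf_helpers.h"

SEC("cgroup/sendmsg6")
int sendmsg6_allow(struct bpf_sock_addr *ctx)
{
	return 1;	/* the deny variant is identical but returns 0 */
}

char _license[] SEC("license") = "GPL";
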
@@ -864,6 +906,11 @@ static int sendmsg6_rw_v4mapped_prog_load(const struct sock_addr_test *test) | |||
864 | return sendmsg6_rw_dst_asm_prog_load(test, SERV6_V4MAPPED_IP); | 906 | return sendmsg6_rw_dst_asm_prog_load(test, SERV6_V4MAPPED_IP); |
865 | } | 907 | } |
866 | 908 | ||
909 | static int sendmsg6_rw_wildcard_prog_load(const struct sock_addr_test *test) | ||
910 | { | ||
911 | return sendmsg6_rw_dst_asm_prog_load(test, WILDCARD6_IP); | ||
912 | } | ||
913 | |||
867 | static int sendmsg6_rw_c_prog_load(const struct sock_addr_test *test) | 914 | static int sendmsg6_rw_c_prog_load(const struct sock_addr_test *test) |
868 | { | 915 | { |
869 | return load_path(test, SENDMSG6_PROG_PATH); | 916 | return load_path(test, SENDMSG6_PROG_PATH); |
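
The new WILDCARD6_IP cases exercise the BSD-derived behaviour where a UDP datagram addressed to the unspecified address [::] is treated as a send to the local host: one case rewrites the destination to [::], the other checks that an allow-only program leaves such a destination untouched. A purely illustrative user-space snippet of the kind of datagram involved (addresses, port, and helper name are illustrative only, not taken from the test):

/* Illustration only: a UDP datagram addressed to "::".  With the
 * BSD'ism preserved, such a send reaches the local host, which the
 * "preserve dst IP = [::]" case relies on when the attached program
 * merely returns 1.
 */
#include <arpa/inet.h>
#include <sys/socket.h>
#include <unistd.h>

static int send_to_wildcard6(unsigned short port)
{
	struct sockaddr_in6 dst = {
		.sin6_family = AF_INET6,
		.sin6_port = htons(port),
		.sin6_addr = IN6ADDR_ANY_INIT,	/* "::" */
	};
	int fd = socket(AF_INET6, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;
	sendto(fd, "ping", 4, 0, (struct sockaddr *)&dst, sizeof(dst));
	close(fd);
	return 0;
}
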
@@ -1395,7 +1442,7 @@ int main(int argc, char **argv) | |||
1395 | goto err; | 1442 | goto err; |
1396 | 1443 | ||
1397 | cgfd = create_and_get_cgroup(CG_PATH); | 1444 | cgfd = create_and_get_cgroup(CG_PATH); |
1398 | if (!cgfd) | 1445 | if (cgfd < 0) |
1399 | goto err; | 1446 | goto err; |
1400 | 1447 | ||
1401 | if (join_cgroup(CG_PATH)) | 1448 | if (join_cgroup(CG_PATH)) |
diff --git a/tools/testing/selftests/bpf/test_socket_cookie.c b/tools/testing/selftests/bpf/test_socket_cookie.c index b6c2c605d8c0..fc7832ee566b 100644 --- a/tools/testing/selftests/bpf/test_socket_cookie.c +++ b/tools/testing/selftests/bpf/test_socket_cookie.c | |||
@@ -202,7 +202,7 @@ int main(int argc, char **argv) | |||
202 | goto err; | 202 | goto err; |
203 | 203 | ||
204 | cgfd = create_and_get_cgroup(CG_PATH); | 204 | cgfd = create_and_get_cgroup(CG_PATH); |
205 | if (!cgfd) | 205 | if (cgfd < 0) |
206 | goto err; | 206 | goto err; |
207 | 207 | ||
208 | if (join_cgroup(CG_PATH)) | 208 | if (join_cgroup(CG_PATH)) |
diff --git a/tools/testing/selftests/bpf/test_tcpbpf_user.c b/tools/testing/selftests/bpf/test_tcpbpf_user.c index e6eebda7d112..716b4e3be581 100644 --- a/tools/testing/selftests/bpf/test_tcpbpf_user.c +++ b/tools/testing/selftests/bpf/test_tcpbpf_user.c | |||
@@ -103,7 +103,7 @@ int main(int argc, char **argv) | |||
103 | goto err; | 103 | goto err; |
104 | 104 | ||
105 | cg_fd = create_and_get_cgroup(cg_path); | 105 | cg_fd = create_and_get_cgroup(cg_path); |
106 | if (!cg_fd) | 106 | if (cg_fd < 0) |
107 | goto err; | 107 | goto err; |
108 | 108 | ||
109 | if (join_cgroup(cg_path)) | 109 | if (join_cgroup(cg_path)) |
diff --git a/tools/testing/selftests/bpf/test_tcpnotify_user.c b/tools/testing/selftests/bpf/test_tcpnotify_user.c index ff3c4522aed6..4e4353711a86 100644 --- a/tools/testing/selftests/bpf/test_tcpnotify_user.c +++ b/tools/testing/selftests/bpf/test_tcpnotify_user.c | |||
@@ -115,7 +115,7 @@ int main(int argc, char **argv) | |||
115 | goto err; | 115 | goto err; |
116 | 116 | ||
117 | cg_fd = create_and_get_cgroup(cg_path); | 117 | cg_fd = create_and_get_cgroup(cg_path); |
118 | if (!cg_fd) | 118 | if (cg_fd < 0) |
119 | goto err; | 119 | goto err; |
120 | 120 | ||
121 | if (join_cgroup(cg_path)) | 121 | if (join_cgroup(cg_path)) |
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index 10d44446e801..2fd90d456892 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c | |||
@@ -6934,6 +6934,126 @@ static struct bpf_test tests[] = { | |||
6934 | .retval = 1, | 6934 | .retval = 1, |
6935 | }, | 6935 | }, |
6936 | { | 6936 | { |
6937 | "map access: mixing value pointer and scalar, 1", | ||
6938 | .insns = { | ||
6939 | // load map value pointer into r0 and r2 | ||
6940 | BPF_MOV64_IMM(BPF_REG_0, 1), | ||
6941 | BPF_LD_MAP_FD(BPF_REG_ARG1, 0), | ||
6942 | BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP), | ||
6943 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16), | ||
6944 | BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0), | ||
6945 | BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), | ||
6946 | BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), | ||
6947 | BPF_EXIT_INSN(), | ||
6948 | // load some number from the map into r1 | ||
6949 | BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), | ||
6950 | // depending on r1, branch: | ||
6951 | BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 3), | ||
6952 | // branch A | ||
6953 | BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), | ||
6954 | BPF_MOV64_IMM(BPF_REG_3, 0), | ||
6955 | BPF_JMP_A(2), | ||
6956 | // branch B | ||
6957 | BPF_MOV64_IMM(BPF_REG_2, 0), | ||
6958 | BPF_MOV64_IMM(BPF_REG_3, 0x100000), | ||
6959 | // common instruction | ||
6960 | BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3), | ||
6961 | // depending on r1, branch: | ||
6962 | BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), | ||
6963 | // branch A | ||
6964 | BPF_JMP_A(4), | ||
6965 | // branch B | ||
6966 | BPF_MOV64_IMM(BPF_REG_0, 0x13371337), | ||
6967 | // verifier follows fall-through | ||
6968 | BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0x100000, 2), | ||
6969 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
6970 | BPF_EXIT_INSN(), | ||
6971 | // fake-dead code; targeted from branch A to | ||
6972 | // prevent dead code sanitization | ||
6973 | BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), | ||
6974 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
6975 | BPF_EXIT_INSN(), | ||
6976 | }, | ||
6977 | .fixup_map_array_48b = { 1 }, | ||
6978 | .result = ACCEPT, | ||
6979 | .result_unpriv = REJECT, | ||
6980 | .errstr_unpriv = "R2 tried to add from different pointers or scalars", | ||
6981 | .retval = 0, | ||
6982 | }, | ||
6983 | { | ||
6984 | "map access: mixing value pointer and scalar, 2", | ||
6985 | .insns = { | ||
6986 | // load map value pointer into r0 and r2 | ||
6987 | BPF_MOV64_IMM(BPF_REG_0, 1), | ||
6988 | BPF_LD_MAP_FD(BPF_REG_ARG1, 0), | ||
6989 | BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP), | ||
6990 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16), | ||
6991 | BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0), | ||
6992 | BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), | ||
6993 | BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), | ||
6994 | BPF_EXIT_INSN(), | ||
6995 | // load some number from the map into r1 | ||
6996 | BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), | ||
6997 | // depending on r1, branch: | ||
6998 | BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3), | ||
6999 | // branch A | ||
7000 | BPF_MOV64_IMM(BPF_REG_2, 0), | ||
7001 | BPF_MOV64_IMM(BPF_REG_3, 0x100000), | ||
7002 | BPF_JMP_A(2), | ||
7003 | // branch B | ||
7004 | BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), | ||
7005 | BPF_MOV64_IMM(BPF_REG_3, 0), | ||
7006 | // common instruction | ||
7007 | BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3), | ||
7008 | // depending on r1, branch: | ||
7009 | BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), | ||
7010 | // branch A | ||
7011 | BPF_JMP_A(4), | ||
7012 | // branch B | ||
7013 | BPF_MOV64_IMM(BPF_REG_0, 0x13371337), | ||
7014 | // verifier follows fall-through | ||
7015 | BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0x100000, 2), | ||
7016 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
7017 | BPF_EXIT_INSN(), | ||
7018 | // fake-dead code; targeted from branch A to | ||
7019 | // prevent dead code sanitization | ||
7020 | BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), | ||
7021 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
7022 | BPF_EXIT_INSN(), | ||
7023 | }, | ||
7024 | .fixup_map_array_48b = { 1 }, | ||
7025 | .result = ACCEPT, | ||
7026 | .result_unpriv = REJECT, | ||
7027 | .errstr_unpriv = "R2 tried to add from different maps or paths", | ||
7028 | .retval = 0, | ||
7029 | }, | ||
7030 | { | ||
7031 | "sanitation: alu with different scalars", | ||
7032 | .insns = { | ||
7033 | BPF_MOV64_IMM(BPF_REG_0, 1), | ||
7034 | BPF_LD_MAP_FD(BPF_REG_ARG1, 0), | ||
7035 | BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP), | ||
7036 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16), | ||
7037 | BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0), | ||
7038 | BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), | ||
7039 | BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), | ||
7040 | BPF_EXIT_INSN(), | ||
7041 | BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), | ||
7042 | BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3), | ||
7043 | BPF_MOV64_IMM(BPF_REG_2, 0), | ||
7044 | BPF_MOV64_IMM(BPF_REG_3, 0x100000), | ||
7045 | BPF_JMP_A(2), | ||
7046 | BPF_MOV64_IMM(BPF_REG_2, 42), | ||
7047 | BPF_MOV64_IMM(BPF_REG_3, 0x100001), | ||
7048 | BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3), | ||
7049 | BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), | ||
7050 | BPF_EXIT_INSN(), | ||
7051 | }, | ||
7052 | .fixup_map_array_48b = { 1 }, | ||
7053 | .result = ACCEPT, | ||
7054 | .retval = 0x100000, | ||
7055 | }, | ||
7056 | { | ||
6937 | "map access: value_ptr += known scalar, upper oob arith, test 1", | 7057 | "map access: value_ptr += known scalar, upper oob arith, test 1", |
6938 | .insns = { | 7058 | .insns = { |
6939 | BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), | 7059 | BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), |
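
The three new verifier cases above hinge on the same ambiguity: a register holds a map-value pointer on one path and a plain scalar on another before a shared ADD, so no single ALU sanitation rewrite can be correct for both paths, and unprivileged programs mixing the two must be rejected. The following is only an illustrative user-space analogue of that ambiguity, not verifier or BPF code:

/* Illustrative analogue only: two paths leave either a pointer or a
 * scalar in the same variable before a shared addition, so a static
 * analyzer cannot pick one rewrite of the ADD that is safe for both.
 */
#include <stdint.h>
#include <stdio.h>

int main(int argc, char **argv)
{
	char map_value[48] = { 0 };	/* stands in for a map value */
	uintptr_t r2, r3;

	if (argc > 1) {			/* "branch A": pointer + 0 */
		r2 = (uintptr_t)map_value;
		r3 = 0;
	} else {			/* "branch B": 0 + scalar */
		r2 = 0;
		r3 = 0x100000;
	}
	r2 += r3;			/* common ADD: pointer arithmetic or not? */

	printf("r2 = %#lx\n", (unsigned long)r2);
	return 0;
}
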
diff --git a/tools/testing/selftests/drivers/net/mlxsw/vxlan.sh b/tools/testing/selftests/drivers/net/mlxsw/vxlan.sh index dcf9f4e913e0..ae6146ec5afd 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/vxlan.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/vxlan.sh | |||
@@ -847,6 +847,24 @@ sanitization_vlan_aware_test() | |||
847 | 847 | ||
848 | log_test "vlan-aware - failed enslavement to vlan-aware bridge" | 848 | log_test "vlan-aware - failed enslavement to vlan-aware bridge" |
849 | 849 | ||
850 | bridge vlan del vid 10 dev vxlan20 | ||
851 | bridge vlan add vid 20 dev vxlan20 pvid untagged | ||
852 | |||
853 | # Test that offloading of an unsupported tunnel fails when it is | ||
854 | # triggered by addition of VLAN to a local port | ||
855 | RET=0 | ||
856 | |||
857 | # TOS must be set to inherit for VXLAN offload; setting it to 42 makes the tunnel unsupported | ||
858 | ip link set dev vxlan10 type vxlan tos 42 | ||
859 | |||
860 | ip link set dev $swp1 master br0 | ||
861 | bridge vlan add vid 10 dev $swp1 &> /dev/null | ||
862 | check_fail $? | ||
863 | |||
864 | log_test "vlan-aware - failed vlan addition to a local port" | ||
865 | |||
866 | ip link set dev vxlan10 type vxlan tos inherit | ||
867 | |||
850 | ip link del dev vxlan20 | 868 | ip link del dev vxlan20 |
851 | ip link del dev vxlan10 | 869 | ip link del dev vxlan10 |
852 | ip link del dev br0 | 870 | ip link del dev br0 |
diff --git a/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh b/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh index d8313d0438b7..04c6431b2bd8 100755 --- a/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh +++ b/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh | |||
@@ -1,7 +1,7 @@ | |||
1 | #!/bin/bash | 1 | #!/bin/bash |
2 | # SPDX-License-Identifier: GPL-2.0 | 2 | # SPDX-License-Identifier: GPL-2.0 |
3 | 3 | ||
4 | ALL_TESTS="ping_ipv4 ping_ipv6 learning flooding" | 4 | ALL_TESTS="ping_ipv4 ping_ipv6 learning flooding vlan_deletion" |
5 | NUM_NETIFS=4 | 5 | NUM_NETIFS=4 |
6 | CHECK_TC="yes" | 6 | CHECK_TC="yes" |
7 | source lib.sh | 7 | source lib.sh |
@@ -96,6 +96,19 @@ flooding() | |||
96 | flood_test $swp2 $h1 $h2 | 96 | flood_test $swp2 $h1 $h2 |
97 | } | 97 | } |
98 | 98 | ||
99 | vlan_deletion() | ||
100 | { | ||
101 | # Test that the deletion of a VLAN on a bridge port does not affect | ||
102 | # the PVID VLAN | ||
103 | log_info "Add and delete a VLAN on bridge port $swp1" | ||
104 | |||
105 | bridge vlan add vid 10 dev $swp1 | ||
106 | bridge vlan del vid 10 dev $swp1 | ||
107 | |||
108 | ping_ipv4 | ||
109 | ping_ipv6 | ||
110 | } | ||
111 | |||
99 | trap cleanup EXIT | 112 | trap cleanup EXIT |
100 | 113 | ||
101 | setup_prepare | 114 | setup_prepare |
diff --git a/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh b/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh index 56cef3b1c194..bb10e33690b2 100755 --- a/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh +++ b/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh | |||
@@ -629,7 +629,7 @@ __test_ecn_decap() | |||
629 | RET=0 | 629 | RET=0 |
630 | 630 | ||
631 | tc filter add dev $h1 ingress pref 77 prot ip \ | 631 | tc filter add dev $h1 ingress pref 77 prot ip \ |
632 | flower ip_tos $decapped_tos action pass | 632 | flower ip_tos $decapped_tos action drop |
633 | sleep 1 | 633 | sleep 1 |
634 | vxlan_encapped_ping_test v2 v1 192.0.2.17 \ | 634 | vxlan_encapped_ping_test v2 v1 192.0.2.17 \ |
635 | $orig_inner_tos $orig_outer_tos \ | 635 | $orig_inner_tos $orig_outer_tos \ |
diff --git a/tools/testing/selftests/net/ip_defrag.c b/tools/testing/selftests/net/ip_defrag.c index 61ae2782388e..5d56cc0838f6 100644 --- a/tools/testing/selftests/net/ip_defrag.c +++ b/tools/testing/selftests/net/ip_defrag.c | |||
@@ -203,6 +203,7 @@ static void send_udp_frags(int fd_raw, struct sockaddr *addr, | |||
203 | { | 203 | { |
204 | struct ip *iphdr = (struct ip *)ip_frame; | 204 | struct ip *iphdr = (struct ip *)ip_frame; |
205 | struct ip6_hdr *ip6hdr = (struct ip6_hdr *)ip_frame; | 205 | struct ip6_hdr *ip6hdr = (struct ip6_hdr *)ip_frame; |
206 | const bool ipv4 = !ipv6; | ||
206 | int res; | 207 | int res; |
207 | int offset; | 208 | int offset; |
208 | int frag_len; | 209 | int frag_len; |
@@ -239,19 +240,53 @@ static void send_udp_frags(int fd_raw, struct sockaddr *addr, | |||
239 | iphdr->ip_sum = 0; | 240 | iphdr->ip_sum = 0; |
240 | } | 241 | } |
241 | 242 | ||
243 | /* Occasionally test in-order fragments. */ | ||
244 | if (!cfg_overlap && (rand() % 100 < 15)) { | ||
245 | offset = 0; | ||
246 | while (offset < (UDP_HLEN + payload_len)) { | ||
247 | send_fragment(fd_raw, addr, alen, offset, ipv6); | ||
248 | offset += max_frag_len; | ||
249 | } | ||
250 | return; | ||
251 | } | ||
252 | |||
253 | /* Occasionally test IPv4 "runs" (see net/ipv4/ip_fragment.c) */ | ||
254 | if (ipv4 && !cfg_overlap && (rand() % 100 < 20) && | ||
255 | (payload_len > 9 * max_frag_len)) { | ||
256 | offset = 6 * max_frag_len; | ||
257 | while (offset < (UDP_HLEN + payload_len)) { | ||
258 | send_fragment(fd_raw, addr, alen, offset, ipv6); | ||
259 | offset += max_frag_len; | ||
260 | } | ||
261 | offset = 3 * max_frag_len; | ||
262 | while (offset < 6 * max_frag_len) { | ||
263 | send_fragment(fd_raw, addr, alen, offset, ipv6); | ||
264 | offset += max_frag_len; | ||
265 | } | ||
266 | offset = 0; | ||
267 | while (offset < 3 * max_frag_len) { | ||
268 | send_fragment(fd_raw, addr, alen, offset, ipv6); | ||
269 | offset += max_frag_len; | ||
270 | } | ||
271 | return; | ||
272 | } | ||
273 | |||
242 | /* Odd fragments. */ | 274 | /* Odd fragments. */ |
243 | offset = max_frag_len; | 275 | offset = max_frag_len; |
244 | while (offset < (UDP_HLEN + payload_len)) { | 276 | while (offset < (UDP_HLEN + payload_len)) { |
245 | send_fragment(fd_raw, addr, alen, offset, ipv6); | 277 | send_fragment(fd_raw, addr, alen, offset, ipv6); |
278 | /* IPv4 ignores duplicates, so randomly send a duplicate. */ | ||
279 | if (ipv4 && (1 == rand() % 100)) | ||
280 | send_fragment(fd_raw, addr, alen, offset, ipv6); | ||
246 | offset += 2 * max_frag_len; | 281 | offset += 2 * max_frag_len; |
247 | } | 282 | } |
248 | 283 | ||
249 | if (cfg_overlap) { | 284 | if (cfg_overlap) { |
250 | /* Send an extra random fragment. */ | 285 | /* Send an extra random fragment. */ |
251 | offset = rand() % (UDP_HLEN + payload_len - 1); | ||
252 | /* sendto() returns EINVAL if offset + frag_len is too small. */ | ||
253 | if (ipv6) { | 286 | if (ipv6) { |
254 | struct ip6_frag *fraghdr = (struct ip6_frag *)(ip_frame + IP6_HLEN); | 287 | struct ip6_frag *fraghdr = (struct ip6_frag *)(ip_frame + IP6_HLEN); |
288 | /* sendto() returns EINVAL if offset + frag_len is too small. */ | ||
289 | offset = rand() % (UDP_HLEN + payload_len - 1); | ||
255 | frag_len = max_frag_len + rand() % 256; | 290 | frag_len = max_frag_len + rand() % 256; |
256 | /* In IPv6 if !!(frag_len % 8), the fragment is dropped. */ | 291 | /* In IPv6 if !!(frag_len % 8), the fragment is dropped. */ |
257 | frag_len &= ~0x7; | 292 | frag_len &= ~0x7; |
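
The IPv4 "runs" branch added above deliberately sends three consecutive runs of fragments in reverse run order (offsets from 6*max_frag_len upward, then 3..6*max_frag_len, then 0..3*max_frag_len), which exercises the run handling in net/ipv4/ip_fragment.c. A small standalone sketch that just prints that send order for example values:

/* Worked example of the send order used by the IPv4 "runs" branch:
 * three in-order runs of fragments, emitted last-run-first.
 */
#include <stdio.h>

int main(void)
{
	const int max_frag_len = 16;		/* example values only */
	const int udp_hlen = 8, payload_len = 10 * max_frag_len;
	const int total = udp_hlen + payload_len;
	const int starts[] = { 6 * max_frag_len, 3 * max_frag_len, 0 };
	const int ends[]   = { total, 6 * max_frag_len, 3 * max_frag_len };

	for (int run = 0; run < 3; run++)
		for (int off = starts[run]; off < ends[run]; off += max_frag_len)
			printf("send fragment at offset %d\n", off);
	return 0;
}
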
@@ -259,13 +294,29 @@ static void send_udp_frags(int fd_raw, struct sockaddr *addr, | |||
259 | ip6hdr->ip6_plen = htons(frag_len); | 294 | ip6hdr->ip6_plen = htons(frag_len); |
260 | frag_len += IP6_HLEN; | 295 | frag_len += IP6_HLEN; |
261 | } else { | 296 | } else { |
262 | frag_len = IP4_HLEN + UDP_HLEN + rand() % 256; | 297 | /* In IPv4, duplicates and some fragments completely inside |
298 | * previously sent fragments are dropped/ignored. So | ||
299 | * random offset and frag_len can result in a dropped | ||
300 | * fragment instead of a dropped queue/packet. Hence we | ||
301 | * hard-code offset and frag_len. | ||
302 | * | ||
303 | * See ade446403bfb ("net: ipv4: do not handle duplicate | ||
304 | * fragments as overlapping"). | ||
305 | */ | ||
306 | if (max_frag_len * 4 < payload_len || max_frag_len < 16) { | ||
307 | /* not enough payload to play with random offset and frag_len. */ | ||
308 | offset = 8; | ||
309 | frag_len = IP4_HLEN + UDP_HLEN + max_frag_len; | ||
310 | } else { | ||
311 | offset = rand() % (payload_len / 2); | ||
312 | frag_len = 2 * max_frag_len + 1 + rand() % 256; | ||
313 | } | ||
263 | iphdr->ip_off = htons(offset / 8 | IP4_MF); | 314 | iphdr->ip_off = htons(offset / 8 | IP4_MF); |
264 | iphdr->ip_len = htons(frag_len); | 315 | iphdr->ip_len = htons(frag_len); |
265 | } | 316 | } |
266 | res = sendto(fd_raw, ip_frame, frag_len, 0, addr, alen); | 317 | res = sendto(fd_raw, ip_frame, frag_len, 0, addr, alen); |
267 | if (res < 0) | 318 | if (res < 0) |
268 | error(1, errno, "sendto overlap"); | 319 | error(1, errno, "sendto overlap: %d", frag_len); |
269 | if (res != frag_len) | 320 | if (res != frag_len) |
270 | error(1, 0, "sendto overlap: %d vs %d", (int)res, frag_len); | 321 | error(1, 0, "sendto overlap: %d vs %d", (int)res, frag_len); |
271 | frag_counter++; | 322 | frag_counter++; |
@@ -275,6 +326,9 @@ static void send_udp_frags(int fd_raw, struct sockaddr *addr, | |||
275 | offset = 0; | 326 | offset = 0; |
276 | while (offset < (UDP_HLEN + payload_len)) { | 327 | while (offset < (UDP_HLEN + payload_len)) { |
277 | send_fragment(fd_raw, addr, alen, offset, ipv6); | 328 | send_fragment(fd_raw, addr, alen, offset, ipv6); |
329 | /* IPv4 ignores duplicates, so randomly send a duplicate. */ | ||
330 | if (ipv4 && (1 == rand() % 100)) | ||
331 | send_fragment(fd_raw, addr, alen, offset, ipv6); | ||
278 | offset += 2 * max_frag_len; | 332 | offset += 2 * max_frag_len; |
279 | } | 333 | } |
280 | } | 334 | } |
@@ -282,7 +336,11 @@ static void send_udp_frags(int fd_raw, struct sockaddr *addr, | |||
282 | static void run_test(struct sockaddr *addr, socklen_t alen, bool ipv6) | 336 | static void run_test(struct sockaddr *addr, socklen_t alen, bool ipv6) |
283 | { | 337 | { |
284 | int fd_tx_raw, fd_rx_udp; | 338 | int fd_tx_raw, fd_rx_udp; |
285 | struct timeval tv = { .tv_sec = 0, .tv_usec = 10 * 1000 }; | 339 | /* Frag queue timeout is set to one second in the calling script; |
340 | * socket timeout should be just a bit longer to avoid tests interfering | ||
341 | * with each other. | ||
342 | */ | ||
343 | struct timeval tv = { .tv_sec = 1, .tv_usec = 10 }; | ||
286 | int idx; | 344 | int idx; |
287 | int min_frag_len = ipv6 ? 1280 : 8; | 345 | int min_frag_len = ipv6 ? 1280 : 8; |
288 | 346 | ||
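
The enlarged receive timeout above presumably reaches the receiving socket via SO_RCVTIMEO; a minimal sketch of how such a timeval would be applied (the actual setsockopt call sits elsewhere in ip_defrag.c and is not part of this hunk):

/* Sketch only: give recv_validate_udp() slightly more than the 1s
 * reassembly-queue lifetime (ipfrag_time = 1 in the calling script)
 * to complete before timing out.
 */
#include <sys/socket.h>
#include <sys/time.h>
#include <error.h>
#include <errno.h>

static void set_rx_timeout(int fd_rx_udp)
{
	struct timeval tv = { .tv_sec = 1, .tv_usec = 10 };

	if (setsockopt(fd_rx_udp, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)))
		error(1, errno, "setsockopt rcvtimeo");
}
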
@@ -308,12 +366,32 @@ static void run_test(struct sockaddr *addr, socklen_t alen, bool ipv6) | |||
308 | payload_len += (rand() % 4096)) { | 366 | payload_len += (rand() % 4096)) { |
309 | if (cfg_verbose) | 367 | if (cfg_verbose) |
310 | printf("payload_len: %d\n", payload_len); | 368 | printf("payload_len: %d\n", payload_len); |
311 | max_frag_len = min_frag_len; | 369 | |
312 | do { | 370 | if (cfg_overlap) { |
371 | /* With overlaps, one send/receive pair below takes | ||
372 | * at least one second (== timeout) to run, so there | ||
373 | * is not enough test time to run a nested loop: | ||
374 | * the full overlap test takes 20-30 seconds. | ||
375 | */ | ||
376 | max_frag_len = min_frag_len + | ||
377 | rand() % (1500 - FRAG_HLEN - min_frag_len); | ||
313 | send_udp_frags(fd_tx_raw, addr, alen, ipv6); | 378 | send_udp_frags(fd_tx_raw, addr, alen, ipv6); |
314 | recv_validate_udp(fd_rx_udp); | 379 | recv_validate_udp(fd_rx_udp); |
315 | max_frag_len += 8 * (rand() % 8); | 380 | } else { |
316 | } while (max_frag_len < (1500 - FRAG_HLEN) && max_frag_len <= payload_len); | 381 | /* Without overlaps, each packet reassembly (== one |
382 | * send/receive pair below) takes very little time to | ||
383 | * run, so we can easily afford more thorough testing | ||
384 | * with a nested loop: the full non-overlap test takes | ||
385 | * less than one second. | ||
386 | */ | ||
387 | max_frag_len = min_frag_len; | ||
388 | do { | ||
389 | send_udp_frags(fd_tx_raw, addr, alen, ipv6); | ||
390 | recv_validate_udp(fd_rx_udp); | ||
391 | max_frag_len += 8 * (rand() % 8); | ||
392 | } while (max_frag_len < (1500 - FRAG_HLEN) && | ||
393 | max_frag_len <= payload_len); | ||
394 | } | ||
317 | } | 395 | } |
318 | 396 | ||
319 | /* Cleanup. */ | 397 | /* Cleanup. */ |
diff --git a/tools/testing/selftests/net/ip_defrag.sh b/tools/testing/selftests/net/ip_defrag.sh index f34672796044..7dd79a9efb17 100755 --- a/tools/testing/selftests/net/ip_defrag.sh +++ b/tools/testing/selftests/net/ip_defrag.sh | |||
@@ -11,10 +11,17 @@ readonly NETNS="ns-$(mktemp -u XXXXXX)" | |||
11 | setup() { | 11 | setup() { |
12 | ip netns add "${NETNS}" | 12 | ip netns add "${NETNS}" |
13 | ip -netns "${NETNS}" link set lo up | 13 | ip -netns "${NETNS}" link set lo up |
14 | |||
14 | ip netns exec "${NETNS}" sysctl -w net.ipv4.ipfrag_high_thresh=9000000 >/dev/null 2>&1 | 15 | ip netns exec "${NETNS}" sysctl -w net.ipv4.ipfrag_high_thresh=9000000 >/dev/null 2>&1 |
15 | ip netns exec "${NETNS}" sysctl -w net.ipv4.ipfrag_low_thresh=7000000 >/dev/null 2>&1 | 16 | ip netns exec "${NETNS}" sysctl -w net.ipv4.ipfrag_low_thresh=7000000 >/dev/null 2>&1 |
17 | ip netns exec "${NETNS}" sysctl -w net.ipv4.ipfrag_time=1 >/dev/null 2>&1 | ||
18 | |||
16 | ip netns exec "${NETNS}" sysctl -w net.ipv6.ip6frag_high_thresh=9000000 >/dev/null 2>&1 | 19 | ip netns exec "${NETNS}" sysctl -w net.ipv6.ip6frag_high_thresh=9000000 >/dev/null 2>&1 |
17 | ip netns exec "${NETNS}" sysctl -w net.ipv6.ip6frag_low_thresh=7000000 >/dev/null 2>&1 | 20 | ip netns exec "${NETNS}" sysctl -w net.ipv6.ip6frag_low_thresh=7000000 >/dev/null 2>&1 |
21 | ip netns exec "${NETNS}" sysctl -w net.ipv6.ip6frag_time=1 >/dev/null 2>&1 | ||
22 | |||
23 | # DST cache can get full with a lot of frags, with GC not keeping up with the test. | ||
24 | ip netns exec "${NETNS}" sysctl -w net.ipv6.route.max_size=65536 >/dev/null 2>&1 | ||
18 | } | 25 | } |
19 | 26 | ||
20 | cleanup() { | 27 | cleanup() { |
@@ -27,7 +34,6 @@ setup | |||
27 | echo "ipv4 defrag" | 34 | echo "ipv4 defrag" |
28 | ip netns exec "${NETNS}" ./ip_defrag -4 | 35 | ip netns exec "${NETNS}" ./ip_defrag -4 |
29 | 36 | ||
30 | |||
31 | echo "ipv4 defrag with overlaps" | 37 | echo "ipv4 defrag with overlaps" |
32 | ip netns exec "${NETNS}" ./ip_defrag -4o | 38 | ip netns exec "${NETNS}" ./ip_defrag -4o |
33 | 39 | ||
@@ -37,3 +43,4 @@ ip netns exec "${NETNS}" ./ip_defrag -6 | |||
37 | echo "ipv6 defrag with overlaps" | 43 | echo "ipv6 defrag with overlaps" |
38 | ip netns exec "${NETNS}" ./ip_defrag -6o | 44 | ip netns exec "${NETNS}" ./ip_defrag -6o |
39 | 45 | ||
46 | echo "all tests done" | ||