diff options
84 files changed, 843 insertions, 696 deletions
diff --git a/Documentation/networking/tuntap.txt b/Documentation/networking/tuntap.txt index c0aab985bad9..949d5dcdd9a3 100644 --- a/Documentation/networking/tuntap.txt +++ b/Documentation/networking/tuntap.txt | |||
@@ -105,6 +105,83 @@ Copyright (C) 1999-2000 Maxim Krasnyansky <max_mk@yahoo.com> | |||
105 | Proto [2 bytes] | 105 | Proto [2 bytes] |
106 | Raw protocol(IP, IPv6, etc) frame. | 106 | Raw protocol(IP, IPv6, etc) frame. |
107 | 107 | ||
108 | 3.3 Multiqueue tuntap interface: | ||
109 | |||
110 | From version 3.8, Linux supports multiqueue tuntap which can use multiple | ||
111 | file descriptors (queues) to parallelize packet sending or receiving. The | ||
112 | device allocation is the same as before, and if a user wants to create multiple | ||
113 | queues, TUNSETIFF with the same device name must be called many times with | ||
114 | IFF_MULTI_QUEUE flag. | ||
115 | |||
116 | char *dev should be the name of the device, queues is the number of queues to | ||
117 | be created, fds is used to store and return the file descriptors (queues) | ||
118 | created to the caller. Each file descriptor serves as the interface of a | ||
119 | queue which can be accessed by userspace. | ||
120 | |||
121 | #include <linux/if.h> | ||
122 | #include <linux/if_tun.h> | ||
123 | |||
124 | int tun_alloc_mq(char *dev, int queues, int *fds) | ||
125 | { | ||
126 | struct ifreq ifr; | ||
127 | int fd, err, i; | ||
128 | |||
129 | if (!dev) | ||
130 | return -1; | ||
131 | |||
132 | memset(&ifr, 0, sizeof(ifr)); | ||
133 | /* Flags: IFF_TUN - TUN device (no Ethernet headers) | ||
134 | * IFF_TAP - TAP device | ||
135 | * | ||
136 | * IFF_NO_PI - Do not provide packet information | ||
137 | * IFF_MULTI_QUEUE - Create a queue of multiqueue device | ||
138 | */ | ||
139 | ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_MULTI_QUEUE; | ||
140 | strcpy(ifr.ifr_name, dev); | ||
141 | |||
142 | for (i = 0; i < queues; i++) { | ||
143 | if ((fd = open("/dev/net/tun", O_RDWR)) < 0) | ||
144 | goto err; | ||
145 | err = ioctl(fd, TUNSETIFF, (void *)&ifr); | ||
146 | if (err) { | ||
147 | close(fd); | ||
148 | goto err; | ||
149 | } | ||
150 | fds[i] = fd; | ||
151 | } | ||
152 | |||
153 | return 0; | ||
154 | err: | ||
155 | for (--i; i >= 0; i--) | ||
156 | close(fds[i]); | ||
157 | return err; | ||
158 | } | ||
159 | |||
160 | A new ioctl(TUNSETQUEUE) was introduced to enable or disable a queue. When | ||
161 | calling it with the IFF_DETACH_QUEUE flag, the queue is disabled. And when | ||
162 | calling it with the IFF_ATTACH_QUEUE flag, the queue is enabled. The queue is | ||
163 | enabled by default after it is created through TUNSETIFF. | ||
164 | |||
165 | fd is the file descriptor (queue) that we want to enable or disable; when | ||
166 | enable is true we enable it, otherwise we disable it. | ||
167 | |||
168 | #include <linux/if.h> | ||
169 | #include <linux/if_tun.h> | ||
170 | |||
171 | int tun_set_queue(int fd, int enable) | ||
172 | { | ||
173 | struct ifreq ifr; | ||
174 | |||
175 | memset(&ifr, 0, sizeof(ifr)); | ||
176 | |||
177 | if (enable) | ||
178 | ifr.ifr_flags = IFF_ATTACH_QUEUE; | ||
179 | else | ||
180 | ifr.ifr_flags = IFF_DETACH_QUEUE; | ||
181 | |||
182 | return ioctl(fd, TUNSETQUEUE, (void *)&ifr); | ||
183 | } | ||
184 | |||
108 | Universal TUN/TAP device driver Frequently Asked Question. | 185 | Universal TUN/TAP device driver Frequently Asked Question. |
109 | 186 | ||
110 | 1. What platforms are supported by TUN/TAP driver ? | 187 | 1. What platforms are supported by TUN/TAP driver ? |
diff --git a/MAINTAINERS b/MAINTAINERS index 95616582c728..c08411b27499 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -6412,6 +6412,8 @@ F: Documentation/networking/LICENSE.qla3xxx | |||
6412 | F: drivers/net/ethernet/qlogic/qla3xxx.* | 6412 | F: drivers/net/ethernet/qlogic/qla3xxx.* |
6413 | 6413 | ||
6414 | QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER | 6414 | QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER |
6415 | M: Rajesh Borundia <rajesh.borundia@qlogic.com> | ||
6416 | M: Shahed Shaikh <shahed.shaikh@qlogic.com> | ||
6415 | M: Jitendra Kalsaria <jitendra.kalsaria@qlogic.com> | 6417 | M: Jitendra Kalsaria <jitendra.kalsaria@qlogic.com> |
6416 | M: Sony Chacko <sony.chacko@qlogic.com> | 6418 | M: Sony Chacko <sony.chacko@qlogic.com> |
6417 | M: linux-driver@qlogic.com | 6419 | M: linux-driver@qlogic.com |
diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c index d8a7d8323414..ebaebdf30f98 100644 --- a/drivers/isdn/i4l/isdn_tty.c +++ b/drivers/isdn/i4l/isdn_tty.c | |||
@@ -902,7 +902,9 @@ isdn_tty_send_msg(modem_info *info, atemu *m, char *msg) | |||
902 | int j; | 902 | int j; |
903 | int l; | 903 | int l; |
904 | 904 | ||
905 | l = strlen(msg); | 905 | l = min(strlen(msg), sizeof(cmd.parm) - sizeof(cmd.parm.cmsg) |
906 | + sizeof(cmd.parm.cmsg.para) - 2); | ||
907 | |||
906 | if (!l) { | 908 | if (!l) { |
907 | isdn_tty_modem_result(RESULT_ERROR, info); | 909 | isdn_tty_modem_result(RESULT_ERROR, info); |
908 | return; | 910 | return; |
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 7bd068a6056a..8b4e96e01d6c 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -1964,7 +1964,6 @@ static int __bond_release_one(struct net_device *bond_dev, | |||
1964 | } | 1964 | } |
1965 | 1965 | ||
1966 | block_netpoll_tx(); | 1966 | block_netpoll_tx(); |
1967 | call_netdevice_notifiers(NETDEV_RELEASE, bond_dev); | ||
1968 | write_lock_bh(&bond->lock); | 1967 | write_lock_bh(&bond->lock); |
1969 | 1968 | ||
1970 | slave = bond_get_slave_by_dev(bond, slave_dev); | 1969 | slave = bond_get_slave_by_dev(bond, slave_dev); |
@@ -2066,8 +2065,10 @@ static int __bond_release_one(struct net_device *bond_dev, | |||
2066 | write_unlock_bh(&bond->lock); | 2065 | write_unlock_bh(&bond->lock); |
2067 | unblock_netpoll_tx(); | 2066 | unblock_netpoll_tx(); |
2068 | 2067 | ||
2069 | if (bond->slave_cnt == 0) | 2068 | if (bond->slave_cnt == 0) { |
2070 | call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev); | 2069 | call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev); |
2070 | call_netdevice_notifiers(NETDEV_RELEASE, bond->dev); | ||
2071 | } | ||
2071 | 2072 | ||
2072 | bond_compute_features(bond); | 2073 | bond_compute_features(bond); |
2073 | if (!(bond_dev->features & NETIF_F_VLAN_CHALLENGED) && | 2074 | if (!(bond_dev->features & NETIF_F_VLAN_CHALLENGED) && |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index 31c5787970db..77ebae0ac64a 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | |||
@@ -8647,7 +8647,9 @@ void bnx2x_handle_module_detect_int(struct link_params *params) | |||
8647 | MDIO_WC_DEVAD, | 8647 | MDIO_WC_DEVAD, |
8648 | MDIO_WC_REG_DIGITAL5_MISC6, | 8648 | MDIO_WC_REG_DIGITAL5_MISC6, |
8649 | &rx_tx_in_reset); | 8649 | &rx_tx_in_reset); |
8650 | if (!rx_tx_in_reset) { | 8650 | if ((!rx_tx_in_reset) && |
8651 | (params->link_flags & | ||
8652 | PHY_INITIALIZED)) { | ||
8651 | bnx2x_warpcore_reset_lane(bp, phy, 1); | 8653 | bnx2x_warpcore_reset_lane(bp, phy, 1); |
8652 | bnx2x_warpcore_config_sfi(phy, params); | 8654 | bnx2x_warpcore_config_sfi(phy, params); |
8653 | bnx2x_warpcore_reset_lane(bp, phy, 0); | 8655 | bnx2x_warpcore_reset_lane(bp, phy, 0); |
@@ -12527,6 +12529,8 @@ int bnx2x_phy_init(struct link_params *params, struct link_vars *vars) | |||
12527 | vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; | 12529 | vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; |
12528 | vars->mac_type = MAC_TYPE_NONE; | 12530 | vars->mac_type = MAC_TYPE_NONE; |
12529 | vars->phy_flags = 0; | 12531 | vars->phy_flags = 0; |
12532 | vars->check_kr2_recovery_cnt = 0; | ||
12533 | params->link_flags = PHY_INITIALIZED; | ||
12530 | /* Driver opens NIG-BRB filters */ | 12534 | /* Driver opens NIG-BRB filters */ |
12531 | bnx2x_set_rx_filter(params, 1); | 12535 | bnx2x_set_rx_filter(params, 1); |
12532 | /* Check if link flap can be avoided */ | 12536 | /* Check if link flap can be avoided */ |
@@ -12691,6 +12695,7 @@ int bnx2x_lfa_reset(struct link_params *params, | |||
12691 | struct bnx2x *bp = params->bp; | 12695 | struct bnx2x *bp = params->bp; |
12692 | vars->link_up = 0; | 12696 | vars->link_up = 0; |
12693 | vars->phy_flags = 0; | 12697 | vars->phy_flags = 0; |
12698 | params->link_flags &= ~PHY_INITIALIZED; | ||
12694 | if (!params->lfa_base) | 12699 | if (!params->lfa_base) |
12695 | return bnx2x_link_reset(params, vars, 1); | 12700 | return bnx2x_link_reset(params, vars, 1); |
12696 | /* | 12701 | /* |
@@ -13411,6 +13416,7 @@ static void bnx2x_disable_kr2(struct link_params *params, | |||
13411 | vars->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE; | 13416 | vars->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE; |
13412 | bnx2x_update_link_attr(params, vars->link_attr_sync); | 13417 | bnx2x_update_link_attr(params, vars->link_attr_sync); |
13413 | 13418 | ||
13419 | vars->check_kr2_recovery_cnt = CHECK_KR2_RECOVERY_CNT; | ||
13414 | /* Restart AN on leading lane */ | 13420 | /* Restart AN on leading lane */ |
13415 | bnx2x_warpcore_restart_AN_KR(phy, params); | 13421 | bnx2x_warpcore_restart_AN_KR(phy, params); |
13416 | } | 13422 | } |
@@ -13439,6 +13445,15 @@ static void bnx2x_check_kr2_wa(struct link_params *params, | |||
13439 | return; | 13445 | return; |
13440 | } | 13446 | } |
13441 | 13447 | ||
13448 | /* Once KR2 was disabled, wait 5 seconds before checking KR2 recovery | ||
13449 | * since some switches tend to reinit the AN process and clear the | ||
13450 | * advertised BP/NP after ~2 seconds causing the KR2 to be disabled | ||
13451 | * and recovered many times | ||
13452 | */ | ||
13453 | if (vars->check_kr2_recovery_cnt > 0) { | ||
13454 | vars->check_kr2_recovery_cnt--; | ||
13455 | return; | ||
13456 | } | ||
13442 | lane = bnx2x_get_warpcore_lane(phy, params); | 13457 | lane = bnx2x_get_warpcore_lane(phy, params); |
13443 | CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, | 13458 | CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, |
13444 | MDIO_AER_BLOCK_AER_REG, lane); | 13459 | MDIO_AER_BLOCK_AER_REG, lane); |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h index be5c195d03dd..56c2aae4e2c8 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h | |||
@@ -309,6 +309,7 @@ struct link_params { | |||
309 | req_flow_ctrl is set to AUTO */ | 309 | req_flow_ctrl is set to AUTO */ |
310 | u16 link_flags; | 310 | u16 link_flags; |
311 | #define LINK_FLAGS_INT_DISABLED (1<<0) | 311 | #define LINK_FLAGS_INT_DISABLED (1<<0) |
312 | #define PHY_INITIALIZED (1<<1) | ||
312 | u32 lfa_base; | 313 | u32 lfa_base; |
313 | }; | 314 | }; |
314 | 315 | ||
@@ -342,7 +343,8 @@ struct link_vars { | |||
342 | u32 link_status; | 343 | u32 link_status; |
343 | u32 eee_status; | 344 | u32 eee_status; |
344 | u8 fault_detected; | 345 | u8 fault_detected; |
345 | u8 rsrv1; | 346 | u8 check_kr2_recovery_cnt; |
347 | #define CHECK_KR2_RECOVERY_CNT 5 | ||
346 | u16 periodic_flags; | 348 | u16 periodic_flags; |
347 | #define PERIODIC_FLAGS_LINK_EVENT 0x0001 | 349 | #define PERIODIC_FLAGS_LINK_EVENT 0x0001 |
348 | 350 | ||
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index fdb9b5655414..93729f942358 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c | |||
@@ -1869,6 +1869,8 @@ static void tg3_link_report(struct tg3 *tp) | |||
1869 | 1869 | ||
1870 | tg3_ump_link_report(tp); | 1870 | tg3_ump_link_report(tp); |
1871 | } | 1871 | } |
1872 | |||
1873 | tp->link_up = netif_carrier_ok(tp->dev); | ||
1872 | } | 1874 | } |
1873 | 1875 | ||
1874 | static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl) | 1876 | static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl) |
@@ -2522,12 +2524,6 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp) | |||
2522 | return err; | 2524 | return err; |
2523 | } | 2525 | } |
2524 | 2526 | ||
2525 | static void tg3_carrier_on(struct tg3 *tp) | ||
2526 | { | ||
2527 | netif_carrier_on(tp->dev); | ||
2528 | tp->link_up = true; | ||
2529 | } | ||
2530 | |||
2531 | static void tg3_carrier_off(struct tg3 *tp) | 2527 | static void tg3_carrier_off(struct tg3 *tp) |
2532 | { | 2528 | { |
2533 | netif_carrier_off(tp->dev); | 2529 | netif_carrier_off(tp->dev); |
@@ -2553,7 +2549,7 @@ static int tg3_phy_reset(struct tg3 *tp) | |||
2553 | return -EBUSY; | 2549 | return -EBUSY; |
2554 | 2550 | ||
2555 | if (netif_running(tp->dev) && tp->link_up) { | 2551 | if (netif_running(tp->dev) && tp->link_up) { |
2556 | tg3_carrier_off(tp); | 2552 | netif_carrier_off(tp->dev); |
2557 | tg3_link_report(tp); | 2553 | tg3_link_report(tp); |
2558 | } | 2554 | } |
2559 | 2555 | ||
@@ -4262,9 +4258,9 @@ static bool tg3_test_and_report_link_chg(struct tg3 *tp, int curr_link_up) | |||
4262 | { | 4258 | { |
4263 | if (curr_link_up != tp->link_up) { | 4259 | if (curr_link_up != tp->link_up) { |
4264 | if (curr_link_up) { | 4260 | if (curr_link_up) { |
4265 | tg3_carrier_on(tp); | 4261 | netif_carrier_on(tp->dev); |
4266 | } else { | 4262 | } else { |
4267 | tg3_carrier_off(tp); | 4263 | netif_carrier_off(tp->dev); |
4268 | if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) | 4264 | if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) |
4269 | tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; | 4265 | tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; |
4270 | } | 4266 | } |
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 28ceb8414185..29aff55f2eea 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h | |||
@@ -349,6 +349,7 @@ struct be_adapter { | |||
349 | struct pci_dev *pdev; | 349 | struct pci_dev *pdev; |
350 | struct net_device *netdev; | 350 | struct net_device *netdev; |
351 | 351 | ||
352 | u8 __iomem *csr; /* CSR BAR used only for BE2/3 */ | ||
352 | u8 __iomem *db; /* Door Bell */ | 353 | u8 __iomem *db; /* Door Bell */ |
353 | 354 | ||
354 | struct mutex mbox_lock; /* For serializing mbox cmds to BE card */ | 355 | struct mutex mbox_lock; /* For serializing mbox cmds to BE card */ |
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 071aea79d218..3c9b4f12e3e5 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c | |||
@@ -473,19 +473,17 @@ static int be_mbox_notify_wait(struct be_adapter *adapter) | |||
473 | return 0; | 473 | return 0; |
474 | } | 474 | } |
475 | 475 | ||
476 | static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage) | 476 | static u16 be_POST_stage_get(struct be_adapter *adapter) |
477 | { | 477 | { |
478 | u32 sem; | 478 | u32 sem; |
479 | u32 reg = skyhawk_chip(adapter) ? SLIPORT_SEMAPHORE_OFFSET_SH : | ||
480 | SLIPORT_SEMAPHORE_OFFSET_BE; | ||
481 | 479 | ||
482 | pci_read_config_dword(adapter->pdev, reg, &sem); | 480 | if (BEx_chip(adapter)) |
483 | *stage = sem & POST_STAGE_MASK; | 481 | sem = ioread32(adapter->csr + SLIPORT_SEMAPHORE_OFFSET_BEx); |
484 | |||
485 | if ((sem >> POST_ERR_SHIFT) & POST_ERR_MASK) | ||
486 | return -1; | ||
487 | else | 482 | else |
488 | return 0; | 483 | pci_read_config_dword(adapter->pdev, |
484 | SLIPORT_SEMAPHORE_OFFSET_SH, &sem); | ||
485 | |||
486 | return sem & POST_STAGE_MASK; | ||
489 | } | 487 | } |
490 | 488 | ||
491 | int lancer_wait_ready(struct be_adapter *adapter) | 489 | int lancer_wait_ready(struct be_adapter *adapter) |
@@ -579,19 +577,17 @@ int be_fw_wait_ready(struct be_adapter *adapter) | |||
579 | } | 577 | } |
580 | 578 | ||
581 | do { | 579 | do { |
582 | status = be_POST_stage_get(adapter, &stage); | 580 | stage = be_POST_stage_get(adapter); |
583 | if (status) { | 581 | if (stage == POST_STAGE_ARMFW_RDY) |
584 | dev_err(dev, "POST error; stage=0x%x\n", stage); | ||
585 | return -1; | ||
586 | } else if (stage != POST_STAGE_ARMFW_RDY) { | ||
587 | if (msleep_interruptible(2000)) { | ||
588 | dev_err(dev, "Waiting for POST aborted\n"); | ||
589 | return -EINTR; | ||
590 | } | ||
591 | timeout += 2; | ||
592 | } else { | ||
593 | return 0; | 582 | return 0; |
583 | |||
584 | dev_info(dev, "Waiting for POST, %ds elapsed\n", | ||
585 | timeout); | ||
586 | if (msleep_interruptible(2000)) { | ||
587 | dev_err(dev, "Waiting for POST aborted\n"); | ||
588 | return -EINTR; | ||
594 | } | 589 | } |
590 | timeout += 2; | ||
595 | } while (timeout < 60); | 591 | } while (timeout < 60); |
596 | 592 | ||
597 | dev_err(dev, "POST timeout; stage=0x%x\n", stage); | 593 | dev_err(dev, "POST timeout; stage=0x%x\n", stage); |
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h index 541d4530d5bf..62dc220695f7 100644 --- a/drivers/net/ethernet/emulex/benet/be_hw.h +++ b/drivers/net/ethernet/emulex/benet/be_hw.h | |||
@@ -32,8 +32,8 @@ | |||
32 | #define MPU_EP_CONTROL 0 | 32 | #define MPU_EP_CONTROL 0 |
33 | 33 | ||
34 | /********** MPU semphore: used for SH & BE *************/ | 34 | /********** MPU semphore: used for SH & BE *************/ |
35 | #define SLIPORT_SEMAPHORE_OFFSET_BE 0x7c | 35 | #define SLIPORT_SEMAPHORE_OFFSET_BEx 0xac /* CSR BAR offset */ |
36 | #define SLIPORT_SEMAPHORE_OFFSET_SH 0x94 | 36 | #define SLIPORT_SEMAPHORE_OFFSET_SH 0x94 /* PCI-CFG offset */ |
37 | #define POST_STAGE_MASK 0x0000FFFF | 37 | #define POST_STAGE_MASK 0x0000FFFF |
38 | #define POST_ERR_MASK 0x1 | 38 | #define POST_ERR_MASK 0x1 |
39 | #define POST_ERR_SHIFT 31 | 39 | #define POST_ERR_SHIFT 31 |
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 3860888ac711..08e54f3d288b 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c | |||
@@ -3688,6 +3688,8 @@ static void be_netdev_init(struct net_device *netdev) | |||
3688 | 3688 | ||
3689 | static void be_unmap_pci_bars(struct be_adapter *adapter) | 3689 | static void be_unmap_pci_bars(struct be_adapter *adapter) |
3690 | { | 3690 | { |
3691 | if (adapter->csr) | ||
3692 | pci_iounmap(adapter->pdev, adapter->csr); | ||
3691 | if (adapter->db) | 3693 | if (adapter->db) |
3692 | pci_iounmap(adapter->pdev, adapter->db); | 3694 | pci_iounmap(adapter->pdev, adapter->db); |
3693 | } | 3695 | } |
@@ -3721,6 +3723,12 @@ static int be_map_pci_bars(struct be_adapter *adapter) | |||
3721 | adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >> | 3723 | adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >> |
3722 | SLI_INTF_IF_TYPE_SHIFT; | 3724 | SLI_INTF_IF_TYPE_SHIFT; |
3723 | 3725 | ||
3726 | if (BEx_chip(adapter) && be_physfn(adapter)) { | ||
3727 | adapter->csr = pci_iomap(adapter->pdev, 2, 0); | ||
3728 | if (adapter->csr == NULL) | ||
3729 | return -ENOMEM; | ||
3730 | } | ||
3731 | |||
3724 | addr = pci_iomap(adapter->pdev, db_bar(adapter), 0); | 3732 | addr = pci_iomap(adapter->pdev, db_bar(adapter), 0); |
3725 | if (addr == NULL) | 3733 | if (addr == NULL) |
3726 | goto pci_map_err; | 3734 | goto pci_map_err; |
@@ -4329,6 +4337,8 @@ static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev) | |||
4329 | pci_restore_state(pdev); | 4337 | pci_restore_state(pdev); |
4330 | 4338 | ||
4331 | /* Check if card is ok and fw is ready */ | 4339 | /* Check if card is ok and fw is ready */ |
4340 | dev_info(&adapter->pdev->dev, | ||
4341 | "Waiting for FW to be ready after EEH reset\n"); | ||
4332 | status = be_fw_wait_ready(adapter); | 4342 | status = be_fw_wait_ready(adapter); |
4333 | if (status) | 4343 | if (status) |
4334 | return PCI_ERS_RESULT_DISCONNECT; | 4344 | return PCI_ERS_RESULT_DISCONNECT; |
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c index 2c1813737f6d..f91a8f3f9d48 100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c | |||
@@ -36,6 +36,7 @@ | |||
36 | #include <linux/delay.h> | 36 | #include <linux/delay.h> |
37 | #include <linux/vmalloc.h> | 37 | #include <linux/vmalloc.h> |
38 | #include <linux/mdio.h> | 38 | #include <linux/mdio.h> |
39 | #include <linux/pm_runtime.h> | ||
39 | 40 | ||
40 | #include "e1000.h" | 41 | #include "e1000.h" |
41 | 42 | ||
@@ -2229,7 +2230,19 @@ static int e1000e_get_ts_info(struct net_device *netdev, | |||
2229 | return 0; | 2230 | return 0; |
2230 | } | 2231 | } |
2231 | 2232 | ||
2233 | static int e1000e_ethtool_begin(struct net_device *netdev) | ||
2234 | { | ||
2235 | return pm_runtime_get_sync(netdev->dev.parent); | ||
2236 | } | ||
2237 | |||
2238 | static void e1000e_ethtool_complete(struct net_device *netdev) | ||
2239 | { | ||
2240 | pm_runtime_put_sync(netdev->dev.parent); | ||
2241 | } | ||
2242 | |||
2232 | static const struct ethtool_ops e1000_ethtool_ops = { | 2243 | static const struct ethtool_ops e1000_ethtool_ops = { |
2244 | .begin = e1000e_ethtool_begin, | ||
2245 | .complete = e1000e_ethtool_complete, | ||
2233 | .get_settings = e1000_get_settings, | 2246 | .get_settings = e1000_get_settings, |
2234 | .set_settings = e1000_set_settings, | 2247 | .set_settings = e1000_set_settings, |
2235 | .get_drvinfo = e1000_get_drvinfo, | 2248 | .get_drvinfo = e1000_get_drvinfo, |
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index dff7bff8b8e0..121a865c7fbd 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c | |||
@@ -782,6 +782,59 @@ release: | |||
782 | } | 782 | } |
783 | 783 | ||
784 | /** | 784 | /** |
785 | * e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP | ||
786 | * @hw: pointer to the HW structure | ||
787 | * @link: link up bool flag | ||
788 | * | ||
789 | * When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications | ||
790 | * preventing further DMA write requests. Workaround the issue by disabling | ||
791 | * the de-assertion of the clock request when in 1Gpbs mode. | ||
792 | **/ | ||
793 | static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link) | ||
794 | { | ||
795 | u32 fextnvm6 = er32(FEXTNVM6); | ||
796 | s32 ret_val = 0; | ||
797 | |||
798 | if (link && (er32(STATUS) & E1000_STATUS_SPEED_1000)) { | ||
799 | u16 kmrn_reg; | ||
800 | |||
801 | ret_val = hw->phy.ops.acquire(hw); | ||
802 | if (ret_val) | ||
803 | return ret_val; | ||
804 | |||
805 | ret_val = | ||
806 | e1000e_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG, | ||
807 | &kmrn_reg); | ||
808 | if (ret_val) | ||
809 | goto release; | ||
810 | |||
811 | ret_val = | ||
812 | e1000e_write_kmrn_reg_locked(hw, | ||
813 | E1000_KMRNCTRLSTA_K1_CONFIG, | ||
814 | kmrn_reg & | ||
815 | ~E1000_KMRNCTRLSTA_K1_ENABLE); | ||
816 | if (ret_val) | ||
817 | goto release; | ||
818 | |||
819 | usleep_range(10, 20); | ||
820 | |||
821 | ew32(FEXTNVM6, fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK); | ||
822 | |||
823 | ret_val = | ||
824 | e1000e_write_kmrn_reg_locked(hw, | ||
825 | E1000_KMRNCTRLSTA_K1_CONFIG, | ||
826 | kmrn_reg); | ||
827 | release: | ||
828 | hw->phy.ops.release(hw); | ||
829 | } else { | ||
830 | /* clear FEXTNVM6 bit 8 on link down or 10/100 */ | ||
831 | ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK); | ||
832 | } | ||
833 | |||
834 | return ret_val; | ||
835 | } | ||
836 | |||
837 | /** | ||
785 | * e1000_check_for_copper_link_ich8lan - Check for link (Copper) | 838 | * e1000_check_for_copper_link_ich8lan - Check for link (Copper) |
786 | * @hw: pointer to the HW structure | 839 | * @hw: pointer to the HW structure |
787 | * | 840 | * |
@@ -818,6 +871,14 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) | |||
818 | return ret_val; | 871 | return ret_val; |
819 | } | 872 | } |
820 | 873 | ||
874 | /* Work-around I218 hang issue */ | ||
875 | if ((hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_LM) || | ||
876 | (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_V)) { | ||
877 | ret_val = e1000_k1_workaround_lpt_lp(hw, link); | ||
878 | if (ret_val) | ||
879 | return ret_val; | ||
880 | } | ||
881 | |||
821 | /* Clear link partner's EEE ability */ | 882 | /* Clear link partner's EEE ability */ |
822 | hw->dev_spec.ich8lan.eee_lp_ability = 0; | 883 | hw->dev_spec.ich8lan.eee_lp_ability = 0; |
823 | 884 | ||
@@ -3954,8 +4015,16 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw) | |||
3954 | 4015 | ||
3955 | phy_ctrl = er32(PHY_CTRL); | 4016 | phy_ctrl = er32(PHY_CTRL); |
3956 | phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE; | 4017 | phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE; |
4018 | |||
3957 | if (hw->phy.type == e1000_phy_i217) { | 4019 | if (hw->phy.type == e1000_phy_i217) { |
3958 | u16 phy_reg; | 4020 | u16 phy_reg, device_id = hw->adapter->pdev->device; |
4021 | |||
4022 | if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) || | ||
4023 | (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V)) { | ||
4024 | u32 fextnvm6 = er32(FEXTNVM6); | ||
4025 | |||
4026 | ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK); | ||
4027 | } | ||
3959 | 4028 | ||
3960 | ret_val = hw->phy.ops.acquire(hw); | 4029 | ret_val = hw->phy.ops.acquire(hw); |
3961 | if (ret_val) | 4030 | if (ret_val) |
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h index b6d3174d7d2d..8bf4655c2e17 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.h +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h | |||
@@ -92,6 +92,8 @@ | |||
92 | #define E1000_FEXTNVM4_BEACON_DURATION_8USEC 0x7 | 92 | #define E1000_FEXTNVM4_BEACON_DURATION_8USEC 0x7 |
93 | #define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3 | 93 | #define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3 |
94 | 94 | ||
95 | #define E1000_FEXTNVM6_REQ_PLL_CLK 0x00000100 | ||
96 | |||
95 | #define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL | 97 | #define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL |
96 | 98 | ||
97 | #define E1000_ICH_RAR_ENTRIES 7 | 99 | #define E1000_ICH_RAR_ENTRIES 7 |
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index a177b8b65c44..948b86ffa4f0 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c | |||
@@ -4303,6 +4303,7 @@ static int e1000_open(struct net_device *netdev) | |||
4303 | netif_start_queue(netdev); | 4303 | netif_start_queue(netdev); |
4304 | 4304 | ||
4305 | adapter->idle_check = true; | 4305 | adapter->idle_check = true; |
4306 | hw->mac.get_link_status = true; | ||
4306 | pm_runtime_put(&pdev->dev); | 4307 | pm_runtime_put(&pdev->dev); |
4307 | 4308 | ||
4308 | /* fire a link status change interrupt to start the watchdog */ | 4309 | /* fire a link status change interrupt to start the watchdog */ |
@@ -4662,6 +4663,7 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter) | |||
4662 | (adapter->hw.phy.media_type == e1000_media_type_copper)) { | 4663 | (adapter->hw.phy.media_type == e1000_media_type_copper)) { |
4663 | int ret_val; | 4664 | int ret_val; |
4664 | 4665 | ||
4666 | pm_runtime_get_sync(&adapter->pdev->dev); | ||
4665 | ret_val = e1e_rphy(hw, MII_BMCR, &phy->bmcr); | 4667 | ret_val = e1e_rphy(hw, MII_BMCR, &phy->bmcr); |
4666 | ret_val |= e1e_rphy(hw, MII_BMSR, &phy->bmsr); | 4668 | ret_val |= e1e_rphy(hw, MII_BMSR, &phy->bmsr); |
4667 | ret_val |= e1e_rphy(hw, MII_ADVERTISE, &phy->advertise); | 4669 | ret_val |= e1e_rphy(hw, MII_ADVERTISE, &phy->advertise); |
@@ -4672,6 +4674,7 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter) | |||
4672 | ret_val |= e1e_rphy(hw, MII_ESTATUS, &phy->estatus); | 4674 | ret_val |= e1e_rphy(hw, MII_ESTATUS, &phy->estatus); |
4673 | if (ret_val) | 4675 | if (ret_val) |
4674 | e_warn("Error reading PHY register\n"); | 4676 | e_warn("Error reading PHY register\n"); |
4677 | pm_runtime_put_sync(&adapter->pdev->dev); | ||
4675 | } else { | 4678 | } else { |
4676 | /* Do not read PHY registers if link is not up | 4679 | /* Do not read PHY registers if link is not up |
4677 | * Set values to typical power-on defaults | 4680 | * Set values to typical power-on defaults |
@@ -5887,8 +5890,7 @@ release: | |||
5887 | return retval; | 5890 | return retval; |
5888 | } | 5891 | } |
5889 | 5892 | ||
5890 | static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake, | 5893 | static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) |
5891 | bool runtime) | ||
5892 | { | 5894 | { |
5893 | struct net_device *netdev = pci_get_drvdata(pdev); | 5895 | struct net_device *netdev = pci_get_drvdata(pdev); |
5894 | struct e1000_adapter *adapter = netdev_priv(netdev); | 5896 | struct e1000_adapter *adapter = netdev_priv(netdev); |
@@ -5912,10 +5914,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake, | |||
5912 | } | 5914 | } |
5913 | e1000e_reset_interrupt_capability(adapter); | 5915 | e1000e_reset_interrupt_capability(adapter); |
5914 | 5916 | ||
5915 | retval = pci_save_state(pdev); | ||
5916 | if (retval) | ||
5917 | return retval; | ||
5918 | |||
5919 | status = er32(STATUS); | 5917 | status = er32(STATUS); |
5920 | if (status & E1000_STATUS_LU) | 5918 | if (status & E1000_STATUS_LU) |
5921 | wufc &= ~E1000_WUFC_LNKC; | 5919 | wufc &= ~E1000_WUFC_LNKC; |
@@ -5971,13 +5969,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake, | |||
5971 | ew32(WUFC, 0); | 5969 | ew32(WUFC, 0); |
5972 | } | 5970 | } |
5973 | 5971 | ||
5974 | *enable_wake = !!wufc; | ||
5975 | |||
5976 | /* make sure adapter isn't asleep if manageability is enabled */ | ||
5977 | if ((adapter->flags & FLAG_MNG_PT_ENABLED) || | ||
5978 | (hw->mac.ops.check_mng_mode(hw))) | ||
5979 | *enable_wake = true; | ||
5980 | |||
5981 | if (adapter->hw.phy.type == e1000_phy_igp_3) | 5972 | if (adapter->hw.phy.type == e1000_phy_igp_3) |
5982 | e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw); | 5973 | e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw); |
5983 | 5974 | ||
@@ -5986,27 +5977,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake, | |||
5986 | */ | 5977 | */ |
5987 | e1000e_release_hw_control(adapter); | 5978 | e1000e_release_hw_control(adapter); |
5988 | 5979 | ||
5989 | pci_disable_device(pdev); | 5980 | pci_clear_master(pdev); |
5990 | |||
5991 | return 0; | ||
5992 | } | ||
5993 | |||
5994 | static void e1000_power_off(struct pci_dev *pdev, bool sleep, bool wake) | ||
5995 | { | ||
5996 | if (sleep && wake) { | ||
5997 | pci_prepare_to_sleep(pdev); | ||
5998 | return; | ||
5999 | } | ||
6000 | |||
6001 | pci_wake_from_d3(pdev, wake); | ||
6002 | pci_set_power_state(pdev, PCI_D3hot); | ||
6003 | } | ||
6004 | |||
6005 | static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep, | ||
6006 | bool wake) | ||
6007 | { | ||
6008 | struct net_device *netdev = pci_get_drvdata(pdev); | ||
6009 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
6010 | 5981 | ||
6011 | /* The pci-e switch on some quad port adapters will report a | 5982 | /* The pci-e switch on some quad port adapters will report a |
6012 | * correctable error when the MAC transitions from D0 to D3. To | 5983 | * correctable error when the MAC transitions from D0 to D3. To |
@@ -6021,12 +5992,13 @@ static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep, | |||
6021 | pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL, | 5992 | pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL, |
6022 | (devctl & ~PCI_EXP_DEVCTL_CERE)); | 5993 | (devctl & ~PCI_EXP_DEVCTL_CERE)); |
6023 | 5994 | ||
6024 | e1000_power_off(pdev, sleep, wake); | 5995 | pci_save_state(pdev); |
5996 | pci_prepare_to_sleep(pdev); | ||
6025 | 5997 | ||
6026 | pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL, devctl); | 5998 | pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL, devctl); |
6027 | } else { | ||
6028 | e1000_power_off(pdev, sleep, wake); | ||
6029 | } | 5999 | } |
6000 | |||
6001 | return 0; | ||
6030 | } | 6002 | } |
6031 | 6003 | ||
6032 | #ifdef CONFIG_PCIEASPM | 6004 | #ifdef CONFIG_PCIEASPM |
@@ -6084,9 +6056,7 @@ static int __e1000_resume(struct pci_dev *pdev) | |||
6084 | if (aspm_disable_flag) | 6056 | if (aspm_disable_flag) |
6085 | e1000e_disable_aspm(pdev, aspm_disable_flag); | 6057 | e1000e_disable_aspm(pdev, aspm_disable_flag); |
6086 | 6058 | ||
6087 | pci_set_power_state(pdev, PCI_D0); | 6059 | pci_set_master(pdev); |
6088 | pci_restore_state(pdev); | ||
6089 | pci_save_state(pdev); | ||
6090 | 6060 | ||
6091 | e1000e_set_interrupt_capability(adapter); | 6061 | e1000e_set_interrupt_capability(adapter); |
6092 | if (netif_running(netdev)) { | 6062 | if (netif_running(netdev)) { |
@@ -6152,14 +6122,8 @@ static int __e1000_resume(struct pci_dev *pdev) | |||
6152 | static int e1000_suspend(struct device *dev) | 6122 | static int e1000_suspend(struct device *dev) |
6153 | { | 6123 | { |
6154 | struct pci_dev *pdev = to_pci_dev(dev); | 6124 | struct pci_dev *pdev = to_pci_dev(dev); |
6155 | int retval; | ||
6156 | bool wake; | ||
6157 | |||
6158 | retval = __e1000_shutdown(pdev, &wake, false); | ||
6159 | if (!retval) | ||
6160 | e1000_complete_shutdown(pdev, true, wake); | ||
6161 | 6125 | ||
6162 | return retval; | 6126 | return __e1000_shutdown(pdev, false); |
6163 | } | 6127 | } |
6164 | 6128 | ||
6165 | static int e1000_resume(struct device *dev) | 6129 | static int e1000_resume(struct device *dev) |
@@ -6182,13 +6146,10 @@ static int e1000_runtime_suspend(struct device *dev) | |||
6182 | struct net_device *netdev = pci_get_drvdata(pdev); | 6146 | struct net_device *netdev = pci_get_drvdata(pdev); |
6183 | struct e1000_adapter *adapter = netdev_priv(netdev); | 6147 | struct e1000_adapter *adapter = netdev_priv(netdev); |
6184 | 6148 | ||
6185 | if (e1000e_pm_ready(adapter)) { | 6149 | if (!e1000e_pm_ready(adapter)) |
6186 | bool wake; | 6150 | return 0; |
6187 | |||
6188 | __e1000_shutdown(pdev, &wake, true); | ||
6189 | } | ||
6190 | 6151 | ||
6191 | return 0; | 6152 | return __e1000_shutdown(pdev, true); |
6192 | } | 6153 | } |
6193 | 6154 | ||
6194 | static int e1000_idle(struct device *dev) | 6155 | static int e1000_idle(struct device *dev) |
@@ -6226,12 +6187,7 @@ static int e1000_runtime_resume(struct device *dev) | |||
6226 | 6187 | ||
6227 | static void e1000_shutdown(struct pci_dev *pdev) | 6188 | static void e1000_shutdown(struct pci_dev *pdev) |
6228 | { | 6189 | { |
6229 | bool wake = false; | 6190 | __e1000_shutdown(pdev, false); |
6230 | |||
6231 | __e1000_shutdown(pdev, &wake, false); | ||
6232 | |||
6233 | if (system_state == SYSTEM_POWER_OFF) | ||
6234 | e1000_complete_shutdown(pdev, false, wake); | ||
6235 | } | 6191 | } |
6236 | 6192 | ||
6237 | #ifdef CONFIG_NET_POLL_CONTROLLER | 6193 | #ifdef CONFIG_NET_POLL_CONTROLLER |
@@ -6352,9 +6308,9 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) | |||
6352 | "Cannot re-enable PCI device after reset.\n"); | 6308 | "Cannot re-enable PCI device after reset.\n"); |
6353 | result = PCI_ERS_RESULT_DISCONNECT; | 6309 | result = PCI_ERS_RESULT_DISCONNECT; |
6354 | } else { | 6310 | } else { |
6355 | pci_set_master(pdev); | ||
6356 | pdev->state_saved = true; | 6311 | pdev->state_saved = true; |
6357 | pci_restore_state(pdev); | 6312 | pci_restore_state(pdev); |
6313 | pci_set_master(pdev); | ||
6358 | 6314 | ||
6359 | pci_enable_wake(pdev, PCI_D3hot, 0); | 6315 | pci_enable_wake(pdev, PCI_D3hot, 0); |
6360 | pci_enable_wake(pdev, PCI_D3cold, 0); | 6316 | pci_enable_wake(pdev, PCI_D3cold, 0); |
@@ -6783,7 +6739,11 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
6783 | 6739 | ||
6784 | /* initialize the wol settings based on the eeprom settings */ | 6740 | /* initialize the wol settings based on the eeprom settings */ |
6785 | adapter->wol = adapter->eeprom_wol; | 6741 | adapter->wol = adapter->eeprom_wol; |
6786 | device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); | 6742 | |
6743 | /* make sure adapter isn't asleep if manageability is enabled */ | ||
6744 | if (adapter->wol || (adapter->flags & FLAG_MNG_PT_ENABLED) || | ||
6745 | (hw->mac.ops.check_mng_mode(hw))) | ||
6746 | device_wakeup_enable(&pdev->dev); | ||
6787 | 6747 | ||
6788 | /* save off EEPROM version number */ | 6748 | /* save off EEPROM version number */ |
6789 | e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers); | 6749 | e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers); |
diff --git a/drivers/net/ethernet/intel/e1000e/regs.h b/drivers/net/ethernet/intel/e1000e/regs.h index 794fe1497666..a7e6a3e37257 100644 --- a/drivers/net/ethernet/intel/e1000e/regs.h +++ b/drivers/net/ethernet/intel/e1000e/regs.h | |||
@@ -42,6 +42,7 @@ | |||
42 | #define E1000_FEXTNVM 0x00028 /* Future Extended NVM - RW */ | 42 | #define E1000_FEXTNVM 0x00028 /* Future Extended NVM - RW */ |
43 | #define E1000_FEXTNVM3 0x0003C /* Future Extended NVM 3 - RW */ | 43 | #define E1000_FEXTNVM3 0x0003C /* Future Extended NVM 3 - RW */ |
44 | #define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */ | 44 | #define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */ |
45 | #define E1000_FEXTNVM6 0x00010 /* Future Extended NVM 6 - RW */ | ||
45 | #define E1000_FEXTNVM7 0x000E4 /* Future Extended NVM 7 - RW */ | 46 | #define E1000_FEXTNVM7 0x000E4 /* Future Extended NVM 7 - RW */ |
46 | #define E1000_FCT 0x00030 /* Flow Control Type - RW */ | 47 | #define E1000_FCT 0x00030 /* Flow Control Type - RW */ |
47 | #define E1000_VET 0x00038 /* VLAN Ether Type - RW */ | 48 | #define E1000_VET 0x00038 /* VLAN Ether Type - RW */ |
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c index 84e7e0909def..b64542acfa34 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c | |||
@@ -1361,11 +1361,16 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw) | |||
1361 | switch (hw->phy.type) { | 1361 | switch (hw->phy.type) { |
1362 | case e1000_phy_i210: | 1362 | case e1000_phy_i210: |
1363 | case e1000_phy_m88: | 1363 | case e1000_phy_m88: |
1364 | if (hw->phy.id == I347AT4_E_PHY_ID || | 1364 | switch (hw->phy.id) { |
1365 | hw->phy.id == M88E1112_E_PHY_ID) | 1365 | case I347AT4_E_PHY_ID: |
1366 | case M88E1112_E_PHY_ID: | ||
1367 | case I210_I_PHY_ID: | ||
1366 | ret_val = igb_copper_link_setup_m88_gen2(hw); | 1368 | ret_val = igb_copper_link_setup_m88_gen2(hw); |
1367 | else | 1369 | break; |
1370 | default: | ||
1368 | ret_val = igb_copper_link_setup_m88(hw); | 1371 | ret_val = igb_copper_link_setup_m88(hw); |
1372 | break; | ||
1373 | } | ||
1369 | break; | 1374 | break; |
1370 | case e1000_phy_igp_3: | 1375 | case e1000_phy_igp_3: |
1371 | ret_val = igb_copper_link_setup_igp(hw); | 1376 | ret_val = igb_copper_link_setup_igp(hw); |
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index d27edbc63923..25151401c2ab 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h | |||
@@ -447,7 +447,7 @@ struct igb_adapter { | |||
447 | #endif | 447 | #endif |
448 | struct i2c_algo_bit_data i2c_algo; | 448 | struct i2c_algo_bit_data i2c_algo; |
449 | struct i2c_adapter i2c_adap; | 449 | struct i2c_adapter i2c_adap; |
450 | struct igb_i2c_client_list *i2c_clients; | 450 | struct i2c_client *i2c_client; |
451 | }; | 451 | }; |
452 | 452 | ||
453 | #define IGB_FLAG_HAS_MSI (1 << 0) | 453 | #define IGB_FLAG_HAS_MSI (1 << 0) |
diff --git a/drivers/net/ethernet/intel/igb/igb_hwmon.c b/drivers/net/ethernet/intel/igb/igb_hwmon.c index 0a9b073d0b03..4623502054d5 100644 --- a/drivers/net/ethernet/intel/igb/igb_hwmon.c +++ b/drivers/net/ethernet/intel/igb/igb_hwmon.c | |||
@@ -39,6 +39,10 @@ | |||
39 | #include <linux/pci.h> | 39 | #include <linux/pci.h> |
40 | 40 | ||
41 | #ifdef CONFIG_IGB_HWMON | 41 | #ifdef CONFIG_IGB_HWMON |
42 | struct i2c_board_info i350_sensor_info = { | ||
43 | I2C_BOARD_INFO("i350bb", (0Xf8 >> 1)), | ||
44 | }; | ||
45 | |||
42 | /* hwmon callback functions */ | 46 | /* hwmon callback functions */ |
43 | static ssize_t igb_hwmon_show_location(struct device *dev, | 47 | static ssize_t igb_hwmon_show_location(struct device *dev, |
44 | struct device_attribute *attr, | 48 | struct device_attribute *attr, |
@@ -188,6 +192,7 @@ int igb_sysfs_init(struct igb_adapter *adapter) | |||
188 | unsigned int i; | 192 | unsigned int i; |
189 | int n_attrs; | 193 | int n_attrs; |
190 | int rc = 0; | 194 | int rc = 0; |
195 | struct i2c_client *client = NULL; | ||
191 | 196 | ||
192 | /* If this method isn't defined we don't support thermals */ | 197 | /* If this method isn't defined we don't support thermals */ |
193 | if (adapter->hw.mac.ops.init_thermal_sensor_thresh == NULL) | 198 | if (adapter->hw.mac.ops.init_thermal_sensor_thresh == NULL) |
@@ -198,6 +203,15 @@ int igb_sysfs_init(struct igb_adapter *adapter) | |||
198 | if (rc) | 203 | if (rc) |
199 | goto exit; | 204 | goto exit; |
200 | 205 | ||
206 | /* init i2c_client */ | ||
207 | client = i2c_new_device(&adapter->i2c_adap, &i350_sensor_info); | ||
208 | if (client == NULL) { | ||
209 | dev_info(&adapter->pdev->dev, | ||
210 | "Failed to create new i2c device..\n"); | ||
211 | goto exit; | ||
212 | } | ||
213 | adapter->i2c_client = client; | ||
214 | |||
201 | /* Allocation space for max attributes | 215 | /* Allocation space for max attributes |
202 | * max num sensors * values (loc, temp, max, caution) | 216 | * max num sensors * values (loc, temp, max, caution) |
203 | */ | 217 | */ |
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index ed79a1c53b59..4dbd62968c7a 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c | |||
@@ -1923,10 +1923,6 @@ void igb_set_fw_version(struct igb_adapter *adapter) | |||
1923 | return; | 1923 | return; |
1924 | } | 1924 | } |
1925 | 1925 | ||
1926 | static const struct i2c_board_info i350_sensor_info = { | ||
1927 | I2C_BOARD_INFO("i350bb", 0Xf8), | ||
1928 | }; | ||
1929 | |||
1930 | /* igb_init_i2c - Init I2C interface | 1926 | /* igb_init_i2c - Init I2C interface |
1931 | * @adapter: pointer to adapter structure | 1927 | * @adapter: pointer to adapter structure |
1932 | * | 1928 | * |
@@ -6227,13 +6223,6 @@ static struct sk_buff *igb_build_rx_buffer(struct igb_ring *rx_ring, | |||
6227 | /* If we spanned a buffer we have a huge mess so test for it */ | 6223 | /* If we spanned a buffer we have a huge mess so test for it */ |
6228 | BUG_ON(unlikely(!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP))); | 6224 | BUG_ON(unlikely(!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP))); |
6229 | 6225 | ||
6230 | /* Guarantee this function can be used by verifying buffer sizes */ | ||
6231 | BUILD_BUG_ON(SKB_WITH_OVERHEAD(IGB_RX_BUFSZ) < (NET_SKB_PAD + | ||
6232 | NET_IP_ALIGN + | ||
6233 | IGB_TS_HDR_LEN + | ||
6234 | ETH_FRAME_LEN + | ||
6235 | ETH_FCS_LEN)); | ||
6236 | |||
6237 | rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; | 6226 | rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; |
6238 | page = rx_buffer->page; | 6227 | page = rx_buffer->page; |
6239 | prefetchw(page); | 6228 | prefetchw(page); |
@@ -7724,67 +7713,6 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba) | |||
7724 | } | 7713 | } |
7725 | } | 7714 | } |
7726 | 7715 | ||
7727 | static DEFINE_SPINLOCK(i2c_clients_lock); | ||
7728 | |||
7729 | /* igb_get_i2c_client - returns matching client | ||
7730 | * in adapters's client list. | ||
7731 | * @adapter: adapter struct | ||
7732 | * @dev_addr: device address of i2c needed. | ||
7733 | */ | ||
7734 | static struct i2c_client * | ||
7735 | igb_get_i2c_client(struct igb_adapter *adapter, u8 dev_addr) | ||
7736 | { | ||
7737 | ulong flags; | ||
7738 | struct igb_i2c_client_list *client_list; | ||
7739 | struct i2c_client *client = NULL; | ||
7740 | struct i2c_board_info client_info = { | ||
7741 | I2C_BOARD_INFO("igb", 0x00), | ||
7742 | }; | ||
7743 | |||
7744 | spin_lock_irqsave(&i2c_clients_lock, flags); | ||
7745 | client_list = adapter->i2c_clients; | ||
7746 | |||
7747 | /* See if we already have an i2c_client */ | ||
7748 | while (client_list) { | ||
7749 | if (client_list->client->addr == (dev_addr >> 1)) { | ||
7750 | client = client_list->client; | ||
7751 | goto exit; | ||
7752 | } else { | ||
7753 | client_list = client_list->next; | ||
7754 | } | ||
7755 | } | ||
7756 | |||
7757 | /* no client_list found, create a new one */ | ||
7758 | client_list = kzalloc(sizeof(*client_list), GFP_ATOMIC); | ||
7759 | if (client_list == NULL) | ||
7760 | goto exit; | ||
7761 | |||
7762 | /* dev_addr passed to us is left-shifted by 1 bit | ||
7763 | * i2c_new_device call expects it to be flush to the right. | ||
7764 | */ | ||
7765 | client_info.addr = dev_addr >> 1; | ||
7766 | client_info.platform_data = adapter; | ||
7767 | client_list->client = i2c_new_device(&adapter->i2c_adap, &client_info); | ||
7768 | if (client_list->client == NULL) { | ||
7769 | dev_info(&adapter->pdev->dev, | ||
7770 | "Failed to create new i2c device..\n"); | ||
7771 | goto err_no_client; | ||
7772 | } | ||
7773 | |||
7774 | /* insert new client at head of list */ | ||
7775 | client_list->next = adapter->i2c_clients; | ||
7776 | adapter->i2c_clients = client_list; | ||
7777 | |||
7778 | client = client_list->client; | ||
7779 | goto exit; | ||
7780 | |||
7781 | err_no_client: | ||
7782 | kfree(client_list); | ||
7783 | exit: | ||
7784 | spin_unlock_irqrestore(&i2c_clients_lock, flags); | ||
7785 | return client; | ||
7786 | } | ||
7787 | |||
7788 | /* igb_read_i2c_byte - Reads 8 bit word over I2C | 7716 | /* igb_read_i2c_byte - Reads 8 bit word over I2C |
7789 | * @hw: pointer to hardware structure | 7717 | * @hw: pointer to hardware structure |
7790 | * @byte_offset: byte offset to read | 7718 | * @byte_offset: byte offset to read |
@@ -7798,7 +7726,7 @@ s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset, | |||
7798 | u8 dev_addr, u8 *data) | 7726 | u8 dev_addr, u8 *data) |
7799 | { | 7727 | { |
7800 | struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw); | 7728 | struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw); |
7801 | struct i2c_client *this_client = igb_get_i2c_client(adapter, dev_addr); | 7729 | struct i2c_client *this_client = adapter->i2c_client; |
7802 | s32 status; | 7730 | s32 status; |
7803 | u16 swfw_mask = 0; | 7731 | u16 swfw_mask = 0; |
7804 | 7732 | ||
@@ -7835,7 +7763,7 @@ s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset, | |||
7835 | u8 dev_addr, u8 data) | 7763 | u8 dev_addr, u8 data) |
7836 | { | 7764 | { |
7837 | struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw); | 7765 | struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw); |
7838 | struct i2c_client *this_client = igb_get_i2c_client(adapter, dev_addr); | 7766 | struct i2c_client *this_client = adapter->i2c_client; |
7839 | s32 status; | 7767 | s32 status; |
7840 | u16 swfw_mask = E1000_SWFW_PHY0_SM; | 7768 | u16 swfw_mask = E1000_SWFW_PHY0_SM; |
7841 | 7769 | ||
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 29140502b71a..6562c736a1d8 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c | |||
@@ -1081,6 +1081,45 @@ static void txq_set_fixed_prio_mode(struct tx_queue *txq) | |||
1081 | 1081 | ||
1082 | 1082 | ||
1083 | /* mii management interface *************************************************/ | 1083 | /* mii management interface *************************************************/ |
1084 | static void mv643xx_adjust_pscr(struct mv643xx_eth_private *mp) | ||
1085 | { | ||
1086 | u32 pscr = rdlp(mp, PORT_SERIAL_CONTROL); | ||
1087 | u32 autoneg_disable = FORCE_LINK_PASS | | ||
1088 | DISABLE_AUTO_NEG_SPEED_GMII | | ||
1089 | DISABLE_AUTO_NEG_FOR_FLOW_CTRL | | ||
1090 | DISABLE_AUTO_NEG_FOR_DUPLEX; | ||
1091 | |||
1092 | if (mp->phy->autoneg == AUTONEG_ENABLE) { | ||
1093 | /* enable auto negotiation */ | ||
1094 | pscr &= ~autoneg_disable; | ||
1095 | goto out_write; | ||
1096 | } | ||
1097 | |||
1098 | pscr |= autoneg_disable; | ||
1099 | |||
1100 | if (mp->phy->speed == SPEED_1000) { | ||
1101 | /* force gigabit, half duplex not supported */ | ||
1102 | pscr |= SET_GMII_SPEED_TO_1000; | ||
1103 | pscr |= SET_FULL_DUPLEX_MODE; | ||
1104 | goto out_write; | ||
1105 | } | ||
1106 | |||
1107 | pscr &= ~SET_GMII_SPEED_TO_1000; | ||
1108 | |||
1109 | if (mp->phy->speed == SPEED_100) | ||
1110 | pscr |= SET_MII_SPEED_TO_100; | ||
1111 | else | ||
1112 | pscr &= ~SET_MII_SPEED_TO_100; | ||
1113 | |||
1114 | if (mp->phy->duplex == DUPLEX_FULL) | ||
1115 | pscr |= SET_FULL_DUPLEX_MODE; | ||
1116 | else | ||
1117 | pscr &= ~SET_FULL_DUPLEX_MODE; | ||
1118 | |||
1119 | out_write: | ||
1120 | wrlp(mp, PORT_SERIAL_CONTROL, pscr); | ||
1121 | } | ||
1122 | |||
1084 | static irqreturn_t mv643xx_eth_err_irq(int irq, void *dev_id) | 1123 | static irqreturn_t mv643xx_eth_err_irq(int irq, void *dev_id) |
1085 | { | 1124 | { |
1086 | struct mv643xx_eth_shared_private *msp = dev_id; | 1125 | struct mv643xx_eth_shared_private *msp = dev_id; |
@@ -1499,6 +1538,7 @@ static int | |||
1499 | mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | 1538 | mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) |
1500 | { | 1539 | { |
1501 | struct mv643xx_eth_private *mp = netdev_priv(dev); | 1540 | struct mv643xx_eth_private *mp = netdev_priv(dev); |
1541 | int ret; | ||
1502 | 1542 | ||
1503 | if (mp->phy == NULL) | 1543 | if (mp->phy == NULL) |
1504 | return -EINVAL; | 1544 | return -EINVAL; |
@@ -1508,7 +1548,10 @@ mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
1508 | */ | 1548 | */ |
1509 | cmd->advertising &= ~ADVERTISED_1000baseT_Half; | 1549 | cmd->advertising &= ~ADVERTISED_1000baseT_Half; |
1510 | 1550 | ||
1511 | return phy_ethtool_sset(mp->phy, cmd); | 1551 | ret = phy_ethtool_sset(mp->phy, cmd); |
1552 | if (!ret) | ||
1553 | mv643xx_adjust_pscr(mp); | ||
1554 | return ret; | ||
1512 | } | 1555 | } |
1513 | 1556 | ||
1514 | static void mv643xx_eth_get_drvinfo(struct net_device *dev, | 1557 | static void mv643xx_eth_get_drvinfo(struct net_device *dev, |
@@ -2442,11 +2485,15 @@ static int mv643xx_eth_stop(struct net_device *dev) | |||
2442 | static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | 2485 | static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) |
2443 | { | 2486 | { |
2444 | struct mv643xx_eth_private *mp = netdev_priv(dev); | 2487 | struct mv643xx_eth_private *mp = netdev_priv(dev); |
2488 | int ret; | ||
2445 | 2489 | ||
2446 | if (mp->phy != NULL) | 2490 | if (mp->phy == NULL) |
2447 | return phy_mii_ioctl(mp->phy, ifr, cmd); | 2491 | return -ENOTSUPP; |
2448 | 2492 | ||
2449 | return -EOPNOTSUPP; | 2493 | ret = phy_mii_ioctl(mp->phy, ifr, cmd); |
2494 | if (!ret) | ||
2495 | mv643xx_adjust_pscr(mp); | ||
2496 | return ret; | ||
2450 | } | 2497 | } |
2451 | 2498 | ||
2452 | static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu) | 2499 | static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu) |
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c index 7e64033d7de3..0706623cfb96 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cq.c +++ b/drivers/net/ethernet/mellanox/mlx4/cq.c | |||
@@ -226,7 +226,7 @@ void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn) | |||
226 | 226 | ||
227 | static void mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn) | 227 | static void mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn) |
228 | { | 228 | { |
229 | u64 in_param; | 229 | u64 in_param = 0; |
230 | int err; | 230 | int err; |
231 | 231 | ||
232 | if (mlx4_is_mfunc(dev)) { | 232 | if (mlx4_is_mfunc(dev)) { |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index bb4d8d99f36d..995d4b6d5c1e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c | |||
@@ -565,34 +565,38 @@ static void mlx4_en_put_qp(struct mlx4_en_priv *priv) | |||
565 | struct mlx4_en_dev *mdev = priv->mdev; | 565 | struct mlx4_en_dev *mdev = priv->mdev; |
566 | struct mlx4_dev *dev = mdev->dev; | 566 | struct mlx4_dev *dev = mdev->dev; |
567 | int qpn = priv->base_qpn; | 567 | int qpn = priv->base_qpn; |
568 | u64 mac = mlx4_en_mac_to_u64(priv->dev->dev_addr); | 568 | u64 mac; |
569 | |||
570 | en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n", | ||
571 | priv->dev->dev_addr); | ||
572 | mlx4_unregister_mac(dev, priv->port, mac); | ||
573 | 569 | ||
574 | if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) { | 570 | if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) { |
571 | mac = mlx4_en_mac_to_u64(priv->dev->dev_addr); | ||
572 | en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n", | ||
573 | priv->dev->dev_addr); | ||
574 | mlx4_unregister_mac(dev, priv->port, mac); | ||
575 | } else { | ||
575 | struct mlx4_mac_entry *entry; | 576 | struct mlx4_mac_entry *entry; |
576 | struct hlist_node *tmp; | 577 | struct hlist_node *tmp; |
577 | struct hlist_head *bucket; | 578 | struct hlist_head *bucket; |
578 | unsigned int mac_hash; | 579 | unsigned int i; |
579 | 580 | ||
580 | mac_hash = priv->dev->dev_addr[MLX4_EN_MAC_HASH_IDX]; | 581 | for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) { |
581 | bucket = &priv->mac_hash[mac_hash]; | 582 | bucket = &priv->mac_hash[i]; |
582 | hlist_for_each_entry_safe(entry, tmp, bucket, hlist) { | 583 | hlist_for_each_entry_safe(entry, tmp, bucket, hlist) { |
583 | if (ether_addr_equal_64bits(entry->mac, | 584 | mac = mlx4_en_mac_to_u64(entry->mac); |
584 | priv->dev->dev_addr)) { | 585 | en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n", |
585 | en_dbg(DRV, priv, "Releasing qp: port %d, MAC %pM, qpn %d\n", | 586 | entry->mac); |
586 | priv->port, priv->dev->dev_addr, qpn); | ||
587 | mlx4_en_uc_steer_release(priv, entry->mac, | 587 | mlx4_en_uc_steer_release(priv, entry->mac, |
588 | qpn, entry->reg_id); | 588 | qpn, entry->reg_id); |
589 | mlx4_qp_release_range(dev, qpn, 1); | ||
590 | 589 | ||
590 | mlx4_unregister_mac(dev, priv->port, mac); | ||
591 | hlist_del_rcu(&entry->hlist); | 591 | hlist_del_rcu(&entry->hlist); |
592 | kfree_rcu(entry, rcu); | 592 | kfree_rcu(entry, rcu); |
593 | break; | ||
594 | } | 593 | } |
595 | } | 594 | } |
595 | |||
596 | en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n", | ||
597 | priv->port, qpn); | ||
598 | mlx4_qp_release_range(dev, qpn, 1); | ||
599 | priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC; | ||
596 | } | 600 | } |
597 | } | 601 | } |
598 | 602 | ||
@@ -650,28 +654,10 @@ u64 mlx4_en_mac_to_u64(u8 *addr) | |||
650 | return mac; | 654 | return mac; |
651 | } | 655 | } |
652 | 656 | ||
653 | static int mlx4_en_set_mac(struct net_device *dev, void *addr) | 657 | static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv) |
654 | { | ||
655 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
656 | struct mlx4_en_dev *mdev = priv->mdev; | ||
657 | struct sockaddr *saddr = addr; | ||
658 | |||
659 | if (!is_valid_ether_addr(saddr->sa_data)) | ||
660 | return -EADDRNOTAVAIL; | ||
661 | |||
662 | memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN); | ||
663 | queue_work(mdev->workqueue, &priv->mac_task); | ||
664 | return 0; | ||
665 | } | ||
666 | |||
667 | static void mlx4_en_do_set_mac(struct work_struct *work) | ||
668 | { | 658 | { |
669 | struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, | ||
670 | mac_task); | ||
671 | struct mlx4_en_dev *mdev = priv->mdev; | ||
672 | int err = 0; | 659 | int err = 0; |
673 | 660 | ||
674 | mutex_lock(&mdev->state_lock); | ||
675 | if (priv->port_up) { | 661 | if (priv->port_up) { |
676 | /* Remove old MAC and insert the new one */ | 662 | /* Remove old MAC and insert the new one */ |
677 | err = mlx4_en_replace_mac(priv, priv->base_qpn, | 663 | err = mlx4_en_replace_mac(priv, priv->base_qpn, |
@@ -683,7 +669,26 @@ static void mlx4_en_do_set_mac(struct work_struct *work) | |||
683 | } else | 669 | } else |
684 | en_dbg(HW, priv, "Port is down while registering mac, exiting...\n"); | 670 | en_dbg(HW, priv, "Port is down while registering mac, exiting...\n"); |
685 | 671 | ||
672 | return err; | ||
673 | } | ||
674 | |||
675 | static int mlx4_en_set_mac(struct net_device *dev, void *addr) | ||
676 | { | ||
677 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
678 | struct mlx4_en_dev *mdev = priv->mdev; | ||
679 | struct sockaddr *saddr = addr; | ||
680 | int err; | ||
681 | |||
682 | if (!is_valid_ether_addr(saddr->sa_data)) | ||
683 | return -EADDRNOTAVAIL; | ||
684 | |||
685 | memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN); | ||
686 | |||
687 | mutex_lock(&mdev->state_lock); | ||
688 | err = mlx4_en_do_set_mac(priv); | ||
686 | mutex_unlock(&mdev->state_lock); | 689 | mutex_unlock(&mdev->state_lock); |
690 | |||
691 | return err; | ||
687 | } | 692 | } |
688 | 693 | ||
689 | static void mlx4_en_clear_list(struct net_device *dev) | 694 | static void mlx4_en_clear_list(struct net_device *dev) |
@@ -1348,7 +1353,7 @@ static void mlx4_en_do_get_stats(struct work_struct *work) | |||
1348 | queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); | 1353 | queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); |
1349 | } | 1354 | } |
1350 | if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) { | 1355 | if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) { |
1351 | queue_work(mdev->workqueue, &priv->mac_task); | 1356 | mlx4_en_do_set_mac(priv); |
1352 | mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0; | 1357 | mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0; |
1353 | } | 1358 | } |
1354 | mutex_unlock(&mdev->state_lock); | 1359 | mutex_unlock(&mdev->state_lock); |
@@ -1828,9 +1833,11 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv) | |||
1828 | } | 1833 | } |
1829 | 1834 | ||
1830 | #ifdef CONFIG_RFS_ACCEL | 1835 | #ifdef CONFIG_RFS_ACCEL |
1831 | priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->mdev->dev->caps.comp_pool); | 1836 | if (priv->mdev->dev->caps.comp_pool) { |
1832 | if (!priv->dev->rx_cpu_rmap) | 1837 | priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->mdev->dev->caps.comp_pool); |
1833 | goto err; | 1838 | if (!priv->dev->rx_cpu_rmap) |
1839 | goto err; | ||
1840 | } | ||
1834 | #endif | 1841 | #endif |
1835 | 1842 | ||
1836 | return 0; | 1843 | return 0; |
@@ -2078,7 +2085,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, | |||
2078 | priv->msg_enable = MLX4_EN_MSG_LEVEL; | 2085 | priv->msg_enable = MLX4_EN_MSG_LEVEL; |
2079 | spin_lock_init(&priv->stats_lock); | 2086 | spin_lock_init(&priv->stats_lock); |
2080 | INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode); | 2087 | INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode); |
2081 | INIT_WORK(&priv->mac_task, mlx4_en_do_set_mac); | ||
2082 | INIT_WORK(&priv->watchdog_task, mlx4_en_restart); | 2088 | INIT_WORK(&priv->watchdog_task, mlx4_en_restart); |
2083 | INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate); | 2089 | INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate); |
2084 | INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats); | 2090 | INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats); |
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 50917eb3013e..f6245579962d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c | |||
@@ -787,6 +787,14 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave, | |||
787 | bmme_flags &= ~MLX4_BMME_FLAG_TYPE_2_WIN; | 787 | bmme_flags &= ~MLX4_BMME_FLAG_TYPE_2_WIN; |
788 | MLX4_PUT(outbox->buf, bmme_flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET); | 788 | MLX4_PUT(outbox->buf, bmme_flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET); |
789 | 789 | ||
790 | /* turn off device-managed steering capability if not enabled */ | ||
791 | if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) { | ||
792 | MLX4_GET(field, outbox->buf, | ||
793 | QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET); | ||
794 | field &= 0x7f; | ||
795 | MLX4_PUT(outbox->buf, field, | ||
796 | QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET); | ||
797 | } | ||
790 | return 0; | 798 | return 0; |
791 | } | 799 | } |
792 | 800 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index d180bc46826a..16abde20e1fc 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c | |||
@@ -1555,7 +1555,7 @@ void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx) | |||
1555 | 1555 | ||
1556 | void mlx4_counter_free(struct mlx4_dev *dev, u32 idx) | 1556 | void mlx4_counter_free(struct mlx4_dev *dev, u32 idx) |
1557 | { | 1557 | { |
1558 | u64 in_param; | 1558 | u64 in_param = 0; |
1559 | 1559 | ||
1560 | if (mlx4_is_mfunc(dev)) { | 1560 | if (mlx4_is_mfunc(dev)) { |
1561 | set_param_l(&in_param, idx); | 1561 | set_param_l(&in_param, idx); |
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index cf883345af88..d738454116a0 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h | |||
@@ -1235,7 +1235,7 @@ int mlx4_get_qp_per_mgm(struct mlx4_dev *dev); | |||
1235 | 1235 | ||
1236 | static inline void set_param_l(u64 *arg, u32 val) | 1236 | static inline void set_param_l(u64 *arg, u32 val) |
1237 | { | 1237 | { |
1238 | *((u32 *)arg) = val; | 1238 | *arg = (*arg & 0xffffffff00000000ULL) | (u64) val; |
1239 | } | 1239 | } |
1240 | 1240 | ||
1241 | static inline void set_param_h(u64 *arg, u32 val) | 1241 | static inline void set_param_h(u64 *arg, u32 val) |
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index c313d7e943a9..f710b7ce0dcb 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | |||
@@ -509,7 +509,6 @@ struct mlx4_en_priv { | |||
509 | struct mlx4_en_cq rx_cq[MAX_RX_RINGS]; | 509 | struct mlx4_en_cq rx_cq[MAX_RX_RINGS]; |
510 | struct mlx4_qp drop_qp; | 510 | struct mlx4_qp drop_qp; |
511 | struct work_struct rx_mode_task; | 511 | struct work_struct rx_mode_task; |
512 | struct work_struct mac_task; | ||
513 | struct work_struct watchdog_task; | 512 | struct work_struct watchdog_task; |
514 | struct work_struct linkstate_task; | 513 | struct work_struct linkstate_task; |
515 | struct delayed_work stats_task; | 514 | struct delayed_work stats_task; |
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c index 602ca9bf78e4..f91719a08cba 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mr.c +++ b/drivers/net/ethernet/mellanox/mlx4/mr.c | |||
@@ -183,7 +183,7 @@ u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order) | |||
183 | 183 | ||
184 | static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order) | 184 | static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order) |
185 | { | 185 | { |
186 | u64 in_param; | 186 | u64 in_param = 0; |
187 | u64 out_param; | 187 | u64 out_param; |
188 | int err; | 188 | int err; |
189 | 189 | ||
@@ -240,7 +240,7 @@ void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order) | |||
240 | 240 | ||
241 | static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order) | 241 | static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order) |
242 | { | 242 | { |
243 | u64 in_param; | 243 | u64 in_param = 0; |
244 | int err; | 244 | int err; |
245 | 245 | ||
246 | if (mlx4_is_mfunc(dev)) { | 246 | if (mlx4_is_mfunc(dev)) { |
@@ -351,7 +351,7 @@ void __mlx4_mpt_release(struct mlx4_dev *dev, u32 index) | |||
351 | 351 | ||
352 | static void mlx4_mpt_release(struct mlx4_dev *dev, u32 index) | 352 | static void mlx4_mpt_release(struct mlx4_dev *dev, u32 index) |
353 | { | 353 | { |
354 | u64 in_param; | 354 | u64 in_param = 0; |
355 | 355 | ||
356 | if (mlx4_is_mfunc(dev)) { | 356 | if (mlx4_is_mfunc(dev)) { |
357 | set_param_l(&in_param, index); | 357 | set_param_l(&in_param, index); |
@@ -374,7 +374,7 @@ int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index) | |||
374 | 374 | ||
375 | static int mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index) | 375 | static int mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index) |
376 | { | 376 | { |
377 | u64 param; | 377 | u64 param = 0; |
378 | 378 | ||
379 | if (mlx4_is_mfunc(dev)) { | 379 | if (mlx4_is_mfunc(dev)) { |
380 | set_param_l(¶m, index); | 380 | set_param_l(¶m, index); |
@@ -395,7 +395,7 @@ void __mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index) | |||
395 | 395 | ||
396 | static void mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index) | 396 | static void mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index) |
397 | { | 397 | { |
398 | u64 in_param; | 398 | u64 in_param = 0; |
399 | 399 | ||
400 | if (mlx4_is_mfunc(dev)) { | 400 | if (mlx4_is_mfunc(dev)) { |
401 | set_param_l(&in_param, index); | 401 | set_param_l(&in_param, index); |
diff --git a/drivers/net/ethernet/mellanox/mlx4/pd.c b/drivers/net/ethernet/mellanox/mlx4/pd.c index 1ac88637ad9d..00f223acada7 100644 --- a/drivers/net/ethernet/mellanox/mlx4/pd.c +++ b/drivers/net/ethernet/mellanox/mlx4/pd.c | |||
@@ -101,7 +101,7 @@ void __mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn) | |||
101 | 101 | ||
102 | void mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn) | 102 | void mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn) |
103 | { | 103 | { |
104 | u64 in_param; | 104 | u64 in_param = 0; |
105 | int err; | 105 | int err; |
106 | 106 | ||
107 | if (mlx4_is_mfunc(dev)) { | 107 | if (mlx4_is_mfunc(dev)) { |
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c index 719ead15e491..10c57c86388b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/port.c +++ b/drivers/net/ethernet/mellanox/mlx4/port.c | |||
@@ -175,7 +175,7 @@ EXPORT_SYMBOL_GPL(__mlx4_register_mac); | |||
175 | 175 | ||
176 | int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac) | 176 | int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac) |
177 | { | 177 | { |
178 | u64 out_param; | 178 | u64 out_param = 0; |
179 | int err; | 179 | int err; |
180 | 180 | ||
181 | if (mlx4_is_mfunc(dev)) { | 181 | if (mlx4_is_mfunc(dev)) { |
@@ -222,7 +222,7 @@ EXPORT_SYMBOL_GPL(__mlx4_unregister_mac); | |||
222 | 222 | ||
223 | void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac) | 223 | void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac) |
224 | { | 224 | { |
225 | u64 out_param; | 225 | u64 out_param = 0; |
226 | 226 | ||
227 | if (mlx4_is_mfunc(dev)) { | 227 | if (mlx4_is_mfunc(dev)) { |
228 | set_param_l(&out_param, port); | 228 | set_param_l(&out_param, port); |
@@ -361,7 +361,7 @@ out: | |||
361 | 361 | ||
362 | int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index) | 362 | int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index) |
363 | { | 363 | { |
364 | u64 out_param; | 364 | u64 out_param = 0; |
365 | int err; | 365 | int err; |
366 | 366 | ||
367 | if (mlx4_is_mfunc(dev)) { | 367 | if (mlx4_is_mfunc(dev)) { |
@@ -406,7 +406,7 @@ out: | |||
406 | 406 | ||
407 | void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index) | 407 | void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index) |
408 | { | 408 | { |
409 | u64 in_param; | 409 | u64 in_param = 0; |
410 | int err; | 410 | int err; |
411 | 411 | ||
412 | if (mlx4_is_mfunc(dev)) { | 412 | if (mlx4_is_mfunc(dev)) { |
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c index 81e2abe07bbb..e891b058c1be 100644 --- a/drivers/net/ethernet/mellanox/mlx4/qp.c +++ b/drivers/net/ethernet/mellanox/mlx4/qp.c | |||
@@ -222,7 +222,7 @@ int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, | |||
222 | 222 | ||
223 | int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base) | 223 | int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base) |
224 | { | 224 | { |
225 | u64 in_param; | 225 | u64 in_param = 0; |
226 | u64 out_param; | 226 | u64 out_param; |
227 | int err; | 227 | int err; |
228 | 228 | ||
@@ -255,7 +255,7 @@ void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt) | |||
255 | 255 | ||
256 | void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt) | 256 | void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt) |
257 | { | 257 | { |
258 | u64 in_param; | 258 | u64 in_param = 0; |
259 | int err; | 259 | int err; |
260 | 260 | ||
261 | if (mlx4_is_mfunc(dev)) { | 261 | if (mlx4_is_mfunc(dev)) { |
@@ -319,7 +319,7 @@ err_out: | |||
319 | 319 | ||
320 | static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn) | 320 | static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn) |
321 | { | 321 | { |
322 | u64 param; | 322 | u64 param = 0; |
323 | 323 | ||
324 | if (mlx4_is_mfunc(dev)) { | 324 | if (mlx4_is_mfunc(dev)) { |
325 | set_param_l(¶m, qpn); | 325 | set_param_l(¶m, qpn); |
@@ -344,7 +344,7 @@ void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn) | |||
344 | 344 | ||
345 | static void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn) | 345 | static void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn) |
346 | { | 346 | { |
347 | u64 in_param; | 347 | u64 in_param = 0; |
348 | 348 | ||
349 | if (mlx4_is_mfunc(dev)) { | 349 | if (mlx4_is_mfunc(dev)) { |
350 | set_param_l(&in_param, qpn); | 350 | set_param_l(&in_param, qpn); |
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 083fb48dc3d7..2995687f1aee 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | |||
@@ -2990,6 +2990,9 @@ int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave, | |||
2990 | u8 steer_type_mask = 2; | 2990 | u8 steer_type_mask = 2; |
2991 | enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1; | 2991 | enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1; |
2992 | 2992 | ||
2993 | if (dev->caps.steering_mode != MLX4_STEERING_MODE_B0) | ||
2994 | return -EINVAL; | ||
2995 | |||
2993 | qpn = vhcr->in_modifier & 0xffffff; | 2996 | qpn = vhcr->in_modifier & 0xffffff; |
2994 | err = get_res(dev, slave, qpn, RES_QP, &rqp); | 2997 | err = get_res(dev, slave, qpn, RES_QP, &rqp); |
2995 | if (err) | 2998 | if (err) |
diff --git a/drivers/net/ethernet/mellanox/mlx4/srq.c b/drivers/net/ethernet/mellanox/mlx4/srq.c index feda6c00829f..e329fe1f11b7 100644 --- a/drivers/net/ethernet/mellanox/mlx4/srq.c +++ b/drivers/net/ethernet/mellanox/mlx4/srq.c | |||
@@ -149,7 +149,7 @@ void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn) | |||
149 | 149 | ||
150 | static void mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn) | 150 | static void mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn) |
151 | { | 151 | { |
152 | u64 in_param; | 152 | u64 in_param = 0; |
153 | 153 | ||
154 | if (mlx4_is_mfunc(dev)) { | 154 | if (mlx4_is_mfunc(dev)) { |
155 | set_param_l(&in_param, srqn); | 155 | set_param_l(&in_param, srqn); |
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h index 50247dfe8f57..d2f790df6dcb 100644 --- a/drivers/net/ethernet/sfc/efx.h +++ b/drivers/net/ethernet/sfc/efx.h | |||
@@ -171,9 +171,9 @@ static inline void efx_device_detach_sync(struct efx_nic *efx) | |||
171 | * TX scheduler is stopped when we're done and before | 171 | * TX scheduler is stopped when we're done and before |
172 | * netif_device_present() becomes false. | 172 | * netif_device_present() becomes false. |
173 | */ | 173 | */ |
174 | netif_tx_lock(dev); | 174 | netif_tx_lock_bh(dev); |
175 | netif_device_detach(dev); | 175 | netif_device_detach(dev); |
176 | netif_tx_unlock(dev); | 176 | netif_tx_unlock_bh(dev); |
177 | } | 177 | } |
178 | 178 | ||
179 | #endif /* EFX_EFX_H */ | 179 | #endif /* EFX_EFX_H */ |
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index 879ff5849bbd..bb579a6128c8 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c | |||
@@ -215,7 +215,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue) | |||
215 | rx_buf = efx_rx_buffer(rx_queue, index); | 215 | rx_buf = efx_rx_buffer(rx_queue, index); |
216 | rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN; | 216 | rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN; |
217 | rx_buf->u.page = page; | 217 | rx_buf->u.page = page; |
218 | rx_buf->page_offset = page_offset; | 218 | rx_buf->page_offset = page_offset + EFX_PAGE_IP_ALIGN; |
219 | rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN; | 219 | rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN; |
220 | rx_buf->flags = EFX_RX_BUF_PAGE; | 220 | rx_buf->flags = EFX_RX_BUF_PAGE; |
221 | ++rx_queue->added_count; | 221 | ++rx_queue->added_count; |
diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c index e5b19b056909..3c4d6274bb9b 100644 --- a/drivers/net/hippi/rrunner.c +++ b/drivers/net/hippi/rrunner.c | |||
@@ -202,6 +202,9 @@ static int rr_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
202 | return 0; | 202 | return 0; |
203 | 203 | ||
204 | out: | 204 | out: |
205 | if (rrpriv->evt_ring) | ||
206 | pci_free_consistent(pdev, EVT_RING_SIZE, rrpriv->evt_ring, | ||
207 | rrpriv->evt_ring_dma); | ||
205 | if (rrpriv->rx_ring) | 208 | if (rrpriv->rx_ring) |
206 | pci_free_consistent(pdev, RX_TOTAL_SIZE, rrpriv->rx_ring, | 209 | pci_free_consistent(pdev, RX_TOTAL_SIZE, rrpriv->rx_ring, |
207 | rrpriv->rx_ring_dma); | 210 | rrpriv->rx_ring_dma); |
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 417b2af1aa80..73abbc1655d5 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c | |||
@@ -660,6 +660,7 @@ void macvlan_common_setup(struct net_device *dev) | |||
660 | ether_setup(dev); | 660 | ether_setup(dev); |
661 | 661 | ||
662 | dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); | 662 | dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); |
663 | dev->priv_flags |= IFF_UNICAST_FLT; | ||
663 | dev->netdev_ops = &macvlan_netdev_ops; | 664 | dev->netdev_ops = &macvlan_netdev_ops; |
664 | dev->destructor = free_netdev; | 665 | dev->destructor = free_netdev; |
665 | dev->header_ops = &macvlan_hard_header_ops, | 666 | dev->header_ops = &macvlan_hard_header_ops, |
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 05c5efe84591..bf3419297875 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c | |||
@@ -1138,6 +1138,8 @@ static int team_port_del(struct team *team, struct net_device *port_dev) | |||
1138 | netdev_upper_dev_unlink(port_dev, dev); | 1138 | netdev_upper_dev_unlink(port_dev, dev); |
1139 | team_port_disable_netpoll(port); | 1139 | team_port_disable_netpoll(port); |
1140 | vlan_vids_del_by_dev(port_dev, dev); | 1140 | vlan_vids_del_by_dev(port_dev, dev); |
1141 | dev_uc_unsync(port_dev, dev); | ||
1142 | dev_mc_unsync(port_dev, dev); | ||
1141 | dev_close(port_dev); | 1143 | dev_close(port_dev); |
1142 | team_port_leave(team, port); | 1144 | team_port_leave(team, port); |
1143 | 1145 | ||
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 2c6a22e278ea..b7c457adc0dc 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
@@ -747,6 +747,8 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) | |||
747 | goto drop; | 747 | goto drop; |
748 | skb_orphan(skb); | 748 | skb_orphan(skb); |
749 | 749 | ||
750 | nf_reset(skb); | ||
751 | |||
750 | /* Enqueue packet */ | 752 | /* Enqueue packet */ |
751 | skb_queue_tail(&tfile->socket.sk->sk_receive_queue, skb); | 753 | skb_queue_tail(&tfile->socket.sk->sk_receive_queue, skb); |
752 | 754 | ||
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 4aad350e4dae..eae7a03d4f9b 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c | |||
@@ -2958,6 +2958,7 @@ vmxnet3_probe_device(struct pci_dev *pdev, | |||
2958 | 2958 | ||
2959 | adapter->num_rx_queues = num_rx_queues; | 2959 | adapter->num_rx_queues = num_rx_queues; |
2960 | adapter->num_tx_queues = num_tx_queues; | 2960 | adapter->num_tx_queues = num_tx_queues; |
2961 | adapter->rx_buf_per_pkt = 1; | ||
2961 | 2962 | ||
2962 | size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues; | 2963 | size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues; |
2963 | size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues; | 2964 | size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues; |
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c index a0feb17a0238..63a124340cbe 100644 --- a/drivers/net/vmxnet3/vmxnet3_ethtool.c +++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c | |||
@@ -472,6 +472,12 @@ vmxnet3_set_ringparam(struct net_device *netdev, | |||
472 | VMXNET3_RX_RING_MAX_SIZE) | 472 | VMXNET3_RX_RING_MAX_SIZE) |
473 | return -EINVAL; | 473 | return -EINVAL; |
474 | 474 | ||
475 | /* if adapter not yet initialized, do nothing */ | ||
476 | if (adapter->rx_buf_per_pkt == 0) { | ||
477 | netdev_err(netdev, "adapter not completely initialized, " | ||
478 | "ring size cannot be changed yet\n"); | ||
479 | return -EOPNOTSUPP; | ||
480 | } | ||
475 | 481 | ||
476 | /* round it up to a multiple of VMXNET3_RING_SIZE_ALIGN */ | 482 | /* round it up to a multiple of VMXNET3_RING_SIZE_ALIGN */ |
477 | new_tx_ring_size = (param->tx_pending + VMXNET3_RING_SIZE_MASK) & | 483 | new_tx_ring_size = (param->tx_pending + VMXNET3_RING_SIZE_MASK) & |
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h index 3198384689d9..35418146fa17 100644 --- a/drivers/net/vmxnet3/vmxnet3_int.h +++ b/drivers/net/vmxnet3/vmxnet3_int.h | |||
@@ -70,10 +70,10 @@ | |||
70 | /* | 70 | /* |
71 | * Version numbers | 71 | * Version numbers |
72 | */ | 72 | */ |
73 | #define VMXNET3_DRIVER_VERSION_STRING "1.1.29.0-k" | 73 | #define VMXNET3_DRIVER_VERSION_STRING "1.1.30.0-k" |
74 | 74 | ||
75 | /* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */ | 75 | /* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */ |
76 | #define VMXNET3_DRIVER_VERSION_NUM 0x01011D00 | 76 | #define VMXNET3_DRIVER_VERSION_NUM 0x01011E00 |
77 | 77 | ||
78 | #if defined(CONFIG_PCI_MSI) | 78 | #if defined(CONFIG_PCI_MSI) |
79 | /* RSS only makes sense if MSI-X is supported. */ | 79 | /* RSS only makes sense if MSI-X is supported. */ |
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index f10e58ac9c1b..7cee7a3068ec 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c | |||
@@ -961,6 +961,8 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev) | |||
961 | iph->ttl = ttl ? : ip4_dst_hoplimit(&rt->dst); | 961 | iph->ttl = ttl ? : ip4_dst_hoplimit(&rt->dst); |
962 | tunnel_ip_select_ident(skb, old_iph, &rt->dst); | 962 | tunnel_ip_select_ident(skb, old_iph, &rt->dst); |
963 | 963 | ||
964 | nf_reset(skb); | ||
965 | |||
964 | vxlan_set_owner(dev, skb); | 966 | vxlan_set_owner(dev, skb); |
965 | 967 | ||
966 | /* See iptunnel_xmit() */ | 968 | /* See iptunnel_xmit() */ |
@@ -1504,6 +1506,14 @@ static __net_init int vxlan_init_net(struct net *net) | |||
1504 | static __net_exit void vxlan_exit_net(struct net *net) | 1506 | static __net_exit void vxlan_exit_net(struct net *net) |
1505 | { | 1507 | { |
1506 | struct vxlan_net *vn = net_generic(net, vxlan_net_id); | 1508 | struct vxlan_net *vn = net_generic(net, vxlan_net_id); |
1509 | struct vxlan_dev *vxlan; | ||
1510 | unsigned h; | ||
1511 | |||
1512 | rtnl_lock(); | ||
1513 | for (h = 0; h < VNI_HASH_SIZE; ++h) | ||
1514 | hlist_for_each_entry(vxlan, &vn->vni_list[h], hlist) | ||
1515 | dev_close(vxlan->dev); | ||
1516 | rtnl_unlock(); | ||
1507 | 1517 | ||
1508 | if (vn->sock) { | 1518 | if (vn->sock) { |
1509 | sk_release_kernel(vn->sock->sk); | 1519 | sk_release_kernel(vn->sock->sk); |
diff --git a/drivers/net/wireless/iwlwifi/dvm/sta.c b/drivers/net/wireless/iwlwifi/dvm/sta.c index 94ef33838bc6..b775769f8322 100644 --- a/drivers/net/wireless/iwlwifi/dvm/sta.c +++ b/drivers/net/wireless/iwlwifi/dvm/sta.c | |||
@@ -151,7 +151,7 @@ int iwl_send_add_sta(struct iwl_priv *priv, | |||
151 | sta_id, sta->sta.addr, flags & CMD_ASYNC ? "a" : ""); | 151 | sta_id, sta->sta.addr, flags & CMD_ASYNC ? "a" : ""); |
152 | 152 | ||
153 | if (!(flags & CMD_ASYNC)) { | 153 | if (!(flags & CMD_ASYNC)) { |
154 | cmd.flags |= CMD_WANT_SKB | CMD_WANT_HCMD; | 154 | cmd.flags |= CMD_WANT_SKB; |
155 | might_sleep(); | 155 | might_sleep(); |
156 | } | 156 | } |
157 | 157 | ||
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h index 10f01793d7a6..81aa91fab5aa 100644 --- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h +++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h | |||
@@ -363,7 +363,7 @@ TRACE_EVENT(iwlwifi_dev_hcmd, | |||
363 | __entry->flags = cmd->flags; | 363 | __entry->flags = cmd->flags; |
364 | memcpy(__get_dynamic_array(hcmd), hdr, sizeof(*hdr)); | 364 | memcpy(__get_dynamic_array(hcmd), hdr, sizeof(*hdr)); |
365 | 365 | ||
366 | for (i = 0; i < IWL_MAX_CMD_TFDS; i++) { | 366 | for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { |
367 | if (!cmd->len[i]) | 367 | if (!cmd->len[i]) |
368 | continue; | 368 | continue; |
369 | memcpy((u8 *)__get_dynamic_array(hcmd) + offset, | 369 | memcpy((u8 *)__get_dynamic_array(hcmd) + offset, |
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c index 6f228bb2b844..fbfd2d137117 100644 --- a/drivers/net/wireless/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/iwlwifi/iwl-drv.c | |||
@@ -1102,7 +1102,6 @@ void iwl_drv_stop(struct iwl_drv *drv) | |||
1102 | 1102 | ||
1103 | /* shared module parameters */ | 1103 | /* shared module parameters */ |
1104 | struct iwl_mod_params iwlwifi_mod_params = { | 1104 | struct iwl_mod_params iwlwifi_mod_params = { |
1105 | .amsdu_size_8K = 1, | ||
1106 | .restart_fw = 1, | 1105 | .restart_fw = 1, |
1107 | .plcp_check = true, | 1106 | .plcp_check = true, |
1108 | .bt_coex_active = true, | 1107 | .bt_coex_active = true, |
@@ -1207,7 +1206,7 @@ MODULE_PARM_DESC(11n_disable, | |||
1207 | "disable 11n functionality, bitmap: 1: full, 2: agg TX, 4: agg RX"); | 1206 | "disable 11n functionality, bitmap: 1: full, 2: agg TX, 4: agg RX"); |
1208 | module_param_named(amsdu_size_8K, iwlwifi_mod_params.amsdu_size_8K, | 1207 | module_param_named(amsdu_size_8K, iwlwifi_mod_params.amsdu_size_8K, |
1209 | int, S_IRUGO); | 1208 | int, S_IRUGO); |
1210 | MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size"); | 1209 | MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size (default 0)"); |
1211 | module_param_named(fw_restart, iwlwifi_mod_params.restart_fw, int, S_IRUGO); | 1210 | module_param_named(fw_restart, iwlwifi_mod_params.restart_fw, int, S_IRUGO); |
1212 | MODULE_PARM_DESC(fw_restart, "restart firmware in case of error"); | 1211 | MODULE_PARM_DESC(fw_restart, "restart firmware in case of error"); |
1213 | 1212 | ||
diff --git a/drivers/net/wireless/iwlwifi/iwl-modparams.h b/drivers/net/wireless/iwlwifi/iwl-modparams.h index e5e3a79eae2f..2c2a729092f5 100644 --- a/drivers/net/wireless/iwlwifi/iwl-modparams.h +++ b/drivers/net/wireless/iwlwifi/iwl-modparams.h | |||
@@ -91,7 +91,7 @@ enum iwl_power_level { | |||
91 | * @sw_crypto: using hardware encryption, default = 0 | 91 | * @sw_crypto: using hardware encryption, default = 0 |
92 | * @disable_11n: disable 11n capabilities, default = 0, | 92 | * @disable_11n: disable 11n capabilities, default = 0, |
93 | * use IWL_DISABLE_HT_* constants | 93 | * use IWL_DISABLE_HT_* constants |
94 | * @amsdu_size_8K: enable 8K amsdu size, default = 1 | 94 | * @amsdu_size_8K: enable 8K amsdu size, default = 0 |
95 | * @restart_fw: restart firmware, default = 1 | 95 | * @restart_fw: restart firmware, default = 1 |
96 | * @plcp_check: enable plcp health check, default = true | 96 | * @plcp_check: enable plcp health check, default = true |
97 | * @wd_disable: enable stuck queue check, default = 0 | 97 | * @wd_disable: enable stuck queue check, default = 0 |
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h index 8c7bec6b9a0b..0cac2b7af78b 100644 --- a/drivers/net/wireless/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/iwlwifi/iwl-trans.h | |||
@@ -186,19 +186,13 @@ struct iwl_rx_packet { | |||
186 | * @CMD_ASYNC: Return right away and don't want for the response | 186 | * @CMD_ASYNC: Return right away and don't want for the response |
187 | * @CMD_WANT_SKB: valid only with CMD_SYNC. The caller needs the buffer of the | 187 | * @CMD_WANT_SKB: valid only with CMD_SYNC. The caller needs the buffer of the |
188 | * response. The caller needs to call iwl_free_resp when done. | 188 | * response. The caller needs to call iwl_free_resp when done. |
189 | * @CMD_WANT_HCMD: The caller needs to get the HCMD that was sent in the | ||
190 | * response handler. Chunks flagged by %IWL_HCMD_DFL_NOCOPY won't be | ||
191 | * copied. The pointer passed to the response handler is in the transport | ||
192 | * ownership and don't need to be freed by the op_mode. This also means | ||
193 | * that the pointer is invalidated after the op_mode's handler returns. | ||
194 | * @CMD_ON_DEMAND: This command is sent by the test mode pipe. | 189 | * @CMD_ON_DEMAND: This command is sent by the test mode pipe. |
195 | */ | 190 | */ |
196 | enum CMD_MODE { | 191 | enum CMD_MODE { |
197 | CMD_SYNC = 0, | 192 | CMD_SYNC = 0, |
198 | CMD_ASYNC = BIT(0), | 193 | CMD_ASYNC = BIT(0), |
199 | CMD_WANT_SKB = BIT(1), | 194 | CMD_WANT_SKB = BIT(1), |
200 | CMD_WANT_HCMD = BIT(2), | 195 | CMD_ON_DEMAND = BIT(2), |
201 | CMD_ON_DEMAND = BIT(3), | ||
202 | }; | 196 | }; |
203 | 197 | ||
204 | #define DEF_CMD_PAYLOAD_SIZE 320 | 198 | #define DEF_CMD_PAYLOAD_SIZE 320 |
@@ -217,7 +211,11 @@ struct iwl_device_cmd { | |||
217 | 211 | ||
218 | #define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd)) | 212 | #define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd)) |
219 | 213 | ||
220 | #define IWL_MAX_CMD_TFDS 2 | 214 | /* |
215 | * number of transfer buffers (fragments) per transmit frame descriptor; | ||
216 | * this is just the driver's idea, the hardware supports 20 | ||
217 | */ | ||
218 | #define IWL_MAX_CMD_TBS_PER_TFD 2 | ||
221 | 219 | ||
222 | /** | 220 | /** |
223 | * struct iwl_hcmd_dataflag - flag for each one of the chunks of the command | 221 | * struct iwl_hcmd_dataflag - flag for each one of the chunks of the command |
@@ -254,15 +252,15 @@ enum iwl_hcmd_dataflag { | |||
254 | * @id: id of the host command | 252 | * @id: id of the host command |
255 | */ | 253 | */ |
256 | struct iwl_host_cmd { | 254 | struct iwl_host_cmd { |
257 | const void *data[IWL_MAX_CMD_TFDS]; | 255 | const void *data[IWL_MAX_CMD_TBS_PER_TFD]; |
258 | struct iwl_rx_packet *resp_pkt; | 256 | struct iwl_rx_packet *resp_pkt; |
259 | unsigned long _rx_page_addr; | 257 | unsigned long _rx_page_addr; |
260 | u32 _rx_page_order; | 258 | u32 _rx_page_order; |
261 | int handler_status; | 259 | int handler_status; |
262 | 260 | ||
263 | u32 flags; | 261 | u32 flags; |
264 | u16 len[IWL_MAX_CMD_TFDS]; | 262 | u16 len[IWL_MAX_CMD_TBS_PER_TFD]; |
265 | u8 dataflags[IWL_MAX_CMD_TFDS]; | 263 | u8 dataflags[IWL_MAX_CMD_TBS_PER_TFD]; |
266 | u8 id; | 264 | u8 id; |
267 | }; | 265 | }; |
268 | 266 | ||
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h index 23eebda848b0..2adb61f103f4 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h | |||
@@ -762,18 +762,20 @@ struct iwl_phy_context_cmd { | |||
762 | #define IWL_RX_INFO_PHY_CNT 8 | 762 | #define IWL_RX_INFO_PHY_CNT 8 |
763 | #define IWL_RX_INFO_AGC_IDX 1 | 763 | #define IWL_RX_INFO_AGC_IDX 1 |
764 | #define IWL_RX_INFO_RSSI_AB_IDX 2 | 764 | #define IWL_RX_INFO_RSSI_AB_IDX 2 |
765 | #define IWL_RX_INFO_RSSI_C_IDX 3 | 765 | #define IWL_OFDM_AGC_A_MSK 0x0000007f |
766 | #define IWL_OFDM_AGC_DB_MSK 0xfe00 | 766 | #define IWL_OFDM_AGC_A_POS 0 |
767 | #define IWL_OFDM_AGC_DB_POS 9 | 767 | #define IWL_OFDM_AGC_B_MSK 0x00003f80 |
768 | #define IWL_OFDM_AGC_B_POS 7 | ||
769 | #define IWL_OFDM_AGC_CODE_MSK 0x3fe00000 | ||
770 | #define IWL_OFDM_AGC_CODE_POS 20 | ||
768 | #define IWL_OFDM_RSSI_INBAND_A_MSK 0x00ff | 771 | #define IWL_OFDM_RSSI_INBAND_A_MSK 0x00ff |
769 | #define IWL_OFDM_RSSI_ALLBAND_A_MSK 0xff00 | ||
770 | #define IWL_OFDM_RSSI_A_POS 0 | 772 | #define IWL_OFDM_RSSI_A_POS 0 |
773 | #define IWL_OFDM_RSSI_ALLBAND_A_MSK 0xff00 | ||
774 | #define IWL_OFDM_RSSI_ALLBAND_A_POS 8 | ||
771 | #define IWL_OFDM_RSSI_INBAND_B_MSK 0xff0000 | 775 | #define IWL_OFDM_RSSI_INBAND_B_MSK 0xff0000 |
772 | #define IWL_OFDM_RSSI_ALLBAND_B_MSK 0xff000000 | ||
773 | #define IWL_OFDM_RSSI_B_POS 16 | 776 | #define IWL_OFDM_RSSI_B_POS 16 |
774 | #define IWL_OFDM_RSSI_INBAND_C_MSK 0x00ff | 777 | #define IWL_OFDM_RSSI_ALLBAND_B_MSK 0xff000000 |
775 | #define IWL_OFDM_RSSI_ALLBAND_C_MSK 0xff00 | 778 | #define IWL_OFDM_RSSI_ALLBAND_B_POS 24 |
776 | #define IWL_OFDM_RSSI_C_POS 0 | ||
777 | 779 | ||
778 | /** | 780 | /** |
779 | * struct iwl_rx_phy_info - phy info | 781 | * struct iwl_rx_phy_info - phy info |
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c index d3d959db03a9..500f818dba04 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/iwlwifi/mvm/fw.c | |||
@@ -79,17 +79,8 @@ | |||
79 | #define UCODE_VALID_OK cpu_to_le32(0x1) | 79 | #define UCODE_VALID_OK cpu_to_le32(0x1) |
80 | 80 | ||
81 | /* Default calibration values for WkP - set to INIT image w/o running */ | 81 | /* Default calibration values for WkP - set to INIT image w/o running */ |
82 | static const u8 wkp_calib_values_bb_filter[] = { 0xbf, 0x00, 0x5f, 0x00, 0x2f, | ||
83 | 0x00, 0x18, 0x00 }; | ||
84 | static const u8 wkp_calib_values_rx_dc[] = { 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, | ||
85 | 0x7f, 0x7f, 0x7f }; | ||
86 | static const u8 wkp_calib_values_tx_lo[] = { 0x00, 0x00, 0x00, 0x00 }; | ||
87 | static const u8 wkp_calib_values_tx_iq[] = { 0xff, 0x00, 0xff, 0x00, 0x00, | ||
88 | 0x00 }; | ||
89 | static const u8 wkp_calib_values_rx_iq[] = { 0xff, 0x00, 0x00, 0x00 }; | ||
90 | static const u8 wkp_calib_values_rx_iq_skew[] = { 0x00, 0x00, 0x01, 0x00 }; | 82 | static const u8 wkp_calib_values_rx_iq_skew[] = { 0x00, 0x00, 0x01, 0x00 }; |
91 | static const u8 wkp_calib_values_tx_iq_skew[] = { 0x01, 0x00, 0x00, 0x00 }; | 83 | static const u8 wkp_calib_values_tx_iq_skew[] = { 0x01, 0x00, 0x00, 0x00 }; |
92 | static const u8 wkp_calib_values_xtal[] = { 0xd2, 0xd2 }; | ||
93 | 84 | ||
94 | struct iwl_calib_default_data { | 85 | struct iwl_calib_default_data { |
95 | u16 size; | 86 | u16 size; |
@@ -99,12 +90,7 @@ struct iwl_calib_default_data { | |||
99 | #define CALIB_SIZE_N_DATA(_buf) {.size = sizeof(_buf), .data = &_buf} | 90 | #define CALIB_SIZE_N_DATA(_buf) {.size = sizeof(_buf), .data = &_buf} |
100 | 91 | ||
101 | static const struct iwl_calib_default_data wkp_calib_default_data[12] = { | 92 | static const struct iwl_calib_default_data wkp_calib_default_data[12] = { |
102 | [5] = CALIB_SIZE_N_DATA(wkp_calib_values_rx_dc), | ||
103 | [6] = CALIB_SIZE_N_DATA(wkp_calib_values_bb_filter), | ||
104 | [7] = CALIB_SIZE_N_DATA(wkp_calib_values_tx_lo), | ||
105 | [8] = CALIB_SIZE_N_DATA(wkp_calib_values_tx_iq), | ||
106 | [9] = CALIB_SIZE_N_DATA(wkp_calib_values_tx_iq_skew), | 93 | [9] = CALIB_SIZE_N_DATA(wkp_calib_values_tx_iq_skew), |
107 | [10] = CALIB_SIZE_N_DATA(wkp_calib_values_rx_iq), | ||
108 | [11] = CALIB_SIZE_N_DATA(wkp_calib_values_rx_iq_skew), | 94 | [11] = CALIB_SIZE_N_DATA(wkp_calib_values_rx_iq_skew), |
109 | }; | 95 | }; |
110 | 96 | ||
@@ -241,20 +227,6 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, | |||
241 | 227 | ||
242 | return 0; | 228 | return 0; |
243 | } | 229 | } |
244 | #define IWL_HW_REV_ID_RAINBOW 0x2 | ||
245 | #define IWL_PROJ_TYPE_LHP 0x5 | ||
246 | |||
247 | static u32 iwl_mvm_build_phy_cfg(struct iwl_mvm *mvm) | ||
248 | { | ||
249 | struct iwl_nvm_data *data = mvm->nvm_data; | ||
250 | /* Temp calls to static definitions, will be changed to CSR calls */ | ||
251 | u8 hw_rev_id = IWL_HW_REV_ID_RAINBOW; | ||
252 | u8 project_type = IWL_PROJ_TYPE_LHP; | ||
253 | |||
254 | return data->radio_cfg_dash | (data->radio_cfg_step << 2) | | ||
255 | (hw_rev_id << 4) | ((project_type & 0x7f) << 6) | | ||
256 | (data->valid_tx_ant << 16) | (data->valid_rx_ant << 20); | ||
257 | } | ||
258 | 230 | ||
259 | static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm) | 231 | static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm) |
260 | { | 232 | { |
@@ -262,7 +234,7 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm) | |||
262 | enum iwl_ucode_type ucode_type = mvm->cur_ucode; | 234 | enum iwl_ucode_type ucode_type = mvm->cur_ucode; |
263 | 235 | ||
264 | /* Set parameters */ | 236 | /* Set parameters */ |
265 | phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_build_phy_cfg(mvm)); | 237 | phy_cfg_cmd.phy_cfg = cpu_to_le32(mvm->fw->phy_config); |
266 | phy_cfg_cmd.calib_control.event_trigger = | 238 | phy_cfg_cmd.calib_control.event_trigger = |
267 | mvm->fw->default_calib[ucode_type].event_trigger; | 239 | mvm->fw->default_calib[ucode_type].event_trigger; |
268 | phy_cfg_cmd.calib_control.flow_trigger = | 240 | phy_cfg_cmd.calib_control.flow_trigger = |
@@ -275,103 +247,6 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm) | |||
275 | sizeof(phy_cfg_cmd), &phy_cfg_cmd); | 247 | sizeof(phy_cfg_cmd), &phy_cfg_cmd); |
276 | } | 248 | } |
277 | 249 | ||
278 | /* Starting with the new PHY DB implementation - New calibs are enabled */ | ||
279 | /* Value - 0x405e7 */ | ||
280 | #define IWL_CALIB_DEFAULT_FLOW_INIT (IWL_CALIB_CFG_XTAL_IDX |\ | ||
281 | IWL_CALIB_CFG_TEMPERATURE_IDX |\ | ||
282 | IWL_CALIB_CFG_VOLTAGE_READ_IDX |\ | ||
283 | IWL_CALIB_CFG_DC_IDX |\ | ||
284 | IWL_CALIB_CFG_BB_FILTER_IDX |\ | ||
285 | IWL_CALIB_CFG_LO_LEAKAGE_IDX |\ | ||
286 | IWL_CALIB_CFG_TX_IQ_IDX |\ | ||
287 | IWL_CALIB_CFG_RX_IQ_IDX |\ | ||
288 | IWL_CALIB_CFG_AGC_IDX) | ||
289 | |||
290 | #define IWL_CALIB_DEFAULT_EVENT_INIT 0x0 | ||
291 | |||
292 | /* Value 0x41567 */ | ||
293 | #define IWL_CALIB_DEFAULT_FLOW_RUN (IWL_CALIB_CFG_XTAL_IDX |\ | ||
294 | IWL_CALIB_CFG_TEMPERATURE_IDX |\ | ||
295 | IWL_CALIB_CFG_VOLTAGE_READ_IDX |\ | ||
296 | IWL_CALIB_CFG_BB_FILTER_IDX |\ | ||
297 | IWL_CALIB_CFG_DC_IDX |\ | ||
298 | IWL_CALIB_CFG_TX_IQ_IDX |\ | ||
299 | IWL_CALIB_CFG_RX_IQ_IDX |\ | ||
300 | IWL_CALIB_CFG_SENSITIVITY_IDX |\ | ||
301 | IWL_CALIB_CFG_AGC_IDX) | ||
302 | |||
303 | #define IWL_CALIB_DEFAULT_EVENT_RUN (IWL_CALIB_CFG_XTAL_IDX |\ | ||
304 | IWL_CALIB_CFG_TEMPERATURE_IDX |\ | ||
305 | IWL_CALIB_CFG_VOLTAGE_READ_IDX |\ | ||
306 | IWL_CALIB_CFG_TX_PWR_IDX |\ | ||
307 | IWL_CALIB_CFG_DC_IDX |\ | ||
308 | IWL_CALIB_CFG_TX_IQ_IDX |\ | ||
309 | IWL_CALIB_CFG_SENSITIVITY_IDX) | ||
310 | |||
311 | /* | ||
312 | * Sets the calibrations trigger values that will be sent to the FW for runtime | ||
313 | * and init calibrations. | ||
314 | * The ones given in the FW TLV are not correct. | ||
315 | */ | ||
316 | static void iwl_set_default_calib_trigger(struct iwl_mvm *mvm) | ||
317 | { | ||
318 | struct iwl_tlv_calib_ctrl default_calib; | ||
319 | |||
320 | /* | ||
321 | * WkP FW TLV calib bits are wrong, overwrite them. | ||
322 | * This defines the dynamic calibrations which are implemented in the | ||
323 | * uCode both for init(flow) calculation and event driven calibs. | ||
324 | */ | ||
325 | |||
326 | /* Init Image */ | ||
327 | default_calib.event_trigger = cpu_to_le32(IWL_CALIB_DEFAULT_EVENT_INIT); | ||
328 | default_calib.flow_trigger = cpu_to_le32(IWL_CALIB_DEFAULT_FLOW_INIT); | ||
329 | |||
330 | if (default_calib.event_trigger != | ||
331 | mvm->fw->default_calib[IWL_UCODE_INIT].event_trigger) | ||
332 | IWL_ERR(mvm, | ||
333 | "Updating the event calib for INIT image: 0x%x -> 0x%x\n", | ||
334 | mvm->fw->default_calib[IWL_UCODE_INIT].event_trigger, | ||
335 | default_calib.event_trigger); | ||
336 | if (default_calib.flow_trigger != | ||
337 | mvm->fw->default_calib[IWL_UCODE_INIT].flow_trigger) | ||
338 | IWL_ERR(mvm, | ||
339 | "Updating the flow calib for INIT image: 0x%x -> 0x%x\n", | ||
340 | mvm->fw->default_calib[IWL_UCODE_INIT].flow_trigger, | ||
341 | default_calib.flow_trigger); | ||
342 | |||
343 | memcpy((void *)&mvm->fw->default_calib[IWL_UCODE_INIT], | ||
344 | &default_calib, sizeof(struct iwl_tlv_calib_ctrl)); | ||
345 | IWL_ERR(mvm, | ||
346 | "Setting uCode init calibrations event 0x%x, trigger 0x%x\n", | ||
347 | default_calib.event_trigger, | ||
348 | default_calib.flow_trigger); | ||
349 | |||
350 | /* Run time image */ | ||
351 | default_calib.event_trigger = cpu_to_le32(IWL_CALIB_DEFAULT_EVENT_RUN); | ||
352 | default_calib.flow_trigger = cpu_to_le32(IWL_CALIB_DEFAULT_FLOW_RUN); | ||
353 | |||
354 | if (default_calib.event_trigger != | ||
355 | mvm->fw->default_calib[IWL_UCODE_REGULAR].event_trigger) | ||
356 | IWL_ERR(mvm, | ||
357 | "Updating the event calib for RT image: 0x%x -> 0x%x\n", | ||
358 | mvm->fw->default_calib[IWL_UCODE_REGULAR].event_trigger, | ||
359 | default_calib.event_trigger); | ||
360 | if (default_calib.flow_trigger != | ||
361 | mvm->fw->default_calib[IWL_UCODE_REGULAR].flow_trigger) | ||
362 | IWL_ERR(mvm, | ||
363 | "Updating the flow calib for RT image: 0x%x -> 0x%x\n", | ||
364 | mvm->fw->default_calib[IWL_UCODE_REGULAR].flow_trigger, | ||
365 | default_calib.flow_trigger); | ||
366 | |||
367 | memcpy((void *)&mvm->fw->default_calib[IWL_UCODE_REGULAR], | ||
368 | &default_calib, sizeof(struct iwl_tlv_calib_ctrl)); | ||
369 | IWL_ERR(mvm, | ||
370 | "Setting uCode runtime calibs event 0x%x, trigger 0x%x\n", | ||
371 | default_calib.event_trigger, | ||
372 | default_calib.flow_trigger); | ||
373 | } | ||
374 | |||
375 | static int iwl_set_default_calibrations(struct iwl_mvm *mvm) | 250 | static int iwl_set_default_calibrations(struct iwl_mvm *mvm) |
376 | { | 251 | { |
377 | u8 cmd_raw[16]; /* holds the variable size commands */ | 252 | u8 cmd_raw[16]; /* holds the variable size commands */ |
@@ -446,8 +321,10 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) | |||
446 | ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans); | 321 | ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans); |
447 | WARN_ON(ret); | 322 | WARN_ON(ret); |
448 | 323 | ||
449 | /* Override the calibrations from TLV and the const of fw */ | 324 | /* Send TX valid antennas before triggering calibrations */ |
450 | iwl_set_default_calib_trigger(mvm); | 325 | ret = iwl_send_tx_ant_cfg(mvm, mvm->nvm_data->valid_tx_ant); |
326 | if (ret) | ||
327 | goto error; | ||
451 | 328 | ||
452 | /* WkP doesn't have all calibrations, need to set default values */ | 329 | /* WkP doesn't have all calibrations, need to set default values */ |
453 | if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) { | 330 | if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) { |
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h index 537711b10478..bdae700c769e 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h | |||
@@ -80,7 +80,8 @@ | |||
80 | 80 | ||
81 | #define IWL_INVALID_MAC80211_QUEUE 0xff | 81 | #define IWL_INVALID_MAC80211_QUEUE 0xff |
82 | #define IWL_MVM_MAX_ADDRESSES 2 | 82 | #define IWL_MVM_MAX_ADDRESSES 2 |
83 | #define IWL_RSSI_OFFSET 44 | 83 | /* RSSI offset for WkP */ |
84 | #define IWL_RSSI_OFFSET 50 | ||
84 | 85 | ||
85 | enum iwl_mvm_tx_fifo { | 86 | enum iwl_mvm_tx_fifo { |
86 | IWL_MVM_TX_FIFO_BK = 0, | 87 | IWL_MVM_TX_FIFO_BK = 0, |
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c index aa59adf87db3..d0f9c1e0475e 100644 --- a/drivers/net/wireless/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/iwlwifi/mvm/ops.c | |||
@@ -624,12 +624,8 @@ static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb) | |||
624 | ieee80211_free_txskb(mvm->hw, skb); | 624 | ieee80211_free_txskb(mvm->hw, skb); |
625 | } | 625 | } |
626 | 626 | ||
627 | static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode) | 627 | static void iwl_mvm_nic_restart(struct iwl_mvm *mvm) |
628 | { | 628 | { |
629 | struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); | ||
630 | |||
631 | iwl_mvm_dump_nic_error_log(mvm); | ||
632 | |||
633 | iwl_abort_notification_waits(&mvm->notif_wait); | 629 | iwl_abort_notification_waits(&mvm->notif_wait); |
634 | 630 | ||
635 | /* | 631 | /* |
@@ -663,9 +659,21 @@ static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode) | |||
663 | } | 659 | } |
664 | } | 660 | } |
665 | 661 | ||
662 | static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode) | ||
663 | { | ||
664 | struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); | ||
665 | |||
666 | iwl_mvm_dump_nic_error_log(mvm); | ||
667 | |||
668 | iwl_mvm_nic_restart(mvm); | ||
669 | } | ||
670 | |||
666 | static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode) | 671 | static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode) |
667 | { | 672 | { |
673 | struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); | ||
674 | |||
668 | WARN_ON(1); | 675 | WARN_ON(1); |
676 | iwl_mvm_nic_restart(mvm); | ||
669 | } | 677 | } |
670 | 678 | ||
671 | static const struct iwl_op_mode_ops iwl_mvm_ops = { | 679 | static const struct iwl_op_mode_ops iwl_mvm_ops = { |
diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c index 3f40ab05bbd8..b0b190d0ec23 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/iwlwifi/mvm/rx.c | |||
@@ -131,33 +131,42 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm, | |||
131 | static int iwl_mvm_calc_rssi(struct iwl_mvm *mvm, | 131 | static int iwl_mvm_calc_rssi(struct iwl_mvm *mvm, |
132 | struct iwl_rx_phy_info *phy_info) | 132 | struct iwl_rx_phy_info *phy_info) |
133 | { | 133 | { |
134 | u32 rssi_a, rssi_b, rssi_c, max_rssi, agc_db; | 134 | int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm; |
135 | int rssi_all_band_a, rssi_all_band_b; | ||
136 | u32 agc_a, agc_b, max_agc; | ||
135 | u32 val; | 137 | u32 val; |
136 | 138 | ||
137 | /* Find max rssi among 3 possible receivers. | 139 | /* Find max rssi among 2 possible receivers. |
138 | * These values are measured by the Digital Signal Processor (DSP). | 140 | * These values are measured by the Digital Signal Processor (DSP). |
139 | * They should stay fairly constant even as the signal strength varies, | 141 | * They should stay fairly constant even as the signal strength varies, |
140 | * if the radio's Automatic Gain Control (AGC) is working right. | 142 | * if the radio's Automatic Gain Control (AGC) is working right. |
141 | * AGC value (see below) will provide the "interesting" info. | 143 | * AGC value (see below) will provide the "interesting" info. |
142 | */ | 144 | */ |
145 | val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_AGC_IDX]); | ||
146 | agc_a = (val & IWL_OFDM_AGC_A_MSK) >> IWL_OFDM_AGC_A_POS; | ||
147 | agc_b = (val & IWL_OFDM_AGC_B_MSK) >> IWL_OFDM_AGC_B_POS; | ||
148 | max_agc = max_t(u32, agc_a, agc_b); | ||
149 | |||
143 | val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_RSSI_AB_IDX]); | 150 | val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_RSSI_AB_IDX]); |
144 | rssi_a = (val & IWL_OFDM_RSSI_INBAND_A_MSK) >> IWL_OFDM_RSSI_A_POS; | 151 | rssi_a = (val & IWL_OFDM_RSSI_INBAND_A_MSK) >> IWL_OFDM_RSSI_A_POS; |
145 | rssi_b = (val & IWL_OFDM_RSSI_INBAND_B_MSK) >> IWL_OFDM_RSSI_B_POS; | 152 | rssi_b = (val & IWL_OFDM_RSSI_INBAND_B_MSK) >> IWL_OFDM_RSSI_B_POS; |
146 | val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_RSSI_C_IDX]); | 153 | rssi_all_band_a = (val & IWL_OFDM_RSSI_ALLBAND_A_MSK) >> |
147 | rssi_c = (val & IWL_OFDM_RSSI_INBAND_C_MSK) >> IWL_OFDM_RSSI_C_POS; | 154 | IWL_OFDM_RSSI_ALLBAND_A_POS; |
148 | 155 | rssi_all_band_b = (val & IWL_OFDM_RSSI_ALLBAND_B_MSK) >> | |
149 | val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_AGC_IDX]); | 156 | IWL_OFDM_RSSI_ALLBAND_B_POS; |
150 | agc_db = (val & IWL_OFDM_AGC_DB_MSK) >> IWL_OFDM_AGC_DB_POS; | ||
151 | 157 | ||
152 | max_rssi = max_t(u32, rssi_a, rssi_b); | 158 | /* |
153 | max_rssi = max_t(u32, max_rssi, rssi_c); | 159 | * dBm = rssi dB - agc dB - constant. |
160 | * Higher AGC (higher radio gain) means lower signal. | ||
161 | */ | ||
162 | rssi_a_dbm = rssi_a - IWL_RSSI_OFFSET - agc_a; | ||
163 | rssi_b_dbm = rssi_b - IWL_RSSI_OFFSET - agc_b; | ||
164 | max_rssi_dbm = max_t(int, rssi_a_dbm, rssi_b_dbm); | ||
154 | 165 | ||
155 | IWL_DEBUG_STATS(mvm, "Rssi In A %d B %d C %d Max %d AGC dB %d\n", | 166 | IWL_DEBUG_STATS(mvm, "Rssi In A %d B %d Max %d AGCA %d AGCB %d\n", |
156 | rssi_a, rssi_b, rssi_c, max_rssi, agc_db); | 167 | rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b); |
157 | 168 | ||
158 | /* dBm = max_rssi dB - agc dB - constant. | 169 | return max_rssi_dbm; |
159 | * Higher AGC (higher radio gain) means lower signal. */ | ||
160 | return max_rssi - agc_db - IWL_RSSI_OFFSET; | ||
161 | } | 170 | } |
162 | 171 | ||
163 | /* | 172 | /* |
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c index 861a7f9f8e7f..274f44e2ef60 100644 --- a/drivers/net/wireless/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/iwlwifi/mvm/sta.c | |||
@@ -770,6 +770,16 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif, | |||
770 | u16 txq_id; | 770 | u16 txq_id; |
771 | int err; | 771 | int err; |
772 | 772 | ||
773 | |||
774 | /* | ||
775 | * If mac80211 is cleaning its state, then say that we finished since | ||
776 | * our state has been cleared anyway. | ||
777 | */ | ||
778 | if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { | ||
779 | ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); | ||
780 | return 0; | ||
781 | } | ||
782 | |||
773 | spin_lock_bh(&mvmsta->lock); | 783 | spin_lock_bh(&mvmsta->lock); |
774 | 784 | ||
775 | txq_id = tid_data->txq_id; | 785 | txq_id = tid_data->txq_id; |
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c index 6b67ce3f679c..6645efe5c03e 100644 --- a/drivers/net/wireless/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/iwlwifi/mvm/tx.c | |||
@@ -607,12 +607,8 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, | |||
607 | 607 | ||
608 | /* Single frame failure in an AMPDU queue => send BAR */ | 608 | /* Single frame failure in an AMPDU queue => send BAR */ |
609 | if (txq_id >= IWL_FIRST_AMPDU_QUEUE && | 609 | if (txq_id >= IWL_FIRST_AMPDU_QUEUE && |
610 | !(info->flags & IEEE80211_TX_STAT_ACK)) { | 610 | !(info->flags & IEEE80211_TX_STAT_ACK)) |
611 | /* there must be only one skb in the skb_list */ | ||
612 | WARN_ON_ONCE(skb_freed > 1 || | ||
613 | !skb_queue_empty(&skbs)); | ||
614 | info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; | 611 | info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; |
615 | } | ||
616 | 612 | ||
617 | /* W/A FW bug: seq_ctl is wrong when the queue is flushed */ | 613 | /* W/A FW bug: seq_ctl is wrong when the queue is flushed */ |
618 | if (status == TX_STATUS_FAIL_FIFO_FLUSHED) { | 614 | if (status == TX_STATUS_FAIL_FIFO_FLUSHED) { |
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h index 3d62e8055352..148843e7f34f 100644 --- a/drivers/net/wireless/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/iwlwifi/pcie/internal.h | |||
@@ -137,10 +137,6 @@ static inline int iwl_queue_dec_wrap(int index, int n_bd) | |||
137 | struct iwl_cmd_meta { | 137 | struct iwl_cmd_meta { |
138 | /* only for SYNC commands, iff the reply skb is wanted */ | 138 | /* only for SYNC commands, iff the reply skb is wanted */ |
139 | struct iwl_host_cmd *source; | 139 | struct iwl_host_cmd *source; |
140 | |||
141 | DEFINE_DMA_UNMAP_ADDR(mapping); | ||
142 | DEFINE_DMA_UNMAP_LEN(len); | ||
143 | |||
144 | u32 flags; | 140 | u32 flags; |
145 | }; | 141 | }; |
146 | 142 | ||
@@ -185,25 +181,36 @@ struct iwl_queue { | |||
185 | /* | 181 | /* |
186 | * The FH will write back to the first TB only, so we need | 182 | * The FH will write back to the first TB only, so we need |
187 | * to copy some data into the buffer regardless of whether | 183 | * to copy some data into the buffer regardless of whether |
188 | * it should be mapped or not. This indicates how much to | 184 | * it should be mapped or not. This indicates how big the |
189 | * copy, even for HCMDs it must be big enough to fit the | 185 | * first TB must be to include the scratch buffer. Since |
190 | * DRAM scratch from the TX cmd, at least 16 bytes. | 186 | * the scratch is 4 bytes at offset 12, it's 16 now. If we |
187 | * make it bigger then allocations will be bigger and copy | ||
188 | * slower, so that's probably not useful. | ||
191 | */ | 189 | */ |
192 | #define IWL_HCMD_MIN_COPY_SIZE 16 | 190 | #define IWL_HCMD_SCRATCHBUF_SIZE 16 |
193 | 191 | ||
194 | struct iwl_pcie_txq_entry { | 192 | struct iwl_pcie_txq_entry { |
195 | struct iwl_device_cmd *cmd; | 193 | struct iwl_device_cmd *cmd; |
196 | struct iwl_device_cmd *copy_cmd; | ||
197 | struct sk_buff *skb; | 194 | struct sk_buff *skb; |
198 | /* buffer to free after command completes */ | 195 | /* buffer to free after command completes */ |
199 | const void *free_buf; | 196 | const void *free_buf; |
200 | struct iwl_cmd_meta meta; | 197 | struct iwl_cmd_meta meta; |
201 | }; | 198 | }; |
202 | 199 | ||
200 | struct iwl_pcie_txq_scratch_buf { | ||
201 | struct iwl_cmd_header hdr; | ||
202 | u8 buf[8]; | ||
203 | __le32 scratch; | ||
204 | }; | ||
205 | |||
203 | /** | 206 | /** |
204 | * struct iwl_txq - Tx Queue for DMA | 207 | * struct iwl_txq - Tx Queue for DMA |
205 | * @q: generic Rx/Tx queue descriptor | 208 | * @q: generic Rx/Tx queue descriptor |
206 | * @tfds: transmit frame descriptors (DMA memory) | 209 | * @tfds: transmit frame descriptors (DMA memory) |
210 | * @scratchbufs: start of command headers, including scratch buffers, for | ||
211 | * the writeback -- this is DMA memory and an array holding one buffer | ||
212 | * for each command on the queue | ||
213 | * @scratchbufs_dma: DMA address for the scratchbufs start | ||
207 | * @entries: transmit entries (driver state) | 214 | * @entries: transmit entries (driver state) |
208 | * @lock: queue lock | 215 | * @lock: queue lock |
209 | * @stuck_timer: timer that fires if queue gets stuck | 216 | * @stuck_timer: timer that fires if queue gets stuck |
@@ -217,6 +224,8 @@ struct iwl_pcie_txq_entry { | |||
217 | struct iwl_txq { | 224 | struct iwl_txq { |
218 | struct iwl_queue q; | 225 | struct iwl_queue q; |
219 | struct iwl_tfd *tfds; | 226 | struct iwl_tfd *tfds; |
227 | struct iwl_pcie_txq_scratch_buf *scratchbufs; | ||
228 | dma_addr_t scratchbufs_dma; | ||
220 | struct iwl_pcie_txq_entry *entries; | 229 | struct iwl_pcie_txq_entry *entries; |
221 | spinlock_t lock; | 230 | spinlock_t lock; |
222 | struct timer_list stuck_timer; | 231 | struct timer_list stuck_timer; |
@@ -225,6 +234,13 @@ struct iwl_txq { | |||
225 | u8 active; | 234 | u8 active; |
226 | }; | 235 | }; |
227 | 236 | ||
237 | static inline dma_addr_t | ||
238 | iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx) | ||
239 | { | ||
240 | return txq->scratchbufs_dma + | ||
241 | sizeof(struct iwl_pcie_txq_scratch_buf) * idx; | ||
242 | } | ||
243 | |||
228 | /** | 244 | /** |
229 | * struct iwl_trans_pcie - PCIe transport specific data | 245 | * struct iwl_trans_pcie - PCIe transport specific data |
230 | * @rxq: all the RX queue data | 246 | * @rxq: all the RX queue data |
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c index b0ae06d2456f..567e67ad1f61 100644 --- a/drivers/net/wireless/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/iwlwifi/pcie/rx.c | |||
@@ -637,22 +637,14 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, | |||
637 | index = SEQ_TO_INDEX(sequence); | 637 | index = SEQ_TO_INDEX(sequence); |
638 | cmd_index = get_cmd_index(&txq->q, index); | 638 | cmd_index = get_cmd_index(&txq->q, index); |
639 | 639 | ||
640 | if (reclaim) { | 640 | if (reclaim) |
641 | struct iwl_pcie_txq_entry *ent; | 641 | cmd = txq->entries[cmd_index].cmd; |
642 | ent = &txq->entries[cmd_index]; | 642 | else |
643 | cmd = ent->copy_cmd; | ||
644 | WARN_ON_ONCE(!cmd && ent->meta.flags & CMD_WANT_HCMD); | ||
645 | } else { | ||
646 | cmd = NULL; | 643 | cmd = NULL; |
647 | } | ||
648 | 644 | ||
649 | err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd); | 645 | err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd); |
650 | 646 | ||
651 | if (reclaim) { | 647 | if (reclaim) { |
652 | /* The original command isn't needed any more */ | ||
653 | kfree(txq->entries[cmd_index].copy_cmd); | ||
654 | txq->entries[cmd_index].copy_cmd = NULL; | ||
655 | /* nor is the duplicated part of the command */ | ||
656 | kfree(txq->entries[cmd_index].free_buf); | 648 | kfree(txq->entries[cmd_index].free_buf); |
657 | txq->entries[cmd_index].free_buf = NULL; | 649 | txq->entries[cmd_index].free_buf = NULL; |
658 | } | 650 | } |
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c index 8b625a7f5685..8595c16f74de 100644 --- a/drivers/net/wireless/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/iwlwifi/pcie/tx.c | |||
@@ -191,12 +191,9 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data) | |||
191 | } | 191 | } |
192 | 192 | ||
193 | for (i = q->read_ptr; i != q->write_ptr; | 193 | for (i = q->read_ptr; i != q->write_ptr; |
194 | i = iwl_queue_inc_wrap(i, q->n_bd)) { | 194 | i = iwl_queue_inc_wrap(i, q->n_bd)) |
195 | struct iwl_tx_cmd *tx_cmd = | ||
196 | (struct iwl_tx_cmd *)txq->entries[i].cmd->payload; | ||
197 | IWL_ERR(trans, "scratch %d = 0x%08x\n", i, | 195 | IWL_ERR(trans, "scratch %d = 0x%08x\n", i, |
198 | get_unaligned_le32(&tx_cmd->scratch)); | 196 | le32_to_cpu(txq->scratchbufs[i].scratch)); |
199 | } | ||
200 | 197 | ||
201 | iwl_op_mode_nic_error(trans->op_mode); | 198 | iwl_op_mode_nic_error(trans->op_mode); |
202 | } | 199 | } |
@@ -367,8 +364,8 @@ static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_tfd *tfd) | |||
367 | } | 364 | } |
368 | 365 | ||
369 | static void iwl_pcie_tfd_unmap(struct iwl_trans *trans, | 366 | static void iwl_pcie_tfd_unmap(struct iwl_trans *trans, |
370 | struct iwl_cmd_meta *meta, struct iwl_tfd *tfd, | 367 | struct iwl_cmd_meta *meta, |
371 | enum dma_data_direction dma_dir) | 368 | struct iwl_tfd *tfd) |
372 | { | 369 | { |
373 | int i; | 370 | int i; |
374 | int num_tbs; | 371 | int num_tbs; |
@@ -382,17 +379,12 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans, | |||
382 | return; | 379 | return; |
383 | } | 380 | } |
384 | 381 | ||
385 | /* Unmap tx_cmd */ | 382 | /* first TB is never freed - it's the scratchbuf data */ |
386 | if (num_tbs) | ||
387 | dma_unmap_single(trans->dev, | ||
388 | dma_unmap_addr(meta, mapping), | ||
389 | dma_unmap_len(meta, len), | ||
390 | DMA_BIDIRECTIONAL); | ||
391 | 383 | ||
392 | /* Unmap chunks, if any. */ | ||
393 | for (i = 1; i < num_tbs; i++) | 384 | for (i = 1; i < num_tbs; i++) |
394 | dma_unmap_single(trans->dev, iwl_pcie_tfd_tb_get_addr(tfd, i), | 385 | dma_unmap_single(trans->dev, iwl_pcie_tfd_tb_get_addr(tfd, i), |
395 | iwl_pcie_tfd_tb_get_len(tfd, i), dma_dir); | 386 | iwl_pcie_tfd_tb_get_len(tfd, i), |
387 | DMA_TO_DEVICE); | ||
396 | 388 | ||
397 | tfd->num_tbs = 0; | 389 | tfd->num_tbs = 0; |
398 | } | 390 | } |
@@ -406,8 +398,7 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans, | |||
406 | * Does NOT advance any TFD circular buffer read/write indexes | 398 | * Does NOT advance any TFD circular buffer read/write indexes |
407 | * Does NOT free the TFD itself (which is within circular buffer) | 399 | * Does NOT free the TFD itself (which is within circular buffer) |
408 | */ | 400 | */ |
409 | static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq, | 401 | static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) |
410 | enum dma_data_direction dma_dir) | ||
411 | { | 402 | { |
412 | struct iwl_tfd *tfd_tmp = txq->tfds; | 403 | struct iwl_tfd *tfd_tmp = txq->tfds; |
413 | 404 | ||
@@ -418,8 +409,7 @@ static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq, | |||
418 | lockdep_assert_held(&txq->lock); | 409 | lockdep_assert_held(&txq->lock); |
419 | 410 | ||
420 | /* We have only q->n_window txq->entries, but we use q->n_bd tfds */ | 411 | /* We have only q->n_window txq->entries, but we use q->n_bd tfds */ |
421 | iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr], | 412 | iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr]); |
422 | dma_dir); | ||
423 | 413 | ||
424 | /* free SKB */ | 414 | /* free SKB */ |
425 | if (txq->entries) { | 415 | if (txq->entries) { |
@@ -479,6 +469,7 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans, | |||
479 | { | 469 | { |
480 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | 470 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
481 | size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX; | 471 | size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX; |
472 | size_t scratchbuf_sz; | ||
482 | int i; | 473 | int i; |
483 | 474 | ||
484 | if (WARN_ON(txq->entries || txq->tfds)) | 475 | if (WARN_ON(txq->entries || txq->tfds)) |
@@ -514,9 +505,25 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans, | |||
514 | IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz); | 505 | IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz); |
515 | goto error; | 506 | goto error; |
516 | } | 507 | } |
508 | |||
509 | BUILD_BUG_ON(IWL_HCMD_SCRATCHBUF_SIZE != sizeof(*txq->scratchbufs)); | ||
510 | BUILD_BUG_ON(offsetof(struct iwl_pcie_txq_scratch_buf, scratch) != | ||
511 | sizeof(struct iwl_cmd_header) + | ||
512 | offsetof(struct iwl_tx_cmd, scratch)); | ||
513 | |||
514 | scratchbuf_sz = sizeof(*txq->scratchbufs) * slots_num; | ||
515 | |||
516 | txq->scratchbufs = dma_alloc_coherent(trans->dev, scratchbuf_sz, | ||
517 | &txq->scratchbufs_dma, | ||
518 | GFP_KERNEL); | ||
519 | if (!txq->scratchbufs) | ||
520 | goto err_free_tfds; | ||
521 | |||
517 | txq->q.id = txq_id; | 522 | txq->q.id = txq_id; |
518 | 523 | ||
519 | return 0; | 524 | return 0; |
525 | err_free_tfds: | ||
526 | dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->q.dma_addr); | ||
520 | error: | 527 | error: |
521 | if (txq->entries && txq_id == trans_pcie->cmd_queue) | 528 | if (txq->entries && txq_id == trans_pcie->cmd_queue) |
522 | for (i = 0; i < slots_num; i++) | 529 | for (i = 0; i < slots_num; i++) |
@@ -565,22 +572,13 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) | |||
565 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | 572 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
566 | struct iwl_txq *txq = &trans_pcie->txq[txq_id]; | 573 | struct iwl_txq *txq = &trans_pcie->txq[txq_id]; |
567 | struct iwl_queue *q = &txq->q; | 574 | struct iwl_queue *q = &txq->q; |
568 | enum dma_data_direction dma_dir; | ||
569 | 575 | ||
570 | if (!q->n_bd) | 576 | if (!q->n_bd) |
571 | return; | 577 | return; |
572 | 578 | ||
573 | /* In the command queue, all the TBs are mapped as BIDI | ||
574 | * so unmap them as such. | ||
575 | */ | ||
576 | if (txq_id == trans_pcie->cmd_queue) | ||
577 | dma_dir = DMA_BIDIRECTIONAL; | ||
578 | else | ||
579 | dma_dir = DMA_TO_DEVICE; | ||
580 | |||
581 | spin_lock_bh(&txq->lock); | 579 | spin_lock_bh(&txq->lock); |
582 | while (q->write_ptr != q->read_ptr) { | 580 | while (q->write_ptr != q->read_ptr) { |
583 | iwl_pcie_txq_free_tfd(trans, txq, dma_dir); | 581 | iwl_pcie_txq_free_tfd(trans, txq); |
584 | q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd); | 582 | q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd); |
585 | } | 583 | } |
586 | spin_unlock_bh(&txq->lock); | 584 | spin_unlock_bh(&txq->lock); |
@@ -610,7 +608,6 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id) | |||
610 | if (txq_id == trans_pcie->cmd_queue) | 608 | if (txq_id == trans_pcie->cmd_queue) |
611 | for (i = 0; i < txq->q.n_window; i++) { | 609 | for (i = 0; i < txq->q.n_window; i++) { |
612 | kfree(txq->entries[i].cmd); | 610 | kfree(txq->entries[i].cmd); |
613 | kfree(txq->entries[i].copy_cmd); | ||
614 | kfree(txq->entries[i].free_buf); | 611 | kfree(txq->entries[i].free_buf); |
615 | } | 612 | } |
616 | 613 | ||
@@ -619,6 +616,10 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id) | |||
619 | dma_free_coherent(dev, sizeof(struct iwl_tfd) * | 616 | dma_free_coherent(dev, sizeof(struct iwl_tfd) * |
620 | txq->q.n_bd, txq->tfds, txq->q.dma_addr); | 617 | txq->q.n_bd, txq->tfds, txq->q.dma_addr); |
621 | txq->q.dma_addr = 0; | 618 | txq->q.dma_addr = 0; |
619 | |||
620 | dma_free_coherent(dev, | ||
621 | sizeof(*txq->scratchbufs) * txq->q.n_window, | ||
622 | txq->scratchbufs, txq->scratchbufs_dma); | ||
622 | } | 623 | } |
623 | 624 | ||
624 | kfree(txq->entries); | 625 | kfree(txq->entries); |
@@ -962,7 +963,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, | |||
962 | 963 | ||
963 | iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq); | 964 | iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq); |
964 | 965 | ||
965 | iwl_pcie_txq_free_tfd(trans, txq, DMA_TO_DEVICE); | 966 | iwl_pcie_txq_free_tfd(trans, txq); |
966 | } | 967 | } |
967 | 968 | ||
968 | iwl_pcie_txq_progress(trans_pcie, txq); | 969 | iwl_pcie_txq_progress(trans_pcie, txq); |
@@ -1152,29 +1153,29 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, | |||
1152 | void *dup_buf = NULL; | 1153 | void *dup_buf = NULL; |
1153 | dma_addr_t phys_addr; | 1154 | dma_addr_t phys_addr; |
1154 | int idx; | 1155 | int idx; |
1155 | u16 copy_size, cmd_size, dma_size; | 1156 | u16 copy_size, cmd_size, scratch_size; |
1156 | bool had_nocopy = false; | 1157 | bool had_nocopy = false; |
1157 | int i; | 1158 | int i; |
1158 | u32 cmd_pos; | 1159 | u32 cmd_pos; |
1159 | const u8 *cmddata[IWL_MAX_CMD_TFDS]; | 1160 | const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD]; |
1160 | u16 cmdlen[IWL_MAX_CMD_TFDS]; | 1161 | u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD]; |
1161 | 1162 | ||
1162 | copy_size = sizeof(out_cmd->hdr); | 1163 | copy_size = sizeof(out_cmd->hdr); |
1163 | cmd_size = sizeof(out_cmd->hdr); | 1164 | cmd_size = sizeof(out_cmd->hdr); |
1164 | 1165 | ||
1165 | /* need one for the header if the first is NOCOPY */ | 1166 | /* need one for the header if the first is NOCOPY */ |
1166 | BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1); | 1167 | BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1); |
1167 | 1168 | ||
1168 | for (i = 0; i < IWL_MAX_CMD_TFDS; i++) { | 1169 | for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { |
1169 | cmddata[i] = cmd->data[i]; | 1170 | cmddata[i] = cmd->data[i]; |
1170 | cmdlen[i] = cmd->len[i]; | 1171 | cmdlen[i] = cmd->len[i]; |
1171 | 1172 | ||
1172 | if (!cmd->len[i]) | 1173 | if (!cmd->len[i]) |
1173 | continue; | 1174 | continue; |
1174 | 1175 | ||
1175 | /* need at least IWL_HCMD_MIN_COPY_SIZE copied */ | 1176 | /* need at least IWL_HCMD_SCRATCHBUF_SIZE copied */ |
1176 | if (copy_size < IWL_HCMD_MIN_COPY_SIZE) { | 1177 | if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) { |
1177 | int copy = IWL_HCMD_MIN_COPY_SIZE - copy_size; | 1178 | int copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size; |
1178 | 1179 | ||
1179 | if (copy > cmdlen[i]) | 1180 | if (copy > cmdlen[i]) |
1180 | copy = cmdlen[i]; | 1181 | copy = cmdlen[i]; |
@@ -1260,15 +1261,15 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, | |||
1260 | /* and copy the data that needs to be copied */ | 1261 | /* and copy the data that needs to be copied */ |
1261 | cmd_pos = offsetof(struct iwl_device_cmd, payload); | 1262 | cmd_pos = offsetof(struct iwl_device_cmd, payload); |
1262 | copy_size = sizeof(out_cmd->hdr); | 1263 | copy_size = sizeof(out_cmd->hdr); |
1263 | for (i = 0; i < IWL_MAX_CMD_TFDS; i++) { | 1264 | for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { |
1264 | int copy = 0; | 1265 | int copy = 0; |
1265 | 1266 | ||
1266 | if (!cmd->len) | 1267 | if (!cmd->len) |
1267 | continue; | 1268 | continue; |
1268 | 1269 | ||
1269 | /* need at least IWL_HCMD_MIN_COPY_SIZE copied */ | 1270 | /* need at least IWL_HCMD_SCRATCHBUF_SIZE copied */ |
1270 | if (copy_size < IWL_HCMD_MIN_COPY_SIZE) { | 1271 | if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) { |
1271 | copy = IWL_HCMD_MIN_COPY_SIZE - copy_size; | 1272 | copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size; |
1272 | 1273 | ||
1273 | if (copy > cmd->len[i]) | 1274 | if (copy > cmd->len[i]) |
1274 | copy = cmd->len[i]; | 1275 | copy = cmd->len[i]; |
@@ -1286,50 +1287,38 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, | |||
1286 | } | 1287 | } |
1287 | } | 1288 | } |
1288 | 1289 | ||
1289 | WARN_ON_ONCE(txq->entries[idx].copy_cmd); | ||
1290 | |||
1291 | /* | ||
1292 | * since out_cmd will be the source address of the FH, it will write | ||
1293 | * the retry count there. So when the user needs to receivce the HCMD | ||
1294 | * that corresponds to the response in the response handler, it needs | ||
1295 | * to set CMD_WANT_HCMD. | ||
1296 | */ | ||
1297 | if (cmd->flags & CMD_WANT_HCMD) { | ||
1298 | txq->entries[idx].copy_cmd = | ||
1299 | kmemdup(out_cmd, cmd_pos, GFP_ATOMIC); | ||
1300 | if (unlikely(!txq->entries[idx].copy_cmd)) { | ||
1301 | idx = -ENOMEM; | ||
1302 | goto out; | ||
1303 | } | ||
1304 | } | ||
1305 | |||
1306 | IWL_DEBUG_HC(trans, | 1290 | IWL_DEBUG_HC(trans, |
1307 | "Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n", | 1291 | "Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n", |
1308 | get_cmd_string(trans_pcie, out_cmd->hdr.cmd), | 1292 | get_cmd_string(trans_pcie, out_cmd->hdr.cmd), |
1309 | out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence), | 1293 | out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence), |
1310 | cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue); | 1294 | cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue); |
1311 | 1295 | ||
1312 | /* | 1296 | /* start the TFD with the scratchbuf */ |
1313 | * If the entire command is smaller than IWL_HCMD_MIN_COPY_SIZE, we must | 1297 | scratch_size = min_t(int, copy_size, IWL_HCMD_SCRATCHBUF_SIZE); |
1314 | * still map at least that many bytes for the hardware to write back to. | 1298 | memcpy(&txq->scratchbufs[q->write_ptr], &out_cmd->hdr, scratch_size); |
1315 | * We have enough space, so that's not a problem. | 1299 | iwl_pcie_txq_build_tfd(trans, txq, |
1316 | */ | 1300 | iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr), |
1317 | dma_size = max_t(u16, copy_size, IWL_HCMD_MIN_COPY_SIZE); | 1301 | scratch_size, 1); |
1302 | |||
1303 | /* map first command fragment, if any remains */ | ||
1304 | if (copy_size > scratch_size) { | ||
1305 | phys_addr = dma_map_single(trans->dev, | ||
1306 | ((u8 *)&out_cmd->hdr) + scratch_size, | ||
1307 | copy_size - scratch_size, | ||
1308 | DMA_TO_DEVICE); | ||
1309 | if (dma_mapping_error(trans->dev, phys_addr)) { | ||
1310 | iwl_pcie_tfd_unmap(trans, out_meta, | ||
1311 | &txq->tfds[q->write_ptr]); | ||
1312 | idx = -ENOMEM; | ||
1313 | goto out; | ||
1314 | } | ||
1318 | 1315 | ||
1319 | phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, dma_size, | 1316 | iwl_pcie_txq_build_tfd(trans, txq, phys_addr, |
1320 | DMA_BIDIRECTIONAL); | 1317 | copy_size - scratch_size, 0); |
1321 | if (unlikely(dma_mapping_error(trans->dev, phys_addr))) { | ||
1322 | idx = -ENOMEM; | ||
1323 | goto out; | ||
1324 | } | 1318 | } |
1325 | 1319 | ||
1326 | dma_unmap_addr_set(out_meta, mapping, phys_addr); | ||
1327 | dma_unmap_len_set(out_meta, len, dma_size); | ||
1328 | |||
1329 | iwl_pcie_txq_build_tfd(trans, txq, phys_addr, copy_size, 1); | ||
1330 | |||
1331 | /* map the remaining (adjusted) nocopy/dup fragments */ | 1320 | /* map the remaining (adjusted) nocopy/dup fragments */ |
1332 | for (i = 0; i < IWL_MAX_CMD_TFDS; i++) { | 1321 | for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { |
1333 | const void *data = cmddata[i]; | 1322 | const void *data = cmddata[i]; |
1334 | 1323 | ||
1335 | if (!cmdlen[i]) | 1324 | if (!cmdlen[i]) |
@@ -1340,11 +1329,10 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, | |||
1340 | if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) | 1329 | if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) |
1341 | data = dup_buf; | 1330 | data = dup_buf; |
1342 | phys_addr = dma_map_single(trans->dev, (void *)data, | 1331 | phys_addr = dma_map_single(trans->dev, (void *)data, |
1343 | cmdlen[i], DMA_BIDIRECTIONAL); | 1332 | cmdlen[i], DMA_TO_DEVICE); |
1344 | if (dma_mapping_error(trans->dev, phys_addr)) { | 1333 | if (dma_mapping_error(trans->dev, phys_addr)) { |
1345 | iwl_pcie_tfd_unmap(trans, out_meta, | 1334 | iwl_pcie_tfd_unmap(trans, out_meta, |
1346 | &txq->tfds[q->write_ptr], | 1335 | &txq->tfds[q->write_ptr]); |
1347 | DMA_BIDIRECTIONAL); | ||
1348 | idx = -ENOMEM; | 1336 | idx = -ENOMEM; |
1349 | goto out; | 1337 | goto out; |
1350 | } | 1338 | } |
@@ -1418,7 +1406,7 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans, | |||
1418 | cmd = txq->entries[cmd_index].cmd; | 1406 | cmd = txq->entries[cmd_index].cmd; |
1419 | meta = &txq->entries[cmd_index].meta; | 1407 | meta = &txq->entries[cmd_index].meta; |
1420 | 1408 | ||
1421 | iwl_pcie_tfd_unmap(trans, meta, &txq->tfds[index], DMA_BIDIRECTIONAL); | 1409 | iwl_pcie_tfd_unmap(trans, meta, &txq->tfds[index]); |
1422 | 1410 | ||
1423 | /* Input error checking is done when commands are added to queue. */ | 1411 | /* Input error checking is done when commands are added to queue. */ |
1424 | if (meta->flags & CMD_WANT_SKB) { | 1412 | if (meta->flags & CMD_WANT_SKB) { |
@@ -1597,10 +1585,9 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, | |||
1597 | struct iwl_cmd_meta *out_meta; | 1585 | struct iwl_cmd_meta *out_meta; |
1598 | struct iwl_txq *txq; | 1586 | struct iwl_txq *txq; |
1599 | struct iwl_queue *q; | 1587 | struct iwl_queue *q; |
1600 | dma_addr_t phys_addr = 0; | 1588 | dma_addr_t tb0_phys, tb1_phys, scratch_phys; |
1601 | dma_addr_t txcmd_phys; | 1589 | void *tb1_addr; |
1602 | dma_addr_t scratch_phys; | 1590 | u16 len, tb1_len, tb2_len; |
1603 | u16 len, firstlen, secondlen; | ||
1604 | u8 wait_write_ptr = 0; | 1591 | u8 wait_write_ptr = 0; |
1605 | __le16 fc = hdr->frame_control; | 1592 | __le16 fc = hdr->frame_control; |
1606 | u8 hdr_len = ieee80211_hdrlen(fc); | 1593 | u8 hdr_len = ieee80211_hdrlen(fc); |
@@ -1638,85 +1625,80 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, | |||
1638 | cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | | 1625 | cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | |
1639 | INDEX_TO_SEQ(q->write_ptr))); | 1626 | INDEX_TO_SEQ(q->write_ptr))); |
1640 | 1627 | ||
1628 | tb0_phys = iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr); | ||
1629 | scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) + | ||
1630 | offsetof(struct iwl_tx_cmd, scratch); | ||
1631 | |||
1632 | tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys); | ||
1633 | tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys); | ||
1634 | |||
1641 | /* Set up first empty entry in queue's array of Tx/cmd buffers */ | 1635 | /* Set up first empty entry in queue's array of Tx/cmd buffers */ |
1642 | out_meta = &txq->entries[q->write_ptr].meta; | 1636 | out_meta = &txq->entries[q->write_ptr].meta; |
1643 | 1637 | ||
1644 | /* | 1638 | /* |
1645 | * Use the first empty entry in this queue's command buffer array | 1639 | * The second TB (tb1) points to the remainder of the TX command |
1646 | * to contain the Tx command and MAC header concatenated together | 1640 | * and the 802.11 header - dword aligned size |
1647 | * (payload data will be in another buffer). | 1641 | * (This calculation modifies the TX command, so do it before the |
1648 | * Size of this varies, due to varying MAC header length. | 1642 | * setup of the first TB) |
1649 | * If end is not dword aligned, we'll have 2 extra bytes at the end | ||
1650 | * of the MAC header (device reads on dword boundaries). | ||
1651 | * We'll tell device about this padding later. | ||
1652 | */ | 1643 | */ |
1653 | len = sizeof(struct iwl_tx_cmd) + | 1644 | len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) + |
1654 | sizeof(struct iwl_cmd_header) + hdr_len; | 1645 | hdr_len - IWL_HCMD_SCRATCHBUF_SIZE; |
1655 | firstlen = (len + 3) & ~3; | 1646 | tb1_len = (len + 3) & ~3; |
1656 | 1647 | ||
1657 | /* Tell NIC about any 2-byte padding after MAC header */ | 1648 | /* Tell NIC about any 2-byte padding after MAC header */ |
1658 | if (firstlen != len) | 1649 | if (tb1_len != len) |
1659 | tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK; | 1650 | tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK; |
1660 | 1651 | ||
1661 | /* Physical address of this Tx command's header (not MAC header!), | 1652 | /* The first TB points to the scratchbuf data - min_copy bytes */ |
1662 | * within command buffer array. */ | 1653 | memcpy(&txq->scratchbufs[q->write_ptr], &dev_cmd->hdr, |
1663 | txcmd_phys = dma_map_single(trans->dev, | 1654 | IWL_HCMD_SCRATCHBUF_SIZE); |
1664 | &dev_cmd->hdr, firstlen, | 1655 | iwl_pcie_txq_build_tfd(trans, txq, tb0_phys, |
1665 | DMA_BIDIRECTIONAL); | 1656 | IWL_HCMD_SCRATCHBUF_SIZE, 1); |
1666 | if (unlikely(dma_mapping_error(trans->dev, txcmd_phys))) | ||
1667 | goto out_err; | ||
1668 | dma_unmap_addr_set(out_meta, mapping, txcmd_phys); | ||
1669 | dma_unmap_len_set(out_meta, len, firstlen); | ||
1670 | 1657 | ||
1671 | if (!ieee80211_has_morefrags(fc)) { | 1658 | /* there must be data left over for TB1 or this code must be changed */ |
1672 | txq->need_update = 1; | 1659 | BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_HCMD_SCRATCHBUF_SIZE); |
1673 | } else { | ||
1674 | wait_write_ptr = 1; | ||
1675 | txq->need_update = 0; | ||
1676 | } | ||
1677 | 1660 | ||
1678 | /* Set up TFD's 2nd entry to point directly to remainder of skb, | 1661 | /* map the data for TB1 */ |
1679 | * if any (802.11 null frames have no payload). */ | 1662 | tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_HCMD_SCRATCHBUF_SIZE; |
1680 | secondlen = skb->len - hdr_len; | 1663 | tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE); |
1681 | if (secondlen > 0) { | 1664 | if (unlikely(dma_mapping_error(trans->dev, tb1_phys))) |
1682 | phys_addr = dma_map_single(trans->dev, skb->data + hdr_len, | 1665 | goto out_err; |
1683 | secondlen, DMA_TO_DEVICE); | 1666 | iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, 0); |
1684 | if (unlikely(dma_mapping_error(trans->dev, phys_addr))) { | 1667 | |
1685 | dma_unmap_single(trans->dev, | 1668 | /* |
1686 | dma_unmap_addr(out_meta, mapping), | 1669 | * Set up TFD's third entry to point directly to remainder |
1687 | dma_unmap_len(out_meta, len), | 1670 | * of skb, if any (802.11 null frames have no payload). |
1688 | DMA_BIDIRECTIONAL); | 1671 | */ |
1672 | tb2_len = skb->len - hdr_len; | ||
1673 | if (tb2_len > 0) { | ||
1674 | dma_addr_t tb2_phys = dma_map_single(trans->dev, | ||
1675 | skb->data + hdr_len, | ||
1676 | tb2_len, DMA_TO_DEVICE); | ||
1677 | if (unlikely(dma_mapping_error(trans->dev, tb2_phys))) { | ||
1678 | iwl_pcie_tfd_unmap(trans, out_meta, | ||
1679 | &txq->tfds[q->write_ptr]); | ||
1689 | goto out_err; | 1680 | goto out_err; |
1690 | } | 1681 | } |
1682 | iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, 0); | ||
1691 | } | 1683 | } |
1692 | 1684 | ||
1693 | /* Attach buffers to TFD */ | ||
1694 | iwl_pcie_txq_build_tfd(trans, txq, txcmd_phys, firstlen, 1); | ||
1695 | if (secondlen > 0) | ||
1696 | iwl_pcie_txq_build_tfd(trans, txq, phys_addr, secondlen, 0); | ||
1697 | |||
1698 | scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) + | ||
1699 | offsetof(struct iwl_tx_cmd, scratch); | ||
1700 | |||
1701 | /* take back ownership of DMA buffer to enable update */ | ||
1702 | dma_sync_single_for_cpu(trans->dev, txcmd_phys, firstlen, | ||
1703 | DMA_BIDIRECTIONAL); | ||
1704 | tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys); | ||
1705 | tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys); | ||
1706 | |||
1707 | /* Set up entry for this TFD in Tx byte-count array */ | 1685 | /* Set up entry for this TFD in Tx byte-count array */ |
1708 | iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len)); | 1686 | iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len)); |
1709 | 1687 | ||
1710 | dma_sync_single_for_device(trans->dev, txcmd_phys, firstlen, | ||
1711 | DMA_BIDIRECTIONAL); | ||
1712 | |||
1713 | trace_iwlwifi_dev_tx(trans->dev, skb, | 1688 | trace_iwlwifi_dev_tx(trans->dev, skb, |
1714 | &txq->tfds[txq->q.write_ptr], | 1689 | &txq->tfds[txq->q.write_ptr], |
1715 | sizeof(struct iwl_tfd), | 1690 | sizeof(struct iwl_tfd), |
1716 | &dev_cmd->hdr, firstlen, | 1691 | &dev_cmd->hdr, IWL_HCMD_SCRATCHBUF_SIZE + tb1_len, |
1717 | skb->data + hdr_len, secondlen); | 1692 | skb->data + hdr_len, tb2_len); |
1718 | trace_iwlwifi_dev_tx_data(trans->dev, skb, | 1693 | trace_iwlwifi_dev_tx_data(trans->dev, skb, |
1719 | skb->data + hdr_len, secondlen); | 1694 | skb->data + hdr_len, tb2_len); |
1695 | |||
1696 | if (!ieee80211_has_morefrags(fc)) { | ||
1697 | txq->need_update = 1; | ||
1698 | } else { | ||
1699 | wait_write_ptr = 1; | ||
1700 | txq->need_update = 0; | ||
1701 | } | ||
1720 | 1702 | ||
1721 | /* start timer if queue currently empty */ | 1703 | /* start timer if queue currently empty */ |
1722 | if (txq->need_update && q->read_ptr == q->write_ptr && | 1704 | if (txq->need_update && q->read_ptr == q->write_ptr && |
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c index d5f1d3fd4b28..314c73ed418f 100644 --- a/net/bridge/br_device.c +++ b/net/bridge/br_device.c | |||
@@ -66,7 +66,7 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev) | |||
66 | goto out; | 66 | goto out; |
67 | } | 67 | } |
68 | 68 | ||
69 | mdst = br_mdb_get(br, skb); | 69 | mdst = br_mdb_get(br, skb, vid); |
70 | if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) | 70 | if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) |
71 | br_multicast_deliver(mdst, skb); | 71 | br_multicast_deliver(mdst, skb); |
72 | else | 72 | else |
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c index 480330151898..828e2bcc1f52 100644 --- a/net/bridge/br_input.c +++ b/net/bridge/br_input.c | |||
@@ -97,7 +97,7 @@ int br_handle_frame_finish(struct sk_buff *skb) | |||
97 | if (is_broadcast_ether_addr(dest)) | 97 | if (is_broadcast_ether_addr(dest)) |
98 | skb2 = skb; | 98 | skb2 = skb; |
99 | else if (is_multicast_ether_addr(dest)) { | 99 | else if (is_multicast_ether_addr(dest)) { |
100 | mdst = br_mdb_get(br, skb); | 100 | mdst = br_mdb_get(br, skb, vid); |
101 | if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) { | 101 | if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) { |
102 | if ((mdst && mdst->mglist) || | 102 | if ((mdst && mdst->mglist) || |
103 | br_multicast_is_router(br)) | 103 | br_multicast_is_router(br)) |
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c index 9f97b850fc65..ee79f3f20383 100644 --- a/net/bridge/br_mdb.c +++ b/net/bridge/br_mdb.c | |||
@@ -80,6 +80,7 @@ static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb, | |||
80 | port = p->port; | 80 | port = p->port; |
81 | if (port) { | 81 | if (port) { |
82 | struct br_mdb_entry e; | 82 | struct br_mdb_entry e; |
83 | memset(&e, 0, sizeof(e)); | ||
83 | e.ifindex = port->dev->ifindex; | 84 | e.ifindex = port->dev->ifindex; |
84 | e.state = p->state; | 85 | e.state = p->state; |
85 | if (p->addr.proto == htons(ETH_P_IP)) | 86 | if (p->addr.proto == htons(ETH_P_IP)) |
@@ -136,6 +137,7 @@ static int br_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
136 | break; | 137 | break; |
137 | 138 | ||
138 | bpm = nlmsg_data(nlh); | 139 | bpm = nlmsg_data(nlh); |
140 | memset(bpm, 0, sizeof(*bpm)); | ||
139 | bpm->ifindex = dev->ifindex; | 141 | bpm->ifindex = dev->ifindex; |
140 | if (br_mdb_fill_info(skb, cb, dev) < 0) | 142 | if (br_mdb_fill_info(skb, cb, dev) < 0) |
141 | goto out; | 143 | goto out; |
@@ -171,6 +173,7 @@ static int nlmsg_populate_mdb_fill(struct sk_buff *skb, | |||
171 | return -EMSGSIZE; | 173 | return -EMSGSIZE; |
172 | 174 | ||
173 | bpm = nlmsg_data(nlh); | 175 | bpm = nlmsg_data(nlh); |
176 | memset(bpm, 0, sizeof(*bpm)); | ||
174 | bpm->family = AF_BRIDGE; | 177 | bpm->family = AF_BRIDGE; |
175 | bpm->ifindex = dev->ifindex; | 178 | bpm->ifindex = dev->ifindex; |
176 | nest = nla_nest_start(skb, MDBA_MDB); | 179 | nest = nla_nest_start(skb, MDBA_MDB); |
@@ -228,6 +231,7 @@ void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port, | |||
228 | { | 231 | { |
229 | struct br_mdb_entry entry; | 232 | struct br_mdb_entry entry; |
230 | 233 | ||
234 | memset(&entry, 0, sizeof(entry)); | ||
231 | entry.ifindex = port->dev->ifindex; | 235 | entry.ifindex = port->dev->ifindex; |
232 | entry.addr.proto = group->proto; | 236 | entry.addr.proto = group->proto; |
233 | entry.addr.u.ip4 = group->u.ip4; | 237 | entry.addr.u.ip4 = group->u.ip4; |
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 10e6fce1bb62..923fbeaf7afd 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c | |||
@@ -132,7 +132,7 @@ static struct net_bridge_mdb_entry *br_mdb_ip6_get( | |||
132 | #endif | 132 | #endif |
133 | 133 | ||
134 | struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br, | 134 | struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br, |
135 | struct sk_buff *skb) | 135 | struct sk_buff *skb, u16 vid) |
136 | { | 136 | { |
137 | struct net_bridge_mdb_htable *mdb = rcu_dereference(br->mdb); | 137 | struct net_bridge_mdb_htable *mdb = rcu_dereference(br->mdb); |
138 | struct br_ip ip; | 138 | struct br_ip ip; |
@@ -144,6 +144,7 @@ struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br, | |||
144 | return NULL; | 144 | return NULL; |
145 | 145 | ||
146 | ip.proto = skb->protocol; | 146 | ip.proto = skb->protocol; |
147 | ip.vid = vid; | ||
147 | 148 | ||
148 | switch (skb->protocol) { | 149 | switch (skb->protocol) { |
149 | case htons(ETH_P_IP): | 150 | case htons(ETH_P_IP): |
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 6d314c4e6bcb..3cbf5beb3d4b 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h | |||
@@ -442,7 +442,7 @@ extern int br_multicast_rcv(struct net_bridge *br, | |||
442 | struct net_bridge_port *port, | 442 | struct net_bridge_port *port, |
443 | struct sk_buff *skb); | 443 | struct sk_buff *skb); |
444 | extern struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br, | 444 | extern struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br, |
445 | struct sk_buff *skb); | 445 | struct sk_buff *skb, u16 vid); |
446 | extern void br_multicast_add_port(struct net_bridge_port *port); | 446 | extern void br_multicast_add_port(struct net_bridge_port *port); |
447 | extern void br_multicast_del_port(struct net_bridge_port *port); | 447 | extern void br_multicast_del_port(struct net_bridge_port *port); |
448 | extern void br_multicast_enable_port(struct net_bridge_port *port); | 448 | extern void br_multicast_enable_port(struct net_bridge_port *port); |
@@ -504,7 +504,7 @@ static inline int br_multicast_rcv(struct net_bridge *br, | |||
504 | } | 504 | } |
505 | 505 | ||
506 | static inline struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br, | 506 | static inline struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br, |
507 | struct sk_buff *skb) | 507 | struct sk_buff *skb, u16 vid) |
508 | { | 508 | { |
509 | return NULL; | 509 | return NULL; |
510 | } | 510 | } |
diff --git a/net/core/dev.c b/net/core/dev.c index a06a7a58dd11..dffbef70cd31 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -3444,6 +3444,7 @@ ncls: | |||
3444 | } | 3444 | } |
3445 | switch (rx_handler(&skb)) { | 3445 | switch (rx_handler(&skb)) { |
3446 | case RX_HANDLER_CONSUMED: | 3446 | case RX_HANDLER_CONSUMED: |
3447 | ret = NET_RX_SUCCESS; | ||
3447 | goto unlock; | 3448 | goto unlock; |
3448 | case RX_HANDLER_ANOTHER: | 3449 | case RX_HANDLER_ANOTHER: |
3449 | goto another_round; | 3450 | goto another_round; |
@@ -4103,7 +4104,7 @@ static void net_rx_action(struct softirq_action *h) | |||
4103 | * Allow this to run for 2 jiffies since which will allow | 4104 | * Allow this to run for 2 jiffies since which will allow |
4104 | * an average latency of 1.5/HZ. | 4105 | * an average latency of 1.5/HZ. |
4105 | */ | 4106 | */ |
4106 | if (unlikely(budget <= 0 || time_after(jiffies, time_limit))) | 4107 | if (unlikely(budget <= 0 || time_after_eq(jiffies, time_limit))) |
4107 | goto softnet_break; | 4108 | goto softnet_break; |
4108 | 4109 | ||
4109 | local_irq_enable(); | 4110 | local_irq_enable(); |
@@ -4780,7 +4781,7 @@ EXPORT_SYMBOL(dev_set_mac_address); | |||
4780 | /** | 4781 | /** |
4781 | * dev_change_carrier - Change device carrier | 4782 | * dev_change_carrier - Change device carrier |
4782 | * @dev: device | 4783 | * @dev: device |
4783 | * @new_carries: new value | 4784 | * @new_carrier: new value |
4784 | * | 4785 | * |
4785 | * Change device carrier | 4786 | * Change device carrier |
4786 | */ | 4787 | */ |
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index b376410ff259..a585d45cc9d9 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c | |||
@@ -979,6 +979,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, | |||
979 | * report anything. | 979 | * report anything. |
980 | */ | 980 | */ |
981 | ivi.spoofchk = -1; | 981 | ivi.spoofchk = -1; |
982 | memset(ivi.mac, 0, sizeof(ivi.mac)); | ||
982 | if (dev->netdev_ops->ndo_get_vf_config(dev, i, &ivi)) | 983 | if (dev->netdev_ops->ndo_get_vf_config(dev, i, &ivi)) |
983 | break; | 984 | break; |
984 | vf_mac.vf = | 985 | vf_mac.vf = |
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c index 1b588e23cf80..21291f1abcd6 100644 --- a/net/dcb/dcbnl.c +++ b/net/dcb/dcbnl.c | |||
@@ -284,6 +284,7 @@ static int dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlmsghdr *nlh, | |||
284 | if (!netdev->dcbnl_ops->getpermhwaddr) | 284 | if (!netdev->dcbnl_ops->getpermhwaddr) |
285 | return -EOPNOTSUPP; | 285 | return -EOPNOTSUPP; |
286 | 286 | ||
287 | memset(perm_addr, 0, sizeof(perm_addr)); | ||
287 | netdev->dcbnl_ops->getpermhwaddr(netdev, perm_addr); | 288 | netdev->dcbnl_ops->getpermhwaddr(netdev, perm_addr); |
288 | 289 | ||
289 | return nla_put(skb, DCB_ATTR_PERM_HWADDR, sizeof(perm_addr), perm_addr); | 290 | return nla_put(skb, DCB_ATTR_PERM_HWADDR, sizeof(perm_addr), perm_addr); |
@@ -1042,6 +1043,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev) | |||
1042 | 1043 | ||
1043 | if (ops->ieee_getets) { | 1044 | if (ops->ieee_getets) { |
1044 | struct ieee_ets ets; | 1045 | struct ieee_ets ets; |
1046 | memset(&ets, 0, sizeof(ets)); | ||
1045 | err = ops->ieee_getets(netdev, &ets); | 1047 | err = ops->ieee_getets(netdev, &ets); |
1046 | if (!err && | 1048 | if (!err && |
1047 | nla_put(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets)) | 1049 | nla_put(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets)) |
@@ -1050,6 +1052,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev) | |||
1050 | 1052 | ||
1051 | if (ops->ieee_getmaxrate) { | 1053 | if (ops->ieee_getmaxrate) { |
1052 | struct ieee_maxrate maxrate; | 1054 | struct ieee_maxrate maxrate; |
1055 | memset(&maxrate, 0, sizeof(maxrate)); | ||
1053 | err = ops->ieee_getmaxrate(netdev, &maxrate); | 1056 | err = ops->ieee_getmaxrate(netdev, &maxrate); |
1054 | if (!err) { | 1057 | if (!err) { |
1055 | err = nla_put(skb, DCB_ATTR_IEEE_MAXRATE, | 1058 | err = nla_put(skb, DCB_ATTR_IEEE_MAXRATE, |
@@ -1061,6 +1064,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev) | |||
1061 | 1064 | ||
1062 | if (ops->ieee_getpfc) { | 1065 | if (ops->ieee_getpfc) { |
1063 | struct ieee_pfc pfc; | 1066 | struct ieee_pfc pfc; |
1067 | memset(&pfc, 0, sizeof(pfc)); | ||
1064 | err = ops->ieee_getpfc(netdev, &pfc); | 1068 | err = ops->ieee_getpfc(netdev, &pfc); |
1065 | if (!err && | 1069 | if (!err && |
1066 | nla_put(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc)) | 1070 | nla_put(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc)) |
@@ -1094,6 +1098,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev) | |||
1094 | /* get peer info if available */ | 1098 | /* get peer info if available */ |
1095 | if (ops->ieee_peer_getets) { | 1099 | if (ops->ieee_peer_getets) { |
1096 | struct ieee_ets ets; | 1100 | struct ieee_ets ets; |
1101 | memset(&ets, 0, sizeof(ets)); | ||
1097 | err = ops->ieee_peer_getets(netdev, &ets); | 1102 | err = ops->ieee_peer_getets(netdev, &ets); |
1098 | if (!err && | 1103 | if (!err && |
1099 | nla_put(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets)) | 1104 | nla_put(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets)) |
@@ -1102,6 +1107,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev) | |||
1102 | 1107 | ||
1103 | if (ops->ieee_peer_getpfc) { | 1108 | if (ops->ieee_peer_getpfc) { |
1104 | struct ieee_pfc pfc; | 1109 | struct ieee_pfc pfc; |
1110 | memset(&pfc, 0, sizeof(pfc)); | ||
1105 | err = ops->ieee_peer_getpfc(netdev, &pfc); | 1111 | err = ops->ieee_peer_getpfc(netdev, &pfc); |
1106 | if (!err && | 1112 | if (!err && |
1107 | nla_put(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc)) | 1113 | nla_put(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc)) |
@@ -1280,6 +1286,7 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev) | |||
1280 | /* peer info if available */ | 1286 | /* peer info if available */ |
1281 | if (ops->cee_peer_getpg) { | 1287 | if (ops->cee_peer_getpg) { |
1282 | struct cee_pg pg; | 1288 | struct cee_pg pg; |
1289 | memset(&pg, 0, sizeof(pg)); | ||
1283 | err = ops->cee_peer_getpg(netdev, &pg); | 1290 | err = ops->cee_peer_getpg(netdev, &pg); |
1284 | if (!err && | 1291 | if (!err && |
1285 | nla_put(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg)) | 1292 | nla_put(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg)) |
@@ -1288,6 +1295,7 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev) | |||
1288 | 1295 | ||
1289 | if (ops->cee_peer_getpfc) { | 1296 | if (ops->cee_peer_getpfc) { |
1290 | struct cee_pfc pfc; | 1297 | struct cee_pfc pfc; |
1298 | memset(&pfc, 0, sizeof(pfc)); | ||
1291 | err = ops->cee_peer_getpfc(netdev, &pfc); | 1299 | err = ops->cee_peer_getpfc(netdev, &pfc); |
1292 | if (!err && | 1300 | if (!err && |
1293 | nla_put(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc)) | 1301 | nla_put(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc)) |
diff --git a/net/ieee802154/6lowpan.h b/net/ieee802154/6lowpan.h index 8c2251fb0a3f..bba5f8336317 100644 --- a/net/ieee802154/6lowpan.h +++ b/net/ieee802154/6lowpan.h | |||
@@ -84,7 +84,7 @@ | |||
84 | (memcmp(addr1, addr2, length >> 3) == 0) | 84 | (memcmp(addr1, addr2, length >> 3) == 0) |
85 | 85 | ||
86 | /* local link, i.e. FE80::/10 */ | 86 | /* local link, i.e. FE80::/10 */ |
87 | #define is_addr_link_local(a) (((a)->s6_addr16[0]) == 0x80FE) | 87 | #define is_addr_link_local(a) (((a)->s6_addr16[0]) == htons(0xFE80)) |
88 | 88 | ||
89 | /* | 89 | /* |
90 | * check whether we can compress the IID to 16 bits, | 90 | * check whether we can compress the IID to 16 bits, |
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 7d1874be1df3..786d97aee751 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c | |||
@@ -735,6 +735,7 @@ EXPORT_SYMBOL(inet_csk_destroy_sock); | |||
735 | * tcp/dccp_create_openreq_child(). | 735 | * tcp/dccp_create_openreq_child(). |
736 | */ | 736 | */ |
737 | void inet_csk_prepare_forced_close(struct sock *sk) | 737 | void inet_csk_prepare_forced_close(struct sock *sk) |
738 | __releases(&sk->sk_lock.slock) | ||
738 | { | 739 | { |
739 | /* sk_clone_lock locked the socket and set refcnt to 2 */ | 740 | /* sk_clone_lock locked the socket and set refcnt to 2 */ |
740 | bh_unlock_sock(sk); | 741 | bh_unlock_sock(sk); |
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c index f6289bf6f332..310a3647c83d 100644 --- a/net/ipv4/ip_options.c +++ b/net/ipv4/ip_options.c | |||
@@ -423,7 +423,7 @@ int ip_options_compile(struct net *net, | |||
423 | put_unaligned_be32(midtime, timeptr); | 423 | put_unaligned_be32(midtime, timeptr); |
424 | opt->is_changed = 1; | 424 | opt->is_changed = 1; |
425 | } | 425 | } |
426 | } else { | 426 | } else if ((optptr[3]&0xF) != IPOPT_TS_PRESPEC) { |
427 | unsigned int overflow = optptr[3]>>4; | 427 | unsigned int overflow = optptr[3]>>4; |
428 | if (overflow == 15) { | 428 | if (overflow == 15) { |
429 | pp_ptr = optptr + 3; | 429 | pp_ptr = optptr + 3; |
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c index b1876e52091e..e33fe0ab2568 100644 --- a/net/ipv6/ip6_input.c +++ b/net/ipv6/ip6_input.c | |||
@@ -281,7 +281,8 @@ int ip6_mc_input(struct sk_buff *skb) | |||
281 | * IPv6 multicast router mode is now supported ;) | 281 | * IPv6 multicast router mode is now supported ;) |
282 | */ | 282 | */ |
283 | if (dev_net(skb->dev)->ipv6.devconf_all->mc_forwarding && | 283 | if (dev_net(skb->dev)->ipv6.devconf_all->mc_forwarding && |
284 | !(ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) && | 284 | !(ipv6_addr_type(&hdr->daddr) & |
285 | (IPV6_ADDR_LOOPBACK|IPV6_ADDR_LINKLOCAL)) && | ||
285 | likely(!(IP6CB(skb)->flags & IP6SKB_FORWARDED))) { | 286 | likely(!(IP6CB(skb)->flags & IP6SKB_FORWARDED))) { |
286 | /* | 287 | /* |
287 | * Okay, we try to forward - split and duplicate | 288 | * Okay, we try to forward - split and duplicate |
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c index 9a5fd3c3e530..362ba47968e4 100644 --- a/net/irda/ircomm/ircomm_tty.c +++ b/net/irda/ircomm/ircomm_tty.c | |||
@@ -280,7 +280,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self, | |||
280 | struct tty_port *port = &self->port; | 280 | struct tty_port *port = &self->port; |
281 | DECLARE_WAITQUEUE(wait, current); | 281 | DECLARE_WAITQUEUE(wait, current); |
282 | int retval; | 282 | int retval; |
283 | int do_clocal = 0, extra_count = 0; | 283 | int do_clocal = 0; |
284 | unsigned long flags; | 284 | unsigned long flags; |
285 | 285 | ||
286 | IRDA_DEBUG(2, "%s()\n", __func__ ); | 286 | IRDA_DEBUG(2, "%s()\n", __func__ ); |
@@ -289,8 +289,15 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self, | |||
289 | * If non-blocking mode is set, or the port is not enabled, | 289 | * If non-blocking mode is set, or the port is not enabled, |
290 | * then make the check up front and then exit. | 290 | * then make the check up front and then exit. |
291 | */ | 291 | */ |
292 | if (filp->f_flags & O_NONBLOCK || tty->flags & (1 << TTY_IO_ERROR)){ | 292 | if (test_bit(TTY_IO_ERROR, &tty->flags)) { |
293 | /* nonblock mode is set or port is not enabled */ | 293 | port->flags |= ASYNC_NORMAL_ACTIVE; |
294 | return 0; | ||
295 | } | ||
296 | |||
297 | if (filp->f_flags & O_NONBLOCK) { | ||
298 | /* nonblock mode is set */ | ||
299 | if (tty->termios.c_cflag & CBAUD) | ||
300 | tty_port_raise_dtr_rts(port); | ||
294 | port->flags |= ASYNC_NORMAL_ACTIVE; | 301 | port->flags |= ASYNC_NORMAL_ACTIVE; |
295 | IRDA_DEBUG(1, "%s(), O_NONBLOCK requested!\n", __func__ ); | 302 | IRDA_DEBUG(1, "%s(), O_NONBLOCK requested!\n", __func__ ); |
296 | return 0; | 303 | return 0; |
@@ -315,18 +322,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self, | |||
315 | __FILE__, __LINE__, tty->driver->name, port->count); | 322 | __FILE__, __LINE__, tty->driver->name, port->count); |
316 | 323 | ||
317 | spin_lock_irqsave(&port->lock, flags); | 324 | spin_lock_irqsave(&port->lock, flags); |
318 | if (!tty_hung_up_p(filp)) { | 325 | if (!tty_hung_up_p(filp)) |
319 | extra_count = 1; | ||
320 | port->count--; | 326 | port->count--; |
321 | } | ||
322 | spin_unlock_irqrestore(&port->lock, flags); | ||
323 | port->blocked_open++; | 327 | port->blocked_open++; |
328 | spin_unlock_irqrestore(&port->lock, flags); | ||
324 | 329 | ||
325 | while (1) { | 330 | while (1) { |
326 | if (tty->termios.c_cflag & CBAUD) | 331 | if (tty->termios.c_cflag & CBAUD) |
327 | tty_port_raise_dtr_rts(port); | 332 | tty_port_raise_dtr_rts(port); |
328 | 333 | ||
329 | current->state = TASK_INTERRUPTIBLE; | 334 | set_current_state(TASK_INTERRUPTIBLE); |
330 | 335 | ||
331 | if (tty_hung_up_p(filp) || | 336 | if (tty_hung_up_p(filp) || |
332 | !test_bit(ASYNCB_INITIALIZED, &port->flags)) { | 337 | !test_bit(ASYNCB_INITIALIZED, &port->flags)) { |
@@ -361,13 +366,11 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self, | |||
361 | __set_current_state(TASK_RUNNING); | 366 | __set_current_state(TASK_RUNNING); |
362 | remove_wait_queue(&port->open_wait, &wait); | 367 | remove_wait_queue(&port->open_wait, &wait); |
363 | 368 | ||
364 | if (extra_count) { | 369 | spin_lock_irqsave(&port->lock, flags); |
365 | /* ++ is not atomic, so this should be protected - Jean II */ | 370 | if (!tty_hung_up_p(filp)) |
366 | spin_lock_irqsave(&port->lock, flags); | ||
367 | port->count++; | 371 | port->count++; |
368 | spin_unlock_irqrestore(&port->lock, flags); | ||
369 | } | ||
370 | port->blocked_open--; | 372 | port->blocked_open--; |
373 | spin_unlock_irqrestore(&port->lock, flags); | ||
371 | 374 | ||
372 | IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n", | 375 | IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n", |
373 | __FILE__, __LINE__, tty->driver->name, port->count); | 376 | __FILE__, __LINE__, tty->driver->name, port->count); |
diff --git a/net/key/af_key.c b/net/key/af_key.c index 556fdafdd1ea..8555f331ea60 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c | |||
@@ -2201,7 +2201,7 @@ static int pfkey_spdadd(struct sock *sk, struct sk_buff *skb, const struct sadb_ | |||
2201 | XFRM_POLICY_BLOCK : XFRM_POLICY_ALLOW); | 2201 | XFRM_POLICY_BLOCK : XFRM_POLICY_ALLOW); |
2202 | xp->priority = pol->sadb_x_policy_priority; | 2202 | xp->priority = pol->sadb_x_policy_priority; |
2203 | 2203 | ||
2204 | sa = ext_hdrs[SADB_EXT_ADDRESS_SRC-1], | 2204 | sa = ext_hdrs[SADB_EXT_ADDRESS_SRC-1]; |
2205 | xp->family = pfkey_sadb_addr2xfrm_addr(sa, &xp->selector.saddr); | 2205 | xp->family = pfkey_sadb_addr2xfrm_addr(sa, &xp->selector.saddr); |
2206 | if (!xp->family) { | 2206 | if (!xp->family) { |
2207 | err = -EINVAL; | 2207 | err = -EINVAL; |
@@ -2214,7 +2214,7 @@ static int pfkey_spdadd(struct sock *sk, struct sk_buff *skb, const struct sadb_ | |||
2214 | if (xp->selector.sport) | 2214 | if (xp->selector.sport) |
2215 | xp->selector.sport_mask = htons(0xffff); | 2215 | xp->selector.sport_mask = htons(0xffff); |
2216 | 2216 | ||
2217 | sa = ext_hdrs[SADB_EXT_ADDRESS_DST-1], | 2217 | sa = ext_hdrs[SADB_EXT_ADDRESS_DST-1]; |
2218 | pfkey_sadb_addr2xfrm_addr(sa, &xp->selector.daddr); | 2218 | pfkey_sadb_addr2xfrm_addr(sa, &xp->selector.daddr); |
2219 | xp->selector.prefixlen_d = sa->sadb_address_prefixlen; | 2219 | xp->selector.prefixlen_d = sa->sadb_address_prefixlen; |
2220 | 2220 | ||
@@ -2315,7 +2315,7 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, const struct sa | |||
2315 | 2315 | ||
2316 | memset(&sel, 0, sizeof(sel)); | 2316 | memset(&sel, 0, sizeof(sel)); |
2317 | 2317 | ||
2318 | sa = ext_hdrs[SADB_EXT_ADDRESS_SRC-1], | 2318 | sa = ext_hdrs[SADB_EXT_ADDRESS_SRC-1]; |
2319 | sel.family = pfkey_sadb_addr2xfrm_addr(sa, &sel.saddr); | 2319 | sel.family = pfkey_sadb_addr2xfrm_addr(sa, &sel.saddr); |
2320 | sel.prefixlen_s = sa->sadb_address_prefixlen; | 2320 | sel.prefixlen_s = sa->sadb_address_prefixlen; |
2321 | sel.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto); | 2321 | sel.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto); |
@@ -2323,7 +2323,7 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, const struct sa | |||
2323 | if (sel.sport) | 2323 | if (sel.sport) |
2324 | sel.sport_mask = htons(0xffff); | 2324 | sel.sport_mask = htons(0xffff); |
2325 | 2325 | ||
2326 | sa = ext_hdrs[SADB_EXT_ADDRESS_DST-1], | 2326 | sa = ext_hdrs[SADB_EXT_ADDRESS_DST-1]; |
2327 | pfkey_sadb_addr2xfrm_addr(sa, &sel.daddr); | 2327 | pfkey_sadb_addr2xfrm_addr(sa, &sel.daddr); |
2328 | sel.prefixlen_d = sa->sadb_address_prefixlen; | 2328 | sel.prefixlen_d = sa->sadb_address_prefixlen; |
2329 | sel.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto); | 2329 | sel.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto); |
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 808f5fcd1ced..fb306814576a 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c | |||
@@ -3290,14 +3290,19 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy, | |||
3290 | int ret = -ENODATA; | 3290 | int ret = -ENODATA; |
3291 | 3291 | ||
3292 | rcu_read_lock(); | 3292 | rcu_read_lock(); |
3293 | if (local->use_chanctx) { | 3293 | chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); |
3294 | chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); | 3294 | if (chanctx_conf) { |
3295 | if (chanctx_conf) { | 3295 | *chandef = chanctx_conf->def; |
3296 | *chandef = chanctx_conf->def; | 3296 | ret = 0; |
3297 | ret = 0; | 3297 | } else if (local->open_count > 0 && |
3298 | } | 3298 | local->open_count == local->monitors && |
3299 | } else if (local->open_count == local->monitors) { | 3299 | sdata->vif.type == NL80211_IFTYPE_MONITOR) { |
3300 | *chandef = local->monitor_chandef; | 3300 | if (local->use_chanctx) |
3301 | *chandef = local->monitor_chandef; | ||
3302 | else | ||
3303 | cfg80211_chandef_create(chandef, | ||
3304 | local->_oper_channel, | ||
3305 | local->_oper_channel_type); | ||
3301 | ret = 0; | 3306 | ret = 0; |
3302 | } | 3307 | } |
3303 | rcu_read_unlock(); | 3308 | rcu_read_unlock(); |
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 640afab304d7..baaa8608e52d 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c | |||
@@ -541,6 +541,9 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up) | |||
541 | 541 | ||
542 | ieee80211_adjust_monitor_flags(sdata, 1); | 542 | ieee80211_adjust_monitor_flags(sdata, 1); |
543 | ieee80211_configure_filter(local); | 543 | ieee80211_configure_filter(local); |
544 | mutex_lock(&local->mtx); | ||
545 | ieee80211_recalc_idle(local); | ||
546 | mutex_unlock(&local->mtx); | ||
544 | 547 | ||
545 | netif_carrier_on(dev); | 548 | netif_carrier_on(dev); |
546 | break; | 549 | break; |
@@ -812,6 +815,9 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, | |||
812 | 815 | ||
813 | ieee80211_adjust_monitor_flags(sdata, -1); | 816 | ieee80211_adjust_monitor_flags(sdata, -1); |
814 | ieee80211_configure_filter(local); | 817 | ieee80211_configure_filter(local); |
818 | mutex_lock(&local->mtx); | ||
819 | ieee80211_recalc_idle(local); | ||
820 | mutex_unlock(&local->mtx); | ||
815 | break; | 821 | break; |
816 | case NL80211_IFTYPE_P2P_DEVICE: | 822 | case NL80211_IFTYPE_P2P_DEVICE: |
817 | /* relies on synchronize_rcu() below */ | 823 | /* relies on synchronize_rcu() below */ |
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 9f6464f3e05f..141577412d84 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c | |||
@@ -647,6 +647,9 @@ static void ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata, | |||
647 | our_mcs = (le16_to_cpu(vht_cap.vht_mcs.rx_mcs_map) & | 647 | our_mcs = (le16_to_cpu(vht_cap.vht_mcs.rx_mcs_map) & |
648 | mask) >> shift; | 648 | mask) >> shift; |
649 | 649 | ||
650 | if (our_mcs == IEEE80211_VHT_MCS_NOT_SUPPORTED) | ||
651 | continue; | ||
652 | |||
650 | switch (ap_mcs) { | 653 | switch (ap_mcs) { |
651 | default: | 654 | default: |
652 | if (our_mcs <= ap_mcs) | 655 | if (our_mcs <= ap_mcs) |
@@ -3503,6 +3506,14 @@ void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata) | |||
3503 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; | 3506 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; |
3504 | 3507 | ||
3505 | /* | 3508 | /* |
3509 | * Stop timers before deleting work items, as timers | ||
3510 | * could race and re-add the work-items. They will be | ||
3511 | * re-established on connection. | ||
3512 | */ | ||
3513 | del_timer_sync(&ifmgd->conn_mon_timer); | ||
3514 | del_timer_sync(&ifmgd->bcn_mon_timer); | ||
3515 | |||
3516 | /* | ||
3506 | * we need to use atomic bitops for the running bits | 3517 | * we need to use atomic bitops for the running bits |
3507 | * only because both timers might fire at the same | 3518 | * only because both timers might fire at the same |
3508 | * time -- the code here is properly synchronised. | 3519 | * time -- the code here is properly synchronised. |
@@ -3516,13 +3527,9 @@ void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata) | |||
3516 | if (del_timer_sync(&ifmgd->timer)) | 3527 | if (del_timer_sync(&ifmgd->timer)) |
3517 | set_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running); | 3528 | set_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running); |
3518 | 3529 | ||
3519 | cancel_work_sync(&ifmgd->chswitch_work); | ||
3520 | if (del_timer_sync(&ifmgd->chswitch_timer)) | 3530 | if (del_timer_sync(&ifmgd->chswitch_timer)) |
3521 | set_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running); | 3531 | set_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running); |
3522 | 3532 | cancel_work_sync(&ifmgd->chswitch_work); | |
3523 | /* these will just be re-established on connection */ | ||
3524 | del_timer_sync(&ifmgd->conn_mon_timer); | ||
3525 | del_timer_sync(&ifmgd->bcn_mon_timer); | ||
3526 | } | 3533 | } |
3527 | 3534 | ||
3528 | void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata) | 3535 | void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata) |
@@ -4315,6 +4322,17 @@ void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata) | |||
4315 | { | 4322 | { |
4316 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; | 4323 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; |
4317 | 4324 | ||
4325 | /* | ||
4326 | * Make sure some work items will not run after this, | ||
4327 | * they will not do anything but might not have been | ||
4328 | * cancelled when disconnecting. | ||
4329 | */ | ||
4330 | cancel_work_sync(&ifmgd->monitor_work); | ||
4331 | cancel_work_sync(&ifmgd->beacon_connection_loss_work); | ||
4332 | cancel_work_sync(&ifmgd->request_smps_work); | ||
4333 | cancel_work_sync(&ifmgd->csa_connection_drop_work); | ||
4334 | cancel_work_sync(&ifmgd->chswitch_work); | ||
4335 | |||
4318 | mutex_lock(&ifmgd->mtx); | 4336 | mutex_lock(&ifmgd->mtx); |
4319 | if (ifmgd->assoc_data) | 4337 | if (ifmgd->assoc_data) |
4320 | ieee80211_destroy_assoc_data(sdata, false); | 4338 | ieee80211_destroy_assoc_data(sdata, false); |
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index ce78d1149f1d..8914d2d2881a 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c | |||
@@ -2745,7 +2745,8 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw, | |||
2745 | cpu_to_le16(IEEE80211_FCTL_MOREDATA); | 2745 | cpu_to_le16(IEEE80211_FCTL_MOREDATA); |
2746 | } | 2746 | } |
2747 | 2747 | ||
2748 | sdata = IEEE80211_DEV_TO_SUB_IF(skb->dev); | 2748 | if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) |
2749 | sdata = IEEE80211_DEV_TO_SUB_IF(skb->dev); | ||
2749 | if (!ieee80211_tx_prepare(sdata, &tx, skb)) | 2750 | if (!ieee80211_tx_prepare(sdata, &tx, skb)) |
2750 | break; | 2751 | break; |
2751 | dev_kfree_skb_any(skb); | 2752 | dev_kfree_skb_any(skb); |
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c index a9740bd6fe54..94b4b9853f60 100644 --- a/net/netfilter/nf_conntrack_helper.c +++ b/net/netfilter/nf_conntrack_helper.c | |||
@@ -339,6 +339,13 @@ void nf_ct_helper_log(struct sk_buff *skb, const struct nf_conn *ct, | |||
339 | { | 339 | { |
340 | const struct nf_conn_help *help; | 340 | const struct nf_conn_help *help; |
341 | const struct nf_conntrack_helper *helper; | 341 | const struct nf_conntrack_helper *helper; |
342 | struct va_format vaf; | ||
343 | va_list args; | ||
344 | |||
345 | va_start(args, fmt); | ||
346 | |||
347 | vaf.fmt = fmt; | ||
348 | vaf.va = &args; | ||
342 | 349 | ||
343 | /* Called from the helper function, this call never fails */ | 350 | /* Called from the helper function, this call never fails */ |
344 | help = nfct_help(ct); | 351 | help = nfct_help(ct); |
@@ -347,7 +354,9 @@ void nf_ct_helper_log(struct sk_buff *skb, const struct nf_conn *ct, | |||
347 | helper = rcu_dereference(help->helper); | 354 | helper = rcu_dereference(help->helper); |
348 | 355 | ||
349 | nf_log_packet(nf_ct_l3num(ct), 0, skb, NULL, NULL, NULL, | 356 | nf_log_packet(nf_ct_l3num(ct), 0, skb, NULL, NULL, NULL, |
350 | "nf_ct_%s: dropping packet: %s ", helper->name, fmt); | 357 | "nf_ct_%s: dropping packet: %pV ", helper->name, &vaf); |
358 | |||
359 | va_end(args); | ||
351 | } | 360 | } |
352 | EXPORT_SYMBOL_GPL(nf_ct_helper_log); | 361 | EXPORT_SYMBOL_GPL(nf_ct_helper_log); |
353 | 362 | ||
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c index d578ec251712..0b1b32cda307 100644 --- a/net/netfilter/nfnetlink.c +++ b/net/netfilter/nfnetlink.c | |||
@@ -62,11 +62,6 @@ void nfnl_unlock(__u8 subsys_id) | |||
62 | } | 62 | } |
63 | EXPORT_SYMBOL_GPL(nfnl_unlock); | 63 | EXPORT_SYMBOL_GPL(nfnl_unlock); |
64 | 64 | ||
65 | static struct mutex *nfnl_get_lock(__u8 subsys_id) | ||
66 | { | ||
67 | return &table[subsys_id].mutex; | ||
68 | } | ||
69 | |||
70 | int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n) | 65 | int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n) |
71 | { | 66 | { |
72 | nfnl_lock(n->subsys_id); | 67 | nfnl_lock(n->subsys_id); |
@@ -199,7 +194,7 @@ replay: | |||
199 | rcu_read_unlock(); | 194 | rcu_read_unlock(); |
200 | nfnl_lock(subsys_id); | 195 | nfnl_lock(subsys_id); |
201 | if (rcu_dereference_protected(table[subsys_id].subsys, | 196 | if (rcu_dereference_protected(table[subsys_id].subsys, |
202 | lockdep_is_held(nfnl_get_lock(subsys_id))) != ss || | 197 | lockdep_is_held(&table[subsys_id].mutex)) != ss || |
203 | nfnetlink_find_client(type, ss) != nc) | 198 | nfnetlink_find_client(type, ss) != nc) |
204 | err = -EAGAIN; | 199 | err = -EAGAIN; |
205 | else if (nc->call) | 200 | else if (nc->call) |
diff --git a/net/netfilter/xt_AUDIT.c b/net/netfilter/xt_AUDIT.c index ba92824086f3..3228d7f24eb4 100644 --- a/net/netfilter/xt_AUDIT.c +++ b/net/netfilter/xt_AUDIT.c | |||
@@ -124,6 +124,9 @@ audit_tg(struct sk_buff *skb, const struct xt_action_param *par) | |||
124 | const struct xt_audit_info *info = par->targinfo; | 124 | const struct xt_audit_info *info = par->targinfo; |
125 | struct audit_buffer *ab; | 125 | struct audit_buffer *ab; |
126 | 126 | ||
127 | if (audit_enabled == 0) | ||
128 | goto errout; | ||
129 | |||
127 | ab = audit_log_start(NULL, GFP_ATOMIC, AUDIT_NETFILTER_PKT); | 130 | ab = audit_log_start(NULL, GFP_ATOMIC, AUDIT_NETFILTER_PKT); |
128 | if (ab == NULL) | 131 | if (ab == NULL) |
129 | goto errout; | 132 | goto errout; |
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c index 847d495cd4de..8a6c6ea466d8 100644 --- a/net/netlabel/netlabel_unlabeled.c +++ b/net/netlabel/netlabel_unlabeled.c | |||
@@ -1189,8 +1189,6 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb, | |||
1189 | struct netlbl_unlhsh_walk_arg cb_arg; | 1189 | struct netlbl_unlhsh_walk_arg cb_arg; |
1190 | u32 skip_bkt = cb->args[0]; | 1190 | u32 skip_bkt = cb->args[0]; |
1191 | u32 skip_chain = cb->args[1]; | 1191 | u32 skip_chain = cb->args[1]; |
1192 | u32 skip_addr4 = cb->args[2]; | ||
1193 | u32 skip_addr6 = cb->args[3]; | ||
1194 | u32 iter_bkt; | 1192 | u32 iter_bkt; |
1195 | u32 iter_chain = 0, iter_addr4 = 0, iter_addr6 = 0; | 1193 | u32 iter_chain = 0, iter_addr4 = 0, iter_addr6 = 0; |
1196 | struct netlbl_unlhsh_iface *iface; | 1194 | struct netlbl_unlhsh_iface *iface; |
@@ -1215,7 +1213,7 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb, | |||
1215 | continue; | 1213 | continue; |
1216 | netlbl_af4list_foreach_rcu(addr4, | 1214 | netlbl_af4list_foreach_rcu(addr4, |
1217 | &iface->addr4_list) { | 1215 | &iface->addr4_list) { |
1218 | if (iter_addr4++ < skip_addr4) | 1216 | if (iter_addr4++ < cb->args[2]) |
1219 | continue; | 1217 | continue; |
1220 | if (netlbl_unlabel_staticlist_gen( | 1218 | if (netlbl_unlabel_staticlist_gen( |
1221 | NLBL_UNLABEL_C_STATICLIST, | 1219 | NLBL_UNLABEL_C_STATICLIST, |
@@ -1231,7 +1229,7 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb, | |||
1231 | #if IS_ENABLED(CONFIG_IPV6) | 1229 | #if IS_ENABLED(CONFIG_IPV6) |
1232 | netlbl_af6list_foreach_rcu(addr6, | 1230 | netlbl_af6list_foreach_rcu(addr6, |
1233 | &iface->addr6_list) { | 1231 | &iface->addr6_list) { |
1234 | if (iter_addr6++ < skip_addr6) | 1232 | if (iter_addr6++ < cb->args[3]) |
1235 | continue; | 1233 | continue; |
1236 | if (netlbl_unlabel_staticlist_gen( | 1234 | if (netlbl_unlabel_staticlist_gen( |
1237 | NLBL_UNLABEL_C_STATICLIST, | 1235 | NLBL_UNLABEL_C_STATICLIST, |
@@ -1250,10 +1248,10 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb, | |||
1250 | 1248 | ||
1251 | unlabel_staticlist_return: | 1249 | unlabel_staticlist_return: |
1252 | rcu_read_unlock(); | 1250 | rcu_read_unlock(); |
1253 | cb->args[0] = skip_bkt; | 1251 | cb->args[0] = iter_bkt; |
1254 | cb->args[1] = skip_chain; | 1252 | cb->args[1] = iter_chain; |
1255 | cb->args[2] = skip_addr4; | 1253 | cb->args[2] = iter_addr4; |
1256 | cb->args[3] = skip_addr6; | 1254 | cb->args[3] = iter_addr6; |
1257 | return skb->len; | 1255 | return skb->len; |
1258 | } | 1256 | } |
1259 | 1257 | ||
@@ -1273,12 +1271,9 @@ static int netlbl_unlabel_staticlistdef(struct sk_buff *skb, | |||
1273 | { | 1271 | { |
1274 | struct netlbl_unlhsh_walk_arg cb_arg; | 1272 | struct netlbl_unlhsh_walk_arg cb_arg; |
1275 | struct netlbl_unlhsh_iface *iface; | 1273 | struct netlbl_unlhsh_iface *iface; |
1276 | u32 skip_addr4 = cb->args[0]; | 1274 | u32 iter_addr4 = 0, iter_addr6 = 0; |
1277 | u32 skip_addr6 = cb->args[1]; | ||
1278 | u32 iter_addr4 = 0; | ||
1279 | struct netlbl_af4list *addr4; | 1275 | struct netlbl_af4list *addr4; |
1280 | #if IS_ENABLED(CONFIG_IPV6) | 1276 | #if IS_ENABLED(CONFIG_IPV6) |
1281 | u32 iter_addr6 = 0; | ||
1282 | struct netlbl_af6list *addr6; | 1277 | struct netlbl_af6list *addr6; |
1283 | #endif | 1278 | #endif |
1284 | 1279 | ||
@@ -1292,7 +1287,7 @@ static int netlbl_unlabel_staticlistdef(struct sk_buff *skb, | |||
1292 | goto unlabel_staticlistdef_return; | 1287 | goto unlabel_staticlistdef_return; |
1293 | 1288 | ||
1294 | netlbl_af4list_foreach_rcu(addr4, &iface->addr4_list) { | 1289 | netlbl_af4list_foreach_rcu(addr4, &iface->addr4_list) { |
1295 | if (iter_addr4++ < skip_addr4) | 1290 | if (iter_addr4++ < cb->args[0]) |
1296 | continue; | 1291 | continue; |
1297 | if (netlbl_unlabel_staticlist_gen(NLBL_UNLABEL_C_STATICLISTDEF, | 1292 | if (netlbl_unlabel_staticlist_gen(NLBL_UNLABEL_C_STATICLISTDEF, |
1298 | iface, | 1293 | iface, |
@@ -1305,7 +1300,7 @@ static int netlbl_unlabel_staticlistdef(struct sk_buff *skb, | |||
1305 | } | 1300 | } |
1306 | #if IS_ENABLED(CONFIG_IPV6) | 1301 | #if IS_ENABLED(CONFIG_IPV6) |
1307 | netlbl_af6list_foreach_rcu(addr6, &iface->addr6_list) { | 1302 | netlbl_af6list_foreach_rcu(addr6, &iface->addr6_list) { |
1308 | if (iter_addr6++ < skip_addr6) | 1303 | if (iter_addr6++ < cb->args[1]) |
1309 | continue; | 1304 | continue; |
1310 | if (netlbl_unlabel_staticlist_gen(NLBL_UNLABEL_C_STATICLISTDEF, | 1305 | if (netlbl_unlabel_staticlist_gen(NLBL_UNLABEL_C_STATICLISTDEF, |
1311 | iface, | 1306 | iface, |
@@ -1320,8 +1315,8 @@ static int netlbl_unlabel_staticlistdef(struct sk_buff *skb, | |||
1320 | 1315 | ||
1321 | unlabel_staticlistdef_return: | 1316 | unlabel_staticlistdef_return: |
1322 | rcu_read_unlock(); | 1317 | rcu_read_unlock(); |
1323 | cb->args[0] = skip_addr4; | 1318 | cb->args[0] = iter_addr4; |
1324 | cb->args[1] = skip_addr6; | 1319 | cb->args[1] = iter_addr6; |
1325 | return skb->len; | 1320 | return skb->len; |
1326 | } | 1321 | } |
1327 | 1322 | ||
diff --git a/net/rds/stats.c b/net/rds/stats.c index 7be790d60b90..73be187d389e 100644 --- a/net/rds/stats.c +++ b/net/rds/stats.c | |||
@@ -87,6 +87,7 @@ void rds_stats_info_copy(struct rds_info_iterator *iter, | |||
87 | for (i = 0; i < nr; i++) { | 87 | for (i = 0; i < nr; i++) { |
88 | BUG_ON(strlen(names[i]) >= sizeof(ctr.name)); | 88 | BUG_ON(strlen(names[i]) >= sizeof(ctr.name)); |
89 | strncpy(ctr.name, names[i], sizeof(ctr.name) - 1); | 89 | strncpy(ctr.name, names[i], sizeof(ctr.name) - 1); |
90 | ctr.name[sizeof(ctr.name) - 1] = '\0'; | ||
90 | ctr.value = values[i]; | 91 | ctr.value = values[i]; |
91 | 92 | ||
92 | rds_info_copy(iter, &ctr, sizeof(ctr)); | 93 | rds_info_copy(iter, &ctr, sizeof(ctr)); |
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c index e9a77f621c3d..d51852bba01c 100644 --- a/net/sched/sch_qfq.c +++ b/net/sched/sch_qfq.c | |||
@@ -298,6 +298,10 @@ static void qfq_update_agg(struct qfq_sched *q, struct qfq_aggregate *agg, | |||
298 | new_num_classes == q->max_agg_classes - 1) /* agg no more full */ | 298 | new_num_classes == q->max_agg_classes - 1) /* agg no more full */ |
299 | hlist_add_head(&agg->nonfull_next, &q->nonfull_aggs); | 299 | hlist_add_head(&agg->nonfull_next, &q->nonfull_aggs); |
300 | 300 | ||
301 | /* The next assignment may let | ||
302 | * agg->initial_budget > agg->budgetmax | ||
303 | * hold, we will take it into account in charge_actual_service(). | ||
304 | */ | ||
301 | agg->budgetmax = new_num_classes * agg->lmax; | 305 | agg->budgetmax = new_num_classes * agg->lmax; |
302 | new_agg_weight = agg->class_weight * new_num_classes; | 306 | new_agg_weight = agg->class_weight * new_num_classes; |
303 | agg->inv_w = ONE_FP/new_agg_weight; | 307 | agg->inv_w = ONE_FP/new_agg_weight; |
@@ -817,7 +821,7 @@ static void qfq_make_eligible(struct qfq_sched *q) | |||
817 | unsigned long old_vslot = q->oldV >> q->min_slot_shift; | 821 | unsigned long old_vslot = q->oldV >> q->min_slot_shift; |
818 | 822 | ||
819 | if (vslot != old_vslot) { | 823 | if (vslot != old_vslot) { |
820 | unsigned long mask = (1UL << fls(vslot ^ old_vslot)) - 1; | 824 | unsigned long mask = (1ULL << fls(vslot ^ old_vslot)) - 1; |
821 | qfq_move_groups(q, mask, IR, ER); | 825 | qfq_move_groups(q, mask, IR, ER); |
822 | qfq_move_groups(q, mask, IB, EB); | 826 | qfq_move_groups(q, mask, IB, EB); |
823 | } | 827 | } |
@@ -988,12 +992,23 @@ static inline struct sk_buff *qfq_peek_skb(struct qfq_aggregate *agg, | |||
988 | /* Update F according to the actual service received by the aggregate. */ | 992 | /* Update F according to the actual service received by the aggregate. */ |
989 | static inline void charge_actual_service(struct qfq_aggregate *agg) | 993 | static inline void charge_actual_service(struct qfq_aggregate *agg) |
990 | { | 994 | { |
991 | /* compute the service received by the aggregate */ | 995 | /* Compute the service received by the aggregate, taking into |
992 | u32 service_received = agg->initial_budget - agg->budget; | 996 | * account that, after decreasing the number of classes in |
997 | * agg, it may happen that | ||
998 | * agg->initial_budget - agg->budget > agg->bugdetmax | ||
999 | */ | ||
1000 | u32 service_received = min(agg->budgetmax, | ||
1001 | agg->initial_budget - agg->budget); | ||
993 | 1002 | ||
994 | agg->F = agg->S + (u64)service_received * agg->inv_w; | 1003 | agg->F = agg->S + (u64)service_received * agg->inv_w; |
995 | } | 1004 | } |
996 | 1005 | ||
1006 | static inline void qfq_update_agg_ts(struct qfq_sched *q, | ||
1007 | struct qfq_aggregate *agg, | ||
1008 | enum update_reason reason); | ||
1009 | |||
1010 | static void qfq_schedule_agg(struct qfq_sched *q, struct qfq_aggregate *agg); | ||
1011 | |||
997 | static struct sk_buff *qfq_dequeue(struct Qdisc *sch) | 1012 | static struct sk_buff *qfq_dequeue(struct Qdisc *sch) |
998 | { | 1013 | { |
999 | struct qfq_sched *q = qdisc_priv(sch); | 1014 | struct qfq_sched *q = qdisc_priv(sch); |
@@ -1021,7 +1036,7 @@ static struct sk_buff *qfq_dequeue(struct Qdisc *sch) | |||
1021 | in_serv_agg->initial_budget = in_serv_agg->budget = | 1036 | in_serv_agg->initial_budget = in_serv_agg->budget = |
1022 | in_serv_agg->budgetmax; | 1037 | in_serv_agg->budgetmax; |
1023 | 1038 | ||
1024 | if (!list_empty(&in_serv_agg->active)) | 1039 | if (!list_empty(&in_serv_agg->active)) { |
1025 | /* | 1040 | /* |
1026 | * Still active: reschedule for | 1041 | * Still active: reschedule for |
1027 | * service. Possible optimization: if no other | 1042 | * service. Possible optimization: if no other |
@@ -1032,8 +1047,9 @@ static struct sk_buff *qfq_dequeue(struct Qdisc *sch) | |||
1032 | * handle it, we would need to maintain an | 1047 | * handle it, we would need to maintain an |
1033 | * extra num_active_aggs field. | 1048 | * extra num_active_aggs field. |
1034 | */ | 1049 | */ |
1035 | qfq_activate_agg(q, in_serv_agg, requeue); | 1050 | qfq_update_agg_ts(q, in_serv_agg, requeue); |
1036 | else if (sch->q.qlen == 0) { /* no aggregate to serve */ | 1051 | qfq_schedule_agg(q, in_serv_agg); |
1052 | } else if (sch->q.qlen == 0) { /* no aggregate to serve */ | ||
1037 | q->in_serv_agg = NULL; | 1053 | q->in_serv_agg = NULL; |
1038 | return NULL; | 1054 | return NULL; |
1039 | } | 1055 | } |
@@ -1052,7 +1068,15 @@ static struct sk_buff *qfq_dequeue(struct Qdisc *sch) | |||
1052 | qdisc_bstats_update(sch, skb); | 1068 | qdisc_bstats_update(sch, skb); |
1053 | 1069 | ||
1054 | agg_dequeue(in_serv_agg, cl, len); | 1070 | agg_dequeue(in_serv_agg, cl, len); |
1055 | in_serv_agg->budget -= len; | 1071 | /* If lmax is lowered, through qfq_change_class, for a class |
1072 | * owning pending packets with larger size than the new value | ||
1073 | * of lmax, then the following condition may hold. | ||
1074 | */ | ||
1075 | if (unlikely(in_serv_agg->budget < len)) | ||
1076 | in_serv_agg->budget = 0; | ||
1077 | else | ||
1078 | in_serv_agg->budget -= len; | ||
1079 | |||
1056 | q->V += (u64)len * IWSUM; | 1080 | q->V += (u64)len * IWSUM; |
1057 | pr_debug("qfq dequeue: len %u F %lld now %lld\n", | 1081 | pr_debug("qfq dequeue: len %u F %lld now %lld\n", |
1058 | len, (unsigned long long) in_serv_agg->F, | 1082 | len, (unsigned long long) in_serv_agg->F, |
@@ -1217,17 +1241,11 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
1217 | cl->deficit = agg->lmax; | 1241 | cl->deficit = agg->lmax; |
1218 | list_add_tail(&cl->alist, &agg->active); | 1242 | list_add_tail(&cl->alist, &agg->active); |
1219 | 1243 | ||
1220 | if (list_first_entry(&agg->active, struct qfq_class, alist) != cl) | 1244 | if (list_first_entry(&agg->active, struct qfq_class, alist) != cl || |
1221 | return err; /* aggregate was not empty, nothing else to do */ | 1245 | q->in_serv_agg == agg) |
1246 | return err; /* non-empty or in service, nothing else to do */ | ||
1222 | 1247 | ||
1223 | /* recharge budget */ | 1248 | qfq_activate_agg(q, agg, enqueue); |
1224 | agg->initial_budget = agg->budget = agg->budgetmax; | ||
1225 | |||
1226 | qfq_update_agg_ts(q, agg, enqueue); | ||
1227 | if (q->in_serv_agg == NULL) | ||
1228 | q->in_serv_agg = agg; | ||
1229 | else if (agg != q->in_serv_agg) | ||
1230 | qfq_schedule_agg(q, agg); | ||
1231 | 1249 | ||
1232 | return err; | 1250 | return err; |
1233 | } | 1251 | } |
@@ -1261,7 +1279,8 @@ static void qfq_schedule_agg(struct qfq_sched *q, struct qfq_aggregate *agg) | |||
1261 | /* group was surely ineligible, remove */ | 1279 | /* group was surely ineligible, remove */ |
1262 | __clear_bit(grp->index, &q->bitmaps[IR]); | 1280 | __clear_bit(grp->index, &q->bitmaps[IR]); |
1263 | __clear_bit(grp->index, &q->bitmaps[IB]); | 1281 | __clear_bit(grp->index, &q->bitmaps[IB]); |
1264 | } else if (!q->bitmaps[ER] && qfq_gt(roundedS, q->V)) | 1282 | } else if (!q->bitmaps[ER] && qfq_gt(roundedS, q->V) && |
1283 | q->in_serv_agg == NULL) | ||
1265 | q->V = roundedS; | 1284 | q->V = roundedS; |
1266 | 1285 | ||
1267 | grp->S = roundedS; | 1286 | grp->S = roundedS; |
@@ -1284,8 +1303,15 @@ skip_update: | |||
1284 | static void qfq_activate_agg(struct qfq_sched *q, struct qfq_aggregate *agg, | 1303 | static void qfq_activate_agg(struct qfq_sched *q, struct qfq_aggregate *agg, |
1285 | enum update_reason reason) | 1304 | enum update_reason reason) |
1286 | { | 1305 | { |
1306 | agg->initial_budget = agg->budget = agg->budgetmax; /* recharge budg. */ | ||
1307 | |||
1287 | qfq_update_agg_ts(q, agg, reason); | 1308 | qfq_update_agg_ts(q, agg, reason); |
1288 | qfq_schedule_agg(q, agg); | 1309 | if (q->in_serv_agg == NULL) { /* no aggr. in service or scheduled */ |
1310 | q->in_serv_agg = agg; /* start serving this aggregate */ | ||
1311 | /* update V: to be in service, agg must be eligible */ | ||
1312 | q->oldV = q->V = agg->S; | ||
1313 | } else if (agg != q->in_serv_agg) | ||
1314 | qfq_schedule_agg(q, agg); | ||
1289 | } | 1315 | } |
1290 | 1316 | ||
1291 | static void qfq_slot_remove(struct qfq_sched *q, struct qfq_group *grp, | 1317 | static void qfq_slot_remove(struct qfq_sched *q, struct qfq_group *grp, |
@@ -1357,8 +1383,6 @@ static void qfq_deactivate_agg(struct qfq_sched *q, struct qfq_aggregate *agg) | |||
1357 | __set_bit(grp->index, &q->bitmaps[s]); | 1383 | __set_bit(grp->index, &q->bitmaps[s]); |
1358 | } | 1384 | } |
1359 | } | 1385 | } |
1360 | |||
1361 | qfq_update_eligible(q); | ||
1362 | } | 1386 | } |
1363 | 1387 | ||
1364 | static void qfq_qlen_notify(struct Qdisc *sch, unsigned long arg) | 1388 | static void qfq_qlen_notify(struct Qdisc *sch, unsigned long arg) |
diff --git a/net/wireless/core.c b/net/wireless/core.c index 5ffff039b017..ea4155fe9733 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c | |||
@@ -367,8 +367,7 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv) | |||
367 | rdev->wiphy.rts_threshold = (u32) -1; | 367 | rdev->wiphy.rts_threshold = (u32) -1; |
368 | rdev->wiphy.coverage_class = 0; | 368 | rdev->wiphy.coverage_class = 0; |
369 | 369 | ||
370 | rdev->wiphy.features = NL80211_FEATURE_SCAN_FLUSH | | 370 | rdev->wiphy.features = NL80211_FEATURE_SCAN_FLUSH; |
371 | NL80211_FEATURE_ADVERTISE_CHAN_LIMITS; | ||
372 | 371 | ||
373 | return &rdev->wiphy; | 372 | return &rdev->wiphy; |
374 | } | 373 | } |
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index e652d05ff712..d44ab216c0ec 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
@@ -557,18 +557,6 @@ static int nl80211_msg_put_channel(struct sk_buff *msg, | |||
557 | if ((chan->flags & IEEE80211_CHAN_RADAR) && | 557 | if ((chan->flags & IEEE80211_CHAN_RADAR) && |
558 | nla_put_flag(msg, NL80211_FREQUENCY_ATTR_RADAR)) | 558 | nla_put_flag(msg, NL80211_FREQUENCY_ATTR_RADAR)) |
559 | goto nla_put_failure; | 559 | goto nla_put_failure; |
560 | if ((chan->flags & IEEE80211_CHAN_NO_HT40MINUS) && | ||
561 | nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_HT40_MINUS)) | ||
562 | goto nla_put_failure; | ||
563 | if ((chan->flags & IEEE80211_CHAN_NO_HT40PLUS) && | ||
564 | nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_HT40_PLUS)) | ||
565 | goto nla_put_failure; | ||
566 | if ((chan->flags & IEEE80211_CHAN_NO_80MHZ) && | ||
567 | nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_80MHZ)) | ||
568 | goto nla_put_failure; | ||
569 | if ((chan->flags & IEEE80211_CHAN_NO_160MHZ) && | ||
570 | nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_160MHZ)) | ||
571 | goto nla_put_failure; | ||
572 | 560 | ||
573 | if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER, | 561 | if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER, |
574 | DBM_TO_MBM(chan->max_power))) | 562 | DBM_TO_MBM(chan->max_power))) |
@@ -1310,15 +1298,6 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 portid, u32 seq, int flag | |||
1310 | dev->wiphy.max_acl_mac_addrs)) | 1298 | dev->wiphy.max_acl_mac_addrs)) |
1311 | goto nla_put_failure; | 1299 | goto nla_put_failure; |
1312 | 1300 | ||
1313 | if (dev->wiphy.extended_capabilities && | ||
1314 | (nla_put(msg, NL80211_ATTR_EXT_CAPA, | ||
1315 | dev->wiphy.extended_capabilities_len, | ||
1316 | dev->wiphy.extended_capabilities) || | ||
1317 | nla_put(msg, NL80211_ATTR_EXT_CAPA_MASK, | ||
1318 | dev->wiphy.extended_capabilities_len, | ||
1319 | dev->wiphy.extended_capabilities_mask))) | ||
1320 | goto nla_put_failure; | ||
1321 | |||
1322 | return genlmsg_end(msg, hdr); | 1301 | return genlmsg_end(msg, hdr); |
1323 | 1302 | ||
1324 | nla_put_failure: | 1303 | nla_put_failure: |
@@ -1328,7 +1307,7 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 portid, u32 seq, int flag | |||
1328 | 1307 | ||
1329 | static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb) | 1308 | static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb) |
1330 | { | 1309 | { |
1331 | int idx = 0; | 1310 | int idx = 0, ret; |
1332 | int start = cb->args[0]; | 1311 | int start = cb->args[0]; |
1333 | struct cfg80211_registered_device *dev; | 1312 | struct cfg80211_registered_device *dev; |
1334 | 1313 | ||
@@ -1338,9 +1317,29 @@ static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb) | |||
1338 | continue; | 1317 | continue; |
1339 | if (++idx <= start) | 1318 | if (++idx <= start) |
1340 | continue; | 1319 | continue; |
1341 | if (nl80211_send_wiphy(skb, NETLINK_CB(cb->skb).portid, | 1320 | ret = nl80211_send_wiphy(skb, NETLINK_CB(cb->skb).portid, |
1342 | cb->nlh->nlmsg_seq, NLM_F_MULTI, | 1321 | cb->nlh->nlmsg_seq, NLM_F_MULTI, |
1343 | dev) < 0) { | 1322 | dev); |
1323 | if (ret < 0) { | ||
1324 | /* | ||
1325 | * If sending the wiphy data didn't fit (ENOBUFS or | ||
1326 | * EMSGSIZE returned), this SKB is still empty (so | ||
1327 | * it's not too big because another wiphy dataset is | ||
1328 | * already in the skb) and we've not tried to adjust | ||
1329 | * the dump allocation yet ... then adjust the alloc | ||
1330 | * size to be bigger, and return 1 but with the empty | ||
1331 | * skb. This results in an empty message being RX'ed | ||
1332 | * in userspace, but that is ignored. | ||
1333 | * | ||
1334 | * We can then retry with the larger buffer. | ||
1335 | */ | ||
1336 | if ((ret == -ENOBUFS || ret == -EMSGSIZE) && | ||
1337 | !skb->len && | ||
1338 | cb->min_dump_alloc < 4096) { | ||
1339 | cb->min_dump_alloc = 4096; | ||
1340 | mutex_unlock(&cfg80211_mutex); | ||
1341 | return 1; | ||
1342 | } | ||
1344 | idx--; | 1343 | idx--; |
1345 | break; | 1344 | break; |
1346 | } | 1345 | } |
@@ -1357,7 +1356,7 @@ static int nl80211_get_wiphy(struct sk_buff *skb, struct genl_info *info) | |||
1357 | struct sk_buff *msg; | 1356 | struct sk_buff *msg; |
1358 | struct cfg80211_registered_device *dev = info->user_ptr[0]; | 1357 | struct cfg80211_registered_device *dev = info->user_ptr[0]; |
1359 | 1358 | ||
1360 | msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); | 1359 | msg = nlmsg_new(4096, GFP_KERNEL); |
1361 | if (!msg) | 1360 | if (!msg) |
1362 | return -ENOMEM; | 1361 | return -ENOMEM; |
1363 | 1362 | ||