64 files changed, 1447 insertions(+), 520 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index 47cc449d89d8..34f09e4383ca 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1482,9 +1482,10 @@ M:	Andy Whitcroft <apw@canonical.com>
 S:	Supported
 F:	scripts/checkpatch.pl
 
-CISCO 10G ETHERNET DRIVER
+CISCO VIC ETHERNET NIC DRIVER
 M:	Scott Feldman <scofeldm@cisco.com>
-M:	Joe Eykholt <jeykholt@cisco.com>
+M:	Vasanthy Kolluri <vkolluri@cisco.com>
+M:	Roopa Prabhu <roprabhu@cisco.com>
 S:	Supported
 F:	drivers/net/enic/
 
diff --git a/drivers/media/dvb/dvb-core/dvb_net.c b/drivers/media/dvb/dvb-core/dvb_net.c
index 441c0642b30a..dba1c84058b7 100644
--- a/drivers/media/dvb/dvb-core/dvb_net.c
+++ b/drivers/media/dvb/dvb-core/dvb_net.c
@@ -1140,7 +1140,6 @@ static void wq_set_multicast_list (struct work_struct *work)
 		dprintk("%s: allmulti mode\n", dev->name);
 		priv->rx_mode = RX_MODE_ALL_MULTI;
 	} else if (!netdev_mc_empty(dev)) {
-		int mci;
 		struct dev_mc_list *mc;
 
 		dprintk("%s: set_mc_list, %d entries\n",
@@ -1149,11 +1148,8 @@ static void wq_set_multicast_list (struct work_struct *work)
 		priv->rx_mode = RX_MODE_MULTI;
 		priv->multi_num = 0;
 
-		for (mci = 0, mc=dev->mc_list;
-		     mci < netdev_mc_count(dev);
-		     mc = mc->next, mci++) {
+		netdev_for_each_mc_addr(mc, dev)
 			dvb_set_mc_filter(dev, mc);
-		}
 	}
 
 	netif_addr_unlock_bh(dev);
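A note on the conversion above: netdev_for_each_mc_addr() replaces the open-coded walk of dev->mc_list plus a manual index, so drivers stop depending on the multicast list's internal layout. A minimal sketch of the new style against the API of this era, where the iterator still yields struct dev_mc_list (dvb_set_mc_filter() stands in for any per-address hook):

#include <linux/netdevice.h>

static void set_mc_filters(struct net_device *dev)
{
	struct dev_mc_list *mc;

	/* The macro owns the traversal; no counter, no ->next chasing,
	 * and no chance of the count and the list disagreeing. */
	netdev_for_each_mc_addr(mc, dev)
		dvb_set_mc_filter(dev, mc);
}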
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 0ba5b8e50a7c..bf223fb4a86e 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2593,11 +2593,11 @@ config EHEA
 	  will be called ehea.
 
 config ENIC
-	tristate "Cisco 10G Ethernet NIC support"
+	tristate "Cisco VIC Ethernet NIC Support"
 	depends on PCI && INET
 	select INET_LRO
 	help
-	  This enables the support for the Cisco 10G Ethernet card.
+	  This enables the support for the Cisco VIC Ethernet card.
 
 config IXGBE
 	tristate "Intel(R) 10GbE PCI Express adapters support"
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 430c02267d7e..cbe9e353d46a 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1480,14 +1480,23 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 				   bond_dev->name,
 				   bond_dev->type, slave_dev->type);
 
-			netdev_bonding_change(bond_dev, NETDEV_BONDING_OLDTYPE);
+			res = netdev_bonding_change(bond_dev,
+						    NETDEV_PRE_TYPE_CHANGE);
+			res = notifier_to_errno(res);
+			if (res) {
+				pr_err("%s: refused to change device type\n",
+				       bond_dev->name);
+				res = -EBUSY;
+				goto err_undo_flags;
+			}
 
 			if (slave_dev->type != ARPHRD_ETHER)
 				bond_setup_by_slave(bond_dev, slave_dev);
 			else
 				ether_setup(bond_dev);
 
-			netdev_bonding_change(bond_dev, NETDEV_BONDING_NEWTYPE);
+			netdev_bonding_change(bond_dev,
+					      NETDEV_POST_TYPE_CHANGE);
 		}
 	} else if (bond_dev->type != slave_dev->type) {
 		pr_err("%s ether type (%d) is different from other slaves (%d), can not enslave it.\n",
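For context on the bonding hunk: NETDEV_PRE_TYPE_CHANGE (replacing NETDEV_BONDING_OLDTYPE) is delivered through the netdev notifier chain before the type flip, and notifier_to_errno() converts a NOTIFY_BAD reply into a nonzero error, so any subsystem can now veto the change. A hedged sketch of such a listener (the handler and my_dev_is_claimed() are hypothetical, not part of this patch; ptr is the net_device itself under the notifier convention of this era):

#include <linux/netdevice.h>
#include <linux/notifier.h>

static int my_netdev_event(struct notifier_block *nb,
			   unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	/* NOTIFY_BAD makes notifier_to_errno() return nonzero in
	 * bond_enslave(), which then refuses the enslave. */
	if (event == NETDEV_PRE_TYPE_CHANGE && my_dev_is_claimed(dev))
		return NOTIFY_BAD;

	return NOTIFY_DONE;
}

static struct notifier_block my_netdev_nb = {
	.notifier_call = my_netdev_event,
};
/* registered elsewhere via register_netdevice_notifier(&my_netdev_nb) */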
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index b997e578e58f..c0cd57656681 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -147,6 +147,8 @@
  *	- add clean lowlevel I/O emulation for cards with MII-lacking PHYs
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/kernel.h>
@@ -174,7 +176,6 @@
 #define DRV_VERSION		"3.5.24-k2"DRV_EXT
 #define DRV_DESCRIPTION		"Intel(R) PRO/100 Network Driver"
 #define DRV_COPYRIGHT		"Copyright(c) 1999-2006 Intel Corporation"
-#define PFX			DRV_NAME ": "
 
 #define E100_WATCHDOG_PERIOD	(2 * HZ)
 #define E100_NAPI_WEIGHT	16
@@ -200,10 +201,6 @@ module_param(use_io, int, 0);
 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
 MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums");
 MODULE_PARM_DESC(use_io, "Force use of i/o access mode");
-#define DPRINTK(nlevel, klevel, fmt, args...) \
-	(void)((NETIF_MSG_##nlevel & nic->msg_enable) && \
-	printk(KERN_##klevel PFX "%s: %s: " fmt, nic->netdev->name, \
-		__func__ , ## args))
 
 #define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
 	PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
@@ -689,12 +686,13 @@ static int e100_self_test(struct nic *nic)
 
 	/* Check results of self-test */
 	if (nic->mem->selftest.result != 0) {
-		DPRINTK(HW, ERR, "Self-test failed: result=0x%08X\n",
-			nic->mem->selftest.result);
+		netif_err(nic, hw, nic->netdev,
+			  "Self-test failed: result=0x%08X\n",
+			  nic->mem->selftest.result);
 		return -ETIMEDOUT;
 	}
 	if (nic->mem->selftest.signature == 0) {
-		DPRINTK(HW, ERR, "Self-test failed: timed out\n");
+		netif_err(nic, hw, nic->netdev, "Self-test failed: timed out\n");
 		return -ETIMEDOUT;
 	}
 
@@ -797,7 +795,7 @@ static int e100_eeprom_load(struct nic *nic)
 	/* The checksum, stored in the last word, is calculated such that
 	 * the sum of words should be 0xBABA */
 	if (cpu_to_le16(0xBABA - checksum) != nic->eeprom[nic->eeprom_wc - 1]) {
-		DPRINTK(PROBE, ERR, "EEPROM corrupted\n");
+		netif_err(nic, probe, nic->netdev, "EEPROM corrupted\n");
 		if (!eeprom_bad_csum_allow)
 			return -EAGAIN;
 	}
@@ -953,8 +951,7 @@ static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
 		udelay(20);
 	}
 	if (unlikely(!i)) {
-		printk("e100.mdio_ctrl(%s) won't go Ready\n",
-			nic->netdev->name );
+		netdev_err(nic->netdev, "e100.mdio_ctrl won't go Ready\n");
 		spin_unlock_irqrestore(&nic->mdio_lock, flags);
 		return 0;		/* No way to indicate timeout error */
 	}
@@ -966,9 +963,10 @@ static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
 			break;
 	}
 	spin_unlock_irqrestore(&nic->mdio_lock, flags);
-	DPRINTK(HW, DEBUG,
-		"%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n",
-		dir == mdi_read ? "READ" : "WRITE", addr, reg, data, data_out);
+	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
+		     "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n",
+		     dir == mdi_read ? "READ" : "WRITE",
+		     addr, reg, data, data_out);
 	return (u16)data_out;
 }
 
@@ -1028,17 +1026,19 @@ static u16 mdio_ctrl_phy_mii_emulated(struct nic *nic,
 				return ADVERTISE_10HALF |
 					ADVERTISE_10FULL;
 			default:
-				DPRINTK(HW, DEBUG,
-		"%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
-		dir == mdi_read ? "READ" : "WRITE", addr, reg, data);
+				netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
+					     "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
+					     dir == mdi_read ? "READ" : "WRITE",
+					     addr, reg, data);
 				return 0xFFFF;
 			}
 		} else {
 			switch (reg) {
 			default:
-				DPRINTK(HW, DEBUG,
-		"%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
-		dir == mdi_read ? "READ" : "WRITE", addr, reg, data);
+				netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
+					     "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
+					     dir == mdi_read ? "READ" : "WRITE",
+					     addr, reg, data);
 				return 0xFFFF;
 			}
 		}
@@ -1155,12 +1155,15 @@ static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
 		}
 	}
 
-	DPRINTK(HW, DEBUG, "[00-07]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
-		c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]);
-	DPRINTK(HW, DEBUG, "[08-15]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
-		c[8], c[9], c[10], c[11], c[12], c[13], c[14], c[15]);
-	DPRINTK(HW, DEBUG, "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
-		c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
+	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
+		     "[00-07]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
+		     c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]);
+	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
+		     "[08-15]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
+		     c[8], c[9], c[10], c[11], c[12], c[13], c[14], c[15]);
+	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
+		     "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
+		     c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
 }
 
 /*************************************************************************
@@ -1253,16 +1256,18 @@ static const struct firmware *e100_request_firmware(struct nic *nic)
 	err = request_firmware(&fw, fw_name, &nic->pdev->dev);
 
 	if (err) {
-		DPRINTK(PROBE, ERR, "Failed to load firmware \"%s\": %d\n",
-			fw_name, err);
+		netif_err(nic, probe, nic->netdev,
+			  "Failed to load firmware \"%s\": %d\n",
+			  fw_name, err);
 		return ERR_PTR(err);
 	}
 
 	/* Firmware should be precisely UCODE_SIZE (words) plus three bytes
 	   indicating the offsets for BUNDLESMALL, BUNDLEMAX, INTDELAY */
 	if (fw->size != UCODE_SIZE * 4 + 3) {
-		DPRINTK(PROBE, ERR, "Firmware \"%s\" has wrong size %zu\n",
-			fw_name, fw->size);
+		netif_err(nic, probe, nic->netdev,
+			  "Firmware \"%s\" has wrong size %zu\n",
+			  fw_name, fw->size);
 		release_firmware(fw);
 		return ERR_PTR(-EINVAL);
 	}
@@ -1274,9 +1279,9 @@ static const struct firmware *e100_request_firmware(struct nic *nic)
 
 	if (timer >= UCODE_SIZE || bundle >= UCODE_SIZE ||
 	    min_size >= UCODE_SIZE) {
-		DPRINTK(PROBE, ERR,
-			"\"%s\" has bogus offset values (0x%x,0x%x,0x%x)\n",
-			fw_name, timer, bundle, min_size);
+		netif_err(nic, probe, nic->netdev,
+			  "\"%s\" has bogus offset values (0x%x,0x%x,0x%x)\n",
+			  fw_name, timer, bundle, min_size);
 		release_firmware(fw);
 		return ERR_PTR(-EINVAL);
 	}
@@ -1328,7 +1333,8 @@ static inline int e100_load_ucode_wait(struct nic *nic)
 		return PTR_ERR(fw);
 
 	if ((err = e100_exec_cb(nic, (void *)fw, e100_setup_ucode)))
-		DPRINTK(PROBE,ERR, "ucode cmd failed with error %d\n", err);
+		netif_err(nic, probe, nic->netdev,
+			  "ucode cmd failed with error %d\n", err);
 
 	/* must restart cuc */
 	nic->cuc_cmd = cuc_start;
@@ -1348,7 +1354,7 @@ static inline int e100_load_ucode_wait(struct nic *nic)
 
 	/* if the command failed, or is not OK, notify and return */
 	if (!counter || !(cb->status & cpu_to_le16(cb_ok))) {
-		DPRINTK(PROBE,ERR, "ucode load failed\n");
+		netif_err(nic, probe, nic->netdev, "ucode load failed\n");
 		err = -EPERM;
 	}
 
@@ -1386,8 +1392,8 @@ static int e100_phy_check_without_mii(struct nic *nic)
 	 * media is sensed automatically based on how the link partner
 	 * is configured.  This is, in essence, manual configuration.
 	 */
-	DPRINTK(PROBE, INFO,
-		 "found MII-less i82503 or 80c24 or other PHY\n");
+	netif_info(nic, probe, nic->netdev,
+		   "found MII-less i82503 or 80c24 or other PHY\n");
 
 	nic->mdio_ctrl = mdio_ctrl_phy_mii_emulated;
 	nic->mii.phy_id = 0; /* is this ok for an MII-less PHY? */
@@ -1434,18 +1440,20 @@ static int e100_phy_init(struct nic *nic)
 			return 0; /* simply return and hope for the best */
 		else {
 			/* for unknown cases log a fatal error */
-			DPRINTK(HW, ERR,
-				"Failed to locate any known PHY, aborting.\n");
+			netif_err(nic, hw, nic->netdev,
+				  "Failed to locate any known PHY, aborting\n");
 			return -EAGAIN;
 		}
 	} else
-		DPRINTK(HW, DEBUG, "phy_addr = %d\n", nic->mii.phy_id);
+		netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
+			     "phy_addr = %d\n", nic->mii.phy_id);
 
 	/* Get phy ID */
 	id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1);
 	id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2);
 	nic->phy = (u32)id_hi << 16 | (u32)id_lo;
-	DPRINTK(HW, DEBUG, "phy ID = 0x%08X\n", nic->phy);
+	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
+		     "phy ID = 0x%08X\n", nic->phy);
 
 	/* Select the phy and isolate the rest */
 	for (addr = 0; addr < 32; addr++) {
@@ -1507,7 +1515,7 @@ static int e100_hw_init(struct nic *nic)
 
 	e100_hw_reset(nic);
 
-	DPRINTK(HW, ERR, "e100_hw_init\n");
+	netif_err(nic, hw, nic->netdev, "e100_hw_init\n");
 	if (!in_interrupt() && (err = e100_self_test(nic)))
 		return err;
 
@@ -1555,8 +1563,9 @@ static void e100_set_multicast_list(struct net_device *netdev)
 {
 	struct nic *nic = netdev_priv(netdev);
 
-	DPRINTK(HW, DEBUG, "mc_count=%d, flags=0x%04X\n",
-		netdev_mc_count(netdev), netdev->flags);
+	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
+		     "mc_count=%d, flags=0x%04X\n",
+		     netdev_mc_count(netdev), netdev->flags);
 
 	if (netdev->flags & IFF_PROMISC)
 		nic->flags |= promiscuous;
@@ -1629,7 +1638,8 @@ static void e100_update_stats(struct nic *nic)
 
 
 	if (e100_exec_cmd(nic, cuc_dump_reset, 0))
-		DPRINTK(TX_ERR, DEBUG, "exec cuc_dump_reset failed\n");
+		netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
+			     "exec cuc_dump_reset failed\n");
 }
 
 static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
@@ -1659,20 +1669,19 @@ static void e100_watchdog(unsigned long data)
 	struct nic *nic = (struct nic *)data;
 	struct ethtool_cmd cmd;
 
-	DPRINTK(TIMER, DEBUG, "right now = %ld\n", jiffies);
+	netif_printk(nic, timer, KERN_DEBUG, nic->netdev,
+		     "right now = %ld\n", jiffies);
 
 	/* mii library handles link maintenance tasks */
 
 	mii_ethtool_gset(&nic->mii, &cmd);
 
 	if (mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) {
-		printk(KERN_INFO "e100: %s NIC Link is Up %s Mbps %s Duplex\n",
-		       nic->netdev->name,
-		       cmd.speed == SPEED_100 ? "100" : "10",
-		       cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
+		netdev_info(nic->netdev, "NIC Link is Up %u Mbps %s Duplex\n",
+			    cmd.speed == SPEED_100 ? 100 : 10,
+			    cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
 	} else if (!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) {
-		printk(KERN_INFO "e100: %s NIC Link is Down\n",
-		       nic->netdev->name);
+		netdev_info(nic->netdev, "NIC Link is Down\n");
 	}
 
 	mii_check_link(&nic->mii);
@@ -1732,7 +1741,8 @@ static netdev_tx_t e100_xmit_frame(struct sk_buff *skb,
 		   Issue a NOP command followed by a 1us delay before
 		   issuing the Tx command. */
 		if (e100_exec_cmd(nic, cuc_nop, 0))
-			DPRINTK(TX_ERR, DEBUG, "exec cuc_nop failed\n");
+			netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
+				     "exec cuc_nop failed\n");
 		udelay(1);
 	}
 
@@ -1741,12 +1751,14 @@ static netdev_tx_t e100_xmit_frame(struct sk_buff *skb,
 	switch (err) {
 	case -ENOSPC:
 		/* We queued the skb, but now we're out of space. */
-		DPRINTK(TX_ERR, DEBUG, "No space for CB\n");
+		netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
+			     "No space for CB\n");
 		netif_stop_queue(netdev);
 		break;
 	case -ENOMEM:
 		/* This is a hard error - log it. */
-		DPRINTK(TX_ERR, DEBUG, "Out of Tx resources, returning skb\n");
+		netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
+			     "Out of Tx resources, returning skb\n");
 		netif_stop_queue(netdev);
 		return NETDEV_TX_BUSY;
 	}
@@ -1767,9 +1779,10 @@ static int e100_tx_clean(struct nic *nic)
 	for (cb = nic->cb_to_clean;
 	    cb->status & cpu_to_le16(cb_complete);
 	    cb = nic->cb_to_clean = cb->next) {
-		DPRINTK(TX_DONE, DEBUG, "cb[%d]->status = 0x%04X\n",
-			(int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)),
-			cb->status);
+		netif_printk(nic, tx_done, KERN_DEBUG, nic->netdev,
+			     "cb[%d]->status = 0x%04X\n",
+			     (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)),
+			     cb->status);
 
 		if (likely(cb->skb != NULL)) {
 			dev->stats.tx_packets++;
@@ -1912,7 +1925,8 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx,
 		sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
 	rfd_status = le16_to_cpu(rfd->status);
 
-	DPRINTK(RX_STATUS, DEBUG, "status=0x%04X\n", rfd_status);
+	netif_printk(nic, rx_status, KERN_DEBUG, nic->netdev,
+		     "status=0x%04X\n", rfd_status);
 
 	/* If data isn't ready, nothing to indicate */
 	if (unlikely(!(rfd_status & cb_complete))) {
@@ -2123,7 +2137,8 @@ static irqreturn_t e100_intr(int irq, void *dev_id)
 	struct nic *nic = netdev_priv(netdev);
 	u8 stat_ack = ioread8(&nic->csr->scb.stat_ack);
 
-	DPRINTK(INTR, DEBUG, "stat_ack = 0x%02X\n", stat_ack);
+	netif_printk(nic, intr, KERN_DEBUG, nic->netdev,
+		     "stat_ack = 0x%02X\n", stat_ack);
 
 	if (stat_ack == stat_ack_not_ours ||	/* Not our interrupt */
 	   stat_ack == stat_ack_not_present)	/* Hardware is ejected */
@@ -2263,8 +2278,8 @@ static void e100_tx_timeout_task(struct work_struct *work)
 	struct nic *nic = container_of(work, struct nic, tx_timeout_task);
 	struct net_device *netdev = nic->netdev;
 
-	DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n",
-		ioread8(&nic->csr->scb.status));
+	netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
+		     "scb.status=0x%02X\n", ioread8(&nic->csr->scb.status));
 	e100_down(netdev_priv(netdev));
 	e100_up(netdev_priv(netdev));
 }
@@ -2526,8 +2541,8 @@ static int e100_set_ringparam(struct net_device *netdev,
 	rfds->count = min(rfds->count, rfds->max);
 	cbs->count = max(ring->tx_pending, cbs->min);
 	cbs->count = min(cbs->count, cbs->max);
-	DPRINTK(DRV, INFO, "Ring Param settings: rx: %d, tx %d\n",
-		rfds->count, cbs->count);
+	netif_info(nic, drv, nic->netdev, "Ring Param settings: rx: %d, tx %d\n",
+		   rfds->count, cbs->count);
 	if (netif_running(netdev))
 		e100_up(nic);
 
@@ -2704,7 +2719,7 @@ static int e100_open(struct net_device *netdev)
 
 	netif_carrier_off(netdev);
 	if ((err = e100_up(nic)))
-		DPRINTK(IFUP, ERR, "Cannot open interface, aborting.\n");
+		netif_err(nic, ifup, nic->netdev, "Cannot open interface, aborting\n");
 	return err;
 }
 
@@ -2738,7 +2753,7 @@ static int __devinit e100_probe(struct pci_dev *pdev,
 
 	if (!(netdev = alloc_etherdev(sizeof(struct nic)))) {
 		if (((1 << debug) - 1) & NETIF_MSG_PROBE)
-			printk(KERN_ERR PFX "Etherdev alloc failed, abort.\n");
+			pr_err("Etherdev alloc failed, aborting\n");
 		return -ENOMEM;
 	}
 
@@ -2756,35 +2771,34 @@ static int __devinit e100_probe(struct pci_dev *pdev,
 	pci_set_drvdata(pdev, netdev);
 
 	if ((err = pci_enable_device(pdev))) {
-		DPRINTK(PROBE, ERR, "Cannot enable PCI device, aborting.\n");
+		netif_err(nic, probe, nic->netdev, "Cannot enable PCI device, aborting\n");
 		goto err_out_free_dev;
 	}
 
 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
-		DPRINTK(PROBE, ERR, "Cannot find proper PCI device "
-			"base address, aborting.\n");
+		netif_err(nic, probe, nic->netdev, "Cannot find proper PCI device base address, aborting\n");
 		err = -ENODEV;
 		goto err_out_disable_pdev;
 	}
 
 	if ((err = pci_request_regions(pdev, DRV_NAME))) {
-		DPRINTK(PROBE, ERR, "Cannot obtain PCI resources, aborting.\n");
+		netif_err(nic, probe, nic->netdev, "Cannot obtain PCI resources, aborting\n");
 		goto err_out_disable_pdev;
 	}
 
 	if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
-		DPRINTK(PROBE, ERR, "No usable DMA configuration, aborting.\n");
+		netif_err(nic, probe, nic->netdev, "No usable DMA configuration, aborting\n");
 		goto err_out_free_res;
 	}
 
 	SET_NETDEV_DEV(netdev, &pdev->dev);
 
 	if (use_io)
-		DPRINTK(PROBE, INFO, "using i/o access mode\n");
+		netif_info(nic, probe, nic->netdev, "using i/o access mode\n");
 
 	nic->csr = pci_iomap(pdev, (use_io ? 1 : 0), sizeof(struct csr));
 	if (!nic->csr) {
-		DPRINTK(PROBE, ERR, "Cannot map device registers, aborting.\n");
+		netif_err(nic, probe, nic->netdev, "Cannot map device registers, aborting\n");
 		err = -ENOMEM;
 		goto err_out_free_res;
 	}
@@ -2818,7 +2832,7 @@ static int __devinit e100_probe(struct pci_dev *pdev,
 	INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);
 
 	if ((err = e100_alloc(nic))) {
-		DPRINTK(PROBE, ERR, "Cannot alloc driver memory, aborting.\n");
+		netif_err(nic, probe, nic->netdev, "Cannot alloc driver memory, aborting\n");
 		goto err_out_iounmap;
 	}
 
@@ -2831,13 +2845,11 @@ static int __devinit e100_probe(struct pci_dev *pdev,
 	memcpy(netdev->perm_addr, nic->eeprom, ETH_ALEN);
 	if (!is_valid_ether_addr(netdev->perm_addr)) {
 		if (!eeprom_bad_csum_allow) {
-			DPRINTK(PROBE, ERR, "Invalid MAC address from "
-				"EEPROM, aborting.\n");
+			netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, aborting\n");
 			err = -EAGAIN;
 			goto err_out_free;
 		} else {
-			DPRINTK(PROBE, ERR, "Invalid MAC address from EEPROM, "
-				"you MUST configure one.\n");
+			netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, you MUST configure one.\n");
 		}
 	}
 
@@ -2853,7 +2865,7 @@ static int __devinit e100_probe(struct pci_dev *pdev,
 
 	strcpy(netdev->name, "eth%d");
 	if ((err = register_netdev(netdev))) {
-		DPRINTK(PROBE, ERR, "Cannot register net device, aborting.\n");
+		netif_err(nic, probe, nic->netdev, "Cannot register net device, aborting\n");
 		goto err_out_free;
 	}
 	nic->cbs_pool = pci_pool_create(netdev->name,
@@ -2861,9 +2873,10 @@ static int __devinit e100_probe(struct pci_dev *pdev,
 			   nic->params.cbs.max * sizeof(struct cb),
 			   sizeof(u32),
 			   0);
-	DPRINTK(PROBE, INFO, "addr 0x%llx, irq %d, MAC addr %pM\n",
-		(unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0),
-		pdev->irq, netdev->dev_addr);
+	netif_info(nic, probe, nic->netdev,
+		   "addr 0x%llx, irq %d, MAC addr %pM\n",
+		   (unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0),
+		   pdev->irq, netdev->dev_addr);
 
 	return 0;
 
@@ -3021,7 +3034,7 @@ static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev)
 	struct nic *nic = netdev_priv(netdev);
 
 	if (pci_enable_device(pdev)) {
-		printk(KERN_ERR "e100: Cannot re-enable PCI device after reset.\n");
+		pr_err("Cannot re-enable PCI device after reset\n");
 		return PCI_ERS_RESULT_DISCONNECT;
 	}
 	pci_set_master(pdev);
@@ -3080,8 +3093,8 @@ static struct pci_driver e100_driver = {
 static int __init e100_init_module(void)
{
 	if (((1 << debug) - 1) & NETIF_MSG_DRV) {
-		printk(KERN_INFO PFX "%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
-		printk(KERN_INFO PFX "%s\n", DRV_COPYRIGHT);
+		pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
+		pr_info("%s\n", DRV_COPYRIGHT);
 	}
 	return pci_register_driver(&e100_driver);
 }
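The e100 hunks above are a mechanical move from the driver-private DPRINTK()/PFX macros to the shared logging helpers, which give three tiers: pr_<level>() with a pr_fmt() module prefix, netdev_<level>() with the interface name, and netif_<level>() additionally gated on the adapter's msg_enable bitmap. A compressed sketch of the tiers for a made-up foo driver (struct foo_priv and the messages are illustrative; the helper names and signatures are the real kernel API):

/* Must precede the first include so every pr_*() picks it up. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/netdevice.h>

struct foo_priv {
	struct net_device *netdev;
	u32 msg_enable;		/* NETIF_MSG_* bits, e.g. set via ethtool */
};

static void foo_log_examples(struct foo_priv *priv)
{
	/* Driver-global, no netdev context: "foo: driver loaded" */
	pr_info("driver loaded\n");

	/* Per-device, always printed: prefixed with the device name */
	netdev_err(priv->netdev, "hardware fault\n");

	/* Per-device and suppressed unless msg_enable has NETIF_MSG_HW */
	netif_err(priv, hw, priv->netdev, "self-test failed\n");

	/* Arbitrary-level form used above for the KERN_DEBUG messages */
	netif_printk(priv, hw, KERN_DEBUG, priv->netdev, "probe ok\n");
}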
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index c2ec095d2163..8da190b930a2 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -158,6 +158,9 @@ struct e1000_info;
 #define HV_M_STATUS_SPEED_1000            0x0200
 #define HV_M_STATUS_LINK_UP               0x0040
 
+/* Time to wait before putting the device into D3 if there's no link (in ms). */
+#define LINK_TIMEOUT		100
+
 enum e1000_boards {
 	board_82571,
 	board_82572,
@@ -370,6 +373,8 @@ struct e1000_adapter {
 	struct work_struct update_phy_task;
 	struct work_struct led_blink_task;
 	struct work_struct print_hang_task;
+
+	bool idle_check;
 };
 
 struct e1000_info {
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 88d54d3efcef..06ba46ae2983 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -44,6 +44,7 @@
 #include <linux/cpu.h>
 #include <linux/smp.h>
 #include <linux/pm_qos_params.h>
+#include <linux/pm_runtime.h>
 #include <linux/aer.h>
 
 #include "e1000.h"
@@ -3083,12 +3084,15 @@ static int e1000_open(struct net_device *netdev)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
+	struct pci_dev *pdev = adapter->pdev;
 	int err;
 
 	/* disallow open during test */
 	if (test_bit(__E1000_TESTING, &adapter->state))
 		return -EBUSY;
 
+	pm_runtime_get_sync(&pdev->dev);
+
 	netif_carrier_off(netdev);
 
 	/* allocate transmit descriptors */
@@ -3149,6 +3153,9 @@ static int e1000_open(struct net_device *netdev)
 
 	netif_start_queue(netdev);
 
+	adapter->idle_check = true;
+	pm_runtime_put(&pdev->dev);
+
 	/* fire a link status change interrupt to start the watchdog */
 	ew32(ICS, E1000_ICS_LSC);
 
@@ -3162,6 +3169,7 @@ err_setup_rx:
 	e1000e_free_tx_resources(adapter);
 err_setup_tx:
 	e1000e_reset(adapter);
+	pm_runtime_put_sync(&pdev->dev);
 
 	return err;
 }
@@ -3180,11 +3188,17 @@ static int e1000_close(struct net_device *netdev)
 static int e1000_close(struct net_device *netdev)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct pci_dev *pdev = adapter->pdev;
 
 	WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
-	e1000e_down(adapter);
+
+	pm_runtime_get_sync(&pdev->dev);
+
+	if (!test_bit(__E1000_DOWN, &adapter->state)) {
+		e1000e_down(adapter);
+		e1000_free_irq(adapter);
+	}
 	e1000_power_down_phy(adapter);
-	e1000_free_irq(adapter);
 
 	e1000e_free_tx_resources(adapter);
 	e1000e_free_rx_resources(adapter);
@@ -3206,6 +3220,8 @@ static int e1000_close(struct net_device *netdev)
 	if (adapter->flags & FLAG_HAS_AMT)
 		e1000_release_hw_control(adapter);
 
+	pm_runtime_put_sync(&pdev->dev);
+
 	return 0;
 }
 /**
@@ -3550,6 +3566,9 @@ static void e1000_watchdog_task(struct work_struct *work)
 
 	link = e1000e_has_link(adapter);
 	if ((netif_carrier_ok(netdev)) && link) {
+		/* Cancel scheduled suspend requests. */
+		pm_runtime_resume(netdev->dev.parent);
+
 		e1000e_enable_receives(adapter);
 		goto link_up;
 	}
@@ -3561,6 +3580,10 @@ static void e1000_watchdog_task(struct work_struct *work)
 	if (link) {
 		if (!netif_carrier_ok(netdev)) {
 			bool txb2b = 1;
+
+			/* Cancel scheduled suspend requests. */
+			pm_runtime_resume(netdev->dev.parent);
+
 			/* update snapshot of PHY registers on LSC */
 			e1000_phy_read_status(adapter);
 			mac->ops.get_link_up_info(&adapter->hw,
@@ -3676,6 +3699,9 @@ static void e1000_watchdog_task(struct work_struct *work)
 
 			if (adapter->flags & FLAG_RX_NEEDS_RESTART)
 				schedule_work(&adapter->reset_task);
+			else
+				pm_schedule_suspend(netdev->dev.parent,
+						    LINK_TIMEOUT);
 		}
 	}
 
@@ -4473,13 +4499,15 @@ out:
 	return retval;
 }
 
-static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
+static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
+			    bool runtime)
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
 	u32 ctrl, ctrl_ext, rctl, status;
-	u32 wufc = adapter->wol;
+	/* Runtime suspend should only enable wakeup for link changes */
+	u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
 	int retval = 0;
 
 	netif_device_detach(netdev);
@@ -4636,43 +4664,21 @@ static void e1000e_disable_l1aspm(struct pci_dev *pdev)
 	}
 }
 
-#ifdef CONFIG_PM
-static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
+#ifdef CONFIG_PM_OPS
+static bool e1000e_pm_ready(struct e1000_adapter *adapter)
 {
-	int retval;
-	bool wake;
-
-	retval = __e1000_shutdown(pdev, &wake);
-	if (!retval)
-		e1000_complete_shutdown(pdev, true, wake);
-
-	return retval;
+	return !!adapter->tx_ring->buffer_info;
 }
 
-static int e1000_resume(struct pci_dev *pdev)
+static int __e1000_resume(struct pci_dev *pdev)
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
 	u32 err;
 
-	pci_set_power_state(pdev, PCI_D0);
-	pci_restore_state(pdev);
-	pci_save_state(pdev);
 	e1000e_disable_l1aspm(pdev);
 
-	err = pci_enable_device_mem(pdev);
-	if (err) {
-		dev_err(&pdev->dev,
-			"Cannot enable PCI device from suspend\n");
-		return err;
-	}
-
-	pci_set_master(pdev);
-
-	pci_enable_wake(pdev, PCI_D3hot, 0);
-	pci_enable_wake(pdev, PCI_D3cold, 0);
-
 	e1000e_set_interrupt_capability(adapter);
 	if (netif_running(netdev)) {
 		err = e1000_request_irq(adapter);
@@ -4730,13 +4736,88 @@ static int e1000_resume(struct pci_dev *pdev)
 
 	return 0;
 }
-#endif
+
+#ifdef CONFIG_PM_SLEEP
+static int e1000_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	int retval;
+	bool wake;
+
+	retval = __e1000_shutdown(pdev, &wake, false);
+	if (!retval)
+		e1000_complete_shutdown(pdev, true, wake);
+
+	return retval;
+}
+
+static int e1000_resume(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+
+	if (e1000e_pm_ready(adapter))
+		adapter->idle_check = true;
+
+	return __e1000_resume(pdev);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_PM_RUNTIME
+static int e1000_runtime_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+
+	if (e1000e_pm_ready(adapter)) {
+		bool wake;
+
+		__e1000_shutdown(pdev, &wake, true);
+	}
+
+	return 0;
+}
+
+static int e1000_idle(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+
+	if (!e1000e_pm_ready(adapter))
+		return 0;
+
+	if (adapter->idle_check) {
+		adapter->idle_check = false;
+		if (!e1000e_has_link(adapter))
+			pm_schedule_suspend(dev, MSEC_PER_SEC);
+	}
+
+	return -EBUSY;
+}
+
+static int e1000_runtime_resume(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+
+	if (!e1000e_pm_ready(adapter))
+		return 0;
+
+	adapter->idle_check = !dev->power.runtime_auto;
+	return __e1000_resume(pdev);
+}
+#endif /* CONFIG_PM_RUNTIME */
+#endif /* CONFIG_PM_OPS */
 
 static void e1000_shutdown(struct pci_dev *pdev)
 {
 	bool wake = false;
 
-	__e1000_shutdown(pdev, &wake);
+	__e1000_shutdown(pdev, &wake, false);
 
 	if (system_state == SYSTEM_POWER_OFF)
 		e1000_complete_shutdown(pdev, false, wake);
@@ -4809,8 +4890,8 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
 		result = PCI_ERS_RESULT_DISCONNECT;
 	} else {
 		pci_set_master(pdev);
+		pdev->state_saved = true;
 		pci_restore_state(pdev);
-		pci_save_state(pdev);
 
 		pci_enable_wake(pdev, PCI_D3hot, 0);
 		pci_enable_wake(pdev, PCI_D3cold, 0);
@@ -5217,6 +5298,12 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 
 	e1000_print_device_info(adapter);
 
+	if (pci_dev_run_wake(pdev)) {
+		pm_runtime_set_active(&pdev->dev);
+		pm_runtime_enable(&pdev->dev);
+	}
+	pm_schedule_suspend(&pdev->dev, MSEC_PER_SEC);
+
 	return 0;
 
 err_register:
@@ -5259,12 +5346,16 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct e1000_adapter *adapter = netdev_priv(netdev);
+	bool down = test_bit(__E1000_DOWN, &adapter->state);
+
+	pm_runtime_get_sync(&pdev->dev);
 
 	/*
 	 * flush_scheduled work may reschedule our watchdog task, so
 	 * explicitly disable watchdog tasks from being rescheduled
 	 */
-	set_bit(__E1000_DOWN, &adapter->state);
+	if (!down)
+		set_bit(__E1000_DOWN, &adapter->state);
 	del_timer_sync(&adapter->watchdog_timer);
 	del_timer_sync(&adapter->phy_info_timer);
 
@@ -5278,8 +5369,17 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
 	if (!(netdev->flags & IFF_UP))
 		e1000_power_down_phy(adapter);
 
+	/* Don't lie to e1000_close() down the road. */
+	if (!down)
+		clear_bit(__E1000_DOWN, &adapter->state);
 	unregister_netdev(netdev);
 
+	if (pci_dev_run_wake(pdev)) {
+		pm_runtime_disable(&pdev->dev);
+		pm_runtime_set_suspended(&pdev->dev);
+	}
+	pm_runtime_put_noidle(&pdev->dev);
+
 	/*
 	 * Release control of h/w to f/w.  If f/w is AMT enabled, this
 	 * would have already happened in close and is redundant.
@@ -5379,16 +5479,22 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
 };
 MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
 
+#ifdef CONFIG_PM_OPS
+static const struct dev_pm_ops e1000_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume)
+	SET_RUNTIME_PM_OPS(e1000_runtime_suspend,
+			   e1000_runtime_resume, e1000_idle)
+};
+#endif
+
 /* PCI Device API Driver */
 static struct pci_driver e1000_driver = {
 	.name     = e1000e_driver_name,
 	.id_table = e1000_pci_tbl,
 	.probe    = e1000_probe,
 	.remove   = __devexit_p(e1000_remove),
-#ifdef CONFIG_PM
-	/* Power Management Hooks */
-	.suspend  = e1000_suspend,
-	.resume   = e1000_resume,
+#ifdef CONFIG_PM_OPS
+	.driver.pm = &e1000_pm_ops,
 #endif
 	.shutdown = e1000_shutdown,
 	.err_handler = &e1000_err_handler
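Taken together, the e1000e hunks follow the standard runtime-PM contract: open/close bracket hardware use with pm_runtime_get_sync()/pm_runtime_put*(), the watchdog calls pm_runtime_resume() on link activity and pm_schedule_suspend() after LINK_TIMEOUT ms without link, and the idle callback returns -EBUSY to hold the device in D0 until the link check clears. A minimal sketch of the get/put bracketing for a hypothetical PCI netdev driver (the foo_* names are placeholders; the pm_runtime calls are the real API this patch builds on):

#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>

struct foo_priv {
	struct pci_dev *pdev;
};

static int foo_setup_rings(struct net_device *netdev);	/* placeholder */

static int foo_open(struct net_device *netdev)
{
	struct foo_priv *priv = netdev_priv(netdev);
	struct pci_dev *pdev = priv->pdev;
	int err;

	/* Resume the device if runtime-suspended and hold a reference
	 * across the whole bring-up sequence. */
	pm_runtime_get_sync(&pdev->dev);

	err = foo_setup_rings(netdev);
	if (err) {
		pm_runtime_put_sync(&pdev->dev);
		return err;
	}

	/* Drop the reference asynchronously; ->runtime_idle now
	 * decides whether the device may power down again. */
	pm_runtime_put(&pdev->dev);
	return 0;
}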
diff --git a/drivers/net/enic/cq_enet_desc.h b/drivers/net/enic/cq_enet_desc.h
index 03dce9ed612c..337d1943af46 100644
--- a/drivers/net/enic/cq_enet_desc.h
+++ b/drivers/net/enic/cq_enet_desc.h
@@ -101,14 +101,18 @@ static inline void cq_enet_rq_desc_dec(struct cq_enet_rq_desc *desc,
 	u8 *tcp_udp_csum_ok, u8 *udp, u8 *tcp, u8 *ipv4_csum_ok,
 	u8 *ipv6, u8 *ipv4, u8 *ipv4_fragment, u8 *fcs_ok)
 {
-	u16 completed_index_flags = le16_to_cpu(desc->completed_index_flags);
-	u16 q_number_rss_type_flags =
-		le16_to_cpu(desc->q_number_rss_type_flags);
-	u16 bytes_written_flags = le16_to_cpu(desc->bytes_written_flags);
+	u16 completed_index_flags;
+	u16 q_number_rss_type_flags;
+	u16 bytes_written_flags;
 
 	cq_desc_dec((struct cq_desc *)desc, type,
 		color, q_number, completed_index);
 
+	completed_index_flags = le16_to_cpu(desc->completed_index_flags);
+	q_number_rss_type_flags =
+		le16_to_cpu(desc->q_number_rss_type_flags);
+	bytes_written_flags = le16_to_cpu(desc->bytes_written_flags);
+
 	*ingress_port = (completed_index_flags &
 		CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT) ? 1 : 0;
 	*fcoe = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_FCOE) ?
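The reordering in cq_enet_desc.h is not cosmetic: cq_desc_dec() checks the descriptor's color (ownership) bit and, in this driver, fences with rmb() before callers look at the payload, so loading the flags words ahead of that call could observe stale data from before the NIC posted the completion. A sketch of the underlying poll-side pattern, with illustrative names (my_desc and MY_COLOR_SHIFT are placeholders):

#include <linux/types.h>
#include <asm/barrier.h>	/* rmb(); lived in <asm/system.h> in this era */

struct my_desc {
	u8 type_color;		/* color bit is written last by hardware */
	/* ... payload words follow ... */
};

static bool my_desc_is_ours(const volatile struct my_desc *desc, u8 our_color)
{
	if (((desc->type_color >> MY_COLOR_SHIFT) & 1) != our_color)
		return false;	/* still owned by the NIC */

	rmb();			/* order payload reads after the color check */
	return true;		/* now safe to le16_to_cpu() the fields */
}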
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index ee01f5a6d0d4..5fa56f1e5590 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -33,8 +33,8 @@
 #include "vnic_rss.h"
 
 #define DRV_NAME		"enic"
-#define DRV_DESCRIPTION		"Cisco 10G Ethernet Driver"
-#define DRV_VERSION		"1.1.0.241a"
+#define DRV_DESCRIPTION		"Cisco VIC Ethernet NIC Driver"
+#define DRV_VERSION		"1.3.1.1"
 #define DRV_COPYRIGHT		"Copyright 2008-2009 Cisco Systems, Inc"
 #define PFX			DRV_NAME ": "
 
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index cf098bb636b8..6d70c349c954 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -829,7 +829,7 @@ static void enic_set_multicast_list(struct net_device *netdev)
 	int promisc = (netdev->flags & IFF_PROMISC) ? 1 : 0;
 	unsigned int mc_count = netdev_mc_count(netdev);
 	int allmulti = (netdev->flags & IFF_ALLMULTI) ||
-	    mc_count > ENIC_MULTICAST_PERFECT_FILTERS;
+		mc_count > ENIC_MULTICAST_PERFECT_FILTERS;
 	unsigned int flags = netdev->flags | (allmulti ? IFF_ALLMULTI : 0);
 	u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
 	unsigned int i, j;
@@ -2058,8 +2058,7 @@ static int __devinit enic_probe(struct pci_dev *pdev,
 	netdev->watchdog_timeo = 2 * HZ;
 	netdev->ethtool_ops = &enic_ethtool_ops;
 
-	netdev->features |= NETIF_F_HW_VLAN_TX |
-		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
+	netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
 	if (ENIC_SETTING(enic, TXCSUM))
 		netdev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
 	if (ENIC_SETTING(enic, TSO))
diff --git a/drivers/net/enic/vnic_dev.c b/drivers/net/enic/vnic_dev.c index 69b9b70c7da0..cbc0ba953fc6 100644 --- a/drivers/net/enic/vnic_dev.c +++ b/drivers/net/enic/vnic_dev.c | |||
@@ -573,22 +573,18 @@ int vnic_dev_raise_intr(struct vnic_dev *vdev, u16 intr) | |||
573 | return err; | 573 | return err; |
574 | } | 574 | } |
575 | 575 | ||
576 | int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr) | 576 | int vnic_dev_notify_setcmd(struct vnic_dev *vdev, |
577 | void *notify_addr, dma_addr_t notify_pa, u16 intr) | ||
577 | { | 578 | { |
578 | u64 a0, a1; | 579 | u64 a0, a1; |
579 | int wait = 1000; | 580 | int wait = 1000; |
580 | int r; | 581 | int r; |
581 | 582 | ||
582 | if (!vdev->notify) { | 583 | memset(notify_addr, 0, sizeof(struct vnic_devcmd_notify)); |
583 | vdev->notify = pci_alloc_consistent(vdev->pdev, | 584 | vdev->notify = notify_addr; |
584 | sizeof(struct vnic_devcmd_notify), | 585 | vdev->notify_pa = notify_pa; |
585 | &vdev->notify_pa); | ||
586 | if (!vdev->notify) | ||
587 | return -ENOMEM; | ||
588 | memset(vdev->notify, 0, sizeof(struct vnic_devcmd_notify)); | ||
589 | } | ||
590 | 586 | ||
591 | a0 = vdev->notify_pa; | 587 | a0 = (u64)notify_pa; |
592 | a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL; | 588 | a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL; |
593 | a1 += sizeof(struct vnic_devcmd_notify); | 589 | a1 += sizeof(struct vnic_devcmd_notify); |
594 | 590 | ||
@@ -597,7 +593,27 @@ int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr) | |||
597 | return r; | 593 | return r; |
598 | } | 594 | } |
599 | 595 | ||
600 | void vnic_dev_notify_unset(struct vnic_dev *vdev) | 596 | int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr) |
597 | { | ||
598 | void *notify_addr; | ||
599 | dma_addr_t notify_pa; | ||
600 | |||
601 | if (vdev->notify || vdev->notify_pa) { | ||
602 | printk(KERN_ERR "notify block %p still allocated", | ||
603 | vdev->notify); | ||
604 | return -EINVAL; | ||
605 | } | ||
606 | |||
607 | notify_addr = pci_alloc_consistent(vdev->pdev, | ||
608 | sizeof(struct vnic_devcmd_notify), | ||
609 | &notify_pa); | ||
610 | if (!notify_addr) | ||
611 | return -ENOMEM; | ||
612 | |||
613 | return vnic_dev_notify_setcmd(vdev, notify_addr, notify_pa, intr); | ||
614 | } | ||
615 | |||
616 | void vnic_dev_notify_unsetcmd(struct vnic_dev *vdev) | ||
601 | { | 617 | { |
602 | u64 a0, a1; | 618 | u64 a0, a1; |
603 | int wait = 1000; | 619 | int wait = 1000; |
@@ -607,9 +623,23 @@ void vnic_dev_notify_unset(struct vnic_dev *vdev) | |||
607 | a1 += sizeof(struct vnic_devcmd_notify); | 623 | a1 += sizeof(struct vnic_devcmd_notify); |
608 | 624 | ||
609 | vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait); | 625 | vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait); |
626 | vdev->notify = NULL; | ||
627 | vdev->notify_pa = 0; | ||
610 | vdev->notify_sz = 0; | 628 | vdev->notify_sz = 0; |
611 | } | 629 | } |
612 | 630 | ||
631 | void vnic_dev_notify_unset(struct vnic_dev *vdev) | ||
632 | { | ||
633 | if (vdev->notify) { | ||
634 | pci_free_consistent(vdev->pdev, | ||
635 | sizeof(struct vnic_devcmd_notify), | ||
636 | vdev->notify, | ||
637 | vdev->notify_pa); | ||
638 | } | ||
639 | |||
640 | vnic_dev_notify_unsetcmd(vdev); | ||
641 | } | ||
642 | |||
613 | static int vnic_dev_notify_ready(struct vnic_dev *vdev) | 643 | static int vnic_dev_notify_ready(struct vnic_dev *vdev) |
614 | { | 644 | { |
615 | u32 *words; | 645 | u32 *words; |
diff --git a/drivers/net/enic/vnic_dev.h b/drivers/net/enic/vnic_dev.h index fc5e3eb35a5e..f5be640b0b5c 100644 --- a/drivers/net/enic/vnic_dev.h +++ b/drivers/net/enic/vnic_dev.h | |||
@@ -107,7 +107,10 @@ void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr); | |||
107 | void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr); | 107 | void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr); |
108 | int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr); | 108 | int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr); |
109 | int vnic_dev_raise_intr(struct vnic_dev *vdev, u16 intr); | 109 | int vnic_dev_raise_intr(struct vnic_dev *vdev, u16 intr); |
110 | int vnic_dev_notify_setcmd(struct vnic_dev *vdev, | ||
111 | void *notify_addr, dma_addr_t notify_pa, u16 intr); | ||
110 | int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr); | 112 | int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr); |
113 | void vnic_dev_notify_unsetcmd(struct vnic_dev *vdev); | ||
111 | void vnic_dev_notify_unset(struct vnic_dev *vdev); | 114 | void vnic_dev_notify_unset(struct vnic_dev *vdev); |
112 | int vnic_dev_link_status(struct vnic_dev *vdev); | 115 | int vnic_dev_link_status(struct vnic_dev *vdev); |
113 | u32 vnic_dev_port_speed(struct vnic_dev *vdev); | 116 | u32 vnic_dev_port_speed(struct vnic_dev *vdev); |
diff --git a/drivers/net/enic/vnic_rq.c b/drivers/net/enic/vnic_rq.c index 75583978a5e5..7bcd90373487 100644 --- a/drivers/net/enic/vnic_rq.c +++ b/drivers/net/enic/vnic_rq.c | |||
@@ -167,10 +167,10 @@ int vnic_rq_disable(struct vnic_rq *rq) | |||
167 | iowrite32(0, &rq->ctrl->enable); | 167 | iowrite32(0, &rq->ctrl->enable); |
168 | 168 | ||
169 | /* Wait for HW to ACK disable request */ | 169 | /* Wait for HW to ACK disable request */ |
170 | for (wait = 0; wait < 100; wait++) { | 170 | for (wait = 0; wait < 1000; wait++) { |
171 | if (!(ioread32(&rq->ctrl->running))) | 171 | if (!(ioread32(&rq->ctrl->running))) |
172 | return 0; | 172 | return 0; |
173 | udelay(1); | 173 | udelay(10); |
174 | } | 174 | } |
175 | 175 | ||
176 | printk(KERN_ERR "Failed to disable RQ[%d]\n", rq->index); | 176 | printk(KERN_ERR "Failed to disable RQ[%d]\n", rq->index); |
diff --git a/drivers/net/enic/vnic_wq.c b/drivers/net/enic/vnic_wq.c index d2e00e51b7b5..44fc3234d585 100644 --- a/drivers/net/enic/vnic_wq.c +++ b/drivers/net/enic/vnic_wq.c | |||
@@ -160,10 +160,10 @@ int vnic_wq_disable(struct vnic_wq *wq) | |||
160 | iowrite32(0, &wq->ctrl->enable); | 160 | iowrite32(0, &wq->ctrl->enable); |
161 | 161 | ||
162 | /* Wait for HW to ACK disable request */ | 162 | /* Wait for HW to ACK disable request */ |
163 | for (wait = 0; wait < 100; wait++) { | 163 | for (wait = 0; wait < 1000; wait++) { |
164 | if (!(ioread32(&wq->ctrl->running))) | 164 | if (!(ioread32(&wq->ctrl->running))) |
165 | return 0; | 165 | return 0; |
166 | udelay(1); | 166 | udelay(10); |
167 | } | 167 | } |
168 | 168 | ||
169 | printk(KERN_ERR "Failed to disable WQ[%d]\n", wq->index); | 169 | printk(KERN_ERR "Failed to disable WQ[%d]\n", wq->index); |
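Both the RQ and WQ disable paths above now poll for up to 1000 iterations of udelay(10), about 10 ms in total, instead of 100 iterations of udelay(1), i.e. 100 us, before declaring failure, giving the hardware far longer to acknowledge the disable. The loop is the standard poll-with-timeout idiom; a hypothetical shared helper (not in the patch) would look like:

	/* Total wait is iters * us microseconds: 1000 * 10us = 10ms here,
	 * versus the old 100 * 1us = 100us. */
	static int vnic_wait_not_running(void __iomem *running_reg,
		unsigned int iters, unsigned int us)
	{
		unsigned int wait;

		for (wait = 0; wait < iters; wait++) {
			if (!ioread32(running_reg))
				return 0;
			udelay(us);
		}
		return -ETIMEDOUT;
	}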
diff --git a/drivers/net/ks8842.c b/drivers/net/ks8842.c index 5c45cb58d023..b91492f4e48a 100644 --- a/drivers/net/ks8842.c +++ b/drivers/net/ks8842.c | |||
@@ -20,6 +20,8 @@ | |||
20 | * The Micrel KS8842 behind the timberdale FPGA | 20 | * The Micrel KS8842 behind the timberdale FPGA |
21 | */ | 21 | */ |
22 | 22 | ||
23 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
24 | |||
23 | #include <linux/kernel.h> | 25 | #include <linux/kernel.h> |
24 | #include <linux/module.h> | 26 | #include <linux/module.h> |
25 | #include <linux/platform_device.h> | 27 | #include <linux/platform_device.h> |
@@ -525,8 +527,7 @@ static int ks8842_open(struct net_device *netdev) | |||
525 | err = request_irq(adapter->irq, ks8842_irq, IRQF_SHARED, DRV_NAME, | 527 | err = request_irq(adapter->irq, ks8842_irq, IRQF_SHARED, DRV_NAME, |
526 | adapter); | 528 | adapter); |
527 | if (err) { | 529 | if (err) { |
528 | printk(KERN_ERR "Failed to request IRQ: %d: %d\n", | 530 | pr_err("Failed to request IRQ: %d: %d\n", adapter->irq, err); |
529 | adapter->irq, err); | ||
530 | return err; | 531 | return err; |
531 | } | 532 | } |
532 | 533 | ||
@@ -668,8 +669,7 @@ static int __devinit ks8842_probe(struct platform_device *pdev) | |||
668 | 669 | ||
669 | platform_set_drvdata(pdev, netdev); | 670 | platform_set_drvdata(pdev, netdev); |
670 | 671 | ||
671 | printk(KERN_INFO DRV_NAME | 672 | pr_info("Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n", |
672 | " Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n", | ||
673 | (id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7); | 673 | (id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7); |
674 | 674 | ||
675 | return 0; | 675 | return 0; |
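Defining pr_fmt before the first include makes every pr_*() call in the file prepend the module name automatically, which is what lets the hand-written KERN_ERR/DRV_NAME prefixes above be dropped. Illustration (the numbers are made up):

	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
	#include <linux/kernel.h>

	/* pr_err("Failed to request IRQ: %d: %d\n", 27, -16);
	 * now logs:   ks8842: Failed to request IRQ: 27: -16
	 * i.e. the same output as manually prefixing the format string
	 * with the module name in a raw printk(KERN_ERR ...). */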
diff --git a/drivers/net/ks8851.c b/drivers/net/ks8851.c index 13cc1ca261d9..66be4e449f02 100644 --- a/drivers/net/ks8851.c +++ b/drivers/net/ks8851.c | |||
@@ -9,6 +9,8 @@ | |||
9 | * published by the Free Software Foundation. | 9 | * published by the Free Software Foundation. |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
13 | |||
12 | #define DEBUG | 14 | #define DEBUG |
13 | 15 | ||
14 | #include <linux/module.h> | 16 | #include <linux/module.h> |
@@ -125,11 +127,6 @@ struct ks8851_net { | |||
125 | 127 | ||
126 | static int msg_enable; | 128 | static int msg_enable; |
127 | 129 | ||
128 | #define ks_info(_ks, _msg...) dev_info(&(_ks)->spidev->dev, _msg) | ||
129 | #define ks_warn(_ks, _msg...) dev_warn(&(_ks)->spidev->dev, _msg) | ||
130 | #define ks_dbg(_ks, _msg...) dev_dbg(&(_ks)->spidev->dev, _msg) | ||
131 | #define ks_err(_ks, _msg...) dev_err(&(_ks)->spidev->dev, _msg) | ||
132 | |||
133 | /* shift for byte-enable data */ | 130 | /* shift for byte-enable data */ |
134 | #define BYTE_EN(_x) ((_x) << 2) | 131 | #define BYTE_EN(_x) ((_x) << 2) |
135 | 132 | ||
@@ -167,7 +164,7 @@ static void ks8851_wrreg16(struct ks8851_net *ks, unsigned reg, unsigned val) | |||
167 | 164 | ||
168 | ret = spi_sync(ks->spidev, msg); | 165 | ret = spi_sync(ks->spidev, msg); |
169 | if (ret < 0) | 166 | if (ret < 0) |
170 | ks_err(ks, "spi_sync() failed\n"); | 167 | netdev_err(ks->netdev, "spi_sync() failed\n"); |
171 | } | 168 | } |
172 | 169 | ||
173 | /** | 170 | /** |
@@ -197,7 +194,7 @@ static void ks8851_wrreg8(struct ks8851_net *ks, unsigned reg, unsigned val) | |||
197 | 194 | ||
198 | ret = spi_sync(ks->spidev, msg); | 195 | ret = spi_sync(ks->spidev, msg); |
199 | if (ret < 0) | 196 | if (ret < 0) |
200 | ks_err(ks, "spi_sync() failed\n"); | 197 | netdev_err(ks->netdev, "spi_sync() failed\n"); |
201 | } | 198 | } |
202 | 199 | ||
203 | /** | 200 | /** |
@@ -263,7 +260,7 @@ static void ks8851_rdreg(struct ks8851_net *ks, unsigned op, | |||
263 | 260 | ||
264 | ret = spi_sync(ks->spidev, msg); | 261 | ret = spi_sync(ks->spidev, msg); |
265 | if (ret < 0) | 262 | if (ret < 0) |
266 | ks_err(ks, "read: spi_sync() failed\n"); | 263 | netdev_err(ks->netdev, "read: spi_sync() failed\n"); |
267 | else if (ks8851_rx_1msg(ks)) | 264 | else if (ks8851_rx_1msg(ks)) |
268 | memcpy(rxb, trx + 2, rxl); | 265 | memcpy(rxb, trx + 2, rxl); |
269 | else | 266 | else |
@@ -417,8 +414,8 @@ static void ks8851_rdfifo(struct ks8851_net *ks, u8 *buff, unsigned len) | |||
417 | u8 txb[1]; | 414 | u8 txb[1]; |
418 | int ret; | 415 | int ret; |
419 | 416 | ||
420 | if (netif_msg_rx_status(ks)) | 417 | netif_dbg(ks, rx_status, ks->netdev, |
421 | ks_dbg(ks, "%s: %d@%p\n", __func__, len, buff); | 418 | "%s: %d@%p\n", __func__, len, buff); |
422 | 419 | ||
423 | /* set the operation we're issuing */ | 420 | /* set the operation we're issuing */ |
424 | txb[0] = KS_SPIOP_RXFIFO; | 421 | txb[0] = KS_SPIOP_RXFIFO; |
@@ -434,7 +431,7 @@ static void ks8851_rdfifo(struct ks8851_net *ks, u8 *buff, unsigned len) | |||
434 | 431 | ||
435 | ret = spi_sync(ks->spidev, msg); | 432 | ret = spi_sync(ks->spidev, msg); |
436 | if (ret < 0) | 433 | if (ret < 0) |
437 | ks_err(ks, "%s: spi_sync() failed\n", __func__); | 434 | netdev_err(ks->netdev, "%s: spi_sync() failed\n", __func__); |
438 | } | 435 | } |
439 | 436 | ||
440 | /** | 437 | /** |
@@ -446,10 +443,11 @@ static void ks8851_rdfifo(struct ks8851_net *ks, u8 *buff, unsigned len) | |||
446 | */ | 443 | */ |
447 | static void ks8851_dbg_dumpkkt(struct ks8851_net *ks, u8 *rxpkt) | 444 | static void ks8851_dbg_dumpkkt(struct ks8851_net *ks, u8 *rxpkt) |
448 | { | 445 | { |
449 | ks_dbg(ks, "pkt %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x\n", | 446 | netdev_dbg(ks->netdev, |
450 | rxpkt[4], rxpkt[5], rxpkt[6], rxpkt[7], | 447 | "pkt %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x\n", |
451 | rxpkt[8], rxpkt[9], rxpkt[10], rxpkt[11], | 448 | rxpkt[4], rxpkt[5], rxpkt[6], rxpkt[7], |
452 | rxpkt[12], rxpkt[13], rxpkt[14], rxpkt[15]); | 449 | rxpkt[8], rxpkt[9], rxpkt[10], rxpkt[11], |
450 | rxpkt[12], rxpkt[13], rxpkt[14], rxpkt[15]); | ||
453 | } | 451 | } |
454 | 452 | ||
455 | /** | 453 | /** |
@@ -471,8 +469,8 @@ static void ks8851_rx_pkts(struct ks8851_net *ks) | |||
471 | 469 | ||
472 | rxfc = ks8851_rdreg8(ks, KS_RXFC); | 470 | rxfc = ks8851_rdreg8(ks, KS_RXFC); |
473 | 471 | ||
474 | if (netif_msg_rx_status(ks)) | 472 | netif_dbg(ks, rx_status, ks->netdev, |
475 | ks_dbg(ks, "%s: %d packets\n", __func__, rxfc); | 473 | "%s: %d packets\n", __func__, rxfc); |
476 | 474 | ||
477 | /* Currently we're issuing a read per packet, but we could possibly | 475 | /* Currently we're issuing a read per packet, but we could possibly |
478 | * improve the code by issuing a single read, getting the receive | 476 | * improve the code by issuing a single read, getting the receive |
@@ -489,9 +487,8 @@ static void ks8851_rx_pkts(struct ks8851_net *ks) | |||
489 | rxstat = rxh & 0xffff; | 487 | rxstat = rxh & 0xffff; |
490 | rxlen = rxh >> 16; | 488 | rxlen = rxh >> 16; |
491 | 489 | ||
492 | if (netif_msg_rx_status(ks)) | 490 | netif_dbg(ks, rx_status, ks->netdev, |
493 | ks_dbg(ks, "rx: stat 0x%04x, len 0x%04x\n", | 491 | "rx: stat 0x%04x, len 0x%04x\n", rxstat, rxlen); |
494 | rxstat, rxlen); | ||
495 | 492 | ||
496 | /* the length of the packet includes the 32bit CRC */ | 493 | /* the length of the packet includes the 32bit CRC */ |
497 | 494 | ||
@@ -553,9 +550,8 @@ static void ks8851_irq_work(struct work_struct *work) | |||
553 | 550 | ||
554 | status = ks8851_rdreg16(ks, KS_ISR); | 551 | status = ks8851_rdreg16(ks, KS_ISR); |
555 | 552 | ||
556 | if (netif_msg_intr(ks)) | 553 | netif_dbg(ks, intr, ks->netdev, |
557 | dev_dbg(&ks->spidev->dev, "%s: status 0x%04x\n", | 554 | "%s: status 0x%04x\n", __func__, status); |
558 | __func__, status); | ||
559 | 555 | ||
560 | if (status & IRQ_LCI) { | 556 | if (status & IRQ_LCI) { |
561 | /* should do something about checking link status */ | 557 | /* should do something about checking link status */ |
@@ -582,8 +578,8 @@ static void ks8851_irq_work(struct work_struct *work) | |||
582 | * system */ | 578 | * system */ |
583 | ks->tx_space = ks8851_rdreg16(ks, KS_TXMIR); | 579 | ks->tx_space = ks8851_rdreg16(ks, KS_TXMIR); |
584 | 580 | ||
585 | if (netif_msg_intr(ks)) | 581 | netif_dbg(ks, intr, ks->netdev, |
586 | ks_dbg(ks, "%s: txspace %d\n", __func__, ks->tx_space); | 582 | "%s: txspace %d\n", __func__, ks->tx_space); |
587 | } | 583 | } |
588 | 584 | ||
589 | if (status & IRQ_RXI) | 585 | if (status & IRQ_RXI) |
@@ -659,9 +655,8 @@ static void ks8851_wrpkt(struct ks8851_net *ks, struct sk_buff *txp, bool irq) | |||
659 | unsigned fid = 0; | 655 | unsigned fid = 0; |
660 | int ret; | 656 | int ret; |
661 | 657 | ||
662 | if (netif_msg_tx_queued(ks)) | 658 | netif_dbg(ks, tx_queued, ks->netdev, "%s: skb %p, %d@%p, irq %d\n", |
663 | dev_dbg(&ks->spidev->dev, "%s: skb %p, %d@%p, irq %d\n", | 659 | __func__, txp, txp->len, txp->data, irq); |
664 | __func__, txp, txp->len, txp->data, irq); | ||
665 | 660 | ||
666 | fid = ks->fid++; | 661 | fid = ks->fid++; |
667 | fid &= TXFR_TXFID_MASK; | 662 | fid &= TXFR_TXFID_MASK; |
@@ -685,7 +680,7 @@ static void ks8851_wrpkt(struct ks8851_net *ks, struct sk_buff *txp, bool irq) | |||
685 | 680 | ||
686 | ret = spi_sync(ks->spidev, msg); | 681 | ret = spi_sync(ks->spidev, msg); |
687 | if (ret < 0) | 682 | if (ret < 0) |
688 | ks_err(ks, "%s: spi_sync() failed\n", __func__); | 683 | netdev_err(ks->netdev, "%s: spi_sync() failed\n", __func__); |
689 | } | 684 | } |
690 | 685 | ||
691 | /** | 686 | /** |
@@ -744,8 +739,7 @@ static void ks8851_set_powermode(struct ks8851_net *ks, unsigned pwrmode) | |||
744 | { | 739 | { |
745 | unsigned pmecr; | 740 | unsigned pmecr; |
746 | 741 | ||
747 | if (netif_msg_hw(ks)) | 742 | netif_dbg(ks, hw, ks->netdev, "setting power mode %d\n", pwrmode); |
748 | ks_dbg(ks, "setting power mode %d\n", pwrmode); | ||
749 | 743 | ||
750 | pmecr = ks8851_rdreg16(ks, KS_PMECR); | 744 | pmecr = ks8851_rdreg16(ks, KS_PMECR); |
751 | pmecr &= ~PMECR_PM_MASK; | 745 | pmecr &= ~PMECR_PM_MASK; |
@@ -769,8 +763,7 @@ static int ks8851_net_open(struct net_device *dev) | |||
769 | * else at the moment */ | 763 | * else at the moment */ |
770 | mutex_lock(&ks->lock); | 764 | mutex_lock(&ks->lock); |
771 | 765 | ||
772 | if (netif_msg_ifup(ks)) | 766 | netif_dbg(ks, ifup, ks->netdev, "opening\n"); |
773 | ks_dbg(ks, "opening %s\n", dev->name); | ||
774 | 767 | ||
775 | /* bring chip out of any power saving mode it was in */ | 768 | /* bring chip out of any power saving mode it was in */ |
776 | ks8851_set_powermode(ks, PMECR_PM_NORMAL); | 769 | ks8851_set_powermode(ks, PMECR_PM_NORMAL); |
@@ -826,8 +819,7 @@ static int ks8851_net_open(struct net_device *dev) | |||
826 | 819 | ||
827 | netif_start_queue(ks->netdev); | 820 | netif_start_queue(ks->netdev); |
828 | 821 | ||
829 | if (netif_msg_ifup(ks)) | 822 | netif_dbg(ks, ifup, ks->netdev, "network device up\n"); |
830 | ks_dbg(ks, "network device %s up\n", dev->name); | ||
831 | 823 | ||
832 | mutex_unlock(&ks->lock); | 824 | mutex_unlock(&ks->lock); |
833 | return 0; | 825 | return 0; |
@@ -845,8 +837,7 @@ static int ks8851_net_stop(struct net_device *dev) | |||
845 | { | 837 | { |
846 | struct ks8851_net *ks = netdev_priv(dev); | 838 | struct ks8851_net *ks = netdev_priv(dev); |
847 | 839 | ||
848 | if (netif_msg_ifdown(ks)) | 840 | netif_info(ks, ifdown, dev, "shutting down\n"); |
849 | ks_info(ks, "%s: shutting down\n", dev->name); | ||
850 | 841 | ||
851 | netif_stop_queue(dev); | 842 | netif_stop_queue(dev); |
852 | 843 | ||
@@ -874,8 +865,8 @@ static int ks8851_net_stop(struct net_device *dev) | |||
874 | while (!skb_queue_empty(&ks->txq)) { | 865 | while (!skb_queue_empty(&ks->txq)) { |
875 | struct sk_buff *txb = skb_dequeue(&ks->txq); | 866 | struct sk_buff *txb = skb_dequeue(&ks->txq); |
876 | 867 | ||
877 | if (netif_msg_ifdown(ks)) | 868 | netif_dbg(ks, ifdown, ks->netdev, |
878 | ks_dbg(ks, "%s: freeing txb %p\n", __func__, txb); | 869 | "%s: freeing txb %p\n", __func__, txb); |
879 | 870 | ||
880 | dev_kfree_skb(txb); | 871 | dev_kfree_skb(txb); |
881 | } | 872 | } |
@@ -904,9 +895,8 @@ static netdev_tx_t ks8851_start_xmit(struct sk_buff *skb, | |||
904 | unsigned needed = calc_txlen(skb->len); | 895 | unsigned needed = calc_txlen(skb->len); |
905 | netdev_tx_t ret = NETDEV_TX_OK; | 896 | netdev_tx_t ret = NETDEV_TX_OK; |
906 | 897 | ||
907 | if (netif_msg_tx_queued(ks)) | 898 | netif_dbg(ks, tx_queued, ks->netdev, |
908 | ks_dbg(ks, "%s: skb %p, %d@%p\n", __func__, | 899 | "%s: skb %p, %d@%p\n", __func__, skb, skb->len, skb->data); |
909 | skb, skb->len, skb->data); | ||
910 | 900 | ||
911 | spin_lock(&ks->statelock); | 901 | spin_lock(&ks->statelock); |
912 | 902 | ||
@@ -1185,17 +1175,17 @@ static int ks8851_read_selftest(struct ks8851_net *ks) | |||
1185 | rd = ks8851_rdreg16(ks, KS_MBIR); | 1175 | rd = ks8851_rdreg16(ks, KS_MBIR); |
1186 | 1176 | ||
1187 | if ((rd & both_done) != both_done) { | 1177 | if ((rd & both_done) != both_done) { |
1188 | ks_warn(ks, "Memory selftest not finished\n"); | 1178 | netdev_warn(ks->netdev, "Memory selftest not finished\n"); |
1189 | return 0; | 1179 | return 0; |
1190 | } | 1180 | } |
1191 | 1181 | ||
1192 | if (rd & MBIR_TXMBFA) { | 1182 | if (rd & MBIR_TXMBFA) { |
1193 | ks_err(ks, "TX memory selftest fail\n"); | 1183 | netdev_err(ks->netdev, "TX memory selftest fail\n"); |
1194 | ret |= 1; | 1184 | ret |= 1; |
1195 | } | 1185 | } |
1196 | 1186 | ||
1197 | if (rd & MBIR_RXMBFA) { | 1187 | if (rd & MBIR_RXMBFA) { |
1198 | ks_err(ks, "RX memory selftest fail\n"); | 1188 | netdev_err(ks->netdev, "RX memory selftest fail\n"); |
1199 | ret |= 2; | 1189 | ret |= 2; |
1200 | } | 1190 | } |
1201 | 1191 | ||
@@ -1293,9 +1283,9 @@ static int __devinit ks8851_probe(struct spi_device *spi) | |||
1293 | goto err_netdev; | 1283 | goto err_netdev; |
1294 | } | 1284 | } |
1295 | 1285 | ||
1296 | dev_info(&spi->dev, "revision %d, MAC %pM, IRQ %d\n", | 1286 | netdev_info(ndev, "revision %d, MAC %pM, IRQ %d\n", |
1297 | CIDER_REV_GET(ks8851_rdreg16(ks, KS_CIDER)), | 1287 | CIDER_REV_GET(ks8851_rdreg16(ks, KS_CIDER)), |
1298 | ndev->dev_addr, ndev->irq); | 1288 | ndev->dev_addr, ndev->irq); |
1299 | 1289 | ||
1300 | return 0; | 1290 | return 0; |
1301 | 1291 | ||
@@ -1314,7 +1304,7 @@ static int __devexit ks8851_remove(struct spi_device *spi) | |||
1314 | struct ks8851_net *priv = dev_get_drvdata(&spi->dev); | 1304 | struct ks8851_net *priv = dev_get_drvdata(&spi->dev); |
1315 | 1305 | ||
1316 | if (netif_msg_drv(priv)) | 1306 | if (netif_msg_drv(priv)) |
1317 | dev_info(&spi->dev, "remove"); | 1307 | dev_info(&spi->dev, "remove\n"); |
1318 | 1308 | ||
1319 | unregister_netdev(priv->netdev); | 1309 | unregister_netdev(priv->netdev); |
1320 | free_irq(spi->irq, priv); | 1310 | free_irq(spi->irq, priv); |
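Throughout ks8851.c the open-coded "if (netif_msg_xxx(ks)) ks_dbg(...)" pairs collapse into netif_dbg()/netif_info()/netdev_err(), which fold the msg_enable test (for the netif_* variants) into the macro and prefix output with the net device name, so the interface name no longer has to be passed by hand. The two forms are equivalent, e.g.:

	/* before: explicit flag test plus spi-device-relative printing */
	if (netif_msg_rx_status(ks))
		ks_dbg(ks, "%s: %d packets\n", __func__, rxfc);

	/* after: one macro; it tests ks->msg_enable internally and
	 * prefixes the message with the netdev name */
	netif_dbg(ks, rx_status, ks->netdev,
		  "%s: %d packets\n", __func__, rxfc);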
diff --git a/drivers/net/ks8851_mll.c b/drivers/net/ks8851_mll.c index 84b0e15831f9..d3c6a77f7ec0 100644 --- a/drivers/net/ks8851_mll.c +++ b/drivers/net/ks8851_mll.c | |||
@@ -21,6 +21,8 @@ | |||
21 | * KS8851 16bit MLL chip from Micrel Inc. | 21 | * KS8851 16bit MLL chip from Micrel Inc. |
22 | */ | 22 | */ |
23 | 23 | ||
24 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
25 | |||
24 | #include <linux/module.h> | 26 | #include <linux/module.h> |
25 | #include <linux/kernel.h> | 27 | #include <linux/kernel.h> |
26 | #include <linux/netdevice.h> | 28 | #include <linux/netdevice.h> |
@@ -458,11 +460,6 @@ struct ks_net { | |||
458 | 460 | ||
459 | static int msg_enable; | 461 | static int msg_enable; |
460 | 462 | ||
461 | #define ks_info(_ks, _msg...) dev_info(&(_ks)->pdev->dev, _msg) | ||
462 | #define ks_warn(_ks, _msg...) dev_warn(&(_ks)->pdev->dev, _msg) | ||
463 | #define ks_dbg(_ks, _msg...) dev_dbg(&(_ks)->pdev->dev, _msg) | ||
464 | #define ks_err(_ks, _msg...) dev_err(&(_ks)->pdev->dev, _msg) | ||
465 | |||
466 | #define BE3 0x8000 /* Byte Enable 3 */ | 463 | #define BE3 0x8000 /* Byte Enable 3 */ |
467 | #define BE2 0x4000 /* Byte Enable 2 */ | 464 | #define BE2 0x4000 /* Byte Enable 2 */ |
468 | #define BE1 0x2000 /* Byte Enable 1 */ | 465 | #define BE1 0x2000 /* Byte Enable 1 */ |
@@ -624,8 +621,7 @@ static void ks_set_powermode(struct ks_net *ks, unsigned pwrmode) | |||
624 | { | 621 | { |
625 | unsigned pmecr; | 622 | unsigned pmecr; |
626 | 623 | ||
627 | if (netif_msg_hw(ks)) | 624 | netif_dbg(ks, hw, ks->netdev, "setting power mode %d\n", pwrmode); |
628 | ks_dbg(ks, "setting power mode %d\n", pwrmode); | ||
629 | 625 | ||
630 | ks_rdreg16(ks, KS_GRR); | 626 | ks_rdreg16(ks, KS_GRR); |
631 | pmecr = ks_rdreg16(ks, KS_PMECR); | 627 | pmecr = ks_rdreg16(ks, KS_PMECR); |
@@ -809,7 +805,7 @@ static void ks_rcv(struct ks_net *ks, struct net_device *netdev) | |||
809 | skb->protocol = eth_type_trans(skb, netdev); | 805 | skb->protocol = eth_type_trans(skb, netdev); |
810 | netif_rx(skb); | 806 | netif_rx(skb); |
811 | } else { | 807 | } else { |
812 | printk(KERN_ERR "%s: err:skb alloc\n", __func__); | 808 | pr_err("%s: err:skb alloc\n", __func__); |
813 | ks_wrreg16(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_RRXEF)); | 809 | ks_wrreg16(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_RRXEF)); |
814 | if (skb) | 810 | if (skb) |
815 | dev_kfree_skb_irq(skb); | 811 | dev_kfree_skb_irq(skb); |
@@ -836,9 +832,8 @@ static void ks_update_link_status(struct net_device *netdev, struct ks_net *ks) | |||
836 | netif_carrier_off(netdev); | 832 | netif_carrier_off(netdev); |
837 | link_up_status = false; | 833 | link_up_status = false; |
838 | } | 834 | } |
839 | if (netif_msg_link(ks)) | 835 | netif_dbg(ks, link, ks->netdev, |
840 | ks_dbg(ks, "%s: %s\n", | 836 | "%s: %s\n", __func__, link_up_status ? "UP" : "DOWN"); |
841 | __func__, link_up_status ? "UP" : "DOWN"); | ||
842 | } | 837 | } |
843 | 838 | ||
844 | /** | 839 | /** |
@@ -908,15 +903,13 @@ static int ks_net_open(struct net_device *netdev) | |||
908 | * else at the moment. | 903 | * else at the moment. |
909 | */ | 904 | */ |
910 | 905 | ||
911 | if (netif_msg_ifup(ks)) | 906 | netif_dbg(ks, ifup, ks->netdev, "%s - entry\n", __func__); |
912 | ks_dbg(ks, "%s - entry\n", __func__); | ||
913 | 907 | ||
914 | /* reset the HW */ | 908 | /* reset the HW */ |
915 | err = request_irq(ks->irq, ks_irq, KS_INT_FLAGS, DRV_NAME, netdev); | 909 | err = request_irq(ks->irq, ks_irq, KS_INT_FLAGS, DRV_NAME, netdev); |
916 | 910 | ||
917 | if (err) { | 911 | if (err) { |
918 | printk(KERN_ERR "Failed to request IRQ: %d: %d\n", | 912 | pr_err("Failed to request IRQ: %d: %d\n", ks->irq, err); |
919 | ks->irq, err); | ||
920 | return err; | 913 | return err; |
921 | } | 914 | } |
922 | 915 | ||
@@ -929,8 +922,7 @@ static int ks_net_open(struct net_device *netdev) | |||
929 | ks_enable_qmu(ks); | 922 | ks_enable_qmu(ks); |
930 | netif_start_queue(ks->netdev); | 923 | netif_start_queue(ks->netdev); |
931 | 924 | ||
932 | if (netif_msg_ifup(ks)) | 925 | netif_dbg(ks, ifup, ks->netdev, "network device up\n"); |
933 | ks_dbg(ks, "network device %s up\n", netdev->name); | ||
934 | 926 | ||
935 | return 0; | 927 | return 0; |
936 | } | 928 | } |
@@ -947,8 +939,7 @@ static int ks_net_stop(struct net_device *netdev) | |||
947 | { | 939 | { |
948 | struct ks_net *ks = netdev_priv(netdev); | 940 | struct ks_net *ks = netdev_priv(netdev); |
949 | 941 | ||
950 | if (netif_msg_ifdown(ks)) | 942 | netif_info(ks, ifdown, netdev, "shutting down\n"); |
951 | ks_info(ks, "%s: shutting down\n", netdev->name); | ||
952 | 943 | ||
953 | netif_stop_queue(netdev); | 944 | netif_stop_queue(netdev); |
954 | 945 | ||
@@ -1429,21 +1420,21 @@ static int ks_read_selftest(struct ks_net *ks) | |||
1429 | rd = ks_rdreg16(ks, KS_MBIR); | 1420 | rd = ks_rdreg16(ks, KS_MBIR); |
1430 | 1421 | ||
1431 | if ((rd & both_done) != both_done) { | 1422 | if ((rd & both_done) != both_done) { |
1432 | ks_warn(ks, "Memory selftest not finished\n"); | 1423 | netdev_warn(ks->netdev, "Memory selftest not finished\n"); |
1433 | return 0; | 1424 | return 0; |
1434 | } | 1425 | } |
1435 | 1426 | ||
1436 | if (rd & MBIR_TXMBFA) { | 1427 | if (rd & MBIR_TXMBFA) { |
1437 | ks_err(ks, "TX memory selftest fails\n"); | 1428 | netdev_err(ks->netdev, "TX memory selftest fails\n"); |
1438 | ret |= 1; | 1429 | ret |= 1; |
1439 | } | 1430 | } |
1440 | 1431 | ||
1441 | if (rd & MBIR_RXMBFA) { | 1432 | if (rd & MBIR_RXMBFA) { |
1442 | ks_err(ks, "RX memory selftest fails\n"); | 1433 | netdev_err(ks->netdev, "RX memory selftest fails\n"); |
1443 | ret |= 2; | 1434 | ret |= 2; |
1444 | } | 1435 | } |
1445 | 1436 | ||
1446 | ks_info(ks, "the selftest passes\n"); | 1437 | netdev_info(ks->netdev, "the selftest passes\n"); |
1447 | return ret; | 1438 | return ret; |
1448 | } | 1439 | } |
1449 | 1440 | ||
@@ -1514,7 +1505,7 @@ static int ks_hw_init(struct ks_net *ks) | |||
1514 | ks->frame_head_info = (struct type_frame_head *) \ | 1505 | ks->frame_head_info = (struct type_frame_head *) \ |
1515 | kmalloc(MHEADER_SIZE, GFP_KERNEL); | 1506 | kmalloc(MHEADER_SIZE, GFP_KERNEL); |
1516 | if (!ks->frame_head_info) { | 1507 | if (!ks->frame_head_info) { |
1517 | printk(KERN_ERR "Error: Fail to allocate frame memory\n"); | 1508 | pr_err("Error: Fail to allocate frame memory\n"); |
1518 | return false; | 1509 | return false; |
1519 | } | 1510 | } |
1520 | 1511 | ||
@@ -1580,7 +1571,7 @@ static int __devinit ks8851_probe(struct platform_device *pdev) | |||
1580 | ks->mii.mdio_read = ks_phy_read; | 1571 | ks->mii.mdio_read = ks_phy_read; |
1581 | ks->mii.mdio_write = ks_phy_write; | 1572 | ks->mii.mdio_write = ks_phy_write; |
1582 | 1573 | ||
1583 | ks_info(ks, "message enable is %d\n", msg_enable); | 1574 | netdev_info(netdev, "message enable is %d\n", msg_enable); |
1584 | /* set the default message enable */ | 1575 | /* set the default message enable */ |
1585 | ks->msg_enable = netif_msg_init(msg_enable, (NETIF_MSG_DRV | | 1576 | ks->msg_enable = netif_msg_init(msg_enable, (NETIF_MSG_DRV | |
1586 | NETIF_MSG_PROBE | | 1577 | NETIF_MSG_PROBE | |
@@ -1589,13 +1580,13 @@ static int __devinit ks8851_probe(struct platform_device *pdev) | |||
1589 | 1580 | ||
1590 | /* simple check for a valid chip being connected to the bus */ | 1581 | /* simple check for a valid chip being connected to the bus */ |
1591 | if ((ks_rdreg16(ks, KS_CIDER) & ~CIDER_REV_MASK) != CIDER_ID) { | 1582 | if ((ks_rdreg16(ks, KS_CIDER) & ~CIDER_REV_MASK) != CIDER_ID) { |
1592 | ks_err(ks, "failed to read device ID\n"); | 1583 | netdev_err(netdev, "failed to read device ID\n"); |
1593 | err = -ENODEV; | 1584 | err = -ENODEV; |
1594 | goto err_register; | 1585 | goto err_register; |
1595 | } | 1586 | } |
1596 | 1587 | ||
1597 | if (ks_read_selftest(ks)) { | 1588 | if (ks_read_selftest(ks)) { |
1598 | ks_err(ks, "failed to read device ID\n"); | 1589 | netdev_err(netdev, "failed to read device ID\n"); |
1599 | err = -ENODEV; | 1590 | err = -ENODEV; |
1600 | goto err_register; | 1591 | goto err_register; |
1601 | } | 1592 | } |
@@ -1626,9 +1617,8 @@ static int __devinit ks8851_probe(struct platform_device *pdev) | |||
1626 | 1617 | ||
1627 | id = ks_rdreg16(ks, KS_CIDER); | 1618 | id = ks_rdreg16(ks, KS_CIDER); |
1628 | 1619 | ||
1629 | printk(KERN_INFO DRV_NAME | 1620 | netdev_info(netdev, "Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n", |
1630 | " Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n", | 1621 | (id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7); |
1631 | (id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7); | ||
1632 | return 0; | 1622 | return 0; |
1633 | 1623 | ||
1634 | err_register: | 1624 | err_register: |
diff --git a/drivers/net/ksz884x.c b/drivers/net/ksz884x.c index 0f59099ee72f..b47a2b3e116e 100644 --- a/drivers/net/ksz884x.c +++ b/drivers/net/ksz884x.c | |||
@@ -14,10 +14,11 @@ | |||
14 | * GNU General Public License for more details. | 14 | * GNU General Public License for more details. |
15 | */ | 15 | */ |
16 | 16 | ||
17 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
18 | |||
17 | #include <linux/init.h> | 19 | #include <linux/init.h> |
18 | #include <linux/kernel.h> | 20 | #include <linux/kernel.h> |
19 | #include <linux/module.h> | 21 | #include <linux/module.h> |
20 | #include <linux/version.h> | ||
21 | #include <linux/ioport.h> | 22 | #include <linux/ioport.h> |
22 | #include <linux/pci.h> | 23 | #include <linux/pci.h> |
23 | #include <linux/proc_fs.h> | 24 | #include <linux/proc_fs.h> |
@@ -1483,11 +1484,6 @@ struct dev_priv { | |||
1483 | int promiscuous; | 1484 | int promiscuous; |
1484 | }; | 1485 | }; |
1485 | 1486 | ||
1486 | #define ks_info(_ks, _msg...) dev_info(&(_ks)->pdev->dev, _msg) | ||
1487 | #define ks_warn(_ks, _msg...) dev_warn(&(_ks)->pdev->dev, _msg) | ||
1488 | #define ks_dbg(_ks, _msg...) dev_dbg(&(_ks)->pdev->dev, _msg) | ||
1489 | #define ks_err(_ks, _msg...) dev_err(&(_ks)->pdev->dev, _msg) | ||
1490 | |||
1491 | #define DRV_NAME "KSZ884X PCI" | 1487 | #define DRV_NAME "KSZ884X PCI" |
1492 | #define DEVICE_NAME "KSZ884x PCI" | 1488 | #define DEVICE_NAME "KSZ884x PCI" |
1493 | #define DRV_VERSION "1.0.0" | 1489 | #define DRV_VERSION "1.0.0" |
@@ -3834,7 +3830,7 @@ static void ksz_check_desc_num(struct ksz_desc_info *info) | |||
3834 | alloc >>= 1; | 3830 | alloc >>= 1; |
3835 | } | 3831 | } |
3836 | if (alloc != 1 || shift < MIN_DESC_SHIFT) { | 3832 | if (alloc != 1 || shift < MIN_DESC_SHIFT) { |
3837 | printk(KERN_ALERT "Hardware descriptor numbers not right!\n"); | 3833 | pr_alert("Hardware descriptor numbers not right!\n"); |
3838 | while (alloc) { | 3834 | while (alloc) { |
3839 | shift++; | 3835 | shift++; |
3840 | alloc >>= 1; | 3836 | alloc >>= 1; |
@@ -4545,8 +4541,7 @@ static int ksz_alloc_mem(struct dev_info *adapter) | |||
4545 | (((sizeof(struct ksz_hw_desc) + DESC_ALIGNMENT - 1) / | 4541 | (((sizeof(struct ksz_hw_desc) + DESC_ALIGNMENT - 1) / |
4546 | DESC_ALIGNMENT) * DESC_ALIGNMENT); | 4542 | DESC_ALIGNMENT) * DESC_ALIGNMENT); |
4547 | if (hw->rx_desc_info.size != sizeof(struct ksz_hw_desc)) | 4543 | if (hw->rx_desc_info.size != sizeof(struct ksz_hw_desc)) |
4548 | printk(KERN_ALERT | 4544 | pr_alert("Hardware descriptor size not right!\n"); |
4549 | "Hardware descriptor size not right!\n"); | ||
4550 | ksz_check_desc_num(&hw->rx_desc_info); | 4545 | ksz_check_desc_num(&hw->rx_desc_info); |
4551 | ksz_check_desc_num(&hw->tx_desc_info); | 4546 | ksz_check_desc_num(&hw->tx_desc_info); |
4552 | 4547 | ||
@@ -5319,10 +5314,10 @@ static irqreturn_t netdev_intr(int irq, void *dev_id) | |||
5319 | u32 data; | 5314 | u32 data; |
5320 | 5315 | ||
5321 | hw->intr_mask &= ~KS884X_INT_TX_STOPPED; | 5316 | hw->intr_mask &= ~KS884X_INT_TX_STOPPED; |
5322 | printk(KERN_INFO "Tx stopped\n"); | 5317 | pr_info("Tx stopped\n"); |
5323 | data = readl(hw->io + KS_DMA_TX_CTRL); | 5318 | data = readl(hw->io + KS_DMA_TX_CTRL); |
5324 | if (!(data & DMA_TX_ENABLE)) | 5319 | if (!(data & DMA_TX_ENABLE)) |
5325 | printk(KERN_INFO "Tx disabled\n"); | 5320 | pr_info("Tx disabled\n"); |
5326 | break; | 5321 | break; |
5327 | } | 5322 | } |
5328 | } while (0); | 5323 | } while (0); |
@@ -5495,6 +5490,18 @@ static int prepare_hardware(struct net_device *dev) | |||
5495 | return 0; | 5490 | return 0; |
5496 | } | 5491 | } |
5497 | 5492 | ||
5493 | static void set_media_state(struct net_device *dev, int media_state) | ||
5494 | { | ||
5495 | struct dev_priv *priv = netdev_priv(dev); | ||
5496 | |||
5497 | if (media_state == priv->media_state) | ||
5498 | netif_carrier_on(dev); | ||
5499 | else | ||
5500 | netif_carrier_off(dev); | ||
5501 | netif_info(priv, link, dev, "link %s\n", | ||
5502 | media_state == priv->media_state ? "on" : "off"); | ||
5503 | } | ||
5504 | |||
5498 | /** | 5505 | /** |
5499 | * netdev_open - open network device | 5506 | * netdev_open - open network device |
5500 | * @dev: Network device. | 5507 | * @dev: Network device. |
@@ -5584,15 +5591,7 @@ static int netdev_open(struct net_device *dev) | |||
5584 | 5591 | ||
5585 | priv->media_state = port->linked->state; | 5592 | priv->media_state = port->linked->state; |
5586 | 5593 | ||
5587 | if (media_connected == priv->media_state) | 5594 | set_media_state(dev, media_connected); |
5588 | netif_carrier_on(dev); | ||
5589 | else | ||
5590 | netif_carrier_off(dev); | ||
5591 | if (netif_msg_link(priv)) | ||
5592 | printk(KERN_INFO "%s link %s\n", dev->name, | ||
5593 | (media_connected == priv->media_state ? | ||
5594 | "on" : "off")); | ||
5595 | |||
5596 | netif_start_queue(dev); | 5595 | netif_start_queue(dev); |
5597 | 5596 | ||
5598 | return 0; | 5597 | return 0; |
@@ -6682,16 +6681,8 @@ static void update_link(struct net_device *dev, struct dev_priv *priv, | |||
6682 | { | 6681 | { |
6683 | if (priv->media_state != port->linked->state) { | 6682 | if (priv->media_state != port->linked->state) { |
6684 | priv->media_state = port->linked->state; | 6683 | priv->media_state = port->linked->state; |
6685 | if (netif_running(dev)) { | 6684 | if (netif_running(dev)) |
6686 | if (media_connected == priv->media_state) | 6685 | set_media_state(dev, media_connected); |
6687 | netif_carrier_on(dev); | ||
6688 | else | ||
6689 | netif_carrier_off(dev); | ||
6690 | if (netif_msg_link(priv)) | ||
6691 | printk(KERN_INFO "%s link %s\n", dev->name, | ||
6692 | (media_connected == priv->media_state ? | ||
6693 | "on" : "off")); | ||
6694 | } | ||
6695 | } | 6686 | } |
6696 | } | 6687 | } |
6697 | 6688 | ||
@@ -6985,7 +6976,7 @@ static int __init pcidev_init(struct pci_dev *pdev, | |||
6985 | int pi; | 6976 | int pi; |
6986 | int port_count; | 6977 | int port_count; |
6987 | int result; | 6978 | int result; |
6988 | char banner[80]; | 6979 | char banner[sizeof(version)]; |
6989 | struct ksz_switch *sw = NULL; | 6980 | struct ksz_switch *sw = NULL; |
6990 | 6981 | ||
6991 | result = pci_enable_device(pdev); | 6982 | result = pci_enable_device(pdev); |
@@ -7009,10 +7000,9 @@ static int __init pcidev_init(struct pci_dev *pdev, | |||
7009 | 7000 | ||
7010 | result = -ENOMEM; | 7001 | result = -ENOMEM; |
7011 | 7002 | ||
7012 | info = kmalloc(sizeof(struct platform_info), GFP_KERNEL); | 7003 | info = kzalloc(sizeof(struct platform_info), GFP_KERNEL); |
7013 | if (!info) | 7004 | if (!info) |
7014 | goto pcidev_init_dev_err; | 7005 | goto pcidev_init_dev_err; |
7015 | memset(info, 0, sizeof(struct platform_info)); | ||
7016 | 7006 | ||
7017 | hw_priv = &info->dev_info; | 7007 | hw_priv = &info->dev_info; |
7018 | hw_priv->pdev = pdev; | 7008 | hw_priv->pdev = pdev; |
@@ -7026,15 +7016,15 @@ static int __init pcidev_init(struct pci_dev *pdev, | |||
7026 | cnt = hw_init(hw); | 7016 | cnt = hw_init(hw); |
7027 | if (!cnt) { | 7017 | if (!cnt) { |
7028 | if (msg_enable & NETIF_MSG_PROBE) | 7018 | if (msg_enable & NETIF_MSG_PROBE) |
7029 | printk(KERN_ALERT "chip not detected\n"); | 7019 | pr_alert("chip not detected\n"); |
7030 | result = -ENODEV; | 7020 | result = -ENODEV; |
7031 | goto pcidev_init_alloc_err; | 7021 | goto pcidev_init_alloc_err; |
7032 | } | 7022 | } |
7033 | 7023 | ||
7034 | sprintf(banner, "%s\n", version); | 7024 | snprintf(banner, sizeof(banner), "%s", version); |
7035 | banner[13] = cnt + '0'; | 7025 | banner[13] = cnt + '0'; /* Replace x in "Micrel KSZ884x" */ |
7036 | ks_info(hw_priv, "%s", banner); | 7026 | dev_info(&hw_priv->pdev->dev, "%s\n", banner); |
7037 | ks_dbg(hw_priv, "Mem = %p; IRQ = %d\n", hw->io, pdev->irq); | 7027 | dev_dbg(&hw_priv->pdev->dev, "Mem = %p; IRQ = %d\n", hw->io, pdev->irq); |
7038 | 7028 | ||
7039 | /* Assume device is KSZ8841. */ | 7029 | /* Assume device is KSZ8841. */ |
7040 | hw->dev_count = 1; | 7030 | hw->dev_count = 1; |
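Besides the same logging conversion and the factored-out set_media_state() helper, the ksz884x hunks bound the banner copy with snprintf() into a buffer sized from the version string, document the banner[13] poke as replacing the 'x' in "Micrel KSZ884x", and fold the kmalloc()+memset() pair into kzalloc(). The kzalloc identity, for reference:

	/* These are equivalent; kzalloc is kmalloc with implicit zeroing: */
	info = kmalloc(sizeof(struct platform_info), GFP_KERNEL);
	if (info)
		memset(info, 0, sizeof(struct platform_info));

	info = kzalloc(sizeof(struct platform_info), GFP_KERNEL);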
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 40faa368b07a..445e73c343ba 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c | |||
@@ -748,6 +748,9 @@ static int macvlan_device_event(struct notifier_block *unused, | |||
748 | list_for_each_entry_safe(vlan, next, &port->vlans, list) | 748 | list_for_each_entry_safe(vlan, next, &port->vlans, list) |
749 | vlan->dev->rtnl_link_ops->dellink(vlan->dev, NULL); | 749 | vlan->dev->rtnl_link_ops->dellink(vlan->dev, NULL); |
750 | break; | 750 | break; |
751 | case NETDEV_PRE_TYPE_CHANGE: | ||
752 | /* Forbid underlaying device to change its type. */ | ||
753 | return NOTIFY_BAD; | ||
751 | } | 754 | } |
752 | return NOTIFY_DONE; | 755 | return NOTIFY_DONE; |
753 | } | 756 | } |
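macvlan now vetoes type changes on the underlying device: NETDEV_PRE_TYPE_CHANGE is asked before the change happens, and answering NOTIFY_BAD makes the core abort it (the res check added to netdev_bonding_change() in the bond_main.c hunk near the top of this patch is the other half of the same mechanism). A skeleton of the veto pattern, with a hypothetical handler name:

	#include <linux/netdevice.h>
	#include <linux/notifier.h>

	/* Hypothetical notifier: refuse any attempt to change the type of
	 * a device we are stacked on; let every other event pass. */
	static int my_device_event(struct notifier_block *unused,
				   unsigned long event, void *ptr)
	{
		switch (event) {
		case NETDEV_PRE_TYPE_CHANGE:
			return NOTIFY_BAD;	/* caller sees this and backs out */
		}
		return NOTIFY_DONE;
	}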
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c index c48b0f4b17b7..7cd0933735e2 100644 --- a/drivers/net/mlx4/en_netdev.c +++ b/drivers/net/mlx4/en_netdev.c | |||
@@ -160,39 +160,29 @@ static void mlx4_en_do_set_mac(struct work_struct *work) | |||
160 | static void mlx4_en_clear_list(struct net_device *dev) | 160 | static void mlx4_en_clear_list(struct net_device *dev) |
161 | { | 161 | { |
162 | struct mlx4_en_priv *priv = netdev_priv(dev); | 162 | struct mlx4_en_priv *priv = netdev_priv(dev); |
163 | struct dev_mc_list *plist = priv->mc_list; | ||
164 | struct dev_mc_list *next; | ||
165 | 163 | ||
166 | while (plist) { | 164 | kfree(priv->mc_addrs); |
167 | next = plist->next; | 165 | priv->mc_addrs_cnt = 0; |
168 | kfree(plist); | ||
169 | plist = next; | ||
170 | } | ||
171 | priv->mc_list = NULL; | ||
172 | } | 166 | } |
173 | 167 | ||
174 | static void mlx4_en_cache_mclist(struct net_device *dev) | 168 | static void mlx4_en_cache_mclist(struct net_device *dev) |
175 | { | 169 | { |
176 | struct mlx4_en_priv *priv = netdev_priv(dev); | 170 | struct mlx4_en_priv *priv = netdev_priv(dev); |
177 | struct dev_mc_list *mclist; | 171 | struct dev_mc_list *mclist; |
178 | struct dev_mc_list *tmp; | 172 | char *mc_addrs; |
179 | struct dev_mc_list *plist = NULL; | 173 | int mc_addrs_cnt = netdev_mc_count(dev); |
180 | 174 | int i; | |
181 | for (mclist = dev->mc_list; mclist; mclist = mclist->next) { | 175 | |
182 | tmp = kmalloc(sizeof(struct dev_mc_list), GFP_ATOMIC); | 176 | mc_addrs = kmalloc(mc_addrs_cnt * ETH_ALEN, GFP_ATOMIC); |
183 | if (!tmp) { | 177 | if (!mc_addrs) { |
184 | en_err(priv, "failed to allocate multicast list\n"); | 178 | en_err(priv, "failed to allocate multicast list\n"); |
185 | mlx4_en_clear_list(dev); | 179 | return; |
186 | return; | ||
187 | } | ||
188 | memcpy(tmp, mclist, sizeof(struct dev_mc_list)); | ||
189 | tmp->next = NULL; | ||
190 | if (plist) | ||
191 | plist->next = tmp; | ||
192 | else | ||
193 | priv->mc_list = tmp; | ||
194 | plist = tmp; | ||
195 | } | 180 | } |
181 | i = 0; | ||
182 | netdev_for_each_mc_addr(mclist, dev) | ||
183 | memcpy(mc_addrs + i++ * ETH_ALEN, mclist->dmi_addr, ETH_ALEN); | ||
184 | priv->mc_addrs = mc_addrs; | ||
185 | priv->mc_addrs_cnt = mc_addrs_cnt; | ||
196 | } | 186 | } |
197 | 187 | ||
198 | 188 | ||
@@ -212,7 +202,6 @@ static void mlx4_en_do_set_multicast(struct work_struct *work) | |||
212 | mcast_task); | 202 | mcast_task); |
213 | struct mlx4_en_dev *mdev = priv->mdev; | 203 | struct mlx4_en_dev *mdev = priv->mdev; |
214 | struct net_device *dev = priv->dev; | 204 | struct net_device *dev = priv->dev; |
215 | struct dev_mc_list *mclist; | ||
216 | u64 mcast_addr = 0; | 205 | u64 mcast_addr = 0; |
217 | int err; | 206 | int err; |
218 | 207 | ||
@@ -288,6 +277,8 @@ static void mlx4_en_do_set_multicast(struct work_struct *work) | |||
288 | if (err) | 277 | if (err) |
289 | en_err(priv, "Failed disabling multicast filter\n"); | 278 | en_err(priv, "Failed disabling multicast filter\n"); |
290 | } else { | 279 | } else { |
280 | int i; | ||
281 | |||
291 | err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, | 282 | err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, |
292 | 0, MLX4_MCAST_DISABLE); | 283 | 0, MLX4_MCAST_DISABLE); |
293 | if (err) | 284 | if (err) |
@@ -302,8 +293,9 @@ static void mlx4_en_do_set_multicast(struct work_struct *work) | |||
302 | netif_tx_lock_bh(dev); | 293 | netif_tx_lock_bh(dev); |
303 | mlx4_en_cache_mclist(dev); | 294 | mlx4_en_cache_mclist(dev); |
304 | netif_tx_unlock_bh(dev); | 295 | netif_tx_unlock_bh(dev); |
305 | for (mclist = priv->mc_list; mclist; mclist = mclist->next) { | 296 | for (i = 0; i < priv->mc_addrs_cnt; i++) { |
306 | mcast_addr = mlx4_en_mac_to_u64(mclist->dmi_addr); | 297 | mcast_addr = |
298 | mlx4_en_mac_to_u64(priv->mc_addrs + i * ETH_ALEN); | ||
307 | mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, | 299 | mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, |
308 | mcast_addr, 0, MLX4_MCAST_CONFIG); | 300 | mcast_addr, 0, MLX4_MCAST_CONFIG); |
309 | } | 301 | } |
@@ -984,7 +976,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, | |||
984 | priv->flags = prof->flags; | 976 | priv->flags = prof->flags; |
985 | priv->tx_ring_num = prof->tx_ring_num; | 977 | priv->tx_ring_num = prof->tx_ring_num; |
986 | priv->rx_ring_num = prof->rx_ring_num; | 978 | priv->rx_ring_num = prof->rx_ring_num; |
987 | priv->mc_list = NULL; | ||
988 | priv->mac_index = -1; | 979 | priv->mac_index = -1; |
989 | priv->msg_enable = MLX4_EN_MSG_LEVEL; | 980 | priv->msg_enable = MLX4_EN_MSG_LEVEL; |
990 | spin_lock_init(&priv->stats_lock); | 981 | spin_lock_init(&priv->stats_lock); |
diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h index 82c3ebc584e3..b55e46c8b682 100644 --- a/drivers/net/mlx4/mlx4_en.h +++ b/drivers/net/mlx4/mlx4_en.h | |||
@@ -492,7 +492,8 @@ struct mlx4_en_priv { | |||
492 | struct mlx4_en_perf_stats pstats; | 492 | struct mlx4_en_perf_stats pstats; |
493 | struct mlx4_en_pkt_stats pkstats; | 493 | struct mlx4_en_pkt_stats pkstats; |
494 | struct mlx4_en_port_stats port_stats; | 494 | struct mlx4_en_port_stats port_stats; |
495 | struct dev_mc_list *mc_list; | 495 | char *mc_addrs; |
496 | int mc_addrs_cnt; | ||
496 | struct mlx4_en_stat_out_mbox hw_stats; | 497 | struct mlx4_en_stat_out_mbox hw_stats; |
497 | }; | 498 | }; |
498 | 499 | ||
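mlx4_en stops deep-copying struct dev_mc_list nodes and instead snapshots the addresses into one flat, ETH_ALEN-strided buffer (priv->mc_addrs plus priv->mc_addrs_cnt), so caching is a single allocation and mlx4_en_clear_list() a single kfree(), with no per-node failure to unwind mid-list. A self-contained userspace sketch of the same flattening (types are illustrative):

	#include <stdlib.h>
	#include <string.h>

	#define ETH_ALEN 6

	struct mc_node {
		unsigned char addr[ETH_ALEN];
		struct mc_node *next;
	};

	/* Snapshot a linked list of MAC addresses into one contiguous
	 * buffer: one malloc to create, one free to destroy. */
	static unsigned char *flatten_mc_list(const struct mc_node *head,
					      int count)
	{
		unsigned char *buf = malloc((size_t)count * ETH_ALEN);
		int i = 0;

		if (!buf)
			return NULL;
		for (; head && i < count; head = head->next, i++)
			memcpy(buf + (size_t)i * ETH_ALEN, head->addr, ETH_ALEN);
		return buf;
	}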
diff --git a/drivers/net/phy/mdio-bitbang.c b/drivers/net/phy/mdio-bitbang.c index 2576055b350b..0ff06617a4ab 100644 --- a/drivers/net/phy/mdio-bitbang.c +++ b/drivers/net/phy/mdio-bitbang.c | |||
@@ -23,8 +23,13 @@ | |||
23 | #include <linux/types.h> | 23 | #include <linux/types.h> |
24 | #include <linux/delay.h> | 24 | #include <linux/delay.h> |
25 | 25 | ||
26 | #define MDIO_READ 1 | 26 | #define MDIO_READ 2 |
27 | #define MDIO_WRITE 0 | 27 | #define MDIO_WRITE 1 |
28 | |||
29 | #define MDIO_C45 (1<<15) | ||
30 | #define MDIO_C45_ADDR (MDIO_C45 | 0) | ||
31 | #define MDIO_C45_READ (MDIO_C45 | 3) | ||
32 | #define MDIO_C45_WRITE (MDIO_C45 | 1) | ||
28 | 33 | ||
29 | #define MDIO_SETUP_TIME 10 | 34 | #define MDIO_SETUP_TIME 10 |
30 | #define MDIO_HOLD_TIME 10 | 35 | #define MDIO_HOLD_TIME 10 |
@@ -90,7 +95,7 @@ static u16 mdiobb_get_num(struct mdiobb_ctrl *ctrl, int bits) | |||
90 | /* Utility to send the preamble, address, and | 95 | /* Utility to send the preamble, address, and |
91 | * register (common to read and write). | 96 | * register (common to read and write). |
92 | */ | 97 | */ |
93 | static void mdiobb_cmd(struct mdiobb_ctrl *ctrl, int read, u8 phy, u8 reg) | 98 | static void mdiobb_cmd(struct mdiobb_ctrl *ctrl, int op, u8 phy, u8 reg) |
94 | { | 99 | { |
95 | const struct mdiobb_ops *ops = ctrl->ops; | 100 | const struct mdiobb_ops *ops = ctrl->ops; |
96 | int i; | 101 | int i; |
@@ -109,23 +114,56 @@ static void mdiobb_cmd(struct mdiobb_ctrl *ctrl, int read, u8 phy, u8 reg) | |||
109 | for (i = 0; i < 32; i++) | 114 | for (i = 0; i < 32; i++) |
110 | mdiobb_send_bit(ctrl, 1); | 115 | mdiobb_send_bit(ctrl, 1); |
111 | 116 | ||
112 | /* send the start bit (01) and the read opcode (10) or write (10) */ | 117 | /* send the start bit (01) and the read opcode (10) or write (01). |
118 | Clause 45 operation uses 00 for the start and 11 (read) or | ||
119 | 01 (write) for the opcode */ | ||
113 | mdiobb_send_bit(ctrl, 0); | 120 | mdiobb_send_bit(ctrl, 0); |
114 | mdiobb_send_bit(ctrl, 1); | 121 | if (op & MDIO_C45) |
115 | mdiobb_send_bit(ctrl, read); | 122 | mdiobb_send_bit(ctrl, 0); |
116 | mdiobb_send_bit(ctrl, !read); | 123 | else |
124 | mdiobb_send_bit(ctrl, 1); | ||
125 | mdiobb_send_bit(ctrl, (op >> 1) & 1); | ||
126 | mdiobb_send_bit(ctrl, (op >> 0) & 1); | ||
117 | 127 | ||
118 | mdiobb_send_num(ctrl, phy, 5); | 128 | mdiobb_send_num(ctrl, phy, 5); |
119 | mdiobb_send_num(ctrl, reg, 5); | 129 | mdiobb_send_num(ctrl, reg, 5); |
120 | } | 130 | } |
121 | 131 | ||
132 | /* In clause 45 mode all commands are prefixed by MDIO_ADDR to specify the | ||
133 | lower 16 bits of the 21 bit address. This transfer is done identically to a | ||
134 | MDIO_WRITE except for a different code. To enable clause 45 mode, OR | ||
135 | MII_ADDR_C45 into the address. Theoretically clause 45 and normal devices | ||
136 | can exist on the same bus. Normal devices should ignore the MDIO_ADDR | ||
137 | phase. */ | ||
138 | static int mdiobb_cmd_addr(struct mdiobb_ctrl *ctrl, int phy, u32 addr) | ||
139 | { | ||
140 | unsigned int dev_addr = (addr >> 16) & 0x1F; | ||
141 | unsigned int reg = addr & 0xFFFF; | ||
142 | mdiobb_cmd(ctrl, MDIO_C45_ADDR, phy, dev_addr); | ||
143 | |||
144 | /* send the turnaround (10) */ | ||
145 | mdiobb_send_bit(ctrl, 1); | ||
146 | mdiobb_send_bit(ctrl, 0); | ||
147 | |||
148 | mdiobb_send_num(ctrl, reg, 16); | ||
149 | |||
150 | ctrl->ops->set_mdio_dir(ctrl, 0); | ||
151 | mdiobb_get_bit(ctrl); | ||
152 | |||
153 | return dev_addr; | ||
154 | } | ||
122 | 155 | ||
123 | static int mdiobb_read(struct mii_bus *bus, int phy, int reg) | 156 | static int mdiobb_read(struct mii_bus *bus, int phy, int reg) |
124 | { | 157 | { |
125 | struct mdiobb_ctrl *ctrl = bus->priv; | 158 | struct mdiobb_ctrl *ctrl = bus->priv; |
126 | int ret, i; | 159 | int ret, i; |
127 | 160 | ||
128 | mdiobb_cmd(ctrl, MDIO_READ, phy, reg); | 161 | if (reg & MII_ADDR_C45) { |
162 | reg = mdiobb_cmd_addr(ctrl, phy, reg); | ||
163 | mdiobb_cmd(ctrl, MDIO_C45_READ, phy, reg); | ||
164 | } else | ||
165 | mdiobb_cmd(ctrl, MDIO_READ, phy, reg); | ||
166 | |||
129 | ctrl->ops->set_mdio_dir(ctrl, 0); | 167 | ctrl->ops->set_mdio_dir(ctrl, 0); |
130 | 168 | ||
131 | /* check the turnaround bit: the PHY should be driving it to zero */ | 169 | /* check the turnaround bit: the PHY should be driving it to zero */ |
@@ -148,7 +186,11 @@ static int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val) | |||
148 | { | 186 | { |
149 | struct mdiobb_ctrl *ctrl = bus->priv; | 187 | struct mdiobb_ctrl *ctrl = bus->priv; |
150 | 188 | ||
151 | mdiobb_cmd(ctrl, MDIO_WRITE, phy, reg); | 189 | if (reg & MII_ADDR_C45) { |
190 | reg = mdiobb_cmd_addr(ctrl, phy, reg); | ||
191 | mdiobb_cmd(ctrl, MDIO_C45_WRITE, phy, reg); | ||
192 | } else | ||
193 | mdiobb_cmd(ctrl, MDIO_WRITE, phy, reg); | ||
152 | 194 | ||
153 | /* send the turnaround (10) */ | 195 | /* send the turnaround (10) */ |
154 | mdiobb_send_bit(ctrl, 1); | 196 | mdiobb_send_bit(ctrl, 1); |
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index e17b70291bbc..6a6b8199a0d6 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c | |||
@@ -208,7 +208,7 @@ EXPORT_SYMBOL(mdiobus_scan); | |||
208 | * because the bus read/write functions may wait for an interrupt | 208 | * because the bus read/write functions may wait for an interrupt |
209 | * to conclude the operation. | 209 | * to conclude the operation. |
210 | */ | 210 | */ |
211 | int mdiobus_read(struct mii_bus *bus, int addr, u16 regnum) | 211 | int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum) |
212 | { | 212 | { |
213 | int retval; | 213 | int retval; |
214 | 214 | ||
@@ -233,7 +233,7 @@ EXPORT_SYMBOL(mdiobus_read); | |||
233 | * because the bus read/write functions may wait for an interrupt | 233 | * because the bus read/write functions may wait for an interrupt |
234 | * to conclude the operation. | 234 | * to conclude the operation. |
235 | */ | 235 | */ |
236 | int mdiobus_write(struct mii_bus *bus, int addr, u16 regnum, u16 val) | 236 | int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val) |
237 | { | 237 | { |
238 | int err; | 238 | int err; |
239 | 239 | ||
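The bitbang driver gains IEEE 802.3 clause 45 framing, and mdiobus_read()/mdiobus_write() widen regnum from u16 to u32 to match: a clause-45 access carries a 5-bit MMD device address plus a 16-bit register (21 bits) plus the MII_ADDR_C45 flag, which cannot fit in 16 bits. Assuming MII_ADDR_C45 is the flag bit tested in the hunks above, a caller would pack the register argument roughly like this (helper and variable names are hypothetical):

	/* Pack a clause-45 (MMD, register) pair into the u32 regnum that
	 * mdiobus_read()/mdiobus_write() now accept. */
	static inline u32 c45_regnum(int devad, int reg)
	{
		return MII_ADDR_C45 | ((devad & 0x1f) << 16) | (reg & 0xffff);
	}

	/* e.g. read register 0 of the PMA/PMD MMD (device address 1): */
	val = mdiobus_read(bus, phy_addr, c45_regnum(1, 0));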
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index 9d3ebf3e975e..964305c7f9f1 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/tcp.h> | 23 | #include <linux/tcp.h> |
24 | #include <linux/init.h> | 24 | #include <linux/init.h> |
25 | #include <linux/dma-mapping.h> | 25 | #include <linux/dma-mapping.h> |
26 | #include <linux/pm_runtime.h> | ||
26 | 27 | ||
27 | #include <asm/system.h> | 28 | #include <asm/system.h> |
28 | #include <asm/io.h> | 29 | #include <asm/io.h> |
@@ -504,6 +505,7 @@ struct rtl8169_private { | |||
504 | 505 | ||
505 | struct mii_if_info mii; | 506 | struct mii_if_info mii; |
506 | struct rtl8169_counters counters; | 507 | struct rtl8169_counters counters; |
508 | u32 saved_wolopts; | ||
507 | }; | 509 | }; |
508 | 510 | ||
509 | MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>"); | 511 | MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>"); |
@@ -744,53 +746,61 @@ static void rtl8169_check_link_status(struct net_device *dev, | |||
744 | 746 | ||
745 | spin_lock_irqsave(&tp->lock, flags); | 747 | spin_lock_irqsave(&tp->lock, flags); |
746 | if (tp->link_ok(ioaddr)) { | 748 | if (tp->link_ok(ioaddr)) { |
749 | /* This is to cancel a scheduled suspend if there's one. */ | ||
750 | pm_request_resume(&tp->pci_dev->dev); | ||
747 | netif_carrier_on(dev); | 751 | netif_carrier_on(dev); |
748 | netif_info(tp, ifup, dev, "link up\n"); | 752 | netif_info(tp, ifup, dev, "link up\n"); |
749 | } else { | 753 | } else { |
750 | netif_carrier_off(dev); | 754 | netif_carrier_off(dev); |
751 | netif_info(tp, ifdown, dev, "link down\n"); | 755 | netif_info(tp, ifdown, dev, "link down\n"); |
756 | pm_schedule_suspend(&tp->pci_dev->dev, 100); | ||
752 | } | 757 | } |
753 | spin_unlock_irqrestore(&tp->lock, flags); | 758 | spin_unlock_irqrestore(&tp->lock, flags); |
754 | } | 759 | } |
755 | 760 | ||
756 | static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | 761 | #define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST) |
762 | |||
763 | static u32 __rtl8169_get_wol(struct rtl8169_private *tp) | ||
757 | { | 764 | { |
758 | struct rtl8169_private *tp = netdev_priv(dev); | ||
759 | void __iomem *ioaddr = tp->mmio_addr; | 765 | void __iomem *ioaddr = tp->mmio_addr; |
760 | u8 options; | 766 | u8 options; |
761 | 767 | u32 wolopts = 0; | |
762 | wol->wolopts = 0; | ||
763 | |||
764 | #define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST) | ||
765 | wol->supported = WAKE_ANY; | ||
766 | |||
767 | spin_lock_irq(&tp->lock); | ||
768 | 768 | ||
769 | options = RTL_R8(Config1); | 769 | options = RTL_R8(Config1); |
770 | if (!(options & PMEnable)) | 770 | if (!(options & PMEnable)) |
771 | goto out_unlock; | 771 | return 0; |
772 | 772 | ||
773 | options = RTL_R8(Config3); | 773 | options = RTL_R8(Config3); |
774 | if (options & LinkUp) | 774 | if (options & LinkUp) |
775 | wol->wolopts |= WAKE_PHY; | 775 | wolopts |= WAKE_PHY; |
776 | if (options & MagicPacket) | 776 | if (options & MagicPacket) |
777 | wol->wolopts |= WAKE_MAGIC; | 777 | wolopts |= WAKE_MAGIC; |
778 | 778 | ||
779 | options = RTL_R8(Config5); | 779 | options = RTL_R8(Config5); |
780 | if (options & UWF) | 780 | if (options & UWF) |
781 | wol->wolopts |= WAKE_UCAST; | 781 | wolopts |= WAKE_UCAST; |
782 | if (options & BWF) | 782 | if (options & BWF) |
783 | wol->wolopts |= WAKE_BCAST; | 783 | wolopts |= WAKE_BCAST; |
784 | if (options & MWF) | 784 | if (options & MWF) |
785 | wol->wolopts |= WAKE_MCAST; | 785 | wolopts |= WAKE_MCAST; |
786 | 786 | ||
787 | out_unlock: | 787 | return wolopts; |
788 | spin_unlock_irq(&tp->lock); | ||
789 | } | 788 | } |
790 | 789 | ||
791 | static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | 790 | static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) |
792 | { | 791 | { |
793 | struct rtl8169_private *tp = netdev_priv(dev); | 792 | struct rtl8169_private *tp = netdev_priv(dev); |
793 | |||
794 | spin_lock_irq(&tp->lock); | ||
795 | |||
796 | wol->supported = WAKE_ANY; | ||
797 | wol->wolopts = __rtl8169_get_wol(tp); | ||
798 | |||
799 | spin_unlock_irq(&tp->lock); | ||
800 | } | ||
801 | |||
802 | static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) | ||
803 | { | ||
794 | void __iomem *ioaddr = tp->mmio_addr; | 804 | void __iomem *ioaddr = tp->mmio_addr; |
795 | unsigned int i; | 805 | unsigned int i; |
796 | static const struct { | 806 | static const struct { |
@@ -807,23 +817,29 @@ static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | |||
807 | { WAKE_ANY, Config5, LanWake } | 817 | { WAKE_ANY, Config5, LanWake } |
808 | }; | 818 | }; |
809 | 819 | ||
810 | spin_lock_irq(&tp->lock); | ||
811 | |||
812 | RTL_W8(Cfg9346, Cfg9346_Unlock); | 820 | RTL_W8(Cfg9346, Cfg9346_Unlock); |
813 | 821 | ||
814 | for (i = 0; i < ARRAY_SIZE(cfg); i++) { | 822 | for (i = 0; i < ARRAY_SIZE(cfg); i++) { |
815 | u8 options = RTL_R8(cfg[i].reg) & ~cfg[i].mask; | 823 | u8 options = RTL_R8(cfg[i].reg) & ~cfg[i].mask; |
816 | if (wol->wolopts & cfg[i].opt) | 824 | if (wolopts & cfg[i].opt) |
817 | options |= cfg[i].mask; | 825 | options |= cfg[i].mask; |
818 | RTL_W8(cfg[i].reg, options); | 826 | RTL_W8(cfg[i].reg, options); |
819 | } | 827 | } |
820 | 828 | ||
821 | RTL_W8(Cfg9346, Cfg9346_Lock); | 829 | RTL_W8(Cfg9346, Cfg9346_Lock); |
830 | } | ||
831 | |||
832 | static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | ||
833 | { | ||
834 | struct rtl8169_private *tp = netdev_priv(dev); | ||
835 | |||
836 | spin_lock_irq(&tp->lock); | ||
822 | 837 | ||
823 | if (wol->wolopts) | 838 | if (wol->wolopts) |
824 | tp->features |= RTL_FEATURE_WOL; | 839 | tp->features |= RTL_FEATURE_WOL; |
825 | else | 840 | else |
826 | tp->features &= ~RTL_FEATURE_WOL; | 841 | tp->features &= ~RTL_FEATURE_WOL; |
842 | __rtl8169_set_wol(tp, wol->wolopts); | ||
827 | device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts); | 843 | device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts); |
828 | 844 | ||
829 | spin_unlock_irq(&tp->lock); | 845 | spin_unlock_irq(&tp->lock); |
@@ -3189,6 +3205,12 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
3189 | 3205 | ||
3190 | device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL); | 3206 | device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL); |
3191 | 3207 | ||
3208 | if (pci_dev_run_wake(pdev)) { | ||
3209 | pm_runtime_set_active(&pdev->dev); | ||
3210 | pm_runtime_enable(&pdev->dev); | ||
3211 | } | ||
3212 | pm_runtime_idle(&pdev->dev); | ||
3213 | |||
3192 | out: | 3214 | out: |
3193 | return rc; | 3215 | return rc; |
3194 | 3216 | ||
@@ -3211,10 +3233,18 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev) | |||
3211 | struct net_device *dev = pci_get_drvdata(pdev); | 3233 | struct net_device *dev = pci_get_drvdata(pdev); |
3212 | struct rtl8169_private *tp = netdev_priv(dev); | 3234 | struct rtl8169_private *tp = netdev_priv(dev); |
3213 | 3235 | ||
3236 | pm_runtime_get_sync(&pdev->dev); | ||
3237 | |||
3214 | flush_scheduled_work(); | 3238 | flush_scheduled_work(); |
3215 | 3239 | ||
3216 | unregister_netdev(dev); | 3240 | unregister_netdev(dev); |
3217 | 3241 | ||
3242 | if (pci_dev_run_wake(pdev)) { | ||
3243 | pm_runtime_disable(&pdev->dev); | ||
3244 | pm_runtime_set_suspended(&pdev->dev); | ||
3245 | } | ||
3246 | pm_runtime_put_noidle(&pdev->dev); | ||
3247 | |||
3218 | /* restore original MAC address */ | 3248 | /* restore original MAC address */ |
3219 | rtl_rar_set(tp, dev->perm_addr); | 3249 | rtl_rar_set(tp, dev->perm_addr); |
3220 | 3250 | ||
@@ -3237,6 +3267,7 @@ static int rtl8169_open(struct net_device *dev) | |||
3237 | struct pci_dev *pdev = tp->pci_dev; | 3267 | struct pci_dev *pdev = tp->pci_dev; |
3238 | int retval = -ENOMEM; | 3268 | int retval = -ENOMEM; |
3239 | 3269 | ||
3270 | pm_runtime_get_sync(&pdev->dev); | ||
3240 | 3271 | ||
3241 | rtl8169_set_rxbufsize(tp, dev); | 3272 | rtl8169_set_rxbufsize(tp, dev); |
3242 | 3273 | ||
@@ -3247,7 +3278,7 @@ static int rtl8169_open(struct net_device *dev) | |||
3247 | tp->TxDescArray = pci_alloc_consistent(pdev, R8169_TX_RING_BYTES, | 3278 | tp->TxDescArray = pci_alloc_consistent(pdev, R8169_TX_RING_BYTES, |
3248 | &tp->TxPhyAddr); | 3279 | &tp->TxPhyAddr); |
3249 | if (!tp->TxDescArray) | 3280 | if (!tp->TxDescArray) |
3250 | goto out; | 3281 | goto err_pm_runtime_put; |
3251 | 3282 | ||
3252 | tp->RxDescArray = pci_alloc_consistent(pdev, R8169_RX_RING_BYTES, | 3283 | tp->RxDescArray = pci_alloc_consistent(pdev, R8169_RX_RING_BYTES, |
3253 | &tp->RxPhyAddr); | 3284 | &tp->RxPhyAddr); |
@@ -3274,6 +3305,9 @@ static int rtl8169_open(struct net_device *dev) | |||
3274 | 3305 | ||
3275 | rtl8169_request_timer(dev); | 3306 | rtl8169_request_timer(dev); |
3276 | 3307 | ||
3308 | tp->saved_wolopts = 0; | ||
3309 | pm_runtime_put_noidle(&pdev->dev); | ||
3310 | |||
3277 | rtl8169_check_link_status(dev, tp, tp->mmio_addr); | 3311 | rtl8169_check_link_status(dev, tp, tp->mmio_addr); |
3278 | out: | 3312 | out: |
3279 | return retval; | 3313 | return retval; |
@@ -3283,9 +3317,13 @@ err_release_ring_2: | |||
3283 | err_free_rx_1: | 3317 | err_free_rx_1: |
3284 | pci_free_consistent(pdev, R8169_RX_RING_BYTES, tp->RxDescArray, | 3318 | pci_free_consistent(pdev, R8169_RX_RING_BYTES, tp->RxDescArray, |
3285 | tp->RxPhyAddr); | 3319 | tp->RxPhyAddr); |
3320 | tp->RxDescArray = NULL; | ||
3286 | err_free_tx_0: | 3321 | err_free_tx_0: |
3287 | pci_free_consistent(pdev, R8169_TX_RING_BYTES, tp->TxDescArray, | 3322 | pci_free_consistent(pdev, R8169_TX_RING_BYTES, tp->TxDescArray, |
3288 | tp->TxPhyAddr); | 3323 | tp->TxPhyAddr); |
3324 | tp->TxDescArray = NULL; | ||
3325 | err_pm_runtime_put: | ||
3326 | pm_runtime_put_noidle(&pdev->dev); | ||
3289 | goto out; | 3327 | goto out; |
3290 | } | 3328 | } |
3291 | 3329 | ||
@@ -4692,6 +4730,8 @@ static int rtl8169_close(struct net_device *dev) | |||
4692 | struct rtl8169_private *tp = netdev_priv(dev); | 4730 | struct rtl8169_private *tp = netdev_priv(dev); |
4693 | struct pci_dev *pdev = tp->pci_dev; | 4731 | struct pci_dev *pdev = tp->pci_dev; |
4694 | 4732 | ||
4733 | pm_runtime_get_sync(&pdev->dev); | ||
4734 | |||
4695 | /* update counters before going down */ | 4735 | /* update counters before going down */ |
4696 | rtl8169_update_counters(dev); | 4736 | rtl8169_update_counters(dev); |
4697 | 4737 | ||
@@ -4706,6 +4746,8 @@ static int rtl8169_close(struct net_device *dev) | |||
4706 | tp->TxDescArray = NULL; | 4746 | tp->TxDescArray = NULL; |
4707 | tp->RxDescArray = NULL; | 4747 | tp->RxDescArray = NULL; |
4708 | 4748 | ||
4749 | pm_runtime_put_sync(&pdev->dev); | ||
4750 | |||
4709 | return 0; | 4751 | return 0; |
4710 | } | 4752 | } |
4711 | 4753 | ||
@@ -4804,21 +4846,74 @@ static int rtl8169_suspend(struct device *device) | |||
4804 | return 0; | 4846 | return 0; |
4805 | } | 4847 | } |
4806 | 4848 | ||
4849 | static void __rtl8169_resume(struct net_device *dev) | ||
4850 | { | ||
4851 | netif_device_attach(dev); | ||
4852 | rtl8169_schedule_work(dev, rtl8169_reset_task); | ||
4853 | } | ||
4854 | |||
4807 | static int rtl8169_resume(struct device *device) | 4855 | static int rtl8169_resume(struct device *device) |
4808 | { | 4856 | { |
4809 | struct pci_dev *pdev = to_pci_dev(device); | 4857 | struct pci_dev *pdev = to_pci_dev(device); |
4810 | struct net_device *dev = pci_get_drvdata(pdev); | 4858 | struct net_device *dev = pci_get_drvdata(pdev); |
4811 | 4859 | ||
4812 | if (!netif_running(dev)) | 4860 | if (netif_running(dev)) |
4813 | goto out; | 4861 | __rtl8169_resume(dev); |
4814 | 4862 | ||
4815 | netif_device_attach(dev); | 4863 | return 0; |
4864 | } | ||
4865 | |||
4866 | static int rtl8169_runtime_suspend(struct device *device) | ||
4867 | { | ||
4868 | struct pci_dev *pdev = to_pci_dev(device); | ||
4869 | struct net_device *dev = pci_get_drvdata(pdev); | ||
4870 | struct rtl8169_private *tp = netdev_priv(dev); | ||
4871 | |||
4872 | if (!tp->TxDescArray) | ||
4873 | return 0; | ||
4874 | |||
4875 | spin_lock_irq(&tp->lock); | ||
4876 | tp->saved_wolopts = __rtl8169_get_wol(tp); | ||
4877 | __rtl8169_set_wol(tp, WAKE_ANY); | ||
4878 | spin_unlock_irq(&tp->lock); | ||
4879 | |||
4880 | rtl8169_net_suspend(dev); | ||
4881 | |||
4882 | return 0; | ||
4883 | } | ||
4884 | |||
4885 | static int rtl8169_runtime_resume(struct device *device) | ||
4886 | { | ||
4887 | struct pci_dev *pdev = to_pci_dev(device); | ||
4888 | struct net_device *dev = pci_get_drvdata(pdev); | ||
4889 | struct rtl8169_private *tp = netdev_priv(dev); | ||
4890 | |||
4891 | if (!tp->TxDescArray) | ||
4892 | return 0; | ||
4893 | |||
4894 | spin_lock_irq(&tp->lock); | ||
4895 | __rtl8169_set_wol(tp, tp->saved_wolopts); | ||
4896 | tp->saved_wolopts = 0; | ||
4897 | spin_unlock_irq(&tp->lock); | ||
4898 | |||
4899 | __rtl8169_resume(dev); | ||
4816 | 4900 | ||
4817 | rtl8169_schedule_work(dev, rtl8169_reset_task); | ||
4818 | out: | ||
4819 | return 0; | 4901 | return 0; |
4820 | } | 4902 | } |
4821 | 4903 | ||
4904 | static int rtl8169_runtime_idle(struct device *device) | ||
4905 | { | ||
4906 | struct pci_dev *pdev = to_pci_dev(device); | ||
4907 | struct net_device *dev = pci_get_drvdata(pdev); | ||
4908 | struct rtl8169_private *tp = netdev_priv(dev); | ||
4909 | |||
4910 | if (!tp->TxDescArray) | ||
4911 | return 0; | ||
4912 | |||
4913 | rtl8169_check_link_status(dev, tp, tp->mmio_addr); | ||
4914 | return -EBUSY; | ||
4915 | } | ||
4916 | |||
4822 | static const struct dev_pm_ops rtl8169_pm_ops = { | 4917 | static const struct dev_pm_ops rtl8169_pm_ops = { |
4823 | .suspend = rtl8169_suspend, | 4918 | .suspend = rtl8169_suspend, |
4824 | .resume = rtl8169_resume, | 4919 | .resume = rtl8169_resume, |
@@ -4826,6 +4921,9 @@ static const struct dev_pm_ops rtl8169_pm_ops = { | |||
4826 | .thaw = rtl8169_resume, | 4921 | .thaw = rtl8169_resume, |
4827 | .poweroff = rtl8169_suspend, | 4922 | .poweroff = rtl8169_suspend, |
4828 | .restore = rtl8169_resume, | 4923 | .restore = rtl8169_resume, |
4924 | .runtime_suspend = rtl8169_runtime_suspend, | ||
4925 | .runtime_resume = rtl8169_runtime_resume, | ||
4926 | .runtime_idle = rtl8169_runtime_idle, | ||
4829 | }; | 4927 | }; |
4830 | 4928 | ||
4831 | #define RTL8169_PM_OPS (&rtl8169_pm_ops) | 4929 | #define RTL8169_PM_OPS (&rtl8169_pm_ops) |
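The r8169 changes above follow a consistent runtime-PM recipe: the WOL accessors are split into lock-free __rtl8169_get_wol()/__rtl8169_set_wol() helpers so the runtime callbacks can save the user's wake options, arm every wake source for the sleep period, and restore the saved options on resume, all under tp->lock; open/close/remove bracket hardware access with pm_runtime_get_sync()/pm_runtime_put_*(), and the idle callback returns -EBUSY while the link is still in use. A minimal sketch of the same shape for a hypothetical driver (the foo_* names and __foo_*_wol helpers are illustrative, not from this patch):

static int foo_runtime_suspend(struct device *dev)
{
        struct foo_priv *priv = dev_get_drvdata(dev);

        /* Remember the user's wake options, then arm everything. */
        spin_lock_irq(&priv->lock);
        priv->saved_wolopts = __foo_get_wol(priv);      /* assumed helper */
        __foo_set_wol(priv, WAKE_ANY);                  /* assumed helper */
        spin_unlock_irq(&priv->lock);

        return 0;
}

static int foo_runtime_resume(struct device *dev)
{
        struct foo_priv *priv = dev_get_drvdata(dev);

        /* Put the user's wake options back exactly as they were. */
        spin_lock_irq(&priv->lock);
        __foo_set_wol(priv, priv->saved_wolopts);
        priv->saved_wolopts = 0;
        spin_unlock_irq(&priv->lock);

        return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
        .runtime_suspend = foo_runtime_suspend,
        .runtime_resume  = foo_runtime_resume,
};

The probe side mirrors the hunk above: runtime PM is enabled only when pci_dev_run_wake() confirms the device can generate wake events, since runtime-suspending a device that cannot signal wakeups would mean missing link changes.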
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 6f1e3036bafd..7576ad5a833a 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c | |||
@@ -619,7 +619,7 @@ static void qeth_l2_set_multicast_list(struct net_device *dev) | |||
619 | return; | 619 | return; |
620 | qeth_l2_del_all_mc(card); | 620 | qeth_l2_del_all_mc(card); |
621 | spin_lock_bh(&card->mclock); | 621 | spin_lock_bh(&card->mclock); |
622 | for (dm = dev->mc_list; dm; dm = dm->next) | 622 | netdev_for_each_mc_addr(dm, dev) |
623 | qeth_l2_add_mc(card, dm->da_addr, 0); | 623 | qeth_l2_add_mc(card, dm->da_addr, 0); |
624 | 624 | ||
625 | netdev_for_each_uc_addr(ha, dev) | 625 | netdev_for_each_uc_addr(ha, dev) |
diff --git a/include/linux/if_link.h b/include/linux/if_link.h index c9bf92cd7653..cfd420ba72df 100644 --- a/include/linux/if_link.h +++ b/include/linux/if_link.h | |||
@@ -37,6 +37,38 @@ struct rtnl_link_stats { | |||
37 | __u32 tx_compressed; | 37 | __u32 tx_compressed; |
38 | }; | 38 | }; |
39 | 39 | ||
40 | struct rtnl_link_stats64 { | ||
41 | __u64 rx_packets; /* total packets received */ | ||
42 | __u64 tx_packets; /* total packets transmitted */ | ||
43 | __u64 rx_bytes; /* total bytes received */ | ||
44 | __u64 tx_bytes; /* total bytes transmitted */ | ||
45 | __u64 rx_errors; /* bad packets received */ | ||
46 | __u64 tx_errors; /* packet transmit problems */ | ||
47 | __u64 rx_dropped; /* no space in linux buffers */ | ||
48 | __u64 tx_dropped; /* no space available in linux */ | ||
49 | __u64 multicast; /* multicast packets received */ | ||
50 | __u64 collisions; | ||
51 | |||
52 | /* detailed rx_errors: */ | ||
53 | __u64 rx_length_errors; | ||
54 | __u64 rx_over_errors; /* receiver ring buff overflow */ | ||
55 | __u64 rx_crc_errors; /* recved pkt with crc error */ | ||
56 | __u64 rx_frame_errors; /* recv'd frame alignment error */ | ||
57 | __u64 rx_fifo_errors; /* recv'r fifo overrun */ | ||
58 | __u64 rx_missed_errors; /* receiver missed packet */ | ||
59 | |||
60 | /* detailed tx_errors */ | ||
61 | __u64 tx_aborted_errors; | ||
62 | __u64 tx_carrier_errors; | ||
63 | __u64 tx_fifo_errors; | ||
64 | __u64 tx_heartbeat_errors; | ||
65 | __u64 tx_window_errors; | ||
66 | |||
67 | /* for cslip etc */ | ||
68 | __u64 rx_compressed; | ||
69 | __u64 tx_compressed; | ||
70 | }; | ||
71 | |||
40 | /* The struct should be in sync with struct ifmap */ | 72 | /* The struct should be in sync with struct ifmap */ |
41 | struct rtnl_link_ifmap { | 73 | struct rtnl_link_ifmap { |
42 | __u64 mem_start; | 74 | __u64 mem_start; |
@@ -83,6 +115,7 @@ enum { | |||
83 | IFLA_VF_VLAN, | 115 | IFLA_VF_VLAN, |
84 | IFLA_VF_TX_RATE, /* TX Bandwidth Allocation */ | 116 | IFLA_VF_TX_RATE, /* TX Bandwidth Allocation */ |
85 | IFLA_VFINFO, | 117 | IFLA_VFINFO, |
118 | IFLA_STATS64, | ||
86 | __IFLA_MAX | 119 | __IFLA_MAX |
87 | }; | 120 | }; |
88 | 121 | ||
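struct rtnl_link_stats64 deliberately mirrors the legacy 32-bit struct rtnl_link_stats counter for counter and in the same order, which is what lets a dump path that only has 32-bit counters widen them mechanically into the new IFLA_STATS64 attribute. A hedged sketch of such a widening copy (the helper name is made up here; the loop relies only on the identical field layout of the two structs):

static void rtnl_stats32_to_stats64(struct rtnl_link_stats64 *v64,
                                    const struct rtnl_link_stats *v32)
{
        const __u32 *src = (const __u32 *)v32;
        __u64 *dst = (__u64 *)v64;
        size_t i, n = sizeof(*v32) / sizeof(__u32);

        /* Holds only while both structs list the same counters in order. */
        BUILD_BUG_ON(sizeof(*v64) != 2 * sizeof(*v32));

        for (i = 0; i < n; i++)
                dst[i] = src[i];
}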
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index fa8b47637997..9fc6ee8e7508 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
@@ -223,6 +223,7 @@ struct netif_rx_stats { | |||
223 | unsigned dropped; | 223 | unsigned dropped; |
224 | unsigned time_squeeze; | 224 | unsigned time_squeeze; |
225 | unsigned cpu_collision; | 225 | unsigned cpu_collision; |
226 | unsigned received_rps; | ||
226 | }; | 227 | }; |
227 | 228 | ||
228 | DECLARE_PER_CPU(struct netif_rx_stats, netdev_rx_stat); | 229 | DECLARE_PER_CPU(struct netif_rx_stats, netdev_rx_stat); |
@@ -530,6 +531,24 @@ struct netdev_queue { | |||
530 | unsigned long tx_dropped; | 531 | unsigned long tx_dropped; |
531 | } ____cacheline_aligned_in_smp; | 532 | } ____cacheline_aligned_in_smp; |
532 | 533 | ||
534 | /* | ||
535 | * This structure holds an RPS map which can be of variable length. The | ||
536 | * map is an array of CPUs. | ||
537 | */ | ||
538 | struct rps_map { | ||
539 | unsigned int len; | ||
540 | struct rcu_head rcu; | ||
541 | u16 cpus[0]; | ||
542 | }; | ||
543 | #define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + (_num * sizeof(u16))) | ||
544 | |||
545 | /* This structure contains an instance of an RX queue. */ | ||
546 | struct netdev_rx_queue { | ||
547 | struct rps_map *rps_map; | ||
548 | struct kobject kobj; | ||
549 | struct netdev_rx_queue *first; | ||
550 | atomic_t count; | ||
551 | } ____cacheline_aligned_in_smp; | ||
533 | 552 | ||
534 | /* | 553 | /* |
535 | * This structure defines the management hooks for network devices. | 554 | * This structure defines the management hooks for network devices. |
@@ -878,6 +897,13 @@ struct net_device { | |||
878 | 897 | ||
879 | unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */ | 898 | unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */ |
880 | 899 | ||
900 | struct kset *queues_kset; | ||
901 | |||
902 | struct netdev_rx_queue *_rx; | ||
903 | |||
904 | /* Number of RX queues allocated at alloc_netdev_mq() time */ | ||
905 | unsigned int num_rx_queues; | ||
906 | |||
881 | struct netdev_queue rx_queue; | 907 | struct netdev_queue rx_queue; |
882 | 908 | ||
883 | struct netdev_queue *_tx ____cacheline_aligned_in_smp; | 909 | struct netdev_queue *_tx ____cacheline_aligned_in_smp; |
@@ -1311,14 +1337,18 @@ static inline int unregister_gifconf(unsigned int family) | |||
1311 | */ | 1337 | */ |
1312 | struct softnet_data { | 1338 | struct softnet_data { |
1313 | struct Qdisc *output_queue; | 1339 | struct Qdisc *output_queue; |
1314 | struct sk_buff_head input_pkt_queue; | ||
1315 | struct list_head poll_list; | 1340 | struct list_head poll_list; |
1316 | struct sk_buff *completion_queue; | 1341 | struct sk_buff *completion_queue; |
1317 | 1342 | ||
1343 | /* Elements below can be accessed between CPUs for RPS */ | ||
1344 | #ifdef CONFIG_SMP | ||
1345 | struct call_single_data csd ____cacheline_aligned_in_smp; | ||
1346 | #endif | ||
1347 | struct sk_buff_head input_pkt_queue; | ||
1318 | struct napi_struct backlog; | 1348 | struct napi_struct backlog; |
1319 | }; | 1349 | }; |
1320 | 1350 | ||
1321 | DECLARE_PER_CPU(struct softnet_data,softnet_data); | 1351 | DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data); |
1322 | 1352 | ||
1323 | #define HAVE_NETIF_QUEUE | 1353 | #define HAVE_NETIF_QUEUE |
1324 | 1354 | ||
@@ -1975,7 +2005,7 @@ extern void __dev_addr_unsync(struct dev_addr_list **to, int *to_count, struct | |||
1975 | extern int dev_set_promiscuity(struct net_device *dev, int inc); | 2005 | extern int dev_set_promiscuity(struct net_device *dev, int inc); |
1976 | extern int dev_set_allmulti(struct net_device *dev, int inc); | 2006 | extern int dev_set_allmulti(struct net_device *dev, int inc); |
1977 | extern void netdev_state_change(struct net_device *dev); | 2007 | extern void netdev_state_change(struct net_device *dev); |
1978 | extern void netdev_bonding_change(struct net_device *dev, | 2008 | extern int netdev_bonding_change(struct net_device *dev, |
1979 | unsigned long event); | 2009 | unsigned long event); |
1980 | extern void netdev_features_change(struct net_device *dev); | 2010 | extern void netdev_features_change(struct net_device *dev); |
1981 | /* Load a device via the kmod */ | 2011 | /* Load a device via the kmod */ |
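struct rps_map ends in a zero-length array, so a map for n CPUs is a single allocation of RPS_MAP_SIZE(n) bytes, and the rps_map pointer is RCU-protected: readers walk it under rcu_read_lock() while a writer publishes a new map and frees the old one after a grace period. The reader side, which the patch open-codes in get_rps_cpu() further down, boils down to something like this (rps_pick_cpu is a hypothetical name):

static int rps_pick_cpu(struct netdev_rx_queue *queue, u32 hash)
{
        struct rps_map *map;
        int cpu = -1;

        rcu_read_lock();
        map = rcu_dereference(queue->rps_map);
        if (map)
                /* Scale the 32-bit hash into [0, len) without a divide. */
                cpu = map->cpus[((u64)hash * map->len) >> 32];
        rcu_read_unlock();

        return cpu;
}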
diff --git a/include/linux/notifier.h b/include/linux/notifier.h index fee6c2f68075..f3635fc6e942 100644 --- a/include/linux/notifier.h +++ b/include/linux/notifier.h | |||
@@ -199,8 +199,8 @@ static inline int notifier_to_errno(int ret) | |||
199 | #define NETDEV_FEAT_CHANGE 0x000B | 199 | #define NETDEV_FEAT_CHANGE 0x000B |
200 | #define NETDEV_BONDING_FAILOVER 0x000C | 200 | #define NETDEV_BONDING_FAILOVER 0x000C |
201 | #define NETDEV_PRE_UP 0x000D | 201 | #define NETDEV_PRE_UP 0x000D |
202 | #define NETDEV_BONDING_OLDTYPE 0x000E | 202 | #define NETDEV_PRE_TYPE_CHANGE 0x000E |
203 | #define NETDEV_BONDING_NEWTYPE 0x000F | 203 | #define NETDEV_POST_TYPE_CHANGE 0x000F |
204 | #define NETDEV_POST_INIT 0x0010 | 204 | #define NETDEV_POST_INIT 0x0010 |
205 | #define NETDEV_UNREGISTER_BATCH 0x0011 | 205 | #define NETDEV_UNREGISTER_BATCH 0x0011 |
206 | 206 | ||
diff --git a/include/linux/phy.h b/include/linux/phy.h index 14d7fdf6a90a..d9bce4b526b4 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h | |||
@@ -81,6 +81,10 @@ typedef enum { | |||
81 | */ | 81 | */ |
82 | #define MII_BUS_ID_SIZE (20 - 3) | 82 | #define MII_BUS_ID_SIZE (20 - 3) |
83 | 83 | ||
84 | /* Or MII_ADDR_C45 into regnum for read/write on mii_bus to enable the 21 bit | ||
85 | IEEE 802.3ae clause 45 addressing mode used by 10GIGE phy chips. */ | ||
86 | #define MII_ADDR_C45 (1<<30) | ||
87 | |||
84 | /* | 88 | /* |
85 | * The Bus class for PHYs. Devices which provide access to | 89 | * The Bus class for PHYs. Devices which provide access to |
86 | * PHYs should register using this structure | 90 | * PHYs should register using this structure |
@@ -127,8 +131,8 @@ int mdiobus_register(struct mii_bus *bus); | |||
127 | void mdiobus_unregister(struct mii_bus *bus); | 131 | void mdiobus_unregister(struct mii_bus *bus); |
128 | void mdiobus_free(struct mii_bus *bus); | 132 | void mdiobus_free(struct mii_bus *bus); |
129 | struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr); | 133 | struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr); |
130 | int mdiobus_read(struct mii_bus *bus, int addr, u16 regnum); | 134 | int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum); |
131 | int mdiobus_write(struct mii_bus *bus, int addr, u16 regnum, u16 val); | 135 | int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val); |
132 | 136 | ||
133 | 137 | ||
134 | #define PHY_INTERRUPT_DISABLED 0x0 | 138 | #define PHY_INTERRUPT_DISABLED 0x0 |
@@ -422,7 +426,7 @@ struct phy_fixup { | |||
422 | * because the bus read/write functions may wait for an interrupt | 426 | * because the bus read/write functions may wait for an interrupt |
423 | * to conclude the operation. | 427 | * to conclude the operation. |
424 | */ | 428 | */ |
425 | static inline int phy_read(struct phy_device *phydev, u16 regnum) | 429 | static inline int phy_read(struct phy_device *phydev, u32 regnum) |
426 | { | 430 | { |
427 | return mdiobus_read(phydev->bus, phydev->addr, regnum); | 431 | return mdiobus_read(phydev->bus, phydev->addr, regnum); |
428 | } | 432 | } |
@@ -437,7 +441,7 @@ static inline int phy_read(struct phy_device *phydev, u16 regnum) | |||
437 | * because the bus read/write functions may wait for an interrupt | 441 | * because the bus read/write functions may wait for an interrupt |
438 | * to conclude the operation. | 442 | * to conclude the operation. |
439 | */ | 443 | */ |
440 | static inline int phy_write(struct phy_device *phydev, u16 regnum, u16 val) | 444 | static inline int phy_write(struct phy_device *phydev, u32 regnum, u16 val) |
441 | { | 445 | { |
442 | return mdiobus_write(phydev->bus, phydev->addr, regnum, val); | 446 | return mdiobus_write(phydev->bus, phydev->addr, regnum, val); |
443 | } | 447 | } |
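Widening regnum to u32 is what makes room for the clause-45 encoding: the low 16 bits stay the register number, the MMD (device) address rides in the bits above, and MII_ADDR_C45 at bit 30 tells the bus driver to issue the two-cycle 802.3ae transaction instead of a clause-22 one. A sketch of a read helper under that encoding (placing the MMD address at bits 16-20 is an assumption consistent with later mdiobus users; this hunk only defines the flag bit):

/* Read register 'reg' of MMD 'devad' on the PHY at 'addr' (hypothetical). */
static int foo_phy_read_c45(struct mii_bus *bus, int addr, int devad, u16 reg)
{
        u32 regnum = MII_ADDR_C45 | (devad << 16) | reg;

        return mdiobus_read(bus, addr, regnum);
}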
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 03f816a9b659..def10b064f29 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h | |||
@@ -300,6 +300,7 @@ typedef unsigned char *sk_buff_data_t; | |||
300 | * @nfct_reasm: netfilter conntrack re-assembly pointer | 300 | * @nfct_reasm: netfilter conntrack re-assembly pointer |
301 | * @nf_bridge: Saved data about a bridged frame - see br_netfilter.c | 301 | * @nf_bridge: Saved data about a bridged frame - see br_netfilter.c |
302 | * @skb_iif: ifindex of device we arrived on | 302 | * @skb_iif: ifindex of device we arrived on |
303 | * @rxhash: the packet hash computed on receive | ||
303 | * @queue_mapping: Queue mapping for multiqueue devices | 304 | * @queue_mapping: Queue mapping for multiqueue devices |
304 | * @tc_index: Traffic control index | 305 | * @tc_index: Traffic control index |
305 | * @tc_verd: traffic control verdict | 306 | * @tc_verd: traffic control verdict |
@@ -375,6 +376,8 @@ struct sk_buff { | |||
375 | #endif | 376 | #endif |
376 | #endif | 377 | #endif |
377 | 378 | ||
379 | __u32 rxhash; | ||
380 | |||
378 | kmemcheck_bitfield_begin(flags2); | 381 | kmemcheck_bitfield_begin(flags2); |
379 | __u16 queue_mapping:16; | 382 | __u16 queue_mapping:16; |
380 | #ifdef CONFIG_IPV6_NDISC_NODETYPE | 383 | #ifdef CONFIG_IPV6_NDISC_NODETYPE |
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index 453512266ea1..c39a5f41169c 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c | |||
@@ -530,6 +530,10 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event, | |||
530 | } | 530 | } |
531 | unregister_netdevice_many(&list); | 531 | unregister_netdevice_many(&list); |
532 | break; | 532 | break; |
533 | |||
534 | case NETDEV_PRE_TYPE_CHANGE: | ||
534 | /* Forbid the underlying device from changing its type. */ | ||
536 | return NOTIFY_BAD; | ||
533 | } | 537 | } |
534 | 538 | ||
535 | out: | 539 | out: |
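The rename from NETDEV_BONDING_OLDTYPE/NEWTYPE to NETDEV_PRE_TYPE_CHANGE/POST_TYPE_CHANGE turns the notification into a proper two-phase protocol: PRE is a question a handler may answer with NOTIFY_BAD, as the VLAN code does above, and the veto is only usable because netdev_bonding_change() now returns the chain's verdict instead of void (see the net/core/dev.c hunk below). A caller converts the verdict to an errno roughly like this (the surrounding code is abridged):

err = notifier_to_errno(netdev_bonding_change(dev,
                                              NETDEV_PRE_TYPE_CHANGE));
if (err) {
        /* A protocol on top of the device (VLAN, bridge, ...) refused. */
        return err;
}

/* ...perform the type change, then announce it... */
netdev_bonding_change(dev, NETDEV_POST_TYPE_CHANGE);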
diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c index b6234b73c4cf..326ab453edb7 100644 --- a/net/bluetooth/bnep/netdev.c +++ b/net/bluetooth/bnep/netdev.c | |||
@@ -87,7 +87,7 @@ static void bnep_net_set_mc_list(struct net_device *dev) | |||
87 | memcpy(__skb_put(skb, ETH_ALEN), dev->broadcast, ETH_ALEN); | 87 | memcpy(__skb_put(skb, ETH_ALEN), dev->broadcast, ETH_ALEN); |
88 | r->len = htons(ETH_ALEN * 2); | 88 | r->len = htons(ETH_ALEN * 2); |
89 | } else { | 89 | } else { |
90 | struct dev_mc_list *dmi = dev->mc_list; | 90 | struct dev_mc_list *dmi; |
91 | int i, len = skb->len; | 91 | int i, len = skb->len; |
92 | 92 | ||
93 | if (dev->flags & IFF_BROADCAST) { | 93 | if (dev->flags & IFF_BROADCAST) { |
@@ -97,12 +97,12 @@ static void bnep_net_set_mc_list(struct net_device *dev) | |||
97 | 97 | ||
98 | /* FIXME: We should group addresses here. */ | 98 | /* FIXME: We should group addresses here. */ |
99 | 99 | ||
100 | for (i = 0; | 100 | i = 0; |
101 | i < netdev_mc_count(dev) && i < BNEP_MAX_MULTICAST_FILTERS; | 101 | netdev_for_each_mc_addr(dmi, dev) { |
102 | i++) { | 102 | if (i == BNEP_MAX_MULTICAST_FILTERS) |
103 | break; | ||
103 | memcpy(__skb_put(skb, ETH_ALEN), dmi->dmi_addr, ETH_ALEN); | 104 | memcpy(__skb_put(skb, ETH_ALEN), dmi->dmi_addr, ETH_ALEN); |
104 | memcpy(__skb_put(skb, ETH_ALEN), dmi->dmi_addr, ETH_ALEN); | 105 | memcpy(__skb_put(skb, ETH_ALEN), dmi->dmi_addr, ETH_ALEN); |
105 | dmi = dmi->next; | ||
106 | } | 106 | } |
107 | r->len = htons(skb->len - len); | 107 | r->len = htons(skb->len - len); |
108 | } | 108 | } |
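Every mc_list conversion in this patch follows the same recipe: stop seeding the cursor from dev->mc_list, drop the hand-rolled ->next walk, and let netdev_for_each_mc_addr() drive the loop; where the hardware imposes a filter limit (as BNEP does above), the bound becomes an explicit counter check inside the body. In isolation the shape is (foo_add_filter and FOO_MAX_FILTERS are stand-ins):

static void foo_load_mc_filters(struct net_device *dev)
{
        struct dev_mc_list *dmi;
        int n = 0;

        netdev_for_each_mc_addr(dmi, dev) {
                if (n++ == FOO_MAX_FILTERS)     /* optional hardware bound */
                        break;
                foo_add_filter(dev, dmi->dmi_addr);
        }
}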
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c index 90a9024e5c1e..5b8a6e73b02f 100644 --- a/net/bridge/br_device.c +++ b/net/bridge/br_device.c | |||
@@ -26,11 +26,12 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev) | |||
26 | const unsigned char *dest = skb->data; | 26 | const unsigned char *dest = skb->data; |
27 | struct net_bridge_fdb_entry *dst; | 27 | struct net_bridge_fdb_entry *dst; |
28 | struct net_bridge_mdb_entry *mdst; | 28 | struct net_bridge_mdb_entry *mdst; |
29 | struct br_cpu_netstats *brstats = this_cpu_ptr(br->stats); | ||
29 | 30 | ||
30 | BR_INPUT_SKB_CB(skb)->brdev = dev; | 31 | brstats->tx_packets++; |
32 | brstats->tx_bytes += skb->len; | ||
31 | 33 | ||
32 | dev->stats.tx_packets++; | 34 | BR_INPUT_SKB_CB(skb)->brdev = dev; |
33 | dev->stats.tx_bytes += skb->len; | ||
34 | 35 | ||
35 | skb_reset_mac_header(skb); | 36 | skb_reset_mac_header(skb); |
36 | skb_pull(skb, ETH_HLEN); | 37 | skb_pull(skb, ETH_HLEN); |
@@ -81,6 +82,31 @@ static int br_dev_stop(struct net_device *dev) | |||
81 | return 0; | 82 | return 0; |
82 | } | 83 | } |
83 | 84 | ||
85 | static struct net_device_stats *br_get_stats(struct net_device *dev) | ||
86 | { | ||
87 | struct net_bridge *br = netdev_priv(dev); | ||
88 | struct net_device_stats *stats = &dev->stats; | ||
89 | struct br_cpu_netstats sum = { 0 }; | ||
90 | unsigned int cpu; | ||
91 | |||
92 | for_each_possible_cpu(cpu) { | ||
93 | const struct br_cpu_netstats *bstats | ||
94 | = per_cpu_ptr(br->stats, cpu); | ||
95 | |||
96 | sum.tx_bytes += bstats->tx_bytes; | ||
97 | sum.tx_packets += bstats->tx_packets; | ||
98 | sum.rx_bytes += bstats->rx_bytes; | ||
99 | sum.rx_packets += bstats->rx_packets; | ||
100 | } | ||
101 | |||
102 | stats->tx_bytes = sum.tx_bytes; | ||
103 | stats->tx_packets = sum.tx_packets; | ||
104 | stats->rx_bytes = sum.rx_bytes; | ||
105 | stats->rx_packets = sum.rx_packets; | ||
106 | |||
107 | return stats; | ||
108 | } | ||
109 | |||
84 | static int br_change_mtu(struct net_device *dev, int new_mtu) | 110 | static int br_change_mtu(struct net_device *dev, int new_mtu) |
85 | { | 111 | { |
86 | struct net_bridge *br = netdev_priv(dev); | 112 | struct net_bridge *br = netdev_priv(dev); |
@@ -180,19 +206,28 @@ static const struct net_device_ops br_netdev_ops = { | |||
180 | .ndo_open = br_dev_open, | 206 | .ndo_open = br_dev_open, |
181 | .ndo_stop = br_dev_stop, | 207 | .ndo_stop = br_dev_stop, |
182 | .ndo_start_xmit = br_dev_xmit, | 208 | .ndo_start_xmit = br_dev_xmit, |
209 | .ndo_get_stats = br_get_stats, | ||
183 | .ndo_set_mac_address = br_set_mac_address, | 210 | .ndo_set_mac_address = br_set_mac_address, |
184 | .ndo_set_multicast_list = br_dev_set_multicast_list, | 211 | .ndo_set_multicast_list = br_dev_set_multicast_list, |
185 | .ndo_change_mtu = br_change_mtu, | 212 | .ndo_change_mtu = br_change_mtu, |
186 | .ndo_do_ioctl = br_dev_ioctl, | 213 | .ndo_do_ioctl = br_dev_ioctl, |
187 | }; | 214 | }; |
188 | 215 | ||
216 | static void br_dev_free(struct net_device *dev) | ||
217 | { | ||
218 | struct net_bridge *br = netdev_priv(dev); | ||
219 | |||
220 | free_percpu(br->stats); | ||
221 | free_netdev(dev); | ||
222 | } | ||
223 | |||
189 | void br_dev_setup(struct net_device *dev) | 224 | void br_dev_setup(struct net_device *dev) |
190 | { | 225 | { |
191 | random_ether_addr(dev->dev_addr); | 226 | random_ether_addr(dev->dev_addr); |
192 | ether_setup(dev); | 227 | ether_setup(dev); |
193 | 228 | ||
194 | dev->netdev_ops = &br_netdev_ops; | 229 | dev->netdev_ops = &br_netdev_ops; |
195 | dev->destructor = free_netdev; | 230 | dev->destructor = br_dev_free; |
196 | SET_ETHTOOL_OPS(dev, &br_ethtool_ops); | 231 | SET_ETHTOOL_OPS(dev, &br_ethtool_ops); |
197 | dev->tx_queue_len = 0; | 232 | dev->tx_queue_len = 0; |
198 | dev->priv_flags = IFF_EBRIDGE; | 233 | dev->priv_flags = IFF_EBRIDGE; |
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index b6a3872f5681..b7cdd2e98050 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c | |||
@@ -185,6 +185,12 @@ static struct net_device *new_bridge_dev(struct net *net, const char *name) | |||
185 | br = netdev_priv(dev); | 185 | br = netdev_priv(dev); |
186 | br->dev = dev; | 186 | br->dev = dev; |
187 | 187 | ||
188 | br->stats = alloc_percpu(struct br_cpu_netstats); | ||
189 | if (!br->stats) { | ||
190 | free_netdev(dev); | ||
191 | return NULL; | ||
192 | } | ||
193 | |||
188 | spin_lock_init(&br->lock); | 194 | spin_lock_init(&br->lock); |
189 | INIT_LIST_HEAD(&br->port_list); | 195 | INIT_LIST_HEAD(&br->port_list); |
190 | spin_lock_init(&br->hash_lock); | 196 | spin_lock_init(&br->hash_lock); |
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c index d74d570fc848..333dfb7c5886 100644 --- a/net/bridge/br_input.c +++ b/net/bridge/br_input.c | |||
@@ -23,9 +23,11 @@ const u8 br_group_address[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 }; | |||
23 | static int br_pass_frame_up(struct sk_buff *skb) | 23 | static int br_pass_frame_up(struct sk_buff *skb) |
24 | { | 24 | { |
25 | struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev; | 25 | struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev; |
26 | struct net_bridge *br = netdev_priv(brdev); | ||
27 | struct br_cpu_netstats *brstats = this_cpu_ptr(br->stats); | ||
26 | 28 | ||
27 | brdev->stats.rx_packets++; | 29 | brstats->rx_packets++; |
28 | brdev->stats.rx_bytes += skb->len; | 30 | brstats->rx_bytes += skb->len; |
29 | 31 | ||
30 | indev = skb->dev; | 32 | indev = skb->dev; |
31 | skb->dev = brdev; | 33 | skb->dev = brdev; |
diff --git a/net/bridge/br_notify.c b/net/bridge/br_notify.c index 763a3ec292e5..1413b72acc7f 100644 --- a/net/bridge/br_notify.c +++ b/net/bridge/br_notify.c | |||
@@ -82,6 +82,10 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v | |||
82 | case NETDEV_UNREGISTER: | 82 | case NETDEV_UNREGISTER: |
83 | br_del_if(br, dev); | 83 | br_del_if(br, dev); |
84 | break; | 84 | break; |
85 | |||
86 | case NETDEV_PRE_TYPE_CHANGE: | ||
86 | /* Forbid the underlying device from changing its type. */ | ||
88 | return NOTIFY_BAD; | ||
85 | } | 89 | } |
86 | 90 | ||
87 | /* Events that may cause spanning tree to refresh */ | 91 | /* Events that may cause spanning tree to refresh */ |
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 846d7d1e2075..791d4ab0fd4d 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h | |||
@@ -135,6 +135,14 @@ struct net_bridge | |||
135 | spinlock_t lock; | 135 | spinlock_t lock; |
136 | struct list_head port_list; | 136 | struct list_head port_list; |
137 | struct net_device *dev; | 137 | struct net_device *dev; |
138 | |||
139 | struct br_cpu_netstats __percpu { | ||
140 | unsigned long rx_packets; | ||
141 | unsigned long rx_bytes; | ||
142 | unsigned long tx_packets; | ||
143 | unsigned long tx_bytes; | ||
144 | } *stats; | ||
145 | |||
138 | spinlock_t hash_lock; | 146 | spinlock_t hash_lock; |
139 | struct hlist_head hash[BR_HASH_SIZE]; | 147 | struct hlist_head hash[BR_HASH_SIZE]; |
140 | unsigned long feature_mask; | 148 | unsigned long feature_mask; |
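Taken together, the bridge hunks swap the shared dev->stats counters for per-CPU ones: new_bridge_dev() allocates them with alloc_percpu(), the xmit and rx fast paths bump them locklessly through this_cpu_ptr(), br_get_stats() folds them on demand, and a custom destructor frees them after the netdev is gone. A condensed sketch of the pattern (foo_* names are illustrative):

struct foo_cpu_stats {
        unsigned long rx_packets;
        unsigned long rx_bytes;
};

struct foo_priv {
        struct foo_cpu_stats *stats;    /* from alloc_percpu() */
};

/* Hot path: no lock, no atomics; each CPU only touches its own counters. */
static void foo_rx_account(struct foo_priv *priv, unsigned int len)
{
        struct foo_cpu_stats *stats = this_cpu_ptr(priv->stats);

        stats->rx_packets++;
        stats->rx_bytes += len;
}

/* Slow path: fold every CPU's counters into the legacy struct. */
static struct net_device_stats *foo_get_stats(struct net_device *dev)
{
        struct foo_priv *priv = netdev_priv(dev);
        struct net_device_stats *stats = &dev->stats;
        unsigned long rx_packets = 0, rx_bytes = 0;
        unsigned int cpu;

        for_each_possible_cpu(cpu) {
                const struct foo_cpu_stats *s = per_cpu_ptr(priv->stats, cpu);

                rx_packets += s->rx_packets;
                rx_bytes += s->rx_bytes;
        }
        stats->rx_packets = rx_packets;
        stats->rx_bytes = rx_bytes;

        return stats;
}

The sums can race with concurrent increments, so a reader may see a snapshot that is off by a few packets; like the bridge code, this trades exactness for a lock-free fast path.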
diff --git a/net/core/dev.c b/net/core/dev.c index 59d4394d2ce8..c0e260870c0a 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -1084,9 +1084,9 @@ void netdev_state_change(struct net_device *dev) | |||
1084 | } | 1084 | } |
1085 | EXPORT_SYMBOL(netdev_state_change); | 1085 | EXPORT_SYMBOL(netdev_state_change); |
1086 | 1086 | ||
1087 | void netdev_bonding_change(struct net_device *dev, unsigned long event) | 1087 | int netdev_bonding_change(struct net_device *dev, unsigned long event) |
1088 | { | 1088 | { |
1089 | call_netdevice_notifiers(event, dev); | 1089 | return call_netdevice_notifiers(event, dev); |
1090 | } | 1090 | } |
1091 | EXPORT_SYMBOL(netdev_bonding_change); | 1091 | EXPORT_SYMBOL(netdev_bonding_change); |
1092 | 1092 | ||
@@ -1931,7 +1931,7 @@ out_kfree_skb: | |||
1931 | return rc; | 1931 | return rc; |
1932 | } | 1932 | } |
1933 | 1933 | ||
1934 | static u32 skb_tx_hashrnd; | 1934 | static u32 hashrnd __read_mostly; |
1935 | 1935 | ||
1936 | u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb) | 1936 | u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb) |
1937 | { | 1937 | { |
@@ -1949,7 +1949,7 @@ u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb) | |||
1949 | else | 1949 | else |
1950 | hash = skb->protocol; | 1950 | hash = skb->protocol; |
1951 | 1951 | ||
1952 | hash = jhash_1word(hash, skb_tx_hashrnd); | 1952 | hash = jhash_1word(hash, hashrnd); |
1953 | 1953 | ||
1954 | return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32); | 1954 | return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32); |
1955 | } | 1955 | } |
@@ -1959,10 +1959,9 @@ static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index) | |||
1959 | { | 1959 | { |
1960 | if (unlikely(queue_index >= dev->real_num_tx_queues)) { | 1960 | if (unlikely(queue_index >= dev->real_num_tx_queues)) { |
1961 | if (net_ratelimit()) { | 1961 | if (net_ratelimit()) { |
1962 | WARN(1, "%s selects TX queue %d, but " | 1962 | netdev_warn(dev, "selects TX queue %d, but " |
1963 | "real number of TX queues is %d\n", | 1963 | "real number of TX queues is %d\n", |
1964 | dev->name, queue_index, | 1964 | queue_index, dev->real_num_tx_queues); |
1965 | dev->real_num_tx_queues); | ||
1966 | } | 1965 | } |
1967 | return 0; | 1966 | return 0; |
1968 | } | 1967 | } |
@@ -2175,6 +2174,178 @@ int weight_p __read_mostly = 64; /* old backlog weight */ | |||
2175 | 2174 | ||
2176 | DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, }; | 2175 | DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, }; |
2177 | 2176 | ||
2177 | #ifdef CONFIG_SMP | ||
2178 | /* | ||
2179 | * get_rps_cpu is called from netif_receive_skb and returns the target | ||
2180 | * CPU from the RPS map of the receiving queue for a given skb. | ||
2181 | */ | ||
2182 | static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb) | ||
2183 | { | ||
2184 | struct ipv6hdr *ip6; | ||
2185 | struct iphdr *ip; | ||
2186 | struct netdev_rx_queue *rxqueue; | ||
2187 | struct rps_map *map; | ||
2188 | int cpu = -1; | ||
2189 | u8 ip_proto; | ||
2190 | u32 addr1, addr2, ports, ihl; | ||
2191 | |||
2192 | rcu_read_lock(); | ||
2193 | |||
2194 | if (skb_rx_queue_recorded(skb)) { | ||
2195 | u16 index = skb_get_rx_queue(skb); | ||
2196 | if (unlikely(index >= dev->num_rx_queues)) { | ||
2197 | if (net_ratelimit()) { | ||
2198 | netdev_warn(dev, "received packet on queue " | ||
2199 | "%u, but number of RX queues is %u\n", | ||
2200 | index, dev->num_rx_queues); | ||
2201 | } | ||
2202 | goto done; | ||
2203 | } | ||
2204 | rxqueue = dev->_rx + index; | ||
2205 | } else | ||
2206 | rxqueue = dev->_rx; | ||
2207 | |||
2208 | if (!rxqueue->rps_map) | ||
2209 | goto done; | ||
2210 | |||
2211 | if (skb->rxhash) | ||
2212 | goto got_hash; /* Skip hash computation on packet header */ | ||
2213 | |||
2214 | switch (skb->protocol) { | ||
2215 | case __constant_htons(ETH_P_IP): | ||
2216 | if (!pskb_may_pull(skb, sizeof(*ip))) | ||
2217 | goto done; | ||
2218 | |||
2219 | ip = (struct iphdr *) skb->data; | ||
2220 | ip_proto = ip->protocol; | ||
2221 | addr1 = ip->saddr; | ||
2222 | addr2 = ip->daddr; | ||
2223 | ihl = ip->ihl; | ||
2224 | break; | ||
2225 | case __constant_htons(ETH_P_IPV6): | ||
2226 | if (!pskb_may_pull(skb, sizeof(*ip6))) | ||
2227 | goto done; | ||
2228 | |||
2229 | ip6 = (struct ipv6hdr *) skb->data; | ||
2230 | ip_proto = ip6->nexthdr; | ||
2231 | addr1 = ip6->saddr.s6_addr32[3]; | ||
2232 | addr2 = ip6->daddr.s6_addr32[3]; | ||
2233 | ihl = (40 >> 2); | ||
2234 | break; | ||
2235 | default: | ||
2236 | goto done; | ||
2237 | } | ||
2238 | ports = 0; | ||
2239 | switch (ip_proto) { | ||
2240 | case IPPROTO_TCP: | ||
2241 | case IPPROTO_UDP: | ||
2242 | case IPPROTO_DCCP: | ||
2243 | case IPPROTO_ESP: | ||
2244 | case IPPROTO_AH: | ||
2245 | case IPPROTO_SCTP: | ||
2246 | case IPPROTO_UDPLITE: | ||
2247 | if (pskb_may_pull(skb, (ihl * 4) + 4)) | ||
2248 | ports = *((u32 *) (skb->data + (ihl * 4))); | ||
2249 | break; | ||
2250 | |||
2251 | default: | ||
2252 | break; | ||
2253 | } | ||
2254 | |||
2255 | skb->rxhash = jhash_3words(addr1, addr2, ports, hashrnd); | ||
2256 | if (!skb->rxhash) | ||
2257 | skb->rxhash = 1; | ||
2258 | |||
2259 | got_hash: | ||
2260 | map = rcu_dereference(rxqueue->rps_map); | ||
2261 | if (map) { | ||
2262 | u16 tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32]; | ||
2263 | |||
2264 | if (cpu_online(tcpu)) { | ||
2265 | cpu = tcpu; | ||
2266 | goto done; | ||
2267 | } | ||
2268 | } | ||
2269 | |||
2270 | done: | ||
2271 | rcu_read_unlock(); | ||
2272 | return cpu; | ||
2273 | } | ||
2274 | |||
2275 | /* | ||
2276 | * This structure holds the per-CPU mask of CPUs for which IPIs are scheduled | ||
2277 | * to be sent to kick remote softirq processing. There are two masks since | ||
2278 | * the sending of IPIs must be done with interrupts enabled. The select field | ||
2279 | * indicates the current mask that enqueue_backlog uses to schedule IPIs. | ||
2280 | * select is flipped before net_rps_action is called while still under lock, | ||
2281 | * net_rps_action then uses the non-selected mask to send the IPIs and clears | ||
2282 | * it without conflicting with enqueue_backlog operation. | ||
2283 | */ | ||
2284 | struct rps_remote_softirq_cpus { | ||
2285 | cpumask_t mask[2]; | ||
2286 | int select; | ||
2287 | }; | ||
2288 | static DEFINE_PER_CPU(struct rps_remote_softirq_cpus, rps_remote_softirq_cpus); | ||
2289 | |||
2290 | /* Called from hardirq (IPI) context */ | ||
2291 | static void trigger_softirq(void *data) | ||
2292 | { | ||
2293 | struct softnet_data *queue = data; | ||
2294 | __napi_schedule(&queue->backlog); | ||
2295 | __get_cpu_var(netdev_rx_stat).received_rps++; | ||
2296 | } | ||
2297 | #endif /* CONFIG_SMP */ | ||
2298 | |||
2299 | /* | ||
2300 | * enqueue_to_backlog is called to queue an skb to a per CPU backlog | ||
2301 | * queue (may be a remote CPU queue). | ||
2302 | */ | ||
2303 | static int enqueue_to_backlog(struct sk_buff *skb, int cpu) | ||
2304 | { | ||
2305 | struct softnet_data *queue; | ||
2306 | unsigned long flags; | ||
2307 | |||
2308 | queue = &per_cpu(softnet_data, cpu); | ||
2309 | |||
2310 | local_irq_save(flags); | ||
2311 | __get_cpu_var(netdev_rx_stat).total++; | ||
2312 | |||
2313 | spin_lock(&queue->input_pkt_queue.lock); | ||
2314 | if (queue->input_pkt_queue.qlen <= netdev_max_backlog) { | ||
2315 | if (queue->input_pkt_queue.qlen) { | ||
2316 | enqueue: | ||
2317 | __skb_queue_tail(&queue->input_pkt_queue, skb); | ||
2318 | spin_unlock_irqrestore(&queue->input_pkt_queue.lock, | ||
2319 | flags); | ||
2320 | return NET_RX_SUCCESS; | ||
2321 | } | ||
2322 | |||
2323 | /* Schedule NAPI for backlog device */ | ||
2324 | if (napi_schedule_prep(&queue->backlog)) { | ||
2325 | #ifdef CONFIG_SMP | ||
2326 | if (cpu != smp_processor_id()) { | ||
2327 | struct rps_remote_softirq_cpus *rcpus = | ||
2328 | &__get_cpu_var(rps_remote_softirq_cpus); | ||
2329 | |||
2330 | cpu_set(cpu, rcpus->mask[rcpus->select]); | ||
2331 | __raise_softirq_irqoff(NET_RX_SOFTIRQ); | ||
2332 | } else | ||
2333 | __napi_schedule(&queue->backlog); | ||
2334 | #else | ||
2335 | __napi_schedule(&queue->backlog); | ||
2336 | #endif | ||
2337 | } | ||
2338 | goto enqueue; | ||
2339 | } | ||
2340 | |||
2341 | spin_unlock(&queue->input_pkt_queue.lock); | ||
2342 | |||
2343 | __get_cpu_var(netdev_rx_stat).dropped++; | ||
2344 | local_irq_restore(flags); | ||
2345 | |||
2346 | kfree_skb(skb); | ||
2347 | return NET_RX_DROP; | ||
2348 | } | ||
2178 | 2349 | ||
2179 | /** | 2350 | /** |
2180 | * netif_rx - post buffer to the network code | 2351 | * netif_rx - post buffer to the network code |
@@ -2193,8 +2364,7 @@ DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, }; | |||
2193 | 2364 | ||
2194 | int netif_rx(struct sk_buff *skb) | 2365 | int netif_rx(struct sk_buff *skb) |
2195 | { | 2366 | { |
2196 | struct softnet_data *queue; | 2367 | int cpu; |
2197 | unsigned long flags; | ||
2198 | 2368 | ||
2199 | /* if netpoll wants it, pretend we never saw it */ | 2369 | /* if netpoll wants it, pretend we never saw it */ |
2200 | if (netpoll_rx(skb)) | 2370 | if (netpoll_rx(skb)) |
@@ -2203,31 +2373,15 @@ int netif_rx(struct sk_buff *skb) | |||
2203 | if (!skb->tstamp.tv64) | 2373 | if (!skb->tstamp.tv64) |
2204 | net_timestamp(skb); | 2374 | net_timestamp(skb); |
2205 | 2375 | ||
2206 | /* | 2376 | #ifdef CONFIG_SMP |
2207 | * The code is rearranged so that the path is the most | 2377 | cpu = get_rps_cpu(skb->dev, skb); |
2208 | * short when CPU is congested, but is still operating. | 2378 | if (cpu < 0) |
2209 | */ | 2379 | cpu = smp_processor_id(); |
2210 | local_irq_save(flags); | 2380 | #else |
2211 | queue = &__get_cpu_var(softnet_data); | 2381 | cpu = smp_processor_id(); |
2212 | 2382 | #endif | |
2213 | __get_cpu_var(netdev_rx_stat).total++; | ||
2214 | if (queue->input_pkt_queue.qlen <= netdev_max_backlog) { | ||
2215 | if (queue->input_pkt_queue.qlen) { | ||
2216 | enqueue: | ||
2217 | __skb_queue_tail(&queue->input_pkt_queue, skb); | ||
2218 | local_irq_restore(flags); | ||
2219 | return NET_RX_SUCCESS; | ||
2220 | } | ||
2221 | |||
2222 | napi_schedule(&queue->backlog); | ||
2223 | goto enqueue; | ||
2224 | } | ||
2225 | |||
2226 | __get_cpu_var(netdev_rx_stat).dropped++; | ||
2227 | local_irq_restore(flags); | ||
2228 | 2383 | ||
2229 | kfree_skb(skb); | 2384 | return enqueue_to_backlog(skb, cpu); |
2230 | return NET_RX_DROP; | ||
2231 | } | 2385 | } |
2232 | EXPORT_SYMBOL(netif_rx); | 2386 | EXPORT_SYMBOL(netif_rx); |
2233 | 2387 | ||
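The flow hash behind all of this is deliberately cheap: 32 bits of source address, 32 bits of destination address, and the first four bytes past the L3 header (the TCP/UDP port pair) mixed through jhash_3words() with a boot-time random seed, then scaled into the map with a multiply-shift rather than a modulo. Restating just the arithmetic from get_rps_cpu() as a standalone helper (the function name is made up):

#include <linux/jhash.h>

/* Map a flow's addresses and ports to an index in [0, len). */
static u16 rps_flow_index(u32 addr1, u32 addr2, u32 ports,
                          u32 seed, unsigned int len)
{
        u32 hash = jhash_3words(addr1, addr2, ports, seed);

        if (!hash)
                hash = 1;       /* 0 means "no hash yet" in skb->rxhash */

        /* ((u64)hash * len) >> 32 spreads uniformly without a divide. */
        return ((u64)hash * len) >> 32;
}

The seed matters: hashrnd is randomized at boot so that remote hosts cannot trivially craft flows that all land on one CPU.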
@@ -2464,22 +2618,7 @@ void netif_nit_deliver(struct sk_buff *skb) | |||
2464 | rcu_read_unlock(); | 2618 | rcu_read_unlock(); |
2465 | } | 2619 | } |
2466 | 2620 | ||
2467 | /** | 2621 | int __netif_receive_skb(struct sk_buff *skb) |
2468 | * netif_receive_skb - process receive buffer from network | ||
2469 | * @skb: buffer to process | ||
2470 | * | ||
2471 | * netif_receive_skb() is the main receive data processing function. | ||
2472 | * It always succeeds. The buffer may be dropped during processing | ||
2473 | * for congestion control or by the protocol layers. | ||
2474 | * | ||
2475 | * This function may only be called from softirq context and interrupts | ||
2476 | * should be enabled. | ||
2477 | * | ||
2478 | * Return values (usually ignored): | ||
2479 | * NET_RX_SUCCESS: no congestion | ||
2480 | * NET_RX_DROP: packet was dropped | ||
2481 | */ | ||
2482 | int netif_receive_skb(struct sk_buff *skb) | ||
2483 | { | 2622 | { |
2484 | struct packet_type *ptype, *pt_prev; | 2623 | struct packet_type *ptype, *pt_prev; |
2485 | struct net_device *orig_dev; | 2624 | struct net_device *orig_dev; |
@@ -2590,6 +2729,37 @@ out: | |||
2590 | rcu_read_unlock(); | 2729 | rcu_read_unlock(); |
2591 | return ret; | 2730 | return ret; |
2592 | } | 2731 | } |
2732 | |||
2733 | /** | ||
2734 | * netif_receive_skb - process receive buffer from network | ||
2735 | * @skb: buffer to process | ||
2736 | * | ||
2737 | * netif_receive_skb() is the main receive data processing function. | ||
2738 | * It always succeeds. The buffer may be dropped during processing | ||
2739 | * for congestion control or by the protocol layers. | ||
2740 | * | ||
2741 | * This function may only be called from softirq context and interrupts | ||
2742 | * should be enabled. | ||
2743 | * | ||
2744 | * Return values (usually ignored): | ||
2745 | * NET_RX_SUCCESS: no congestion | ||
2746 | * NET_RX_DROP: packet was dropped | ||
2747 | */ | ||
2748 | int netif_receive_skb(struct sk_buff *skb) | ||
2749 | { | ||
2750 | #ifdef CONFIG_SMP | ||
2751 | int cpu; | ||
2752 | |||
2753 | cpu = get_rps_cpu(skb->dev, skb); | ||
2754 | |||
2755 | if (cpu < 0) | ||
2756 | return __netif_receive_skb(skb); | ||
2757 | else | ||
2758 | return enqueue_to_backlog(skb, cpu); | ||
2759 | #else | ||
2760 | return __netif_receive_skb(skb); | ||
2761 | #endif | ||
2762 | } | ||
2593 | EXPORT_SYMBOL(netif_receive_skb); | 2763 | EXPORT_SYMBOL(netif_receive_skb); |
2594 | 2764 | ||
2595 | /* Network device is going away, flush any packets still pending */ | 2765 | /* Network device is going away, flush any packets still pending */ |
@@ -2916,16 +3086,16 @@ static int process_backlog(struct napi_struct *napi, int quota) | |||
2916 | do { | 3086 | do { |
2917 | struct sk_buff *skb; | 3087 | struct sk_buff *skb; |
2918 | 3088 | ||
2919 | local_irq_disable(); | 3089 | spin_lock_irq(&queue->input_pkt_queue.lock); |
2920 | skb = __skb_dequeue(&queue->input_pkt_queue); | 3090 | skb = __skb_dequeue(&queue->input_pkt_queue); |
2921 | if (!skb) { | 3091 | if (!skb) { |
2922 | __napi_complete(napi); | 3092 | __napi_complete(napi); |
2923 | local_irq_enable(); | 3093 | spin_unlock_irq(&queue->input_pkt_queue.lock); |
2924 | break; | 3094 | break; |
2925 | } | 3095 | } |
2926 | local_irq_enable(); | 3096 | spin_unlock_irq(&queue->input_pkt_queue.lock); |
2927 | 3097 | ||
2928 | netif_receive_skb(skb); | 3098 | __netif_receive_skb(skb); |
2929 | } while (++work < quota && jiffies == start_time); | 3099 | } while (++work < quota && jiffies == start_time); |
2930 | 3100 | ||
2931 | return work; | 3101 | return work; |
@@ -3014,6 +3184,24 @@ void netif_napi_del(struct napi_struct *napi) | |||
3014 | } | 3184 | } |
3015 | EXPORT_SYMBOL(netif_napi_del); | 3185 | EXPORT_SYMBOL(netif_napi_del); |
3016 | 3186 | ||
3187 | #ifdef CONFIG_SMP | ||
3188 | /* | ||
3189 | * net_rps_action sends any pending IPI's for rps. This is only called from | ||
3190 | * softirq and interrupts must be enabled. | ||
3191 | */ | ||
3192 | static void net_rps_action(cpumask_t *mask) | ||
3193 | { | ||
3194 | int cpu; | ||
3195 | |||
3196 | /* Send pending IPI's to kick RPS processing on remote cpus. */ | ||
3197 | for_each_cpu_mask_nr(cpu, *mask) { | ||
3198 | struct softnet_data *queue = &per_cpu(softnet_data, cpu); | ||
3199 | if (cpu_online(cpu)) | ||
3200 | __smp_call_function_single(cpu, &queue->csd, 0); | ||
3201 | } | ||
3202 | cpus_clear(*mask); | ||
3203 | } | ||
3204 | #endif | ||
3017 | 3205 | ||
3018 | static void net_rx_action(struct softirq_action *h) | 3206 | static void net_rx_action(struct softirq_action *h) |
3019 | { | 3207 | { |
@@ -3021,6 +3209,10 @@ static void net_rx_action(struct softirq_action *h) | |||
3021 | unsigned long time_limit = jiffies + 2; | 3209 | unsigned long time_limit = jiffies + 2; |
3022 | int budget = netdev_budget; | 3210 | int budget = netdev_budget; |
3023 | void *have; | 3211 | void *have; |
3212 | #ifdef CONFIG_SMP | ||
3213 | int select; | ||
3214 | struct rps_remote_softirq_cpus *rcpus; | ||
3215 | #endif | ||
3024 | 3216 | ||
3025 | local_irq_disable(); | 3217 | local_irq_disable(); |
3026 | 3218 | ||
@@ -3083,8 +3275,18 @@ static void net_rx_action(struct softirq_action *h) | |||
3083 | netpoll_poll_unlock(have); | 3275 | netpoll_poll_unlock(have); |
3084 | } | 3276 | } |
3085 | out: | 3277 | out: |
3278 | #ifdef CONFIG_SMP | ||
3279 | rcpus = &__get_cpu_var(rps_remote_softirq_cpus); | ||
3280 | select = rcpus->select; | ||
3281 | rcpus->select ^= 1; | ||
3282 | |||
3086 | local_irq_enable(); | 3283 | local_irq_enable(); |
3087 | 3284 | ||
3285 | net_rps_action(&rcpus->mask[select]); | ||
3286 | #else | ||
3287 | local_irq_enable(); | ||
3288 | #endif | ||
3289 | |||
3088 | #ifdef CONFIG_NET_DMA | 3290 | #ifdef CONFIG_NET_DMA |
3089 | /* | 3291 | /* |
3090 | * There may not be any more sk_buffs coming right now, so push | 3292 | * There may not be any more sk_buffs coming right now, so push |
@@ -3329,10 +3531,10 @@ static int softnet_seq_show(struct seq_file *seq, void *v) | |||
3329 | { | 3531 | { |
3330 | struct netif_rx_stats *s = v; | 3532 | struct netif_rx_stats *s = v; |
3331 | 3533 | ||
3332 | seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n", | 3534 | seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n", |
3333 | s->total, s->dropped, s->time_squeeze, 0, | 3535 | s->total, s->dropped, s->time_squeeze, 0, |
3334 | 0, 0, 0, 0, /* was fastroute */ | 3536 | 0, 0, 0, 0, /* was fastroute */ |
3335 | s->cpu_collision); | 3537 | s->cpu_collision, s->received_rps); |
3336 | return 0; | 3538 | return 0; |
3337 | } | 3539 | } |
3338 | 3540 | ||
@@ -5069,6 +5271,23 @@ int register_netdevice(struct net_device *dev) | |||
5069 | 5271 | ||
5070 | dev->iflink = -1; | 5272 | dev->iflink = -1; |
5071 | 5273 | ||
5274 | if (!dev->num_rx_queues) { | ||
5275 | /* | ||
5276 | * Allocate a single RX queue if driver never called | ||
5277 | * alloc_netdev_mq | ||
5278 | */ | ||
5279 | |||
5280 | dev->_rx = kzalloc(sizeof(struct netdev_rx_queue), GFP_KERNEL); | ||
5281 | if (!dev->_rx) { | ||
5282 | ret = -ENOMEM; | ||
5283 | goto out; | ||
5284 | } | ||
5285 | |||
5286 | dev->_rx->first = dev->_rx; | ||
5287 | atomic_set(&dev->_rx->count, 1); | ||
5288 | dev->num_rx_queues = 1; | ||
5289 | } | ||
5290 | |||
5072 | /* Init, if this function is available */ | 5291 | /* Init, if this function is available */ |
5073 | if (dev->netdev_ops->ndo_init) { | 5292 | if (dev->netdev_ops->ndo_init) { |
5074 | ret = dev->netdev_ops->ndo_init(dev); | 5293 | ret = dev->netdev_ops->ndo_init(dev); |
@@ -5426,9 +5645,11 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, | |||
5426 | void (*setup)(struct net_device *), unsigned int queue_count) | 5645 | void (*setup)(struct net_device *), unsigned int queue_count) |
5427 | { | 5646 | { |
5428 | struct netdev_queue *tx; | 5647 | struct netdev_queue *tx; |
5648 | struct netdev_rx_queue *rx; | ||
5429 | struct net_device *dev; | 5649 | struct net_device *dev; |
5430 | size_t alloc_size; | 5650 | size_t alloc_size; |
5431 | struct net_device *p; | 5651 | struct net_device *p; |
5652 | int i; | ||
5432 | 5653 | ||
5433 | BUG_ON(strlen(name) >= sizeof(dev->name)); | 5654 | BUG_ON(strlen(name) >= sizeof(dev->name)); |
5434 | 5655 | ||
@@ -5454,11 +5675,27 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, | |||
5454 | goto free_p; | 5675 | goto free_p; |
5455 | } | 5676 | } |
5456 | 5677 | ||
5678 | rx = kcalloc(queue_count, sizeof(struct netdev_rx_queue), GFP_KERNEL); | ||
5679 | if (!rx) { | ||
5680 | printk(KERN_ERR "alloc_netdev: Unable to allocate " | ||
5681 | "rx queues.\n"); | ||
5682 | goto free_tx; | ||
5683 | } | ||
5684 | |||
5685 | atomic_set(&rx->count, queue_count); | ||
5686 | |||
5687 | /* | ||
5688 | * Set a pointer to first element in the array which holds the | ||
5689 | * reference count. | ||
5690 | */ | ||
5691 | for (i = 0; i < queue_count; i++) | ||
5692 | rx[i].first = rx; | ||
5693 | |||
5457 | dev = PTR_ALIGN(p, NETDEV_ALIGN); | 5694 | dev = PTR_ALIGN(p, NETDEV_ALIGN); |
5458 | dev->padded = (char *)dev - (char *)p; | 5695 | dev->padded = (char *)dev - (char *)p; |
5459 | 5696 | ||
5460 | if (dev_addr_init(dev)) | 5697 | if (dev_addr_init(dev)) |
5461 | goto free_tx; | 5698 | goto free_rx; |
5462 | 5699 | ||
5463 | dev_unicast_init(dev); | 5700 | dev_unicast_init(dev); |
5464 | 5701 | ||
@@ -5468,6 +5705,9 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, | |||
5468 | dev->num_tx_queues = queue_count; | 5705 | dev->num_tx_queues = queue_count; |
5469 | dev->real_num_tx_queues = queue_count; | 5706 | dev->real_num_tx_queues = queue_count; |
5470 | 5707 | ||
5708 | dev->_rx = rx; | ||
5709 | dev->num_rx_queues = queue_count; | ||
5710 | |||
5471 | dev->gso_max_size = GSO_MAX_SIZE; | 5711 | dev->gso_max_size = GSO_MAX_SIZE; |
5472 | 5712 | ||
5473 | netdev_init_queues(dev); | 5713 | netdev_init_queues(dev); |
@@ -5482,9 +5722,10 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, | |||
5482 | strcpy(dev->name, name); | 5722 | strcpy(dev->name, name); |
5483 | return dev; | 5723 | return dev; |
5484 | 5724 | ||
5725 | free_rx: | ||
5726 | kfree(rx); | ||
5485 | free_tx: | 5727 | free_tx: |
5486 | kfree(tx); | 5728 | kfree(tx); |
5487 | |||
5488 | free_p: | 5729 | free_p: |
5489 | kfree(p); | 5730 | kfree(p); |
5490 | return NULL; | 5731 | return NULL; |
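The RX queue array is a single kcalloc() block, but each element gets its own kobject with an independent lifetime once the sysfs code below registers them, so every queue carries a ->first pointer back to the array head and the head's atomic count tracks how many elements are still live; whoever drops the count to zero frees the whole array. The release side, which rx_queue_release() in net-sysfs.c implements, reduces to:

static void foo_queue_release(struct netdev_rx_queue *queue)
{
        struct netdev_rx_queue *first = queue->first;

        /* One allocation backs all queues; free it with the last kobject. */
        if (atomic_dec_and_test(&first->count))
                kfree(first);
}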
@@ -5987,6 +6228,12 @@ static int __init net_dev_init(void) | |||
5987 | queue->completion_queue = NULL; | 6228 | queue->completion_queue = NULL; |
5988 | INIT_LIST_HEAD(&queue->poll_list); | 6229 | INIT_LIST_HEAD(&queue->poll_list); |
5989 | 6230 | ||
6231 | #ifdef CONFIG_SMP | ||
6232 | queue->csd.func = trigger_softirq; | ||
6233 | queue->csd.info = queue; | ||
6234 | queue->csd.flags = 0; | ||
6235 | #endif | ||
6236 | |||
5990 | queue->backlog.poll = process_backlog; | 6237 | queue->backlog.poll = process_backlog; |
5991 | queue->backlog.weight = weight_p; | 6238 | queue->backlog.weight = weight_p; |
5992 | queue->backlog.gro_list = NULL; | 6239 | queue->backlog.gro_list = NULL; |
@@ -6025,7 +6272,7 @@ subsys_initcall(net_dev_init); | |||
6025 | 6272 | ||
6026 | static int __init initialize_hashrnd(void) | 6273 | static int __init initialize_hashrnd(void) |
6027 | { | 6274 | { |
6028 | get_random_bytes(&skb_tx_hashrnd, sizeof(skb_tx_hashrnd)); | 6275 | get_random_bytes(&hashrnd, sizeof(hashrnd)); |
6029 | return 0; | 6276 | return 0; |
6030 | } | 6277 | } |
6031 | 6278 | ||
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c index 9a24377146bf..2ff34894357a 100644 --- a/net/core/fib_rules.c +++ b/net/core/fib_rules.c | |||
@@ -108,7 +108,7 @@ fib_rules_register(struct fib_rules_ops *tmpl, struct net *net) | |||
108 | struct fib_rules_ops *ops; | 108 | struct fib_rules_ops *ops; |
109 | int err; | 109 | int err; |
110 | 110 | ||
111 | ops = kmemdup(tmpl, sizeof (*ops), GFP_KERNEL); | 111 | ops = kmemdup(tmpl, sizeof(*ops), GFP_KERNEL); |
112 | if (ops == NULL) | 112 | if (ops == NULL) |
113 | return ERR_PTR(-ENOMEM); | 113 | return ERR_PTR(-ENOMEM); |
114 | 114 | ||
@@ -123,7 +123,6 @@ fib_rules_register(struct fib_rules_ops *tmpl, struct net *net) | |||
123 | 123 | ||
124 | return ops; | 124 | return ops; |
125 | } | 125 | } |
126 | |||
127 | EXPORT_SYMBOL_GPL(fib_rules_register); | 126 | EXPORT_SYMBOL_GPL(fib_rules_register); |
128 | 127 | ||
129 | void fib_rules_cleanup_ops(struct fib_rules_ops *ops) | 128 | void fib_rules_cleanup_ops(struct fib_rules_ops *ops) |
@@ -157,7 +156,6 @@ void fib_rules_unregister(struct fib_rules_ops *ops) | |||
157 | 156 | ||
158 | call_rcu(&ops->rcu, fib_rules_put_rcu); | 157 | call_rcu(&ops->rcu, fib_rules_put_rcu); |
159 | } | 158 | } |
160 | |||
161 | EXPORT_SYMBOL_GPL(fib_rules_unregister); | 159 | EXPORT_SYMBOL_GPL(fib_rules_unregister); |
162 | 160 | ||
163 | static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops, | 161 | static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops, |
@@ -220,7 +218,6 @@ out: | |||
220 | 218 | ||
221 | return err; | 219 | return err; |
222 | } | 220 | } |
223 | |||
224 | EXPORT_SYMBOL_GPL(fib_rules_lookup); | 221 | EXPORT_SYMBOL_GPL(fib_rules_lookup); |
225 | 222 | ||
226 | static int validate_rulemsg(struct fib_rule_hdr *frh, struct nlattr **tb, | 223 | static int validate_rulemsg(struct fib_rule_hdr *frh, struct nlattr **tb, |
@@ -613,7 +610,7 @@ static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb) | |||
613 | break; | 610 | break; |
614 | 611 | ||
615 | cb->args[1] = 0; | 612 | cb->args[1] = 0; |
616 | skip: | 613 | skip: |
617 | idx++; | 614 | idx++; |
618 | } | 615 | } |
619 | rcu_read_unlock(); | 616 | rcu_read_unlock(); |
@@ -685,7 +682,6 @@ static int fib_rules_event(struct notifier_block *this, unsigned long event, | |||
685 | struct fib_rules_ops *ops; | 682 | struct fib_rules_ops *ops; |
686 | 683 | ||
687 | ASSERT_RTNL(); | 684 | ASSERT_RTNL(); |
688 | rcu_read_lock(); | ||
689 | 685 | ||
690 | switch (event) { | 686 | switch (event) { |
691 | case NETDEV_REGISTER: | 687 | case NETDEV_REGISTER: |
@@ -699,8 +695,6 @@ static int fib_rules_event(struct notifier_block *this, unsigned long event, | |||
699 | break; | 695 | break; |
700 | } | 696 | } |
701 | 697 | ||
702 | rcu_read_unlock(); | ||
703 | |||
704 | return NOTIFY_DONE; | 698 | return NOTIFY_DONE; |
705 | } | 699 | } |
706 | 700 | ||
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index 099c753c4213..7a46343d5ae3 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c | |||
@@ -466,6 +466,216 @@ static struct attribute_group wireless_group = { | |||
466 | }; | 466 | }; |
467 | #endif | 467 | #endif |
468 | 468 | ||
469 | /* | ||
470 | * RX queue sysfs structures and functions. | ||
471 | */ | ||
472 | struct rx_queue_attribute { | ||
473 | struct attribute attr; | ||
474 | ssize_t (*show)(struct netdev_rx_queue *queue, | ||
475 | struct rx_queue_attribute *attr, char *buf); | ||
476 | ssize_t (*store)(struct netdev_rx_queue *queue, | ||
477 | struct rx_queue_attribute *attr, const char *buf, size_t len); | ||
478 | }; | ||
479 | #define to_rx_queue_attr(_attr) container_of(_attr, \ | ||
480 | struct rx_queue_attribute, attr) | ||
481 | |||
482 | #define to_rx_queue(obj) container_of(obj, struct netdev_rx_queue, kobj) | ||
483 | |||
484 | static ssize_t rx_queue_attr_show(struct kobject *kobj, struct attribute *attr, | ||
485 | char *buf) | ||
486 | { | ||
487 | struct rx_queue_attribute *attribute = to_rx_queue_attr(attr); | ||
488 | struct netdev_rx_queue *queue = to_rx_queue(kobj); | ||
489 | |||
490 | if (!attribute->show) | ||
491 | return -EIO; | ||
492 | |||
493 | return attribute->show(queue, attribute, buf); | ||
494 | } | ||
495 | |||
496 | static ssize_t rx_queue_attr_store(struct kobject *kobj, struct attribute *attr, | ||
497 | const char *buf, size_t count) | ||
498 | { | ||
499 | struct rx_queue_attribute *attribute = to_rx_queue_attr(attr); | ||
500 | struct netdev_rx_queue *queue = to_rx_queue(kobj); | ||
501 | |||
502 | if (!attribute->store) | ||
503 | return -EIO; | ||
504 | |||
505 | return attribute->store(queue, attribute, buf, count); | ||
506 | } | ||
507 | |||
508 | static struct sysfs_ops rx_queue_sysfs_ops = { | ||
509 | .show = rx_queue_attr_show, | ||
510 | .store = rx_queue_attr_store, | ||
511 | }; | ||
512 | |||
513 | static ssize_t show_rps_map(struct netdev_rx_queue *queue, | ||
514 | struct rx_queue_attribute *attribute, char *buf) | ||
515 | { | ||
516 | struct rps_map *map; | ||
517 | cpumask_var_t mask; | ||
518 | size_t len = 0; | ||
519 | int i; | ||
520 | |||
521 | if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) | ||
522 | return -ENOMEM; | ||
523 | |||
524 | rcu_read_lock(); | ||
525 | map = rcu_dereference(queue->rps_map); | ||
526 | if (map) | ||
527 | for (i = 0; i < map->len; i++) | ||
528 | cpumask_set_cpu(map->cpus[i], mask); | ||
529 | |||
530 | len += cpumask_scnprintf(buf + len, PAGE_SIZE, mask); | ||
531 | if (PAGE_SIZE - len < 3) { | ||
532 | rcu_read_unlock(); | ||
533 | free_cpumask_var(mask); | ||
534 | return -EINVAL; | ||
535 | } | ||
536 | rcu_read_unlock(); | ||
537 | |||
538 | free_cpumask_var(mask); | ||
539 | len += sprintf(buf + len, "\n"); | ||
540 | return len; | ||
541 | } | ||
542 | |||
543 | static void rps_map_release(struct rcu_head *rcu) | ||
544 | { | ||
545 | struct rps_map *map = container_of(rcu, struct rps_map, rcu); | ||
546 | |||
547 | kfree(map); | ||
548 | } | ||
549 | |||
550 | ssize_t store_rps_map(struct netdev_rx_queue *queue, | ||
551 | struct rx_queue_attribute *attribute, | ||
552 | const char *buf, size_t len) | ||
553 | { | ||
554 | struct rps_map *old_map, *map; | ||
555 | cpumask_var_t mask; | ||
556 | int err, cpu, i; | ||
557 | static DEFINE_SPINLOCK(rps_map_lock); | ||
558 | |||
559 | if (!capable(CAP_NET_ADMIN)) | ||
560 | return -EPERM; | ||
561 | |||
562 | if (!alloc_cpumask_var(&mask, GFP_KERNEL)) | ||
563 | return -ENOMEM; | ||
564 | |||
565 | err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits); | ||
566 | if (err) { | ||
567 | free_cpumask_var(mask); | ||
568 | return err; | ||
569 | } | ||
570 | |||
571 | map = kzalloc(max_t(unsigned, | ||
572 | RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES), | ||
573 | GFP_KERNEL); | ||
574 | if (!map) { | ||
575 | free_cpumask_var(mask); | ||
576 | return -ENOMEM; | ||
577 | } | ||
578 | |||
579 | i = 0; | ||
580 | for_each_cpu_and(cpu, mask, cpu_online_mask) | ||
581 | map->cpus[i++] = cpu; | ||
582 | |||
583 | if (i) | ||
584 | map->len = i; | ||
585 | else { | ||
586 | kfree(map); | ||
587 | map = NULL; | ||
588 | } | ||
589 | |||
590 | spin_lock(&rps_map_lock); | ||
591 | old_map = queue->rps_map; | ||
592 | rcu_assign_pointer(queue->rps_map, map); | ||
593 | spin_unlock(&rps_map_lock); | ||
594 | |||
595 | if (old_map) | ||
596 | call_rcu(&old_map->rcu, rps_map_release); | ||
597 | |||
598 | free_cpumask_var(mask); | ||
599 | return len; | ||
600 | } | ||
601 | |||
602 | static struct rx_queue_attribute rps_cpus_attribute = | ||
603 | __ATTR(rps_cpus, S_IRUGO | S_IWUSR, show_rps_map, store_rps_map); | ||
604 | |||
605 | static struct attribute *rx_queue_default_attrs[] = { | ||
606 | &rps_cpus_attribute.attr, | ||
607 | NULL | ||
608 | }; | ||
609 | |||
610 | static void rx_queue_release(struct kobject *kobj) | ||
611 | { | ||
612 | struct netdev_rx_queue *queue = to_rx_queue(kobj); | ||
613 | struct rps_map *map = queue->rps_map; | ||
614 | struct netdev_rx_queue *first = queue->first; | ||
615 | |||
616 | if (map) | ||
617 | call_rcu(&map->rcu, rps_map_release); | ||
618 | |||
619 | if (atomic_dec_and_test(&first->count)) | ||
620 | kfree(first); | ||
621 | } | ||
622 | |||
623 | static struct kobj_type rx_queue_ktype = { | ||
624 | .sysfs_ops = &rx_queue_sysfs_ops, | ||
625 | .release = rx_queue_release, | ||
626 | .default_attrs = rx_queue_default_attrs, | ||
627 | }; | ||
628 | |||
629 | static int rx_queue_add_kobject(struct net_device *net, int index) | ||
630 | { | ||
631 | struct netdev_rx_queue *queue = net->_rx + index; | ||
632 | struct kobject *kobj = &queue->kobj; | ||
633 | int error = 0; | ||
634 | |||
635 | kobj->kset = net->queues_kset; | ||
636 | error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL, | ||
637 | "rx-%u", index); | ||
638 | if (error) { | ||
639 | kobject_put(kobj); | ||
640 | return error; | ||
641 | } | ||
642 | |||
643 | kobject_uevent(kobj, KOBJ_ADD); | ||
644 | |||
645 | return error; | ||
646 | } | ||
647 | |||
648 | static int rx_queue_register_kobjects(struct net_device *net) | ||
649 | { | ||
650 | int i; | ||
651 | int error = 0; | ||
652 | |||
653 | net->queues_kset = kset_create_and_add("queues", | ||
654 | NULL, &net->dev.kobj); | ||
655 | if (!net->queues_kset) | ||
656 | return -ENOMEM; | ||
657 | for (i = 0; i < net->num_rx_queues; i++) { | ||
658 | error = rx_queue_add_kobject(net, i); | ||
659 | if (error) | ||
660 | break; | ||
661 | } | ||
662 | |||
663 | if (error) | ||
664 | while (--i >= 0) | ||
665 | kobject_put(&net->_rx[i].kobj); | ||
666 | |||
667 | return error; | ||
668 | } | ||
669 | |||
670 | static void rx_queue_remove_kobjects(struct net_device *net) | ||
671 | { | ||
672 | int i; | ||
673 | |||
674 | for (i = 0; i < net->num_rx_queues; i++) | ||
675 | kobject_put(&net->_rx[i].kobj); | ||
676 | kset_unregister(net->queues_kset); | ||
677 | } | ||
678 | |||
469 | #endif /* CONFIG_SYSFS */ | 679 | #endif /* CONFIG_SYSFS */ |
470 | 680 | ||
471 | #ifdef CONFIG_HOTPLUG | 681 | #ifdef CONFIG_HOTPLUG |
@@ -529,6 +739,8 @@ void netdev_unregister_kobject(struct net_device * net) | |||
529 | if (!net_eq(dev_net(net), &init_net)) | 739 | if (!net_eq(dev_net(net), &init_net)) |
530 | return; | 740 | return; |
531 | 741 | ||
742 | rx_queue_remove_kobjects(net); | ||
743 | |||
532 | device_del(dev); | 744 | device_del(dev); |
533 | } | 745 | } |
534 | 746 | ||
@@ -537,6 +749,7 @@ int netdev_register_kobject(struct net_device *net) | |||
537 | { | 749 | { |
538 | struct device *dev = &(net->dev); | 750 | struct device *dev = &(net->dev); |
539 | const struct attribute_group **groups = net->sysfs_groups; | 751 | const struct attribute_group **groups = net->sysfs_groups; |
752 | int error = 0; | ||
540 | 753 | ||
541 | dev->class = &net_class; | 754 | dev->class = &net_class; |
542 | dev->platform_data = net; | 755 | dev->platform_data = net; |
@@ -563,7 +776,17 @@ int netdev_register_kobject(struct net_device *net) | |||
563 | if (!net_eq(dev_net(net), &init_net)) | 776 | if (!net_eq(dev_net(net), &init_net)) |
564 | return 0; | 777 | return 0; |
565 | 778 | ||
566 | return device_add(dev); | 779 | error = device_add(dev); |
780 | if (error) | ||
781 | return error; | ||
782 | |||
783 | error = rx_queue_register_kobjects(net); | ||
784 | if (error) { | ||
785 | device_del(dev); | ||
786 | return error; | ||
787 | } | ||
788 | |||
789 | return error; | ||
567 | } | 790 | } |
568 | 791 | ||
569 | int netdev_class_create_file(struct class_attribute *class_attr) | 792 | int netdev_class_create_file(struct class_attribute *class_attr) |
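The net-sysfs.c hunks above expose one rps_cpus file per receive queue under /sys/class/net/<dev>/queues/rx-<n>/, backed by show_rps_map()/store_rps_map(). A minimal userspace sketch of driving that attribute, assuming an "eth0" interface and root privileges (the path follows from rx_queue_add_kobject(); the mask value is arbitrary):

    #include <stdio.h>

    int main(void)
    {
        const char *path = "/sys/class/net/eth0/queues/rx-0/rps_cpus";
        char buf[64];
        FILE *f;

        f = fopen(path, "w");
        if (!f) {
            perror("open for write");
            return 1;
        }
        fputs("f\n", f);                /* steer flows to CPUs 0-3 */
        fclose(f);

        f = fopen(path, "r");
        if (!f) {
            perror("open for read");
            return 1;
        }
        if (fgets(buf, sizeof(buf), f))
            printf("rps_cpus = %s", buf);
        fclose(f);
        return 0;
    }

A write goes through store_rps_map(), which parses the bitmap, builds a fresh rps_map, and publishes it with rcu_assign_pointer() under rps_map_lock, so readers on the packet path never see a torn update; the previous map is freed by call_rcu() once all readers drain.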
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 4568120d8533..e1121f0bca6a 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c | |||
@@ -600,7 +600,39 @@ static void copy_rtnl_link_stats(struct rtnl_link_stats *a, | |||
600 | 600 | ||
601 | a->rx_compressed = b->rx_compressed; | 601 | a->rx_compressed = b->rx_compressed; |
602 | a->tx_compressed = b->tx_compressed; | 602 | a->tx_compressed = b->tx_compressed; |
603 | }; | 603 | } |
604 | |||
605 | static void copy_rtnl_link_stats64(struct rtnl_link_stats64 *a, | ||
606 | const struct net_device_stats *b) | ||
607 | { | ||
608 | a->rx_packets = b->rx_packets; | ||
609 | a->tx_packets = b->tx_packets; | ||
610 | a->rx_bytes = b->rx_bytes; | ||
611 | a->tx_bytes = b->tx_bytes; | ||
612 | a->rx_errors = b->rx_errors; | ||
613 | a->tx_errors = b->tx_errors; | ||
614 | a->rx_dropped = b->rx_dropped; | ||
615 | a->tx_dropped = b->tx_dropped; | ||
616 | |||
617 | a->multicast = b->multicast; | ||
618 | a->collisions = b->collisions; | ||
619 | |||
620 | a->rx_length_errors = b->rx_length_errors; | ||
621 | a->rx_over_errors = b->rx_over_errors; | ||
622 | a->rx_crc_errors = b->rx_crc_errors; | ||
623 | a->rx_frame_errors = b->rx_frame_errors; | ||
624 | a->rx_fifo_errors = b->rx_fifo_errors; | ||
625 | a->rx_missed_errors = b->rx_missed_errors; | ||
626 | |||
627 | a->tx_aborted_errors = b->tx_aborted_errors; | ||
628 | a->tx_carrier_errors = b->tx_carrier_errors; | ||
629 | a->tx_fifo_errors = b->tx_fifo_errors; | ||
630 | a->tx_heartbeat_errors = b->tx_heartbeat_errors; | ||
631 | a->tx_window_errors = b->tx_window_errors; | ||
632 | |||
633 | a->rx_compressed = b->rx_compressed; | ||
634 | a->tx_compressed = b->tx_compressed; | ||
635 | } | ||
604 | 636 | ||
605 | static inline int rtnl_vfinfo_size(const struct net_device *dev) | 637 | static inline int rtnl_vfinfo_size(const struct net_device *dev) |
606 | { | 638 | { |
@@ -698,6 +730,14 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, | |||
698 | stats = dev_get_stats(dev); | 730 | stats = dev_get_stats(dev); |
699 | copy_rtnl_link_stats(nla_data(attr), stats); | 731 | copy_rtnl_link_stats(nla_data(attr), stats); |
700 | 732 | ||
733 | attr = nla_reserve(skb, IFLA_STATS64, | ||
734 | sizeof(struct rtnl_link_stats64)); | ||
735 | if (attr == NULL) | ||
736 | goto nla_put_failure; | ||
737 | |||
738 | stats = dev_get_stats(dev); | ||
739 | copy_rtnl_link_stats64(nla_data(attr), stats); | ||
740 | |||
701 | if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent) { | 741 | if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent) { |
702 | int i; | 742 | int i; |
703 | struct ifla_vf_info ivi; | 743 | struct ifla_vf_info ivi; |
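The rtnetlink.c hunk adds a second, 64-bit statistics attribute (IFLA_STATS64) alongside the legacy 32-bit IFLA_STATS, copied from the same dev_get_stats() result. A tiny standalone illustration of why the wider copy matters, using made-up counter values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t rx_bytes = 5000000000ULL;       /* ~5 GB received */
        uint32_t legacy = (uint32_t)rx_bytes;    /* 32-bit field wraps */

        printf("64-bit counter: %llu\n", (unsigned long long)rx_bytes);
        printf("32-bit counter: %u (wrapped)\n", legacy);
        return 0;
    }

On a 10GbE link a 32-bit byte counter wraps in a matter of seconds, so dumps that only carry IFLA_STATS cannot report byte totals reliably; keeping both attributes preserves compatibility with older readers.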
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 93c4e060c91e..bdea0efdf8cb 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
@@ -534,6 +534,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old) | |||
534 | new->network_header = old->network_header; | 534 | new->network_header = old->network_header; |
535 | new->mac_header = old->mac_header; | 535 | new->mac_header = old->mac_header; |
536 | skb_dst_set(new, dst_clone(skb_dst(old))); | 536 | skb_dst_set(new, dst_clone(skb_dst(old))); |
537 | new->rxhash = old->rxhash; | ||
537 | #ifdef CONFIG_XFRM | 538 | #ifdef CONFIG_XFRM |
538 | new->sp = secpath_get(old->sp); | 539 | new->sp = secpath_get(old->sp); |
539 | #endif | 540 | #endif |
@@ -581,6 +582,7 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb) | |||
581 | C(len); | 582 | C(len); |
582 | C(data_len); | 583 | C(data_len); |
583 | C(mac_len); | 584 | C(mac_len); |
585 | C(rxhash); | ||
584 | n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len; | 586 | n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len; |
585 | n->cloned = 1; | 587 | n->cloned = 1; |
586 | n->nohdr = 0; | 588 | n->nohdr = 0; |
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig index 0c94a1ac2946..c9a1c68767ff 100644 --- a/net/ipv4/Kconfig +++ b/net/ipv4/Kconfig | |||
@@ -587,9 +587,15 @@ choice | |||
587 | config DEFAULT_HTCP | 587 | config DEFAULT_HTCP |
588 | bool "Htcp" if TCP_CONG_HTCP=y | 588 | bool "Htcp" if TCP_CONG_HTCP=y |
589 | 589 | ||
590 | config DEFAULT_HYBLA | ||
591 | bool "Hybla" if TCP_CONG_HYBLA=y | ||
592 | |||
590 | config DEFAULT_VEGAS | 593 | config DEFAULT_VEGAS |
591 | bool "Vegas" if TCP_CONG_VEGAS=y | 594 | bool "Vegas" if TCP_CONG_VEGAS=y |
592 | 595 | ||
596 | config DEFAULT_VENO | ||
597 | bool "Veno" if TCP_CONG_VENO=y | ||
598 | |||
593 | config DEFAULT_WESTWOOD | 599 | config DEFAULT_WESTWOOD |
594 | bool "Westwood" if TCP_CONG_WESTWOOD=y | 600 | bool "Westwood" if TCP_CONG_WESTWOOD=y |
595 | 601 | ||
@@ -610,8 +616,10 @@ config DEFAULT_TCP_CONG | |||
610 | default "bic" if DEFAULT_BIC | 616 | default "bic" if DEFAULT_BIC |
611 | default "cubic" if DEFAULT_CUBIC | 617 | default "cubic" if DEFAULT_CUBIC |
612 | default "htcp" if DEFAULT_HTCP | 618 | default "htcp" if DEFAULT_HTCP |
619 | default "hybla" if DEFAULT_HYBLA | ||
613 | default "vegas" if DEFAULT_VEGAS | 620 | default "vegas" if DEFAULT_VEGAS |
614 | default "westwood" if DEFAULT_WESTWOOD | 621 | default "westwood" if DEFAULT_WESTWOOD |
622 | default "veno" if DEFAULT_VENO | ||
615 | default "reno" if DEFAULT_RENO | 623 | default "reno" if DEFAULT_RENO |
616 | default "cubic" | 624 | default "cubic" |
617 | 625 | ||
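The Kconfig additions only set the build-time default; the same algorithms remain selectable at run time through the usual sysctl. A small sketch, assuming the hybla module is available and the program runs with CAP_NET_ADMIN:

    #include <stdio.h>

    int main(void)
    {
        const char *path = "/proc/sys/net/ipv4/tcp_congestion_control";
        char algo[32];
        FILE *f;

        f = fopen(path, "r");
        if (f && fgets(algo, sizeof(algo), f))
            printf("current default: %s", algo);
        if (f)
            fclose(f);

        f = fopen(path, "w");           /* needs root; module must be loaded */
        if (f) {
            fputs("hybla\n", f);
            fclose(f);
        }
        return 0;
    }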
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 51ca946e3392..c75320ef95c2 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c | |||
@@ -1095,10 +1095,10 @@ static int inetdev_event(struct notifier_block *this, unsigned long event, | |||
1095 | case NETDEV_DOWN: | 1095 | case NETDEV_DOWN: |
1096 | ip_mc_down(in_dev); | 1096 | ip_mc_down(in_dev); |
1097 | break; | 1097 | break; |
1098 | case NETDEV_BONDING_OLDTYPE: | 1098 | case NETDEV_PRE_TYPE_CHANGE: |
1099 | ip_mc_unmap(in_dev); | 1099 | ip_mc_unmap(in_dev); |
1100 | break; | 1100 | break; |
1101 | case NETDEV_BONDING_NEWTYPE: | 1101 | case NETDEV_POST_TYPE_CHANGE: |
1102 | ip_mc_remap(in_dev); | 1102 | ip_mc_remap(in_dev); |
1103 | break; | 1103 | break; |
1104 | case NETDEV_CHANGEMTU: | 1104 | case NETDEV_CHANGEMTU: |
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 3381b4317c27..8d41abc40db5 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -137,8 +137,8 @@ static DEFINE_SPINLOCK(addrconf_verify_lock); | |||
137 | static void addrconf_join_anycast(struct inet6_ifaddr *ifp); | 137 | static void addrconf_join_anycast(struct inet6_ifaddr *ifp); |
138 | static void addrconf_leave_anycast(struct inet6_ifaddr *ifp); | 138 | static void addrconf_leave_anycast(struct inet6_ifaddr *ifp); |
139 | 139 | ||
140 | static void addrconf_bonding_change(struct net_device *dev, | 140 | static void addrconf_type_change(struct net_device *dev, |
141 | unsigned long event); | 141 | unsigned long event); |
142 | static int addrconf_ifdown(struct net_device *dev, int how); | 142 | static int addrconf_ifdown(struct net_device *dev, int how); |
143 | 143 | ||
144 | static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags); | 144 | static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags); |
@@ -2584,9 +2584,9 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, | |||
2584 | return notifier_from_errno(err); | 2584 | return notifier_from_errno(err); |
2585 | } | 2585 | } |
2586 | break; | 2586 | break; |
2587 | case NETDEV_BONDING_OLDTYPE: | 2587 | case NETDEV_PRE_TYPE_CHANGE: |
2588 | case NETDEV_BONDING_NEWTYPE: | 2588 | case NETDEV_POST_TYPE_CHANGE: |
2589 | addrconf_bonding_change(dev, event); | 2589 | addrconf_type_change(dev, event); |
2590 | break; | 2590 | break; |
2591 | } | 2591 | } |
2592 | 2592 | ||
@@ -2601,16 +2601,16 @@ static struct notifier_block ipv6_dev_notf = { | |||
2601 | .priority = 0 | 2601 | .priority = 0 |
2602 | }; | 2602 | }; |
2603 | 2603 | ||
2604 | static void addrconf_bonding_change(struct net_device *dev, unsigned long event) | 2604 | static void addrconf_type_change(struct net_device *dev, unsigned long event) |
2605 | { | 2605 | { |
2606 | struct inet6_dev *idev; | 2606 | struct inet6_dev *idev; |
2607 | ASSERT_RTNL(); | 2607 | ASSERT_RTNL(); |
2608 | 2608 | ||
2609 | idev = __in6_dev_get(dev); | 2609 | idev = __in6_dev_get(dev); |
2610 | 2610 | ||
2611 | if (event == NETDEV_BONDING_NEWTYPE) | 2611 | if (event == NETDEV_POST_TYPE_CHANGE) |
2612 | ipv6_mc_remap(idev); | 2612 | ipv6_mc_remap(idev); |
2613 | else if (event == NETDEV_BONDING_OLDTYPE) | 2613 | else if (event == NETDEV_PRE_TYPE_CHANGE) |
2614 | ipv6_mc_unmap(idev); | 2614 | ipv6_mc_unmap(idev); |
2615 | } | 2615 | } |
2616 | 2616 | ||
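The rename from NETDEV_BONDING_{OLD,NEW}TYPE to NETDEV_{PRE,POST}_TYPE_CHANGE reflects that any hardware-type change, not just bonding, raises these events, and devinet.c/addrconf.c follow suit. A hedged sketch of a module-side consumer for kernels of this vintage, where the notifier's void pointer is the net_device itself (handler names are illustrative, not kernel code):

    #include <linux/module.h>
    #include <linux/netdevice.h>
    #include <linux/notifier.h>

    static int type_change_event(struct notifier_block *nb,
                                 unsigned long event, void *ptr)
    {
        struct net_device *dev = ptr;

        switch (event) {
        case NETDEV_PRE_TYPE_CHANGE:
            pr_info("%s: type change about to happen\n", dev->name);
            break;
        case NETDEV_POST_TYPE_CHANGE:
            pr_info("%s: type change complete\n", dev->name);
            break;
        }
        return NOTIFY_DONE;
    }

    static struct notifier_block type_change_nb = {
        .notifier_call = type_change_event,
    };

    static int __init tc_init(void)
    {
        return register_netdevice_notifier(&type_change_nb);
    }

    static void __exit tc_exit(void)
    {
        unregister_netdevice_notifier(&type_change_nb);
    }

    module_init(tc_init);
    module_exit(tc_exit);
    MODULE_LICENSE("GPL");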
diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c index 853c52be781f..937ecda4abe7 100644 --- a/net/rds/af_rds.c +++ b/net/rds/af_rds.c | |||
@@ -159,7 +159,8 @@ static unsigned int rds_poll(struct file *file, struct socket *sock, | |||
159 | 159 | ||
160 | poll_wait(file, sk->sk_sleep, wait); | 160 | poll_wait(file, sk->sk_sleep, wait); |
161 | 161 | ||
162 | poll_wait(file, &rds_poll_waitq, wait); | 162 | if (rs->rs_seen_congestion) |
163 | poll_wait(file, &rds_poll_waitq, wait); | ||
163 | 164 | ||
164 | read_lock_irqsave(&rs->rs_recv_lock, flags); | 165 | read_lock_irqsave(&rs->rs_recv_lock, flags); |
165 | if (!rs->rs_cong_monitor) { | 166 | if (!rs->rs_cong_monitor) { |
@@ -181,6 +182,10 @@ static unsigned int rds_poll(struct file *file, struct socket *sock, | |||
181 | mask |= (POLLOUT | POLLWRNORM); | 182 | mask |= (POLLOUT | POLLWRNORM); |
182 | read_unlock_irqrestore(&rs->rs_recv_lock, flags); | 183 | read_unlock_irqrestore(&rs->rs_recv_lock, flags); |
183 | 184 | ||
185 | /* clear state any time we wake a seen-congested socket */ | ||
186 | if (mask) | ||
187 | rs->rs_seen_congestion = 0; | ||
188 | |||
184 | return mask; | 189 | return mask; |
185 | } | 190 | } |
186 | 191 | ||
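With the rs_seen_congestion flag, poll() only touches the global rds_poll_waitq for sockets that actually hit congestion, which rds_sendmsg() records further down when rds_cong_wait() fails. A compilable userspace fragment of the matching application pattern; socket creation and addressing are elided, and the helper name is invented:

    #include <errno.h>
    #include <poll.h>
    #include <sys/socket.h>

    /* 'fd' is assumed to be a connected RDS (or other datagram) socket. */
    int send_with_backoff(int fd, const void *buf, size_t len)
    {
        struct pollfd pfd = { .fd = fd, .events = POLLOUT };

        for (;;) {
            ssize_t n = send(fd, buf, len, MSG_DONTWAIT);
            if (n >= 0)
                return 0;
            if (errno != ENOBUFS && errno != EAGAIN)
                return -1;
            /* congested: block until the socket is writable again */
            if (poll(&pfd, 1, -1) < 0)
                return -1;
        }
    }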
diff --git a/net/rds/cong.c b/net/rds/cong.c index 6d06cac2649c..dd2711df640b 100644 --- a/net/rds/cong.c +++ b/net/rds/cong.c | |||
@@ -218,8 +218,6 @@ void rds_cong_queue_updates(struct rds_cong_map *map) | |||
218 | spin_lock_irqsave(&rds_cong_lock, flags); | 218 | spin_lock_irqsave(&rds_cong_lock, flags); |
219 | 219 | ||
220 | list_for_each_entry(conn, &map->m_conn_list, c_map_item) { | 220 | list_for_each_entry(conn, &map->m_conn_list, c_map_item) { |
221 | if (conn->c_loopback) | ||
222 | continue; | ||
223 | if (!test_and_set_bit(0, &conn->c_map_queued)) { | 221 | if (!test_and_set_bit(0, &conn->c_map_queued)) { |
224 | rds_stats_inc(s_cong_update_queued); | 222 | rds_stats_inc(s_cong_update_queued); |
225 | queue_delayed_work(rds_wq, &conn->c_send_w, 0); | 223 | queue_delayed_work(rds_wq, &conn->c_send_w, 0); |
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c index 647cb8ffc39b..e1f124bf03bb 100644 --- a/net/rds/ib_cm.c +++ b/net/rds/ib_cm.c | |||
@@ -203,9 +203,10 @@ static void rds_ib_qp_event_handler(struct ib_event *event, void *data) | |||
203 | rdma_notify(ic->i_cm_id, IB_EVENT_COMM_EST); | 203 | rdma_notify(ic->i_cm_id, IB_EVENT_COMM_EST); |
204 | break; | 204 | break; |
205 | default: | 205 | default: |
206 | rds_ib_conn_error(conn, "RDS/IB: Fatal QP Event %u " | 206 | rdsdebug("Fatal QP Event %u " |
207 | "- connection %pI4->%pI4, reconnecting\n", | 207 | "- connection %pI4->%pI4, reconnecting\n", |
208 | event->event, &conn->c_laddr, &conn->c_faddr); | 208 | event->event, &conn->c_laddr, &conn->c_faddr); |
209 | rds_conn_drop(conn); | ||
209 | break; | 210 | break; |
210 | } | 211 | } |
211 | } | 212 | } |
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c index 4b0da865a72c..cfb1d904ed00 100644 --- a/net/rds/ib_rdma.c +++ b/net/rds/ib_rdma.c | |||
@@ -234,8 +234,8 @@ void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool) | |||
234 | { | 234 | { |
235 | flush_workqueue(rds_wq); | 235 | flush_workqueue(rds_wq); |
236 | rds_ib_flush_mr_pool(pool, 1); | 236 | rds_ib_flush_mr_pool(pool, 1); |
237 | BUG_ON(atomic_read(&pool->item_count)); | 237 | WARN_ON(atomic_read(&pool->item_count)); |
238 | BUG_ON(atomic_read(&pool->free_pinned)); | 238 | WARN_ON(atomic_read(&pool->free_pinned)); |
239 | kfree(pool); | 239 | kfree(pool); |
240 | } | 240 | } |
241 | 241 | ||
@@ -440,6 +440,7 @@ static void __rds_ib_teardown_mr(struct rds_ib_mr *ibmr) | |||
440 | 440 | ||
441 | /* FIXME we need a way to tell a r/w MR | 441 | /* FIXME we need a way to tell a r/w MR |
442 | * from a r/o MR */ | 442 | * from a r/o MR */ |
443 | BUG_ON(in_interrupt()); | ||
443 | set_page_dirty(page); | 444 | set_page_dirty(page); |
444 | put_page(page); | 445 | put_page(page); |
445 | } | 446 | } |
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c index 04dc0d3f3c95..c338881eca71 100644 --- a/net/rds/ib_recv.c +++ b/net/rds/ib_recv.c | |||
@@ -468,8 +468,8 @@ static void rds_ib_send_ack(struct rds_ib_connection *ic, unsigned int adv_credi | |||
468 | set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags); | 468 | set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags); |
469 | 469 | ||
470 | rds_ib_stats_inc(s_ib_ack_send_failure); | 470 | rds_ib_stats_inc(s_ib_ack_send_failure); |
471 | /* Need to finesse this later. */ | 471 | |
472 | BUG(); | 472 | rds_ib_conn_error(ic->conn, "sending ack failed\n"); |
473 | } else | 473 | } else |
474 | rds_ib_stats_inc(s_ib_ack_sent); | 474 | rds_ib_stats_inc(s_ib_ack_sent); |
475 | } | 475 | } |
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c index a10fab6886d1..17fa80803ab0 100644 --- a/net/rds/ib_send.c +++ b/net/rds/ib_send.c | |||
@@ -243,8 +243,12 @@ void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context) | |||
243 | struct rds_message *rm; | 243 | struct rds_message *rm; |
244 | 244 | ||
245 | rm = rds_send_get_message(conn, send->s_op); | 245 | rm = rds_send_get_message(conn, send->s_op); |
246 | if (rm) | 246 | if (rm) { |
247 | if (rm->m_rdma_op) | ||
248 | rds_ib_send_unmap_rdma(ic, rm->m_rdma_op); | ||
247 | rds_ib_send_rdma_complete(rm, wc.status); | 249 | rds_ib_send_rdma_complete(rm, wc.status); |
250 | rds_message_put(rm); | ||
251 | } | ||
248 | } | 252 | } |
249 | 253 | ||
250 | oldest = (oldest + 1) % ic->i_send_ring.w_nr; | 254 | oldest = (oldest + 1) % ic->i_send_ring.w_nr; |
@@ -482,6 +486,13 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm, | |||
482 | BUG_ON(off % RDS_FRAG_SIZE); | 486 | BUG_ON(off % RDS_FRAG_SIZE); |
483 | BUG_ON(hdr_off != 0 && hdr_off != sizeof(struct rds_header)); | 487 | BUG_ON(hdr_off != 0 && hdr_off != sizeof(struct rds_header)); |
484 | 488 | ||
489 | /* Do not send cong updates to IB loopback */ | ||
490 | if (conn->c_loopback | ||
491 | && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) { | ||
492 | rds_cong_map_updated(conn->c_fcong, ~(u64) 0); | ||
493 | return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES; | ||
494 | } | ||
495 | |||
485 | /* FIXME we may overallocate here */ | 496 | /* FIXME we may overallocate here */ |
486 | if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0) | 497 | if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0) |
487 | i = 1; | 498 | i = 1; |
@@ -574,8 +585,7 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm, | |||
574 | rds_ib_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits); | 585 | rds_ib_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits); |
575 | adv_credits += posted; | 586 | adv_credits += posted; |
576 | BUG_ON(adv_credits > 255); | 587 | BUG_ON(adv_credits > 255); |
577 | } else if (ic->i_rm != rm) | 588 | } |
578 | BUG(); | ||
579 | 589 | ||
580 | send = &ic->i_sends[pos]; | 590 | send = &ic->i_sends[pos]; |
581 | first = send; | 591 | first = send; |
@@ -714,8 +724,8 @@ add_header: | |||
714 | ic->i_rm = prev->s_rm; | 724 | ic->i_rm = prev->s_rm; |
715 | prev->s_rm = NULL; | 725 | prev->s_rm = NULL; |
716 | } | 726 | } |
717 | /* Finesse this later */ | 727 | |
718 | BUG(); | 728 | rds_ib_conn_error(ic->conn, "ib_post_send failed\n"); |
719 | goto out; | 729 | goto out; |
720 | } | 730 | } |
721 | 731 | ||
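The completion-handler hunk pairs the reference taken by rds_send_get_message() with an explicit rds_message_put() once the RDMA op is unmapped; the same file also short-circuits congestion-map sends on loopback, matching the check removed from rds_cong_queue_updates() earlier. A generic, userspace-runnable illustration of that get/put discipline (types and names are placeholders, not RDS code):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct msg {
        atomic_int refcount;
    };

    static struct msg *msg_get(struct msg *m)
    {
        atomic_fetch_add(&m->refcount, 1);
        return m;
    }

    static void msg_put(struct msg *m)
    {
        /* fetch_sub returns the old value: 1 means we dropped the last ref */
        if (atomic_fetch_sub(&m->refcount, 1) == 1) {
            printf("last reference dropped, freeing\n");
            free(m);
        }
    }

    int main(void)
    {
        struct msg *m = malloc(sizeof(*m));

        if (!m)
            return 1;
        atomic_init(&m->refcount, 1);   /* creator's reference */

        struct msg *got = msg_get(m);   /* lookup takes its own reference */
        /* ... completion work: unmap RDMA op, signal status ... */
        msg_put(got);                   /* pair the lookup's get */

        msg_put(m);                     /* drop the creator's reference */
        return 0;
    }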
diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c index 394cf6b4d0aa..6bc638fd252c 100644 --- a/net/rds/iw_cm.c +++ b/net/rds/iw_cm.c | |||
@@ -156,9 +156,11 @@ static void rds_iw_qp_event_handler(struct ib_event *event, void *data) | |||
156 | case IB_EVENT_QP_REQ_ERR: | 156 | case IB_EVENT_QP_REQ_ERR: |
157 | case IB_EVENT_QP_FATAL: | 157 | case IB_EVENT_QP_FATAL: |
158 | default: | 158 | default: |
159 | rds_iw_conn_error(conn, "RDS/IW: Fatal QP Event %u - connection %pI4->%pI4...reconnecting\n", | 159 | rdsdebug("Fatal QP Event %u " |
160 | "- connection %pI4->%pI4, reconnecting\n", | ||
160 | event->event, &conn->c_laddr, | 161 | event->event, &conn->c_laddr, |
161 | &conn->c_faddr); | 162 | &conn->c_faddr); |
163 | rds_conn_drop(conn); | ||
162 | break; | 164 | break; |
163 | } | 165 | } |
164 | } | 166 | } |
diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c index 54af7d6b92da..337e4e5025e2 100644 --- a/net/rds/iw_recv.c +++ b/net/rds/iw_recv.c | |||
@@ -468,8 +468,8 @@ static void rds_iw_send_ack(struct rds_iw_connection *ic, unsigned int adv_credi | |||
468 | set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags); | 468 | set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags); |
469 | 469 | ||
470 | rds_iw_stats_inc(s_iw_ack_send_failure); | 470 | rds_iw_stats_inc(s_iw_ack_send_failure); |
471 | /* Need to finesse this later. */ | 471 | |
472 | BUG(); | 472 | rds_iw_conn_error(ic->conn, "sending ack failed\n"); |
473 | } else | 473 | } else |
474 | rds_iw_stats_inc(s_iw_ack_sent); | 474 | rds_iw_stats_inc(s_iw_ack_sent); |
475 | } | 475 | } |
diff --git a/net/rds/iw_send.c b/net/rds/iw_send.c index 1379e9d66a78..52182ff7519e 100644 --- a/net/rds/iw_send.c +++ b/net/rds/iw_send.c | |||
@@ -616,8 +616,7 @@ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm, | |||
616 | rds_iw_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits); | 616 | rds_iw_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits); |
617 | adv_credits += posted; | 617 | adv_credits += posted; |
618 | BUG_ON(adv_credits > 255); | 618 | BUG_ON(adv_credits > 255); |
619 | } else if (ic->i_rm != rm) | 619 | } |
620 | BUG(); | ||
621 | 620 | ||
622 | send = &ic->i_sends[pos]; | 621 | send = &ic->i_sends[pos]; |
623 | first = send; | 622 | first = send; |
diff --git a/net/rds/loop.c b/net/rds/loop.c index 4a61997f554d..93a45f1ce61f 100644 --- a/net/rds/loop.c +++ b/net/rds/loop.c | |||
@@ -80,16 +80,9 @@ static int rds_loop_xmit_cong_map(struct rds_connection *conn, | |||
80 | struct rds_cong_map *map, | 80 | struct rds_cong_map *map, |
81 | unsigned long offset) | 81 | unsigned long offset) |
82 | { | 82 | { |
83 | unsigned long i; | ||
84 | |||
85 | BUG_ON(offset); | 83 | BUG_ON(offset); |
86 | BUG_ON(map != conn->c_lcong); | 84 | BUG_ON(map != conn->c_lcong); |
87 | 85 | ||
88 | for (i = 0; i < RDS_CONG_MAP_PAGES; i++) { | ||
89 | memcpy((void *)conn->c_fcong->m_page_addrs[i], | ||
90 | (void *)map->m_page_addrs[i], PAGE_SIZE); | ||
91 | } | ||
92 | |||
93 | rds_cong_map_updated(conn->c_fcong, ~(u64) 0); | 86 | rds_cong_map_updated(conn->c_fcong, ~(u64) 0); |
94 | 87 | ||
95 | return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES; | 88 | return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES; |
diff --git a/net/rds/rdma.c b/net/rds/rdma.c index 4c64daa1f5d5..61b359d9dffd 100644 --- a/net/rds/rdma.c +++ b/net/rds/rdma.c | |||
@@ -438,8 +438,10 @@ void rds_rdma_free_op(struct rds_rdma_op *ro) | |||
438 | /* Mark page dirty if it was possibly modified, which | 438 | /* Mark page dirty if it was possibly modified, which |
439 | * is the case for a RDMA_READ which copies from remote | 439 | * is the case for a RDMA_READ which copies from remote |
440 | * to local memory */ | 440 | * to local memory */ |
441 | if (!ro->r_write) | 441 | if (!ro->r_write) { |
442 | BUG_ON(in_interrupt()); | ||
442 | set_page_dirty(page); | 443 | set_page_dirty(page); |
444 | } | ||
443 | put_page(page); | 445 | put_page(page); |
444 | } | 446 | } |
445 | 447 | ||
diff --git a/net/rds/rdma_transport.c b/net/rds/rdma_transport.c index 9ece910ea394..5ea82fc47c3e 100644 --- a/net/rds/rdma_transport.c +++ b/net/rds/rdma_transport.c | |||
@@ -101,7 +101,7 @@ int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id, | |||
101 | break; | 101 | break; |
102 | 102 | ||
103 | case RDMA_CM_EVENT_DISCONNECTED: | 103 | case RDMA_CM_EVENT_DISCONNECTED: |
104 | printk(KERN_WARNING "RDS/RDMA: DISCONNECT event - dropping connection " | 104 | rdsdebug("DISCONNECT event - dropping connection " |
105 | "%pI4->%pI4\n", &conn->c_laddr, | 105 | "%pI4->%pI4\n", &conn->c_laddr, |
106 | &conn->c_faddr); | 106 | &conn->c_faddr); |
107 | rds_conn_drop(conn); | 107 | rds_conn_drop(conn); |
@@ -109,8 +109,7 @@ int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id, | |||
109 | 109 | ||
110 | default: | 110 | default: |
111 | /* things like device disconnect? */ | 111 | /* things like device disconnect? */ |
112 | printk(KERN_ERR "unknown event %u\n", event->event); | 112 | printk(KERN_ERR "RDS: unknown event %u!\n", event->event); |
113 | BUG(); | ||
114 | break; | 113 | break; |
115 | } | 114 | } |
116 | 115 | ||
diff --git a/net/rds/rds.h b/net/rds/rds.h index 85d6f897ecc7..4bec6e2ed495 100644 --- a/net/rds/rds.h +++ b/net/rds/rds.h | |||
@@ -388,6 +388,8 @@ struct rds_sock { | |||
388 | 388 | ||
389 | /* flag indicating we were congested or not */ | 389 | /* flag indicating we were congested or not */ |
390 | int rs_congested; | 390 | int rs_congested; |
391 | /* seen congestion (ENOBUFS) when sending? */ | ||
392 | int rs_seen_congestion; | ||
391 | 393 | ||
392 | /* rs_lock protects all these adjacent members before the newline */ | 394 | /* rs_lock protects all these adjacent members before the newline */ |
393 | spinlock_t rs_lock; | 395 | spinlock_t rs_lock; |
diff --git a/net/rds/send.c b/net/rds/send.c index b2fccfc20769..4629a0b63bbd 100644 --- a/net/rds/send.c +++ b/net/rds/send.c | |||
@@ -507,12 +507,13 @@ EXPORT_SYMBOL_GPL(rds_send_get_message); | |||
507 | */ | 507 | */ |
508 | void rds_send_remove_from_sock(struct list_head *messages, int status) | 508 | void rds_send_remove_from_sock(struct list_head *messages, int status) |
509 | { | 509 | { |
510 | unsigned long flags = 0; /* silence gcc :P */ | 510 | unsigned long flags; |
511 | struct rds_sock *rs = NULL; | 511 | struct rds_sock *rs = NULL; |
512 | struct rds_message *rm; | 512 | struct rds_message *rm; |
513 | 513 | ||
514 | local_irq_save(flags); | ||
515 | while (!list_empty(messages)) { | 514 | while (!list_empty(messages)) { |
515 | int was_on_sock = 0; | ||
516 | |||
516 | rm = list_entry(messages->next, struct rds_message, | 517 | rm = list_entry(messages->next, struct rds_message, |
517 | m_conn_item); | 518 | m_conn_item); |
518 | list_del_init(&rm->m_conn_item); | 519 | list_del_init(&rm->m_conn_item); |
@@ -527,20 +528,19 @@ void rds_send_remove_from_sock(struct list_head *messages, int status) | |||
527 | * while we're messing with it. It does not prevent the | 528 | * while we're messing with it. It does not prevent the |
528 | * message from being removed from the socket, though. | 529 | * message from being removed from the socket, though. |
529 | */ | 530 | */ |
530 | spin_lock(&rm->m_rs_lock); | 531 | spin_lock_irqsave(&rm->m_rs_lock, flags); |
531 | if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) | 532 | if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) |
532 | goto unlock_and_drop; | 533 | goto unlock_and_drop; |
533 | 534 | ||
534 | if (rs != rm->m_rs) { | 535 | if (rs != rm->m_rs) { |
535 | if (rs) { | 536 | if (rs) { |
536 | spin_unlock(&rs->rs_lock); | ||
537 | rds_wake_sk_sleep(rs); | 537 | rds_wake_sk_sleep(rs); |
538 | sock_put(rds_rs_to_sk(rs)); | 538 | sock_put(rds_rs_to_sk(rs)); |
539 | } | 539 | } |
540 | rs = rm->m_rs; | 540 | rs = rm->m_rs; |
541 | spin_lock(&rs->rs_lock); | ||
542 | sock_hold(rds_rs_to_sk(rs)); | 541 | sock_hold(rds_rs_to_sk(rs)); |
543 | } | 542 | } |
543 | spin_lock(&rs->rs_lock); | ||
544 | 544 | ||
545 | if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) { | 545 | if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) { |
546 | struct rds_rdma_op *ro = rm->m_rdma_op; | 546 | struct rds_rdma_op *ro = rm->m_rdma_op; |
@@ -557,21 +557,22 @@ void rds_send_remove_from_sock(struct list_head *messages, int status) | |||
557 | notifier->n_status = status; | 557 | notifier->n_status = status; |
558 | rm->m_rdma_op->r_notifier = NULL; | 558 | rm->m_rdma_op->r_notifier = NULL; |
559 | } | 559 | } |
560 | rds_message_put(rm); | 560 | was_on_sock = 1; |
561 | rm->m_rs = NULL; | 561 | rm->m_rs = NULL; |
562 | } | 562 | } |
563 | spin_unlock(&rs->rs_lock); | ||
563 | 564 | ||
564 | unlock_and_drop: | 565 | unlock_and_drop: |
565 | spin_unlock(&rm->m_rs_lock); | 566 | spin_unlock_irqrestore(&rm->m_rs_lock, flags); |
566 | rds_message_put(rm); | 567 | rds_message_put(rm); |
568 | if (was_on_sock) | ||
569 | rds_message_put(rm); | ||
567 | } | 570 | } |
568 | 571 | ||
569 | if (rs) { | 572 | if (rs) { |
570 | spin_unlock(&rs->rs_lock); | ||
571 | rds_wake_sk_sleep(rs); | 573 | rds_wake_sk_sleep(rs); |
572 | sock_put(rds_rs_to_sk(rs)); | 574 | sock_put(rds_rs_to_sk(rs)); |
573 | } | 575 | } |
574 | local_irq_restore(flags); | ||
575 | } | 576 | } |
576 | 577 | ||
577 | /* | 578 | /* |
@@ -633,9 +634,6 @@ void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest) | |||
633 | list_move(&rm->m_sock_item, &list); | 634 | list_move(&rm->m_sock_item, &list); |
634 | rds_send_sndbuf_remove(rs, rm); | 635 | rds_send_sndbuf_remove(rs, rm); |
635 | clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags); | 636 | clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags); |
636 | |||
637 | /* If this is a RDMA operation, notify the app. */ | ||
638 | __rds_rdma_send_complete(rs, rm, RDS_RDMA_CANCELED); | ||
639 | } | 637 | } |
640 | 638 | ||
641 | /* order flag updates with the rs lock */ | 639 | /* order flag updates with the rs lock */ |
@@ -644,9 +642,6 @@ void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest) | |||
644 | 642 | ||
645 | spin_unlock_irqrestore(&rs->rs_lock, flags); | 643 | spin_unlock_irqrestore(&rs->rs_lock, flags); |
646 | 644 | ||
647 | if (wake) | ||
648 | rds_wake_sk_sleep(rs); | ||
649 | |||
650 | conn = NULL; | 645 | conn = NULL; |
651 | 646 | ||
652 | /* now remove the messages from the conn list as needed */ | 647 | /* now remove the messages from the conn list as needed */ |
@@ -654,6 +649,10 @@ void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest) | |||
654 | /* We do this here rather than in the loop above, so that | 649 | /* We do this here rather than in the loop above, so that |
655 | * we don't have to nest m_rs_lock under rs->rs_lock */ | 650 | * we don't have to nest m_rs_lock under rs->rs_lock */ |
656 | spin_lock_irqsave(&rm->m_rs_lock, flags2); | 651 | spin_lock_irqsave(&rm->m_rs_lock, flags2); |
652 | /* If this is a RDMA operation, notify the app. */ | ||
653 | spin_lock(&rs->rs_lock); | ||
654 | __rds_rdma_send_complete(rs, rm, RDS_RDMA_CANCELED); | ||
655 | spin_unlock(&rs->rs_lock); | ||
657 | rm->m_rs = NULL; | 656 | rm->m_rs = NULL; |
658 | spin_unlock_irqrestore(&rm->m_rs_lock, flags2); | 657 | spin_unlock_irqrestore(&rm->m_rs_lock, flags2); |
659 | 658 | ||
@@ -682,6 +681,9 @@ void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest) | |||
682 | if (conn) | 681 | if (conn) |
683 | spin_unlock_irqrestore(&conn->c_lock, flags); | 682 | spin_unlock_irqrestore(&conn->c_lock, flags); |
684 | 683 | ||
684 | if (wake) | ||
685 | rds_wake_sk_sleep(rs); | ||
686 | |||
685 | while (!list_empty(&list)) { | 687 | while (!list_empty(&list)) { |
686 | rm = list_entry(list.next, struct rds_message, m_sock_item); | 688 | rm = list_entry(list.next, struct rds_message, m_sock_item); |
687 | list_del_init(&rm->m_sock_item); | 689 | list_del_init(&rm->m_sock_item); |
@@ -815,7 +817,7 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, | |||
815 | int ret = 0; | 817 | int ret = 0; |
816 | int queued = 0, allocated_mr = 0; | 818 | int queued = 0, allocated_mr = 0; |
817 | int nonblock = msg->msg_flags & MSG_DONTWAIT; | 819 | int nonblock = msg->msg_flags & MSG_DONTWAIT; |
818 | long timeo = sock_rcvtimeo(sk, nonblock); | 820 | long timeo = sock_sndtimeo(sk, nonblock); |
819 | 821 | ||
820 | /* Mirror Linux UDP mirror of BSD error message compatibility */ | 822 | /* Mirror Linux UDP mirror of BSD error message compatibility */ |
821 | /* XXX: Perhaps MSG_MORE someday */ | 823 | /* XXX: Perhaps MSG_MORE someday */ |
@@ -894,8 +896,10 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, | |||
894 | queue_delayed_work(rds_wq, &conn->c_conn_w, 0); | 896 | queue_delayed_work(rds_wq, &conn->c_conn_w, 0); |
895 | 897 | ||
896 | ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs); | 898 | ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs); |
897 | if (ret) | 899 | if (ret) { |
900 | rs->rs_seen_congestion = 1; | ||
898 | goto out; | 901 | goto out; |
902 | } | ||
899 | 903 | ||
900 | while (!rds_send_queue_rm(rs, conn, rm, rs->rs_bound_port, | 904 | while (!rds_send_queue_rm(rs, conn, rm, rs->rs_bound_port, |
901 | dport, &queued)) { | 905 | dport, &queued)) { |
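The send.c rework replaces a loop-wide local_irq_save() with a per-message spin_lock_irqsave() on m_rs_lock, takes rs_lock only inside that window, and defers the extra reference drop with a was_on_sock flag; the final hunk also corrects rds_sendmsg() to use the send-side timeout, sock_sndtimeo(), rather than sock_rcvtimeo(). A condensed kernel-style sketch of the new locking shape, with placeholder types standing in for rds_message (locks and refcounts are assumed initialized by the caller):

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct msg {
        struct list_head item;
        spinlock_t lock;                /* stands in for rm->m_rs_lock */
        int on_sock;
    };

    void msg_put(struct msg *m);        /* placeholder reference drop */

    static void remove_all(struct list_head *messages)
    {
        unsigned long flags;
        struct msg *m;

        while (!list_empty(messages)) {
            int was_on_sock = 0;

            m = list_entry(messages->next, struct msg, item);
            list_del_init(&m->item);

            /* IRQ-safe lock held per message, not across the whole loop */
            spin_lock_irqsave(&m->lock, flags);
            if (m->on_sock) {
                m->on_sock = 0;
                was_on_sock = 1;
            }
            spin_unlock_irqrestore(&m->lock, flags);

            msg_put(m);                 /* drop the list's reference */
            if (was_on_sock)
                msg_put(m);             /* and the socket's, outside the lock */
        }
    }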
diff --git a/net/rds/tcp_recv.c b/net/rds/tcp_recv.c index c00dafffbb5a..40bfcf887465 100644 --- a/net/rds/tcp_recv.c +++ b/net/rds/tcp_recv.c | |||
@@ -97,6 +97,7 @@ int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iovec *first_iov, | |||
97 | goto out; | 97 | goto out; |
98 | } | 98 | } |
99 | 99 | ||
100 | rds_stats_add(s_copy_to_user, to_copy); | ||
100 | size -= to_copy; | 101 | size -= to_copy; |
101 | ret += to_copy; | 102 | ret += to_copy; |
102 | skb_off += to_copy; | 103 | skb_off += to_copy; |
diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c index 34fdcc059e54..a28b895ff0d1 100644 --- a/net/rds/tcp_send.c +++ b/net/rds/tcp_send.c | |||
@@ -240,7 +240,9 @@ void rds_tcp_write_space(struct sock *sk) | |||
240 | tc->t_last_seen_una = rds_tcp_snd_una(tc); | 240 | tc->t_last_seen_una = rds_tcp_snd_una(tc); |
241 | rds_send_drop_acked(conn, rds_tcp_snd_una(tc), rds_tcp_is_acked); | 241 | rds_send_drop_acked(conn, rds_tcp_snd_una(tc), rds_tcp_is_acked); |
242 | 242 | ||
243 | queue_delayed_work(rds_wq, &conn->c_send_w, 0); | 243 | if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) |
244 | queue_delayed_work(rds_wq, &conn->c_send_w, 0); | ||
245 | |||
244 | out: | 246 | out: |
245 | read_unlock(&sk->sk_callback_lock); | 247 | read_unlock(&sk->sk_callback_lock); |
246 | 248 | ||
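The tcp_send.c guard wakes the send worker only once at least half of sk_sndbuf has drained, instead of queueing work on every ACK that frees write space. A standalone demonstration of the same threshold test:

    #include <stdio.h>

    static int should_wake(unsigned int wmem_alloc, unsigned int sndbuf)
    {
        return (wmem_alloc << 1) <= sndbuf;   /* wmem_alloc <= sndbuf / 2 */
    }

    int main(void)
    {
        printf("%d\n", should_wake(60000, 100000));  /* 0: still > half full */
        printf("%d\n", should_wake(40000, 100000));  /* 1: half has drained */
        return 0;
    }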
diff --git a/net/rds/threads.c b/net/rds/threads.c index 00fa10e59af8..786c20eaaf5e 100644 --- a/net/rds/threads.c +++ b/net/rds/threads.c | |||
@@ -259,7 +259,7 @@ void rds_threads_exit(void) | |||
259 | 259 | ||
260 | int __init rds_threads_init(void) | 260 | int __init rds_threads_init(void) |
261 | { | 261 | { |
262 | rds_wq = create_singlethread_workqueue("krdsd"); | 262 | rds_wq = create_workqueue("krdsd"); |
263 | if (rds_wq == NULL) | 263 | if (rds_wq == NULL) |
264 | return -ENOMEM; | 264 | return -ENOMEM; |
265 | 265 | ||
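Switching krdsd from create_singlethread_workqueue() to create_workqueue() gives it one worker thread per CPU, so work items for independent connections no longer serialize behind a single thread. A hedged module sketch of the API difference for kernels of this era (demo names are invented):

    #include <linux/module.h>
    #include <linux/smp.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *wq;
    static struct work_struct work_a, work_b;

    static void demo_fn(struct work_struct *w)
    {
        pr_info("work %p ran on CPU %d\n", w, raw_smp_processor_id());
    }

    static int __init wq_demo_init(void)
    {
        wq = create_workqueue("demo");      /* one worker thread per CPU */
        if (!wq)
            return -ENOMEM;
        INIT_WORK(&work_a, demo_fn);
        INIT_WORK(&work_b, demo_fn);
        queue_work(wq, &work_a);            /* items may now run in parallel */
        queue_work(wq, &work_b);
        return 0;
    }

    static void __exit wq_demo_exit(void)
    {
        destroy_workqueue(wq);              /* flushes any pending work */
    }

    module_init(wq_demo_init);
    module_exit(wq_demo_exit);
    MODULE_LICENSE("GPL");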
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c index a3bfd4064912..90a051912c03 100644 --- a/net/tipc/bcast.c +++ b/net/tipc/bcast.c | |||
@@ -558,10 +558,7 @@ static int tipc_bcbearer_send(struct sk_buff *buf, | |||
558 | struct tipc_bearer *unused1, | 558 | struct tipc_bearer *unused1, |
559 | struct tipc_media_addr *unused2) | 559 | struct tipc_media_addr *unused2) |
560 | { | 560 | { |
561 | static int send_count = 0; | ||
562 | |||
563 | int bp_index; | 561 | int bp_index; |
564 | int swap_time; | ||
565 | 562 | ||
566 | /* Prepare buffer for broadcasting (if first time trying to send it) */ | 563 | /* Prepare buffer for broadcasting (if first time trying to send it) */ |
567 | 564 | ||
@@ -575,11 +572,6 @@ static int tipc_bcbearer_send(struct sk_buff *buf, | |||
575 | msg_set_mc_netid(msg, tipc_net_id); | 572 | msg_set_mc_netid(msg, tipc_net_id); |
576 | } | 573 | } |
577 | 574 | ||
578 | /* Determine if bearer pairs should be swapped following this attempt */ | ||
579 | |||
580 | if ((swap_time = (++send_count >= 10))) | ||
581 | send_count = 0; | ||
582 | |||
583 | /* Send buffer over bearers until all targets reached */ | 575 | /* Send buffer over bearers until all targets reached */ |
584 | 576 | ||
585 | bcbearer->remains = tipc_cltr_bcast_nodes; | 577 | bcbearer->remains = tipc_cltr_bcast_nodes; |
@@ -595,21 +587,22 @@ static int tipc_bcbearer_send(struct sk_buff *buf, | |||
595 | if (bcbearer->remains_new.count == bcbearer->remains.count) | 587 | if (bcbearer->remains_new.count == bcbearer->remains.count) |
596 | continue; /* bearer pair doesn't add anything */ | 588 | continue; /* bearer pair doesn't add anything */ |
597 | 589 | ||
598 | if (!p->publ.blocked && | 590 | if (p->publ.blocked || |
599 | !p->media->send_msg(buf, &p->publ, &p->media->bcast_addr)) { | 591 | p->media->send_msg(buf, &p->publ, &p->media->bcast_addr)) { |
600 | if (swap_time && s && !s->publ.blocked) | 592 | /* unable to send on primary bearer */ |
601 | goto swap; | 593 | if (!s || s->publ.blocked || |
602 | else | 594 | s->media->send_msg(buf, &s->publ, |
603 | goto update; | 595 | &s->media->bcast_addr)) { |
596 | /* unable to send on either bearer */ | ||
597 | continue; | ||
598 | } | ||
599 | } | ||
600 | |||
601 | if (s) { | ||
602 | bcbearer->bpairs[bp_index].primary = s; | ||
603 | bcbearer->bpairs[bp_index].secondary = p; | ||
604 | } | 604 | } |
605 | 605 | ||
606 | if (!s || s->publ.blocked || | ||
607 | s->media->send_msg(buf, &s->publ, &s->media->bcast_addr)) | ||
608 | continue; /* unable to send using bearer pair */ | ||
609 | swap: | ||
610 | bcbearer->bpairs[bp_index].primary = s; | ||
611 | bcbearer->bpairs[bp_index].secondary = p; | ||
612 | update: | ||
613 | if (bcbearer->remains_new.count == 0) | 606 | if (bcbearer->remains_new.count == 0) |
614 | return 0; | 607 | return 0; |
615 | 608 | ||
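The bcast.c rewrite drops the every-ten-sends swap counter; as the new flow reads, a bearer pair is reordered after a successful transmission whenever a secondary exists, and left alone only when both bearers fail. A condensed sketch of that control flow with placeholder types, offered as one interpretation of the hunk rather than TIPC code:

    struct bearer;

    struct bearer_pair {
        struct bearer *primary;
        struct bearer *secondary;
    };

    /* returns 0 on success, nonzero on failure; placeholder */
    int bearer_send(struct bearer *b, const void *buf);

    static void pair_send(struct bearer_pair *bp, const void *buf)
    {
        struct bearer *p = bp->primary;
        struct bearer *s = bp->secondary;

        if (bearer_send(p, buf) != 0) {
            /* unable to send on primary bearer */
            if (!s || bearer_send(s, buf) != 0)
                return;         /* unable to send on either bearer */
        }

        /* reorder so both bearers share the broadcast load */
        if (s) {
            bp->primary = s;
            bp->secondary = p;
        }
    }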
diff --git a/net/tipc/link.c b/net/tipc/link.c index 1a7e4665af80..49f2be8622a9 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c | |||
@@ -1553,7 +1553,7 @@ u32 tipc_link_push_packet(struct link *l_ptr) | |||
1553 | 1553 | ||
1554 | /* Continue retransmission now, if there is anything: */ | 1554 | /* Continue retransmission now, if there is anything: */ |
1555 | 1555 | ||
1556 | if (r_q_size && buf && !skb_cloned(buf)) { | 1556 | if (r_q_size && buf) { |
1557 | msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1)); | 1557 | msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1)); |
1558 | msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in); | 1558 | msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in); |
1559 | if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) { | 1559 | if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) { |
@@ -1722,15 +1722,16 @@ void tipc_link_retransmit(struct link *l_ptr, struct sk_buff *buf, | |||
1722 | dbg("Retransmitting %u in link %x\n", retransmits, l_ptr); | 1722 | dbg("Retransmitting %u in link %x\n", retransmits, l_ptr); |
1723 | 1723 | ||
1724 | if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) { | 1724 | if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) { |
1725 | if (!skb_cloned(buf)) { | 1725 | if (l_ptr->retransm_queue_size == 0) { |
1726 | msg_dbg(msg, ">NO_RETR->BCONG>"); | 1726 | msg_dbg(msg, ">NO_RETR->BCONG>"); |
1727 | dbg_print_link(l_ptr, " "); | 1727 | dbg_print_link(l_ptr, " "); |
1728 | l_ptr->retransm_queue_head = msg_seqno(msg); | 1728 | l_ptr->retransm_queue_head = msg_seqno(msg); |
1729 | l_ptr->retransm_queue_size = retransmits; | 1729 | l_ptr->retransm_queue_size = retransmits; |
1730 | return; | ||
1731 | } else { | 1730 | } else { |
1732 | /* Don't retransmit if driver already has the buffer */ | 1731 | err("Unexpected retransmit on link %s (qsize=%d)\n", |
1732 | l_ptr->name, l_ptr->retransm_queue_size); | ||
1733 | } | 1733 | } |
1734 | return; | ||
1734 | } else { | 1735 | } else { |
1735 | /* Detect repeated retransmit failures on uncongested bearer */ | 1736 | /* Detect repeated retransmit failures on uncongested bearer */ |
1736 | 1737 | ||
@@ -1745,7 +1746,7 @@ void tipc_link_retransmit(struct link *l_ptr, struct sk_buff *buf, | |||
1745 | } | 1746 | } |
1746 | } | 1747 | } |
1747 | 1748 | ||
1748 | while (retransmits && (buf != l_ptr->next_out) && buf && !skb_cloned(buf)) { | 1749 | while (retransmits && (buf != l_ptr->next_out) && buf) { |
1749 | msg = buf_msg(buf); | 1750 | msg = buf_msg(buf); |
1750 | msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); | 1751 | msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); |
1751 | msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); | 1752 | msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); |