Diffstat (limited to 'net/core/dev.c')
 -rw-r--r--  net/core/dev.c | 149
 1 file changed, 88 insertions, 61 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index ea2469398bd5..4d891beab138 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -76,7 +76,6 @@
 #include <asm/system.h>
 #include <linux/bitops.h>
 #include <linux/capability.h>
-#include <linux/config.h>
 #include <linux/cpu.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
@@ -117,6 +116,7 @@
 #include <linux/audit.h>
 #include <linux/dmaengine.h>
 #include <linux/err.h>
+#include <linux/ctype.h>
 
 /*
  * The list of packet types we will receive (as opposed to discard)
@@ -230,7 +230,7 @@ extern void netdev_unregister_sysfs(struct net_device *);
  * For efficiency
  */
 
-int netdev_nit;
+static int netdev_nit;
 
 /*
  * Add a protocol ID to the list. Now that the input handler is
@@ -633,14 +633,24 @@ struct net_device * dev_get_by_flags(unsigned short if_flags, unsigned short mas
  * @name: name string
  *
  * Network device names need to be valid file names to
- * to allow sysfs to work
+ * to allow sysfs to work. We also disallow any kind of
+ * whitespace.
  */
 int dev_valid_name(const char *name)
 {
-	return !(*name == '\0'
-		 || !strcmp(name, ".")
-		 || !strcmp(name, "..")
-		 || strchr(name, '/'));
+	if (*name == '\0')
+		return 0;
+	if (strlen(name) >= IFNAMSIZ)
+		return 0;
+	if (!strcmp(name, ".") || !strcmp(name, ".."))
+		return 0;
+
+	while (*name) {
+		if (*name == '/' || isspace(*name))
+			return 0;
+		name++;
+	}
+	return 1;
 }
 
 /**
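The reworked dev_valid_name() above is self-contained enough to exercise in
userspace. A minimal sketch of the same checks (IFNAMSIZ hard-coded to the
kernel's value of 16; the test names are made up for illustration):

#include <ctype.h>
#include <stdio.h>
#include <string.h>

#define IFNAMSIZ 16	/* assumed: matches the kernel's definition */

static int dev_valid_name(const char *name)
{
	if (*name == '\0')
		return 0;
	if (strlen(name) >= IFNAMSIZ)
		return 0;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return 0;

	while (*name) {
		/* cast keeps the C library's isspace() in defined territory */
		if (*name == '/' || isspace((unsigned char)*name))
			return 0;
		name++;
	}
	return 1;
}

int main(void)
{
	const char *names[] = { "eth0", "eth 0", "eth\t0", ".", "..", "a/b", "" };
	size_t i;

	for (i = 0; i < sizeof(names) / sizeof(names[0]); i++)
		printf("%-8s -> %d\n", names[i], dev_valid_name(names[i]));
	return 0;
}

Only "eth0" passes; the whitespace cases are the ones newly rejected by this
change.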
@@ -1158,14 +1168,17 @@ EXPORT_SYMBOL(netif_device_attach);
  * Invalidate hardware checksum when packet is to be mangled, and
  * complete checksum manually on outgoing path.
  */
-int skb_checksum_help(struct sk_buff *skb, int inward)
+int skb_checksum_help(struct sk_buff *skb)
 {
 	unsigned int csum;
 	int ret = 0, offset = skb->h.raw - skb->data;
 
-	if (inward) {
-		skb->ip_summed = CHECKSUM_NONE;
-		goto out;
+	if (skb->ip_summed == CHECKSUM_COMPLETE)
+		goto out_set_summed;
+
+	if (unlikely(skb_shinfo(skb)->gso_size)) {
+		/* Let GSO fix up the checksum. */
+		goto out_set_summed;
 	}
 
 	if (skb_cloned(skb)) {
@@ -1182,6 +1195,8 @@ int skb_checksum_help(struct sk_buff *skb, int inward)
 	BUG_ON(skb->csum + 2 > offset);
 
 	*(u16*)(skb->h.raw + skb->csum) = csum_fold(csum);
+
+out_set_summed:
 	skb->ip_summed = CHECKSUM_NONE;
 out:
 	return ret;
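For reference, csum_fold() used above compresses the 32-bit running checksum
into the final 16-bit one's-complement value that is written into the packet.
A userspace sketch of the standard fold (the kernel's versions are
arch-specific, often assembly; this is just the portable shape):

#include <stdint.h>

static uint16_t csum_fold(uint32_t csum)
{
	csum = (csum & 0xffff) + (csum >> 16);	/* fold the high half in */
	csum = (csum & 0xffff) + (csum >> 16);	/* absorb the carry, if any */
	return (uint16_t)~csum;			/* one's complement */
}

skb_checksum_help() stores that folded result at skb->h.raw + skb->csum on
the outgoing path, as the assignment above shows.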
@@ -1190,32 +1205,50 @@ out:
 /**
  * skb_gso_segment - Perform segmentation on skb.
  * @skb: buffer to segment
- * @sg: whether scatter-gather is supported on the target.
+ * @features: features for the output path (see dev->features)
  *
  * This function segments the given skb and returns a list of segments.
+ *
+ * It may return NULL if the skb requires no segmentation. This is
+ * only possible when GSO is used for verifying header integrity.
  */
-struct sk_buff *skb_gso_segment(struct sk_buff *skb, int sg)
+struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
 {
 	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
 	struct packet_type *ptype;
 	int type = skb->protocol;
+	int err;
 
 	BUG_ON(skb_shinfo(skb)->frag_list);
-	BUG_ON(skb->ip_summed != CHECKSUM_HW);
 
 	skb->mac.raw = skb->data;
 	skb->mac_len = skb->nh.raw - skb->data;
 	__skb_pull(skb, skb->mac_len);
 
+	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
+		if (skb_header_cloned(skb) &&
+		    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
+			return ERR_PTR(err);
+	}
+
 	rcu_read_lock();
 	list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type) & 15], list) {
 		if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
-			segs = ptype->gso_segment(skb, sg);
+			if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
+				err = ptype->gso_send_check(skb);
+				segs = ERR_PTR(err);
+				if (err || skb_gso_ok(skb, features))
+					break;
+				__skb_push(skb, skb->data - skb->nh.raw);
+			}
+			segs = ptype->gso_segment(skb, features);
 			break;
 		}
 	}
 	rcu_read_unlock();
 
+	__skb_push(skb, skb->data - skb->mac.raw);
+
 	return segs;
 }
 
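skb_gso_segment() now has three distinct results: a list of segments, NULL
(header verification only, as the new comment says), or an errno encoded in
the pointer itself. The encoding is the usual linux/err.h idiom, which in
essence is (a sketch, not the verbatim header):

static inline void *ERR_PTR(long error)
{
	return (void *)error;	/* small negative errnos land in the last,
				 * never-mapped page of the address space */
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline long IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-4095;
}

That is why dev_gso_segment() below can distinguish !segs from IS_ERR(segs)
without ambiguity.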
@@ -1234,7 +1267,6 @@ void netdev_rx_csum_fault(struct net_device *dev)
 EXPORT_SYMBOL(netdev_rx_csum_fault);
 #endif
 
-#ifdef CONFIG_HIGHMEM
 /* Actually, we should eliminate this check as soon as we know, that:
  * 1. IOMMU is present and allows to map all the memory.
  * 2. No high memory really exists on this machine.
@@ -1242,6 +1274,7 @@ EXPORT_SYMBOL(netdev_rx_csum_fault);
 
 static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
 {
+#ifdef CONFIG_HIGHMEM
 	int i;
 
 	if (dev->features & NETIF_F_HIGHDMA)
@@ -1251,11 +1284,9 @@ static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
 		if (PageHighMem(skb_shinfo(skb)->frags[i].page))
 			return 1;
 
+#endif
 	return 0;
 }
-#else
-#define illegal_highdma(dev, skb) (0)
-#endif
 
 struct dev_gso_cb {
 	void (*destructor)(struct sk_buff *skb);
@@ -1291,9 +1322,15 @@ static int dev_gso_segment(struct sk_buff *skb)
 {
 	struct net_device *dev = skb->dev;
 	struct sk_buff *segs;
+	int features = dev->features & ~(illegal_highdma(dev, skb) ?
+					 NETIF_F_SG : 0);
+
+	segs = skb_gso_segment(skb, features);
+
+	/* Verifying header integrity only. */
+	if (!segs)
+		return 0;
 
-	segs = skb_gso_segment(skb, dev->features & NETIF_F_SG &&
-			       !illegal_highdma(dev, skb));
 	if (unlikely(IS_ERR(segs)))
 		return PTR_ERR(segs);
 
@@ -1310,13 +1347,17 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		if (netdev_nit)
 			dev_queue_xmit_nit(skb, dev);
 
-		if (!netif_needs_gso(dev, skb))
-			return dev->hard_start_xmit(skb, dev);
+		if (netif_needs_gso(dev, skb)) {
+			if (unlikely(dev_gso_segment(skb)))
+				goto out_kfree_skb;
+			if (skb->next)
+				goto gso;
+		}
 
-		if (unlikely(dev_gso_segment(skb)))
-			goto out_kfree_skb;
+		return dev->hard_start_xmit(skb, dev);
 	}
 
+gso:
 	do {
 		struct sk_buff *nskb = skb->next;
 		int rc;
@@ -1325,9 +1366,12 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		nskb->next = NULL;
 		rc = dev->hard_start_xmit(nskb, dev);
 		if (unlikely(rc)) {
+			nskb->next = skb->next;
 			skb->next = nskb;
 			return rc;
 		}
+		if (unlikely(netif_queue_stopped(dev) && skb->next))
+			return NETDEV_TX_BUSY;
 	} while (skb->next);
 
 	skb->destructor = DEV_GSO_CB(skb)->destructor;
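The two additions in the transmit loop above are independent fixes: the
netif_queue_stopped() test stops hammering a device whose queue filled up
mid-list, and the nskb->next relink keeps the untransmitted tail reachable
when one segment fails. The relink is easiest to see on a plain list
(userspace sketch; fake_xmit() and struct seg are invented for the demo):

#include <stdio.h>

struct seg { int id; struct seg *next; };

static int fake_xmit(struct seg *s)
{
	return s->id == 2 ? -1 : 0;	/* pretend segment 2 fails */
}

static int xmit_list(struct seg *head)
{
	while (head->next) {
		struct seg *nskb = head->next;

		head->next = nskb->next;
		nskb->next = NULL;
		if (fake_xmit(nskb)) {
			nskb->next = head->next;	/* the fix: keep the tail */
			head->next = nskb;		/* requeue failed segment */
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	struct seg s3 = { 3, NULL }, s2 = { 2, &s3 }, s1 = { 1, &s2 };
	struct seg head = { 0, &s1 };
	struct seg *s;

	if (xmit_list(&head))
		for (s = head.next; s; s = s->next)
			printf("still queued: %d\n", s->id);	/* 2, then 3 */
	return 0;
}

Without the relink, requeuing the failed segment would also truncate the
list at that point, losing every segment behind it.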
@@ -1402,11 +1446,11 @@ int dev_queue_xmit(struct sk_buff *skb)
 	/* If packet is not checksummed and device does not support
 	 * checksumming for this protocol, complete checksumming here.
 	 */
-	if (skb->ip_summed == CHECKSUM_HW &&
+	if (skb->ip_summed == CHECKSUM_PARTIAL &&
 	    (!(dev->features & NETIF_F_GEN_CSUM) &&
 	     (!(dev->features & NETIF_F_IP_CSUM) ||
 	      skb->protocol != htons(ETH_P_IP))))
-		if (skb_checksum_help(skb, 0))
+		if (skb_checksum_help(skb))
 			goto out_kfree_skb;
 
 gso:
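The CHECKSUM_HW references in this file are gone because the old value
conflated two directions. After the split, the ip_summed states look roughly
like this (a paraphrase of the skbuff.h documentation, not the verbatim
header):

/*
 * CHECKSUM_NONE        - checksum not computed / not needed
 * CHECKSUM_UNNECESSARY - rx: hardware already verified the checksum
 * CHECKSUM_COMPLETE    - rx: skb->csum holds the checksum over the whole
 *                        packet (the old CHECKSUM_HW on input)
 * CHECKSUM_PARTIAL     - tx: the checksum still has to be finished and
 *                        stored at skb->h.raw + skb->csum (the old
 *                        CHECKSUM_HW on output)
 */

Hence the rename in dev_queue_xmit() above: an outgoing packet that still
needs help is CHECKSUM_PARTIAL, and skb_checksum_help() earlier in the file
now keys off CHECKSUM_COMPLETE instead of an explicit "inward" flag.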
@@ -1436,14 +1480,16 @@ gso:
 	if (q->enqueue) {
 		/* Grab device queue */
 		spin_lock(&dev->queue_lock);
+		q = dev->qdisc;
+		if (q->enqueue) {
+			rc = q->enqueue(skb, q);
+			qdisc_run(dev);
+			spin_unlock(&dev->queue_lock);
 
-		rc = q->enqueue(skb, q);
-
-		qdisc_run(dev);
+			rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc;
+			goto out;
+		}
-
 		spin_unlock(&dev->queue_lock);
-		rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc;
-		goto out;
 	}
 
 	/* The device has no queue. Common case for software devices:
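The dev_queue_xmit() reshuffle above is a check/recheck pattern: dev->qdisc
is sampled once outside dev->queue_lock as a cheap hint, but another CPU may
swap the qdisc in the meantime, so the pointer is loaded again and re-tested
after the lock is taken, and only that copy is used. The same shape in a
userspace sketch (hypothetical types, pthreads standing in for the spinlock):

#include <pthread.h>
#include <stddef.h>

struct qdisc {
	int (*enqueue)(void *pkt, struct qdisc *q);
};

struct device {
	pthread_mutex_t queue_lock;
	struct qdisc *qdisc;		/* may be replaced concurrently */
};

static int enqueue_packet(struct device *dev, void *pkt)
{
	struct qdisc *q = dev->qdisc;	/* unlocked read: a hint only */
	int rc = -1;

	if (q->enqueue) {
		pthread_mutex_lock(&dev->queue_lock);
		q = dev->qdisc;		/* reload under the lock */
		if (q->enqueue)		/* recheck: it may have changed */
			rc = q->enqueue(pkt, q);
		pthread_mutex_unlock(&dev->queue_lock);
	}
	return rc;
}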
@@ -1586,26 +1632,10 @@ static inline struct net_device *skb_bond(struct sk_buff *skb)
 	struct net_device *dev = skb->dev;
 
 	if (dev->master) {
-		/*
-		 * On bonding slaves other than the currently active
-		 * slave, suppress duplicates except for 802.3ad
-		 * ETH_P_SLOW and alb non-mcast/bcast.
-		 */
-		if (dev->priv_flags & IFF_SLAVE_INACTIVE) {
-			if (dev->master->priv_flags & IFF_MASTER_ALB) {
-				if (skb->pkt_type != PACKET_BROADCAST &&
-				    skb->pkt_type != PACKET_MULTICAST)
-					goto keep;
-			}
-
-			if (dev->master->priv_flags & IFF_MASTER_8023AD &&
-			    skb->protocol == __constant_htons(ETH_P_SLOW))
-				goto keep;
-
+		if (skb_bond_should_drop(skb)) {
 			kfree_skb(skb);
 			return NULL;
 		}
-keep:
 		skb->dev = dev->master;
 	}
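skb_bond_should_drop() itself is outside this diff, but the block deleted
above is precisely the logic it absorbs. A reconstruction of what the helper
must check, going only by that removed code (kernel context assumed, so this
is a sketch rather than the verbatim helper):

static inline int skb_bond_should_drop(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;

	/* On bonding slaves other than the currently active slave,
	 * suppress duplicates except for 802.3ad ETH_P_SLOW and
	 * alb non-mcast/bcast (per the comment deleted above). */
	if (dev->priv_flags & IFF_SLAVE_INACTIVE) {
		if (dev->master->priv_flags & IFF_MASTER_ALB &&
		    skb->pkt_type != PACKET_BROADCAST &&
		    skb->pkt_type != PACKET_MULTICAST)
			return 0;

		if (dev->master->priv_flags & IFF_MASTER_8023AD &&
		    skb->protocol == __constant_htons(ETH_P_SLOW))
			return 0;

		return 1;
	}
	return 0;
}

The caller has already tested dev->master, so the dereferences inside are as
safe here as they were in the inline version.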
@@ -1712,7 +1742,7 @@ static int ing_filter(struct sk_buff *skb)
 	if (dev->qdisc_ingress) {
 		__u32 ttl = (__u32) G_TC_RTTL(skb->tc_verd);
 		if (MAX_RED_LOOP < ttl++) {
-			printk("Redir loop detected Dropping packet (%s->%s)\n",
+			printk(KERN_WARNING "Redir loop detected Dropping packet (%s->%s)\n",
 				skb->input_dev->name, skb->dev->name);
 			return TC_ACT_SHOT;
 		}
@@ -2907,7 +2937,7 @@ int register_netdevice(struct net_device *dev)
 	/* Fix illegal SG+CSUM combinations. */
 	if ((dev->features & NETIF_F_SG) &&
 	    !(dev->features & NETIF_F_ALL_CSUM)) {
-		printk("%s: Dropping NETIF_F_SG since no checksum feature.\n",
+		printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no checksum feature.\n",
 		       dev->name);
 		dev->features &= ~NETIF_F_SG;
 	}
@@ -2915,7 +2945,7 @@
 	/* TSO requires that SG is present as well. */
 	if ((dev->features & NETIF_F_TSO) &&
 	    !(dev->features & NETIF_F_SG)) {
-		printk("%s: Dropping NETIF_F_TSO since no SG feature.\n",
+		printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no SG feature.\n",
 		       dev->name);
 		dev->features &= ~NETIF_F_TSO;
 	}
@@ -3165,13 +3195,15 @@ struct net_device *alloc_netdev(int sizeof_priv, const char *name,
 	struct net_device *dev;
 	int alloc_size;
 
+	BUG_ON(strlen(name) >= sizeof(dev->name));
+
 	/* ensure 32-byte alignment of both the device and private area */
 	alloc_size = (sizeof(*dev) + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST;
 	alloc_size += sizeof_priv + NETDEV_ALIGN_CONST;
 
 	p = kzalloc(alloc_size, GFP_KERNEL);
 	if (!p) {
-		printk(KERN_ERR "alloc_dev: Unable to allocate device.\n");
+		printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
 		return NULL;
 	}
 
@@ -3386,12 +3418,9 @@ static void net_dma_rebalance(void)
 	unsigned int cpu, i, n;
 	struct dma_chan *chan;
 
-	lock_cpu_hotplug();
-
 	if (net_dma_count == 0) {
 		for_each_online_cpu(cpu)
-			rcu_assign_pointer(per_cpu(softnet_data.net_dma, cpu), NULL);
-		unlock_cpu_hotplug();
+			rcu_assign_pointer(per_cpu(softnet_data, cpu).net_dma, NULL);
 		return;
 	}
 
@@ -3404,15 +3433,13 @@
 			+ (i < (num_online_cpus() % net_dma_count) ? 1 : 0));
 
 		while(n) {
-			per_cpu(softnet_data.net_dma, cpu) = chan;
+			per_cpu(softnet_data, cpu).net_dma = chan;
 			cpu = next_cpu(cpu, cpu_online_map);
 			n--;
 		}
 		i++;
 	}
 	rcu_read_unlock();
-
-	unlock_cpu_hotplug();
 }
 
 /**
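The two per_cpu() fixes above look cosmetic but are not just style: per_cpu()
is a macro that token-pastes its first argument into the name of the per-CPU
variable, so the supported spelling passes the bare variable name and selects
the member on the result. A userspace mock (arrays indexed by CPU standing in
for real per-CPU storage) makes the difference concrete:

#include <stdio.h>

#define NR_CPUS 4

struct softnet_data { void *net_dma; };

/* Mock per-CPU storage: one slot per CPU, name mangled by token pasting. */
#define DEFINE_PER_CPU(type, name)	type per_cpu__##name[NR_CPUS]
#define per_cpu(name, cpu)		(per_cpu__##name[(cpu)])

static DEFINE_PER_CPU(struct softnet_data, softnet_data);

int main(void)
{
	int chan = 42;

	/* Correct: select CPU 1's struct, then take the member. */
	per_cpu(softnet_data, 1).net_dma = &chan;

	/* per_cpu(softnet_data.net_dma, 1) would paste into
	 * per_cpu__softnet_data.net_dma[1] -- member access on an
	 * array -- and not even compile against this mock. */
	printf("%p\n", per_cpu(softnet_data, 1).net_dma);
	return 0;
}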
