aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2017-04-21 16:20:47 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2017-04-21 16:20:47 -0400
commit057a650bdcb05ec5947558b6baa5305a3cb15f17 (patch)
tree92ed7c7d193690a8ee5b6930548d478d2fd54872 /drivers
parent92b4fc75636be07af00b1c085513ce98e6bab324 (diff)
parentc70b17b775edb21280e9de7531acf6db3b365274 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller: 1) Don't race in IPSEC dumps, from Yuejie Shi. 2) Verify lengths properly in IPSEC requests, from Herbert Xu. 3) Fix out of bounds access in ipv6 segment routing code, from David Lebrun. 4) Don't write into the header of cloned SKBs in smsc95xx driver, from James Hughes. 5) Several other drivers have this bug too, fix them. From Eric Dumazet. 6) Fix access to uninitialized data in TC action cookie code, from Wolfgang Bumiller. 7) Fix double free in IPV6 segment routing, again from David Lebrun. 8) Don't let userspace set the RTF_PCPU flag, oops. From David Ahern. 9) Fix use after free in qrtr code, from Dan Carpenter. 10) Don't double-destroy devices in ip6mr code, from Nikolay Aleksandrov. 11) Don't pass out-of-range TX queue indices into drivers, from Tushar Dave. * git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (30 commits) netpoll: Check for skb->queue_mapping ip6mr: fix notification device destruction bpf, doc: update bpf maintainers entry net: qrtr: potential use after free in qrtr_sendmsg() bpf: Fix values type used in test_maps net: ipv6: RTF_PCPU should not be settable from userspace gso: Validate assumption of frag_list segmentation kaweth: use skb_cow_head() to deal with cloned skbs ch9200: use skb_cow_head() to deal with cloned skbs lan78xx: use skb_cow_head() to deal with cloned skbs sr9700: use skb_cow_head() to deal with cloned skbs cx82310_eth: use skb_cow_head() to deal with cloned skbs smsc75xx: use skb_cow_head() to deal with cloned skbs ipv6: sr: fix double free of skb after handling invalid SRH MAINTAINERS: Add "B:" field for networking. net sched actions: allocate act cookie early qed: Fix issue in populating the PFC config parameters. qed: Fix possible system hang in the dcbnl-getdcbx() path. qed: Fix sending an invalid PFC error mask to MFW. qed: Fix possible error in populating max_tc field. ...
Diffstat (limited to 'drivers')
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dcbx.c13
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c122
-rw-r--r--drivers/net/phy/dp83640.c2
-rw-r--r--drivers/net/usb/ch9200.c9
-rw-r--r--drivers/net/usb/cx82310_eth.c7
-rw-r--r--drivers/net/usb/kaweth.c18
-rw-r--r--drivers/net/usb/lan78xx.c9
-rw-r--r--drivers/net/usb/smsc75xx.c8
-rw-r--r--drivers/net/usb/smsc95xx.c12
-rw-r--r--drivers/net/usb/sr9700.c9
10 files changed, 101 insertions, 108 deletions
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index 5bd36a4a8fcd..a6e2bbe629bd 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -583,6 +583,13 @@ qed_dcbx_get_ets_data(struct qed_hwfn *p_hwfn,
583 p_params->ets_cbs, 583 p_params->ets_cbs,
584 p_ets->pri_tc_tbl[0], p_params->max_ets_tc); 584 p_ets->pri_tc_tbl[0], p_params->max_ets_tc);
585 585
586 if (p_params->ets_enabled && !p_params->max_ets_tc) {
587 p_params->max_ets_tc = QED_MAX_PFC_PRIORITIES;
588 DP_VERBOSE(p_hwfn, QED_MSG_DCB,
589 "ETS params: max_ets_tc is forced to %d\n",
590 p_params->max_ets_tc);
591 }
592
586 /* 8 bit tsa and bw data corresponding to each of the 8 TC's are 593 /* 8 bit tsa and bw data corresponding to each of the 8 TC's are
587 * encoded in a type u32 array of size 2. 594 * encoded in a type u32 array of size 2.
588 */ 595 */
@@ -1001,6 +1008,8 @@ qed_dcbx_set_pfc_data(struct qed_hwfn *p_hwfn,
1001 u8 pfc_map = 0; 1008 u8 pfc_map = 0;
1002 int i; 1009 int i;
1003 1010
1011 *pfc &= ~DCBX_PFC_ERROR_MASK;
1012
1004 if (p_params->pfc.willing) 1013 if (p_params->pfc.willing)
1005 *pfc |= DCBX_PFC_WILLING_MASK; 1014 *pfc |= DCBX_PFC_WILLING_MASK;
1006 else 1015 else
@@ -1255,7 +1264,7 @@ static struct qed_dcbx_get *qed_dcbnl_get_dcbx(struct qed_hwfn *hwfn,
1255{ 1264{
1256 struct qed_dcbx_get *dcbx_info; 1265 struct qed_dcbx_get *dcbx_info;
1257 1266
1258 dcbx_info = kzalloc(sizeof(*dcbx_info), GFP_KERNEL); 1267 dcbx_info = kmalloc(sizeof(*dcbx_info), GFP_ATOMIC);
1259 if (!dcbx_info) 1268 if (!dcbx_info)
1260 return NULL; 1269 return NULL;
1261 1270
@@ -2073,6 +2082,8 @@ static int qed_dcbnl_ieee_setpfc(struct qed_dev *cdev, struct ieee_pfc *pfc)
2073 for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) 2082 for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++)
2074 dcbx_set.config.params.pfc.prio[i] = !!(pfc->pfc_en & BIT(i)); 2083 dcbx_set.config.params.pfc.prio[i] = !!(pfc->pfc_en & BIT(i));
2075 2084
2085 dcbx_set.config.params.pfc.max_tc = pfc->pfc_cap;
2086
2076 ptt = qed_ptt_acquire(hwfn); 2087 ptt = qed_ptt_acquire(hwfn);
2077 if (!ptt) 2088 if (!ptt)
2078 return -EINVAL; 2089 return -EINVAL;
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 54248775f227..f68c4db656ed 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1127,12 +1127,70 @@ static struct mdiobb_ops bb_ops = {
1127 .get_mdio_data = sh_get_mdio, 1127 .get_mdio_data = sh_get_mdio,
1128}; 1128};
1129 1129
1130/* free Tx skb function */
1131static int sh_eth_tx_free(struct net_device *ndev, bool sent_only)
1132{
1133 struct sh_eth_private *mdp = netdev_priv(ndev);
1134 struct sh_eth_txdesc *txdesc;
1135 int free_num = 0;
1136 int entry;
1137 bool sent;
1138
1139 for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
1140 entry = mdp->dirty_tx % mdp->num_tx_ring;
1141 txdesc = &mdp->tx_ring[entry];
1142 sent = !(txdesc->status & cpu_to_le32(TD_TACT));
1143 if (sent_only && !sent)
1144 break;
1145 /* TACT bit must be checked before all the following reads */
1146 dma_rmb();
1147 netif_info(mdp, tx_done, ndev,
1148 "tx entry %d status 0x%08x\n",
1149 entry, le32_to_cpu(txdesc->status));
1150 /* Free the original skb. */
1151 if (mdp->tx_skbuff[entry]) {
1152 dma_unmap_single(&ndev->dev, le32_to_cpu(txdesc->addr),
1153 le32_to_cpu(txdesc->len) >> 16,
1154 DMA_TO_DEVICE);
1155 dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
1156 mdp->tx_skbuff[entry] = NULL;
1157 free_num++;
1158 }
1159 txdesc->status = cpu_to_le32(TD_TFP);
1160 if (entry >= mdp->num_tx_ring - 1)
1161 txdesc->status |= cpu_to_le32(TD_TDLE);
1162
1163 if (sent) {
1164 ndev->stats.tx_packets++;
1165 ndev->stats.tx_bytes += le32_to_cpu(txdesc->len) >> 16;
1166 }
1167 }
1168 return free_num;
1169}
1170
1130/* free skb and descriptor buffer */ 1171/* free skb and descriptor buffer */
1131static void sh_eth_ring_free(struct net_device *ndev) 1172static void sh_eth_ring_free(struct net_device *ndev)
1132{ 1173{
1133 struct sh_eth_private *mdp = netdev_priv(ndev); 1174 struct sh_eth_private *mdp = netdev_priv(ndev);
1134 int ringsize, i; 1175 int ringsize, i;
1135 1176
1177 if (mdp->rx_ring) {
1178 for (i = 0; i < mdp->num_rx_ring; i++) {
1179 if (mdp->rx_skbuff[i]) {
1180 struct sh_eth_rxdesc *rxdesc = &mdp->rx_ring[i];
1181
1182 dma_unmap_single(&ndev->dev,
1183 le32_to_cpu(rxdesc->addr),
1184 ALIGN(mdp->rx_buf_sz, 32),
1185 DMA_FROM_DEVICE);
1186 }
1187 }
1188 ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
1189 dma_free_coherent(NULL, ringsize, mdp->rx_ring,
1190 mdp->rx_desc_dma);
1191 mdp->rx_ring = NULL;
1192 }
1193
1136 /* Free Rx skb ringbuffer */ 1194 /* Free Rx skb ringbuffer */
1137 if (mdp->rx_skbuff) { 1195 if (mdp->rx_skbuff) {
1138 for (i = 0; i < mdp->num_rx_ring; i++) 1196 for (i = 0; i < mdp->num_rx_ring; i++)
@@ -1141,27 +1199,18 @@ static void sh_eth_ring_free(struct net_device *ndev)
1141 kfree(mdp->rx_skbuff); 1199 kfree(mdp->rx_skbuff);
1142 mdp->rx_skbuff = NULL; 1200 mdp->rx_skbuff = NULL;
1143 1201
1144 /* Free Tx skb ringbuffer */
1145 if (mdp->tx_skbuff) {
1146 for (i = 0; i < mdp->num_tx_ring; i++)
1147 dev_kfree_skb(mdp->tx_skbuff[i]);
1148 }
1149 kfree(mdp->tx_skbuff);
1150 mdp->tx_skbuff = NULL;
1151
1152 if (mdp->rx_ring) {
1153 ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
1154 dma_free_coherent(NULL, ringsize, mdp->rx_ring,
1155 mdp->rx_desc_dma);
1156 mdp->rx_ring = NULL;
1157 }
1158
1159 if (mdp->tx_ring) { 1202 if (mdp->tx_ring) {
1203 sh_eth_tx_free(ndev, false);
1204
1160 ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring; 1205 ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
1161 dma_free_coherent(NULL, ringsize, mdp->tx_ring, 1206 dma_free_coherent(NULL, ringsize, mdp->tx_ring,
1162 mdp->tx_desc_dma); 1207 mdp->tx_desc_dma);
1163 mdp->tx_ring = NULL; 1208 mdp->tx_ring = NULL;
1164 } 1209 }
1210
1211 /* Free Tx skb ringbuffer */
1212 kfree(mdp->tx_skbuff);
1213 mdp->tx_skbuff = NULL;
1165} 1214}
1166 1215
1167/* format skb and descriptor buffer */ 1216/* format skb and descriptor buffer */
@@ -1409,43 +1458,6 @@ static void sh_eth_dev_exit(struct net_device *ndev)
1409 update_mac_address(ndev); 1458 update_mac_address(ndev);
1410} 1459}
1411 1460
1412/* free Tx skb function */
1413static int sh_eth_txfree(struct net_device *ndev)
1414{
1415 struct sh_eth_private *mdp = netdev_priv(ndev);
1416 struct sh_eth_txdesc *txdesc;
1417 int free_num = 0;
1418 int entry;
1419
1420 for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
1421 entry = mdp->dirty_tx % mdp->num_tx_ring;
1422 txdesc = &mdp->tx_ring[entry];
1423 if (txdesc->status & cpu_to_le32(TD_TACT))
1424 break;
1425 /* TACT bit must be checked before all the following reads */
1426 dma_rmb();
1427 netif_info(mdp, tx_done, ndev,
1428 "tx entry %d status 0x%08x\n",
1429 entry, le32_to_cpu(txdesc->status));
1430 /* Free the original skb. */
1431 if (mdp->tx_skbuff[entry]) {
1432 dma_unmap_single(&ndev->dev, le32_to_cpu(txdesc->addr),
1433 le32_to_cpu(txdesc->len) >> 16,
1434 DMA_TO_DEVICE);
1435 dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
1436 mdp->tx_skbuff[entry] = NULL;
1437 free_num++;
1438 }
1439 txdesc->status = cpu_to_le32(TD_TFP);
1440 if (entry >= mdp->num_tx_ring - 1)
1441 txdesc->status |= cpu_to_le32(TD_TDLE);
1442
1443 ndev->stats.tx_packets++;
1444 ndev->stats.tx_bytes += le32_to_cpu(txdesc->len) >> 16;
1445 }
1446 return free_num;
1447}
1448
1449/* Packet receive function */ 1461/* Packet receive function */
1450static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) 1462static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1451{ 1463{
@@ -1690,7 +1702,7 @@ static void sh_eth_error(struct net_device *ndev, u32 intr_status)
1690 intr_status, mdp->cur_tx, mdp->dirty_tx, 1702 intr_status, mdp->cur_tx, mdp->dirty_tx,
1691 (u32)ndev->state, edtrr); 1703 (u32)ndev->state, edtrr);
1692 /* dirty buffer free */ 1704 /* dirty buffer free */
1693 sh_eth_txfree(ndev); 1705 sh_eth_tx_free(ndev, true);
1694 1706
1695 /* SH7712 BUG */ 1707 /* SH7712 BUG */
1696 if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) { 1708 if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
@@ -1751,7 +1763,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
1751 /* Clear Tx interrupts */ 1763 /* Clear Tx interrupts */
1752 sh_eth_write(ndev, intr_status & cd->tx_check, EESR); 1764 sh_eth_write(ndev, intr_status & cd->tx_check, EESR);
1753 1765
1754 sh_eth_txfree(ndev); 1766 sh_eth_tx_free(ndev, true);
1755 netif_wake_queue(ndev); 1767 netif_wake_queue(ndev);
1756 } 1768 }
1757 1769
@@ -2412,7 +2424,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
2412 2424
2413 spin_lock_irqsave(&mdp->lock, flags); 2425 spin_lock_irqsave(&mdp->lock, flags);
2414 if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) { 2426 if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
2415 if (!sh_eth_txfree(ndev)) { 2427 if (!sh_eth_tx_free(ndev, true)) {
2416 netif_warn(mdp, tx_queued, ndev, "TxFD exhausted.\n"); 2428 netif_warn(mdp, tx_queued, ndev, "TxFD exhausted.\n");
2417 netif_stop_queue(ndev); 2429 netif_stop_queue(ndev);
2418 spin_unlock_irqrestore(&mdp->lock, flags); 2430 spin_unlock_irqrestore(&mdp->lock, flags);
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index e2460a57e4b1..ed0d10f54f26 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -1438,8 +1438,6 @@ static bool dp83640_rxtstamp(struct phy_device *phydev,
1438 skb_info->tmo = jiffies + SKB_TIMESTAMP_TIMEOUT; 1438 skb_info->tmo = jiffies + SKB_TIMESTAMP_TIMEOUT;
1439 skb_queue_tail(&dp83640->rx_queue, skb); 1439 skb_queue_tail(&dp83640->rx_queue, skb);
1440 schedule_delayed_work(&dp83640->ts_work, SKB_TIMESTAMP_TIMEOUT); 1440 schedule_delayed_work(&dp83640->ts_work, SKB_TIMESTAMP_TIMEOUT);
1441 } else {
1442 netif_rx_ni(skb);
1443 } 1441 }
1444 1442
1445 return true; 1443 return true;
diff --git a/drivers/net/usb/ch9200.c b/drivers/net/usb/ch9200.c
index 8a40202c0a17..c4f1c363e24b 100644
--- a/drivers/net/usb/ch9200.c
+++ b/drivers/net/usb/ch9200.c
@@ -254,14 +254,9 @@ static struct sk_buff *ch9200_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
254 tx_overhead = 0x40; 254 tx_overhead = 0x40;
255 255
256 len = skb->len; 256 len = skb->len;
257 if (skb_headroom(skb) < tx_overhead) { 257 if (skb_cow_head(skb, tx_overhead)) {
258 struct sk_buff *skb2;
259
260 skb2 = skb_copy_expand(skb, tx_overhead, 0, flags);
261 dev_kfree_skb_any(skb); 258 dev_kfree_skb_any(skb);
262 skb = skb2; 259 return NULL;
263 if (!skb)
264 return NULL;
265 } 260 }
266 261
267 __skb_push(skb, tx_overhead); 262 __skb_push(skb, tx_overhead);
diff --git a/drivers/net/usb/cx82310_eth.c b/drivers/net/usb/cx82310_eth.c
index e221bfcee76b..947bea81d924 100644
--- a/drivers/net/usb/cx82310_eth.c
+++ b/drivers/net/usb/cx82310_eth.c
@@ -293,12 +293,9 @@ static struct sk_buff *cx82310_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
293{ 293{
294 int len = skb->len; 294 int len = skb->len;
295 295
296 if (skb_headroom(skb) < 2) { 296 if (skb_cow_head(skb, 2)) {
297 struct sk_buff *skb2 = skb_copy_expand(skb, 2, 0, flags);
298 dev_kfree_skb_any(skb); 297 dev_kfree_skb_any(skb);
299 skb = skb2; 298 return NULL;
300 if (!skb)
301 return NULL;
302 } 299 }
303 skb_push(skb, 2); 300 skb_push(skb, 2);
304 301
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index 876f02f4945e..2a2c3edb6bad 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -803,18 +803,12 @@ static netdev_tx_t kaweth_start_xmit(struct sk_buff *skb,
803 } 803 }
804 804
805 /* We now decide whether we can put our special header into the sk_buff */ 805 /* We now decide whether we can put our special header into the sk_buff */
806 if (skb_cloned(skb) || skb_headroom(skb) < 2) { 806 if (skb_cow_head(skb, 2)) {
807 /* no such luck - we make our own */ 807 kaweth->stats.tx_errors++;
808 struct sk_buff *copied_skb; 808 netif_start_queue(net);
809 copied_skb = skb_copy_expand(skb, 2, 0, GFP_ATOMIC); 809 spin_unlock_irq(&kaweth->device_lock);
810 dev_kfree_skb_irq(skb); 810 dev_kfree_skb_any(skb);
811 skb = copied_skb; 811 return NETDEV_TX_OK;
812 if (!copied_skb) {
813 kaweth->stats.tx_errors++;
814 netif_start_queue(net);
815 spin_unlock_irq(&kaweth->device_lock);
816 return NETDEV_TX_OK;
817 }
818 } 812 }
819 813
820 private_header = (__le16 *)__skb_push(skb, 2); 814 private_header = (__le16 *)__skb_push(skb, 2);
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 9889a70ff4f6..636f48f19d1e 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -2607,14 +2607,9 @@ static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
2607{ 2607{
2608 u32 tx_cmd_a, tx_cmd_b; 2608 u32 tx_cmd_a, tx_cmd_b;
2609 2609
2610 if (skb_headroom(skb) < TX_OVERHEAD) { 2610 if (skb_cow_head(skb, TX_OVERHEAD)) {
2611 struct sk_buff *skb2;
2612
2613 skb2 = skb_copy_expand(skb, TX_OVERHEAD, 0, flags);
2614 dev_kfree_skb_any(skb); 2611 dev_kfree_skb_any(skb);
2615 skb = skb2; 2612 return NULL;
2616 if (!skb)
2617 return NULL;
2618 } 2613 }
2619 2614
2620 if (lan78xx_linearize(skb) < 0) 2615 if (lan78xx_linearize(skb) < 0)
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 0b17b40d7a4f..190de9a90f73 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -2203,13 +2203,9 @@ static struct sk_buff *smsc75xx_tx_fixup(struct usbnet *dev,
2203{ 2203{
2204 u32 tx_cmd_a, tx_cmd_b; 2204 u32 tx_cmd_a, tx_cmd_b;
2205 2205
2206 if (skb_headroom(skb) < SMSC75XX_TX_OVERHEAD) { 2206 if (skb_cow_head(skb, SMSC75XX_TX_OVERHEAD)) {
2207 struct sk_buff *skb2 =
2208 skb_copy_expand(skb, SMSC75XX_TX_OVERHEAD, 0, flags);
2209 dev_kfree_skb_any(skb); 2207 dev_kfree_skb_any(skb);
2210 skb = skb2; 2208 return NULL;
2211 if (!skb)
2212 return NULL;
2213 } 2209 }
2214 2210
2215 tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN) | TX_CMD_A_FCS; 2211 tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN) | TX_CMD_A_FCS;
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 831aa33d078a..5f19fb0f025d 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -2001,13 +2001,13 @@ static struct sk_buff *smsc95xx_tx_fixup(struct usbnet *dev,
2001 /* We do not advertise SG, so skbs should be already linearized */ 2001 /* We do not advertise SG, so skbs should be already linearized */
2002 BUG_ON(skb_shinfo(skb)->nr_frags); 2002 BUG_ON(skb_shinfo(skb)->nr_frags);
2003 2003
2004 if (skb_headroom(skb) < overhead) { 2004 /* Make writable and expand header space by overhead if required */
2005 struct sk_buff *skb2 = skb_copy_expand(skb, 2005 if (skb_cow_head(skb, overhead)) {
2006 overhead, 0, flags); 2006 /* Must deallocate here as returning NULL to indicate error
2007 * means the skb won't be deallocated in the caller.
2008 */
2007 dev_kfree_skb_any(skb); 2009 dev_kfree_skb_any(skb);
2008 skb = skb2; 2010 return NULL;
2009 if (!skb)
2010 return NULL;
2011 } 2011 }
2012 2012
2013 if (csum) { 2013 if (csum) {
diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c
index 4a1e9c489f1f..aadfe1d1c37e 100644
--- a/drivers/net/usb/sr9700.c
+++ b/drivers/net/usb/sr9700.c
@@ -456,14 +456,9 @@ static struct sk_buff *sr9700_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
456 456
457 len = skb->len; 457 len = skb->len;
458 458
459 if (skb_headroom(skb) < SR_TX_OVERHEAD) { 459 if (skb_cow_head(skb, SR_TX_OVERHEAD)) {
460 struct sk_buff *skb2;
461
462 skb2 = skb_copy_expand(skb, SR_TX_OVERHEAD, 0, flags);
463 dev_kfree_skb_any(skb); 460 dev_kfree_skb_any(skb);
464 skb = skb2; 461 return NULL;
465 if (!skb)
466 return NULL;
467 } 462 }
468 463
469 __skb_push(skb, SR_TX_OVERHEAD); 464 __skb_push(skb, SR_TX_OVERHEAD);