 -rw-r--r--  drivers/net/ethernet/freescale/gianfar.c          | 495
 -rw-r--r--  drivers/net/ethernet/freescale/gianfar.h          |  72
 -rw-r--r--  drivers/net/ethernet/freescale/gianfar_ethtool.c  |   4
 -rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c  |   1
 -rw-r--r--  net/sched/sch_qfq.c                               |   1
 5 files changed, 330 insertions(+), 243 deletions(-)
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index ff875028fdff..648ca85c5859 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -109,15 +109,15 @@
 
 #define TX_TIMEOUT      (1*HZ)
 
-const char gfar_driver_version[] = "1.3";
+const char gfar_driver_version[] = "2.0";
 
 static int gfar_enet_open(struct net_device *dev);
 static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
 static void gfar_reset_task(struct work_struct *work);
 static void gfar_timeout(struct net_device *dev);
 static int gfar_close(struct net_device *dev);
-static struct sk_buff *gfar_new_skb(struct net_device *dev,
-				    dma_addr_t *bufaddr);
+static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
+				int alloc_cnt);
 static int gfar_set_mac_address(struct net_device *dev);
 static int gfar_change_mtu(struct net_device *dev, int new_mtu);
 static irqreturn_t gfar_error(int irq, void *dev_id);
@@ -141,8 +141,7 @@ static void gfar_netpoll(struct net_device *dev);
 #endif
 int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
 static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
-static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
-			       int amount_pull, struct napi_struct *napi);
+static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb);
 static void gfar_halt_nodisable(struct gfar_private *priv);
 static void gfar_clear_exact_match(struct net_device *dev);
 static void gfar_set_mac_for_addr(struct net_device *dev, int num,
@@ -169,17 +168,15 @@ static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
 	bdp->lstatus = cpu_to_be32(lstatus);
 }
 
-static int gfar_init_bds(struct net_device *ndev)
+static void gfar_init_bds(struct net_device *ndev)
 {
 	struct gfar_private *priv = netdev_priv(ndev);
 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
 	struct gfar_priv_tx_q *tx_queue = NULL;
 	struct gfar_priv_rx_q *rx_queue = NULL;
 	struct txbd8 *txbdp;
-	struct rxbd8 *rxbdp;
 	u32 __iomem *rfbptr;
 	int i, j;
-	dma_addr_t bufaddr;
 
 	for (i = 0; i < priv->num_tx_queues; i++) {
 		tx_queue = priv->tx_queue[i];
@@ -207,40 +204,26 @@ static int gfar_init_bds(struct net_device *ndev)
 	rfbptr = &regs->rfbptr0;
 	for (i = 0; i < priv->num_rx_queues; i++) {
 		rx_queue = priv->rx_queue[i];
-		rx_queue->cur_rx = rx_queue->rx_bd_base;
-		rx_queue->skb_currx = 0;
-		rxbdp = rx_queue->rx_bd_base;
 
-		for (j = 0; j < rx_queue->rx_ring_size; j++) {
-			struct sk_buff *skb = rx_queue->rx_skbuff[j];
+		rx_queue->next_to_clean = 0;
+		rx_queue->next_to_use = 0;
+		rx_queue->next_to_alloc = 0;
 
-			if (skb) {
-				bufaddr = be32_to_cpu(rxbdp->bufPtr);
-			} else {
-				skb = gfar_new_skb(ndev, &bufaddr);
-				if (!skb) {
-					netdev_err(ndev, "Can't allocate RX buffers\n");
-					return -ENOMEM;
-				}
-				rx_queue->rx_skbuff[j] = skb;
-			}
-
-			gfar_init_rxbdp(rx_queue, rxbdp, bufaddr);
-			rxbdp++;
-		}
+		/* make sure next_to_clean != next_to_use after this
+		 * by leaving at least 1 unused descriptor
+		 */
+		gfar_alloc_rx_buffs(rx_queue, gfar_rxbd_unused(rx_queue));
 
 		rx_queue->rfbptr = rfbptr;
 		rfbptr += 2;
 	}
-
-	return 0;
 }
 
 static int gfar_alloc_skb_resources(struct net_device *ndev)
 {
 	void *vaddr;
 	dma_addr_t addr;
-	int i, j, k;
+	int i, j;
 	struct gfar_private *priv = netdev_priv(ndev);
 	struct device *dev = priv->dev;
 	struct gfar_priv_tx_q *tx_queue = NULL;
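Note: gfar_init_bds() can no longer fail because it no longer allocates anything itself; it only resets the ring indices and lets gfar_alloc_rx_buffs() fill gfar_rxbd_unused() descriptors, which by construction leaves one slot empty so that next_to_clean == next_to_use can only ever mean "ring empty". A minimal standalone sketch of that invariant (the harness is hypothetical; only the index arithmetic mirrors the patch):

```c
#include <stdio.h>

struct ring {
	unsigned int size;          /* rx_ring_size */
	unsigned int next_to_use;   /* producer index (refill) */
	unsigned int next_to_clean; /* consumer index (napi poll) */
};

/* mirrors gfar_rxbd_unused() added to gianfar.h by this patch */
static unsigned int ring_unused(const struct ring *r)
{
	if (r->next_to_clean > r->next_to_use)
		return r->next_to_clean - r->next_to_use - 1;
	return r->size + r->next_to_clean - r->next_to_use - 1;
}

int main(void)
{
	struct ring r = { .size = 256, .next_to_use = 0, .next_to_clean = 0 };

	/* freshly reset ring: only 255 of 256 descriptors get buffers */
	printf("fillable after reset: %u\n", ring_unused(&r));
	return 0;
}
```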
@@ -279,7 +262,8 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
 		rx_queue = priv->rx_queue[i];
 		rx_queue->rx_bd_base = vaddr;
 		rx_queue->rx_bd_dma_base = addr;
-		rx_queue->dev = ndev;
+		rx_queue->ndev = ndev;
+		rx_queue->dev = dev;
 		addr  += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
 		vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
 	}
@@ -294,25 +278,20 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
 		if (!tx_queue->tx_skbuff)
 			goto cleanup;
 
-		for (k = 0; k < tx_queue->tx_ring_size; k++)
-			tx_queue->tx_skbuff[k] = NULL;
+		for (j = 0; j < tx_queue->tx_ring_size; j++)
+			tx_queue->tx_skbuff[j] = NULL;
 	}
 
 	for (i = 0; i < priv->num_rx_queues; i++) {
 		rx_queue = priv->rx_queue[i];
-		rx_queue->rx_skbuff =
-			kmalloc_array(rx_queue->rx_ring_size,
-				      sizeof(*rx_queue->rx_skbuff),
-				      GFP_KERNEL);
-		if (!rx_queue->rx_skbuff)
+		rx_queue->rx_buff = kcalloc(rx_queue->rx_ring_size,
+					    sizeof(*rx_queue->rx_buff),
+					    GFP_KERNEL);
+		if (!rx_queue->rx_buff)
 			goto cleanup;
-
-		for (j = 0; j < rx_queue->rx_ring_size; j++)
-			rx_queue->rx_skbuff[j] = NULL;
 	}
 
-	if (gfar_init_bds(ndev))
-		goto cleanup;
+	gfar_init_bds(ndev);
 
 	return 0;
 
@@ -354,10 +333,8 @@ static void gfar_init_rqprm(struct gfar_private *priv)
 	}
 }
 
-static void gfar_rx_buff_size_config(struct gfar_private *priv)
+static void gfar_rx_offload_en(struct gfar_private *priv)
 {
-	int frame_size = priv->ndev->mtu + ETH_HLEN + ETH_FCS_LEN;
-
 	/* set this when rx hw offload (TOE) functions are being used */
 	priv->uses_rxfcb = 0;
 
@@ -366,16 +343,6 @@ static void gfar_rx_buff_size_config(struct gfar_private *priv)
 
 	if (priv->hwts_rx_en)
 		priv->uses_rxfcb = 1;
-
-	if (priv->uses_rxfcb)
-		frame_size += GMAC_FCB_LEN;
-
-	frame_size += priv->padding;
-
-	frame_size = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
-		     INCREMENTAL_BUFFER_SIZE;
-
-	priv->rx_buffer_size = frame_size;
 }
 
 static void gfar_mac_rx_config(struct gfar_private *priv)
@@ -609,9 +576,8 @@ static int gfar_alloc_rx_queues(struct gfar_private *priv)
 		if (!priv->rx_queue[i])
 			return -ENOMEM;
 
-		priv->rx_queue[i]->rx_skbuff = NULL;
 		priv->rx_queue[i]->qindex = i;
-		priv->rx_queue[i]->dev = priv->ndev;
+		priv->rx_queue[i]->ndev = priv->ndev;
 	}
 	return 0;
 }
@@ -1203,12 +1169,11 @@ void gfar_mac_reset(struct gfar_private *priv)
 
 	udelay(3);
 
-	/* Compute rx_buff_size based on config flags */
-	gfar_rx_buff_size_config(priv);
+	gfar_rx_offload_en(priv);
 
 	/* Initialize the max receive frame/buffer lengths */
-	gfar_write(&regs->maxfrm, priv->rx_buffer_size);
-	gfar_write(&regs->mrblr, priv->rx_buffer_size);
+	gfar_write(&regs->maxfrm, GFAR_JUMBO_FRAME_SIZE);
+	gfar_write(&regs->mrblr, GFAR_RXB_SIZE);
 
 	/* Initialize the Minimum Frame Length Register */
 	gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
@@ -1216,12 +1181,11 @@ void gfar_mac_reset(struct gfar_private *priv)
 	/* Initialize MACCFG2. */
 	tempval = MACCFG2_INIT_SETTINGS;
 
-	/* If the mtu is larger than the max size for standard
-	 * ethernet frames (ie, a jumbo frame), then set maccfg2
-	 * to allow huge frames, and to check the length
+	/* eTSEC74 erratum: Rx frames of length MAXFRM or MAXFRM-1
+	 * are marked as truncated. Avoid this by MACCFG2[Huge Frame]=1,
+	 * and by checking RxBD[LG] and discarding larger than MAXFRM.
 	 */
-	if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
-	    gfar_has_errata(priv, GFAR_ERRATA_74))
+	if (gfar_has_errata(priv, GFAR_ERRATA_74))
 		tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
 
 	gfar_write(&regs->maccfg2, tempval);
@@ -1432,8 +1396,6 @@ static int gfar_probe(struct platform_device *ofdev)
 	    priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
 		dev->needed_headroom = GMAC_FCB_LEN;
 
-	priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
-
 	/* Initializing some of the rx/tx queue level parameters */
 	for (i = 0; i < priv->num_tx_queues; i++) {
 		priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
@@ -1639,10 +1601,7 @@ static int gfar_restore(struct device *dev)
 		return 0;
 	}
 
-	if (gfar_init_bds(ndev)) {
-		free_skb_resources(priv);
-		return -ENOMEM;
-	}
+	gfar_init_bds(ndev);
 
 	gfar_mac_reset(priv);
 
@@ -1933,26 +1892,32 @@ static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
 
 static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
 {
-	struct rxbd8 *rxbdp;
-	struct gfar_private *priv = netdev_priv(rx_queue->dev);
 	int i;
 
-	rxbdp = rx_queue->rx_bd_base;
+	struct rxbd8 *rxbdp = rx_queue->rx_bd_base;
+
+	if (rx_queue->skb)
+		dev_kfree_skb(rx_queue->skb);
 
 	for (i = 0; i < rx_queue->rx_ring_size; i++) {
-		if (rx_queue->rx_skbuff[i]) {
-			dma_unmap_single(priv->dev, be32_to_cpu(rxbdp->bufPtr),
-					 priv->rx_buffer_size,
-					 DMA_FROM_DEVICE);
-			dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
-			rx_queue->rx_skbuff[i] = NULL;
-		}
+		struct gfar_rx_buff *rxb = &rx_queue->rx_buff[i];
+
 		rxbdp->lstatus = 0;
 		rxbdp->bufPtr = 0;
 		rxbdp++;
+
+		if (!rxb->page)
+			continue;
+
+		dma_unmap_single(rx_queue->dev, rxb->dma,
+				 PAGE_SIZE, DMA_FROM_DEVICE);
+		__free_page(rxb->page);
+
+		rxb->page = NULL;
 	}
-	kfree(rx_queue->rx_skbuff);
-	rx_queue->rx_skbuff = NULL;
+
+	kfree(rx_queue->rx_buff);
+	rx_queue->rx_buff = NULL;
 }
 
 /* If there are any tx skbs or rx skbs still around, free them.
@@ -1977,7 +1942,7 @@ static void free_skb_resources(struct gfar_private *priv)
 
 	for (i = 0; i < priv->num_rx_queues; i++) {
 		rx_queue = priv->rx_queue[i];
-		if (rx_queue->rx_skbuff)
+		if (rx_queue->rx_buff)
 			free_skb_rx_queue(rx_queue);
 	}
 
@@ -2535,7 +2500,7 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
 	struct gfar_private *priv = netdev_priv(dev);
 	int frame_size = new_mtu + ETH_HLEN;
 
-	if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
+	if ((frame_size < 64) || (frame_size > GFAR_JUMBO_FRAME_SIZE)) {
 		netif_err(priv, drv, dev, "Invalid MTU setting\n");
 		return -EINVAL;
 	}
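Note: with the per-MTU buffer sizing gone, the MTU check now compares against the fixed GFAR_JUMBO_FRAME_SIZE. Since frame_size here is new_mtu + ETH_HLEN (no FCS term in this particular check), the largest MTU the driver accepts works out to 9600 - 14 = 9586. A trivial check of that bound:

```c
#include <stdio.h>

#define ETH_HLEN 14
#define GFAR_JUMBO_FRAME_SIZE 9600

int main(void)
{
	int new_mtu = 9586; /* largest value that passes the driver's test */
	int frame_size = new_mtu + ETH_HLEN;

	printf("frame_size=%d accepted=%d\n", frame_size,
	       frame_size >= 64 && frame_size <= GFAR_JUMBO_FRAME_SIZE);
	return 0;
}
```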
@@ -2589,15 +2554,6 @@ static void gfar_timeout(struct net_device *dev)
 	schedule_work(&priv->reset_task);
 }
 
-static void gfar_align_skb(struct sk_buff *skb)
-{
-	/* We need the data buffer to be aligned properly. We will reserve
-	 * as many bytes as needed to align the data properly
-	 */
-	skb_reserve(skb, RXBUF_ALIGNMENT -
-		    (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)));
-}
-
 /* Interrupt Handler for Transmit complete */
 static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
 {
@@ -2704,49 +2660,85 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
 	netdev_tx_completed_queue(txq, howmany, bytes_sent);
 }
 
-static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
-{
-	struct gfar_private *priv = netdev_priv(dev);
-	struct sk_buff *skb;
-
-	skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
-	if (!skb)
-		return NULL;
-
-	gfar_align_skb(skb);
-
-	return skb;
-}
-
-static struct sk_buff *gfar_new_skb(struct net_device *dev, dma_addr_t *bufaddr)
-{
-	struct gfar_private *priv = netdev_priv(dev);
-	struct sk_buff *skb;
-	dma_addr_t addr;
-
-	skb = gfar_alloc_skb(dev);
-	if (!skb)
-		return NULL;
-
-	addr = dma_map_single(priv->dev, skb->data,
-			      priv->rx_buffer_size, DMA_FROM_DEVICE);
-	if (unlikely(dma_mapping_error(priv->dev, addr))) {
-		dev_kfree_skb_any(skb);
-		return NULL;
-	}
-
-	*bufaddr = addr;
-	return skb;
-}
-
-static inline void count_errors(unsigned short status, struct net_device *dev)
-{
-	struct gfar_private *priv = netdev_priv(dev);
-	struct net_device_stats *stats = &dev->stats;
+static bool gfar_new_page(struct gfar_priv_rx_q *rxq, struct gfar_rx_buff *rxb)
+{
+	struct page *page;
+	dma_addr_t addr;
+
+	page = dev_alloc_page();
+	if (unlikely(!page))
+		return false;
+
+	addr = dma_map_page(rxq->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+	if (unlikely(dma_mapping_error(rxq->dev, addr))) {
+		__free_page(page);
+
+		return false;
+	}
+
+	rxb->dma = addr;
+	rxb->page = page;
+	rxb->page_offset = 0;
+
+	return true;
+}
+
+static void gfar_rx_alloc_err(struct gfar_priv_rx_q *rx_queue)
+{
+	struct gfar_private *priv = netdev_priv(rx_queue->ndev);
+	struct gfar_extra_stats *estats = &priv->extra_stats;
+
+	netdev_err(rx_queue->ndev, "Can't alloc RX buffers\n");
+	atomic64_inc(&estats->rx_alloc_err);
+}
+
+static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
+				int alloc_cnt)
+{
+	struct rxbd8 *bdp;
+	struct gfar_rx_buff *rxb;
+	int i;
+
+	i = rx_queue->next_to_use;
+	bdp = &rx_queue->rx_bd_base[i];
+	rxb = &rx_queue->rx_buff[i];
+
+	while (alloc_cnt--) {
+		/* try reuse page */
+		if (unlikely(!rxb->page)) {
+			if (unlikely(!gfar_new_page(rx_queue, rxb))) {
+				gfar_rx_alloc_err(rx_queue);
+				break;
+			}
+		}
+
+		/* Setup the new RxBD */
+		gfar_init_rxbdp(rx_queue, bdp,
+				rxb->dma + rxb->page_offset + RXBUF_ALIGNMENT);
+
+		/* Update to the next pointer */
+		bdp++;
+		rxb++;
+
+		if (unlikely(++i == rx_queue->rx_ring_size)) {
+			i = 0;
+			bdp = rx_queue->rx_bd_base;
+			rxb = rx_queue->rx_buff;
+		}
+	}
+
+	rx_queue->next_to_use = i;
+	rx_queue->next_to_alloc = i;
+}
+
+static void count_errors(u32 lstatus, struct net_device *ndev)
+{
+	struct gfar_private *priv = netdev_priv(ndev);
+	struct net_device_stats *stats = &ndev->stats;
 	struct gfar_extra_stats *estats = &priv->extra_stats;
 
 	/* If the packet was truncated, none of the other errors matter */
-	if (status & RXBD_TRUNCATED) {
+	if (lstatus & BD_LFLAG(RXBD_TRUNCATED)) {
 		stats->rx_length_errors++;
 
 		atomic64_inc(&estats->rx_trunc);
@@ -2754,25 +2746,25 @@ static inline void count_errors(unsigned short status, struct net_device *dev)
 		return;
 	}
 	/* Count the errors, if there were any */
-	if (status & (RXBD_LARGE | RXBD_SHORT)) {
+	if (lstatus & BD_LFLAG(RXBD_LARGE | RXBD_SHORT)) {
 		stats->rx_length_errors++;
 
-		if (status & RXBD_LARGE)
+		if (lstatus & BD_LFLAG(RXBD_LARGE))
 			atomic64_inc(&estats->rx_large);
 		else
 			atomic64_inc(&estats->rx_short);
 	}
-	if (status & RXBD_NONOCTET) {
+	if (lstatus & BD_LFLAG(RXBD_NONOCTET)) {
 		stats->rx_frame_errors++;
 		atomic64_inc(&estats->rx_nonoctet);
 	}
-	if (status & RXBD_CRCERR) {
+	if (lstatus & BD_LFLAG(RXBD_CRCERR)) {
 		atomic64_inc(&estats->rx_crcerr);
 		stats->rx_crc_errors++;
 	}
-	if (status & RXBD_OVERRUN) {
+	if (lstatus & BD_LFLAG(RXBD_OVERRUN)) {
 		atomic64_inc(&estats->rx_overrun);
-		stats->rx_crc_errors++;
+		stats->rx_over_errors++;
 	}
 }
 
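Note: count_errors() now takes the whole 32-bit lstatus word instead of the 16-bit status half. In gianfar.h the descriptor's status and length share one big-endian u32, with BD_LFLAG() shifting 16-bit flag masks into the upper half and BD_LENGTH_MASK selecting the lower half. A sketch of that decomposition (flag and mask values copied from mainline gianfar.h to the best of my knowledge; the demo harness is not part of the patch):

```c
#include <stdint.h>
#include <stdio.h>

#define BD_LFLAG(flags)  ((uint32_t)(flags) << 16) /* flags -> upper 16 bits */
#define BD_LENGTH_MASK   0x0000ffffu               /* length -> lower 16 bits */

#define RXBD_EMPTY 0x8000
#define RXBD_LAST  0x0800

int main(void)
{
	/* a completed, last-in-frame descriptor carrying 64 bytes */
	uint32_t lstatus = BD_LFLAG(RXBD_LAST) | 64;

	if (!(lstatus & BD_LFLAG(RXBD_EMPTY)))
		printf("len=%u last=%d\n", lstatus & BD_LENGTH_MASK,
		       !!(lstatus & BD_LFLAG(RXBD_LAST)));
	return 0;
}
```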
@@ -2823,6 +2815,93 @@ static irqreturn_t gfar_transmit(int irq, void *grp_id)
 	return IRQ_HANDLED;
 }
 
+static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
+			     struct sk_buff *skb, bool first)
+{
+	unsigned int size = lstatus & BD_LENGTH_MASK;
+	struct page *page = rxb->page;
+
+	/* Remove the FCS from the packet length */
+	if (likely(lstatus & BD_LFLAG(RXBD_LAST)))
+		size -= ETH_FCS_LEN;
+
+	if (likely(first))
+		skb_put(skb, size);
+	else
+		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+				rxb->page_offset + RXBUF_ALIGNMENT,
+				size, GFAR_RXB_TRUESIZE);
+
+	/* try reuse page */
+	if (unlikely(page_count(page) != 1))
+		return false;
+
+	/* change offset to the other half */
+	rxb->page_offset ^= GFAR_RXB_TRUESIZE;
+
+	atomic_inc(&page->_count);
+
+	return true;
+}
+
+static void gfar_reuse_rx_page(struct gfar_priv_rx_q *rxq,
+			       struct gfar_rx_buff *old_rxb)
+{
+	struct gfar_rx_buff *new_rxb;
+	u16 nta = rxq->next_to_alloc;
+
+	new_rxb = &rxq->rx_buff[nta];
+
+	/* find next buf that can reuse a page */
+	nta++;
+	rxq->next_to_alloc = (nta < rxq->rx_ring_size) ? nta : 0;
+
+	/* copy page reference */
+	*new_rxb = *old_rxb;
+
+	/* sync for use by the device */
+	dma_sync_single_range_for_device(rxq->dev, old_rxb->dma,
+					 old_rxb->page_offset,
+					 GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
+}
+
+static struct sk_buff *gfar_get_next_rxbuff(struct gfar_priv_rx_q *rx_queue,
+					    u32 lstatus, struct sk_buff *skb)
+{
+	struct gfar_rx_buff *rxb = &rx_queue->rx_buff[rx_queue->next_to_clean];
+	struct page *page = rxb->page;
+	bool first = false;
+
+	if (likely(!skb)) {
+		void *buff_addr = page_address(page) + rxb->page_offset;
+
+		skb = build_skb(buff_addr, GFAR_SKBFRAG_SIZE);
+		if (unlikely(!skb)) {
+			gfar_rx_alloc_err(rx_queue);
+			return NULL;
+		}
+		skb_reserve(skb, RXBUF_ALIGNMENT);
+		first = true;
+	}
+
+	dma_sync_single_range_for_cpu(rx_queue->dev, rxb->dma, rxb->page_offset,
+				      GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
+
+	if (gfar_add_rx_frag(rxb, lstatus, skb, first)) {
+		/* reuse the free half of the page */
+		gfar_reuse_rx_page(rx_queue, rxb);
+	} else {
+		/* page cannot be reused, unmap it */
+		dma_unmap_page(rx_queue->dev, rxb->dma,
+			       PAGE_SIZE, DMA_FROM_DEVICE);
+	}
+
+	/* clear rxb content */
+	rxb->page = NULL;
+
+	return skb;
+}
+
 static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
 {
 	/* If valid headers were found, and valid sums
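Note: gfar_add_rx_frag() brings in the half-page recycling scheme familiar from several Intel NIC drivers: each 4K page is split into two GFAR_RXB_TRUESIZE (2K) halves, page_offset ^= GFAR_RXB_TRUESIZE flips between them, and a half may be handed back to hardware only while the driver holds the sole page reference (atomic_inc(&page->_count) is the pre-page_ref_inc() spelling of taking that reference). A simplified user-space model of just the reuse decision (names local to this sketch, not driver API):

```c
#include <stdbool.h>
#include <stdio.h>

#define TRUESIZE 2048u /* GFAR_RXB_TRUESIZE, half of a 4K page */

struct buff {
	unsigned int page_refs;   /* stands in for page_count(page) */
	unsigned int page_offset; /* 0 or TRUESIZE */
};

/* models the tail of gfar_add_rx_frag(): recycle only when the stack
 * no longer holds a reference to the other half of the page */
static bool try_reuse(struct buff *b)
{
	if (b->page_refs != 1)
		return false;       /* an skb still owns the other half */

	b->page_offset ^= TRUESIZE; /* flip to the free half */
	b->page_refs++;             /* account for the skb just built */
	return true;
}

int main(void)
{
	struct buff b = { .page_refs = 1, .page_offset = 0 };

	printf("reuse #1: %d, offset=%u\n", try_reuse(&b), b.page_offset);
	printf("reuse #2: %d (other half still in flight)\n", try_reuse(&b));
	return 0;
}
```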
@@ -2837,10 +2916,9 @@ static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
 }
 
 /* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
-static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
-			       int amount_pull, struct napi_struct *napi)
+static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
 {
-	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar_private *priv = netdev_priv(ndev);
 	struct rxfcb *fcb = NULL;
 
 	/* fcb is at the beginning if exists */
@@ -2849,10 +2927,8 @@ static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
 	/* Remove the FCB from the skb
 	 * Remove the padded bytes, if there are any
 	 */
-	if (amount_pull) {
-		skb_record_rx_queue(skb, fcb->rq);
-		skb_pull(skb, amount_pull);
-	}
+	if (priv->uses_rxfcb)
+		skb_pull(skb, GMAC_FCB_LEN);
 
 	/* Get receive timestamp from the skb */
 	if (priv->hwts_rx_en) {
@@ -2866,24 +2942,20 @@ static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
 	if (priv->padding)
 		skb_pull(skb, priv->padding);
 
-	if (dev->features & NETIF_F_RXCSUM)
+	if (ndev->features & NETIF_F_RXCSUM)
 		gfar_rx_checksum(skb, fcb);
 
 	/* Tell the skb what kind of packet this is */
-	skb->protocol = eth_type_trans(skb, dev);
+	skb->protocol = eth_type_trans(skb, ndev);
 
 	/* There's need to check for NETIF_F_HW_VLAN_CTAG_RX here.
 	 * Even if vlan rx accel is disabled, on some chips
 	 * RXFCB_VLN is pseudo randomly set.
 	 */
-	if (dev->features & NETIF_F_HW_VLAN_CTAG_RX &&
+	if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX &&
 	    be16_to_cpu(fcb->flags) & RXFCB_VLN)
 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
 				       be16_to_cpu(fcb->vlctl));
-
-	/* Send the packet up the stack */
-	napi_gro_receive(napi, skb);
-
 }
 
 /* gfar_clean_rx_ring() -- Processes each frame in the rx ring
@@ -2892,91 +2964,88 @@
  */
 int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
 {
-	struct net_device *dev = rx_queue->dev;
-	struct rxbd8 *bdp, *base;
-	struct sk_buff *skb;
-	int pkt_len;
-	int amount_pull;
-	int howmany = 0;
-	struct gfar_private *priv = netdev_priv(dev);
-
-	/* Get the first full descriptor */
-	bdp = rx_queue->cur_rx;
-	base = rx_queue->rx_bd_base;
-
-	amount_pull = priv->uses_rxfcb ? GMAC_FCB_LEN : 0;
-
-	while (!(be16_to_cpu(bdp->status) & RXBD_EMPTY) && rx_work_limit--) {
-		struct sk_buff *newskb;
-		dma_addr_t bufaddr;
-
-		rmb();
-
-		/* Add another skb for the future */
-		newskb = gfar_new_skb(dev, &bufaddr);
-
-		skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
-
-		dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr),
-				 priv->rx_buffer_size, DMA_FROM_DEVICE);
-
-		if (unlikely(!(be16_to_cpu(bdp->status) & RXBD_ERR) &&
-			     be16_to_cpu(bdp->length) > priv->rx_buffer_size))
-			bdp->status = cpu_to_be16(RXBD_LARGE);
-
-		/* We drop the frame if we failed to allocate a new buffer */
-		if (unlikely(!newskb ||
-			     !(be16_to_cpu(bdp->status) & RXBD_LAST) ||
-			     be16_to_cpu(bdp->status) & RXBD_ERR)) {
-			count_errors(be16_to_cpu(bdp->status), dev);
-
-			if (unlikely(!newskb)) {
-				newskb = skb;
-				bufaddr = be32_to_cpu(bdp->bufPtr);
-			} else if (skb)
-				dev_kfree_skb(skb);
-		} else {
-			/* Increment the number of packets */
-			rx_queue->stats.rx_packets++;
-			howmany++;
-
-			if (likely(skb)) {
-				pkt_len = be16_to_cpu(bdp->length) -
-					  ETH_FCS_LEN;
-				/* Remove the FCS from the packet length */
-				skb_put(skb, pkt_len);
-				rx_queue->stats.rx_bytes += pkt_len;
-				skb_record_rx_queue(skb, rx_queue->qindex);
-				gfar_process_frame(dev, skb, amount_pull,
-						   &rx_queue->grp->napi_rx);
-
-			} else {
-				netif_warn(priv, rx_err, dev, "Missing skb!\n");
-				rx_queue->stats.rx_dropped++;
-				atomic64_inc(&priv->extra_stats.rx_skbmissing);
-			}
-
-		}
-
-		rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;
-
-		/* Setup the new bdp */
-		gfar_init_rxbdp(rx_queue, bdp, bufaddr);
-
-		/* Update Last Free RxBD pointer for LFC */
-		if (unlikely(rx_queue->rfbptr && priv->tx_actual_en))
-			gfar_write(rx_queue->rfbptr, (u32)bdp);
-
-		/* Update to the next pointer */
-		bdp = next_bd(bdp, base, rx_queue->rx_ring_size);
-
-		/* update to point at the next skb */
-		rx_queue->skb_currx = (rx_queue->skb_currx + 1) &
-				      RX_RING_MOD_MASK(rx_queue->rx_ring_size);
-	}
-
-	/* Update the current rxbd pointer to be the next one */
-	rx_queue->cur_rx = bdp;
+	struct net_device *ndev = rx_queue->ndev;
+	struct gfar_private *priv = netdev_priv(ndev);
+	struct rxbd8 *bdp;
+	int i, howmany = 0;
+	struct sk_buff *skb = rx_queue->skb;
+	int cleaned_cnt = gfar_rxbd_unused(rx_queue);
+	unsigned int total_bytes = 0, total_pkts = 0;
+
+	/* Get the first full descriptor */
+	i = rx_queue->next_to_clean;
+
+	while (rx_work_limit--) {
+		u32 lstatus;
+
+		if (cleaned_cnt >= GFAR_RX_BUFF_ALLOC) {
+			gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
+			cleaned_cnt = 0;
+		}
+
+		bdp = &rx_queue->rx_bd_base[i];
+		lstatus = be32_to_cpu(bdp->lstatus);
+		if (lstatus & BD_LFLAG(RXBD_EMPTY))
+			break;
+
+		/* order rx buffer descriptor reads */
+		rmb();
+
+		/* fetch next to clean buffer from the ring */
+		skb = gfar_get_next_rxbuff(rx_queue, lstatus, skb);
+		if (unlikely(!skb))
+			break;
+
+		cleaned_cnt++;
+		howmany++;
+
+		if (unlikely(++i == rx_queue->rx_ring_size))
+			i = 0;
+
+		rx_queue->next_to_clean = i;
+
+		/* fetch next buffer if not the last in frame */
+		if (!(lstatus & BD_LFLAG(RXBD_LAST)))
+			continue;
+
+		if (unlikely(lstatus & BD_LFLAG(RXBD_ERR))) {
+			count_errors(lstatus, ndev);
+
+			/* discard faulty buffer */
+			dev_kfree_skb(skb);
+			skb = NULL;
+			rx_queue->stats.rx_dropped++;
+			continue;
+		}
+
+		/* Increment the number of packets */
+		total_pkts++;
+		total_bytes += skb->len;
+
+		skb_record_rx_queue(skb, rx_queue->qindex);
+
+		gfar_process_frame(ndev, skb);
+
+		/* Send the packet up the stack */
+		napi_gro_receive(&rx_queue->grp->napi_rx, skb);
+
+		skb = NULL;
+	}
+
+	/* Store incomplete frames for completion */
+	rx_queue->skb = skb;
+
+	rx_queue->stats.rx_packets += total_pkts;
+	rx_queue->stats.rx_bytes += total_bytes;
+
+	if (cleaned_cnt)
+		gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
+
+	/* Update Last Free RxBD pointer for LFC */
+	if (unlikely(priv->tx_actual_en)) {
+		bdp = gfar_rxbd_lastfree(rx_queue);
+		gfar_write(rx_queue->rfbptr, (u32)bdp);
+	}
 
 	return howmany;
 }
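Note: the rewritten poll loop supports scatter-gather frames: a frame in progress is carried in rx_queue->skb across poll invocations, gfar_get_next_rxbuff() either builds the skb for the first fragment or appends later halves as page frags, and only a descriptor with RXBD_LAST set triggers error accounting and delivery. A toy model of just that control flow (states and names are illustrative only, not driver code):

```c
#include <stdbool.h>
#include <stdio.h>

/* stub model of the per-descriptor states seen by the new poll loop */
enum bd { BD_EMPTY, BD_MID, BD_LAST_OK, BD_LAST_ERR };

/* one simulated frame: two middle fragments, then a good last BD */
static const enum bd ring[] = { BD_MID, BD_MID, BD_LAST_OK, BD_EMPTY };

int main(void)
{
	bool have_skb = false; /* models rx_queue->skb carried across polls */
	unsigned int i;

	for (i = 0; ring[i] != BD_EMPTY; i++) {
		have_skb = true; /* gfar_get_next_rxbuff(): build or append */

		if (ring[i] == BD_MID)
			continue; /* frame not complete, fetch next buffer */

		if (ring[i] == BD_LAST_ERR) {
			have_skb = false; /* count_errors() + dev_kfree_skb() */
			continue;
		}

		printf("frame complete at BD %u -> napi_gro_receive()\n", i);
		have_skb = false;
	}

	/* an in-progress frame would be parked in rx_queue->skb here */
	printf("carry-over skb: %s\n", have_skb ? "yes" : "no");
	return 0;
}
```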
@@ -3552,14 +3621,8 @@ static noinline void gfar_update_link_state(struct gfar_private *priv)
 		if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) {
 			for (i = 0; i < priv->num_rx_queues; i++) {
 				rx_queue = priv->rx_queue[i];
-				bdp = rx_queue->cur_rx;
-				/* skip to previous bd */
-				bdp = skip_bd(bdp, rx_queue->rx_ring_size - 1,
-					      rx_queue->rx_bd_base,
-					      rx_queue->rx_ring_size);
-
-				if (rx_queue->rfbptr)
-					gfar_write(rx_queue->rfbptr, (u32)bdp);
+				bdp = gfar_rxbd_lastfree(rx_queue);
+				gfar_write(rx_queue->rfbptr, (u32)bdp);
 			}
 
 			priv->tx_actual_en = 1;
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index daa1d37de642..44021243c187 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -71,11 +71,6 @@ struct ethtool_rx_list {
 /* Number of bytes to align the rx bufs to */
 #define RXBUF_ALIGNMENT 64
 
-/* The number of bytes which composes a unit for the purpose of
- * allocating data buffers.  ie-for any given MTU, the data buffer
- * will be the next highest multiple of 512 bytes. */
-#define INCREMENTAL_BUFFER_SIZE 512
-
 #define PHY_INIT_TIMEOUT 100000
 
 #define DRV_NAME "gfar-enet"
@@ -92,6 +87,8 @@ extern const char gfar_driver_version[];
 #define DEFAULT_TX_RING_SIZE	256
 #define DEFAULT_RX_RING_SIZE	256
 
+#define GFAR_RX_BUFF_ALLOC	16
+
 #define GFAR_RX_MAX_RING_SIZE	256
 #define GFAR_TX_MAX_RING_SIZE	256
 
@@ -103,11 +100,14 @@ extern const char gfar_driver_version[];
 #define DEFAULT_RX_LFC_THR	16
 #define DEFAULT_LFC_PTVVAL	4
 
-#define DEFAULT_RX_BUFFER_SIZE	1536
+#define GFAR_RXB_SIZE 1536
+#define GFAR_SKBFRAG_SIZE (RXBUF_ALIGNMENT + GFAR_RXB_SIZE \
+			  + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define GFAR_RXB_TRUESIZE 2048
+
 #define TX_RING_MOD_MASK(size) (size-1)
 #define RX_RING_MOD_MASK(size) (size-1)
-#define JUMBO_BUFFER_SIZE 9728
-#define JUMBO_FRAME_SIZE 9600
+#define GFAR_JUMBO_FRAME_SIZE 9600
 
 #define DEFAULT_FIFO_TX_THR 0x100
 #define DEFAULT_FIFO_TX_STARVE 0x40
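Note: the new constants fix the RX buffer geometry instead of deriving it from the MTU: each 2K half page (GFAR_RXB_TRUESIZE) holds 64 bytes of headroom (RXBUF_ALIGNMENT), up to GFAR_RXB_SIZE (1536) bytes of frame data for the hardware, and GFAR_SKBFRAG_SIZE is what build_skb() is told, i.e. headroom + data + the trailing struct skb_shared_info. A quick arithmetic sanity check (the skb_shared_info size below is an assumed ballpark on a 64-bit build, not a value from the patch):

```c
#include <stdio.h>

int main(void)
{
	unsigned int truesize = 2048; /* GFAR_RXB_TRUESIZE */
	unsigned int headroom = 64;   /* RXBUF_ALIGNMENT */
	unsigned int rxb_size = 1536; /* GFAR_RXB_SIZE, programmed into MRBLR */
	unsigned int shinfo = 320;    /* assumed SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) */
	unsigned int skbfrag = headroom + rxb_size + shinfo; /* GFAR_SKBFRAG_SIZE */

	printf("GFAR_SKBFRAG_SIZE ~ %u, fits in a %u-byte half page: %s\n",
	       skbfrag, truesize, skbfrag <= truesize ? "yes" : "no");
	return 0;
}
```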
@@ -640,6 +640,7 @@ struct rmon_mib
 };
 
 struct gfar_extra_stats {
+	atomic64_t rx_alloc_err;
 	atomic64_t rx_large;
 	atomic64_t rx_short;
 	atomic64_t rx_nonoctet;
@@ -651,7 +652,6 @@ struct gfar_extra_stats {
 	atomic64_t eberr;
 	atomic64_t tx_babt;
 	atomic64_t tx_underrun;
-	atomic64_t rx_skbmissing;
 	atomic64_t tx_timeout;
 };
 
@@ -1012,34 +1012,42 @@ struct rx_q_stats {
 	unsigned long rx_dropped;
 };
 
+struct gfar_rx_buff {
+	dma_addr_t dma;
+	struct page *page;
+	unsigned int page_offset;
+};
+
 /**
  *	struct gfar_priv_rx_q - per rx queue structure
- *	@rx_skbuff: skb pointers
- *	@skb_currx: currently use skb pointer
+ *	@rx_buff: Array of buffer info metadata structs
  *	@rx_bd_base: First rx buffer descriptor
- *	@cur_rx: Next free rx ring entry
+ *	@next_to_use: index of the next buffer to be alloc'd
+ *	@next_to_clean: index of the next buffer to be cleaned
  *	@qindex: index of this queue
- *	@dev: back pointer to the dev structure
+ *	@ndev: back pointer to net_device
  *	@rx_ring_size: Rx ring size
  *	@rxcoalescing: enable/disable rx-coalescing
  *	@rxic: receive interrupt coalescing vlaue
  */
 
 struct gfar_priv_rx_q {
-	struct	sk_buff **rx_skbuff __aligned(SMP_CACHE_BYTES);
-	dma_addr_t rx_bd_dma_base;
+	struct	gfar_rx_buff *rx_buff __aligned(SMP_CACHE_BYTES);
 	struct	rxbd8 *rx_bd_base;
-	struct	rxbd8 *cur_rx;
-	struct	net_device *dev;
-	struct gfar_priv_grp *grp;
+	struct	net_device *ndev;
+	struct	device *dev;
+	u16 rx_ring_size;
+	u16 qindex;
+	struct	gfar_priv_grp *grp;
+	u16 next_to_clean;
+	u16 next_to_use;
+	u16 next_to_alloc;
+	struct	sk_buff *skb;
 	struct rx_q_stats stats;
-	u16	skb_currx;
-	u16	qindex;
-	unsigned int	rx_ring_size;
-	/* RX Coalescing values */
+	u32 __iomem *rfbptr;
 	unsigned char rxcoalescing;
 	unsigned long rxic;
-	u32 __iomem *rfbptr;
+	dma_addr_t rx_bd_dma_base;
 };
 
 enum gfar_irqinfo_id {
@@ -1109,7 +1117,6 @@ struct gfar_private {
 	struct device *dev;
 	struct net_device *ndev;
 	enum gfar_errata errata;
-	unsigned int rx_buffer_size;
 
 	u16 uses_rxfcb;
 	u16 padding;
@@ -1295,6 +1302,23 @@ static inline void gfar_clear_txbd_status(struct txbd8 *bdp)
 	bdp->lstatus = cpu_to_be32(lstatus);
 }
 
+static inline int gfar_rxbd_unused(struct gfar_priv_rx_q *rxq)
+{
+	if (rxq->next_to_clean > rxq->next_to_use)
+		return rxq->next_to_clean - rxq->next_to_use - 1;
+
+	return rxq->rx_ring_size + rxq->next_to_clean - rxq->next_to_use - 1;
+}
+
+static inline struct rxbd8 *gfar_rxbd_lastfree(struct gfar_priv_rx_q *rxq)
+{
+	int i;
+
+	i = rxq->next_to_use ? rxq->next_to_use - 1 : rxq->rx_ring_size - 1;
+
+	return &rxq->rx_bd_base[i];
+}
+
 irqreturn_t gfar_receive(int irq, void *dev_id);
 int startup_gfar(struct net_device *dev);
 void stop_gfar(struct net_device *dev);
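Note: the two new inline helpers are pure index arithmetic. gfar_rxbd_unused() is the one-slot-reserved occupancy count, and gfar_rxbd_lastfree() returns the descriptor just before next_to_use (with wrap), which is what gets written to the rfbptr "last free RxBD" register for lossless flow control. A standalone worked example (the helpers re-expressed over bare indices for illustration):

```c
#include <stdio.h>

/* standalone copy of gfar_rxbd_unused(), using bare indices */
static int rxbd_unused(int size, int ntc, int ntu)
{
	if (ntc > ntu)
		return ntc - ntu - 1;
	return size + ntc - ntu - 1;
}

/* standalone copy of gfar_rxbd_lastfree()'s index computation */
static int rxbd_lastfree_idx(int size, int ntu)
{
	return ntu ? ntu - 1 : size - 1;
}

int main(void)
{
	/* ring of 256: cleaner at index 10, producer at index 250 */
	printf("unused   = %d\n", rxbd_unused(256, 10, 250));       /* 15 */
	printf("lastfree = %d\n", rxbd_lastfree_idx(256, 250));     /* 249 */
	/* producer wrapped back to 0: last filled BD is the tail one */
	printf("lastfree at wrap = %d\n", rxbd_lastfree_idx(256, 0)); /* 255 */
	return 0;
}
```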
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index fda12fb32ec7..3020aaabf0e4 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -61,6 +61,8 @@ static void gfar_gdrvinfo(struct net_device *dev,
 			  struct ethtool_drvinfo *drvinfo);
 
 static const char stat_gstrings[][ETH_GSTRING_LEN] = {
+	/* extra stats */
+	"rx-allocation-errors",
 	"rx-large-frame-errors",
 	"rx-short-frame-errors",
 	"rx-non-octet-errors",
@@ -72,8 +74,8 @@ static const char stat_gstrings[][ETH_GSTRING_LEN] = {
 	"ethernet-bus-error",
 	"tx-babbling-errors",
 	"tx-underrun-errors",
-	"rx-skb-missing-errors",
 	"tx-timeout-errors",
+	/* rmon stats */
 	"tx-rx-64-frames",
 	"tx-rx-65-127-frames",
 	"tx-rx-128-255-frames",
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 2f6cc423ab1d..7dbab3c20db5 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -2403,7 +2403,6 @@ int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter,
 			qlcnic_free_tx_rings(adapter);
 			return -ENOMEM;
 		}
-		memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring));
 		tx_ring->cmd_buf_arr = cmd_buf_arr;
 		spin_lock_init(&tx_ring->tx_clean_lock);
 	}
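Note: the removed memset appears redundant; cmd_buf_arr comes from a zeroing allocator just above this hunk (vzalloc in mainline at this point, though the allocation itself is outside the visible context), so clearing the ring a second time only costs cycles. The general pattern, sketched in plain C with calloc standing in for kcalloc/vzalloc:

```c
#include <stdlib.h>
#include <string.h>

struct cmd_desc { int owner; void *skb; };

/* the pattern the removed line represented: allocate, then clear */
static struct cmd_desc *alloc_ring_then_memset(size_t n)
{
	struct cmd_desc *arr = malloc(n * sizeof(*arr));

	if (arr)
		memset(arr, 0, n * sizeof(*arr)); /* redundant with a zeroing allocator */
	return arr;
}

/* preferred: request zeroed memory in one step */
static struct cmd_desc *alloc_ring_zeroed(size_t n)
{
	return calloc(n, sizeof(struct cmd_desc));
}

int main(void)
{
	struct cmd_desc *a = alloc_ring_then_memset(256);
	struct cmd_desc *b = alloc_ring_zeroed(256);

	free(a);
	free(b);
	return 0;
}
```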
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index b8d73bca683c..ffaeea63d473 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -186,7 +186,6 @@ struct qfq_sched {
 
 	u64			oldV, V;	/* Precise virtual times. */
 	struct qfq_aggregate	*in_serv_agg;	/* Aggregate being served. */
-	u32			num_active_agg;	/* Num. of active aggregates */
 	u32			wsum;		/* weight sum */
 	u32			iwsum;		/* inverse weight sum */
 
