Diffstat (limited to 'drivers/net/gianfar.c')
| -rw-r--r-- | drivers/net/gianfar.c | 1826 |
1 files changed, 1254 insertions, 572 deletions
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 5bf31f1509c9..16def131c390 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
| @@ -8,9 +8,10 @@ | |||
| 8 | * | 8 | * |
| 9 | * Author: Andy Fleming | 9 | * Author: Andy Fleming |
| 10 | * Maintainer: Kumar Gala | 10 | * Maintainer: Kumar Gala |
| 11 | * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com> | ||
| 11 | * | 12 | * |
| 12 | * Copyright (c) 2002-2006 Freescale Semiconductor, Inc. | 13 | * Copyright 2002-2009 Freescale Semiconductor, Inc. |
| 13 | * Copyright (c) 2007 MontaVista Software, Inc. | 14 | * Copyright 2007 MontaVista Software, Inc. |
| 14 | * | 15 | * |
| 15 | * This program is free software; you can redistribute it and/or modify it | 16 | * This program is free software; you can redistribute it and/or modify it |
| 16 | * under the terms of the GNU General Public License as published by the | 17 | * under the terms of the GNU General Public License as published by the |
| @@ -109,7 +110,7 @@ static void gfar_reset_task(struct work_struct *work); | |||
| 109 | static void gfar_timeout(struct net_device *dev); | 110 | static void gfar_timeout(struct net_device *dev); |
| 110 | static int gfar_close(struct net_device *dev); | 111 | static int gfar_close(struct net_device *dev); |
| 111 | struct sk_buff *gfar_new_skb(struct net_device *dev); | 112 | struct sk_buff *gfar_new_skb(struct net_device *dev); |
| 112 | static void gfar_new_rxbdp(struct net_device *dev, struct rxbd8 *bdp, | 113 | static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, |
| 113 | struct sk_buff *skb); | 114 | struct sk_buff *skb); |
| 114 | static int gfar_set_mac_address(struct net_device *dev); | 115 | static int gfar_set_mac_address(struct net_device *dev); |
| 115 | static int gfar_change_mtu(struct net_device *dev, int new_mtu); | 116 | static int gfar_change_mtu(struct net_device *dev, int new_mtu); |
| @@ -130,8 +131,8 @@ static int gfar_poll(struct napi_struct *napi, int budget); | |||
| 130 | #ifdef CONFIG_NET_POLL_CONTROLLER | 131 | #ifdef CONFIG_NET_POLL_CONTROLLER |
| 131 | static void gfar_netpoll(struct net_device *dev); | 132 | static void gfar_netpoll(struct net_device *dev); |
| 132 | #endif | 133 | #endif |
| 133 | int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit); | 134 | int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit); |
| 134 | static int gfar_clean_tx_ring(struct net_device *dev); | 135 | static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue); |
| 135 | static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, | 136 | static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, |
| 136 | int amount_pull); | 137 | int amount_pull); |
| 137 | static void gfar_vlan_rx_register(struct net_device *netdev, | 138 | static void gfar_vlan_rx_register(struct net_device *netdev, |
| @@ -142,11 +143,277 @@ void gfar_start(struct net_device *dev); | |||
| 142 | static void gfar_clear_exact_match(struct net_device *dev); | 143 | static void gfar_clear_exact_match(struct net_device *dev); |
| 143 | static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr); | 144 | static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr); |
| 144 | static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); | 145 | static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); |
| 146 | u16 gfar_select_queue(struct net_device *dev, struct sk_buff *skb); | ||
| 145 | 147 | ||
| 146 | MODULE_AUTHOR("Freescale Semiconductor, Inc"); | 148 | MODULE_AUTHOR("Freescale Semiconductor, Inc"); |
| 147 | MODULE_DESCRIPTION("Gianfar Ethernet Driver"); | 149 | MODULE_DESCRIPTION("Gianfar Ethernet Driver"); |
| 148 | MODULE_LICENSE("GPL"); | 150 | MODULE_LICENSE("GPL"); |
| 149 | 151 | ||
| 152 | static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, | ||
| 153 | dma_addr_t buf) | ||
| 154 | { | ||
| 155 | u32 lstatus; | ||
| 156 | |||
| 157 | bdp->bufPtr = buf; | ||
| 158 | |||
| 159 | lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT); | ||
| 160 | if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1) | ||
| 161 | lstatus |= BD_LFLAG(RXBD_WRAP); | ||
| 162 | |||
| 163 | eieio(); | ||
| 164 | |||
| 165 | bdp->lstatus = lstatus; | ||
| 166 | } | ||
| 167 | |||
| 168 | static int gfar_init_bds(struct net_device *ndev) | ||
| 169 | { | ||
| 170 | struct gfar_private *priv = netdev_priv(ndev); | ||
| 171 | struct gfar_priv_tx_q *tx_queue = NULL; | ||
| 172 | struct gfar_priv_rx_q *rx_queue = NULL; | ||
| 173 | struct txbd8 *txbdp; | ||
| 174 | struct rxbd8 *rxbdp; | ||
| 175 | int i, j; | ||
| 176 | |||
| 177 | for (i = 0; i < priv->num_tx_queues; i++) { | ||
| 178 | tx_queue = priv->tx_queue[i]; | ||
| 179 | /* Initialize some variables in our dev structure */ | ||
| 180 | tx_queue->num_txbdfree = tx_queue->tx_ring_size; | ||
| 181 | tx_queue->dirty_tx = tx_queue->tx_bd_base; | ||
| 182 | tx_queue->cur_tx = tx_queue->tx_bd_base; | ||
| 183 | tx_queue->skb_curtx = 0; | ||
| 184 | tx_queue->skb_dirtytx = 0; | ||
| 185 | |||
| 186 | /* Initialize Transmit Descriptor Ring */ | ||
| 187 | txbdp = tx_queue->tx_bd_base; | ||
| 188 | for (j = 0; j < tx_queue->tx_ring_size; j++) { | ||
| 189 | txbdp->lstatus = 0; | ||
| 190 | txbdp->bufPtr = 0; | ||
| 191 | txbdp++; | ||
| 192 | } | ||
| 193 | |||
| 194 | /* Set the last descriptor in the ring to indicate wrap */ | ||
| 195 | txbdp--; | ||
| 196 | txbdp->status |= TXBD_WRAP; | ||
| 197 | } | ||
| 198 | |||
| 199 | for (i = 0; i < priv->num_rx_queues; i++) { | ||
| 200 | rx_queue = priv->rx_queue[i]; | ||
| 201 | rx_queue->cur_rx = rx_queue->rx_bd_base; | ||
| 202 | rx_queue->skb_currx = 0; | ||
| 203 | rxbdp = rx_queue->rx_bd_base; | ||
| 204 | |||
| 205 | for (j = 0; j < rx_queue->rx_ring_size; j++) { | ||
| 206 | struct sk_buff *skb = rx_queue->rx_skbuff[j]; | ||
| 207 | |||
| 208 | if (skb) { | ||
| 209 | gfar_init_rxbdp(rx_queue, rxbdp, | ||
| 210 | rxbdp->bufPtr); | ||
| 211 | } else { | ||
| 212 | skb = gfar_new_skb(ndev); | ||
| 213 | if (!skb) { | ||
| 214 | pr_err("%s: Can't allocate RX buffers\n", | ||
| 215 | ndev->name); | ||
| 216 | goto err_rxalloc_fail; | ||
| 217 | } | ||
| 218 | rx_queue->rx_skbuff[j] = skb; | ||
| 219 | |||
| 220 | gfar_new_rxbdp(rx_queue, rxbdp, skb); | ||
| 221 | } | ||
| 222 | |||
| 223 | rxbdp++; | ||
| 224 | } | ||
| 225 | |||
| 226 | } | ||
| 227 | |||
| 228 | return 0; | ||
| 229 | |||
| 230 | err_rxalloc_fail: | ||
| 231 | free_skb_resources(priv); | ||
| 232 | return -ENOMEM; | ||
| 233 | } | ||
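gfar_init_bds() above resets every tx and rx ring: tx descriptors are zeroed, rx descriptors are (re)attached to buffers, and the last descriptor in each ring gets the wrap flag so the DMA engine loops back to the ring base. Below is a minimal standalone sketch of that wrap-terminated ring layout; the flag values, descriptor layout and ring size are illustrative placeholders, not the driver's real definitions.

#include <stdio.h>
#include <string.h>

#define BD_EMPTY 0x8000u	/* illustrative flag values */
#define BD_WRAP  0x2000u
#define RING_SIZE 8

struct bd {
	unsigned short status;
	unsigned long buf;
};

int main(void)
{
	struct bd ring[RING_SIZE];
	int i;

	memset(ring, 0, sizeof(ring));			/* like the tx ring: zeroed first */
	for (i = 0; i < RING_SIZE; i++)
		ring[i].status = BD_EMPTY;		/* rx style: hand each BD to "hardware" */
	ring[RING_SIZE - 1].status |= BD_WRAP;		/* last BD sends the DMA engine back to the base */

	for (i = 0; i < RING_SIZE; i++)
		printf("bd[%d] status=0x%04x%s\n", i, ring[i].status,
		       (ring[i].status & BD_WRAP) ? " (wrap)" : "");
	return 0;
}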
| 234 | |||
| 235 | static int gfar_alloc_skb_resources(struct net_device *ndev) | ||
| 236 | { | ||
| 237 | void *vaddr; | ||
| 238 | dma_addr_t addr; | ||
| 239 | int i, j, k; | ||
| 240 | struct gfar_private *priv = netdev_priv(ndev); | ||
| 241 | struct device *dev = &priv->ofdev->dev; | ||
| 242 | struct gfar_priv_tx_q *tx_queue = NULL; | ||
| 243 | struct gfar_priv_rx_q *rx_queue = NULL; | ||
| 244 | |||
| 245 | priv->total_tx_ring_size = 0; | ||
| 246 | for (i = 0; i < priv->num_tx_queues; i++) | ||
| 247 | priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size; | ||
| 248 | |||
| 249 | priv->total_rx_ring_size = 0; | ||
| 250 | for (i = 0; i < priv->num_rx_queues; i++) | ||
| 251 | priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size; | ||
| 252 | |||
| 253 | /* Allocate memory for the buffer descriptors */ | ||
| 254 | vaddr = dma_alloc_coherent(dev, | ||
| 255 | sizeof(struct txbd8) * priv->total_tx_ring_size + | ||
| 256 | sizeof(struct rxbd8) * priv->total_rx_ring_size, | ||
| 257 | &addr, GFP_KERNEL); | ||
| 258 | if (!vaddr) { | ||
| 259 | if (netif_msg_ifup(priv)) | ||
| 260 | pr_err("%s: Could not allocate buffer descriptors!\n", | ||
| 261 | ndev->name); | ||
| 262 | return -ENOMEM; | ||
| 263 | } | ||
| 264 | |||
| 265 | for (i = 0; i < priv->num_tx_queues; i++) { | ||
| 266 | tx_queue = priv->tx_queue[i]; | ||
| 267 | tx_queue->tx_bd_base = (struct txbd8 *) vaddr; | ||
| 268 | tx_queue->tx_bd_dma_base = addr; | ||
| 269 | tx_queue->dev = ndev; | ||
| 270 | /* enet DMA only understands physical addresses */ | ||
| 271 | addr += sizeof(struct txbd8) *tx_queue->tx_ring_size; | ||
| 272 | vaddr += sizeof(struct txbd8) *tx_queue->tx_ring_size; | ||
| 273 | } | ||
| 274 | |||
| 275 | /* Start the rx descriptor ring where the tx ring leaves off */ | ||
| 276 | for (i = 0; i < priv->num_rx_queues; i++) { | ||
| 277 | rx_queue = priv->rx_queue[i]; | ||
| 278 | rx_queue->rx_bd_base = (struct rxbd8 *) vaddr; | ||
| 279 | rx_queue->rx_bd_dma_base = addr; | ||
| 280 | rx_queue->dev = ndev; | ||
| 281 | addr += sizeof (struct rxbd8) * rx_queue->rx_ring_size; | ||
| 282 | vaddr += sizeof (struct rxbd8) * rx_queue->rx_ring_size; | ||
| 283 | } | ||
| 284 | |||
| 285 | /* Setup the skbuff rings */ | ||
| 286 | for (i = 0; i < priv->num_tx_queues; i++) { | ||
| 287 | tx_queue = priv->tx_queue[i]; | ||
| 288 | tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) * | ||
| 289 | tx_queue->tx_ring_size, GFP_KERNEL); | ||
| 290 | if (!tx_queue->tx_skbuff) { | ||
| 291 | if (netif_msg_ifup(priv)) | ||
| 292 | pr_err("%s: Could not allocate tx_skbuff\n", | ||
| 293 | ndev->name); | ||
| 294 | goto cleanup; | ||
| 295 | } | ||
| 296 | |||
| 297 | for (k = 0; k < tx_queue->tx_ring_size; k++) | ||
| 298 | tx_queue->tx_skbuff[k] = NULL; | ||
| 299 | } | ||
| 300 | |||
| 301 | for (i = 0; i < priv->num_rx_queues; i++) { | ||
| 302 | rx_queue = priv->rx_queue[i]; | ||
| 303 | rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) * | ||
| 304 | rx_queue->rx_ring_size, GFP_KERNEL); | ||
| 305 | |||
| 306 | if (!rx_queue->rx_skbuff) { | ||
| 307 | if (netif_msg_ifup(priv)) | ||
| 308 | pr_err("%s: Could not allocate rx_skbuff\n", | ||
| 309 | ndev->name); | ||
| 310 | goto cleanup; | ||
| 311 | } | ||
| 312 | |||
| 313 | for (j = 0; j < rx_queue->rx_ring_size; j++) | ||
| 314 | rx_queue->rx_skbuff[j] = NULL; | ||
| 315 | } | ||
| 316 | |||
| 317 | if (gfar_init_bds(ndev)) | ||
| 318 | goto cleanup; | ||
| 319 | |||
| 320 | return 0; | ||
| 321 | |||
| 322 | cleanup: | ||
| 323 | free_skb_resources(priv); | ||
| 324 | return -ENOMEM; | ||
| 325 | } | ||
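gfar_alloc_skb_resources() above makes one dma_alloc_coherent() call covering every tx and rx ring and then carves that region up per queue, advancing the CPU pointer and the bus address in lockstep. A standalone sketch of the carving follows, with malloc() standing in for the coherent allocation; the queue counts, ring sizes and descriptor size are made up for the example.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define NUM_TX_Q 2
#define NUM_RX_Q 2
#define TX_RING  64
#define RX_RING  128
#define BD_SIZE  8	/* bytes per buffer descriptor, illustrative */

int main(void)
{
	size_t total = (size_t)BD_SIZE * (NUM_TX_Q * TX_RING + NUM_RX_Q * RX_RING);
	uint8_t *vaddr = malloc(total);		/* stands in for dma_alloc_coherent() */
	uintptr_t addr = (uintptr_t)vaddr;	/* pretend bus address */
	int i;

	if (!vaddr)
		return 1;

	for (i = 0; i < NUM_TX_Q; i++) {
		printf("tx[%d]: cpu=%p bus=0x%lx\n", i, (void *)vaddr, (unsigned long)addr);
		vaddr += BD_SIZE * TX_RING;	/* next queue starts where this ring ends */
		addr  += BD_SIZE * TX_RING;
	}
	for (i = 0; i < NUM_RX_Q; i++) {
		printf("rx[%d]: cpu=%p bus=0x%lx\n", i, (void *)vaddr, (unsigned long)addr);
		vaddr += BD_SIZE * RX_RING;
		addr  += BD_SIZE * RX_RING;
	}
	free(vaddr - total);			/* rewind to the original allocation */
	return 0;
}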
| 326 | |||
| 327 | static void gfar_init_tx_rx_base(struct gfar_private *priv) | ||
| 328 | { | ||
| 329 | struct gfar __iomem *regs = priv->gfargrp[0].regs; | ||
| 330 | u32 __iomem *baddr; | ||
| 331 | int i; | ||
| 332 | |||
| 333 | baddr = ®s->tbase0; | ||
| 334 | for(i = 0; i < priv->num_tx_queues; i++) { | ||
| 335 | gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base); | ||
| 336 | baddr += 2; | ||
| 337 | } | ||
| 338 | |||
| 339 | baddr = ®s->rbase0; | ||
| 340 | for(i = 0; i < priv->num_rx_queues; i++) { | ||
| 341 | gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base); | ||
| 342 | baddr += 2; | ||
| 343 | } | ||
| 344 | } | ||
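In gfar_init_tx_rx_base() the per-queue base registers sit 8 bytes apart, so stepping the u32 pointer by 2 lands on the next tbaseN/rbaseN slot. A small standalone sketch of that pointer stride, using an ordinary array in place of the MMIO block (register layout and values are illustrative):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t regs[8] = {0};			/* pretend MMIO block */
	uint32_t *baddr = &regs[0];
	uint32_t bases[3] = {0x1000, 0x2000, 0x3000};
	int i;

	for (i = 0; i < 3; i++) {
		*baddr = bases[i];
		baddr += 2;			/* next base register is 8 bytes (two u32s) away */
	}
	for (i = 0; i < 8; i++)
		printf("regs[%d] = 0x%x\n", i, regs[i]);
	return 0;
}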
| 345 | |||
| 346 | static void gfar_init_mac(struct net_device *ndev) | ||
| 347 | { | ||
| 348 | struct gfar_private *priv = netdev_priv(ndev); | ||
| 349 | struct gfar __iomem *regs = priv->gfargrp[0].regs; | ||
| 350 | u32 rctrl = 0; | ||
| 351 | u32 tctrl = 0; | ||
| 352 | u32 attrs = 0; | ||
| 353 | |||
| 354 | /* write the tx/rx base registers */ | ||
| 355 | gfar_init_tx_rx_base(priv); | ||
| 356 | |||
| 357 | /* Configure the coalescing support */ | ||
| 358 | gfar_configure_coalescing(priv, 0xFF, 0xFF); | ||
| 359 | |||
| 360 | if (priv->rx_filer_enable) | ||
| 361 | rctrl |= RCTRL_FILREN; | ||
| 362 | |||
| 363 | if (priv->rx_csum_enable) | ||
| 364 | rctrl |= RCTRL_CHECKSUMMING; | ||
| 365 | |||
| 366 | if (priv->extended_hash) { | ||
| 367 | rctrl |= RCTRL_EXTHASH; | ||
| 368 | |||
| 369 | gfar_clear_exact_match(ndev); | ||
| 370 | rctrl |= RCTRL_EMEN; | ||
| 371 | } | ||
| 372 | |||
| 373 | if (priv->padding) { | ||
| 374 | rctrl &= ~RCTRL_PAL_MASK; | ||
| 375 | rctrl |= RCTRL_PADDING(priv->padding); | ||
| 376 | } | ||
| 377 | |||
| 378 | /* keep vlan related bits if it's enabled */ | ||
| 379 | if (priv->vlgrp) { | ||
| 380 | rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT; | ||
| 381 | tctrl |= TCTRL_VLINS; | ||
| 382 | } | ||
| 383 | |||
| 384 | /* Init rctrl based on our settings */ | ||
| 385 | gfar_write(®s->rctrl, rctrl); | ||
| 386 | |||
| 387 | if (ndev->features & NETIF_F_IP_CSUM) | ||
| 388 | tctrl |= TCTRL_INIT_CSUM; | ||
| 389 | |||
| 390 | tctrl |= TCTRL_TXSCHED_PRIO; | ||
| 391 | |||
| 392 | gfar_write(®s->tctrl, tctrl); | ||
| 393 | |||
| 394 | /* Set the extraction length and index */ | ||
| 395 | attrs = ATTRELI_EL(priv->rx_stash_size) | | ||
| 396 | ATTRELI_EI(priv->rx_stash_index); | ||
| 397 | |||
| 398 | gfar_write(®s->attreli, attrs); | ||
| 399 | |||
| 400 | /* Start with defaults, and add stashing or locking | ||
| 401 | * depending on the appropriate variables */ | ||
| 402 | attrs = ATTR_INIT_SETTINGS; | ||
| 403 | |||
| 404 | if (priv->bd_stash_en) | ||
| 405 | attrs |= ATTR_BDSTASH; | ||
| 406 | |||
| 407 | if (priv->rx_stash_size != 0) | ||
| 408 | attrs |= ATTR_BUFSTASH; | ||
| 409 | |||
| 410 | gfar_write(®s->attr, attrs); | ||
| 411 | |||
| 412 | gfar_write(®s->fifo_tx_thr, priv->fifo_threshold); | ||
| 413 | gfar_write(®s->fifo_tx_starve, priv->fifo_starve); | ||
| 414 | gfar_write(®s->fifo_tx_starve_shutoff, priv->fifo_starve_off); | ||
| 415 | } | ||
| 416 | |||
| 150 | static const struct net_device_ops gfar_netdev_ops = { | 417 | static const struct net_device_ops gfar_netdev_ops = { |
| 151 | .ndo_open = gfar_enet_open, | 418 | .ndo_open = gfar_enet_open, |
| 152 | .ndo_start_xmit = gfar_start_xmit, | 419 | .ndo_start_xmit = gfar_start_xmit, |
| @@ -155,6 +422,7 @@ static const struct net_device_ops gfar_netdev_ops = { | |||
| 155 | .ndo_set_multicast_list = gfar_set_multi, | 422 | .ndo_set_multicast_list = gfar_set_multi, |
| 156 | .ndo_tx_timeout = gfar_timeout, | 423 | .ndo_tx_timeout = gfar_timeout, |
| 157 | .ndo_do_ioctl = gfar_ioctl, | 424 | .ndo_do_ioctl = gfar_ioctl, |
| 425 | .ndo_select_queue = gfar_select_queue, | ||
| 158 | .ndo_vlan_rx_register = gfar_vlan_rx_register, | 426 | .ndo_vlan_rx_register = gfar_vlan_rx_register, |
| 159 | .ndo_set_mac_address = eth_mac_addr, | 427 | .ndo_set_mac_address = eth_mac_addr, |
| 160 | .ndo_validate_addr = eth_validate_addr, | 428 | .ndo_validate_addr = eth_validate_addr, |
| @@ -163,56 +431,252 @@ static const struct net_device_ops gfar_netdev_ops = { | |||
| 163 | #endif | 431 | #endif |
| 164 | }; | 432 | }; |
| 165 | 433 | ||
| 434 | unsigned int ftp_rqfpr[MAX_FILER_IDX + 1]; | ||
| 435 | unsigned int ftp_rqfcr[MAX_FILER_IDX + 1]; | ||
| 436 | |||
| 437 | void lock_rx_qs(struct gfar_private *priv) | ||
| 438 | { | ||
| 439 | int i = 0x0; | ||
| 440 | |||
| 441 | for (i = 0; i < priv->num_rx_queues; i++) | ||
| 442 | spin_lock(&priv->rx_queue[i]->rxlock); | ||
| 443 | } | ||
| 444 | |||
| 445 | void lock_tx_qs(struct gfar_private *priv) | ||
| 446 | { | ||
| 447 | int i = 0x0; | ||
| 448 | |||
| 449 | for (i = 0; i < priv->num_tx_queues; i++) | ||
| 450 | spin_lock(&priv->tx_queue[i]->txlock); | ||
| 451 | } | ||
| 452 | |||
| 453 | void unlock_rx_qs(struct gfar_private *priv) | ||
| 454 | { | ||
| 455 | int i = 0x0; | ||
| 456 | |||
| 457 | for (i = 0; i < priv->num_rx_queues; i++) | ||
| 458 | spin_unlock(&priv->rx_queue[i]->rxlock); | ||
| 459 | } | ||
| 460 | |||
| 461 | void unlock_tx_qs(struct gfar_private *priv) | ||
| 462 | { | ||
| 463 | int i = 0x0; | ||
| 464 | |||
| 465 | for (i = 0; i < priv->num_tx_queues; i++) | ||
| 466 | spin_unlock(&priv->tx_queue[i]->txlock); | ||
| 467 | } | ||
| 468 | |||
| 166 | /* Returns 1 if incoming frames use an FCB */ | 469 | /* Returns 1 if incoming frames use an FCB */ |
| 167 | static inline int gfar_uses_fcb(struct gfar_private *priv) | 470 | static inline int gfar_uses_fcb(struct gfar_private *priv) |
| 168 | { | 471 | { |
| 169 | return priv->vlgrp || priv->rx_csum_enable; | 472 | return priv->vlgrp || priv->rx_csum_enable; |
| 170 | } | 473 | } |
| 171 | 474 | ||
| 172 | static int gfar_of_init(struct net_device *dev) | 475 | u16 gfar_select_queue(struct net_device *dev, struct sk_buff *skb) |
| 476 | { | ||
| 477 | return skb_get_queue_mapping(skb); | ||
| 478 | } | ||
| 479 | static void free_tx_pointers(struct gfar_private *priv) | ||
| 480 | { | ||
| 481 | int i = 0; | ||
| 482 | |||
| 483 | for (i = 0; i < priv->num_tx_queues; i++) | ||
| 484 | kfree(priv->tx_queue[i]); | ||
| 485 | } | ||
| 486 | |||
| 487 | static void free_rx_pointers(struct gfar_private *priv) | ||
| 488 | { | ||
| 489 | int i = 0; | ||
| 490 | |||
| 491 | for (i = 0; i < priv->num_rx_queues; i++) | ||
| 492 | kfree(priv->rx_queue[i]); | ||
| 493 | } | ||
| 494 | |||
| 495 | static void unmap_group_regs(struct gfar_private *priv) | ||
| 496 | { | ||
| 497 | int i = 0; | ||
| 498 | |||
| 499 | for (i = 0; i < MAXGROUPS; i++) | ||
| 500 | if (priv->gfargrp[i].regs) | ||
| 501 | iounmap(priv->gfargrp[i].regs); | ||
| 502 | } | ||
| 503 | |||
| 504 | static void disable_napi(struct gfar_private *priv) | ||
| 505 | { | ||
| 506 | int i = 0; | ||
| 507 | |||
| 508 | for (i = 0; i < priv->num_grps; i++) | ||
| 509 | napi_disable(&priv->gfargrp[i].napi); | ||
| 510 | } | ||
| 511 | |||
| 512 | static void enable_napi(struct gfar_private *priv) | ||
| 513 | { | ||
| 514 | int i = 0; | ||
| 515 | |||
| 516 | for (i = 0; i < priv->num_grps; i++) | ||
| 517 | napi_enable(&priv->gfargrp[i].napi); | ||
| 518 | } | ||
| 519 | |||
| 520 | static int gfar_parse_group(struct device_node *np, | ||
| 521 | struct gfar_private *priv, const char *model) | ||
| 522 | { | ||
| 523 | u32 *queue_mask; | ||
| 524 | u64 addr, size; | ||
| 525 | |||
| 526 | addr = of_translate_address(np, | ||
| 527 | of_get_address(np, 0, &size, NULL)); | ||
| 528 | priv->gfargrp[priv->num_grps].regs = ioremap(addr, size); | ||
| 529 | |||
| 530 | if (!priv->gfargrp[priv->num_grps].regs) | ||
| 531 | return -ENOMEM; | ||
| 532 | |||
| 533 | priv->gfargrp[priv->num_grps].interruptTransmit = | ||
| 534 | irq_of_parse_and_map(np, 0); | ||
| 535 | |||
| 536 | /* If we aren't the FEC we have multiple interrupts */ | ||
| 537 | if (model && strcasecmp(model, "FEC")) { | ||
| 538 | priv->gfargrp[priv->num_grps].interruptReceive = | ||
| 539 | irq_of_parse_and_map(np, 1); | ||
| 540 | priv->gfargrp[priv->num_grps].interruptError = | ||
| 541 | irq_of_parse_and_map(np,2); | ||
| 542 | if (priv->gfargrp[priv->num_grps].interruptTransmit < 0 || | ||
| 543 | priv->gfargrp[priv->num_grps].interruptReceive < 0 || | ||
| 544 | priv->gfargrp[priv->num_grps].interruptError < 0) { | ||
| 545 | return -EINVAL; | ||
| 546 | } | ||
| 547 | } | ||
| 548 | |||
| 549 | priv->gfargrp[priv->num_grps].grp_id = priv->num_grps; | ||
| 550 | priv->gfargrp[priv->num_grps].priv = priv; | ||
| 551 | spin_lock_init(&priv->gfargrp[priv->num_grps].grplock); | ||
| 552 | if(priv->mode == MQ_MG_MODE) { | ||
| 553 | queue_mask = (u32 *)of_get_property(np, | ||
| 554 | "fsl,rx-bit-map", NULL); | ||
| 555 | priv->gfargrp[priv->num_grps].rx_bit_map = | ||
| 556 | queue_mask ? *queue_mask :(DEFAULT_MAPPING >> priv->num_grps); | ||
| 557 | queue_mask = (u32 *)of_get_property(np, | ||
| 558 | "fsl,tx-bit-map", NULL); | ||
| 559 | priv->gfargrp[priv->num_grps].tx_bit_map = | ||
| 560 | queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps); | ||
| 561 | } else { | ||
| 562 | priv->gfargrp[priv->num_grps].rx_bit_map = 0xFF; | ||
| 563 | priv->gfargrp[priv->num_grps].tx_bit_map = 0xFF; | ||
| 564 | } | ||
| 565 | priv->num_grps++; | ||
| 566 | |||
| 567 | return 0; | ||
| 568 | } | ||
| 569 | |||
| 570 | static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev) | ||
| 173 | { | 571 | { |
| 174 | const char *model; | 572 | const char *model; |
| 175 | const char *ctype; | 573 | const char *ctype; |
| 176 | const void *mac_addr; | 574 | const void *mac_addr; |
| 177 | u64 addr, size; | 575 | int err = 0, i; |
| 178 | int err = 0; | 576 | struct net_device *dev = NULL; |
| 179 | struct gfar_private *priv = netdev_priv(dev); | 577 | struct gfar_private *priv = NULL; |
| 180 | struct device_node *np = priv->node; | 578 | struct device_node *np = ofdev->node; |
| 579 | struct device_node *child = NULL; | ||
| 181 | const u32 *stash; | 580 | const u32 *stash; |
| 182 | const u32 *stash_len; | 581 | const u32 *stash_len; |
| 183 | const u32 *stash_idx; | 582 | const u32 *stash_idx; |
| 583 | unsigned int num_tx_qs, num_rx_qs; | ||
| 584 | u32 *tx_queues, *rx_queues; | ||
| 184 | 585 | ||
| 185 | if (!np || !of_device_is_available(np)) | 586 | if (!np || !of_device_is_available(np)) |
| 186 | return -ENODEV; | 587 | return -ENODEV; |
| 187 | 588 | ||
| 188 | /* get a pointer to the register memory */ | 589 | /* parse the num of tx and rx queues */ |
| 189 | addr = of_translate_address(np, of_get_address(np, 0, &size, NULL)); | 590 | tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL); |
| 190 | priv->regs = ioremap(addr, size); | 591 | num_tx_qs = tx_queues ? *tx_queues : 1; |
| 592 | |||
| 593 | if (num_tx_qs > MAX_TX_QS) { | ||
| 594 | printk(KERN_ERR "num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n", | ||
| 595 | num_tx_qs, MAX_TX_QS); | ||
| 596 | printk(KERN_ERR "Cannot do alloc_etherdev, aborting\n"); | ||
| 597 | return -EINVAL; | ||
| 598 | } | ||
| 599 | |||
| 600 | rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL); | ||
| 601 | num_rx_qs = rx_queues ? *rx_queues : 1; | ||
| 191 | 602 | ||
| 192 | if (priv->regs == NULL) | 603 | if (num_rx_qs > MAX_RX_QS) { |
| 604 | printk(KERN_ERR "num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n", | ||
| 605 | num_rx_qs, MAX_RX_QS); | ||
| 606 | printk(KERN_ERR "Cannot do alloc_etherdev, aborting\n"); | ||
| 607 | return -EINVAL; | ||
| 608 | } | ||
| 609 | |||
| 610 | *pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs); | ||
| 611 | dev = *pdev; | ||
| 612 | if (NULL == dev) | ||
| 193 | return -ENOMEM; | 613 | return -ENOMEM; |
| 194 | 614 | ||
| 195 | priv->interruptTransmit = irq_of_parse_and_map(np, 0); | 615 | priv = netdev_priv(dev); |
| 616 | priv->node = ofdev->node; | ||
| 617 | priv->ndev = dev; | ||
| 618 | |||
| 619 | dev->num_tx_queues = num_tx_qs; | ||
| 620 | dev->real_num_tx_queues = num_tx_qs; | ||
| 621 | priv->num_tx_queues = num_tx_qs; | ||
| 622 | priv->num_rx_queues = num_rx_qs; | ||
| 623 | priv->num_grps = 0x0; | ||
| 196 | 624 | ||
| 197 | model = of_get_property(np, "model", NULL); | 625 | model = of_get_property(np, "model", NULL); |
| 198 | 626 | ||
| 199 | /* If we aren't the FEC we have multiple interrupts */ | 627 | for (i = 0; i < MAXGROUPS; i++) |
| 200 | if (model && strcasecmp(model, "FEC")) { | 628 | priv->gfargrp[i].regs = NULL; |
| 201 | priv->interruptReceive = irq_of_parse_and_map(np, 1); | 629 | |
| 630 | /* Parse and initialize group specific information */ | ||
| 631 | if (of_device_is_compatible(np, "fsl,etsec2")) { | ||
| 632 | priv->mode = MQ_MG_MODE; | ||
| 633 | for_each_child_of_node(np, child) { | ||
| 634 | err = gfar_parse_group(child, priv, model); | ||
| 635 | if (err) | ||
| 636 | goto err_grp_init; | ||
| 637 | } | ||
| 638 | } else { | ||
| 639 | priv->mode = SQ_SG_MODE; | ||
| 640 | err = gfar_parse_group(np, priv, model); | ||
| 641 | if(err) | ||
| 642 | goto err_grp_init; | ||
| 643 | } | ||
| 202 | 644 | ||
| 203 | priv->interruptError = irq_of_parse_and_map(np, 2); | 645 | for (i = 0; i < priv->num_tx_queues; i++) |
| 646 | priv->tx_queue[i] = NULL; | ||
| 647 | for (i = 0; i < priv->num_rx_queues; i++) | ||
| 648 | priv->rx_queue[i] = NULL; | ||
| 649 | |||
| 650 | for (i = 0; i < priv->num_tx_queues; i++) { | ||
| 651 | priv->tx_queue[i] = (struct gfar_priv_tx_q *)kmalloc( | ||
| 652 | sizeof (struct gfar_priv_tx_q), GFP_KERNEL); | ||
| 653 | if (!priv->tx_queue[i]) { | ||
| 654 | err = -ENOMEM; | ||
| 655 | goto tx_alloc_failed; | ||
| 656 | } | ||
| 657 | priv->tx_queue[i]->tx_skbuff = NULL; | ||
| 658 | priv->tx_queue[i]->qindex = i; | ||
| 659 | priv->tx_queue[i]->dev = dev; | ||
| 660 | spin_lock_init(&(priv->tx_queue[i]->txlock)); | ||
| 661 | } | ||
| 204 | 662 | ||
| 205 | if (priv->interruptTransmit < 0 || | 663 | for (i = 0; i < priv->num_rx_queues; i++) { |
| 206 | priv->interruptReceive < 0 || | 664 | priv->rx_queue[i] = (struct gfar_priv_rx_q *)kmalloc( |
| 207 | priv->interruptError < 0) { | 665 | sizeof (struct gfar_priv_rx_q), GFP_KERNEL); |
| 208 | err = -EINVAL; | 666 | if (!priv->rx_queue[i]) { |
| 209 | goto err_out; | 667 | err = -ENOMEM; |
| 668 | goto rx_alloc_failed; | ||
| 210 | } | 669 | } |
| 670 | priv->rx_queue[i]->rx_skbuff = NULL; | ||
| 671 | priv->rx_queue[i]->qindex = i; | ||
| 672 | priv->rx_queue[i]->dev = dev; | ||
| 673 | spin_lock_init(&(priv->rx_queue[i]->rxlock)); | ||
| 211 | } | 674 | } |
| 212 | 675 | ||
| 676 | |||
| 213 | stash = of_get_property(np, "bd-stash", NULL); | 677 | stash = of_get_property(np, "bd-stash", NULL); |
| 214 | 678 | ||
| 215 | if(stash) { | 679 | if (stash) { |
| 216 | priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING; | 680 | priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING; |
| 217 | priv->bd_stash_en = 1; | 681 | priv->bd_stash_en = 1; |
| 218 | } | 682 | } |
| @@ -270,8 +734,13 @@ static int gfar_of_init(struct net_device *dev) | |||
| 270 | 734 | ||
| 271 | return 0; | 735 | return 0; |
| 272 | 736 | ||
| 273 | err_out: | 737 | rx_alloc_failed: |
| 274 | iounmap(priv->regs); | 738 | free_rx_pointers(priv); |
| 739 | tx_alloc_failed: | ||
| 740 | free_tx_pointers(priv); | ||
| 741 | err_grp_init: | ||
| 742 | unmap_group_regs(priv); | ||
| 743 | free_netdev(dev); | ||
| 275 | return err; | 744 | return err; |
| 276 | } | 745 | } |
| 277 | 746 | ||
| @@ -289,6 +758,85 @@ static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |||
| 289 | return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd); | 758 | return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd); |
| 290 | } | 759 | } |
| 291 | 760 | ||
| 761 | static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs) | ||
| 762 | { | ||
| 763 | unsigned int new_bit_map = 0x0; | ||
| 764 | int mask = 0x1 << (max_qs - 1), i; | ||
| 765 | for (i = 0; i < max_qs; i++) { | ||
| 766 | if (bit_map & mask) | ||
| 767 | new_bit_map = new_bit_map + (1 << i); | ||
| 768 | mask = mask >> 0x1; | ||
| 769 | } | ||
| 770 | return new_bit_map; | ||
| 771 | } | ||
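reverse_bitmap() exists because the device-tree bit maps put queue 0 in the most significant bit, while the for_each_bit() walks done later in probe scan from bit 0 upward. A standalone copy of the same logic with a worked example (the input value is made up):

#include <stdio.h>

static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs)
{
	unsigned int new_bit_map = 0;
	unsigned int mask = 1u << (max_qs - 1);
	unsigned int i;

	for (i = 0; i < max_qs; i++) {
		if (bit_map & mask)
			new_bit_map |= 1u << i;	/* MSB of the input becomes bit 0 */
		mask >>= 1;
	}
	return new_bit_map;
}

int main(void)
{
	/* queues 0 and 2 selected in an 8-queue map, MSB-first encoding */
	unsigned int in = 0xA0;	/* 1010 0000 */

	printf("0x%02x -> 0x%02x\n", in, reverse_bitmap(in, 8));	/* prints 0xa0 -> 0x05 */
	return 0;
}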
| 772 | |||
| 773 | static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar, | ||
| 774 | u32 class) | ||
| 775 | { | ||
| 776 | u32 rqfpr = FPR_FILER_MASK; | ||
| 777 | u32 rqfcr = 0x0; | ||
| 778 | |||
| 779 | rqfar--; | ||
| 780 | rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT; | ||
| 781 | ftp_rqfpr[rqfar] = rqfpr; | ||
| 782 | ftp_rqfcr[rqfar] = rqfcr; | ||
| 783 | gfar_write_filer(priv, rqfar, rqfcr, rqfpr); | ||
| 784 | |||
| 785 | rqfar--; | ||
| 786 | rqfcr = RQFCR_CMP_NOMATCH; | ||
| 787 | ftp_rqfpr[rqfar] = rqfpr; | ||
| 788 | ftp_rqfcr[rqfar] = rqfcr; | ||
| 789 | gfar_write_filer(priv, rqfar, rqfcr, rqfpr); | ||
| 790 | |||
| 791 | rqfar--; | ||
| 792 | rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND; | ||
| 793 | rqfpr = class; | ||
| 794 | ftp_rqfcr[rqfar] = rqfcr; | ||
| 795 | ftp_rqfpr[rqfar] = rqfpr; | ||
| 796 | gfar_write_filer(priv, rqfar, rqfcr, rqfpr); | ||
| 797 | |||
| 798 | rqfar--; | ||
| 799 | rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND; | ||
| 800 | rqfpr = class; | ||
| 801 | ftp_rqfcr[rqfar] = rqfcr; | ||
| 802 | ftp_rqfpr[rqfar] = rqfpr; | ||
| 803 | gfar_write_filer(priv, rqfar, rqfcr, rqfpr); | ||
| 804 | |||
| 805 | return rqfar; | ||
| 806 | } | ||
| 807 | |||
| 808 | static void gfar_init_filer_table(struct gfar_private *priv) | ||
| 809 | { | ||
| 810 | int i = 0x0; | ||
| 811 | u32 rqfar = MAX_FILER_IDX; | ||
| 812 | u32 rqfcr = 0x0; | ||
| 813 | u32 rqfpr = FPR_FILER_MASK; | ||
| 814 | |||
| 815 | /* Default rule */ | ||
| 816 | rqfcr = RQFCR_CMP_MATCH; | ||
| 817 | ftp_rqfcr[rqfar] = rqfcr; | ||
| 818 | ftp_rqfpr[rqfar] = rqfpr; | ||
| 819 | gfar_write_filer(priv, rqfar, rqfcr, rqfpr); | ||
| 820 | |||
| 821 | rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6); | ||
| 822 | rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP); | ||
| 823 | rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP); | ||
| 824 | rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4); | ||
| 825 | rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP); | ||
| 826 | rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP); | ||
| 827 | |||
| 828 | /* cur_filer_idx indicates the first non-masked rule */ | ||
| 829 | priv->cur_filer_idx = rqfar; | ||
| 830 | |||
| 831 | /* Rest are masked rules */ | ||
| 832 | rqfcr = RQFCR_CMP_NOMATCH; | ||
| 833 | for (i = 0; i < rqfar; i++) { | ||
| 834 | ftp_rqfcr[i] = rqfcr; | ||
| 835 | ftp_rqfpr[i] = rqfpr; | ||
| 836 | gfar_write_filer(priv, i, rqfcr, rqfpr); | ||
| 837 | } | ||
| 838 | } | ||
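gfar_init_filer_table() fills the filer from the top index downward: a default match rule at MAX_FILER_IDX, then a four-entry cluster per traffic class, with cur_filer_idx recording the lowest used slot and every entry below it left as a no-match rule. A standalone sketch of that fill order follows, using a smaller table and descriptive labels instead of the real RQFCR/RQFPR values.

#include <stdio.h>

#define MAX_FILER_IDX 15	/* kept small for the sketch; the hardware table is larger */

int main(void)
{
	const char *table[MAX_FILER_IDX + 1];
	const char *classes[] = { "ipv6", "ipv6+udp", "ipv6+tcp" };
	int rqfar = MAX_FILER_IDX;
	int cur_filer_idx, c, i;

	for (i = 0; i <= MAX_FILER_IDX; i++)
		table[i] = "nomatch";			/* default for unused entries */

	table[rqfar] = "default match";			/* top entry */
	for (c = 0; c < 3; c++)
		for (i = 0; i < 4; i++)			/* four filer entries per class cluster */
			table[--rqfar] = classes[c];
	cur_filer_idx = rqfar;				/* first non-masked rule */

	for (i = MAX_FILER_IDX; i >= 0; i--)
		printf("%2d: %s%s\n", i, table[i],
		       i == cur_filer_idx ? "  <- cur_filer_idx" : "");
	return 0;
}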
| 839 | |||
| 292 | /* Set up the ethernet device structure, private data, | 840 | /* Set up the ethernet device structure, private data, |
| 293 | * and anything else we need before we start */ | 841 | * and anything else we need before we start */ |
| 294 | static int gfar_probe(struct of_device *ofdev, | 842 | static int gfar_probe(struct of_device *ofdev, |
| @@ -297,14 +845,17 @@ static int gfar_probe(struct of_device *ofdev, | |||
| 297 | u32 tempval; | 845 | u32 tempval; |
| 298 | struct net_device *dev = NULL; | 846 | struct net_device *dev = NULL; |
| 299 | struct gfar_private *priv = NULL; | 847 | struct gfar_private *priv = NULL; |
| 300 | int err = 0; | 848 | struct gfar __iomem *regs = NULL; |
| 849 | int err = 0, i, grp_idx = 0; | ||
| 301 | int len_devname; | 850 | int len_devname; |
| 851 | u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0; | ||
| 852 | u32 isrg = 0; | ||
| 853 | u32 __iomem *baddr; | ||
| 302 | 854 | ||
| 303 | /* Create an ethernet device instance */ | 855 | err = gfar_of_init(ofdev, &dev); |
| 304 | dev = alloc_etherdev(sizeof (*priv)); | ||
| 305 | 856 | ||
| 306 | if (NULL == dev) | 857 | if (err) |
| 307 | return -ENOMEM; | 858 | return err; |
| 308 | 859 | ||
| 309 | priv = netdev_priv(dev); | 860 | priv = netdev_priv(dev); |
| 310 | priv->ndev = dev; | 861 | priv->ndev = dev; |
| @@ -312,50 +863,46 @@ static int gfar_probe(struct of_device *ofdev, | |||
| 312 | priv->node = ofdev->node; | 863 | priv->node = ofdev->node; |
| 313 | SET_NETDEV_DEV(dev, &ofdev->dev); | 864 | SET_NETDEV_DEV(dev, &ofdev->dev); |
| 314 | 865 | ||
| 315 | err = gfar_of_init(dev); | ||
| 316 | |||
| 317 | if (err) | ||
| 318 | goto regs_fail; | ||
| 319 | |||
| 320 | spin_lock_init(&priv->txlock); | ||
| 321 | spin_lock_init(&priv->rxlock); | ||
| 322 | spin_lock_init(&priv->bflock); | 866 | spin_lock_init(&priv->bflock); |
| 323 | INIT_WORK(&priv->reset_task, gfar_reset_task); | 867 | INIT_WORK(&priv->reset_task, gfar_reset_task); |
| 324 | 868 | ||
| 325 | dev_set_drvdata(&ofdev->dev, priv); | 869 | dev_set_drvdata(&ofdev->dev, priv); |
| 870 | regs = priv->gfargrp[0].regs; | ||
| 326 | 871 | ||
| 327 | /* Stop the DMA engine now, in case it was running before */ | 872 | /* Stop the DMA engine now, in case it was running before */ |
| 328 | /* (The firmware could have used it, and left it running). */ | 873 | /* (The firmware could have used it, and left it running). */ |
| 329 | gfar_halt(dev); | 874 | gfar_halt(dev); |
| 330 | 875 | ||
| 331 | /* Reset MAC layer */ | 876 | /* Reset MAC layer */ |
| 332 | gfar_write(&priv->regs->maccfg1, MACCFG1_SOFT_RESET); | 877 | gfar_write(®s->maccfg1, MACCFG1_SOFT_RESET); |
| 333 | 878 | ||
| 334 | /* We need to delay at least 3 TX clocks */ | 879 | /* We need to delay at least 3 TX clocks */ |
| 335 | udelay(2); | 880 | udelay(2); |
| 336 | 881 | ||
| 337 | tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW); | 882 | tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW); |
| 338 | gfar_write(&priv->regs->maccfg1, tempval); | 883 | gfar_write(®s->maccfg1, tempval); |
| 339 | 884 | ||
| 340 | /* Initialize MACCFG2. */ | 885 | /* Initialize MACCFG2. */ |
| 341 | gfar_write(&priv->regs->maccfg2, MACCFG2_INIT_SETTINGS); | 886 | gfar_write(®s->maccfg2, MACCFG2_INIT_SETTINGS); |
| 342 | 887 | ||
| 343 | /* Initialize ECNTRL */ | 888 | /* Initialize ECNTRL */ |
| 344 | gfar_write(&priv->regs->ecntrl, ECNTRL_INIT_SETTINGS); | 889 | gfar_write(®s->ecntrl, ECNTRL_INIT_SETTINGS); |
| 345 | 890 | ||
| 346 | /* Set the dev->base_addr to the gfar reg region */ | 891 | /* Set the dev->base_addr to the gfar reg region */ |
| 347 | dev->base_addr = (unsigned long) (priv->regs); | 892 | dev->base_addr = (unsigned long) regs; |
| 348 | 893 | ||
| 349 | SET_NETDEV_DEV(dev, &ofdev->dev); | 894 | SET_NETDEV_DEV(dev, &ofdev->dev); |
| 350 | 895 | ||
| 351 | /* Fill in the dev structure */ | 896 | /* Fill in the dev structure */ |
| 352 | dev->watchdog_timeo = TX_TIMEOUT; | 897 | dev->watchdog_timeo = TX_TIMEOUT; |
| 353 | netif_napi_add(dev, &priv->napi, gfar_poll, GFAR_DEV_WEIGHT); | ||
| 354 | dev->mtu = 1500; | 898 | dev->mtu = 1500; |
| 355 | |||
| 356 | dev->netdev_ops = &gfar_netdev_ops; | 899 | dev->netdev_ops = &gfar_netdev_ops; |
| 357 | dev->ethtool_ops = &gfar_ethtool_ops; | 900 | dev->ethtool_ops = &gfar_ethtool_ops; |
| 358 | 901 | ||
| 902 | /* Register for napi ...We are registering NAPI for each grp */ | ||
| 903 | for (i = 0; i < priv->num_grps; i++) | ||
| 904 | netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll, GFAR_DEV_WEIGHT); | ||
| 905 | |||
| 359 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) { | 906 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) { |
| 360 | priv->rx_csum_enable = 1; | 907 | priv->rx_csum_enable = 1; |
| 361 | dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA; | 908 | dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA; |
| @@ -371,35 +918,35 @@ static int gfar_probe(struct of_device *ofdev, | |||
| 371 | priv->extended_hash = 1; | 918 | priv->extended_hash = 1; |
| 372 | priv->hash_width = 9; | 919 | priv->hash_width = 9; |
| 373 | 920 | ||
| 374 | priv->hash_regs[0] = &priv->regs->igaddr0; | 921 | priv->hash_regs[0] = ®s->igaddr0; |
| 375 | priv->hash_regs[1] = &priv->regs->igaddr1; | 922 | priv->hash_regs[1] = ®s->igaddr1; |
| 376 | priv->hash_regs[2] = &priv->regs->igaddr2; | 923 | priv->hash_regs[2] = ®s->igaddr2; |
| 377 | priv->hash_regs[3] = &priv->regs->igaddr3; | 924 | priv->hash_regs[3] = ®s->igaddr3; |
| 378 | priv->hash_regs[4] = &priv->regs->igaddr4; | 925 | priv->hash_regs[4] = ®s->igaddr4; |
| 379 | priv->hash_regs[5] = &priv->regs->igaddr5; | 926 | priv->hash_regs[5] = ®s->igaddr5; |
| 380 | priv->hash_regs[6] = &priv->regs->igaddr6; | 927 | priv->hash_regs[6] = ®s->igaddr6; |
| 381 | priv->hash_regs[7] = &priv->regs->igaddr7; | 928 | priv->hash_regs[7] = ®s->igaddr7; |
| 382 | priv->hash_regs[8] = &priv->regs->gaddr0; | 929 | priv->hash_regs[8] = ®s->gaddr0; |
| 383 | priv->hash_regs[9] = &priv->regs->gaddr1; | 930 | priv->hash_regs[9] = ®s->gaddr1; |
| 384 | priv->hash_regs[10] = &priv->regs->gaddr2; | 931 | priv->hash_regs[10] = ®s->gaddr2; |
| 385 | priv->hash_regs[11] = &priv->regs->gaddr3; | 932 | priv->hash_regs[11] = ®s->gaddr3; |
| 386 | priv->hash_regs[12] = &priv->regs->gaddr4; | 933 | priv->hash_regs[12] = ®s->gaddr4; |
| 387 | priv->hash_regs[13] = &priv->regs->gaddr5; | 934 | priv->hash_regs[13] = ®s->gaddr5; |
| 388 | priv->hash_regs[14] = &priv->regs->gaddr6; | 935 | priv->hash_regs[14] = ®s->gaddr6; |
| 389 | priv->hash_regs[15] = &priv->regs->gaddr7; | 936 | priv->hash_regs[15] = ®s->gaddr7; |
| 390 | 937 | ||
| 391 | } else { | 938 | } else { |
| 392 | priv->extended_hash = 0; | 939 | priv->extended_hash = 0; |
| 393 | priv->hash_width = 8; | 940 | priv->hash_width = 8; |
| 394 | 941 | ||
| 395 | priv->hash_regs[0] = &priv->regs->gaddr0; | 942 | priv->hash_regs[0] = ®s->gaddr0; |
| 396 | priv->hash_regs[1] = &priv->regs->gaddr1; | 943 | priv->hash_regs[1] = ®s->gaddr1; |
| 397 | priv->hash_regs[2] = &priv->regs->gaddr2; | 944 | priv->hash_regs[2] = ®s->gaddr2; |
| 398 | priv->hash_regs[3] = &priv->regs->gaddr3; | 945 | priv->hash_regs[3] = ®s->gaddr3; |
| 399 | priv->hash_regs[4] = &priv->regs->gaddr4; | 946 | priv->hash_regs[4] = ®s->gaddr4; |
| 400 | priv->hash_regs[5] = &priv->regs->gaddr5; | 947 | priv->hash_regs[5] = ®s->gaddr5; |
| 401 | priv->hash_regs[6] = &priv->regs->gaddr6; | 948 | priv->hash_regs[6] = ®s->gaddr6; |
| 402 | priv->hash_regs[7] = &priv->regs->gaddr7; | 949 | priv->hash_regs[7] = ®s->gaddr7; |
| 403 | } | 950 | } |
| 404 | 951 | ||
| 405 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING) | 952 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING) |
| @@ -410,15 +957,70 @@ static int gfar_probe(struct of_device *ofdev, | |||
| 410 | if (dev->features & NETIF_F_IP_CSUM) | 957 | if (dev->features & NETIF_F_IP_CSUM) |
| 411 | dev->hard_header_len += GMAC_FCB_LEN; | 958 | dev->hard_header_len += GMAC_FCB_LEN; |
| 412 | 959 | ||
| 960 | /* Program the isrg regs only if number of grps > 1 */ | ||
| 961 | if (priv->num_grps > 1) { | ||
| 962 | baddr = ®s->isrg0; | ||
| 963 | for (i = 0; i < priv->num_grps; i++) { | ||
| 964 | isrg |= (priv->gfargrp[i].rx_bit_map << ISRG_SHIFT_RX); | ||
| 965 | isrg |= (priv->gfargrp[i].tx_bit_map << ISRG_SHIFT_TX); | ||
| 966 | gfar_write(baddr, isrg); | ||
| 967 | baddr++; | ||
| 968 | isrg = 0x0; | ||
| 969 | } | ||
| 970 | } | ||
| 971 | |||
| 972 | /* Need to reverse the bit maps as bit_map's MSB is q0 | ||
| 973 | * but, for_each_bit parses from right to left, which | ||
| 974 | * basically reverses the queue numbers */ | ||
| 975 | for (i = 0; i< priv->num_grps; i++) { | ||
| 976 | priv->gfargrp[i].tx_bit_map = reverse_bitmap( | ||
| 977 | priv->gfargrp[i].tx_bit_map, MAX_TX_QS); | ||
| 978 | priv->gfargrp[i].rx_bit_map = reverse_bitmap( | ||
| 979 | priv->gfargrp[i].rx_bit_map, MAX_RX_QS); | ||
| 980 | } | ||
| 981 | |||
| 982 | /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values, | ||
| 983 | * also assign queues to groups */ | ||
| 984 | for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) { | ||
| 985 | priv->gfargrp[grp_idx].num_rx_queues = 0x0; | ||
| 986 | for_each_bit(i, &priv->gfargrp[grp_idx].rx_bit_map, | ||
| 987 | priv->num_rx_queues) { | ||
| 988 | priv->gfargrp[grp_idx].num_rx_queues++; | ||
| 989 | priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx]; | ||
| 990 | rstat = rstat | (RSTAT_CLEAR_RHALT >> i); | ||
| 991 | rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i); | ||
| 992 | } | ||
| 993 | priv->gfargrp[grp_idx].num_tx_queues = 0x0; | ||
| 994 | for_each_bit (i, &priv->gfargrp[grp_idx].tx_bit_map, | ||
| 995 | priv->num_tx_queues) { | ||
| 996 | priv->gfargrp[grp_idx].num_tx_queues++; | ||
| 997 | priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx]; | ||
| 998 | tstat = tstat | (TSTAT_CLEAR_THALT >> i); | ||
| 999 | tqueue = tqueue | (TQUEUE_EN0 >> i); | ||
| 1000 | } | ||
| 1001 | priv->gfargrp[grp_idx].rstat = rstat; | ||
| 1002 | priv->gfargrp[grp_idx].tstat = tstat; | ||
| 1003 | rstat = tstat =0; | ||
| 1004 | } | ||
| 1005 | |||
| 1006 | gfar_write(®s->rqueue, rqueue); | ||
| 1007 | gfar_write(®s->tqueue, tqueue); | ||
| 1008 | |||
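The probe loop above walks each group's (already reversed) rx/tx bit map: every set bit assigns that queue to the group and shifts a per-queue enable bit into the group's accumulated RSTAT/TSTAT mask and the global RQUEUE/TQUEUE enables. A standalone sketch of one such walk; the mask value, bit-map value and queue count are illustrative only.

#include <stdio.h>

#define RSTAT_CLEAR_RHALT 0x00800000u	/* illustrative per-queue enable bit */

int main(void)
{
	unsigned long rx_bit_map = 0x05;	/* queues 0 and 2, after reversal */
	unsigned int rstat = 0, num_rx = 0;
	int q;

	for (q = 0; q < 8; q++) {
		if (!(rx_bit_map & (1ul << q)))
			continue;		/* queue q not owned by this group */
		num_rx++;
		rstat |= RSTAT_CLEAR_RHALT >> q;	/* one bit per owned queue */
		printf("queue %d -> this group\n", q);
	}
	printf("group owns %u rx queues, rstat mask 0x%08x\n", num_rx, rstat);
	return 0;
}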
| 413 | priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE; | 1009 | priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE; |
| 414 | priv->tx_ring_size = DEFAULT_TX_RING_SIZE; | ||
| 415 | priv->rx_ring_size = DEFAULT_RX_RING_SIZE; | ||
| 416 | priv->num_txbdfree = DEFAULT_TX_RING_SIZE; | ||
| 417 | 1010 | ||
| 418 | priv->txcoalescing = DEFAULT_TX_COALESCE; | 1011 | /* Initializing some of the rx/tx queue level parameters */ |
| 419 | priv->txic = DEFAULT_TXIC; | 1012 | for (i = 0; i < priv->num_tx_queues; i++) { |
| 420 | priv->rxcoalescing = DEFAULT_RX_COALESCE; | 1013 | priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE; |
| 421 | priv->rxic = DEFAULT_RXIC; | 1014 | priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE; |
| 1015 | priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE; | ||
| 1016 | priv->tx_queue[i]->txic = DEFAULT_TXIC; | ||
| 1017 | } | ||
| 1018 | |||
| 1019 | for (i = 0; i < priv->num_rx_queues; i++) { | ||
| 1020 | priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE; | ||
| 1021 | priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE; | ||
| 1022 | priv->rx_queue[i]->rxic = DEFAULT_RXIC; | ||
| 1023 | } | ||
| 422 | 1024 | ||
| 423 | /* Enable most messages by default */ | 1025 | /* Enable most messages by default */ |
| 424 | priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1; | 1026 | priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1; |
| @@ -439,20 +1041,43 @@ static int gfar_probe(struct of_device *ofdev, | |||
| 439 | 1041 | ||
| 440 | /* fill out IRQ number and name fields */ | 1042 | /* fill out IRQ number and name fields */ |
| 441 | len_devname = strlen(dev->name); | 1043 | len_devname = strlen(dev->name); |
| 442 | strncpy(&priv->int_name_tx[0], dev->name, len_devname); | 1044 | for (i = 0; i < priv->num_grps; i++) { |
| 443 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { | 1045 | strncpy(&priv->gfargrp[i].int_name_tx[0], dev->name, |
| 444 | strncpy(&priv->int_name_tx[len_devname], | 1046 | len_devname); |
| 445 | "_tx", sizeof("_tx") + 1); | 1047 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { |
| 446 | 1048 | strncpy(&priv->gfargrp[i].int_name_tx[len_devname], | |
| 447 | strncpy(&priv->int_name_rx[0], dev->name, len_devname); | 1049 | "_g", sizeof("_g")); |
| 448 | strncpy(&priv->int_name_rx[len_devname], | 1050 | priv->gfargrp[i].int_name_tx[ |
| 449 | "_rx", sizeof("_rx") + 1); | 1051 | strlen(priv->gfargrp[i].int_name_tx)] = i+48; |
| 1052 | strncpy(&priv->gfargrp[i].int_name_tx[strlen( | ||
| 1053 | priv->gfargrp[i].int_name_tx)], | ||
| 1054 | "_tx", sizeof("_tx") + 1); | ||
| 1055 | |||
| 1056 | strncpy(&priv->gfargrp[i].int_name_rx[0], dev->name, | ||
| 1057 | len_devname); | ||
| 1058 | strncpy(&priv->gfargrp[i].int_name_rx[len_devname], | ||
| 1059 | "_g", sizeof("_g")); | ||
| 1060 | priv->gfargrp[i].int_name_rx[ | ||
| 1061 | strlen(priv->gfargrp[i].int_name_rx)] = i+48; | ||
| 1062 | strncpy(&priv->gfargrp[i].int_name_rx[strlen( | ||
| 1063 | priv->gfargrp[i].int_name_rx)], | ||
| 1064 | "_rx", sizeof("_rx") + 1); | ||
| 1065 | |||
| 1066 | strncpy(&priv->gfargrp[i].int_name_er[0], dev->name, | ||
| 1067 | len_devname); | ||
| 1068 | strncpy(&priv->gfargrp[i].int_name_er[len_devname], | ||
| 1069 | "_g", sizeof("_g")); | ||
| 1070 | priv->gfargrp[i].int_name_er[strlen( | ||
| 1071 | priv->gfargrp[i].int_name_er)] = i+48; | ||
| 1072 | strncpy(&priv->gfargrp[i].int_name_er[strlen(\ | ||
| 1073 | priv->gfargrp[i].int_name_er)], | ||
| 1074 | "_er", sizeof("_er") + 1); | ||
| 1075 | } else | ||
| 1076 | priv->gfargrp[i].int_name_tx[len_devname] = '\0'; | ||
| 1077 | } | ||
| 450 | 1078 | ||
| 451 | strncpy(&priv->int_name_er[0], dev->name, len_devname); | 1079 | /* Initialize the filer table */ |
| 452 | strncpy(&priv->int_name_er[len_devname], | 1080 | gfar_init_filer_table(priv); |
| 453 | "_er", sizeof("_er") + 1); | ||
| 454 | } else | ||
| 455 | priv->int_name_tx[len_devname] = '\0'; | ||
| 456 | 1081 | ||
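With multiple groups, the IRQ names built above come out as "<ifname>_g<N>_tx", "_rx" and "_er" (the i+48 turns the group index into its ASCII digit). A standalone sketch that produces the same names with snprintf(), using a made-up interface name:

#include <stdio.h>

int main(void)
{
	char name_tx[32], name_rx[32], name_er[32];
	const char *ifname = "eth0";	/* illustrative interface name */
	int grp;

	for (grp = 0; grp < 2; grp++) {
		snprintf(name_tx, sizeof(name_tx), "%s_g%d_tx", ifname, grp);
		snprintf(name_rx, sizeof(name_rx), "%s_g%d_rx", ifname, grp);
		snprintf(name_er, sizeof(name_er), "%s_g%d_er", ifname, grp);
		printf("%s  %s  %s\n", name_tx, name_rx, name_er);
	}
	return 0;
}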
| 457 | /* Create all the sysfs files */ | 1082 | /* Create all the sysfs files */ |
| 458 | gfar_init_sysfs(dev); | 1083 | gfar_init_sysfs(dev); |
| @@ -463,14 +1088,19 @@ static int gfar_probe(struct of_device *ofdev, | |||
| 463 | /* Even more device info helps when determining which kernel */ | 1088 | /* Even more device info helps when determining which kernel */ |
| 464 | /* provided which set of benchmarks. */ | 1089 | /* provided which set of benchmarks. */ |
| 465 | printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name); | 1090 | printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name); |
| 466 | printk(KERN_INFO "%s: %d/%d RX/TX BD ring size\n", | 1091 | for (i = 0; i < priv->num_rx_queues; i++) |
| 467 | dev->name, priv->rx_ring_size, priv->tx_ring_size); | 1092 | printk(KERN_INFO "%s: :RX BD ring size for Q[%d]: %d\n", |
| 1093 | dev->name, i, priv->rx_queue[i]->rx_ring_size); | ||
| 1094 | for(i = 0; i < priv->num_tx_queues; i++) | ||
| 1095 | printk(KERN_INFO "%s:TX BD ring size for Q[%d]: %d\n", | ||
| 1096 | dev->name, i, priv->tx_queue[i]->tx_ring_size); | ||
| 468 | 1097 | ||
| 469 | return 0; | 1098 | return 0; |
| 470 | 1099 | ||
| 471 | register_fail: | 1100 | register_fail: |
| 472 | iounmap(priv->regs); | 1101 | unmap_group_regs(priv); |
| 473 | regs_fail: | 1102 | free_tx_pointers(priv); |
| 1103 | free_rx_pointers(priv); | ||
| 474 | if (priv->phy_node) | 1104 | if (priv->phy_node) |
| 475 | of_node_put(priv->phy_node); | 1105 | of_node_put(priv->phy_node); |
| 476 | if (priv->tbi_node) | 1106 | if (priv->tbi_node) |
| @@ -491,54 +1121,59 @@ static int gfar_remove(struct of_device *ofdev) | |||
| 491 | dev_set_drvdata(&ofdev->dev, NULL); | 1121 | dev_set_drvdata(&ofdev->dev, NULL); |
| 492 | 1122 | ||
| 493 | unregister_netdev(priv->ndev); | 1123 | unregister_netdev(priv->ndev); |
| 494 | iounmap(priv->regs); | 1124 | unmap_group_regs(priv); |
| 495 | free_netdev(priv->ndev); | 1125 | free_netdev(priv->ndev); |
| 496 | 1126 | ||
| 497 | return 0; | 1127 | return 0; |
| 498 | } | 1128 | } |
| 499 | 1129 | ||
| 500 | #ifdef CONFIG_PM | 1130 | #ifdef CONFIG_PM |
| 501 | static int gfar_suspend(struct of_device *ofdev, pm_message_t state) | 1131 | |
| 1132 | static int gfar_suspend(struct device *dev) | ||
| 502 | { | 1133 | { |
| 503 | struct gfar_private *priv = dev_get_drvdata(&ofdev->dev); | 1134 | struct gfar_private *priv = dev_get_drvdata(dev); |
| 504 | struct net_device *dev = priv->ndev; | 1135 | struct net_device *ndev = priv->ndev; |
| 1136 | struct gfar __iomem *regs = priv->gfargrp[0].regs; | ||
| 505 | unsigned long flags; | 1137 | unsigned long flags; |
| 506 | u32 tempval; | 1138 | u32 tempval; |
| 507 | 1139 | ||
| 508 | int magic_packet = priv->wol_en && | 1140 | int magic_packet = priv->wol_en && |
| 509 | (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); | 1141 | (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); |
| 510 | 1142 | ||
| 511 | netif_device_detach(dev); | 1143 | netif_device_detach(ndev); |
| 512 | 1144 | ||
| 513 | if (netif_running(dev)) { | 1145 | if (netif_running(ndev)) { |
| 514 | spin_lock_irqsave(&priv->txlock, flags); | ||
| 515 | spin_lock(&priv->rxlock); | ||
| 516 | 1146 | ||
| 517 | gfar_halt_nodisable(dev); | 1147 | local_irq_save(flags); |
| 1148 | lock_tx_qs(priv); | ||
| 1149 | lock_rx_qs(priv); | ||
| 1150 | |||
| 1151 | gfar_halt_nodisable(ndev); | ||
| 518 | 1152 | ||
| 519 | /* Disable Tx, and Rx if wake-on-LAN is disabled. */ | 1153 | /* Disable Tx, and Rx if wake-on-LAN is disabled. */ |
| 520 | tempval = gfar_read(&priv->regs->maccfg1); | 1154 | tempval = gfar_read(®s->maccfg1); |
| 521 | 1155 | ||
| 522 | tempval &= ~MACCFG1_TX_EN; | 1156 | tempval &= ~MACCFG1_TX_EN; |
| 523 | 1157 | ||
| 524 | if (!magic_packet) | 1158 | if (!magic_packet) |
| 525 | tempval &= ~MACCFG1_RX_EN; | 1159 | tempval &= ~MACCFG1_RX_EN; |
| 526 | 1160 | ||
| 527 | gfar_write(&priv->regs->maccfg1, tempval); | 1161 | gfar_write(®s->maccfg1, tempval); |
| 528 | 1162 | ||
| 529 | spin_unlock(&priv->rxlock); | 1163 | unlock_rx_qs(priv); |
| 530 | spin_unlock_irqrestore(&priv->txlock, flags); | 1164 | unlock_tx_qs(priv); |
| 1165 | local_irq_restore(flags); | ||
| 531 | 1166 | ||
| 532 | napi_disable(&priv->napi); | 1167 | disable_napi(priv); |
| 533 | 1168 | ||
| 534 | if (magic_packet) { | 1169 | if (magic_packet) { |
| 535 | /* Enable interrupt on Magic Packet */ | 1170 | /* Enable interrupt on Magic Packet */ |
| 536 | gfar_write(&priv->regs->imask, IMASK_MAG); | 1171 | gfar_write(®s->imask, IMASK_MAG); |
| 537 | 1172 | ||
| 538 | /* Enable Magic Packet mode */ | 1173 | /* Enable Magic Packet mode */ |
| 539 | tempval = gfar_read(&priv->regs->maccfg2); | 1174 | tempval = gfar_read(®s->maccfg2); |
| 540 | tempval |= MACCFG2_MPEN; | 1175 | tempval |= MACCFG2_MPEN; |
| 541 | gfar_write(&priv->regs->maccfg2, tempval); | 1176 | gfar_write(®s->maccfg2, tempval); |
| 542 | } else { | 1177 | } else { |
| 543 | phy_stop(priv->phydev); | 1178 | phy_stop(priv->phydev); |
| 544 | } | 1179 | } |
| @@ -547,17 +1182,18 @@ static int gfar_suspend(struct of_device *ofdev, pm_message_t state) | |||
| 547 | return 0; | 1182 | return 0; |
| 548 | } | 1183 | } |
| 549 | 1184 | ||
| 550 | static int gfar_resume(struct of_device *ofdev) | 1185 | static int gfar_resume(struct device *dev) |
| 551 | { | 1186 | { |
| 552 | struct gfar_private *priv = dev_get_drvdata(&ofdev->dev); | 1187 | struct gfar_private *priv = dev_get_drvdata(dev); |
| 553 | struct net_device *dev = priv->ndev; | 1188 | struct net_device *ndev = priv->ndev; |
| 1189 | struct gfar __iomem *regs = priv->gfargrp[0].regs; | ||
| 554 | unsigned long flags; | 1190 | unsigned long flags; |
| 555 | u32 tempval; | 1191 | u32 tempval; |
| 556 | int magic_packet = priv->wol_en && | 1192 | int magic_packet = priv->wol_en && |
| 557 | (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); | 1193 | (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); |
| 558 | 1194 | ||
| 559 | if (!netif_running(dev)) { | 1195 | if (!netif_running(ndev)) { |
| 560 | netif_device_attach(dev); | 1196 | netif_device_attach(ndev); |
| 561 | return 0; | 1197 | return 0; |
| 562 | } | 1198 | } |
| 563 | 1199 | ||
| @@ -567,28 +1203,80 @@ static int gfar_resume(struct of_device *ofdev) | |||
| 567 | /* Disable Magic Packet mode, in case something | 1203 | /* Disable Magic Packet mode, in case something |
| 568 | * else woke us up. | 1204 | * else woke us up. |
| 569 | */ | 1205 | */ |
| 1206 | local_irq_save(flags); | ||
| 1207 | lock_tx_qs(priv); | ||
| 1208 | lock_rx_qs(priv); | ||
| 570 | 1209 | ||
| 571 | spin_lock_irqsave(&priv->txlock, flags); | 1210 | tempval = gfar_read(®s->maccfg2); |
| 572 | spin_lock(&priv->rxlock); | ||
| 573 | |||
| 574 | tempval = gfar_read(&priv->regs->maccfg2); | ||
| 575 | tempval &= ~MACCFG2_MPEN; | 1211 | tempval &= ~MACCFG2_MPEN; |
| 576 | gfar_write(&priv->regs->maccfg2, tempval); | 1212 | gfar_write(®s->maccfg2, tempval); |
| 577 | 1213 | ||
| 578 | gfar_start(dev); | 1214 | gfar_start(ndev); |
| 579 | 1215 | ||
| 580 | spin_unlock(&priv->rxlock); | 1216 | unlock_rx_qs(priv); |
| 581 | spin_unlock_irqrestore(&priv->txlock, flags); | 1217 | unlock_tx_qs(priv); |
| 1218 | local_irq_restore(flags); | ||
| 582 | 1219 | ||
| 583 | netif_device_attach(dev); | 1220 | netif_device_attach(ndev); |
| 584 | 1221 | ||
| 585 | napi_enable(&priv->napi); | 1222 | enable_napi(priv); |
| 586 | 1223 | ||
| 587 | return 0; | 1224 | return 0; |
| 588 | } | 1225 | } |
| 1226 | |||
| 1227 | static int gfar_restore(struct device *dev) | ||
| 1228 | { | ||
| 1229 | struct gfar_private *priv = dev_get_drvdata(dev); | ||
| 1230 | struct net_device *ndev = priv->ndev; | ||
| 1231 | |||
| 1232 | if (!netif_running(ndev)) | ||
| 1233 | return 0; | ||
| 1234 | |||
| 1235 | gfar_init_bds(ndev); | ||
| 1236 | init_registers(ndev); | ||
| 1237 | gfar_set_mac_address(ndev); | ||
| 1238 | gfar_init_mac(ndev); | ||
| 1239 | gfar_start(ndev); | ||
| 1240 | |||
| 1241 | priv->oldlink = 0; | ||
| 1242 | priv->oldspeed = 0; | ||
| 1243 | priv->oldduplex = -1; | ||
| 1244 | |||
| 1245 | if (priv->phydev) | ||
| 1246 | phy_start(priv->phydev); | ||
| 1247 | |||
| 1248 | netif_device_attach(ndev); | ||
| 1249 | enable_napi(priv); | ||
| 1250 | |||
| 1251 | return 0; | ||
| 1252 | } | ||
| 1253 | |||
| 1254 | static struct dev_pm_ops gfar_pm_ops = { | ||
| 1255 | .suspend = gfar_suspend, | ||
| 1256 | .resume = gfar_resume, | ||
| 1257 | .freeze = gfar_suspend, | ||
| 1258 | .thaw = gfar_resume, | ||
| 1259 | .restore = gfar_restore, | ||
| 1260 | }; | ||
| 1261 | |||
| 1262 | #define GFAR_PM_OPS (&gfar_pm_ops) | ||
| 1263 | |||
| 1264 | static int gfar_legacy_suspend(struct of_device *ofdev, pm_message_t state) | ||
| 1265 | { | ||
| 1266 | return gfar_suspend(&ofdev->dev); | ||
| 1267 | } | ||
| 1268 | |||
| 1269 | static int gfar_legacy_resume(struct of_device *ofdev) | ||
| 1270 | { | ||
| 1271 | return gfar_resume(&ofdev->dev); | ||
| 1272 | } | ||
| 1273 | |||
| 589 | #else | 1274 | #else |
| 590 | #define gfar_suspend NULL | 1275 | |
| 591 | #define gfar_resume NULL | 1276 | #define GFAR_PM_OPS NULL |
| 1277 | #define gfar_legacy_suspend NULL | ||
| 1278 | #define gfar_legacy_resume NULL | ||
| 1279 | |||
| 592 | #endif | 1280 | #endif |
| 593 | 1281 | ||
| 594 | /* Reads the controller's registers to determine what interface | 1282 | /* Reads the controller's registers to determine what interface |
| @@ -597,7 +1285,10 @@ static int gfar_resume(struct of_device *ofdev) | |||
| 597 | static phy_interface_t gfar_get_interface(struct net_device *dev) | 1285 | static phy_interface_t gfar_get_interface(struct net_device *dev) |
| 598 | { | 1286 | { |
| 599 | struct gfar_private *priv = netdev_priv(dev); | 1287 | struct gfar_private *priv = netdev_priv(dev); |
| 600 | u32 ecntrl = gfar_read(&priv->regs->ecntrl); | 1288 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
| 1289 | u32 ecntrl; | ||
| 1290 | |||
| 1291 | ecntrl = gfar_read(®s->ecntrl); | ||
| 601 | 1292 | ||
| 602 | if (ecntrl & ECNTRL_SGMII_MODE) | 1293 | if (ecntrl & ECNTRL_SGMII_MODE) |
| 603 | return PHY_INTERFACE_MODE_SGMII; | 1294 | return PHY_INTERFACE_MODE_SGMII; |
| @@ -719,46 +1410,52 @@ static void gfar_configure_serdes(struct net_device *dev) | |||
| 719 | static void init_registers(struct net_device *dev) | 1410 | static void init_registers(struct net_device *dev) |
| 720 | { | 1411 | { |
| 721 | struct gfar_private *priv = netdev_priv(dev); | 1412 | struct gfar_private *priv = netdev_priv(dev); |
| 1413 | struct gfar __iomem *regs = NULL; | ||
| 1414 | int i = 0; | ||
| 722 | 1415 | ||
| 723 | /* Clear IEVENT */ | 1416 | for (i = 0; i < priv->num_grps; i++) { |
| 724 | gfar_write(&priv->regs->ievent, IEVENT_INIT_CLEAR); | 1417 | regs = priv->gfargrp[i].regs; |
| 1418 | /* Clear IEVENT */ | ||
| 1419 | gfar_write(®s->ievent, IEVENT_INIT_CLEAR); | ||
| 725 | 1420 | ||
| 726 | /* Initialize IMASK */ | 1421 | /* Initialize IMASK */ |
| 727 | gfar_write(&priv->regs->imask, IMASK_INIT_CLEAR); | 1422 | gfar_write(®s->imask, IMASK_INIT_CLEAR); |
| 1423 | } | ||
| 728 | 1424 | ||
| 1425 | regs = priv->gfargrp[0].regs; | ||
| 729 | /* Init hash registers to zero */ | 1426 | /* Init hash registers to zero */ |
| 730 | gfar_write(&priv->regs->igaddr0, 0); | 1427 | gfar_write(®s->igaddr0, 0); |
| 731 | gfar_write(&priv->regs->igaddr1, 0); | 1428 | gfar_write(®s->igaddr1, 0); |
| 732 | gfar_write(&priv->regs->igaddr2, 0); | 1429 | gfar_write(®s->igaddr2, 0); |
| 733 | gfar_write(&priv->regs->igaddr3, 0); | 1430 | gfar_write(®s->igaddr3, 0); |
| 734 | gfar_write(&priv->regs->igaddr4, 0); | 1431 | gfar_write(®s->igaddr4, 0); |
| 735 | gfar_write(&priv->regs->igaddr5, 0); | 1432 | gfar_write(®s->igaddr5, 0); |
| 736 | gfar_write(&priv->regs->igaddr6, 0); | 1433 | gfar_write(®s->igaddr6, 0); |
| 737 | gfar_write(&priv->regs->igaddr7, 0); | 1434 | gfar_write(®s->igaddr7, 0); |
| 738 | 1435 | ||
| 739 | gfar_write(&priv->regs->gaddr0, 0); | 1436 | gfar_write(®s->gaddr0, 0); |
| 740 | gfar_write(&priv->regs->gaddr1, 0); | 1437 | gfar_write(®s->gaddr1, 0); |
| 741 | gfar_write(&priv->regs->gaddr2, 0); | 1438 | gfar_write(®s->gaddr2, 0); |
| 742 | gfar_write(&priv->regs->gaddr3, 0); | 1439 | gfar_write(®s->gaddr3, 0); |
| 743 | gfar_write(&priv->regs->gaddr4, 0); | 1440 | gfar_write(®s->gaddr4, 0); |
| 744 | gfar_write(&priv->regs->gaddr5, 0); | 1441 | gfar_write(®s->gaddr5, 0); |
| 745 | gfar_write(&priv->regs->gaddr6, 0); | 1442 | gfar_write(®s->gaddr6, 0); |
| 746 | gfar_write(&priv->regs->gaddr7, 0); | 1443 | gfar_write(®s->gaddr7, 0); |
| 747 | 1444 | ||
| 748 | /* Zero out the rmon mib registers if it has them */ | 1445 | /* Zero out the rmon mib registers if it has them */ |
| 749 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) { | 1446 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) { |
| 750 | memset_io(&(priv->regs->rmon), 0, sizeof (struct rmon_mib)); | 1447 | memset_io(&(regs->rmon), 0, sizeof (struct rmon_mib)); |
| 751 | 1448 | ||
| 752 | /* Mask off the CAM interrupts */ | 1449 | /* Mask off the CAM interrupts */ |
| 753 | gfar_write(&priv->regs->rmon.cam1, 0xffffffff); | 1450 | gfar_write(®s->rmon.cam1, 0xffffffff); |
| 754 | gfar_write(&priv->regs->rmon.cam2, 0xffffffff); | 1451 | gfar_write(®s->rmon.cam2, 0xffffffff); |
| 755 | } | 1452 | } |
| 756 | 1453 | ||
| 757 | /* Initialize the max receive buffer length */ | 1454 | /* Initialize the max receive buffer length */ |
| 758 | gfar_write(&priv->regs->mrblr, priv->rx_buffer_size); | 1455 | gfar_write(®s->mrblr, priv->rx_buffer_size); |
| 759 | 1456 | ||
| 760 | /* Initialize the Minimum Frame Length Register */ | 1457 | /* Initialize the Minimum Frame Length Register */ |
| 761 | gfar_write(&priv->regs->minflr, MINFLR_INIT_SETTINGS); | 1458 | gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS); |
| 762 | } | 1459 | } |
| 763 | 1460 | ||
| 764 | 1461 | ||
| @@ -766,23 +1463,28 @@ static void init_registers(struct net_device *dev) | |||
| 766 | static void gfar_halt_nodisable(struct net_device *dev) | 1463 | static void gfar_halt_nodisable(struct net_device *dev) |
| 767 | { | 1464 | { |
| 768 | struct gfar_private *priv = netdev_priv(dev); | 1465 | struct gfar_private *priv = netdev_priv(dev); |
| 769 | struct gfar __iomem *regs = priv->regs; | 1466 | struct gfar __iomem *regs = NULL; |
| 770 | u32 tempval; | 1467 | u32 tempval; |
| 1468 | int i = 0; | ||
| 771 | 1469 | ||
| 772 | /* Mask all interrupts */ | 1470 | for (i = 0; i < priv->num_grps; i++) { |
| 773 | gfar_write(&regs->imask, IMASK_INIT_CLEAR); | 1471 | regs = priv->gfargrp[i].regs; |
| 1472 | /* Mask all interrupts */ | ||
| 1473 | gfar_write(&regs->imask, IMASK_INIT_CLEAR); | ||
| 774 | 1474 | ||
| 775 | /* Clear all interrupts */ | 1475 | /* Clear all interrupts */ |
| 776 | gfar_write(&regs->ievent, IEVENT_INIT_CLEAR); | 1476 | gfar_write(&regs->ievent, IEVENT_INIT_CLEAR); |
| 1477 | } | ||
| 777 | 1478 | ||
| 1479 | regs = priv->gfargrp[0].regs; | ||
| 778 | /* Stop the DMA, and wait for it to stop */ | 1480 | /* Stop the DMA, and wait for it to stop */ |
| 779 | tempval = gfar_read(&priv->regs->dmactrl); | 1481 | tempval = gfar_read(&regs->dmactrl); |
| 780 | if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) | 1482 | if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) |
| 781 | != (DMACTRL_GRS | DMACTRL_GTS)) { | 1483 | != (DMACTRL_GRS | DMACTRL_GTS)) { |
| 782 | tempval |= (DMACTRL_GRS | DMACTRL_GTS); | 1484 | tempval |= (DMACTRL_GRS | DMACTRL_GTS); |
| 783 | gfar_write(&priv->regs->dmactrl, tempval); | 1485 | gfar_write(&regs->dmactrl, tempval); |
| 784 | 1486 | ||
| 785 | while (!(gfar_read(&priv->regs->ievent) & | 1487 | while (!(gfar_read(&regs->ievent) & |
| 786 | (IEVENT_GRSC | IEVENT_GTSC))) | 1488 | (IEVENT_GRSC | IEVENT_GTSC))) |
| 787 | cpu_relax(); | 1489 | cpu_relax(); |
| 788 | } | 1490 | } |
| @@ -792,7 +1494,7 @@ static void gfar_halt_nodisable(struct net_device *dev) | |||
| 792 | void gfar_halt(struct net_device *dev) | 1494 | void gfar_halt(struct net_device *dev) |
| 793 | { | 1495 | { |
| 794 | struct gfar_private *priv = netdev_priv(dev); | 1496 | struct gfar_private *priv = netdev_priv(dev); |
| 795 | struct gfar __iomem *regs = priv->regs; | 1497 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
| 796 | u32 tempval; | 1498 | u32 tempval; |
| 797 | 1499 | ||
| 798 | gfar_halt_nodisable(dev); | 1500 | gfar_halt_nodisable(dev); |
| @@ -803,101 +1505,131 @@ void gfar_halt(struct net_device *dev) | |||
| 803 | gfar_write(&regs->maccfg1, tempval); | 1505 | gfar_write(&regs->maccfg1, tempval); |
| 804 | } | 1506 | } |
| 805 | 1507 | ||
| 1508 | static void free_grp_irqs(struct gfar_priv_grp *grp) | ||
| 1509 | { | ||
| 1510 | free_irq(grp->interruptError, grp); | ||
| 1511 | free_irq(grp->interruptTransmit, grp); | ||
| 1512 | free_irq(grp->interruptReceive, grp); | ||
| 1513 | } | ||
| 1514 | |||
| 806 | void stop_gfar(struct net_device *dev) | 1515 | void stop_gfar(struct net_device *dev) |
| 807 | { | 1516 | { |
| 808 | struct gfar_private *priv = netdev_priv(dev); | 1517 | struct gfar_private *priv = netdev_priv(dev); |
| 809 | struct gfar __iomem *regs = priv->regs; | ||
| 810 | unsigned long flags; | 1518 | unsigned long flags; |
| 1519 | int i; | ||
| 811 | 1520 | ||
| 812 | phy_stop(priv->phydev); | 1521 | phy_stop(priv->phydev); |
| 813 | 1522 | ||
| 1523 | |||
| 814 | /* Lock it down */ | 1524 | /* Lock it down */ |
| 815 | spin_lock_irqsave(&priv->txlock, flags); | 1525 | local_irq_save(flags); |
| 816 | spin_lock(&priv->rxlock); | 1526 | lock_tx_qs(priv); |
| 1527 | lock_rx_qs(priv); | ||
| 817 | 1528 | ||
| 818 | gfar_halt(dev); | 1529 | gfar_halt(dev); |
| 819 | 1530 | ||
| 820 | spin_unlock(&priv->rxlock); | 1531 | unlock_rx_qs(priv); |
| 821 | spin_unlock_irqrestore(&priv->txlock, flags); | 1532 | unlock_tx_qs(priv); |
| 1533 | local_irq_restore(flags); | ||
| 822 | 1534 | ||
| 823 | /* Free the IRQs */ | 1535 | /* Free the IRQs */ |
| 824 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { | 1536 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { |
| 825 | free_irq(priv->interruptError, dev); | 1537 | for (i = 0; i < priv->num_grps; i++) |
| 826 | free_irq(priv->interruptTransmit, dev); | 1538 | free_grp_irqs(&priv->gfargrp[i]); |
| 827 | free_irq(priv->interruptReceive, dev); | ||
| 828 | } else { | 1539 | } else { |
| 829 | free_irq(priv->interruptTransmit, dev); | 1540 | for (i = 0; i < priv->num_grps; i++) |
| 1541 | free_irq(priv->gfargrp[i].interruptTransmit, | ||
| 1542 | &priv->gfargrp[i]); | ||
| 830 | } | 1543 | } |
| 831 | 1544 | ||
| 832 | free_skb_resources(priv); | 1545 | free_skb_resources(priv); |
| 833 | |||
| 834 | dma_free_coherent(&priv->ofdev->dev, | ||
| 835 | sizeof(struct txbd8)*priv->tx_ring_size | ||
| 836 | + sizeof(struct rxbd8)*priv->rx_ring_size, | ||
| 837 | priv->tx_bd_base, | ||
| 838 | gfar_read(®s->tbase0)); | ||
| 839 | } | 1546 | } |
| 840 | 1547 | ||
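With the single tx/rx lock pair gone, stop_gfar() has to quiesce every queue before halting the MAC: it disables interrupts locally, then takes each per-queue lock through the lock_tx_qs()/lock_rx_qs() helpers introduced elsewhere in this patch. A minimal sketch of what those helpers are assumed to do (the real ones live earlier in gianfar.c; interrupts are already off, so the plain spin_lock() form suffices):

    static void lock_tx_qs(struct gfar_private *priv)
    {
    	int i;

    	/* take every TX queue lock in index order to avoid lock inversion */
    	for (i = 0; i < priv->num_tx_queues; i++)
    		spin_lock(&priv->tx_queue[i]->txlock);
    }

    static void lock_rx_qs(struct gfar_private *priv)
    {
    	int i;

    	/* same ordering rule on the RX side */
    	for (i = 0; i < priv->num_rx_queues; i++)
    		spin_lock(&priv->rx_queue[i]->rxlock);
    }

unlock_tx_qs()/unlock_rx_qs() are assumed to release the same locks before local_irq_restore() runs.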
| 841 | /* If there are any tx skbs or rx skbs still around, free them. | 1548 | static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue) |
| 842 | * Then free tx_skbuff and rx_skbuff */ | ||
| 843 | static void free_skb_resources(struct gfar_private *priv) | ||
| 844 | { | 1549 | { |
| 845 | struct rxbd8 *rxbdp; | ||
| 846 | struct txbd8 *txbdp; | 1550 | struct txbd8 *txbdp; |
| 1551 | struct gfar_private *priv = netdev_priv(tx_queue->dev); | ||
| 847 | int i, j; | 1552 | int i, j; |
| 848 | 1553 | ||
| 849 | /* Go through all the buffer descriptors and free their data buffers */ | 1554 | txbdp = tx_queue->tx_bd_base; |
| 850 | txbdp = priv->tx_bd_base; | ||
| 851 | 1555 | ||
| 852 | for (i = 0; i < priv->tx_ring_size; i++) { | 1556 | for (i = 0; i < tx_queue->tx_ring_size; i++) { |
| 853 | if (!priv->tx_skbuff[i]) | 1557 | if (!tx_queue->tx_skbuff[i]) |
| 854 | continue; | 1558 | continue; |
| 855 | 1559 | ||
| 856 | dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr, | 1560 | dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr, |
| 857 | txbdp->length, DMA_TO_DEVICE); | 1561 | txbdp->length, DMA_TO_DEVICE); |
| 858 | txbdp->lstatus = 0; | 1562 | txbdp->lstatus = 0; |
| 859 | for (j = 0; j < skb_shinfo(priv->tx_skbuff[i])->nr_frags; j++) { | 1563 | for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags; |
| 1564 | j++) { | ||
| 860 | txbdp++; | 1565 | txbdp++; |
| 861 | dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr, | 1566 | dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr, |
| 862 | txbdp->length, DMA_TO_DEVICE); | 1567 | txbdp->length, DMA_TO_DEVICE); |
| 863 | } | 1568 | } |
| 864 | txbdp++; | 1569 | txbdp++; |
| 865 | dev_kfree_skb_any(priv->tx_skbuff[i]); | 1570 | dev_kfree_skb_any(tx_queue->tx_skbuff[i]); |
| 866 | priv->tx_skbuff[i] = NULL; | 1571 | tx_queue->tx_skbuff[i] = NULL; |
| 867 | } | 1572 | } |
| 1573 | kfree(tx_queue->tx_skbuff); | ||
| 1574 | } | ||
| 868 | 1575 | ||
| 869 | kfree(priv->tx_skbuff); | 1576 | static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue) |
| 870 | 1577 | { | |
| 871 | rxbdp = priv->rx_bd_base; | 1578 | struct rxbd8 *rxbdp; |
| 1579 | struct gfar_private *priv = netdev_priv(rx_queue->dev); | ||
| 1580 | int i; | ||
| 872 | 1581 | ||
| 873 | /* rx_skbuff is not guaranteed to be allocated, so only | 1582 | rxbdp = rx_queue->rx_bd_base; |
| 874 | * free it and its contents if it is allocated */ | ||
| 875 | if(priv->rx_skbuff != NULL) { | ||
| 876 | for (i = 0; i < priv->rx_ring_size; i++) { | ||
| 877 | if (priv->rx_skbuff[i]) { | ||
| 878 | dma_unmap_single(&priv->ofdev->dev, rxbdp->bufPtr, | ||
| 879 | priv->rx_buffer_size, | ||
| 880 | DMA_FROM_DEVICE); | ||
| 881 | 1583 | ||
| 882 | dev_kfree_skb_any(priv->rx_skbuff[i]); | 1584 | for (i = 0; i < rx_queue->rx_ring_size; i++) { |
| 883 | priv->rx_skbuff[i] = NULL; | 1585 | if (rx_queue->rx_skbuff[i]) { |
| 884 | } | 1586 | dma_unmap_single(&priv->ofdev->dev, |
| 1587 | rxbdp->bufPtr, priv->rx_buffer_size, | ||
| 1588 | DMA_FROM_DEVICE); | ||
| 1589 | dev_kfree_skb_any(rx_queue->rx_skbuff[i]); | ||
| 1590 | rx_queue->rx_skbuff[i] = NULL; | ||
| 1591 | } | ||
| 1592 | rxbdp->lstatus = 0; | ||
| 1593 | rxbdp->bufPtr = 0; | ||
| 1594 | rxbdp++; | ||
| 1595 | } | ||
| 1596 | kfree(rx_queue->rx_skbuff); | ||
| 1597 | } | ||
| 885 | 1598 | ||
| 886 | rxbdp->lstatus = 0; | 1599 | /* If there are any tx skbs or rx skbs still around, free them. |
| 887 | rxbdp->bufPtr = 0; | 1600 | * Then free tx_skbuff and rx_skbuff */ |
| 1601 | static void free_skb_resources(struct gfar_private *priv) | ||
| 1602 | { | ||
| 1603 | struct gfar_priv_tx_q *tx_queue = NULL; | ||
| 1604 | struct gfar_priv_rx_q *rx_queue = NULL; | ||
| 1605 | int i; | ||
| 888 | 1606 | ||
| 889 | rxbdp++; | 1607 | /* Go through all the buffer descriptors and free their data buffers */ |
| 890 | } | 1608 | for (i = 0; i < priv->num_tx_queues; i++) { |
| 1609 | tx_queue = priv->tx_queue[i]; | ||
| 1610 | if(!tx_queue->tx_skbuff) | ||
| 1611 | free_skb_tx_queue(tx_queue); | ||
| 1612 | } | ||
| 891 | 1613 | ||
| 892 | kfree(priv->rx_skbuff); | 1614 | for (i = 0; i < priv->num_rx_queues; i++) { |
| 1615 | rx_queue = priv->rx_queue[i]; | ||
| 1616 | if(!rx_queue->rx_skbuff) | ||
| 1617 | free_skb_rx_queue(rx_queue); | ||
| 893 | } | 1618 | } |
| 1619 | |||
| 1620 | dma_free_coherent(&priv->ofdev->dev, | ||
| 1621 | sizeof(struct txbd8) * priv->total_tx_ring_size + | ||
| 1622 | sizeof(struct rxbd8) * priv->total_rx_ring_size, | ||
| 1623 | priv->tx_queue[0]->tx_bd_base, | ||
| 1624 | priv->tx_queue[0]->tx_bd_dma_base); | ||
| 894 | } | 1625 | } |
| 895 | 1626 | ||
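The single dma_free_coherent() above only works if every descriptor ring came out of one coherent allocation anchored at tx_queue[0]->tx_bd_base. A sketch of the size that allocation and this free have to agree on, assuming gfar_alloc_skb_resources() packs all TX rings first and all RX rings after them (total_tx_ring_size and total_rx_ring_size being the sums of the per-queue ring lengths):

    /* Illustrative sizing only; mirrors the dma_free_coherent() call above. */
    static size_t gfar_bd_region_size(const struct gfar_private *priv)
    {
    	return sizeof(struct txbd8) * priv->total_tx_ring_size +
    	       sizeof(struct rxbd8) * priv->total_rx_ring_size;
    }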
| 896 | void gfar_start(struct net_device *dev) | 1627 | void gfar_start(struct net_device *dev) |
| 897 | { | 1628 | { |
| 898 | struct gfar_private *priv = netdev_priv(dev); | 1629 | struct gfar_private *priv = netdev_priv(dev); |
| 899 | struct gfar __iomem *regs = priv->regs; | 1630 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
| 900 | u32 tempval; | 1631 | u32 tempval; |
| 1632 | int i = 0; | ||
| 901 | 1633 | ||
| 902 | /* Enable Rx and Tx in MACCFG1 */ | 1634 | /* Enable Rx and Tx in MACCFG1 */ |
| 903 | tempval = gfar_read(&regs->maccfg1); | 1635 | tempval = gfar_read(&regs->maccfg1); |
| @@ -905,269 +1637,159 @@ void gfar_start(struct net_device *dev) | |||
| 905 | gfar_write(&regs->maccfg1, tempval); | 1637 | gfar_write(&regs->maccfg1, tempval); |
| 906 | 1638 | ||
| 907 | /* Initialize DMACTRL to have WWR and WOP */ | 1639 | /* Initialize DMACTRL to have WWR and WOP */ |
| 908 | tempval = gfar_read(&priv->regs->dmactrl); | 1640 | tempval = gfar_read(&regs->dmactrl); |
| 909 | tempval |= DMACTRL_INIT_SETTINGS; | 1641 | tempval |= DMACTRL_INIT_SETTINGS; |
| 910 | gfar_write(&priv->regs->dmactrl, tempval); | 1642 | gfar_write(&regs->dmactrl, tempval); |
| 911 | 1643 | ||
| 912 | /* Make sure we aren't stopped */ | 1644 | /* Make sure we aren't stopped */ |
| 913 | tempval = gfar_read(&priv->regs->dmactrl); | 1645 | tempval = gfar_read(&regs->dmactrl); |
| 914 | tempval &= ~(DMACTRL_GRS | DMACTRL_GTS); | 1646 | tempval &= ~(DMACTRL_GRS | DMACTRL_GTS); |
| 915 | gfar_write(&priv->regs->dmactrl, tempval); | 1647 | gfar_write(&regs->dmactrl, tempval); |
| 916 | 1648 | ||
| 917 | /* Clear THLT/RHLT, so that the DMA starts polling now */ | 1649 | for (i = 0; i < priv->num_grps; i++) { |
| 918 | gfar_write(&regs->tstat, TSTAT_CLEAR_THALT); | 1650 | regs = priv->gfargrp[i].regs; |
| 919 | gfar_write(&regs->rstat, RSTAT_CLEAR_RHALT); | 1651 | /* Clear THLT/RHLT, so that the DMA starts polling now */ |
| 920 | 1652 | gfar_write(&regs->tstat, priv->gfargrp[i].tstat); | |
| 921 | /* Unmask the interrupts we look for */ | 1653 | gfar_write(&regs->rstat, priv->gfargrp[i].rstat); |
| 922 | gfar_write(&regs->imask, IMASK_DEFAULT); | 1654 | /* Unmask the interrupts we look for */ |
| 1655 | gfar_write(&regs->imask, IMASK_DEFAULT); | ||
| 1656 | } | ||
| 923 | 1657 | ||
| 924 | dev->trans_start = jiffies; | 1658 | dev->trans_start = jiffies; |
| 925 | } | 1659 | } |
| 926 | 1660 | ||
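gfar_start() no longer clears a single THLT/RHLT bit; each group carries cached tstat/rstat masks covering just the queues it owns, and the transmit path later derives one queue's bit by shifting (see the TSTAT_CLEAR_THALT >> tx_queue->qindex write in gfar_start_xmit() further down). A hedged sketch of that mapping, assuming queue 0 owns the most significant halt bit as the shift implies:

    /* Illustration only: a group's tstat mask is the OR of these per-queue
     * bits for the TX queues assigned to it. */
    static inline u32 gfar_tx_halt_bit(unsigned int qindex)
    {
    	return TSTAT_CLEAR_THALT >> qindex;
    }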
| 927 | /* Bring the controller up and running */ | 1661 | void gfar_configure_coalescing(struct gfar_private *priv, |
| 928 | int startup_gfar(struct net_device *dev) | 1662 | unsigned long tx_mask, unsigned long rx_mask) |
| 929 | { | 1663 | { |
| 930 | struct txbd8 *txbdp; | 1664 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
| 931 | struct rxbd8 *rxbdp; | 1665 | u32 __iomem *baddr; |
| 932 | dma_addr_t addr = 0; | 1666 | int i = 0; |
| 933 | unsigned long vaddr; | ||
| 934 | int i; | ||
| 935 | struct gfar_private *priv = netdev_priv(dev); | ||
| 936 | struct gfar __iomem *regs = priv->regs; | ||
| 937 | int err = 0; | ||
| 938 | u32 rctrl = 0; | ||
| 939 | u32 tctrl = 0; | ||
| 940 | u32 attrs = 0; | ||
| 941 | |||
| 942 | gfar_write(&regs->imask, IMASK_INIT_CLEAR); | ||
| 943 | 1667 | ||
| 944 | /* Allocate memory for the buffer descriptors */ | 1668 | /* Backward compatible case ---- even if we enable |
| 945 | vaddr = (unsigned long) dma_alloc_coherent(&priv->ofdev->dev, | 1669 | * multiple queues, there's only single reg to program |
| 946 | sizeof (struct txbd8) * priv->tx_ring_size + | 1670 | */ |
| 947 | sizeof (struct rxbd8) * priv->rx_ring_size, | 1671 | gfar_write(&regs->txic, 0); |
| 948 | &addr, GFP_KERNEL); | 1672 | if(likely(priv->tx_queue[0]->txcoalescing)) |
| 949 | 1673 | gfar_write(&regs->txic, priv->tx_queue[0]->txic); | |
| 950 | if (vaddr == 0) { | ||
| 951 | if (netif_msg_ifup(priv)) | ||
| 952 | printk(KERN_ERR "%s: Could not allocate buffer descriptors!\n", | ||
| 953 | dev->name); | ||
| 954 | return -ENOMEM; | ||
| 955 | } | ||
| 956 | |||
| 957 | priv->tx_bd_base = (struct txbd8 *) vaddr; | ||
| 958 | |||
| 959 | /* enet DMA only understands physical addresses */ | ||
| 960 | gfar_write(&regs->tbase0, addr); | ||
| 961 | |||
| 962 | /* Start the rx descriptor ring where the tx ring leaves off */ | ||
| 963 | addr = addr + sizeof (struct txbd8) * priv->tx_ring_size; | ||
| 964 | vaddr = vaddr + sizeof (struct txbd8) * priv->tx_ring_size; | ||
| 965 | priv->rx_bd_base = (struct rxbd8 *) vaddr; | ||
| 966 | gfar_write(&regs->rbase0, addr); | ||
| 967 | |||
| 968 | /* Setup the skbuff rings */ | ||
| 969 | priv->tx_skbuff = | ||
| 970 | (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) * | ||
| 971 | priv->tx_ring_size, GFP_KERNEL); | ||
| 972 | |||
| 973 | if (NULL == priv->tx_skbuff) { | ||
| 974 | if (netif_msg_ifup(priv)) | ||
| 975 | printk(KERN_ERR "%s: Could not allocate tx_skbuff\n", | ||
| 976 | dev->name); | ||
| 977 | err = -ENOMEM; | ||
| 978 | goto tx_skb_fail; | ||
| 979 | } | ||
| 980 | |||
| 981 | for (i = 0; i < priv->tx_ring_size; i++) | ||
| 982 | priv->tx_skbuff[i] = NULL; | ||
| 983 | |||
| 984 | priv->rx_skbuff = | ||
| 985 | (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) * | ||
| 986 | priv->rx_ring_size, GFP_KERNEL); | ||
| 987 | |||
| 988 | if (NULL == priv->rx_skbuff) { | ||
| 989 | if (netif_msg_ifup(priv)) | ||
| 990 | printk(KERN_ERR "%s: Could not allocate rx_skbuff\n", | ||
| 991 | dev->name); | ||
| 992 | err = -ENOMEM; | ||
| 993 | goto rx_skb_fail; | ||
| 994 | } | ||
| 995 | |||
| 996 | for (i = 0; i < priv->rx_ring_size; i++) | ||
| 997 | priv->rx_skbuff[i] = NULL; | ||
| 998 | |||
| 999 | /* Initialize some variables in our dev structure */ | ||
| 1000 | priv->num_txbdfree = priv->tx_ring_size; | ||
| 1001 | priv->dirty_tx = priv->cur_tx = priv->tx_bd_base; | ||
| 1002 | priv->cur_rx = priv->rx_bd_base; | ||
| 1003 | priv->skb_curtx = priv->skb_dirtytx = 0; | ||
| 1004 | priv->skb_currx = 0; | ||
| 1005 | |||
| 1006 | /* Initialize Transmit Descriptor Ring */ | ||
| 1007 | txbdp = priv->tx_bd_base; | ||
| 1008 | for (i = 0; i < priv->tx_ring_size; i++) { | ||
| 1009 | txbdp->lstatus = 0; | ||
| 1010 | txbdp->bufPtr = 0; | ||
| 1011 | txbdp++; | ||
| 1012 | } | ||
| 1013 | |||
| 1014 | /* Set the last descriptor in the ring to indicate wrap */ | ||
| 1015 | txbdp--; | ||
| 1016 | txbdp->status |= TXBD_WRAP; | ||
| 1017 | |||
| 1018 | rxbdp = priv->rx_bd_base; | ||
| 1019 | for (i = 0; i < priv->rx_ring_size; i++) { | ||
| 1020 | struct sk_buff *skb; | ||
| 1021 | |||
| 1022 | skb = gfar_new_skb(dev); | ||
| 1023 | |||
| 1024 | if (!skb) { | ||
| 1025 | printk(KERN_ERR "%s: Can't allocate RX buffers\n", | ||
| 1026 | dev->name); | ||
| 1027 | 1674 | ||
| 1028 | goto err_rxalloc_fail; | 1675 | gfar_write(&regs->rxic, 0); |
| 1676 | if(unlikely(priv->rx_queue[0]->rxcoalescing)) | ||
| 1677 | gfar_write(&regs->rxic, priv->rx_queue[0]->rxic); | ||
| 1678 | |||
| 1679 | if (priv->mode == MQ_MG_MODE) { | ||
| 1680 | baddr = &regs->txic0; | ||
| 1681 | for_each_bit (i, &tx_mask, priv->num_tx_queues) { | ||
| 1682 | if (likely(priv->tx_queue[i]->txcoalescing)) { | ||
| 1683 | gfar_write(baddr + i, 0); | ||
| 1684 | gfar_write(baddr + i, priv->tx_queue[i]->txic); | ||
| 1685 | } | ||
| 1029 | } | 1686 | } |
| 1030 | 1687 | ||
| 1031 | priv->rx_skbuff[i] = skb; | 1688 | baddr = &regs->rxic0; |
| 1032 | 1689 | for_each_bit (i, &rx_mask, priv->num_rx_queues) { | |
| 1033 | gfar_new_rxbdp(dev, rxbdp, skb); | 1690 | if (likely(priv->rx_queue[i]->rxcoalescing)) { |
| 1034 | 1691 | gfar_write(baddr + i, 0); | |
| 1035 | rxbdp++; | 1692 | gfar_write(baddr + i, priv->rx_queue[i]->rxic); |
| 1693 | } | ||
| 1694 | } | ||
| 1036 | } | 1695 | } |
| 1696 | } | ||
| 1037 | 1697 | ||
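In MQ_MG_MODE the per-queue coalescing registers are reached by plain pointer arithmetic on a u32 __iomem cursor, so txic0/rxic0 are assumed to begin contiguous arrays of per-queue TXICn/RXICn registers. A small sketch of the addressing this relies on:

    /* Illustrative only: with baddr of type u32 __iomem *, "baddr + i"
     * advances by sizeof(u32) per queue, i.e. queue i lands on the i-th
     * 32-bit register after TXIC0 (or RXIC0). */
    static u32 __iomem *gfar_per_queue_ic(u32 __iomem *baddr, unsigned int i)
    {
    	return baddr + i;	/* == (u8 __iomem *)baddr + 4 * i */
    }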
| 1038 | /* Set the last descriptor in the ring to wrap */ | 1698 | static int register_grp_irqs(struct gfar_priv_grp *grp) |
| 1039 | rxbdp--; | 1699 | { |
| 1040 | rxbdp->status |= RXBD_WRAP; | 1700 | struct gfar_private *priv = grp->priv; |
| 1701 | struct net_device *dev = priv->ndev; | ||
| 1702 | int err; | ||
| 1041 | 1703 | ||
| 1042 | /* If the device has multiple interrupts, register for | 1704 | /* If the device has multiple interrupts, register for |
| 1043 | * them. Otherwise, only register for the one */ | 1705 | * them. Otherwise, only register for the one */ |
| 1044 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { | 1706 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { |
| 1045 | /* Install our interrupt handlers for Error, | 1707 | /* Install our interrupt handlers for Error, |
| 1046 | * Transmit, and Receive */ | 1708 | * Transmit, and Receive */ |
| 1047 | if (request_irq(priv->interruptError, gfar_error, | 1709 | if ((err = request_irq(grp->interruptError, gfar_error, 0, |
| 1048 | 0, priv->int_name_er, dev) < 0) { | 1710 | grp->int_name_er,grp)) < 0) { |
| 1049 | if (netif_msg_intr(priv)) | 1711 | if (netif_msg_intr(priv)) |
| 1050 | printk(KERN_ERR "%s: Can't get IRQ %d\n", | 1712 | printk(KERN_ERR "%s: Can't get IRQ %d\n", |
| 1051 | dev->name, priv->interruptError); | 1713 | dev->name, grp->interruptError); |
| 1052 | 1714 | ||
| 1053 | err = -1; | 1715 | goto err_irq_fail; |
| 1054 | goto err_irq_fail; | ||
| 1055 | } | 1716 | } |
| 1056 | 1717 | ||
| 1057 | if (request_irq(priv->interruptTransmit, gfar_transmit, | 1718 | if ((err = request_irq(grp->interruptTransmit, gfar_transmit, |
| 1058 | 0, priv->int_name_tx, dev) < 0) { | 1719 | 0, grp->int_name_tx, grp)) < 0) { |
| 1059 | if (netif_msg_intr(priv)) | 1720 | if (netif_msg_intr(priv)) |
| 1060 | printk(KERN_ERR "%s: Can't get IRQ %d\n", | 1721 | printk(KERN_ERR "%s: Can't get IRQ %d\n", |
| 1061 | dev->name, priv->interruptTransmit); | 1722 | dev->name, grp->interruptTransmit); |
| 1062 | |||
| 1063 | err = -1; | ||
| 1064 | |||
| 1065 | goto tx_irq_fail; | 1723 | goto tx_irq_fail; |
| 1066 | } | 1724 | } |
| 1067 | 1725 | ||
| 1068 | if (request_irq(priv->interruptReceive, gfar_receive, | 1726 | if ((err = request_irq(grp->interruptReceive, gfar_receive, 0, |
| 1069 | 0, priv->int_name_rx, dev) < 0) { | 1727 | grp->int_name_rx, grp)) < 0) { |
| 1070 | if (netif_msg_intr(priv)) | 1728 | if (netif_msg_intr(priv)) |
| 1071 | printk(KERN_ERR "%s: Can't get IRQ %d (receive0)\n", | 1729 | printk(KERN_ERR "%s: Can't get IRQ %d\n", |
| 1072 | dev->name, priv->interruptReceive); | 1730 | dev->name, grp->interruptReceive); |
| 1073 | |||
| 1074 | err = -1; | ||
| 1075 | goto rx_irq_fail; | 1731 | goto rx_irq_fail; |
| 1076 | } | 1732 | } |
| 1077 | } else { | 1733 | } else { |
| 1078 | if (request_irq(priv->interruptTransmit, gfar_interrupt, | 1734 | if ((err = request_irq(grp->interruptTransmit, gfar_interrupt, 0, |
| 1079 | 0, priv->int_name_tx, dev) < 0) { | 1735 | grp->int_name_tx, grp)) < 0) { |
| 1080 | if (netif_msg_intr(priv)) | 1736 | if (netif_msg_intr(priv)) |
| 1081 | printk(KERN_ERR "%s: Can't get IRQ %d\n", | 1737 | printk(KERN_ERR "%s: Can't get IRQ %d\n", |
| 1082 | dev->name, priv->interruptTransmit); | 1738 | dev->name, grp->interruptTransmit); |
| 1083 | |||
| 1084 | err = -1; | ||
| 1085 | goto err_irq_fail; | 1739 | goto err_irq_fail; |
| 1086 | } | 1740 | } |
| 1087 | } | 1741 | } |
| 1088 | 1742 | ||
| 1089 | phy_start(priv->phydev); | 1743 | return 0; |
| 1090 | |||
| 1091 | /* Configure the coalescing support */ | ||
| 1092 | gfar_write(&regs->txic, 0); | ||
| 1093 | if (priv->txcoalescing) | ||
| 1094 | gfar_write(&regs->txic, priv->txic); | ||
| 1095 | |||
| 1096 | gfar_write(&regs->rxic, 0); | ||
| 1097 | if (priv->rxcoalescing) | ||
| 1098 | gfar_write(&regs->rxic, priv->rxic); | ||
| 1099 | |||
| 1100 | if (priv->rx_csum_enable) | ||
| 1101 | rctrl |= RCTRL_CHECKSUMMING; | ||
| 1102 | 1744 | ||
| 1103 | if (priv->extended_hash) { | 1745 | rx_irq_fail: |
| 1104 | rctrl |= RCTRL_EXTHASH; | 1746 | free_irq(grp->interruptTransmit, grp); |
| 1747 | tx_irq_fail: | ||
| 1748 | free_irq(grp->interruptError, grp); | ||
| 1749 | err_irq_fail: | ||
| 1750 | return err; | ||
| 1105 | 1751 | ||
| 1106 | gfar_clear_exact_match(dev); | 1752 | } |
| 1107 | rctrl |= RCTRL_EMEN; | ||
| 1108 | } | ||
| 1109 | 1753 | ||
| 1110 | if (priv->padding) { | 1754 | /* Bring the controller up and running */ |
| 1111 | rctrl &= ~RCTRL_PAL_MASK; | 1755 | int startup_gfar(struct net_device *ndev) |
| 1112 | rctrl |= RCTRL_PADDING(priv->padding); | 1756 | { |
| 1113 | } | 1757 | struct gfar_private *priv = netdev_priv(ndev); |
| 1758 | struct gfar __iomem *regs = NULL; | ||
| 1759 | int err, i, j; | ||
| 1114 | 1760 | ||
| 1115 | /* keep vlan related bits if it's enabled */ | 1761 | for (i = 0; i < priv->num_grps; i++) { |
| 1116 | if (priv->vlgrp) { | 1762 | regs= priv->gfargrp[i].regs; |
| 1117 | rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT; | 1763 | gfar_write(&regs->imask, IMASK_INIT_CLEAR); |
| 1118 | tctrl |= TCTRL_VLINS; | ||
| 1119 | } | 1764 | } |
| 1120 | 1765 | ||
| 1121 | /* Init rctrl based on our settings */ | 1766 | regs= priv->gfargrp[0].regs; |
| 1122 | gfar_write(&priv->regs->rctrl, rctrl); | 1767 | err = gfar_alloc_skb_resources(ndev); |
| 1123 | 1768 | if (err) | |
| 1124 | if (dev->features & NETIF_F_IP_CSUM) | 1769 | return err; |
| 1125 | tctrl |= TCTRL_INIT_CSUM; | ||
| 1126 | |||
| 1127 | gfar_write(&priv->regs->tctrl, tctrl); | ||
| 1128 | |||
| 1129 | /* Set the extraction length and index */ | ||
| 1130 | attrs = ATTRELI_EL(priv->rx_stash_size) | | ||
| 1131 | ATTRELI_EI(priv->rx_stash_index); | ||
| 1132 | |||
| 1133 | gfar_write(&priv->regs->attreli, attrs); | ||
| 1134 | |||
| 1135 | /* Start with defaults, and add stashing or locking | ||
| 1136 | * depending on the approprate variables */ | ||
| 1137 | attrs = ATTR_INIT_SETTINGS; | ||
| 1138 | 1770 | ||
| 1139 | if (priv->bd_stash_en) | 1771 | gfar_init_mac(ndev); |
| 1140 | attrs |= ATTR_BDSTASH; | ||
| 1141 | 1772 | ||
| 1142 | if (priv->rx_stash_size != 0) | 1773 | for (i = 0; i < priv->num_grps; i++) { |
| 1143 | attrs |= ATTR_BUFSTASH; | 1774 | err = register_grp_irqs(&priv->gfargrp[i]); |
| 1775 | if (err) { | ||
| 1776 | for (j = 0; j < i; j++) | ||
| 1777 | free_grp_irqs(&priv->gfargrp[j]); | ||
| 1778 | goto irq_fail; | ||
| 1779 | } | ||
| 1780 | } | ||
| 1144 | 1781 | ||
| 1145 | gfar_write(&priv->regs->attr, attrs); | 1782 | /* Start the controller */ |
| 1783 | gfar_start(ndev); | ||
| 1146 | 1784 | ||
| 1147 | gfar_write(&priv->regs->fifo_tx_thr, priv->fifo_threshold); | 1785 | phy_start(priv->phydev); |
| 1148 | gfar_write(&priv->regs->fifo_tx_starve, priv->fifo_starve); | ||
| 1149 | gfar_write(&priv->regs->fifo_tx_starve_shutoff, priv->fifo_starve_off); | ||
| 1150 | 1786 | ||
| 1151 | /* Start the controller */ | 1787 | gfar_configure_coalescing(priv, 0xFF, 0xFF); |
| 1152 | gfar_start(dev); | ||
| 1153 | 1788 | ||
| 1154 | return 0; | 1789 | return 0; |
| 1155 | 1790 | ||
| 1156 | rx_irq_fail: | 1791 | irq_fail: |
| 1157 | free_irq(priv->interruptTransmit, dev); | ||
| 1158 | tx_irq_fail: | ||
| 1159 | free_irq(priv->interruptError, dev); | ||
| 1160 | err_irq_fail: | ||
| 1161 | err_rxalloc_fail: | ||
| 1162 | rx_skb_fail: | ||
| 1163 | free_skb_resources(priv); | 1792 | free_skb_resources(priv); |
| 1164 | tx_skb_fail: | ||
| 1165 | dma_free_coherent(&priv->ofdev->dev, | ||
| 1166 | sizeof(struct txbd8)*priv->tx_ring_size | ||
| 1167 | + sizeof(struct rxbd8)*priv->rx_ring_size, | ||
| 1168 | priv->tx_bd_base, | ||
| 1169 | gfar_read(&regs->tbase0)); | ||
| 1170 | |||
| 1171 | return err; | 1793 | return err; |
| 1172 | } | 1794 | } |
| 1173 | 1795 | ||
| @@ -1178,7 +1800,7 @@ static int gfar_enet_open(struct net_device *dev) | |||
| 1178 | struct gfar_private *priv = netdev_priv(dev); | 1800 | struct gfar_private *priv = netdev_priv(dev); |
| 1179 | int err; | 1801 | int err; |
| 1180 | 1802 | ||
| 1181 | napi_enable(&priv->napi); | 1803 | enable_napi(priv); |
| 1182 | 1804 | ||
| 1183 | skb_queue_head_init(&priv->rx_recycle); | 1805 | skb_queue_head_init(&priv->rx_recycle); |
| 1184 | 1806 | ||
| @@ -1189,18 +1811,18 @@ static int gfar_enet_open(struct net_device *dev) | |||
| 1189 | 1811 | ||
| 1190 | err = init_phy(dev); | 1812 | err = init_phy(dev); |
| 1191 | 1813 | ||
| 1192 | if(err) { | 1814 | if (err) { |
| 1193 | napi_disable(&priv->napi); | 1815 | disable_napi(priv); |
| 1194 | return err; | 1816 | return err; |
| 1195 | } | 1817 | } |
| 1196 | 1818 | ||
| 1197 | err = startup_gfar(dev); | 1819 | err = startup_gfar(dev); |
| 1198 | if (err) { | 1820 | if (err) { |
| 1199 | napi_disable(&priv->napi); | 1821 | disable_napi(priv); |
| 1200 | return err; | 1822 | return err; |
| 1201 | } | 1823 | } |
| 1202 | 1824 | ||
| 1203 | netif_start_queue(dev); | 1825 | netif_tx_start_all_queues(dev); |
| 1204 | 1826 | ||
| 1205 | device_set_wakeup_enable(&dev->dev, priv->wol_en); | 1827 | device_set_wakeup_enable(&dev->dev, priv->wol_en); |
| 1206 | 1828 | ||
| @@ -1269,15 +1891,23 @@ static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base, | |||
| 1269 | static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) | 1891 | static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) |
| 1270 | { | 1892 | { |
| 1271 | struct gfar_private *priv = netdev_priv(dev); | 1893 | struct gfar_private *priv = netdev_priv(dev); |
| 1894 | struct gfar_priv_tx_q *tx_queue = NULL; | ||
| 1895 | struct netdev_queue *txq; | ||
| 1896 | struct gfar __iomem *regs = NULL; | ||
| 1272 | struct txfcb *fcb = NULL; | 1897 | struct txfcb *fcb = NULL; |
| 1273 | struct txbd8 *txbdp, *txbdp_start, *base; | 1898 | struct txbd8 *txbdp, *txbdp_start, *base; |
| 1274 | u32 lstatus; | 1899 | u32 lstatus; |
| 1275 | int i; | 1900 | int i, rq = 0; |
| 1276 | u32 bufaddr; | 1901 | u32 bufaddr; |
| 1277 | unsigned long flags; | 1902 | unsigned long flags; |
| 1278 | unsigned int nr_frags, length; | 1903 | unsigned int nr_frags, length; |
| 1279 | 1904 | ||
| 1280 | base = priv->tx_bd_base; | 1905 | |
| 1906 | rq = skb->queue_mapping; | ||
| 1907 | tx_queue = priv->tx_queue[rq]; | ||
| 1908 | txq = netdev_get_tx_queue(dev, rq); | ||
| 1909 | base = tx_queue->tx_bd_base; | ||
| 1910 | regs = tx_queue->grp->regs; | ||
| 1281 | 1911 | ||
| 1282 | /* make space for additional header when fcb is needed */ | 1912 | /* make space for additional header when fcb is needed */ |
| 1283 | if (((skb->ip_summed == CHECKSUM_PARTIAL) || | 1913 | if (((skb->ip_summed == CHECKSUM_PARTIAL) || |
| @@ -1298,21 +1928,18 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 1298 | /* total number of fragments in the SKB */ | 1928 | /* total number of fragments in the SKB */ |
| 1299 | nr_frags = skb_shinfo(skb)->nr_frags; | 1929 | nr_frags = skb_shinfo(skb)->nr_frags; |
| 1300 | 1930 | ||
| 1301 | spin_lock_irqsave(&priv->txlock, flags); | ||
| 1302 | |||
| 1303 | /* check if there is space to queue this packet */ | 1931 | /* check if there is space to queue this packet */ |
| 1304 | if ((nr_frags+1) > priv->num_txbdfree) { | 1932 | if ((nr_frags+1) > tx_queue->num_txbdfree) { |
| 1305 | /* no space, stop the queue */ | 1933 | /* no space, stop the queue */ |
| 1306 | netif_stop_queue(dev); | 1934 | netif_tx_stop_queue(txq); |
| 1307 | dev->stats.tx_fifo_errors++; | 1935 | dev->stats.tx_fifo_errors++; |
| 1308 | spin_unlock_irqrestore(&priv->txlock, flags); | ||
| 1309 | return NETDEV_TX_BUSY; | 1936 | return NETDEV_TX_BUSY; |
| 1310 | } | 1937 | } |
| 1311 | 1938 | ||
| 1312 | /* Update transmit stats */ | 1939 | /* Update transmit stats */ |
| 1313 | dev->stats.tx_bytes += skb->len; | 1940 | dev->stats.tx_bytes += skb->len; |
| 1314 | 1941 | ||
| 1315 | txbdp = txbdp_start = priv->cur_tx; | 1942 | txbdp = txbdp_start = tx_queue->cur_tx; |
| 1316 | 1943 | ||
| 1317 | if (nr_frags == 0) { | 1944 | if (nr_frags == 0) { |
| 1318 | lstatus = txbdp->lstatus | BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); | 1945 | lstatus = txbdp->lstatus | BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); |
| @@ -1320,7 +1947,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 1320 | /* Place the fragment addresses and lengths into the TxBDs */ | 1947 | /* Place the fragment addresses and lengths into the TxBDs */ |
| 1321 | for (i = 0; i < nr_frags; i++) { | 1948 | for (i = 0; i < nr_frags; i++) { |
| 1322 | /* Point at the next BD, wrapping as needed */ | 1949 | /* Point at the next BD, wrapping as needed */ |
| 1323 | txbdp = next_txbd(txbdp, base, priv->tx_ring_size); | 1950 | txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size); |
| 1324 | 1951 | ||
| 1325 | length = skb_shinfo(skb)->frags[i].size; | 1952 | length = skb_shinfo(skb)->frags[i].size; |
| 1326 | 1953 | ||
| @@ -1362,13 +1989,27 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 1362 | } | 1989 | } |
| 1363 | 1990 | ||
| 1364 | /* setup the TxBD length and buffer pointer for the first BD */ | 1991 | /* setup the TxBD length and buffer pointer for the first BD */ |
| 1365 | priv->tx_skbuff[priv->skb_curtx] = skb; | 1992 | tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb; |
| 1366 | txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data, | 1993 | txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data, |
| 1367 | skb_headlen(skb), DMA_TO_DEVICE); | 1994 | skb_headlen(skb), DMA_TO_DEVICE); |
| 1368 | 1995 | ||
| 1369 | lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb); | 1996 | lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb); |
| 1370 | 1997 | ||
| 1371 | /* | 1998 | /* |
| 1999 | * We can work in parallel with gfar_clean_tx_ring(), except | ||
| 2000 | * when modifying num_txbdfree. Note that we didn't grab the lock | ||
| 2001 | * when we were reading the num_txbdfree and checking for available | ||
| 2002 | * space, that's because outside of this function it can only grow, | ||
| 2003 | * and once we've got needed space, it cannot suddenly disappear. | ||
| 2004 | * | ||
| 2005 | * The lock also protects us from gfar_error(), which can modify | ||
| 2006 | * regs->tstat and thus retrigger the transfers, which is why we | ||
| 2007 | * also must grab the lock before setting ready bit for the first | ||
| 2008 | * to be transmitted BD. | ||
| 2009 | */ | ||
| 2010 | spin_lock_irqsave(&tx_queue->txlock, flags); | ||
| 2011 | |||
| 2012 | /* | ||
| 1372 | * The powerpc-specific eieio() is used, as wmb() has too strong | 2013 | * The powerpc-specific eieio() is used, as wmb() has too strong |
| 1373 | * semantics (it requires synchronization between cacheable and | 2014 | * semantics (it requires synchronization between cacheable and |
| 1374 | * uncacheable mappings, which eieio doesn't provide and which we | 2015 | * uncacheable mappings, which eieio doesn't provide and which we |
| @@ -1382,29 +2023,29 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 1382 | 2023 | ||
| 1383 | /* Update the current skb pointer to the next entry we will use | 2024 | /* Update the current skb pointer to the next entry we will use |
| 1384 | * (wrapping if necessary) */ | 2025 | * (wrapping if necessary) */ |
| 1385 | priv->skb_curtx = (priv->skb_curtx + 1) & | 2026 | tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) & |
| 1386 | TX_RING_MOD_MASK(priv->tx_ring_size); | 2027 | TX_RING_MOD_MASK(tx_queue->tx_ring_size); |
| 1387 | 2028 | ||
| 1388 | priv->cur_tx = next_txbd(txbdp, base, priv->tx_ring_size); | 2029 | tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size); |
| 1389 | 2030 | ||
| 1390 | /* reduce TxBD free count */ | 2031 | /* reduce TxBD free count */ |
| 1391 | priv->num_txbdfree -= (nr_frags + 1); | 2032 | tx_queue->num_txbdfree -= (nr_frags + 1); |
| 1392 | 2033 | ||
| 1393 | dev->trans_start = jiffies; | 2034 | dev->trans_start = jiffies; |
| 1394 | 2035 | ||
| 1395 | /* If the next BD still needs to be cleaned up, then the bds | 2036 | /* If the next BD still needs to be cleaned up, then the bds |
| 1396 | are full. We need to tell the kernel to stop sending us stuff. */ | 2037 | are full. We need to tell the kernel to stop sending us stuff. */ |
| 1397 | if (!priv->num_txbdfree) { | 2038 | if (!tx_queue->num_txbdfree) { |
| 1398 | netif_stop_queue(dev); | 2039 | netif_tx_stop_queue(txq); |
| 1399 | 2040 | ||
| 1400 | dev->stats.tx_fifo_errors++; | 2041 | dev->stats.tx_fifo_errors++; |
| 1401 | } | 2042 | } |
| 1402 | 2043 | ||
| 1403 | /* Tell the DMA to go go go */ | 2044 | /* Tell the DMA to go go go */ |
| 1404 | gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT); | 2045 | gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex); |
| 1405 | 2046 | ||
| 1406 | /* Unlock priv */ | 2047 | /* Unlock priv */ |
| 1407 | spin_unlock_irqrestore(&priv->txlock, flags); | 2048 | spin_unlock_irqrestore(&tx_queue->txlock, flags); |
| 1408 | 2049 | ||
| 1409 | return NETDEV_TX_OK; | 2050 | return NETDEV_TX_OK; |
| 1410 | } | 2051 | } |
| @@ -1414,7 +2055,7 @@ static int gfar_close(struct net_device *dev) | |||
| 1414 | { | 2055 | { |
| 1415 | struct gfar_private *priv = netdev_priv(dev); | 2056 | struct gfar_private *priv = netdev_priv(dev); |
| 1416 | 2057 | ||
| 1417 | napi_disable(&priv->napi); | 2058 | disable_napi(priv); |
| 1418 | 2059 | ||
| 1419 | skb_queue_purge(&priv->rx_recycle); | 2060 | skb_queue_purge(&priv->rx_recycle); |
| 1420 | cancel_work_sync(&priv->reset_task); | 2061 | cancel_work_sync(&priv->reset_task); |
| @@ -1424,7 +2065,7 @@ static int gfar_close(struct net_device *dev) | |||
| 1424 | phy_disconnect(priv->phydev); | 2065 | phy_disconnect(priv->phydev); |
| 1425 | priv->phydev = NULL; | 2066 | priv->phydev = NULL; |
| 1426 | 2067 | ||
| 1427 | netif_stop_queue(dev); | 2068 | netif_tx_stop_all_queues(dev); |
| 1428 | 2069 | ||
| 1429 | return 0; | 2070 | return 0; |
| 1430 | } | 2071 | } |
| @@ -1443,50 +2084,55 @@ static void gfar_vlan_rx_register(struct net_device *dev, | |||
| 1443 | struct vlan_group *grp) | 2084 | struct vlan_group *grp) |
| 1444 | { | 2085 | { |
| 1445 | struct gfar_private *priv = netdev_priv(dev); | 2086 | struct gfar_private *priv = netdev_priv(dev); |
| 2087 | struct gfar __iomem *regs = NULL; | ||
| 1446 | unsigned long flags; | 2088 | unsigned long flags; |
| 1447 | u32 tempval; | 2089 | u32 tempval; |
| 1448 | 2090 | ||
| 1449 | spin_lock_irqsave(&priv->rxlock, flags); | 2091 | regs = priv->gfargrp[0].regs; |
| 2092 | local_irq_save(flags); | ||
| 2093 | lock_rx_qs(priv); | ||
| 1450 | 2094 | ||
| 1451 | priv->vlgrp = grp; | 2095 | priv->vlgrp = grp; |
| 1452 | 2096 | ||
| 1453 | if (grp) { | 2097 | if (grp) { |
| 1454 | /* Enable VLAN tag insertion */ | 2098 | /* Enable VLAN tag insertion */ |
| 1455 | tempval = gfar_read(&priv->regs->tctrl); | 2099 | tempval = gfar_read(&regs->tctrl); |
| 1456 | tempval |= TCTRL_VLINS; | 2100 | tempval |= TCTRL_VLINS; |
| 1457 | 2101 | ||
| 1458 | gfar_write(&priv->regs->tctrl, tempval); | 2102 | gfar_write(&regs->tctrl, tempval); |
| 1459 | 2103 | ||
| 1460 | /* Enable VLAN tag extraction */ | 2104 | /* Enable VLAN tag extraction */ |
| 1461 | tempval = gfar_read(&priv->regs->rctrl); | 2105 | tempval = gfar_read(&regs->rctrl); |
| 1462 | tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT); | 2106 | tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT); |
| 1463 | gfar_write(&priv->regs->rctrl, tempval); | 2107 | gfar_write(&regs->rctrl, tempval); |
| 1464 | } else { | 2108 | } else { |
| 1465 | /* Disable VLAN tag insertion */ | 2109 | /* Disable VLAN tag insertion */ |
| 1466 | tempval = gfar_read(&priv->regs->tctrl); | 2110 | tempval = gfar_read(&regs->tctrl); |
| 1467 | tempval &= ~TCTRL_VLINS; | 2111 | tempval &= ~TCTRL_VLINS; |
| 1468 | gfar_write(&priv->regs->tctrl, tempval); | 2112 | gfar_write(&regs->tctrl, tempval); |
| 1469 | 2113 | ||
| 1470 | /* Disable VLAN tag extraction */ | 2114 | /* Disable VLAN tag extraction */ |
| 1471 | tempval = gfar_read(&priv->regs->rctrl); | 2115 | tempval = gfar_read(&regs->rctrl); |
| 1472 | tempval &= ~RCTRL_VLEX; | 2116 | tempval &= ~RCTRL_VLEX; |
| 1473 | /* If parse is no longer required, then disable parser */ | 2117 | /* If parse is no longer required, then disable parser */ |
| 1474 | if (tempval & RCTRL_REQ_PARSER) | 2118 | if (tempval & RCTRL_REQ_PARSER) |
| 1475 | tempval |= RCTRL_PRSDEP_INIT; | 2119 | tempval |= RCTRL_PRSDEP_INIT; |
| 1476 | else | 2120 | else |
| 1477 | tempval &= ~RCTRL_PRSDEP_INIT; | 2121 | tempval &= ~RCTRL_PRSDEP_INIT; |
| 1478 | gfar_write(&priv->regs->rctrl, tempval); | 2122 | gfar_write(&regs->rctrl, tempval); |
| 1479 | } | 2123 | } |
| 1480 | 2124 | ||
| 1481 | gfar_change_mtu(dev, dev->mtu); | 2125 | gfar_change_mtu(dev, dev->mtu); |
| 1482 | 2126 | ||
| 1483 | spin_unlock_irqrestore(&priv->rxlock, flags); | 2127 | unlock_rx_qs(priv); |
| 2128 | local_irq_restore(flags); | ||
| 1484 | } | 2129 | } |
| 1485 | 2130 | ||
| 1486 | static int gfar_change_mtu(struct net_device *dev, int new_mtu) | 2131 | static int gfar_change_mtu(struct net_device *dev, int new_mtu) |
| 1487 | { | 2132 | { |
| 1488 | int tempsize, tempval; | 2133 | int tempsize, tempval; |
| 1489 | struct gfar_private *priv = netdev_priv(dev); | 2134 | struct gfar_private *priv = netdev_priv(dev); |
| 2135 | struct gfar __iomem *regs = priv->gfargrp[0].regs; | ||
| 1490 | int oldsize = priv->rx_buffer_size; | 2136 | int oldsize = priv->rx_buffer_size; |
| 1491 | int frame_size = new_mtu + ETH_HLEN; | 2137 | int frame_size = new_mtu + ETH_HLEN; |
| 1492 | 2138 | ||
| @@ -1518,20 +2164,20 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu) | |||
| 1518 | 2164 | ||
| 1519 | dev->mtu = new_mtu; | 2165 | dev->mtu = new_mtu; |
| 1520 | 2166 | ||
| 1521 | gfar_write(&priv->regs->mrblr, priv->rx_buffer_size); | 2167 | gfar_write(&regs->mrblr, priv->rx_buffer_size); |
| 1522 | gfar_write(&priv->regs->maxfrm, priv->rx_buffer_size); | 2168 | gfar_write(&regs->maxfrm, priv->rx_buffer_size); |
| 1523 | 2169 | ||
| 1524 | /* If the mtu is larger than the max size for standard | 2170 | /* If the mtu is larger than the max size for standard |
| 1525 | * ethernet frames (ie, a jumbo frame), then set maccfg2 | 2171 | * ethernet frames (ie, a jumbo frame), then set maccfg2 |
| 1526 | * to allow huge frames, and to check the length */ | 2172 | * to allow huge frames, and to check the length */ |
| 1527 | tempval = gfar_read(&priv->regs->maccfg2); | 2173 | tempval = gfar_read(&regs->maccfg2); |
| 1528 | 2174 | ||
| 1529 | if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE) | 2175 | if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE) |
| 1530 | tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK); | 2176 | tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK); |
| 1531 | else | 2177 | else |
| 1532 | tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK); | 2178 | tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK); |
| 1533 | 2179 | ||
| 1534 | gfar_write(&priv->regs->maccfg2, tempval); | 2180 | gfar_write(&regs->maccfg2, tempval); |
| 1535 | 2181 | ||
| 1536 | if ((oldsize != tempsize) && (dev->flags & IFF_UP)) | 2182 | if ((oldsize != tempsize) && (dev->flags & IFF_UP)) |
| 1537 | startup_gfar(dev); | 2183 | startup_gfar(dev); |
| @@ -1551,10 +2197,10 @@ static void gfar_reset_task(struct work_struct *work) | |||
| 1551 | struct net_device *dev = priv->ndev; | 2197 | struct net_device *dev = priv->ndev; |
| 1552 | 2198 | ||
| 1553 | if (dev->flags & IFF_UP) { | 2199 | if (dev->flags & IFF_UP) { |
| 1554 | netif_stop_queue(dev); | 2200 | netif_tx_stop_all_queues(dev); |
| 1555 | stop_gfar(dev); | 2201 | stop_gfar(dev); |
| 1556 | startup_gfar(dev); | 2202 | startup_gfar(dev); |
| 1557 | netif_start_queue(dev); | 2203 | netif_tx_start_all_queues(dev); |
| 1558 | } | 2204 | } |
| 1559 | 2205 | ||
| 1560 | netif_tx_schedule_all(dev); | 2206 | netif_tx_schedule_all(dev); |
| @@ -1569,24 +2215,29 @@ static void gfar_timeout(struct net_device *dev) | |||
| 1569 | } | 2215 | } |
| 1570 | 2216 | ||
| 1571 | /* Interrupt Handler for Transmit complete */ | 2217 | /* Interrupt Handler for Transmit complete */ |
| 1572 | static int gfar_clean_tx_ring(struct net_device *dev) | 2218 | static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) |
| 1573 | { | 2219 | { |
| 2220 | struct net_device *dev = tx_queue->dev; | ||
| 1574 | struct gfar_private *priv = netdev_priv(dev); | 2221 | struct gfar_private *priv = netdev_priv(dev); |
| 2222 | struct gfar_priv_rx_q *rx_queue = NULL; | ||
| 1575 | struct txbd8 *bdp; | 2223 | struct txbd8 *bdp; |
| 1576 | struct txbd8 *lbdp = NULL; | 2224 | struct txbd8 *lbdp = NULL; |
| 1577 | struct txbd8 *base = priv->tx_bd_base; | 2225 | struct txbd8 *base = tx_queue->tx_bd_base; |
| 1578 | struct sk_buff *skb; | 2226 | struct sk_buff *skb; |
| 1579 | int skb_dirtytx; | 2227 | int skb_dirtytx; |
| 1580 | int tx_ring_size = priv->tx_ring_size; | 2228 | int tx_ring_size = tx_queue->tx_ring_size; |
| 1581 | int frags = 0; | 2229 | int frags = 0; |
| 1582 | int i; | 2230 | int i; |
| 1583 | int howmany = 0; | 2231 | int howmany = 0; |
| 1584 | u32 lstatus; | 2232 | u32 lstatus; |
| 1585 | 2233 | ||
| 1586 | bdp = priv->dirty_tx; | 2234 | rx_queue = priv->rx_queue[tx_queue->qindex]; |
| 1587 | skb_dirtytx = priv->skb_dirtytx; | 2235 | bdp = tx_queue->dirty_tx; |
| 2236 | skb_dirtytx = tx_queue->skb_dirtytx; | ||
| 2237 | |||
| 2238 | while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) { | ||
| 2239 | unsigned long flags; | ||
| 1588 | 2240 | ||
| 1589 | while ((skb = priv->tx_skbuff[skb_dirtytx])) { | ||
| 1590 | frags = skb_shinfo(skb)->nr_frags; | 2241 | frags = skb_shinfo(skb)->nr_frags; |
| 1591 | lbdp = skip_txbd(bdp, frags, base, tx_ring_size); | 2242 | lbdp = skip_txbd(bdp, frags, base, tx_ring_size); |
| 1592 | 2243 | ||
| @@ -1618,82 +2269,73 @@ static int gfar_clean_tx_ring(struct net_device *dev) | |||
| 1618 | * If there's room in the queue (limit it to rx_buffer_size) | 2269 | * If there's room in the queue (limit it to rx_buffer_size) |
| 1619 | * we add this skb back into the pool, if it's the right size | 2270 | * we add this skb back into the pool, if it's the right size |
| 1620 | */ | 2271 | */ |
| 1621 | if (skb_queue_len(&priv->rx_recycle) < priv->rx_ring_size && | 2272 | if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size && |
| 1622 | skb_recycle_check(skb, priv->rx_buffer_size + | 2273 | skb_recycle_check(skb, priv->rx_buffer_size + |
| 1623 | RXBUF_ALIGNMENT)) | 2274 | RXBUF_ALIGNMENT)) |
| 1624 | __skb_queue_head(&priv->rx_recycle, skb); | 2275 | __skb_queue_head(&priv->rx_recycle, skb); |
| 1625 | else | 2276 | else |
| 1626 | dev_kfree_skb_any(skb); | 2277 | dev_kfree_skb_any(skb); |
| 1627 | 2278 | ||
| 1628 | priv->tx_skbuff[skb_dirtytx] = NULL; | 2279 | tx_queue->tx_skbuff[skb_dirtytx] = NULL; |
| 1629 | 2280 | ||
| 1630 | skb_dirtytx = (skb_dirtytx + 1) & | 2281 | skb_dirtytx = (skb_dirtytx + 1) & |
| 1631 | TX_RING_MOD_MASK(tx_ring_size); | 2282 | TX_RING_MOD_MASK(tx_ring_size); |
| 1632 | 2283 | ||
| 1633 | howmany++; | 2284 | howmany++; |
| 1634 | priv->num_txbdfree += frags + 1; | 2285 | spin_lock_irqsave(&tx_queue->txlock, flags); |
| 2286 | tx_queue->num_txbdfree += frags + 1; | ||
| 2287 | spin_unlock_irqrestore(&tx_queue->txlock, flags); | ||
| 1635 | } | 2288 | } |
| 1636 | 2289 | ||
| 1637 | /* If we freed a buffer, we can restart transmission, if necessary */ | 2290 | /* If we freed a buffer, we can restart transmission, if necessary */ |
| 1638 | if (netif_queue_stopped(dev) && priv->num_txbdfree) | 2291 | if (__netif_subqueue_stopped(dev, tx_queue->qindex) && tx_queue->num_txbdfree) |
| 1639 | netif_wake_queue(dev); | 2292 | netif_wake_subqueue(dev, tx_queue->qindex); |
| 1640 | 2293 | ||
| 1641 | /* Update dirty indicators */ | 2294 | /* Update dirty indicators */ |
| 1642 | priv->skb_dirtytx = skb_dirtytx; | 2295 | tx_queue->skb_dirtytx = skb_dirtytx; |
| 1643 | priv->dirty_tx = bdp; | 2296 | tx_queue->dirty_tx = bdp; |
| 1644 | 2297 | ||
| 1645 | dev->stats.tx_packets += howmany; | 2298 | dev->stats.tx_packets += howmany; |
| 1646 | 2299 | ||
| 1647 | return howmany; | 2300 | return howmany; |
| 1648 | } | 2301 | } |
| 1649 | 2302 | ||
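Completed TX skbs that still fit an RX buffer are not freed here; they are parked on priv->rx_recycle, and gfar_new_skb() (used on the RX refill paths elsewhere in this diff) is assumed to pull from that pool before allocating fresh buffers. A short usage sketch of the recycling decision, with the same size rule as the code above:

    /* Recycle a finished TX skb into the RX pool when the pool has room and
     * skb_recycle_check() says it is big and clean enough to serve as an RX
     * buffer of rx_buffer_size + RXBUF_ALIGNMENT bytes. */
    if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size &&
        skb_recycle_check(skb, priv->rx_buffer_size + RXBUF_ALIGNMENT))
    	__skb_queue_head(&priv->rx_recycle, skb);
    else
    	dev_kfree_skb_any(skb);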
| 1650 | static void gfar_schedule_cleanup(struct net_device *dev) | 2303 | static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp) |
| 1651 | { | 2304 | { |
| 1652 | struct gfar_private *priv = netdev_priv(dev); | ||
| 1653 | unsigned long flags; | 2305 | unsigned long flags; |
| 1654 | 2306 | ||
| 1655 | spin_lock_irqsave(&priv->txlock, flags); | 2307 | spin_lock_irqsave(&gfargrp->grplock, flags); |
| 1656 | spin_lock(&priv->rxlock); | 2308 | if (napi_schedule_prep(&gfargrp->napi)) { |
| 1657 | 2309 | gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED); | |
| 1658 | if (napi_schedule_prep(&priv->napi)) { | 2310 | __napi_schedule(&gfargrp->napi); |
| 1659 | gfar_write(&priv->regs->imask, IMASK_RTX_DISABLED); | ||
| 1660 | __napi_schedule(&priv->napi); | ||
| 1661 | } else { | 2311 | } else { |
| 1662 | /* | 2312 | /* |
| 1663 | * Clear IEVENT, so interrupts aren't called again | 2313 | * Clear IEVENT, so interrupts aren't called again |
| 1664 | * because of the packets that have already arrived. | 2314 | * because of the packets that have already arrived. |
| 1665 | */ | 2315 | */ |
| 1666 | gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK); | 2316 | gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK); |
| 1667 | } | 2317 | } |
| 2318 | spin_unlock_irqrestore(&gfargrp->grplock, flags); | ||
| 1668 | 2319 | ||
| 1669 | spin_unlock(&priv->rxlock); | ||
| 1670 | spin_unlock_irqrestore(&priv->txlock, flags); | ||
| 1671 | } | 2320 | } |
| 1672 | 2321 | ||
| 1673 | /* Interrupt Handler for Transmit complete */ | 2322 | /* Interrupt Handler for Transmit complete */ |
| 1674 | static irqreturn_t gfar_transmit(int irq, void *dev_id) | 2323 | static irqreturn_t gfar_transmit(int irq, void *grp_id) |
| 1675 | { | 2324 | { |
| 1676 | gfar_schedule_cleanup((struct net_device *)dev_id); | 2325 | gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id); |
| 1677 | return IRQ_HANDLED; | 2326 | return IRQ_HANDLED; |
| 1678 | } | 2327 | } |
| 1679 | 2328 | ||
| 1680 | static void gfar_new_rxbdp(struct net_device *dev, struct rxbd8 *bdp, | 2329 | static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, |
| 1681 | struct sk_buff *skb) | 2330 | struct sk_buff *skb) |
| 1682 | { | 2331 | { |
| 2332 | struct net_device *dev = rx_queue->dev; | ||
| 1683 | struct gfar_private *priv = netdev_priv(dev); | 2333 | struct gfar_private *priv = netdev_priv(dev); |
| 1684 | u32 lstatus; | 2334 | dma_addr_t buf; |
| 1685 | |||
| 1686 | bdp->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data, | ||
| 1687 | priv->rx_buffer_size, DMA_FROM_DEVICE); | ||
| 1688 | |||
| 1689 | lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT); | ||
| 1690 | |||
| 1691 | if (bdp == priv->rx_bd_base + priv->rx_ring_size - 1) | ||
| 1692 | lstatus |= BD_LFLAG(RXBD_WRAP); | ||
| 1693 | |||
| 1694 | eieio(); | ||
| 1695 | 2335 | ||
| 1696 | bdp->lstatus = lstatus; | 2336 | buf = dma_map_single(&priv->ofdev->dev, skb->data, |
| 2337 | priv->rx_buffer_size, DMA_FROM_DEVICE); | ||
| 2338 | gfar_init_rxbdp(rx_queue, bdp, buf); | ||
| 1697 | } | 2339 | } |
| 1698 | 2340 | ||
| 1699 | 2341 | ||
| @@ -1760,9 +2402,9 @@ static inline void count_errors(unsigned short status, struct net_device *dev) | |||
| 1760 | } | 2402 | } |
| 1761 | } | 2403 | } |
| 1762 | 2404 | ||
| 1763 | irqreturn_t gfar_receive(int irq, void *dev_id) | 2405 | irqreturn_t gfar_receive(int irq, void *grp_id) |
| 1764 | { | 2406 | { |
| 1765 | gfar_schedule_cleanup((struct net_device *)dev_id); | 2407 | gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id); |
| 1766 | return IRQ_HANDLED; | 2408 | return IRQ_HANDLED; |
| 1767 | } | 2409 | } |
| 1768 | 2410 | ||
| @@ -1792,6 +2434,7 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, | |||
| 1792 | fcb = (struct rxfcb *)skb->data; | 2434 | fcb = (struct rxfcb *)skb->data; |
| 1793 | 2435 | ||
| 1794 | /* Remove the FCB from the skb */ | 2436 | /* Remove the FCB from the skb */ |
| 2437 | skb_set_queue_mapping(skb, fcb->rq); | ||
| 1795 | /* Remove the padded bytes, if there are any */ | 2438 | /* Remove the padded bytes, if there are any */ |
| 1796 | if (amount_pull) | 2439 | if (amount_pull) |
| 1797 | skb_pull(skb, amount_pull); | 2440 | skb_pull(skb, amount_pull); |
| @@ -1818,8 +2461,9 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, | |||
| 1818 | * until the budget/quota has been reached. Returns the number | 2461 | * until the budget/quota has been reached. Returns the number |
| 1819 | * of frames handled | 2462 | * of frames handled |
| 1820 | */ | 2463 | */ |
| 1821 | int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit) | 2464 | int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) |
| 1822 | { | 2465 | { |
| 2466 | struct net_device *dev = rx_queue->dev; | ||
| 1823 | struct rxbd8 *bdp, *base; | 2467 | struct rxbd8 *bdp, *base; |
| 1824 | struct sk_buff *skb; | 2468 | struct sk_buff *skb; |
| 1825 | int pkt_len; | 2469 | int pkt_len; |
| @@ -1828,8 +2472,8 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit) | |||
| 1828 | struct gfar_private *priv = netdev_priv(dev); | 2472 | struct gfar_private *priv = netdev_priv(dev); |
| 1829 | 2473 | ||
| 1830 | /* Get the first full descriptor */ | 2474 | /* Get the first full descriptor */ |
| 1831 | bdp = priv->cur_rx; | 2475 | bdp = rx_queue->cur_rx; |
| 1832 | base = priv->rx_bd_base; | 2476 | base = rx_queue->rx_bd_base; |
| 1833 | 2477 | ||
| 1834 | amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0) + | 2478 | amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0) + |
| 1835 | priv->padding; | 2479 | priv->padding; |
| @@ -1841,7 +2485,7 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit) | |||
| 1841 | /* Add another skb for the future */ | 2485 | /* Add another skb for the future */ |
| 1842 | newskb = gfar_new_skb(dev); | 2486 | newskb = gfar_new_skb(dev); |
| 1843 | 2487 | ||
| 1844 | skb = priv->rx_skbuff[priv->skb_currx]; | 2488 | skb = rx_queue->rx_skbuff[rx_queue->skb_currx]; |
| 1845 | 2489 | ||
| 1846 | dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr, | 2490 | dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr, |
| 1847 | priv->rx_buffer_size, DMA_FROM_DEVICE); | 2491 | priv->rx_buffer_size, DMA_FROM_DEVICE); |
| @@ -1875,8 +2519,6 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit) | |||
| 1875 | skb_put(skb, pkt_len); | 2519 | skb_put(skb, pkt_len); |
| 1876 | dev->stats.rx_bytes += pkt_len; | 2520 | dev->stats.rx_bytes += pkt_len; |
| 1877 | 2521 | ||
| 1878 | if (in_irq() || irqs_disabled()) | ||
| 1879 | printk("Interrupt problem!\n"); | ||
| 1880 | gfar_process_frame(dev, skb, amount_pull); | 2522 | gfar_process_frame(dev, skb, amount_pull); |
| 1881 | 2523 | ||
| 1882 | } else { | 2524 | } else { |
| @@ -1889,46 +2531,70 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit) | |||
| 1889 | 2531 | ||
| 1890 | } | 2532 | } |
| 1891 | 2533 | ||
| 1892 | priv->rx_skbuff[priv->skb_currx] = newskb; | 2534 | rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb; |
| 1893 | 2535 | ||
| 1894 | /* Setup the new bdp */ | 2536 | /* Setup the new bdp */ |
| 1895 | gfar_new_rxbdp(dev, bdp, newskb); | 2537 | gfar_new_rxbdp(rx_queue, bdp, newskb); |
| 1896 | 2538 | ||
| 1897 | /* Update to the next pointer */ | 2539 | /* Update to the next pointer */ |
| 1898 | bdp = next_bd(bdp, base, priv->rx_ring_size); | 2540 | bdp = next_bd(bdp, base, rx_queue->rx_ring_size); |
| 1899 | 2541 | ||
| 1900 | /* update to point at the next skb */ | 2542 | /* update to point at the next skb */ |
| 1901 | priv->skb_currx = | 2543 | rx_queue->skb_currx = |
| 1902 | (priv->skb_currx + 1) & | 2544 | (rx_queue->skb_currx + 1) & |
| 1903 | RX_RING_MOD_MASK(priv->rx_ring_size); | 2545 | RX_RING_MOD_MASK(rx_queue->rx_ring_size); |
| 1904 | } | 2546 | } |
| 1905 | 2547 | ||
| 1906 | /* Update the current rxbd pointer to be the next one */ | 2548 | /* Update the current rxbd pointer to be the next one */ |
| 1907 | priv->cur_rx = bdp; | 2549 | rx_queue->cur_rx = bdp; |
| 1908 | 2550 | ||
| 1909 | return howmany; | 2551 | return howmany; |
| 1910 | } | 2552 | } |
| 1911 | 2553 | ||
| 1912 | static int gfar_poll(struct napi_struct *napi, int budget) | 2554 | static int gfar_poll(struct napi_struct *napi, int budget) |
| 1913 | { | 2555 | { |
| 1914 | struct gfar_private *priv = container_of(napi, struct gfar_private, napi); | 2556 | struct gfar_priv_grp *gfargrp = container_of(napi, |
| 1915 | struct net_device *dev = priv->ndev; | 2557 | struct gfar_priv_grp, napi); |
| 1916 | int tx_cleaned = 0; | 2558 | struct gfar_private *priv = gfargrp->priv; |
| 1917 | int rx_cleaned = 0; | 2559 | struct gfar __iomem *regs = gfargrp->regs; |
| 1918 | unsigned long flags; | 2560 | struct gfar_priv_tx_q *tx_queue = NULL; |
| 2561 | struct gfar_priv_rx_q *rx_queue = NULL; | ||
| 2562 | int rx_cleaned = 0, budget_per_queue = 0, rx_cleaned_per_queue = 0; | ||
| 2563 | int tx_cleaned = 0, i, left_over_budget = budget; | ||
| 2564 | unsigned long serviced_queues = 0; | ||
| 2565 | int num_queues = 0; | ||
| 2566 | |||
| 2567 | num_queues = gfargrp->num_rx_queues; | ||
| 2568 | budget_per_queue = budget/num_queues; | ||
| 1919 | 2569 | ||
| 1920 | /* Clear IEVENT, so interrupts aren't called again | 2570 | /* Clear IEVENT, so interrupts aren't called again |
| 1921 | * because of the packets that have already arrived */ | 2571 | * because of the packets that have already arrived */ |
| 1922 | gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK); | 2572 | gfar_write(&regs->ievent, IEVENT_RTX_MASK); |
| 1923 | 2573 | ||
| 1924 | /* If we fail to get the lock, don't bother with the TX BDs */ | 2574 | while (num_queues && left_over_budget) { |
| 1925 | if (spin_trylock_irqsave(&priv->txlock, flags)) { | 2575 | |
| 1926 | tx_cleaned = gfar_clean_tx_ring(dev); | 2576 | budget_per_queue = left_over_budget/num_queues; |
| 1927 | spin_unlock_irqrestore(&priv->txlock, flags); | 2577 | left_over_budget = 0; |
| 2578 | |||
| 2579 | for_each_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) { | ||
| 2580 | if (test_bit(i, &serviced_queues)) | ||
| 2581 | continue; | ||
| 2582 | rx_queue = priv->rx_queue[i]; | ||
| 2583 | tx_queue = priv->tx_queue[rx_queue->qindex]; | ||
| 2584 | |||
| 2585 | tx_cleaned += gfar_clean_tx_ring(tx_queue); | ||
| 2586 | rx_cleaned_per_queue = gfar_clean_rx_ring(rx_queue, | ||
| 2587 | budget_per_queue); | ||
| 2588 | rx_cleaned += rx_cleaned_per_queue; | ||
| 2589 | if(rx_cleaned_per_queue < budget_per_queue) { | ||
| 2590 | left_over_budget = left_over_budget + | ||
| 2591 | (budget_per_queue - rx_cleaned_per_queue); | ||
| 2592 | set_bit(i, &serviced_queues); | ||
| 2593 | num_queues--; | ||
| 2594 | } | ||
| 2595 | } | ||
| 1928 | } | 2596 | } |
| 1929 | 2597 | ||
| 1930 | rx_cleaned = gfar_clean_rx_ring(dev, budget); | ||
| 1931 | |||
| 1932 | if (tx_cleaned) | 2598 | if (tx_cleaned) |
| 1933 | return budget; | 2599 | return budget; |
| 1934 | 2600 | ||
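The while loop introduced above spreads the NAPI budget over the group's RX queues in a weighted round-robin manner: every pass gives each still-active queue an equal share of the remaining budget, and a queue that uses less than its share returns the leftover to the pool and is marked in serviced_queues so later passes skip it. A minimal standalone sketch of the same scheme, with simplified types and a hypothetical clean_rx() callback standing in for gfar_clean_rx_ring():

/* Sketch of the budget-splitting loop used by the new gfar_poll();
 * the queue state and clean_rx() are simplified stand-ins. */
static int napi_split_budget(int budget, int num_queues,
			     int (*clean_rx)(int queue, int limit))
{
	unsigned long serviced = 0;	/* bitmap of finished queues */
	int left = budget, cleaned = 0;
	int busy = num_queues;

	while (busy && left) {
		int share = left / busy;
		int i;

		left = 0;
		for (i = 0; i < num_queues; i++) {
			int n;

			if (test_bit(i, &serviced))
				continue;
			n = clean_rx(i, share);
			cleaned += n;
			if (n < share) {
				/* queue went idle: hand back its unused
				 * share and drop it from the next pass */
				left += share - n;
				set_bit(i, &serviced);
				busy--;
			}
		}
	}
	return cleaned;
}

The loop ends either when every queue has been marked serviced or when a full pass consumes the entire remaining budget, in which case left stays zero.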
| @@ -1936,20 +2602,14 @@ static int gfar_poll(struct napi_struct *napi, int budget) | |||
| 1936 | napi_complete(napi); | 2602 | napi_complete(napi); |
| 1937 | 2603 | ||
| 1938 | /* Clear the halt bit in RSTAT */ | 2604 | /* Clear the halt bit in RSTAT */ |
| 1939 | gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT); | 2605 | gfar_write(&regs->rstat, gfargrp->rstat); |
| 1940 | 2606 | ||
| 1941 | gfar_write(&priv->regs->imask, IMASK_DEFAULT); | 2607 | gfar_write(&regs->imask, IMASK_DEFAULT); |
| 1942 | 2608 | ||
| 1943 | /* If we are coalescing interrupts, update the timer */ | 2609 | /* If we are coalescing interrupts, update the timer */ |
| 1944 | /* Otherwise, clear it */ | 2610 | /* Otherwise, clear it */ |
| 1945 | if (likely(priv->rxcoalescing)) { | 2611 | gfar_configure_coalescing(priv, |
| 1946 | gfar_write(&priv->regs->rxic, 0); | 2612 | gfargrp->rx_bit_map, gfargrp->tx_bit_map); |
| 1947 | gfar_write(&priv->regs->rxic, priv->rxic); | ||
| 1948 | } | ||
| 1949 | if (likely(priv->txcoalescing)) { | ||
| 1950 | gfar_write(&priv->regs->txic, 0); | ||
| 1951 | gfar_write(&priv->regs->txic, priv->txic); | ||
| 1952 | } | ||
| 1953 | } | 2613 | } |
| 1954 | 2614 | ||
| 1955 | return rx_cleaned; | 2615 | return rx_cleaned; |
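The unconditional rxic/txic reprogramming that used to follow napi_complete() is replaced by one call to gfar_configure_coalescing() driven by the group's RX and TX bitmaps. Its body is outside this hunk; the sketch below only illustrates the intent (walk both bitmaps and rearm coalescing for the queues that have it enabled), and the per-queue field names and the single shared txic/rxic registers used here are assumptions, not the driver's actual register layout:

/* Rough sketch only: the real gfar_configure_coalescing() is defined
 * elsewhere in the patch; per-queue fields and the shared registers
 * below are illustrative assumptions. */
static void configure_coalescing_sketch(struct gfar_private *priv,
					unsigned long tx_mask,
					unsigned long rx_mask)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	int i;

	for_each_bit(i, &tx_mask, priv->num_tx_queues) {
		struct gfar_priv_tx_q *q = priv->tx_queue[i];

		gfar_write(&regs->txic, 0);	/* restart the timer */
		if (q->txcoalescing)
			gfar_write(&regs->txic, q->txic);
	}

	for_each_bit(i, &rx_mask, priv->num_rx_queues) {
		struct gfar_priv_rx_q *q = priv->rx_queue[i];

		gfar_write(&regs->rxic, 0);
		if (q->rxcoalescing)
			gfar_write(&regs->rxic, q->rxic);
	}
}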
| @@ -1964,44 +2624,49 @@ static int gfar_poll(struct napi_struct *napi, int budget) | |||
| 1964 | static void gfar_netpoll(struct net_device *dev) | 2624 | static void gfar_netpoll(struct net_device *dev) |
| 1965 | { | 2625 | { |
| 1966 | struct gfar_private *priv = netdev_priv(dev); | 2626 | struct gfar_private *priv = netdev_priv(dev); |
| 2627 | int i = 0; | ||
| 1967 | 2628 | ||
| 1968 | /* If the device has multiple interrupts, run tx/rx */ | 2629 | /* If the device has multiple interrupts, run tx/rx */ |
| 1969 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { | 2630 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { |
| 1970 | disable_irq(priv->interruptTransmit); | 2631 | for (i = 0; i < priv->num_grps; i++) { |
| 1971 | disable_irq(priv->interruptReceive); | 2632 | disable_irq(priv->gfargrp[i].interruptTransmit); |
| 1972 | disable_irq(priv->interruptError); | 2633 | disable_irq(priv->gfargrp[i].interruptReceive); |
| 1973 | gfar_interrupt(priv->interruptTransmit, dev); | 2634 | disable_irq(priv->gfargrp[i].interruptError); |
| 1974 | enable_irq(priv->interruptError); | 2635 | gfar_interrupt(priv->gfargrp[i].interruptTransmit, |
| 1975 | enable_irq(priv->interruptReceive); | 2636 | &priv->gfargrp[i]); |
| 1976 | enable_irq(priv->interruptTransmit); | 2637 | enable_irq(priv->gfargrp[i].interruptError); |
| 2638 | enable_irq(priv->gfargrp[i].interruptReceive); | ||
| 2639 | enable_irq(priv->gfargrp[i].interruptTransmit); | ||
| 2640 | } | ||
| 1977 | } else { | 2641 | } else { |
| 1978 | disable_irq(priv->interruptTransmit); | 2642 | for (i = 0; i < priv->num_grps; i++) { |
| 1979 | gfar_interrupt(priv->interruptTransmit, dev); | 2643 | disable_irq(priv->gfargrp[i].interruptTransmit); |
| 1980 | enable_irq(priv->interruptTransmit); | 2644 | gfar_interrupt(priv->gfargrp[i].interruptTransmit, |
| 2645 | &priv->gfargrp[i]); | ||
| 2646 | enable_irq(priv->gfargrp[i].interruptTransmit); | ||
| 1981 | } | 2647 | } |
| 1982 | } | 2648 | } |
| 1983 | #endif | 2649 | #endif |
| 1984 | 2650 | ||
| 1985 | /* The interrupt handler for devices with one interrupt */ | 2651 | /* The interrupt handler for devices with one interrupt */ |
| 1986 | static irqreturn_t gfar_interrupt(int irq, void *dev_id) | 2652 | static irqreturn_t gfar_interrupt(int irq, void *grp_id) |
| 1987 | { | 2653 | { |
| 1988 | struct net_device *dev = dev_id; | 2654 | struct gfar_priv_grp *gfargrp = grp_id; |
| 1989 | struct gfar_private *priv = netdev_priv(dev); | ||
| 1990 | 2655 | ||
| 1991 | /* Save ievent for future reference */ | 2656 | /* Save ievent for future reference */ |
| 1992 | u32 events = gfar_read(&priv->regs->ievent); | 2657 | u32 events = gfar_read(&gfargrp->regs->ievent); |
| 1993 | 2658 | ||
| 1994 | /* Check for reception */ | 2659 | /* Check for reception */ |
| 1995 | if (events & IEVENT_RX_MASK) | 2660 | if (events & IEVENT_RX_MASK) |
| 1996 | gfar_receive(irq, dev_id); | 2661 | gfar_receive(irq, grp_id); |
| 1997 | 2662 | ||
| 1998 | /* Check for transmit completion */ | 2663 | /* Check for transmit completion */ |
| 1999 | if (events & IEVENT_TX_MASK) | 2664 | if (events & IEVENT_TX_MASK) |
| 2000 | gfar_transmit(irq, dev_id); | 2665 | gfar_transmit(irq, grp_id); |
| 2001 | 2666 | ||
| 2002 | /* Check for errors */ | 2667 | /* Check for errors */ |
| 2003 | if (events & IEVENT_ERR_MASK) | 2668 | if (events & IEVENT_ERR_MASK) |
| 2004 | gfar_error(irq, dev_id); | 2669 | gfar_error(irq, grp_id); |
| 2005 | 2670 | ||
| 2006 | return IRQ_HANDLED; | 2671 | return IRQ_HANDLED; |
| 2007 | } | 2672 | } |
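Because the handler is now keyed to a group rather than a net_device, gfar_interrupt() reaches its register block directly through gfargrp->regs. A hypothetical registration helper showing the other side of that change (the actual request_irq() call is made elsewhere in the driver, and the IRQ name string here is a placeholder):

/* Illustrative sketch: the group, not the net_device, becomes the
 * dev_id cookie handed back to gfar_interrupt() and friends. */
static int gfar_request_group_irq(struct gfar_priv_grp *grp)
{
	return request_irq(grp->interruptTransmit, gfar_interrupt, 0,
			   "gianfar", grp);
}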
| @@ -2015,12 +2680,14 @@ static irqreturn_t gfar_interrupt(int irq, void *dev_id) | |||
| 2015 | static void adjust_link(struct net_device *dev) | 2680 | static void adjust_link(struct net_device *dev) |
| 2016 | { | 2681 | { |
| 2017 | struct gfar_private *priv = netdev_priv(dev); | 2682 | struct gfar_private *priv = netdev_priv(dev); |
| 2018 | struct gfar __iomem *regs = priv->regs; | 2683 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
| 2019 | unsigned long flags; | 2684 | unsigned long flags; |
| 2020 | struct phy_device *phydev = priv->phydev; | 2685 | struct phy_device *phydev = priv->phydev; |
| 2021 | int new_state = 0; | 2686 | int new_state = 0; |
| 2022 | 2687 | ||
| 2023 | spin_lock_irqsave(&priv->txlock, flags); | 2688 | local_irq_save(flags); |
| 2689 | lock_tx_qs(priv); | ||
| 2690 | |||
| 2024 | if (phydev->link) { | 2691 | if (phydev->link) { |
| 2025 | u32 tempval = gfar_read(&regs->maccfg2); | 2692 | u32 tempval = gfar_read(&regs->maccfg2); |
| 2026 | u32 ecntrl = gfar_read(&regs->ecntrl); | 2693 | u32 ecntrl = gfar_read(&regs->ecntrl); |
| @@ -2085,8 +2752,8 @@ static void adjust_link(struct net_device *dev) | |||
| 2085 | 2752 | ||
| 2086 | if (new_state && netif_msg_link(priv)) | 2753 | if (new_state && netif_msg_link(priv)) |
| 2087 | phy_print_status(phydev); | 2754 | phy_print_status(phydev); |
| 2088 | 2755 | unlock_tx_qs(priv); | |
| 2089 | spin_unlock_irqrestore(&priv->txlock, flags); | 2756 | local_irq_restore(flags); |
| 2090 | } | 2757 | } |
| 2091 | 2758 | ||
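adjust_link() used to serialize against the transmit path with the single priv->txlock; with per-queue locks it now disables local interrupts once and then takes every TX queue lock through lock_tx_qs(). The helpers are defined outside this hunk; a plausible sketch, assuming each gfar_priv_tx_q carries its own txlock spinlock, is:

/* Sketch, assuming a per-queue spinlock named txlock; interrupts are
 * already disabled by the caller (local_irq_save()), so the plain
 * spin_lock() variant is enough here. */
static inline void lock_tx_qs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_lock(&priv->tx_queue[i]->txlock);
}

static inline void unlock_tx_qs(struct gfar_private *priv)
{
	int i;

	for (i = priv->num_tx_queues - 1; i >= 0; i--)
		spin_unlock(&priv->tx_queue[i]->txlock);
}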
| 2092 | /* Update the hash table based on the current list of multicast | 2759 | /* Update the hash table based on the current list of multicast |
| @@ -2097,10 +2764,10 @@ static void gfar_set_multi(struct net_device *dev) | |||
| 2097 | { | 2764 | { |
| 2098 | struct dev_mc_list *mc_ptr; | 2765 | struct dev_mc_list *mc_ptr; |
| 2099 | struct gfar_private *priv = netdev_priv(dev); | 2766 | struct gfar_private *priv = netdev_priv(dev); |
| 2100 | struct gfar __iomem *regs = priv->regs; | 2767 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
| 2101 | u32 tempval; | 2768 | u32 tempval; |
| 2102 | 2769 | ||
| 2103 | if(dev->flags & IFF_PROMISC) { | 2770 | if (dev->flags & IFF_PROMISC) { |
| 2104 | /* Set RCTRL to PROM */ | 2771 | /* Set RCTRL to PROM */ |
| 2105 | tempval = gfar_read(®s->rctrl); | 2772 | tempval = gfar_read(®s->rctrl); |
| 2106 | tempval |= RCTRL_PROM; | 2773 | tempval |= RCTRL_PROM; |
| @@ -2112,7 +2779,7 @@ static void gfar_set_multi(struct net_device *dev) | |||
| 2112 | gfar_write(&regs->rctrl, tempval); | 2779 | gfar_write(&regs->rctrl, tempval); |
| 2113 | } | 2780 | } |
| 2114 | 2781 | ||
| 2115 | if(dev->flags & IFF_ALLMULTI) { | 2782 | if (dev->flags & IFF_ALLMULTI) { |
| 2116 | /* Set the hash to rx all multicast frames */ | 2783 | /* Set the hash to rx all multicast frames */ |
| 2117 | gfar_write(&regs->igaddr0, 0xffffffff); | 2784 | gfar_write(&regs->igaddr0, 0xffffffff); |
| 2118 | gfar_write(&regs->igaddr1, 0xffffffff); | 2785 | gfar_write(&regs->igaddr1, 0xffffffff); |
| @@ -2164,7 +2831,7 @@ static void gfar_set_multi(struct net_device *dev) | |||
| 2164 | em_num = 0; | 2831 | em_num = 0; |
| 2165 | } | 2832 | } |
| 2166 | 2833 | ||
| 2167 | if(dev->mc_count == 0) | 2834 | if (dev->mc_count == 0) |
| 2168 | return; | 2835 | return; |
| 2169 | 2836 | ||
| 2170 | /* Parse the list, and set the appropriate bits */ | 2837 | /* Parse the list, and set the appropriate bits */ |
| @@ -2230,10 +2897,11 @@ static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr) | |||
| 2230 | static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr) | 2897 | static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr) |
| 2231 | { | 2898 | { |
| 2232 | struct gfar_private *priv = netdev_priv(dev); | 2899 | struct gfar_private *priv = netdev_priv(dev); |
| 2900 | struct gfar __iomem *regs = priv->gfargrp[0].regs; | ||
| 2233 | int idx; | 2901 | int idx; |
| 2234 | char tmpbuf[MAC_ADDR_LEN]; | 2902 | char tmpbuf[MAC_ADDR_LEN]; |
| 2235 | u32 tempval; | 2903 | u32 tempval; |
| 2236 | u32 __iomem *macptr = &priv->regs->macstnaddr1; | 2904 | u32 __iomem *macptr = &regs->macstnaddr1; |
| 2237 | 2905 | ||
| 2238 | macptr += num*2; | 2906 | macptr += num*2; |
| 2239 | 2907 | ||
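The num*2 step works because each exact-match station address occupies a pair of consecutive 32-bit registers starting at macstnaddr1, so entry num begins two words past entry num-1; spelled out:

	/* Equivalent addressing: two u32 registers per exact-match entry. */
	macptr = &regs->macstnaddr1 + 2 * num;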
| @@ -2250,16 +2918,18 @@ static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr) | |||
| 2250 | } | 2918 | } |
| 2251 | 2919 | ||
| 2252 | /* GFAR error interrupt handler */ | 2920 | /* GFAR error interrupt handler */ |
| 2253 | static irqreturn_t gfar_error(int irq, void *dev_id) | 2921 | static irqreturn_t gfar_error(int irq, void *grp_id) |
| 2254 | { | 2922 | { |
| 2255 | struct net_device *dev = dev_id; | 2923 | struct gfar_priv_grp *gfargrp = grp_id; |
| 2256 | struct gfar_private *priv = netdev_priv(dev); | 2924 | struct gfar __iomem *regs = gfargrp->regs; |
| 2925 | struct gfar_private *priv = gfargrp->priv; | ||
| 2926 | struct net_device *dev = priv->ndev; | ||
| 2257 | 2927 | ||
| 2258 | /* Save ievent for future reference */ | 2928 | /* Save ievent for future reference */ |
| 2259 | u32 events = gfar_read(&priv->regs->ievent); | 2929 | u32 events = gfar_read(&regs->ievent); |
| 2260 | 2930 | ||
| 2261 | /* Clear IEVENT */ | 2931 | /* Clear IEVENT */ |
| 2262 | gfar_write(&priv->regs->ievent, events & IEVENT_ERR_MASK); | 2932 | gfar_write(&regs->ievent, events & IEVENT_ERR_MASK); |
| 2263 | 2933 | ||
| 2264 | /* Magic Packet is not an error. */ | 2934 | /* Magic Packet is not an error. */ |
| 2265 | if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) && | 2935 | if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) && |
| @@ -2269,7 +2939,7 @@ static irqreturn_t gfar_error(int irq, void *dev_id) | |||
| 2269 | /* Hmm... */ | 2939 | /* Hmm... */ |
| 2270 | if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv)) | 2940 | if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv)) |
| 2271 | printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n", | 2941 | printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n", |
| 2272 | dev->name, events, gfar_read(&priv->regs->imask)); | 2942 | dev->name, events, gfar_read(&regs->imask)); |
| 2273 | 2943 | ||
| 2274 | /* Update the error counters */ | 2944 | /* Update the error counters */ |
| 2275 | if (events & IEVENT_TXE) { | 2945 | if (events & IEVENT_TXE) { |
| @@ -2280,14 +2950,22 @@ static irqreturn_t gfar_error(int irq, void *dev_id) | |||
| 2280 | if (events & IEVENT_CRL) | 2950 | if (events & IEVENT_CRL) |
| 2281 | dev->stats.tx_aborted_errors++; | 2951 | dev->stats.tx_aborted_errors++; |
| 2282 | if (events & IEVENT_XFUN) { | 2952 | if (events & IEVENT_XFUN) { |
| 2953 | unsigned long flags; | ||
| 2954 | |||
| 2283 | if (netif_msg_tx_err(priv)) | 2955 | if (netif_msg_tx_err(priv)) |
| 2284 | printk(KERN_DEBUG "%s: TX FIFO underrun, " | 2956 | printk(KERN_DEBUG "%s: TX FIFO underrun, " |
| 2285 | "packet dropped.\n", dev->name); | 2957 | "packet dropped.\n", dev->name); |
| 2286 | dev->stats.tx_dropped++; | 2958 | dev->stats.tx_dropped++; |
| 2287 | priv->extra_stats.tx_underrun++; | 2959 | priv->extra_stats.tx_underrun++; |
| 2288 | 2960 | ||
| 2961 | local_irq_save(flags); | ||
| 2962 | lock_tx_qs(priv); | ||
| 2963 | |||
| 2289 | /* Reactivate the Tx Queues */ | 2964 | /* Reactivate the Tx Queues */ |
| 2290 | gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT); | 2965 | gfar_write(&regs->tstat, gfargrp->tstat); |
| 2966 | |||
| 2967 | unlock_tx_qs(priv); | ||
| 2968 | local_irq_restore(flags); | ||
| 2291 | } | 2969 | } |
| 2292 | if (netif_msg_tx_err(priv)) | 2970 | if (netif_msg_tx_err(priv)) |
| 2293 | printk(KERN_DEBUG "%s: Transmit Error\n", dev->name); | 2971 | printk(KERN_DEBUG "%s: Transmit Error\n", dev->name); |
| @@ -2296,11 +2974,11 @@ static irqreturn_t gfar_error(int irq, void *dev_id) | |||
| 2296 | dev->stats.rx_errors++; | 2974 | dev->stats.rx_errors++; |
| 2297 | priv->extra_stats.rx_bsy++; | 2975 | priv->extra_stats.rx_bsy++; |
| 2298 | 2976 | ||
| 2299 | gfar_receive(irq, dev_id); | 2977 | gfar_receive(irq, grp_id); |
| 2300 | 2978 | ||
| 2301 | if (netif_msg_rx_err(priv)) | 2979 | if (netif_msg_rx_err(priv)) |
| 2302 | printk(KERN_DEBUG "%s: busy error (rstat: %x)\n", | 2980 | printk(KERN_DEBUG "%s: busy error (rstat: %x)\n", |
| 2303 | dev->name, gfar_read(&priv->regs->rstat)); | 2981 | dev->name, gfar_read(&regs->rstat)); |
| 2304 | } | 2982 | } |
| 2305 | if (events & IEVENT_BABR) { | 2983 | if (events & IEVENT_BABR) { |
| 2306 | dev->stats.rx_errors++; | 2984 | dev->stats.rx_errors++; |
| @@ -2331,6 +3009,9 @@ static struct of_device_id gfar_match[] = | |||
| 2331 | .type = "network", | 3009 | .type = "network", |
| 2332 | .compatible = "gianfar", | 3010 | .compatible = "gianfar", |
| 2333 | }, | 3011 | }, |
| 3012 | { | ||
| 3013 | .compatible = "fsl,etsec2", | ||
| 3014 | }, | ||
| 2334 | {}, | 3015 | {}, |
| 2335 | }; | 3016 | }; |
| 2336 | MODULE_DEVICE_TABLE(of, gfar_match); | 3017 | MODULE_DEVICE_TABLE(of, gfar_match); |
| @@ -2342,8 +3023,9 @@ static struct of_platform_driver gfar_driver = { | |||
| 2342 | 3023 | ||
| 2343 | .probe = gfar_probe, | 3024 | .probe = gfar_probe, |
| 2344 | .remove = gfar_remove, | 3025 | .remove = gfar_remove, |
| 2345 | .suspend = gfar_suspend, | 3026 | .suspend = gfar_legacy_suspend, |
| 2346 | .resume = gfar_resume, | 3027 | .resume = gfar_legacy_resume, |
| 3028 | .driver.pm = GFAR_PM_OPS, | ||
| 2347 | }; | 3029 | }; |
| 2348 | 3030 | ||
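The of_platform driver keeps legacy suspend/resume entry points while newer kernels go through the dev_pm_ops table referenced by .driver.pm. GFAR_PM_OPS itself is defined earlier in the patch and is not visible in this hunk; one plausible shape for it, shown purely as a sketch (the callback names are assumptions), is:

#ifdef CONFIG_PM
/* Sketch: route system sleep through dev_pm_ops when PM is enabled... */
static const struct dev_pm_ops gfar_pm_ops = {
	.suspend	= gfar_suspend,
	.resume		= gfar_resume,
};
#define GFAR_PM_OPS	(&gfar_pm_ops)
#else
/* ...and fall back to the legacy of_platform callbacks otherwise. */
#define GFAR_PM_OPS	NULL
#endif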
| 2349 | static int __init gfar_init(void) | 3031 | static int __init gfar_init(void) |
