Diffstat (limited to 'drivers/net/tulip/interrupt.c')
 -rw-r--r--  drivers/net/tulip/interrupt.c  126
 1 file changed, 63 insertions(+), 63 deletions(-)
diff --git a/drivers/net/tulip/interrupt.c b/drivers/net/tulip/interrupt.c
index bb3558164a5b..da4f7593c50f 100644
--- a/drivers/net/tulip/interrupt.c
+++ b/drivers/net/tulip/interrupt.c
@@ -139,22 +139,22 @@ int tulip_poll(struct net_device *dev, int *budget)
               }
               /* Acknowledge current RX interrupt sources. */
               iowrite32((RxIntr | RxNoBuf), tp->base_addr + CSR5);


               /* If we own the next entry, it is a new packet. Send it up. */
               while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
                       s32 status = le32_to_cpu(tp->rx_ring[entry].status);


                       if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx)
                               break;

                       if (tulip_debug > 5)
                               printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
                                      dev->name, entry, status);
                       if (--rx_work_limit < 0)
                               goto not_done;

                       if ((status & 0x38008300) != 0x0300) {
                               if ((status & 0x38000300) != 0x0300) {
                                       /* Ignore earlier buffers. */
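
The loop above is the classic descriptor-ownership dance: a ring entry belongs to the NIC while DescOwned is set, and the poll consumes completed entries until it meets one the hardware still owns, or until rx_work_limit runs out. A minimal userspace model of that control flow follows; it is not driver code, and the ring size, status values, and names are simplified stand-ins:

    /*
     * Userspace model of the ownership loop -- not the driver itself.
     * A descriptor belongs to the NIC while DescOwned is set; the poll
     * consumes entries until it hits one the hardware still owns, or the
     * work budget runs out (the "not_done" exit).
     */
    #include <stdint.h>
    #include <stdio.h>

    #define RING_SIZE 8
    #define DescOwned 0x80000000u

    struct desc { uint32_t status; };

    int main(void)
    {
            struct desc ring[RING_SIZE];
            unsigned int entry = 0;
            int work_limit = 4;             /* stand-in for rx_work_limit */

            /* Pretend the NIC completed entries 0..5 (ownership returned to the CPU). */
            for (int i = 0; i < RING_SIZE; i++)
                    ring[i].status = (i < 6) ? 0x0300 : DescOwned;

            while (!(ring[entry].status & DescOwned)) {
                    if (--work_limit < 0) {
                            puts("budget exhausted -> not_done");
                            return 1;       /* tulip_poll() returns 1 here */
                    }
                    printf("consumed entry %u, status %#x\n",
                           entry, (unsigned)ring[entry].status);
                    entry = (entry + 1) % RING_SIZE;
            }
            puts("ring drained -> done");
            return 0;
    }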
@@ -180,7 +180,7 @@ int tulip_poll(struct net_device *dev, int *budget)
                               /* Omit the four octet CRC from the length. */
                               short pkt_len = ((status >> 16) & 0x7ff) - 4;
                               struct sk_buff *skb;

 #ifndef final_version
                               if (pkt_len > 1518) {
                                       printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
@@ -213,7 +213,7 @@ int tulip_poll(struct net_device *dev, int *budget)
                               } else { /* Pass up the skb already on the Rx ring. */
                                       char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
                                                            pkt_len);

 #ifndef final_version
                                       if (tp->rx_buffers[entry].mapping !=
                                           le32_to_cpu(tp->rx_ring[entry].buffer1)) {
@@ -225,17 +225,17 @@ int tulip_poll(struct net_device *dev, int *budget)
                                                      skb->head, temp);
                                       }
 #endif

                                       pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
                                                        PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

                                       tp->rx_buffers[entry].skb = NULL;
                                       tp->rx_buffers[entry].mapping = 0;
                               }
                               skb->protocol = eth_type_trans(skb, dev);

                               netif_receive_skb(skb);

                               dev->last_rx = jiffies;
                               tp->stats.rx_packets++;
                               tp->stats.rx_bytes += pkt_len;
@@ -245,12 +245,12 @@ int tulip_poll(struct net_device *dev, int *budget)
                       entry = (++tp->cur_rx) % RX_RING_SIZE;
                       if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4)
                               tulip_refill_rx(dev);

               }

               /* New ack strategy... irq does not ack Rx any longer
                  hopefully this helps */

               /* Really bad things can happen here... If new packet arrives
                * and an irq arrives (tx or just due to occasionally unset
                * mask), it will be acked by irq handler, but new thread
@@ -259,28 +259,28 @@ int tulip_poll(struct net_device *dev, int *budget)
                * tomorrow (night 011029). If it does not fail, we have won:
                * the amount of IO did not increase at all. */
       } while ((ioread32(tp->base_addr + CSR5) & RxIntr));
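
The do/while condition re-reads CSR5 after the ring is drained, so a packet landing between the last descriptor check and the ack still earns another pass through the loop. A small simulation of that re-check, with a fake status register standing in for ioread32()/CSR5 and a counter standing in for packet arrival:

    /*
     * Userspace model of the re-check above -- not driver code. One packet
     * "arrives" while we drain, so the loop goes around exactly once more.
     */
    #include <stdio.h>

    #define RxIntr 0x40u                    /* CSR5 receive-interrupt bit, as in the driver */

    static unsigned int csr5;               /* simulated status register */
    static int arrivals = 1;                /* one packet will land mid-poll */

    static unsigned int read_csr5(void)
    {
            if (arrivals > 0) {             /* a packet slipped in while we drained */
                    arrivals--;
                    csr5 |= RxIntr;
            }
            return csr5;
    }

    static void drain_ring(void)
    {
            puts("drain rx ring, ack RxIntr/RxNoBuf");
            csr5 &= ~RxIntr;                /* poll-side ack of the RX sources */
    }

    int main(void)
    {
            do {
                    drain_ring();
            } while (read_csr5() & RxIntr); /* the re-check closes the race window */
            puts("RxIntr clear: safe to complete the poll");
            return 0;
    }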

done:

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION

        /* We use this simplistic scheme for IM. It's proven by
           real-life installations. We could have IM enabled
           continuously, but this would cause unnecessary latency.
           Unfortunately we can't use all the NET_RX_* feedback here.
           That would turn on IM for devices that are not contributing
           to backlog congestion, with unnecessary latency.

           We monitor the device RX-ring and have:

           HW Interrupt Mitigation either ON or OFF.

           ON:  more than 1 pkt received (per intr.) OR we are dropping
           OFF: only 1 pkt received

           Note: we only use the min and max (0, 15) settings from mit_table */


        if (tp->flags & HAS_INTR_MITIGATION) {
                if (received > 1) {
                        if (!tp->mit_on) {
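
The mitigation policy the comment describes is a two-state toggle: program CSR11 with a mit_table setting when one interrupt delivered more than one packet, and write zero when traffic falls back to a packet per interrupt. A hedged sketch of that policy; csr11_write() and MIT_MAX are made-up stand-ins (the driver's comment says it uses mit_table's min and max entries, and the real code writes the register with iowrite32):

    #include <stdbool.h>
    #include <stdio.h>

    #define MIT_MAX 0x0000000fu     /* hypothetical stand-in for the max mit_table entry */

    static bool mit_on;

    static void csr11_write(unsigned int v) /* stand-in for iowrite32(v, base + CSR11) */
    {
            printf("CSR11 <- %#x (mitigation %s)\n", v, v ? "ON" : "OFF");
    }

    static void update_mitigation(int received)
    {
            if (received > 1) {             /* burst: turn hardware IM on */
                    if (!mit_on) {
                            mit_on = true;
                            csr11_write(MIT_MAX);
                    }
            } else if (mit_on) {            /* single packet: turn it back off */
                    mit_on = false;
                    csr11_write(0);
            }
    }

    int main(void)
    {
            update_mitigation(5);   /* >1 pkt per interrupt: ON */
            update_mitigation(1);   /* exactly 1: OFF */
            return 0;
    }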
@@ -297,20 +297,20 @@ done:
        }

#endif /* CONFIG_TULIP_NAPI_HW_MITIGATION */

        dev->quota -= received;
        *budget -= received;

        tulip_refill_rx(dev);

        /* If RX ring is not full we are out of memory. */
        if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) goto oom;

        /* Remove us from polling list and enable RX intr. */

        netif_rx_complete(dev);
        iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7);

        /* The last op happens after poll completion. Which means the following:
         * 1. it can race with disabling irqs in irq handler
         * 2. it can race with dis-/enabling irqs in other poll threads
@@ -321,9 +321,9 @@ done:
         * due to races in masking and due to too late acking of already
         * processed irqs. But it must not result in losing events.
         */

        return 0;

not_done:
        if (!received) {

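
Both exit paths account work the same way under the old *budget-based NAPI contract (the dev->poll API later removed from the kernel): decrement dev->quota and *budget by the packets received, then return 0 when the ring was drained or 1 when work remains and the device should be polled again. An illustrative userspace model, with a fake_dev struct in place of net_device:

    #include <stdio.h>

    struct fake_dev { int quota; };

    static int fake_poll(struct fake_dev *dev, int *budget, int backlog)
    {
            /* Consume at most *budget packets of the pending backlog. */
            int received = backlog < *budget ? backlog : *budget;

            dev->quota -= received;
            *budget    -= received;
            printf("received %d, quota now %d, budget now %d\n",
                   received, dev->quota, *budget);
            return backlog > received;      /* 1: more work, poll again; 0: done */
    }

    int main(void)
    {
            struct fake_dev dev = { .quota = 64 };
            int budget = 16;

            while (fake_poll(&dev, &budget, 40) && budget > 0)
                    ;                       /* stops when the budget is spent */
            return 0;
    }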
@@ -331,29 +331,29 @@ done:
        }
        dev->quota -= received;
        *budget -= received;

        if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 ||
            tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
                tulip_refill_rx(dev);

        if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) goto oom;

        return 1;


oom:    /* Executed with RX ints disabled */


        /* Start timer, stop polling, but do not enable rx interrupts. */
        mod_timer(&tp->oom_timer, jiffies+1);

        /* Think: timer_pending() was an explicit signature of a bug.
         * The timer can be pending now, yet have fired and completed
         * before we did netif_rx_complete(). See? We would lose it. */

        /* remove ourselves from the polling list */
        netif_rx_complete(dev);

        return 0;
}

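
One reading of the oom path and its timer_pending() comment: arm the retry timer first, then leave the poll list, and leave RX interrupts masked, so that only the timer (not an interrupt) gets the device polled again once memory may be available. A toy model of that ordering, with booleans standing in for mod_timer() and netif_rx_complete():

    #include <stdbool.h>
    #include <stdio.h>

    static bool timer_armed;
    static bool on_poll_list = true;
    static bool rx_irq_enabled;             /* stays false on the oom path */

    static void arm_oom_timer(void)   { timer_armed = true; }   /* ~mod_timer() */
    static void leave_poll_list(void) { on_poll_list = false; } /* ~netif_rx_complete() */

    int main(void)
    {
            /* RX ring could not be refilled: take the oom exit. */
            arm_oom_timer();        /* retry one tick from now */
            leave_poll_list();      /* only after the timer is safely armed */

            printf("timer=%d polling=%d rx_irq=%d\n",
                   timer_armed, on_poll_list, rx_irq_enabled);
            return 0;
    }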
@@ -521,9 +521,9 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
        /* Let's see whether the interrupt really is for us */
        csr5 = ioread32(ioaddr + CSR5);

        if (tp->flags & HAS_PHY_IRQ)
                handled = phy_interrupt(dev);

        if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
                return IRQ_RETVAL(handled);

@@ -538,17 +538,17 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
                /* Mask RX intrs and add the device to the poll list. */
                iowrite32(tulip_tbl[tp->chip_id].valid_intrs & ~RxPollInt, ioaddr + CSR7);
                netif_rx_schedule(dev);

                if (!(csr5 & ~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass)))
                        break;
        }

        /* Acknowledge the interrupt sources we handle here ASAP;
           the poll function does the Rx and RxNoBuf acking. */

        iowrite32(csr5 & 0x0001ff3f, ioaddr + CSR5);

#else
        /* Acknowledge all of the current interrupt sources ASAP. */
        iowrite32(csr5 & 0x0001ffff, ioaddr + CSR5);

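
In the NAPI branch the irq handler masks the RX sources in CSR7, schedules the poll, and acks with 0x0001ff3f rather than 0x0001ffff: all handled sources except RxIntr (0x40) and RxNoBuf (0x80), which the poll function acks itself. A simulated walk-through of those two register writes; the exact composition of RxPollInt and valid_intrs here is an assumption, with the two ack masks copied from the diff:

    #include <stdio.h>

    #define RxIntr      0x00000040u
    #define RxNoBuf     0x00000080u
    #define RxPollInt   (RxIntr | RxNoBuf)  /* assumption: the RX bits the poll owns */
    #define VALID_INTRS 0x0001ffffu         /* stand-in for tulip_tbl[].valid_intrs */

    static void iowrite(const char *reg, unsigned int v)
    {
            printf("%s <- %#10.8x\n", reg, v);  /* simulated register write */
    }

    int main(void)
    {
            unsigned int csr5 = RxIntr | RxNoBuf;   /* pretend only RX sources fired */

            /* Mask RX interrupt sources; the poll re-enables them when done. */
            iowrite("CSR7", VALID_INTRS & ~RxPollInt);

            /* netif_rx_schedule(dev) would queue the poll here. */

            /* Ack everything handled in the irq itself -- but not the RX bits,
             * so this write acks nothing for a pure-RX csr5. */
            iowrite("CSR5", csr5 & 0x0001ff3f);
            return 0;
    }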
@@ -559,11 +559,11 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
        }

#endif /* CONFIG_TULIP_NAPI */

        if (tulip_debug > 4)
                printk(KERN_DEBUG "%s: interrupt csr5=%#8.8x new csr5=%#8.8x.\n",
                       dev->name, csr5, ioread32(ioaddr + CSR5));


        if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
                unsigned int dirty_tx;
@@ -737,17 +737,17 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
#ifdef CONFIG_TULIP_NAPI
                if (rxd)
                        csr5 &= ~RxPollInt;
        } while ((csr5 & (TxNoBuf |
                          TxDied |
                          TxIntr |
                          TimerInt |
                          /* Abnormal intr. */
                          RxDied |
                          TxFIFOUnderflow |
                          TxJabber |
                          TPLnkFail |
                          SytemError)) != 0);
#else
        } while ((csr5 & (NormalIntr|AbnormalIntr)) != 0);

        tulip_refill_rx(dev);
