author		Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
commit		1da177e4c3f41524e886b7f1b8a0c1fc7321cac2
tree		0bba044c4ce775e45a88a51686b5d9f90697ea9d /drivers/net/tulip/interrupt.c
tag		v2.6.12-rc2 (Linux-2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'drivers/net/tulip/interrupt.c')
-rw-r--r--	drivers/net/tulip/interrupt.c	786
1 file changed, 786 insertions(+), 0 deletions(-)
diff --git a/drivers/net/tulip/interrupt.c b/drivers/net/tulip/interrupt.c
new file mode 100644
index 000000000000..afb5cda9d8e1
--- /dev/null
+++ b/drivers/net/tulip/interrupt.c
@@ -0,0 +1,786 @@
/*
	drivers/net/tulip/interrupt.c

	Maintained by Jeff Garzik <jgarzik@pobox.com>
	Copyright 2000,2001  The Linux Kernel Team
	Written/copyright 1994-2001 by Donald Becker.

	This software may be used and distributed according to the terms
	of the GNU General Public License, incorporated herein by reference.

	Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
	for more information on this driver, or visit the project
	Web page at http://sourceforge.net/projects/tulip/

*/

#include <linux/pci.h>
#include "tulip.h"
#include <linux/config.h>
#include <linux/etherdevice.h>

int tulip_rx_copybreak;
unsigned int tulip_max_interrupt_work;
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
#define MIT_SIZE 15
#define MIT_TABLE 15 /* We use 0 or max */

static unsigned int mit_table[MIT_SIZE+1] =
{
	/* CSR11: 21143 hardware Mitigation Control Interrupt.
	   We use only RX mitigation; other techniques are used for
	   TX intr. mitigation.

	   31    Cycle Size (timer control)
	   30:27 TX timer in 16 * Cycle size
	   26:24 TX No pkts before Int.
	   23:20 RX timer in Cycle size
	   19:17 RX No pkts before Int.
	   16    Continuous Mode (CM)
	*/

	0x0,		/* IM disabled */
	0x80150000,	/* RX time = 1, RX pkts = 2, CM = 1 */
	0x80150000,
	0x80270000,
	0x80370000,
	0x80490000,
	0x80590000,
	0x80690000,
	0x807B0000,
	0x808B0000,
	0x809D0000,
	0x80AD0000,
	0x80BD0000,
	0x80CF0000,
	0x80DF0000,
//	0x80FF0000	/* RX time = 16, RX pkts = 7, CM = 1 */
	0x80F10000	/* RX time = 16, RX pkts = 0, CM = 1 */
};
#endif
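/* Example decode (illustrative, added for clarity): for the table entry
   0x80150000 above, bit 31 = 1 (cycle size), (0x80150000 >> 20) & 0xf = 1
   (RX timer), (0x80150000 >> 17) & 0x7 = 2 (RX pkts before int.), and
   bit 16 = 1 (CM) -- matching its "RX time = 1, RX pkts = 2, CM = 1"
   annotation. */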
62 | |||
63 | |||
64 | int tulip_refill_rx(struct net_device *dev) | ||
65 | { | ||
66 | struct tulip_private *tp = netdev_priv(dev); | ||
67 | int entry; | ||
68 | int refilled = 0; | ||
69 | |||
70 | /* Refill the Rx ring buffers. */ | ||
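	/* tp->cur_rx counts descriptors the chip has filled; tp->dirty_rx
	   trails it, counting slots already handed back. Every slot in
	   between needs a fresh skb (a note on the invariant, inferred
	   from the loop below). */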
	for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
		entry = tp->dirty_rx % RX_RING_SIZE;
		if (tp->rx_buffers[entry].skb == NULL) {
			struct sk_buff *skb;
			dma_addr_t mapping;

			skb = tp->rx_buffers[entry].skb = dev_alloc_skb(PKT_BUF_SZ);
			if (skb == NULL)
				break;

			mapping = pci_map_single(tp->pdev, skb->tail, PKT_BUF_SZ,
						 PCI_DMA_FROMDEVICE);
			tp->rx_buffers[entry].mapping = mapping;

			skb->dev = dev;	/* Mark as being used by this device. */
			tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
			refilled++;
		}
		tp->rx_ring[entry].status = cpu_to_le32(DescOwned);
	}
	if (tp->chip_id == LC82C168) {
		if (((ioread32(tp->base_addr + CSR5) >> 17) & 0x07) == 4) {
			/* Rx stopped due to out of buffers,
			 * restart it
			 */
			iowrite32(0x01, tp->base_addr + CSR2);
		}
	}
	return refilled;
}
101 | |||
102 | #ifdef CONFIG_TULIP_NAPI | ||
103 | |||
void oom_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	netif_rx_schedule(dev);
}

int tulip_poll(struct net_device *dev, int *budget)
{
	struct tulip_private *tp = netdev_priv(dev);
	int entry = tp->cur_rx % RX_RING_SIZE;
	int rx_work_limit = *budget;
	int received = 0;

	if (!netif_running(dev))
		goto done;

	if (rx_work_limit > dev->quota)
		rx_work_limit = dev->quota;

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION

	/* One buffer is needed for mit activation; or it might be a
	   bug in the ring buffer code; check later -- JHS */

	if (rx_work_limit >= RX_RING_SIZE)
		rx_work_limit--;
#endif
130 | |||
131 | if (tulip_debug > 4) | ||
132 | printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry, | ||
133 | tp->rx_ring[entry].status); | ||
134 | |||
135 | do { | ||
136 | if (ioread32(tp->base_addr + CSR5) == 0xffffffff) { | ||
137 | printk(KERN_DEBUG " In tulip_poll(), hardware disappeared.\n"); | ||
138 | break; | ||
139 | } | ||
140 | /* Acknowledge current RX interrupt sources. */ | ||
141 | iowrite32((RxIntr | RxNoBuf), tp->base_addr + CSR5); | ||
142 | |||
143 | |||
144 | /* If we own the next entry, it is a new packet. Send it up. */ | ||
145 | while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) { | ||
146 | s32 status = le32_to_cpu(tp->rx_ring[entry].status); | ||
147 | |||
148 | |||
149 | if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx) | ||
150 | break; | ||
151 | |||
152 | if (tulip_debug > 5) | ||
153 | printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n", | ||
154 | dev->name, entry, status); | ||
155 | if (--rx_work_limit < 0) | ||
156 | goto not_done; | ||
157 | |||
			if ((status & 0x38008300) != 0x0300) {
				if ((status & 0x38000300) != 0x0300) {
					/* Ignore earlier buffers. */
					if ((status & 0xffff) != 0x7fff) {
						if (tulip_debug > 1)
							printk(KERN_WARNING "%s: Oversized Ethernet frame "
							       "spanned multiple buffers, status %8.8x!\n",
							       dev->name, status);
						tp->stats.rx_length_errors++;
					}
				} else if (status & RxDescFatalErr) {
					/* There was a fatal error. */
					if (tulip_debug > 2)
						printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
						       dev->name, status);
					tp->stats.rx_errors++; /* end of a packet. */
					if (status & 0x0890) tp->stats.rx_length_errors++;
					if (status & 0x0004) tp->stats.rx_frame_errors++;
					if (status & 0x0002) tp->stats.rx_crc_errors++;
					if (status & 0x0001) tp->stats.rx_fifo_errors++;
				}
			} else {
				/* Omit the four octet CRC from the length. */
				short pkt_len = ((status >> 16) & 0x7ff) - 4;
				struct sk_buff *skb;

#ifndef final_version
				if (pkt_len > 1518) {
					printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
					       dev->name, pkt_len, pkt_len);
					pkt_len = 1518;
					tp->stats.rx_length_errors++;
				}
#endif
				/* Check if the packet is long enough to accept without copying
				   to a minimally-sized skbuff. */
				if (pkt_len < tulip_rx_copybreak
				    && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
					skb->dev = dev;
					skb_reserve(skb, 2);	/* 16 byte align the IP header */
					pci_dma_sync_single_for_cpu(tp->pdev,
								    tp->rx_buffers[entry].mapping,
								    pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
					eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->tail,
							 pkt_len, 0);
					skb_put(skb, pkt_len);
#else
					memcpy(skb_put(skb, pkt_len),
					       tp->rx_buffers[entry].skb->tail,
					       pkt_len);
#endif
					pci_dma_sync_single_for_device(tp->pdev,
								       tp->rx_buffers[entry].mapping,
								       pkt_len, PCI_DMA_FROMDEVICE);
				} else {	/* Pass up the skb already on the Rx ring. */
					char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
							     pkt_len);

#ifndef final_version
					if (tp->rx_buffers[entry].mapping !=
					    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
						printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
						       "do not match in tulip_rx: %08x vs. %08llx %p / %p.\n",
						       dev->name,
						       le32_to_cpu(tp->rx_ring[entry].buffer1),
						       (unsigned long long)tp->rx_buffers[entry].mapping,
						       skb->head, temp);
					}
#endif

					pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
							 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

					tp->rx_buffers[entry].skb = NULL;
					tp->rx_buffers[entry].mapping = 0;
				}
				skb->protocol = eth_type_trans(skb, dev);

				netif_receive_skb(skb);

				dev->last_rx = jiffies;
				tp->stats.rx_packets++;
				tp->stats.rx_bytes += pkt_len;
			}
			received++;

			entry = (++tp->cur_rx) % RX_RING_SIZE;
			if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4)
				tulip_refill_rx(dev);

		}

		/* New ack strategy... the irq does not ack Rx any longer;
		   hopefully this helps */

		/* Really bad things can happen here... If a new packet arrives
		 * and an irq arrives (tx or just due to an occasionally unset
		 * mask), it will be acked by the irq handler, but a new poll
		 * is not scheduled. It is a major hole in the design.
		 * No idea how to fix this if "playing with fire" fails
		 * tomorrow (night 011029). If it does not fail, we have
		 * finally won: the amount of IO did not increase at all. */
	} while ((ioread32(tp->base_addr + CSR5) & RxIntr));
262 | |||
263 | done: | ||
264 | |||
265 | #ifdef CONFIG_TULIP_NAPI_HW_MITIGATION | ||
266 | |||
267 | /* We use this simplistic scheme for IM. It's proven by | ||
268 | real life installations. We can have IM enabled | ||
269 | continuesly but this would cause unnecessary latency. | ||
270 | Unfortunely we can't use all the NET_RX_* feedback here. | ||
271 | This would turn on IM for devices that is not contributing | ||
272 | to backlog congestion with unnecessary latency. | ||
273 | |||
274 | We monitor the the device RX-ring and have: | ||
275 | |||
276 | HW Interrupt Mitigation either ON or OFF. | ||
277 | |||
278 | ON: More then 1 pkt received (per intr.) OR we are dropping | ||
279 | OFF: Only 1 pkt received | ||
280 | |||
281 | Note. We only use min and max (0, 15) settings from mit_table */ | ||
282 | |||
283 | |||
	if (tp->flags & HAS_INTR_MITIGATION) {
		if (received > 1) {
			if (!tp->mit_on) {
				tp->mit_on = 1;
				iowrite32(mit_table[MIT_TABLE], tp->base_addr + CSR11);
			}
		} else {
			if (tp->mit_on) {
				tp->mit_on = 0;
				iowrite32(0, tp->base_addr + CSR11);
			}
		}
	}

#endif /* CONFIG_TULIP_NAPI_HW_MITIGATION */

	dev->quota -= received;
	*budget -= received;

	tulip_refill_rx(dev);

	/* If the RX ring is not full, we are out of memory. */
	if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		goto oom;

	/* Remove us from the polling list and enable RX intr. */

	netif_rx_complete(dev);
	iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7);

	/* The last op happens after poll completion. Which means the following:
	 * 1. it can race with disabling irqs in the irq handler
	 * 2. it can race with disabling/enabling irqs in other poll threads
	 * 3. if an irq was raised after the beginning of the loop, it will be
	 *    immediately triggered here.
	 *
	 * Summarizing: the logic results in some redundant irqs both
	 * due to races in masking and due to too late acking of already
	 * processed irqs. But it must not result in losing events.
	 */

	return 0;

 not_done:
	if (!received) {

		received = dev->quota; /* Should not happen */
	}
	dev->quota -= received;
	*budget -= received;

	if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 ||
	    tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		tulip_refill_rx(dev);

	if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		goto oom;

	return 1;

 oom:	/* Executed with RX ints disabled */

	/* Start the timer, stop polling, but do not enable rx interrupts. */
	mod_timer(&tp->oom_timer, jiffies+1);

	/* Think: timer_pending() would be an explicit signature of the bug.
	 * The timer can be pending now, but may have fired and completed
	 * before we did netif_rx_complete(). See? We would lose it. */

	/* Remove ourselves from the polling list. */
	netif_rx_complete(dev);

	return 0;
}
359 | |||
360 | #else /* CONFIG_TULIP_NAPI */ | ||
361 | |||
362 | static int tulip_rx(struct net_device *dev) | ||
363 | { | ||
364 | struct tulip_private *tp = netdev_priv(dev); | ||
365 | int entry = tp->cur_rx % RX_RING_SIZE; | ||
366 | int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx; | ||
367 | int received = 0; | ||
368 | |||
369 | if (tulip_debug > 4) | ||
370 | printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry, | ||
371 | tp->rx_ring[entry].status); | ||
372 | /* If we own the next entry, it is a new packet. Send it up. */ | ||
373 | while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) { | ||
374 | s32 status = le32_to_cpu(tp->rx_ring[entry].status); | ||
375 | |||
376 | if (tulip_debug > 5) | ||
377 | printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n", | ||
378 | dev->name, entry, status); | ||
		if (--rx_work_limit < 0)
			break;
		if ((status & 0x38008300) != 0x0300) {
			if ((status & 0x38000300) != 0x0300) {
				/* Ignore earlier buffers. */
				if ((status & 0xffff) != 0x7fff) {
					if (tulip_debug > 1)
						printk(KERN_WARNING "%s: Oversized Ethernet frame "
						       "spanned multiple buffers, status %8.8x!\n",
						       dev->name, status);
					tp->stats.rx_length_errors++;
				}
			} else if (status & RxDescFatalErr) {
				/* There was a fatal error. */
				if (tulip_debug > 2)
					printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
					       dev->name, status);
				tp->stats.rx_errors++; /* end of a packet. */
				if (status & 0x0890) tp->stats.rx_length_errors++;
				if (status & 0x0004) tp->stats.rx_frame_errors++;
				if (status & 0x0002) tp->stats.rx_crc_errors++;
				if (status & 0x0001) tp->stats.rx_fifo_errors++;
			}
		} else {
			/* Omit the four octet CRC from the length. */
			short pkt_len = ((status >> 16) & 0x7ff) - 4;
			struct sk_buff *skb;

#ifndef final_version
			if (pkt_len > 1518) {
				printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
				       dev->name, pkt_len, pkt_len);
				pkt_len = 1518;
				tp->stats.rx_length_errors++;
			}
#endif

			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < tulip_rx_copybreak
			    && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb->dev = dev;
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single_for_cpu(tp->pdev,
							    tp->rx_buffers[entry].mapping,
							    pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
				eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->tail,
						 pkt_len, 0);
				skb_put(skb, pkt_len);
#else
				memcpy(skb_put(skb, pkt_len),
				       tp->rx_buffers[entry].skb->tail,
				       pkt_len);
#endif
				pci_dma_sync_single_for_device(tp->pdev,
							       tp->rx_buffers[entry].mapping,
							       pkt_len, PCI_DMA_FROMDEVICE);
			} else {	/* Pass up the skb already on the Rx ring. */
				char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
						     pkt_len);

#ifndef final_version
				if (tp->rx_buffers[entry].mapping !=
				    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
					printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
					       "do not match in tulip_rx: %08x vs. %Lx %p / %p.\n",
					       dev->name,
					       le32_to_cpu(tp->rx_ring[entry].buffer1),
					       (long long)tp->rx_buffers[entry].mapping,
					       skb->head, temp);
				}
#endif

				pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
						 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

				tp->rx_buffers[entry].skb = NULL;
				tp->rx_buffers[entry].mapping = 0;
			}
			skb->protocol = eth_type_trans(skb, dev);

			netif_rx(skb);

			dev->last_rx = jiffies;
			tp->stats.rx_packets++;
			tp->stats.rx_bytes += pkt_len;
		}
		received++;
		entry = (++tp->cur_rx) % RX_RING_SIZE;
	}
	return received;
}
#endif /* CONFIG_TULIP_NAPI */
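/* Note the paired receive paths above: the NAPI tulip_poll() hands frames
   to the stack with netif_receive_skb() from poll (softirq) context, while
   the non-NAPI tulip_rx() runs in hard-irq context and must queue them via
   netif_rx() instead. */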
473 | |||
474 | static inline unsigned int phy_interrupt (struct net_device *dev) | ||
475 | { | ||
476 | #ifdef __hppa__ | ||
477 | struct tulip_private *tp = netdev_priv(dev); | ||
478 | int csr12 = ioread32(tp->base_addr + CSR12) & 0xff; | ||
479 | |||
480 | if (csr12 != tp->csr12_shadow) { | ||
481 | /* ack interrupt */ | ||
482 | iowrite32(csr12 | 0x02, tp->base_addr + CSR12); | ||
483 | tp->csr12_shadow = csr12; | ||
484 | /* do link change stuff */ | ||
485 | spin_lock(&tp->lock); | ||
486 | tulip_check_duplex(dev); | ||
487 | spin_unlock(&tp->lock); | ||
488 | /* clear irq ack bit */ | ||
489 | iowrite32(csr12 & ~0x02, tp->base_addr + CSR12); | ||
490 | |||
491 | return 1; | ||
492 | } | ||
493 | #endif | ||
494 | |||
495 | return 0; | ||
496 | } | ||
497 | |||
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
irqreturn_t tulip_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;
	int csr5;
	int missed;
	int rx = 0;
	int tx = 0;
	int oi = 0;
	int maxrx = RX_RING_SIZE;
	int maxtx = TX_RING_SIZE;
	int maxoi = TX_RING_SIZE;
#ifdef CONFIG_TULIP_NAPI
	int rxd = 0;
#else
	int entry;
#endif
	unsigned int work_count = tulip_max_interrupt_work;
	unsigned int handled = 0;

	/* Let's see whether the interrupt really is for us */
	csr5 = ioread32(ioaddr + CSR5);

	if (tp->flags & HAS_PHY_IRQ)
		handled = phy_interrupt(dev);

	if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
		return IRQ_RETVAL(handled);

	tp->nir++;

	do {

#ifdef CONFIG_TULIP_NAPI

		if (!rxd && (csr5 & (RxIntr | RxNoBuf))) {
			rxd++;
			/* Mask RX intrs and add the device to the poll list. */
			iowrite32(tulip_tbl[tp->chip_id].valid_intrs & ~RxPollInt,
				  ioaddr + CSR7);
			netif_rx_schedule(dev);

			if (!(csr5 & ~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass)))
				break;
		}

		/* Acknowledge the interrupt sources we handle here ASAP;
		   the poll function does the Rx and RxNoBuf acking.
		   (0x0001ff3f is 0x0001ffff with the RxIntr and RxNoBuf
		   bits cleared.) */

		iowrite32(csr5 & 0x0001ff3f, ioaddr + CSR5);

#else
		/* Acknowledge all of the current interrupt sources ASAP. */
		iowrite32(csr5 & 0x0001ffff, ioaddr + CSR5);


		if (csr5 & (RxIntr | RxNoBuf)) {
			rx += tulip_rx(dev);
			tulip_refill_rx(dev);
		}

#endif /* CONFIG_TULIP_NAPI */

		if (tulip_debug > 4)
			printk(KERN_DEBUG "%s: interrupt  csr5=%#8.8x new csr5=%#8.8x.\n",
			       dev->name, csr5, ioread32(ioaddr + CSR5));

		if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
			unsigned int dirty_tx;

			spin_lock(&tp->lock);

			for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
			     dirty_tx++) {
				int entry = dirty_tx % TX_RING_SIZE;
				int status = le32_to_cpu(tp->tx_ring[entry].status);

				if (status < 0)
					break;	/* It still has not been Txed */

				/* Check for Rx filter setup frames. */
				if (tp->tx_buffers[entry].skb == NULL) {
					/* test because dummy frames are not mapped */
					if (tp->tx_buffers[entry].mapping)
						pci_unmap_single(tp->pdev,
								 tp->tx_buffers[entry].mapping,
								 sizeof(tp->setup_frame),
								 PCI_DMA_TODEVICE);
					continue;
				}

				if (status & 0x8000) {
					/* There was a major error; log it. */
#ifndef final_version
					if (tulip_debug > 1)
						printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
						       dev->name, status);
#endif
					tp->stats.tx_errors++;
					if (status & 0x4104) tp->stats.tx_aborted_errors++;
					if (status & 0x0C00) tp->stats.tx_carrier_errors++;
					if (status & 0x0200) tp->stats.tx_window_errors++;
					if (status & 0x0002) tp->stats.tx_fifo_errors++;
					if ((status & 0x0080) && tp->full_duplex == 0)
						tp->stats.tx_heartbeat_errors++;
				} else {
					tp->stats.tx_bytes +=
						tp->tx_buffers[entry].skb->len;
					tp->stats.collisions += (status >> 3) & 15;
					tp->stats.tx_packets++;
				}

				pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
						 tp->tx_buffers[entry].skb->len,
						 PCI_DMA_TODEVICE);

				/* Free the original skb. */
				dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
				tp->tx_buffers[entry].skb = NULL;
				tp->tx_buffers[entry].mapping = 0;
				tx++;
			}
623 | |||
624 | #ifndef final_version | ||
625 | if (tp->cur_tx - dirty_tx > TX_RING_SIZE) { | ||
626 | printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d.\n", | ||
627 | dev->name, dirty_tx, tp->cur_tx); | ||
628 | dirty_tx += TX_RING_SIZE; | ||
629 | } | ||
630 | #endif | ||
631 | |||
632 | if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2) | ||
633 | netif_wake_queue(dev); | ||
634 | |||
635 | tp->dirty_tx = dirty_tx; | ||
636 | if (csr5 & TxDied) { | ||
637 | if (tulip_debug > 2) | ||
638 | printk(KERN_WARNING "%s: The transmitter stopped." | ||
639 | " CSR5 is %x, CSR6 %x, new CSR6 %x.\n", | ||
640 | dev->name, csr5, ioread32(ioaddr + CSR6), tp->csr6); | ||
641 | tulip_restart_rxtx(tp); | ||
642 | } | ||
643 | spin_unlock(&tp->lock); | ||
644 | } | ||
645 | |||
646 | /* Log errors. */ | ||
647 | if (csr5 & AbnormalIntr) { /* Abnormal error summary bit. */ | ||
648 | if (csr5 == 0xffffffff) | ||
649 | break; | ||
650 | if (csr5 & TxJabber) tp->stats.tx_errors++; | ||
651 | if (csr5 & TxFIFOUnderflow) { | ||
652 | if ((tp->csr6 & 0xC000) != 0xC000) | ||
653 | tp->csr6 += 0x4000; /* Bump up the Tx threshold */ | ||
654 | else | ||
655 | tp->csr6 |= 0x00200000; /* Store-n-forward. */ | ||
656 | /* Restart the transmit process. */ | ||
657 | tulip_restart_rxtx(tp); | ||
658 | iowrite32(0, ioaddr + CSR1); | ||
659 | } | ||
660 | if (csr5 & (RxDied | RxNoBuf)) { | ||
661 | if (tp->flags & COMET_MAC_ADDR) { | ||
662 | iowrite32(tp->mc_filter[0], ioaddr + 0xAC); | ||
663 | iowrite32(tp->mc_filter[1], ioaddr + 0xB0); | ||
664 | } | ||
665 | } | ||
666 | if (csr5 & RxDied) { /* Missed a Rx frame. */ | ||
667 | tp->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff; | ||
668 | tp->stats.rx_errors++; | ||
669 | tulip_start_rxtx(tp); | ||
670 | } | ||
671 | /* | ||
672 | * NB: t21142_lnk_change() does a del_timer_sync(), so be careful if this | ||
673 | * call is ever done under the spinlock | ||
674 | */ | ||
675 | if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) { | ||
676 | if (tp->link_change) | ||
677 | (tp->link_change)(dev, csr5); | ||
678 | } | ||
679 | if (csr5 & SytemError) { | ||
680 | int error = (csr5 >> 23) & 7; | ||
681 | /* oops, we hit a PCI error. The code produced corresponds | ||
682 | * to the reason: | ||
683 | * 0 - parity error | ||
684 | * 1 - master abort | ||
685 | * 2 - target abort | ||
686 | * Note that on parity error, we should do a software reset | ||
687 | * of the chip to get it back into a sane state (according | ||
688 | * to the 21142/3 docs that is). | ||
689 | * -- rmk | ||
690 | */ | ||
691 | printk(KERN_ERR "%s: (%lu) System Error occurred (%d)\n", | ||
692 | dev->name, tp->nir, error); | ||
693 | } | ||
694 | /* Clear all error sources, included undocumented ones! */ | ||
695 | iowrite32(0x0800f7ba, ioaddr + CSR5); | ||
696 | oi++; | ||
697 | } | ||
		if (csr5 & TimerInt) {

			if (tulip_debug > 2)
				printk(KERN_ERR "%s: Re-enabling interrupts, %8.8x.\n",
				       dev->name, csr5);
			iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
			tp->ttimer = 0;
			oi++;
		}
		if (tx > maxtx || rx > maxrx || oi > maxoi) {
			if (tulip_debug > 1)
				printk(KERN_WARNING "%s: Too much work during an interrupt, "
				       "csr5=0x%8.8x. (%lu) (%d,%d,%d)\n",
				       dev->name, csr5, tp->nir, tx, rx, oi);

			/* Acknowledge all interrupt sources. */
			iowrite32(0x8001ffff, ioaddr + CSR5);
			if (tp->flags & HAS_INTR_MITIGATION) {
				/* Josip Loncaric at ICASE did extensive experimentation
				   to develop a good interrupt mitigation setting. */
				iowrite32(0x8b240000, ioaddr + CSR11);
			} else if (tp->chip_id == LC82C168) {
				/* the LC82C168 doesn't have a hw timer. */
				iowrite32(0x00, ioaddr + CSR7);
				mod_timer(&tp->timer, RUN_AT(HZ/50));
			} else {
				/* Mask all interrupting sources, set timer to
				   re-enable. */
				iowrite32(((~csr5) & 0x0001ebef) | AbnormalIntr | TimerInt,
					  ioaddr + CSR7);
				iowrite32(0x0012, ioaddr + CSR11);
			}
			break;
		}
730 | |||
731 | work_count--; | ||
732 | if (work_count == 0) | ||
733 | break; | ||
734 | |||
735 | csr5 = ioread32(ioaddr + CSR5); | ||
736 | |||
737 | #ifdef CONFIG_TULIP_NAPI | ||
738 | if (rxd) | ||
739 | csr5 &= ~RxPollInt; | ||
740 | } while ((csr5 & (TxNoBuf | | ||
741 | TxDied | | ||
742 | TxIntr | | ||
743 | TimerInt | | ||
744 | /* Abnormal intr. */ | ||
745 | RxDied | | ||
746 | TxFIFOUnderflow | | ||
747 | TxJabber | | ||
748 | TPLnkFail | | ||
749 | SytemError )) != 0); | ||
750 | #else | ||
751 | } while ((csr5 & (NormalIntr|AbnormalIntr)) != 0); | ||
752 | |||
	tulip_refill_rx(dev);

	/* check if the card is in suspend mode */
	entry = tp->dirty_rx % RX_RING_SIZE;
	if (tp->rx_buffers[entry].skb == NULL) {
		if (tulip_debug > 1)
			printk(KERN_WARNING "%s: in rx suspend mode: (%lu) "
			       "(tp->cur_rx = %u, ttimer = %d, rx = %d) "
			       "go/stay in suspend mode\n",
			       dev->name, tp->nir, tp->cur_rx, tp->ttimer, rx);
		if (tp->chip_id == LC82C168) {
			iowrite32(0x00, ioaddr + CSR7);
			mod_timer(&tp->timer, RUN_AT(HZ/50));
		} else {
			if (tp->ttimer == 0 || (ioread32(ioaddr + CSR11) & 0xffff) == 0) {
				if (tulip_debug > 1)
					printk(KERN_WARNING "%s: in rx suspend mode: (%lu) set timer\n",
					       dev->name, tp->nir);
				iowrite32(tulip_tbl[tp->chip_id].valid_intrs | TimerInt,
					  ioaddr + CSR7);
				iowrite32(TimerInt, ioaddr + CSR5);
				iowrite32(12, ioaddr + CSR11);
				tp->ttimer = 1;
			}
		}
	}
#endif /* CONFIG_TULIP_NAPI */

776 | |||
777 | if ((missed = ioread32(ioaddr + CSR8) & 0x1ffff)) { | ||
778 | tp->stats.rx_dropped += missed & 0x10000 ? 0x10000 : missed; | ||
779 | } | ||
780 | |||
781 | if (tulip_debug > 4) | ||
782 | printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#4.4x.\n", | ||
783 | dev->name, ioread32(ioaddr + CSR5)); | ||
784 | |||
785 | return IRQ_HANDLED; | ||
786 | } | ||