author     Jay Cliburn <jacliburn@bellsouth.net>    2007-07-15 12:03:29 -0400
committer  Jeff Garzik <jeff@garzik.org>            2007-07-16 18:29:16 -0400
commit     05ffdd7bf364c456ffd51f0c2bf0f41b8e110f49
tree       29ef162264e09fde11f46d4b2f8324d3b11cdf68 /drivers/net/atl1/atl1_main.c
parent     2ca13da705ac9780ff7cd2ea7948e25303c42f81
atl1: reorder atl1_main functions
Reorder functions in atl1_main into more logical groupings to make the
code easier to follow. This patch is large, but it's harmless; it neither
adds nor removes any functionality whatsoever.
Signed-off-by: Jay Cliburn <jacliburn@bellsouth.net>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
Diffstat (limited to 'drivers/net/atl1/atl1_main.c')
-rw-r--r--  drivers/net/atl1/atl1_main.c  1950
1 file changed, 975 insertions(+), 975 deletions(-)
diff --git a/drivers/net/atl1/atl1_main.c b/drivers/net/atl1/atl1_main.c
index 67ddf8dcf764..f7ac4758d51c 100644
--- a/drivers/net/atl1/atl1_main.c
+++ b/drivers/net/atl1/atl1_main.c
@@ -168,6 +168,64 @@ static int __devinit atl1_sw_init(struct atl1_adapter *adapter)
168 | return 0; | 168 | return 0; |
169 | } | 169 | } |
170 | 170 | ||
171 | static int mdio_read(struct net_device *netdev, int phy_id, int reg_num) | ||
172 | { | ||
173 | struct atl1_adapter *adapter = netdev_priv(netdev); | ||
174 | u16 result; | ||
175 | |||
176 | atl1_read_phy_reg(&adapter->hw, reg_num & 0x1f, &result); | ||
177 | |||
178 | return result; | ||
179 | } | ||
180 | |||
181 | static void mdio_write(struct net_device *netdev, int phy_id, int reg_num, | ||
182 | int val) | ||
183 | { | ||
184 | struct atl1_adapter *adapter = netdev_priv(netdev); | ||
185 | |||
186 | atl1_write_phy_reg(&adapter->hw, reg_num, val); | ||
187 | } | ||
188 | |||
189 | /* | ||
190 | * atl1_mii_ioctl - | ||
191 | * @netdev: | ||
192 | * @ifreq: | ||
193 | * @cmd: | ||
194 | */ | ||
195 | static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) | ||
196 | { | ||
197 | struct atl1_adapter *adapter = netdev_priv(netdev); | ||
198 | unsigned long flags; | ||
199 | int retval; | ||
200 | |||
201 | if (!netif_running(netdev)) | ||
202 | return -EINVAL; | ||
203 | |||
204 | spin_lock_irqsave(&adapter->lock, flags); | ||
205 | retval = generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL); | ||
206 | spin_unlock_irqrestore(&adapter->lock, flags); | ||
207 | |||
208 | return retval; | ||
209 | } | ||
210 | |||
211 | /* | ||
212 | * atl1_ioctl - | ||
213 | * @netdev: | ||
214 | * @ifreq: | ||
215 | * @cmd: | ||
216 | */ | ||
217 | static int atl1_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) | ||
218 | { | ||
219 | switch (cmd) { | ||
220 | case SIOCGMIIPHY: | ||
221 | case SIOCGMIIREG: | ||
222 | case SIOCSMIIREG: | ||
223 | return atl1_mii_ioctl(netdev, ifr, cmd); | ||
224 | default: | ||
225 | return -EOPNOTSUPP; | ||
226 | } | ||
227 | } | ||
228 | |||
171 | /* | 229 | /* |
172 | * atl1_setup_mem_resources - allocate Tx / RX descriptor resources | 230 | * atl1_setup_mem_resources - allocate Tx / RX descriptor resources |
173 | * @adapter: board private structure | 231 | * @adapter: board private structure |
@@ -276,555 +334,116 @@ void atl1_init_ring_ptrs(struct atl1_adapter *adapter)
276 | } | 334 | } |
277 | 335 | ||
278 | /* | 336 | /* |
279 | * atl1_irq_enable - Enable default interrupt generation settings | 337 | * atl1_clean_rx_ring - Free RFD Buffers |
280 | * @adapter: board private structure | 338 | * @adapter: board private structure |
281 | */ | 339 | */ |
282 | static void atl1_irq_enable(struct atl1_adapter *adapter) | 340 | static void atl1_clean_rx_ring(struct atl1_adapter *adapter) |
283 | { | ||
284 | iowrite32(IMR_NORMAL_MASK, adapter->hw.hw_addr + REG_IMR); | ||
285 | ioread32(adapter->hw.hw_addr + REG_IMR); | ||
286 | } | ||
287 | |||
288 | static void atl1_clear_phy_int(struct atl1_adapter *adapter) | ||
289 | { | ||
290 | u16 phy_data; | ||
291 | unsigned long flags; | ||
292 | |||
293 | spin_lock_irqsave(&adapter->lock, flags); | ||
294 | atl1_read_phy_reg(&adapter->hw, 19, &phy_data); | ||
295 | spin_unlock_irqrestore(&adapter->lock, flags); | ||
296 | } | ||
297 | |||
298 | static void atl1_inc_smb(struct atl1_adapter *adapter) | ||
299 | { | ||
300 | struct stats_msg_block *smb = adapter->smb.smb; | ||
301 | |||
302 | /* Fill out the OS statistics structure */ | ||
303 | adapter->soft_stats.rx_packets += smb->rx_ok; | ||
304 | adapter->soft_stats.tx_packets += smb->tx_ok; | ||
305 | adapter->soft_stats.rx_bytes += smb->rx_byte_cnt; | ||
306 | adapter->soft_stats.tx_bytes += smb->tx_byte_cnt; | ||
307 | adapter->soft_stats.multicast += smb->rx_mcast; | ||
308 | adapter->soft_stats.collisions += (smb->tx_1_col + smb->tx_2_col * 2 + | ||
309 | smb->tx_late_col + smb->tx_abort_col * adapter->hw.max_retry); | ||
310 | |||
311 | /* Rx Errors */ | ||
312 | adapter->soft_stats.rx_errors += (smb->rx_frag + smb->rx_fcs_err + | ||
313 | smb->rx_len_err + smb->rx_sz_ov + smb->rx_rxf_ov + | ||
314 | smb->rx_rrd_ov + smb->rx_align_err); | ||
315 | adapter->soft_stats.rx_fifo_errors += smb->rx_rxf_ov; | ||
316 | adapter->soft_stats.rx_length_errors += smb->rx_len_err; | ||
317 | adapter->soft_stats.rx_crc_errors += smb->rx_fcs_err; | ||
318 | adapter->soft_stats.rx_frame_errors += smb->rx_align_err; | ||
319 | adapter->soft_stats.rx_missed_errors += (smb->rx_rrd_ov + | ||
320 | smb->rx_rxf_ov); | ||
321 | |||
322 | adapter->soft_stats.rx_pause += smb->rx_pause; | ||
323 | adapter->soft_stats.rx_rrd_ov += smb->rx_rrd_ov; | ||
324 | adapter->soft_stats.rx_trunc += smb->rx_sz_ov; | ||
325 | |||
326 | /* Tx Errors */ | ||
327 | adapter->soft_stats.tx_errors += (smb->tx_late_col + | ||
328 | smb->tx_abort_col + smb->tx_underrun + smb->tx_trunc); | ||
329 | adapter->soft_stats.tx_fifo_errors += smb->tx_underrun; | ||
330 | adapter->soft_stats.tx_aborted_errors += smb->tx_abort_col; | ||
331 | adapter->soft_stats.tx_window_errors += smb->tx_late_col; | ||
332 | |||
333 | adapter->soft_stats.excecol += smb->tx_abort_col; | ||
334 | adapter->soft_stats.deffer += smb->tx_defer; | ||
335 | adapter->soft_stats.scc += smb->tx_1_col; | ||
336 | adapter->soft_stats.mcc += smb->tx_2_col; | ||
337 | adapter->soft_stats.latecol += smb->tx_late_col; | ||
338 | adapter->soft_stats.tx_underun += smb->tx_underrun; | ||
339 | adapter->soft_stats.tx_trunc += smb->tx_trunc; | ||
340 | adapter->soft_stats.tx_pause += smb->tx_pause; | ||
341 | |||
342 | adapter->net_stats.rx_packets = adapter->soft_stats.rx_packets; | ||
343 | adapter->net_stats.tx_packets = adapter->soft_stats.tx_packets; | ||
344 | adapter->net_stats.rx_bytes = adapter->soft_stats.rx_bytes; | ||
345 | adapter->net_stats.tx_bytes = adapter->soft_stats.tx_bytes; | ||
346 | adapter->net_stats.multicast = adapter->soft_stats.multicast; | ||
347 | adapter->net_stats.collisions = adapter->soft_stats.collisions; | ||
348 | adapter->net_stats.rx_errors = adapter->soft_stats.rx_errors; | ||
349 | adapter->net_stats.rx_over_errors = | ||
350 | adapter->soft_stats.rx_missed_errors; | ||
351 | adapter->net_stats.rx_length_errors = | ||
352 | adapter->soft_stats.rx_length_errors; | ||
353 | adapter->net_stats.rx_crc_errors = adapter->soft_stats.rx_crc_errors; | ||
354 | adapter->net_stats.rx_frame_errors = | ||
355 | adapter->soft_stats.rx_frame_errors; | ||
356 | adapter->net_stats.rx_fifo_errors = adapter->soft_stats.rx_fifo_errors; | ||
357 | adapter->net_stats.rx_missed_errors = | ||
358 | adapter->soft_stats.rx_missed_errors; | ||
359 | adapter->net_stats.tx_errors = adapter->soft_stats.tx_errors; | ||
360 | adapter->net_stats.tx_fifo_errors = adapter->soft_stats.tx_fifo_errors; | ||
361 | adapter->net_stats.tx_aborted_errors = | ||
362 | adapter->soft_stats.tx_aborted_errors; | ||
363 | adapter->net_stats.tx_window_errors = | ||
364 | adapter->soft_stats.tx_window_errors; | ||
365 | adapter->net_stats.tx_carrier_errors = | ||
366 | adapter->soft_stats.tx_carrier_errors; | ||
367 | } | ||
368 | |||
369 | static void atl1_rx_checksum(struct atl1_adapter *adapter, | ||
370 | struct rx_return_desc *rrd, struct sk_buff *skb) | ||
371 | { | ||
372 | struct pci_dev *pdev = adapter->pdev; | ||
373 | |||
374 | skb->ip_summed = CHECKSUM_NONE; | ||
375 | |||
376 | if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) { | ||
377 | if (rrd->err_flg & (ERR_FLAG_CRC | ERR_FLAG_TRUNC | | ||
378 | ERR_FLAG_CODE | ERR_FLAG_OV)) { | ||
379 | adapter->hw_csum_err++; | ||
380 | dev_printk(KERN_DEBUG, &pdev->dev, | ||
381 | "rx checksum error\n"); | ||
382 | return; | ||
383 | } | ||
384 | } | ||
385 | |||
386 | /* not IPv4 */ | ||
387 | if (!(rrd->pkt_flg & PACKET_FLAG_IPV4)) | ||
388 | /* checksum is invalid, but it's not an IPv4 pkt, so ok */ | ||
389 | return; | ||
390 | |||
391 | /* IPv4 packet */ | ||
392 | if (likely(!(rrd->err_flg & | ||
393 | (ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM)))) { | ||
394 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
395 | adapter->hw_csum_good++; | ||
396 | return; | ||
397 | } | ||
398 | |||
399 | /* IPv4, but hardware thinks its checksum is wrong */ | ||
400 | dev_printk(KERN_DEBUG, &pdev->dev, | ||
401 | "hw csum wrong, pkt_flag:%x, err_flag:%x\n", | ||
402 | rrd->pkt_flg, rrd->err_flg); | ||
403 | skb->ip_summed = CHECKSUM_COMPLETE; | ||
404 | skb->csum = htons(rrd->xsz.xsum_sz.rx_chksum); | ||
405 | adapter->hw_csum_err++; | ||
406 | return; | ||
407 | } | ||
408 | |||
409 | /* | ||
410 | * atl1_alloc_rx_buffers - Replace used receive buffers | ||
411 | * @adapter: address of board private structure | ||
412 | */ | ||
413 | static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter) | ||
414 | { | ||
415 | struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; | ||
416 | struct pci_dev *pdev = adapter->pdev; | ||
417 | struct page *page; | ||
418 | unsigned long offset; | ||
419 | struct atl1_buffer *buffer_info, *next_info; | ||
420 | struct sk_buff *skb; | ||
421 | u16 num_alloc = 0; | ||
422 | u16 rfd_next_to_use, next_next; | ||
423 | struct rx_free_desc *rfd_desc; | ||
424 | |||
425 | next_next = rfd_next_to_use = atomic_read(&rfd_ring->next_to_use); | ||
426 | if (++next_next == rfd_ring->count) | ||
427 | next_next = 0; | ||
428 | buffer_info = &rfd_ring->buffer_info[rfd_next_to_use]; | ||
429 | next_info = &rfd_ring->buffer_info[next_next]; | ||
430 | |||
431 | while (!buffer_info->alloced && !next_info->alloced) { | ||
432 | if (buffer_info->skb) { | ||
433 | buffer_info->alloced = 1; | ||
434 | goto next; | ||
435 | } | ||
436 | |||
437 | rfd_desc = ATL1_RFD_DESC(rfd_ring, rfd_next_to_use); | ||
438 | |||
439 | skb = dev_alloc_skb(adapter->rx_buffer_len + NET_IP_ALIGN); | ||
440 | if (unlikely(!skb)) { /* Better luck next round */ | ||
441 | adapter->net_stats.rx_dropped++; | ||
442 | break; | ||
443 | } | ||
444 | |||
445 | /* | ||
446 | * Make buffer alignment 2 beyond a 16 byte boundary | ||
447 | * this will result in a 16 byte aligned IP header after | ||
448 | * the 14 byte MAC header is removed | ||
449 | */ | ||
450 | skb_reserve(skb, NET_IP_ALIGN); | ||
451 | |||
452 | buffer_info->alloced = 1; | ||
453 | buffer_info->skb = skb; | ||
454 | buffer_info->length = (u16) adapter->rx_buffer_len; | ||
455 | page = virt_to_page(skb->data); | ||
456 | offset = (unsigned long)skb->data & ~PAGE_MASK; | ||
457 | buffer_info->dma = pci_map_page(pdev, page, offset, | ||
458 | adapter->rx_buffer_len, | ||
459 | PCI_DMA_FROMDEVICE); | ||
460 | rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma); | ||
461 | rfd_desc->buf_len = cpu_to_le16(adapter->rx_buffer_len); | ||
462 | rfd_desc->coalese = 0; | ||
463 | |||
464 | next: | ||
465 | rfd_next_to_use = next_next; | ||
466 | if (unlikely(++next_next == rfd_ring->count)) | ||
467 | next_next = 0; | ||
468 | |||
469 | buffer_info = &rfd_ring->buffer_info[rfd_next_to_use]; | ||
470 | next_info = &rfd_ring->buffer_info[next_next]; | ||
471 | num_alloc++; | ||
472 | } | ||
473 | |||
474 | if (num_alloc) { | ||
475 | /* | ||
476 | * Force memory writes to complete before letting h/w | ||
477 | * know there are new descriptors to fetch. (Only | ||
478 | * applicable for weak-ordered memory model archs, | ||
479 | * such as IA-64). | ||
480 | */ | ||
481 | wmb(); | ||
482 | atomic_set(&rfd_ring->next_to_use, (int)rfd_next_to_use); | ||
483 | } | ||
484 | return num_alloc; | ||
485 | } | ||
486 | |||
487 | static void atl1_clean_alloc_flag(struct atl1_adapter *adapter, | ||
488 | struct rx_return_desc *rrd, u16 offset) | ||
489 | { | ||
490 | struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; | ||
491 | |||
492 | while (rfd_ring->next_to_clean != (rrd->buf_indx + offset)) { | ||
493 | rfd_ring->buffer_info[rfd_ring->next_to_clean].alloced = 0; | ||
494 | if (++rfd_ring->next_to_clean == rfd_ring->count) { | ||
495 | rfd_ring->next_to_clean = 0; | ||
496 | } | ||
497 | } | ||
498 | } | ||
499 | |||
500 | static void atl1_update_rfd_index(struct atl1_adapter *adapter, | ||
501 | struct rx_return_desc *rrd) | ||
502 | { | ||
503 | u16 num_buf; | ||
504 | |||
505 | num_buf = (rrd->xsz.xsum_sz.pkt_size + adapter->rx_buffer_len - 1) / | ||
506 | adapter->rx_buffer_len; | ||
507 | if (rrd->num_buf == num_buf) | ||
508 | /* clean alloc flag for bad rrd */ | ||
509 | atl1_clean_alloc_flag(adapter, rrd, num_buf); | ||
510 | } | ||
511 | |||
512 | static void atl1_intr_rx(struct atl1_adapter *adapter) | ||
513 | { | 341 | { |
514 | int i, count; | ||
515 | u16 length; | ||
516 | u16 rrd_next_to_clean; | ||
517 | u32 value; | ||
518 | struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; | 342 | struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; |
519 | struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; | 343 | struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; |
520 | struct atl1_buffer *buffer_info; | 344 | struct atl1_buffer *buffer_info; |
521 | struct rx_return_desc *rrd; | 345 | struct pci_dev *pdev = adapter->pdev; |
522 | struct sk_buff *skb; | 346 | unsigned long size; |
523 | 347 | unsigned int i; | |
524 | count = 0; | ||
525 | |||
526 | rrd_next_to_clean = atomic_read(&rrd_ring->next_to_clean); | ||
527 | |||
528 | while (1) { | ||
529 | rrd = ATL1_RRD_DESC(rrd_ring, rrd_next_to_clean); | ||
530 | i = 1; | ||
531 | if (likely(rrd->xsz.valid)) { /* packet valid */ | ||
532 | chk_rrd: | ||
533 | /* check rrd status */ | ||
534 | if (likely(rrd->num_buf == 1)) | ||
535 | goto rrd_ok; | ||
536 | |||
537 | /* rrd seems to be bad */ | ||
538 | if (unlikely(i-- > 0)) { | ||
539 | /* rrd may not be DMAed completely */ | ||
540 | dev_printk(KERN_DEBUG, &adapter->pdev->dev, | ||
541 | "incomplete RRD DMA transfer\n"); | ||
542 | udelay(1); | ||
543 | goto chk_rrd; | ||
544 | } | ||
545 | /* bad rrd */ | ||
546 | dev_printk(KERN_DEBUG, &adapter->pdev->dev, | ||
547 | "bad RRD\n"); | ||
548 | /* see if update RFD index */ | ||
549 | if (rrd->num_buf > 1) | ||
550 | atl1_update_rfd_index(adapter, rrd); | ||
551 | |||
552 | /* update rrd */ | ||
553 | rrd->xsz.valid = 0; | ||
554 | if (++rrd_next_to_clean == rrd_ring->count) | ||
555 | rrd_next_to_clean = 0; | ||
556 | count++; | ||
557 | continue; | ||
558 | } else { /* current rrd still not be updated */ | ||
559 | 348 | ||
560 | break; | 349 | /* Free all the Rx ring sk_buffs */ |
350 | for (i = 0; i < rfd_ring->count; i++) { | ||
351 | buffer_info = &rfd_ring->buffer_info[i]; | ||
352 | if (buffer_info->dma) { | ||
353 | pci_unmap_page(pdev, buffer_info->dma, | ||
354 | buffer_info->length, PCI_DMA_FROMDEVICE); | ||
355 | buffer_info->dma = 0; | ||
561 | } | 356 | } |
562 | rrd_ok: | 357 | if (buffer_info->skb) { |
563 | /* clean alloc flag for bad rrd */ | 358 | dev_kfree_skb(buffer_info->skb); |
564 | atl1_clean_alloc_flag(adapter, rrd, 0); | 359 | buffer_info->skb = NULL; |
565 | |||
566 | buffer_info = &rfd_ring->buffer_info[rrd->buf_indx]; | ||
567 | if (++rfd_ring->next_to_clean == rfd_ring->count) | ||
568 | rfd_ring->next_to_clean = 0; | ||
569 | |||
570 | /* update rrd next to clean */ | ||
571 | if (++rrd_next_to_clean == rrd_ring->count) | ||
572 | rrd_next_to_clean = 0; | ||
573 | count++; | ||
574 | |||
575 | if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) { | ||
576 | if (!(rrd->err_flg & | ||
577 | (ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM | ||
578 | | ERR_FLAG_LEN))) { | ||
579 | /* packet error, don't need upstream */ | ||
580 | buffer_info->alloced = 0; | ||
581 | rrd->xsz.valid = 0; | ||
582 | continue; | ||
583 | } | ||
584 | } | 360 | } |
585 | |||
586 | /* Good Receive */ | ||
587 | pci_unmap_page(adapter->pdev, buffer_info->dma, | ||
588 | buffer_info->length, PCI_DMA_FROMDEVICE); | ||
589 | skb = buffer_info->skb; | ||
590 | length = le16_to_cpu(rrd->xsz.xsum_sz.pkt_size); | ||
591 | |||
592 | skb_put(skb, length - ETHERNET_FCS_SIZE); | ||
593 | |||
594 | /* Receive Checksum Offload */ | ||
595 | atl1_rx_checksum(adapter, rrd, skb); | ||
596 | skb->protocol = eth_type_trans(skb, adapter->netdev); | ||
597 | |||
598 | if (adapter->vlgrp && (rrd->pkt_flg & PACKET_FLAG_VLAN_INS)) { | ||
599 | u16 vlan_tag = (rrd->vlan_tag >> 4) | | ||
600 | ((rrd->vlan_tag & 7) << 13) | | ||
601 | ((rrd->vlan_tag & 8) << 9); | ||
602 | vlan_hwaccel_rx(skb, adapter->vlgrp, vlan_tag); | ||
603 | } else | ||
604 | netif_rx(skb); | ||
605 | |||
606 | /* let protocol layer free skb */ | ||
607 | buffer_info->skb = NULL; | ||
608 | buffer_info->alloced = 0; | ||
609 | rrd->xsz.valid = 0; | ||
610 | |||
611 | adapter->netdev->last_rx = jiffies; | ||
612 | } | 361 | } |
613 | 362 | ||
614 | atomic_set(&rrd_ring->next_to_clean, rrd_next_to_clean); | 363 | size = sizeof(struct atl1_buffer) * rfd_ring->count; |
615 | 364 | memset(rfd_ring->buffer_info, 0, size); | |
616 | atl1_alloc_rx_buffers(adapter); | ||
617 | 365 | ||
618 | /* update mailbox ? */ | 366 | /* Zero out the descriptor ring */ |
619 | if (count) { | 367 | memset(rfd_ring->desc, 0, rfd_ring->size); |
620 | u32 tpd_next_to_use; | ||
621 | u32 rfd_next_to_use; | ||
622 | u32 rrd_next_to_clean; | ||
623 | 368 | ||
624 | spin_lock(&adapter->mb_lock); | 369 | rfd_ring->next_to_clean = 0; |
370 | atomic_set(&rfd_ring->next_to_use, 0); | ||
625 | 371 | ||
626 | tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use); | 372 | rrd_ring->next_to_use = 0; |
627 | rfd_next_to_use = | 373 | atomic_set(&rrd_ring->next_to_clean, 0); |
628 | atomic_read(&adapter->rfd_ring.next_to_use); | ||
629 | rrd_next_to_clean = | ||
630 | atomic_read(&adapter->rrd_ring.next_to_clean); | ||
631 | value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) << | ||
632 | MB_RFD_PROD_INDX_SHIFT) | | ||
633 | ((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) << | ||
634 | MB_RRD_CONS_INDX_SHIFT) | | ||
635 | ((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) << | ||
636 | MB_TPD_PROD_INDX_SHIFT); | ||
637 | iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX); | ||
638 | spin_unlock(&adapter->mb_lock); | ||
639 | } | ||
640 | } | 374 | } |
641 | 375 | ||
642 | static void atl1_intr_tx(struct atl1_adapter *adapter) | 376 | /* |
377 | * atl1_clean_tx_ring - Free Tx Buffers | ||
378 | * @adapter: board private structure | ||
379 | */ | ||
380 | static void atl1_clean_tx_ring(struct atl1_adapter *adapter) | ||
643 | { | 381 | { |
644 | struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; | 382 | struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; |
645 | struct atl1_buffer *buffer_info; | 383 | struct atl1_buffer *buffer_info; |
646 | u16 sw_tpd_next_to_clean; | 384 | struct pci_dev *pdev = adapter->pdev; |
647 | u16 cmb_tpd_next_to_clean; | 385 | unsigned long size; |
648 | 386 | unsigned int i; | |
649 | sw_tpd_next_to_clean = atomic_read(&tpd_ring->next_to_clean); | ||
650 | cmb_tpd_next_to_clean = le16_to_cpu(adapter->cmb.cmb->tpd_cons_idx); | ||
651 | |||
652 | while (cmb_tpd_next_to_clean != sw_tpd_next_to_clean) { | ||
653 | struct tx_packet_desc *tpd; | ||
654 | 387 | ||
655 | tpd = ATL1_TPD_DESC(tpd_ring, sw_tpd_next_to_clean); | 388 | /* Free all the Tx ring sk_buffs */ |
656 | buffer_info = &tpd_ring->buffer_info[sw_tpd_next_to_clean]; | 389 | for (i = 0; i < tpd_ring->count; i++) { |
390 | buffer_info = &tpd_ring->buffer_info[i]; | ||
657 | if (buffer_info->dma) { | 391 | if (buffer_info->dma) { |
658 | pci_unmap_page(adapter->pdev, buffer_info->dma, | 392 | pci_unmap_page(pdev, buffer_info->dma, |
659 | buffer_info->length, PCI_DMA_TODEVICE); | 393 | buffer_info->length, PCI_DMA_TODEVICE); |
660 | buffer_info->dma = 0; | 394 | buffer_info->dma = 0; |
661 | } | 395 | } |
396 | } | ||
662 | 397 | ||
398 | for (i = 0; i < tpd_ring->count; i++) { | ||
399 | buffer_info = &tpd_ring->buffer_info[i]; | ||
663 | if (buffer_info->skb) { | 400 | if (buffer_info->skb) { |
664 | dev_kfree_skb_irq(buffer_info->skb); | 401 | dev_kfree_skb_any(buffer_info->skb); |
665 | buffer_info->skb = NULL; | 402 | buffer_info->skb = NULL; |
666 | } | 403 | } |
667 | tpd->buffer_addr = 0; | ||
668 | tpd->desc.data = 0; | ||
669 | |||
670 | if (++sw_tpd_next_to_clean == tpd_ring->count) | ||
671 | sw_tpd_next_to_clean = 0; | ||
672 | } | 404 | } |
673 | atomic_set(&tpd_ring->next_to_clean, sw_tpd_next_to_clean); | ||
674 | |||
675 | if (netif_queue_stopped(adapter->netdev) | ||
676 | && netif_carrier_ok(adapter->netdev)) | ||
677 | netif_wake_queue(adapter->netdev); | ||
678 | } | ||
679 | |||
680 | static void atl1_check_for_link(struct atl1_adapter *adapter) | ||
681 | { | ||
682 | struct net_device *netdev = adapter->netdev; | ||
683 | u16 phy_data = 0; | ||
684 | |||
685 | spin_lock(&adapter->lock); | ||
686 | adapter->phy_timer_pending = false; | ||
687 | atl1_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data); | ||
688 | atl1_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data); | ||
689 | spin_unlock(&adapter->lock); | ||
690 | 405 | ||
691 | /* notify upper layer link down ASAP */ | 406 | size = sizeof(struct atl1_buffer) * tpd_ring->count; |
692 | if (!(phy_data & BMSR_LSTATUS)) { /* Link Down */ | 407 | memset(tpd_ring->buffer_info, 0, size); |
693 | if (netif_carrier_ok(netdev)) { /* old link state: Up */ | ||
694 | dev_info(&adapter->pdev->dev, "%s link is down\n", | ||
695 | netdev->name); | ||
696 | adapter->link_speed = SPEED_0; | ||
697 | netif_carrier_off(netdev); | ||
698 | netif_stop_queue(netdev); | ||
699 | } | ||
700 | } | ||
701 | schedule_work(&adapter->link_chg_task); | ||
702 | } | ||
703 | |||
704 | /* | ||
705 | * atl1_intr - Interrupt Handler | ||
706 | * @irq: interrupt number | ||
707 | * @data: pointer to a network interface device structure | ||
708 | * @pt_regs: CPU registers structure | ||
709 | */ | ||
710 | static irqreturn_t atl1_intr(int irq, void *data) | ||
711 | { | ||
712 | struct atl1_adapter *adapter = netdev_priv(data); | ||
713 | u32 status; | ||
714 | u8 update_rx; | ||
715 | int max_ints = 10; | ||
716 | |||
717 | status = adapter->cmb.cmb->int_stats; | ||
718 | if (!status) | ||
719 | return IRQ_NONE; | ||
720 | |||
721 | update_rx = 0; | ||
722 | |||
723 | do { | ||
724 | /* clear CMB interrupt status at once */ | ||
725 | adapter->cmb.cmb->int_stats = 0; | ||
726 | |||
727 | if (status & ISR_GPHY) /* clear phy status */ | ||
728 | atl1_clear_phy_int(adapter); | ||
729 | |||
730 | /* clear ISR status, and Enable CMB DMA/Disable Interrupt */ | ||
731 | iowrite32(status | ISR_DIS_INT, adapter->hw.hw_addr + REG_ISR); | ||
732 | |||
733 | /* check if SMB intr */ | ||
734 | if (status & ISR_SMB) | ||
735 | atl1_inc_smb(adapter); | ||
736 | |||
737 | /* check if PCIE PHY Link down */ | ||
738 | if (status & ISR_PHY_LINKDOWN) { | ||
739 | dev_printk(KERN_DEBUG, &adapter->pdev->dev, | ||
740 | "pcie phy link down %x\n", status); | ||
741 | if (netif_running(adapter->netdev)) { /* reset MAC */ | ||
742 | iowrite32(0, adapter->hw.hw_addr + REG_IMR); | ||
743 | schedule_work(&adapter->pcie_dma_to_rst_task); | ||
744 | return IRQ_HANDLED; | ||
745 | } | ||
746 | } | ||
747 | |||
748 | /* check if DMA read/write error ? */ | ||
749 | if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) { | ||
750 | dev_printk(KERN_DEBUG, &adapter->pdev->dev, | ||
751 | "pcie DMA r/w error (status = 0x%x)\n", | ||
752 | status); | ||
753 | iowrite32(0, adapter->hw.hw_addr + REG_IMR); | ||
754 | schedule_work(&adapter->pcie_dma_to_rst_task); | ||
755 | return IRQ_HANDLED; | ||
756 | } | ||
757 | |||
758 | /* link event */ | ||
759 | if (status & ISR_GPHY) { | ||
760 | adapter->soft_stats.tx_carrier_errors++; | ||
761 | atl1_check_for_link(adapter); | ||
762 | } | ||
763 | |||
764 | /* transmit event */ | ||
765 | if (status & ISR_CMB_TX) | ||
766 | atl1_intr_tx(adapter); | ||
767 | |||
768 | /* rx exception */ | ||
769 | if (unlikely(status & (ISR_RXF_OV | ISR_RFD_UNRUN | | ||
770 | ISR_RRD_OV | ISR_HOST_RFD_UNRUN | | ||
771 | ISR_HOST_RRD_OV | ISR_CMB_RX))) { | ||
772 | if (status & (ISR_RXF_OV | ISR_RFD_UNRUN | | ||
773 | ISR_RRD_OV | ISR_HOST_RFD_UNRUN | | ||
774 | ISR_HOST_RRD_OV)) | ||
775 | dev_printk(KERN_DEBUG, &adapter->pdev->dev, | ||
776 | "rx exception, ISR = 0x%x\n", status); | ||
777 | atl1_intr_rx(adapter); | ||
778 | } | ||
779 | |||
780 | if (--max_ints < 0) | ||
781 | break; | ||
782 | 408 | ||
783 | } while ((status = adapter->cmb.cmb->int_stats)); | 409 | /* Zero out the descriptor ring */ |
410 | memset(tpd_ring->desc, 0, tpd_ring->size); | ||
784 | 411 | ||
785 | /* re-enable Interrupt */ | 412 | atomic_set(&tpd_ring->next_to_use, 0); |
786 | iowrite32(ISR_DIS_SMB | ISR_DIS_DMA, adapter->hw.hw_addr + REG_ISR); | 413 | atomic_set(&tpd_ring->next_to_clean, 0); |
787 | return IRQ_HANDLED; | ||
788 | } | 414 | } |
789 | 415 | ||
790 | /* | 416 | /* |
791 | * atl1_set_multi - Multicast and Promiscuous mode set | 417 | * atl1_free_ring_resources - Free Tx / RX descriptor Resources |
792 | * @netdev: network interface device structure | 418 | * @adapter: board private structure |
793 | * | 419 | * |
794 | * The set_multi entry point is called whenever the multicast address | 420 | * Free all transmit software resources |
795 | * list or the network interface flags are updated. This routine is | ||
796 | * responsible for configuring the hardware for proper multicast, | ||
797 | * promiscuous mode, and all-multi behavior. | ||
798 | */ | 421 | */ |
799 | static void atl1_set_multi(struct net_device *netdev) | 422 | void atl1_free_ring_resources(struct atl1_adapter *adapter) |
800 | { | 423 | { |
801 | struct atl1_adapter *adapter = netdev_priv(netdev); | 424 | struct pci_dev *pdev = adapter->pdev; |
802 | struct atl1_hw *hw = &adapter->hw; | 425 | struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; |
803 | struct dev_mc_list *mc_ptr; | 426 | struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; |
804 | u32 rctl; | 427 | struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; |
805 | u32 hash_value; | 428 | struct atl1_ring_header *ring_header = &adapter->ring_header; |
806 | 429 | ||
807 | /* Check for Promiscuous and All Multicast modes */ | 430 | atl1_clean_tx_ring(adapter); |
808 | rctl = ioread32(hw->hw_addr + REG_MAC_CTRL); | 431 | atl1_clean_rx_ring(adapter); |
809 | if (netdev->flags & IFF_PROMISC) | ||
810 | rctl |= MAC_CTRL_PROMIS_EN; | ||
811 | else if (netdev->flags & IFF_ALLMULTI) { | ||
812 | rctl |= MAC_CTRL_MC_ALL_EN; | ||
813 | rctl &= ~MAC_CTRL_PROMIS_EN; | ||
814 | } else | ||
815 | rctl &= ~(MAC_CTRL_PROMIS_EN | MAC_CTRL_MC_ALL_EN); | ||
816 | 432 | ||
817 | iowrite32(rctl, hw->hw_addr + REG_MAC_CTRL); | 433 | kfree(tpd_ring->buffer_info); |
434 | pci_free_consistent(pdev, ring_header->size, ring_header->desc, | ||
435 | ring_header->dma); | ||
818 | 436 | ||
819 | /* clear the old settings from the multicast hash table */ | 437 | tpd_ring->buffer_info = NULL; |
820 | iowrite32(0, hw->hw_addr + REG_RX_HASH_TABLE); | 438 | tpd_ring->desc = NULL; |
821 | iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2)); | 439 | tpd_ring->dma = 0; |
822 | 440 | ||
823 | /* compute mc addresses' hash value ,and put it into hash table */ | 441 | rfd_ring->buffer_info = NULL; |
824 | for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) { | 442 | rfd_ring->desc = NULL; |
825 | hash_value = atl1_hash_mc_addr(hw, mc_ptr->dmi_addr); | 443 | rfd_ring->dma = 0; |
826 | atl1_hash_set(hw, hash_value); | 444 | |
827 | } | 445 | rrd_ring->desc = NULL; |
446 | rrd_ring->dma = 0; | ||
828 | } | 447 | } |
829 | 448 | ||
830 | static void atl1_setup_mac_ctrl(struct atl1_adapter *adapter) | 449 | static void atl1_setup_mac_ctrl(struct atl1_adapter *adapter) |
@@ -865,6 +484,31 @@ static void atl1_setup_mac_ctrl(struct atl1_adapter *adapter)
865 | iowrite32(value, hw->hw_addr + REG_MAC_CTRL); | 484 | iowrite32(value, hw->hw_addr + REG_MAC_CTRL); |
866 | } | 485 | } |
867 | 486 | ||
487 | /* | ||
488 | * atl1_set_mac - Change the Ethernet Address of the NIC | ||
489 | * @netdev: network interface device structure | ||
490 | * @p: pointer to an address structure | ||
491 | * | ||
492 | * Returns 0 on success, negative on failure | ||
493 | */ | ||
494 | static int atl1_set_mac(struct net_device *netdev, void *p) | ||
495 | { | ||
496 | struct atl1_adapter *adapter = netdev_priv(netdev); | ||
497 | struct sockaddr *addr = p; | ||
498 | |||
499 | if (netif_running(netdev)) | ||
500 | return -EBUSY; | ||
501 | |||
502 | if (!is_valid_ether_addr(addr->sa_data)) | ||
503 | return -EADDRNOTAVAIL; | ||
504 | |||
505 | memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); | ||
506 | memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len); | ||
507 | |||
508 | atl1_set_mac_addr(&adapter->hw); | ||
509 | return 0; | ||
510 | } | ||
511 | |||
868 | static u32 atl1_check_link(struct atl1_adapter *adapter) | 512 | static u32 atl1_check_link(struct atl1_adapter *adapter) |
869 | { | 513 | { |
870 | struct atl1_hw *hw = &adapter->hw; | 514 | struct atl1_hw *hw = &adapter->hw; |
@@ -972,6 +616,103 @@ static u32 atl1_check_link(struct atl1_adapter *adapter)
972 | return ATL1_SUCCESS; | 616 | return ATL1_SUCCESS; |
973 | } | 617 | } |
974 | 618 | ||
619 | static void atl1_check_for_link(struct atl1_adapter *adapter) | ||
620 | { | ||
621 | struct net_device *netdev = adapter->netdev; | ||
622 | u16 phy_data = 0; | ||
623 | |||
624 | spin_lock(&adapter->lock); | ||
625 | adapter->phy_timer_pending = false; | ||
626 | atl1_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data); | ||
627 | atl1_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data); | ||
628 | spin_unlock(&adapter->lock); | ||
629 | |||
630 | /* notify upper layer link down ASAP */ | ||
631 | if (!(phy_data & BMSR_LSTATUS)) { /* Link Down */ | ||
632 | if (netif_carrier_ok(netdev)) { /* old link state: Up */ | ||
633 | dev_info(&adapter->pdev->dev, "%s link is down\n", | ||
634 | netdev->name); | ||
635 | adapter->link_speed = SPEED_0; | ||
636 | netif_carrier_off(netdev); | ||
637 | netif_stop_queue(netdev); | ||
638 | } | ||
639 | } | ||
640 | schedule_work(&adapter->link_chg_task); | ||
641 | } | ||
642 | |||
643 | /* | ||
644 | * atl1_set_multi - Multicast and Promiscuous mode set | ||
645 | * @netdev: network interface device structure | ||
646 | * | ||
647 | * The set_multi entry point is called whenever the multicast address | ||
648 | * list or the network interface flags are updated. This routine is | ||
649 | * responsible for configuring the hardware for proper multicast, | ||
650 | * promiscuous mode, and all-multi behavior. | ||
651 | */ | ||
652 | static void atl1_set_multi(struct net_device *netdev) | ||
653 | { | ||
654 | struct atl1_adapter *adapter = netdev_priv(netdev); | ||
655 | struct atl1_hw *hw = &adapter->hw; | ||
656 | struct dev_mc_list *mc_ptr; | ||
657 | u32 rctl; | ||
658 | u32 hash_value; | ||
659 | |||
660 | /* Check for Promiscuous and All Multicast modes */ | ||
661 | rctl = ioread32(hw->hw_addr + REG_MAC_CTRL); | ||
662 | if (netdev->flags & IFF_PROMISC) | ||
663 | rctl |= MAC_CTRL_PROMIS_EN; | ||
664 | else if (netdev->flags & IFF_ALLMULTI) { | ||
665 | rctl |= MAC_CTRL_MC_ALL_EN; | ||
666 | rctl &= ~MAC_CTRL_PROMIS_EN; | ||
667 | } else | ||
668 | rctl &= ~(MAC_CTRL_PROMIS_EN | MAC_CTRL_MC_ALL_EN); | ||
669 | |||
670 | iowrite32(rctl, hw->hw_addr + REG_MAC_CTRL); | ||
671 | |||
672 | /* clear the old settings from the multicast hash table */ | ||
673 | iowrite32(0, hw->hw_addr + REG_RX_HASH_TABLE); | ||
674 | iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2)); | ||
675 | |||
676 | /* compute mc addresses' hash value ,and put it into hash table */ | ||
677 | for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) { | ||
678 | hash_value = atl1_hash_mc_addr(hw, mc_ptr->dmi_addr); | ||
679 | atl1_hash_set(hw, hash_value); | ||
680 | } | ||
681 | } | ||
682 | |||
683 | /* | ||
684 | * atl1_change_mtu - Change the Maximum Transfer Unit | ||
685 | * @netdev: network interface device structure | ||
686 | * @new_mtu: new value for maximum frame size | ||
687 | * | ||
688 | * Returns 0 on success, negative on failure | ||
689 | */ | ||
690 | static int atl1_change_mtu(struct net_device *netdev, int new_mtu) | ||
691 | { | ||
692 | struct atl1_adapter *adapter = netdev_priv(netdev); | ||
693 | int old_mtu = netdev->mtu; | ||
694 | int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; | ||
695 | |||
696 | if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) || | ||
697 | (max_frame > MAX_JUMBO_FRAME_SIZE)) { | ||
698 | dev_warn(&adapter->pdev->dev, "invalid MTU setting\n"); | ||
699 | return -EINVAL; | ||
700 | } | ||
701 | |||
702 | adapter->hw.max_frame_size = max_frame; | ||
703 | adapter->hw.tx_jumbo_task_th = (max_frame + 7) >> 3; | ||
704 | adapter->rx_buffer_len = (max_frame + 7) & ~7; | ||
705 | adapter->hw.rx_jumbo_th = adapter->rx_buffer_len / 8; | ||
706 | |||
707 | netdev->mtu = new_mtu; | ||
708 | if ((old_mtu != new_mtu) && netif_running(netdev)) { | ||
709 | atl1_down(adapter); | ||
710 | atl1_up(adapter); | ||
711 | } | ||
712 | |||
713 | return 0; | ||
714 | } | ||
715 | |||
975 | static void set_flow_ctrl_old(struct atl1_adapter *adapter) | 716 | static void set_flow_ctrl_old(struct atl1_adapter *adapter) |
976 | { | 717 | { |
977 | u32 hi, lo, value; | 718 | u32 hi, lo, value; |
@@ -1202,6 +943,48 @@ static u32 atl1_configure(struct atl1_adapter *adapter)
1202 | } | 943 | } |
1203 | 944 | ||
1204 | /* | 945 | /* |
946 | * atl1_pcie_patch - Patch for PCIE module | ||
947 | */ | ||
948 | static void atl1_pcie_patch(struct atl1_adapter *adapter) | ||
949 | { | ||
950 | u32 value; | ||
951 | |||
952 | /* much vendor magic here */ | ||
953 | value = 0x6500; | ||
954 | iowrite32(value, adapter->hw.hw_addr + 0x12FC); | ||
955 | /* pcie flow control mode change */ | ||
956 | value = ioread32(adapter->hw.hw_addr + 0x1008); | ||
957 | value |= 0x8000; | ||
958 | iowrite32(value, adapter->hw.hw_addr + 0x1008); | ||
959 | } | ||
960 | |||
961 | /* | ||
962 | * When ACPI resume on some VIA MotherBoard, the Interrupt Disable bit/0x400 | ||
963 | * on PCI Command register is disable. | ||
964 | * The function enable this bit. | ||
965 | * Brackett, 2006/03/15 | ||
966 | */ | ||
967 | static void atl1_via_workaround(struct atl1_adapter *adapter) | ||
968 | { | ||
969 | unsigned long value; | ||
970 | |||
971 | value = ioread16(adapter->hw.hw_addr + PCI_COMMAND); | ||
972 | if (value & PCI_COMMAND_INTX_DISABLE) | ||
973 | value &= ~PCI_COMMAND_INTX_DISABLE; | ||
974 | iowrite32(value, adapter->hw.hw_addr + PCI_COMMAND); | ||
975 | } | ||
976 | |||
977 | /* | ||
978 | * atl1_irq_enable - Enable default interrupt generation settings | ||
979 | * @adapter: board private structure | ||
980 | */ | ||
981 | static void atl1_irq_enable(struct atl1_adapter *adapter) | ||
982 | { | ||
983 | iowrite32(IMR_NORMAL_MASK, adapter->hw.hw_addr + REG_IMR); | ||
984 | ioread32(adapter->hw.hw_addr + REG_IMR); | ||
985 | } | ||
986 | |||
987 | /* | ||
1205 | * atl1_irq_disable - Mask off interrupt generation on the NIC | 988 | * atl1_irq_disable - Mask off interrupt generation on the NIC |
1206 | * @adapter: board private structure | 989 | * @adapter: board private structure |
1207 | */ | 990 | */ |
@@ -1212,36 +995,434 @@ static void atl1_irq_disable(struct atl1_adapter *adapter)
1212 | synchronize_irq(adapter->pdev->irq); | 995 | synchronize_irq(adapter->pdev->irq); |
1213 | } | 996 | } |
1214 | 997 | ||
1215 | static void atl1_vlan_rx_register(struct net_device *netdev, | 998 | static void atl1_clear_phy_int(struct atl1_adapter *adapter) |
1216 | struct vlan_group *grp) | ||
1217 | { | 999 | { |
1218 | struct atl1_adapter *adapter = netdev_priv(netdev); | 1000 | u16 phy_data; |
1219 | unsigned long flags; | 1001 | unsigned long flags; |
1220 | u32 ctrl; | ||
1221 | 1002 | ||
1222 | spin_lock_irqsave(&adapter->lock, flags); | 1003 | spin_lock_irqsave(&adapter->lock, flags); |
1223 | /* atl1_irq_disable(adapter); */ | 1004 | atl1_read_phy_reg(&adapter->hw, 19, &phy_data); |
1224 | adapter->vlgrp = grp; | 1005 | spin_unlock_irqrestore(&adapter->lock, flags); |
1006 | } | ||
1225 | 1007 | ||
1226 | if (grp) { | 1008 | static void atl1_inc_smb(struct atl1_adapter *adapter) |
1227 | /* enable VLAN tag insert/strip */ | 1009 | { |
1228 | ctrl = ioread32(adapter->hw.hw_addr + REG_MAC_CTRL); | 1010 | struct stats_msg_block *smb = adapter->smb.smb; |
1229 | ctrl |= MAC_CTRL_RMV_VLAN; | 1011 | |
1230 | iowrite32(ctrl, adapter->hw.hw_addr + REG_MAC_CTRL); | 1012 | /* Fill out the OS statistics structure */ |
1231 | } else { | 1013 | adapter->soft_stats.rx_packets += smb->rx_ok; |
1232 | /* disable VLAN tag insert/strip */ | 1014 | adapter->soft_stats.tx_packets += smb->tx_ok; |
1233 | ctrl = ioread32(adapter->hw.hw_addr + REG_MAC_CTRL); | 1015 | adapter->soft_stats.rx_bytes += smb->rx_byte_cnt; |
1234 | ctrl &= ~MAC_CTRL_RMV_VLAN; | 1016 | adapter->soft_stats.tx_bytes += smb->tx_byte_cnt; |
1235 | iowrite32(ctrl, adapter->hw.hw_addr + REG_MAC_CTRL); | 1017 | adapter->soft_stats.multicast += smb->rx_mcast; |
1018 | adapter->soft_stats.collisions += (smb->tx_1_col + smb->tx_2_col * 2 + | ||
1019 | smb->tx_late_col + smb->tx_abort_col * adapter->hw.max_retry); | ||
1020 | |||
1021 | /* Rx Errors */ | ||
1022 | adapter->soft_stats.rx_errors += (smb->rx_frag + smb->rx_fcs_err + | ||
1023 | smb->rx_len_err + smb->rx_sz_ov + smb->rx_rxf_ov + | ||
1024 | smb->rx_rrd_ov + smb->rx_align_err); | ||
1025 | adapter->soft_stats.rx_fifo_errors += smb->rx_rxf_ov; | ||
1026 | adapter->soft_stats.rx_length_errors += smb->rx_len_err; | ||
1027 | adapter->soft_stats.rx_crc_errors += smb->rx_fcs_err; | ||
1028 | adapter->soft_stats.rx_frame_errors += smb->rx_align_err; | ||
1029 | adapter->soft_stats.rx_missed_errors += (smb->rx_rrd_ov + | ||
1030 | smb->rx_rxf_ov); | ||
1031 | |||
1032 | adapter->soft_stats.rx_pause += smb->rx_pause; | ||
1033 | adapter->soft_stats.rx_rrd_ov += smb->rx_rrd_ov; | ||
1034 | adapter->soft_stats.rx_trunc += smb->rx_sz_ov; | ||
1035 | |||
1036 | /* Tx Errors */ | ||
1037 | adapter->soft_stats.tx_errors += (smb->tx_late_col + | ||
1038 | smb->tx_abort_col + smb->tx_underrun + smb->tx_trunc); | ||
1039 | adapter->soft_stats.tx_fifo_errors += smb->tx_underrun; | ||
1040 | adapter->soft_stats.tx_aborted_errors += smb->tx_abort_col; | ||
1041 | adapter->soft_stats.tx_window_errors += smb->tx_late_col; | ||
1042 | |||
1043 | adapter->soft_stats.excecol += smb->tx_abort_col; | ||
1044 | adapter->soft_stats.deffer += smb->tx_defer; | ||
1045 | adapter->soft_stats.scc += smb->tx_1_col; | ||
1046 | adapter->soft_stats.mcc += smb->tx_2_col; | ||
1047 | adapter->soft_stats.latecol += smb->tx_late_col; | ||
1048 | adapter->soft_stats.tx_underun += smb->tx_underrun; | ||
1049 | adapter->soft_stats.tx_trunc += smb->tx_trunc; | ||
1050 | adapter->soft_stats.tx_pause += smb->tx_pause; | ||
1051 | |||
1052 | adapter->net_stats.rx_packets = adapter->soft_stats.rx_packets; | ||
1053 | adapter->net_stats.tx_packets = adapter->soft_stats.tx_packets; | ||
1054 | adapter->net_stats.rx_bytes = adapter->soft_stats.rx_bytes; | ||
1055 | adapter->net_stats.tx_bytes = adapter->soft_stats.tx_bytes; | ||
1056 | adapter->net_stats.multicast = adapter->soft_stats.multicast; | ||
1057 | adapter->net_stats.collisions = adapter->soft_stats.collisions; | ||
1058 | adapter->net_stats.rx_errors = adapter->soft_stats.rx_errors; | ||
1059 | adapter->net_stats.rx_over_errors = | ||
1060 | adapter->soft_stats.rx_missed_errors; | ||
1061 | adapter->net_stats.rx_length_errors = | ||
1062 | adapter->soft_stats.rx_length_errors; | ||
1063 | adapter->net_stats.rx_crc_errors = adapter->soft_stats.rx_crc_errors; | ||
1064 | adapter->net_stats.rx_frame_errors = | ||
1065 | adapter->soft_stats.rx_frame_errors; | ||
1066 | adapter->net_stats.rx_fifo_errors = adapter->soft_stats.rx_fifo_errors; | ||
1067 | adapter->net_stats.rx_missed_errors = | ||
1068 | adapter->soft_stats.rx_missed_errors; | ||
1069 | adapter->net_stats.tx_errors = adapter->soft_stats.tx_errors; | ||
1070 | adapter->net_stats.tx_fifo_errors = adapter->soft_stats.tx_fifo_errors; | ||
1071 | adapter->net_stats.tx_aborted_errors = | ||
1072 | adapter->soft_stats.tx_aborted_errors; | ||
1073 | adapter->net_stats.tx_window_errors = | ||
1074 | adapter->soft_stats.tx_window_errors; | ||
1075 | adapter->net_stats.tx_carrier_errors = | ||
1076 | adapter->soft_stats.tx_carrier_errors; | ||
1077 | } | ||
1078 | |||
1079 | /* | ||
1080 | * atl1_get_stats - Get System Network Statistics | ||
1081 | * @netdev: network interface device structure | ||
1082 | * | ||
1083 | * Returns the address of the device statistics structure. | ||
1084 | * The statistics are actually updated from the timer callback. | ||
1085 | */ | ||
1086 | static struct net_device_stats *atl1_get_stats(struct net_device *netdev) | ||
1087 | { | ||
1088 | struct atl1_adapter *adapter = netdev_priv(netdev); | ||
1089 | return &adapter->net_stats; | ||
1090 | } | ||
1091 | |||
1092 | static void atl1_update_mailbox(struct atl1_adapter *adapter) | ||
1093 | { | ||
1094 | unsigned long flags; | ||
1095 | u32 tpd_next_to_use; | ||
1096 | u32 rfd_next_to_use; | ||
1097 | u32 rrd_next_to_clean; | ||
1098 | u32 value; | ||
1099 | |||
1100 | spin_lock_irqsave(&adapter->mb_lock, flags); | ||
1101 | |||
1102 | tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use); | ||
1103 | rfd_next_to_use = atomic_read(&adapter->rfd_ring.next_to_use); | ||
1104 | rrd_next_to_clean = atomic_read(&adapter->rrd_ring.next_to_clean); | ||
1105 | |||
1106 | value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) << | ||
1107 | MB_RFD_PROD_INDX_SHIFT) | | ||
1108 | ((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) << | ||
1109 | MB_RRD_CONS_INDX_SHIFT) | | ||
1110 | ((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) << | ||
1111 | MB_TPD_PROD_INDX_SHIFT); | ||
1112 | iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX); | ||
1113 | |||
1114 | spin_unlock_irqrestore(&adapter->mb_lock, flags); | ||
1115 | } | ||
1116 | |||
1117 | static void atl1_clean_alloc_flag(struct atl1_adapter *adapter, | ||
1118 | struct rx_return_desc *rrd, u16 offset) | ||
1119 | { | ||
1120 | struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; | ||
1121 | |||
1122 | while (rfd_ring->next_to_clean != (rrd->buf_indx + offset)) { | ||
1123 | rfd_ring->buffer_info[rfd_ring->next_to_clean].alloced = 0; | ||
1124 | if (++rfd_ring->next_to_clean == rfd_ring->count) { | ||
1125 | rfd_ring->next_to_clean = 0; | ||
1126 | } | ||
1236 | } | 1127 | } |
1128 | } | ||
1237 | 1129 | ||
1238 | /* atl1_irq_enable(adapter); */ | 1130 | static void atl1_update_rfd_index(struct atl1_adapter *adapter, |
1239 | spin_unlock_irqrestore(&adapter->lock, flags); | 1131 | struct rx_return_desc *rrd) |
1132 | { | ||
1133 | u16 num_buf; | ||
1134 | |||
1135 | num_buf = (rrd->xsz.xsum_sz.pkt_size + adapter->rx_buffer_len - 1) / | ||
1136 | adapter->rx_buffer_len; | ||
1137 | if (rrd->num_buf == num_buf) | ||
1138 | /* clean alloc flag for bad rrd */ | ||
1139 | atl1_clean_alloc_flag(adapter, rrd, num_buf); | ||
1240 | } | 1140 | } |
1241 | 1141 | ||
1242 | static void atl1_restore_vlan(struct atl1_adapter *adapter) | 1142 | static void atl1_rx_checksum(struct atl1_adapter *adapter, |
1143 | struct rx_return_desc *rrd, struct sk_buff *skb) | ||
1243 | { | 1144 | { |
1244 | atl1_vlan_rx_register(adapter->netdev, adapter->vlgrp); | 1145 | struct pci_dev *pdev = adapter->pdev; |
1146 | |||
1147 | skb->ip_summed = CHECKSUM_NONE; | ||
1148 | |||
1149 | if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) { | ||
1150 | if (rrd->err_flg & (ERR_FLAG_CRC | ERR_FLAG_TRUNC | | ||
1151 | ERR_FLAG_CODE | ERR_FLAG_OV)) { | ||
1152 | adapter->hw_csum_err++; | ||
1153 | dev_printk(KERN_DEBUG, &pdev->dev, | ||
1154 | "rx checksum error\n"); | ||
1155 | return; | ||
1156 | } | ||
1157 | } | ||
1158 | |||
1159 | /* not IPv4 */ | ||
1160 | if (!(rrd->pkt_flg & PACKET_FLAG_IPV4)) | ||
1161 | /* checksum is invalid, but it's not an IPv4 pkt, so ok */ | ||
1162 | return; | ||
1163 | |||
1164 | /* IPv4 packet */ | ||
1165 | if (likely(!(rrd->err_flg & | ||
1166 | (ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM)))) { | ||
1167 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
1168 | adapter->hw_csum_good++; | ||
1169 | return; | ||
1170 | } | ||
1171 | |||
1172 | /* IPv4, but hardware thinks its checksum is wrong */ | ||
1173 | dev_printk(KERN_DEBUG, &pdev->dev, | ||
1174 | "hw csum wrong, pkt_flag:%x, err_flag:%x\n", | ||
1175 | rrd->pkt_flg, rrd->err_flg); | ||
1176 | skb->ip_summed = CHECKSUM_COMPLETE; | ||
1177 | skb->csum = htons(rrd->xsz.xsum_sz.rx_chksum); | ||
1178 | adapter->hw_csum_err++; | ||
1179 | return; | ||
1180 | } | ||
1181 | |||
1182 | /* | ||
1183 | * atl1_alloc_rx_buffers - Replace used receive buffers | ||
1184 | * @adapter: address of board private structure | ||
1185 | */ | ||
1186 | static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter) | ||
1187 | { | ||
1188 | struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; | ||
1189 | struct pci_dev *pdev = adapter->pdev; | ||
1190 | struct page *page; | ||
1191 | unsigned long offset; | ||
1192 | struct atl1_buffer *buffer_info, *next_info; | ||
1193 | struct sk_buff *skb; | ||
1194 | u16 num_alloc = 0; | ||
1195 | u16 rfd_next_to_use, next_next; | ||
1196 | struct rx_free_desc *rfd_desc; | ||
1197 | |||
1198 | next_next = rfd_next_to_use = atomic_read(&rfd_ring->next_to_use); | ||
1199 | if (++next_next == rfd_ring->count) | ||
1200 | next_next = 0; | ||
1201 | buffer_info = &rfd_ring->buffer_info[rfd_next_to_use]; | ||
1202 | next_info = &rfd_ring->buffer_info[next_next]; | ||
1203 | |||
1204 | while (!buffer_info->alloced && !next_info->alloced) { | ||
1205 | if (buffer_info->skb) { | ||
1206 | buffer_info->alloced = 1; | ||
1207 | goto next; | ||
1208 | } | ||
1209 | |||
1210 | rfd_desc = ATL1_RFD_DESC(rfd_ring, rfd_next_to_use); | ||
1211 | |||
1212 | skb = dev_alloc_skb(adapter->rx_buffer_len + NET_IP_ALIGN); | ||
1213 | if (unlikely(!skb)) { /* Better luck next round */ | ||
1214 | adapter->net_stats.rx_dropped++; | ||
1215 | break; | ||
1216 | } | ||
1217 | |||
1218 | /* | ||
1219 | * Make buffer alignment 2 beyond a 16 byte boundary | ||
1220 | * this will result in a 16 byte aligned IP header after | ||
1221 | * the 14 byte MAC header is removed | ||
1222 | */ | ||
1223 | skb_reserve(skb, NET_IP_ALIGN); | ||
1224 | |||
1225 | buffer_info->alloced = 1; | ||
1226 | buffer_info->skb = skb; | ||
1227 | buffer_info->length = (u16) adapter->rx_buffer_len; | ||
1228 | page = virt_to_page(skb->data); | ||
1229 | offset = (unsigned long)skb->data & ~PAGE_MASK; | ||
1230 | buffer_info->dma = pci_map_page(pdev, page, offset, | ||
1231 | adapter->rx_buffer_len, | ||
1232 | PCI_DMA_FROMDEVICE); | ||
1233 | rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma); | ||
1234 | rfd_desc->buf_len = cpu_to_le16(adapter->rx_buffer_len); | ||
1235 | rfd_desc->coalese = 0; | ||
1236 | |||
1237 | next: | ||
1238 | rfd_next_to_use = next_next; | ||
1239 | if (unlikely(++next_next == rfd_ring->count)) | ||
1240 | next_next = 0; | ||
1241 | |||
1242 | buffer_info = &rfd_ring->buffer_info[rfd_next_to_use]; | ||
1243 | next_info = &rfd_ring->buffer_info[next_next]; | ||
1244 | num_alloc++; | ||
1245 | } | ||
1246 | |||
1247 | if (num_alloc) { | ||
1248 | /* | ||
1249 | * Force memory writes to complete before letting h/w | ||
1250 | * know there are new descriptors to fetch. (Only | ||
1251 | * applicable for weak-ordered memory model archs, | ||
1252 | * such as IA-64). | ||
1253 | */ | ||
1254 | wmb(); | ||
1255 | atomic_set(&rfd_ring->next_to_use, (int)rfd_next_to_use); | ||
1256 | } | ||
1257 | return num_alloc; | ||
1258 | } | ||
1259 | |||
1260 | static void atl1_intr_rx(struct atl1_adapter *adapter) | ||
1261 | { | ||
1262 | int i, count; | ||
1263 | u16 length; | ||
1264 | u16 rrd_next_to_clean; | ||
1265 | u32 value; | ||
1266 | struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; | ||
1267 | struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; | ||
1268 | struct atl1_buffer *buffer_info; | ||
1269 | struct rx_return_desc *rrd; | ||
1270 | struct sk_buff *skb; | ||
1271 | |||
1272 | count = 0; | ||
1273 | |||
1274 | rrd_next_to_clean = atomic_read(&rrd_ring->next_to_clean); | ||
1275 | |||
1276 | while (1) { | ||
1277 | rrd = ATL1_RRD_DESC(rrd_ring, rrd_next_to_clean); | ||
1278 | i = 1; | ||
1279 | if (likely(rrd->xsz.valid)) { /* packet valid */ | ||
1280 | chk_rrd: | ||
1281 | /* check rrd status */ | ||
1282 | if (likely(rrd->num_buf == 1)) | ||
1283 | goto rrd_ok; | ||
1284 | |||
1285 | /* rrd seems to be bad */ | ||
1286 | if (unlikely(i-- > 0)) { | ||
1287 | /* rrd may not be DMAed completely */ | ||
1288 | dev_printk(KERN_DEBUG, &adapter->pdev->dev, | ||
1289 | "incomplete RRD DMA transfer\n"); | ||
1290 | udelay(1); | ||
1291 | goto chk_rrd; | ||
1292 | } | ||
1293 | /* bad rrd */ | ||
1294 | dev_printk(KERN_DEBUG, &adapter->pdev->dev, | ||
1295 | "bad RRD\n"); | ||
1296 | /* see if update RFD index */ | ||
1297 | if (rrd->num_buf > 1) | ||
1298 | atl1_update_rfd_index(adapter, rrd); | ||
1299 | |||
1300 | /* update rrd */ | ||
1301 | rrd->xsz.valid = 0; | ||
1302 | if (++rrd_next_to_clean == rrd_ring->count) | ||
1303 | rrd_next_to_clean = 0; | ||
1304 | count++; | ||
1305 | continue; | ||
1306 | } else { /* current rrd still not be updated */ | ||
1307 | |||
1308 | break; | ||
1309 | } | ||
1310 | rrd_ok: | ||
1311 | /* clean alloc flag for bad rrd */ | ||
1312 | atl1_clean_alloc_flag(adapter, rrd, 0); | ||
1313 | |||
1314 | buffer_info = &rfd_ring->buffer_info[rrd->buf_indx]; | ||
1315 | if (++rfd_ring->next_to_clean == rfd_ring->count) | ||
1316 | rfd_ring->next_to_clean = 0; | ||
1317 | |||
1318 | /* update rrd next to clean */ | ||
1319 | if (++rrd_next_to_clean == rrd_ring->count) | ||
1320 | rrd_next_to_clean = 0; | ||
1321 | count++; | ||
1322 | |||
1323 | if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) { | ||
1324 | if (!(rrd->err_flg & | ||
1325 | (ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM | ||
1326 | | ERR_FLAG_LEN))) { | ||
1327 | /* packet error, don't need upstream */ | ||
1328 | buffer_info->alloced = 0; | ||
1329 | rrd->xsz.valid = 0; | ||
1330 | continue; | ||
1331 | } | ||
1332 | } | ||
1333 | |||
1334 | /* Good Receive */ | ||
1335 | pci_unmap_page(adapter->pdev, buffer_info->dma, | ||
1336 | buffer_info->length, PCI_DMA_FROMDEVICE); | ||
1337 | skb = buffer_info->skb; | ||
1338 | length = le16_to_cpu(rrd->xsz.xsum_sz.pkt_size); | ||
1339 | |||
1340 | skb_put(skb, length - ETHERNET_FCS_SIZE); | ||
1341 | |||
1342 | /* Receive Checksum Offload */ | ||
1343 | atl1_rx_checksum(adapter, rrd, skb); | ||
1344 | skb->protocol = eth_type_trans(skb, adapter->netdev); | ||
1345 | |||
1346 | if (adapter->vlgrp && (rrd->pkt_flg & PACKET_FLAG_VLAN_INS)) { | ||
1347 | u16 vlan_tag = (rrd->vlan_tag >> 4) | | ||
1348 | ((rrd->vlan_tag & 7) << 13) | | ||
1349 | ((rrd->vlan_tag & 8) << 9); | ||
1350 | vlan_hwaccel_rx(skb, adapter->vlgrp, vlan_tag); | ||
1351 | } else | ||
1352 | netif_rx(skb); | ||
1353 | |||
1354 | /* let protocol layer free skb */ | ||
1355 | buffer_info->skb = NULL; | ||
1356 | buffer_info->alloced = 0; | ||
1357 | rrd->xsz.valid = 0; | ||
1358 | |||
1359 | adapter->netdev->last_rx = jiffies; | ||
1360 | } | ||
1361 | |||
1362 | atomic_set(&rrd_ring->next_to_clean, rrd_next_to_clean); | ||
1363 | |||
1364 | atl1_alloc_rx_buffers(adapter); | ||
1365 | |||
1366 | /* update mailbox ? */ | ||
1367 | if (count) { | ||
1368 | u32 tpd_next_to_use; | ||
1369 | u32 rfd_next_to_use; | ||
1370 | u32 rrd_next_to_clean; | ||
1371 | |||
1372 | spin_lock(&adapter->mb_lock); | ||
1373 | |||
1374 | tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use); | ||
1375 | rfd_next_to_use = | ||
1376 | atomic_read(&adapter->rfd_ring.next_to_use); | ||
1377 | rrd_next_to_clean = | ||
1378 | atomic_read(&adapter->rrd_ring.next_to_clean); | ||
1379 | value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) << | ||
1380 | MB_RFD_PROD_INDX_SHIFT) | | ||
1381 | ((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) << | ||
1382 | MB_RRD_CONS_INDX_SHIFT) | | ||
1383 | ((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) << | ||
1384 | MB_TPD_PROD_INDX_SHIFT); | ||
1385 | iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX); | ||
1386 | spin_unlock(&adapter->mb_lock); | ||
1387 | } | ||
1388 | } | ||
1389 | |||
1390 | static void atl1_intr_tx(struct atl1_adapter *adapter) | ||
1391 | { | ||
1392 | struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; | ||
1393 | struct atl1_buffer *buffer_info; | ||
1394 | u16 sw_tpd_next_to_clean; | ||
1395 | u16 cmb_tpd_next_to_clean; | ||
1396 | |||
1397 | sw_tpd_next_to_clean = atomic_read(&tpd_ring->next_to_clean); | ||
1398 | cmb_tpd_next_to_clean = le16_to_cpu(adapter->cmb.cmb->tpd_cons_idx); | ||
1399 | |||
1400 | while (cmb_tpd_next_to_clean != sw_tpd_next_to_clean) { | ||
1401 | struct tx_packet_desc *tpd; | ||
1402 | |||
1403 | tpd = ATL1_TPD_DESC(tpd_ring, sw_tpd_next_to_clean); | ||
1404 | buffer_info = &tpd_ring->buffer_info[sw_tpd_next_to_clean]; | ||
1405 | if (buffer_info->dma) { | ||
1406 | pci_unmap_page(adapter->pdev, buffer_info->dma, | ||
1407 | buffer_info->length, PCI_DMA_TODEVICE); | ||
1408 | buffer_info->dma = 0; | ||
1409 | } | ||
1410 | |||
1411 | if (buffer_info->skb) { | ||
1412 | dev_kfree_skb_irq(buffer_info->skb); | ||
1413 | buffer_info->skb = NULL; | ||
1414 | } | ||
1415 | tpd->buffer_addr = 0; | ||
1416 | tpd->desc.data = 0; | ||
1417 | |||
1418 | if (++sw_tpd_next_to_clean == tpd_ring->count) | ||
1419 | sw_tpd_next_to_clean = 0; | ||
1420 | } | ||
1421 | atomic_set(&tpd_ring->next_to_clean, sw_tpd_next_to_clean); | ||
1422 | |||
1423 | if (netif_queue_stopped(adapter->netdev) | ||
1424 | && netif_carrier_ok(adapter->netdev)) | ||
1425 | netif_wake_queue(adapter->netdev); | ||
1245 | } | 1426 | } |
1246 | 1427 | ||
1247 | static u16 tpd_avail(struct atl1_tpd_ring *tpd_ring) | 1428 | static u16 tpd_avail(struct atl1_tpd_ring *tpd_ring) |
@@ -1465,31 +1646,6 @@ static void atl1_tx_queue(struct atl1_adapter *adapter, int count,
1465 | atomic_set(&tpd_ring->next_to_use, (int)tpd_next_to_use); | 1646 | atomic_set(&tpd_ring->next_to_use, (int)tpd_next_to_use); |
1466 | } | 1647 | } |
1467 | 1648 | ||
1468 | static void atl1_update_mailbox(struct atl1_adapter *adapter) | ||
1469 | { | ||
1470 | unsigned long flags; | ||
1471 | u32 tpd_next_to_use; | ||
1472 | u32 rfd_next_to_use; | ||
1473 | u32 rrd_next_to_clean; | ||
1474 | u32 value; | ||
1475 | |||
1476 | spin_lock_irqsave(&adapter->mb_lock, flags); | ||
1477 | |||
1478 | tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use); | ||
1479 | rfd_next_to_use = atomic_read(&adapter->rfd_ring.next_to_use); | ||
1480 | rrd_next_to_clean = atomic_read(&adapter->rrd_ring.next_to_clean); | ||
1481 | |||
1482 | value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) << | ||
1483 | MB_RFD_PROD_INDX_SHIFT) | | ||
1484 | ((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) << | ||
1485 | MB_RRD_CONS_INDX_SHIFT) | | ||
1486 | ((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) << | ||
1487 | MB_TPD_PROD_INDX_SHIFT); | ||
1488 | iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX); | ||
1489 | |||
1490 | spin_unlock_irqrestore(&adapter->mb_lock, flags); | ||
1491 | } | ||
1492 | |||
1493 | static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | 1649 | static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev) |
1494 | { | 1650 | { |
1495 | struct atl1_adapter *adapter = netdev_priv(netdev); | 1651 | struct atl1_adapter *adapter = netdev_priv(netdev); |
@@ -1601,129 +1757,208 @@ static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1601 | } | 1757 | } |
1602 | 1758 | ||
1603 | /* | 1759 | /* |
1604 | * atl1_get_stats - Get System Network Statistics | 1760 | * atl1_intr - Interrupt Handler |
1605 | * @netdev: network interface device structure | 1761 | * @irq: interrupt number |
1606 | * | 1762 | * @data: pointer to a network interface device structure |
1607 | * Returns the address of the device statistics structure. | 1763 | * @pt_regs: CPU registers structure |
1608 | * The statistics are actually updated from the timer callback. | ||
1609 | */ | 1764 | */ |
1610 | static struct net_device_stats *atl1_get_stats(struct net_device *netdev) | 1765 | static irqreturn_t atl1_intr(int irq, void *data) |
1611 | { | 1766 | { |
1612 | struct atl1_adapter *adapter = netdev_priv(netdev); | 1767 | struct atl1_adapter *adapter = netdev_priv(data); |
1613 | return &adapter->net_stats; | 1768 | u32 status; |
1614 | } | 1769 | u8 update_rx; |
1770 | int max_ints = 10; | ||
1615 | 1771 | ||
1616 | /* | 1772 | status = adapter->cmb.cmb->int_stats; |
1617 | * atl1_clean_rx_ring - Free RFD Buffers | 1773 | if (!status) |
1618 | * @adapter: board private structure | 1774 | return IRQ_NONE; |
1619 | */ | ||
1620 | static void atl1_clean_rx_ring(struct atl1_adapter *adapter) | ||
1621 | { | ||
1622 | struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; | ||
1623 | struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; | ||
1624 | struct atl1_buffer *buffer_info; | ||
1625 | struct pci_dev *pdev = adapter->pdev; | ||
1626 | unsigned long size; | ||
1627 | unsigned int i; | ||
1628 | 1775 | ||
1629 | /* Free all the Rx ring sk_buffs */ | 1776 | update_rx = 0; |
1630 | for (i = 0; i < rfd_ring->count; i++) { | 1777 | |
1631 | buffer_info = &rfd_ring->buffer_info[i]; | 1778 | do { |
1632 | if (buffer_info->dma) { | 1779 | /* clear CMB interrupt status at once */ |
1633 | pci_unmap_page(pdev, buffer_info->dma, | 1780 | adapter->cmb.cmb->int_stats = 0; |
1634 | buffer_info->length, PCI_DMA_FROMDEVICE); | 1781 | |
1635 | buffer_info->dma = 0; | 1782 | if (status & ISR_GPHY) /* clear phy status */ |
1783 | atl1_clear_phy_int(adapter); | ||
1784 | |||
1785 | /* clear ISR status, and Enable CMB DMA/Disable Interrupt */ | ||
1786 | iowrite32(status | ISR_DIS_INT, adapter->hw.hw_addr + REG_ISR); | ||
1787 | |||
1788 | /* check if SMB intr */ | ||
1789 | if (status & ISR_SMB) | ||
1790 | atl1_inc_smb(adapter); | ||
1791 | |||
1792 | /* check if PCIE PHY Link down */ | ||
1793 | if (status & ISR_PHY_LINKDOWN) { | ||
1794 | dev_printk(KERN_DEBUG, &adapter->pdev->dev, | ||
1795 | "pcie phy link down %x\n", status); | ||
1796 | if (netif_running(adapter->netdev)) { /* reset MAC */ | ||
1797 | iowrite32(0, adapter->hw.hw_addr + REG_IMR); | ||
1798 | schedule_work(&adapter->pcie_dma_to_rst_task); | ||
1799 | return IRQ_HANDLED; | ||
1800 | } | ||
1636 | } | 1801 | } |
1637 | if (buffer_info->skb) { | 1802 | |
1638 | dev_kfree_skb(buffer_info->skb); | 1803 | /* check for DMA read/write errors */
1639 | buffer_info->skb = NULL; | 1804 | if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) { |
1805 | dev_printk(KERN_DEBUG, &adapter->pdev->dev, | ||
1806 | "pcie DMA r/w error (status = 0x%x)\n", | ||
1807 | status); | ||
1808 | iowrite32(0, adapter->hw.hw_addr + REG_IMR); | ||
1809 | schedule_work(&adapter->pcie_dma_to_rst_task); | ||
1810 | return IRQ_HANDLED; | ||
1640 | } | 1811 | } |
1641 | } | ||
1642 | 1812 | ||
1643 | size = sizeof(struct atl1_buffer) * rfd_ring->count; | 1813 | /* link event */ |
1644 | memset(rfd_ring->buffer_info, 0, size); | 1814 | if (status & ISR_GPHY) { |
1815 | adapter->soft_stats.tx_carrier_errors++; | ||
1816 | atl1_check_for_link(adapter); | ||
1817 | } | ||
1645 | 1818 | ||
1646 | /* Zero out the descriptor ring */ | 1819 | /* transmit event */ |
1647 | memset(rfd_ring->desc, 0, rfd_ring->size); | 1820 | if (status & ISR_CMB_TX) |
1821 | atl1_intr_tx(adapter); | ||
1648 | 1822 | ||
1649 | rfd_ring->next_to_clean = 0; | 1823 | /* rx exception */ |
1650 | atomic_set(&rfd_ring->next_to_use, 0); | 1824 | if (unlikely(status & (ISR_RXF_OV | ISR_RFD_UNRUN | |
1825 | ISR_RRD_OV | ISR_HOST_RFD_UNRUN | | ||
1826 | ISR_HOST_RRD_OV | ISR_CMB_RX))) { | ||
1827 | if (status & (ISR_RXF_OV | ISR_RFD_UNRUN | | ||
1828 | ISR_RRD_OV | ISR_HOST_RFD_UNRUN | | ||
1829 | ISR_HOST_RRD_OV)) | ||
1830 | dev_printk(KERN_DEBUG, &adapter->pdev->dev, | ||
1831 | "rx exception, ISR = 0x%x\n", status); | ||
1832 | atl1_intr_rx(adapter); | ||
1833 | } | ||
1651 | 1834 | ||
1652 | rrd_ring->next_to_use = 0; | 1835 | if (--max_ints < 0) |
1653 | atomic_set(&rrd_ring->next_to_clean, 0); | 1836 | break; |
1837 | |||
1838 | } while ((status = adapter->cmb.cmb->int_stats)); | ||
1839 | |||
1840 | /* re-enable Interrupt */ | ||
1841 | iowrite32(ISR_DIS_SMB | ISR_DIS_DMA, adapter->hw.hw_addr + REG_ISR); | ||
1842 | return IRQ_HANDLED; | ||
1654 | } | 1843 | } |
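
The handler added on the right follows a bounded service loop: claim the interrupt only if the CMB status word is non-zero, acknowledge and dispatch one batch, then re-read the status and repeat, giving up after max_ints passes so a misbehaving device cannot monopolize the CPU. A skeleton of that control flow, with function pointers standing in for the CMB read and the tx/rx/link dispatch:

    #include <stdint.h>

    static int service_until_quiet(uint32_t (*read_status)(void),
                                   void (*dispatch)(uint32_t status))
    {
            int budget = 10;                  /* mirrors max_ints */
            uint32_t status = read_status();

            if (!status)
                    return 0;                 /* not our interrupt: IRQ_NONE */

            do {
                    dispatch(status);         /* ack and handle this batch */
                    if (--budget < 0)
                            break;            /* bail; leftover work re-interrupts */
            } while ((status = read_status()) != 0);

            return 1;                         /* IRQ_HANDLED */
    }
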
1655 | 1844 | ||
1656 | /* | 1845 | /* |
1657 | * atl1_clean_tx_ring - Free Tx Buffers | 1846 | * atl1_watchdog - Timer Call-back |
1658 | * @adapter: board private structure | 1847 | * @data: pointer to netdev cast into an unsigned long |
1659 | */ | 1848 | */ |
1660 | static void atl1_clean_tx_ring(struct atl1_adapter *adapter) | 1849 | static void atl1_watchdog(unsigned long data) |
1661 | { | 1850 | { |
1662 | struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; | 1851 | struct atl1_adapter *adapter = (struct atl1_adapter *)data; |
1663 | struct atl1_buffer *buffer_info; | ||
1664 | struct pci_dev *pdev = adapter->pdev; | ||
1665 | unsigned long size; | ||
1666 | unsigned int i; | ||
1667 | 1852 | ||
1668 | /* Free all the Tx ring sk_buffs */ | 1853 | /* Reset the timer */ |
1669 | for (i = 0; i < tpd_ring->count; i++) { | 1854 | mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ); |
1670 | buffer_info = &tpd_ring->buffer_info[i]; | 1855 | } |
1671 | if (buffer_info->dma) { | ||
1672 | pci_unmap_page(pdev, buffer_info->dma, | ||
1673 | buffer_info->length, PCI_DMA_TODEVICE); | ||
1674 | buffer_info->dma = 0; | ||
1675 | } | ||
1676 | } | ||
1677 | 1856 | ||
1678 | for (i = 0; i < tpd_ring->count; i++) { | 1857 | /* |
1679 | buffer_info = &tpd_ring->buffer_info[i]; | 1858 | * atl1_phy_config - Timer Call-back |
1680 | if (buffer_info->skb) { | 1859 | * @data: pointer to netdev cast into an unsigned long |
1681 | dev_kfree_skb_any(buffer_info->skb); | 1860 | */ |
1682 | buffer_info->skb = NULL; | 1861 | static void atl1_phy_config(unsigned long data) |
1683 | } | 1862 | { |
1684 | } | 1863 | struct atl1_adapter *adapter = (struct atl1_adapter *)data; |
1864 | struct atl1_hw *hw = &adapter->hw; | ||
1865 | unsigned long flags; | ||
1685 | 1866 | ||
1686 | size = sizeof(struct atl1_buffer) * tpd_ring->count; | 1867 | spin_lock_irqsave(&adapter->lock, flags); |
1687 | memset(tpd_ring->buffer_info, 0, size); | 1868 | adapter->phy_timer_pending = false; |
1869 | atl1_write_phy_reg(hw, MII_ADVERTISE, hw->mii_autoneg_adv_reg); | ||
1870 | atl1_write_phy_reg(hw, MII_AT001_CR, hw->mii_1000t_ctrl_reg); | ||
1871 | atl1_write_phy_reg(hw, MII_BMCR, MII_CR_RESET | MII_CR_AUTO_NEG_EN); | ||
1872 | spin_unlock_irqrestore(&adapter->lock, flags); | ||
1873 | } | ||
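
atl1_phy_config restarts autonegotiation in the usual MII order: program the advertisement registers first, then write BMCR with reset and autoneg-enable so the PHY renegotiates using the new values. A sketch using the standard Clause-22 register numbers (BMCR is register 0, ADVERTISE 4, 1000BASE-T control 9); phy_wr stands in for atl1_write_phy_reg:

    #include <stdint.h>

    #define BMCR_RESET     (1u << 15)
    #define BMCR_ANENABLE  (1u << 12)

    static void restart_autoneg(void (*phy_wr)(unsigned int reg, uint16_t val),
                                uint16_t adv, uint16_t ctrl1000)
    {
            phy_wr(4, adv);       /* MII_ADVERTISE: 10/100 abilities */
            phy_wr(9, ctrl1000);  /* 1000BASE-T control: gigabit abilities */
            phy_wr(0, BMCR_RESET | BMCR_ANENABLE);  /* reset + restart autoneg */
    }
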
1688 | 1874 | ||
1689 | /* Zero out the descriptor ring */ | 1875 | /* |
1690 | memset(tpd_ring->desc, 0, tpd_ring->size); | 1876 | * atl1_tx_timeout - Respond to a Tx Hang |
1877 | * @netdev: network interface device structure | ||
1878 | */ | ||
1879 | static void atl1_tx_timeout(struct net_device *netdev) | ||
1880 | { | ||
1881 | struct atl1_adapter *adapter = netdev_priv(netdev); | ||
1882 | /* Do the reset outside of interrupt context */ | ||
1883 | schedule_work(&adapter->tx_timeout_task); | ||
1884 | } | ||
1691 | 1885 | ||
1692 | atomic_set(&tpd_ring->next_to_use, 0); | 1886 | /* |
1693 | atomic_set(&tpd_ring->next_to_clean, 0); | 1887 | * Orphaned vendor comment left intact here: |
1888 | * <vendor comment> | ||
1889 | * If TPD Buffer size equal to 0, PCIE DMAR_TO_INT | ||
1890 | * will assert. We do soft reset <0x1400=1> according | ||
1891 | * with the SPEC. BUT, it seemes that PCIE or DMA | ||
1892 | * state-machine will not be reset. DMAR_TO_INT will | ||
1893 | * assert again and again. | ||
1894 | * </vendor comment> | ||
1895 | */ | ||
1896 | static void atl1_tx_timeout_task(struct work_struct *work) | ||
1897 | { | ||
1898 | struct atl1_adapter *adapter = | ||
1899 | container_of(work, struct atl1_adapter, tx_timeout_task); | ||
1900 | struct net_device *netdev = adapter->netdev; | ||
1901 | |||
1902 | netif_device_detach(netdev); | ||
1903 | atl1_down(adapter); | ||
1904 | atl1_up(adapter); | ||
1905 | netif_device_attach(netdev); | ||
1694 | } | 1906 | } |
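
atl1_tx_timeout_task shows the canonical recovery sequence for a hung transmitter, run from a workqueue so it may sleep: detach the device from the stack, tear the hardware down, bring it back up, reattach. A sketch of the ordering, with hypothetical callbacks in place of the driver entry points:

    struct nic_ops {
            void (*detach)(void *nic);  /* netif_device_detach() */
            void (*down)(void *nic);    /* atl1_down() */
            void (*up)(void *nic);      /* atl1_up() */
            void (*attach)(void *nic);  /* netif_device_attach() */
    };

    static void recover_from_tx_hang(const struct nic_ops *ops, void *nic)
    {
            ops->detach(nic);  /* stop the stack queueing frames first */
            ops->down(nic);    /* mask IRQs, stop DMA, free the rings */
            ops->up(nic);      /* rebuild rings and restart the MAC */
            ops->attach(nic);  /* only now let traffic flow again */
    }
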
1695 | 1907 | ||
1696 | /* | 1908 | /* |
1697 | * atl1_free_ring_resources - Free Tx / RX descriptor Resources | 1909 | * atl1_link_chg_task - deal with a link change event out of interrupt context
1698 | * @adapter: board private structure | ||
1699 | * | ||
1700 | * Free all transmit and receive software resources | ||
1701 | */ | 1910 | */ |
1702 | void atl1_free_ring_resources(struct atl1_adapter *adapter) | 1911 | static void atl1_link_chg_task(struct work_struct *work) |
1703 | { | 1912 | { |
1704 | struct pci_dev *pdev = adapter->pdev; | 1913 | struct atl1_adapter *adapter = |
1705 | struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; | 1914 | container_of(work, struct atl1_adapter, link_chg_task); |
1706 | struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; | 1915 | unsigned long flags; |
1707 | struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; | ||
1708 | struct atl1_ring_header *ring_header = &adapter->ring_header; | ||
1709 | 1916 | ||
1710 | atl1_clean_tx_ring(adapter); | 1917 | spin_lock_irqsave(&adapter->lock, flags); |
1711 | atl1_clean_rx_ring(adapter); | 1918 | atl1_check_link(adapter); |
1919 | spin_unlock_irqrestore(&adapter->lock, flags); | ||
1920 | } | ||
1712 | 1921 | ||
1713 | kfree(tpd_ring->buffer_info); | 1922 | static void atl1_vlan_rx_register(struct net_device *netdev, |
1714 | pci_free_consistent(pdev, ring_header->size, ring_header->desc, | 1923 | struct vlan_group *grp) |
1715 | ring_header->dma); | 1924 | { |
1925 | struct atl1_adapter *adapter = netdev_priv(netdev); | ||
1926 | unsigned long flags; | ||
1927 | u32 ctrl; | ||
1716 | 1928 | ||
1717 | tpd_ring->buffer_info = NULL; | 1929 | spin_lock_irqsave(&adapter->lock, flags); |
1718 | tpd_ring->desc = NULL; | 1930 | /* atl1_irq_disable(adapter); */ |
1719 | tpd_ring->dma = 0; | 1931 | adapter->vlgrp = grp; |
1720 | 1932 | ||
1721 | rfd_ring->buffer_info = NULL; | 1933 | if (grp) { |
1722 | rfd_ring->desc = NULL; | 1934 | /* enable VLAN tag insert/strip */ |
1723 | rfd_ring->dma = 0; | 1935 | ctrl = ioread32(adapter->hw.hw_addr + REG_MAC_CTRL); |
1936 | ctrl |= MAC_CTRL_RMV_VLAN; | ||
1937 | iowrite32(ctrl, adapter->hw.hw_addr + REG_MAC_CTRL); | ||
1938 | } else { | ||
1939 | /* disable VLAN tag insert/strip */ | ||
1940 | ctrl = ioread32(adapter->hw.hw_addr + REG_MAC_CTRL); | ||
1941 | ctrl &= ~MAC_CTRL_RMV_VLAN; | ||
1942 | iowrite32(ctrl, adapter->hw.hw_addr + REG_MAC_CTRL); | ||
1943 | } | ||
1724 | 1944 | ||
1725 | rrd_ring->desc = NULL; | 1945 | /* atl1_irq_enable(adapter); */ |
1726 | rrd_ring->dma = 0; | 1946 | spin_unlock_irqrestore(&adapter->lock, flags); |
1947 | } | ||
1948 | |||
1949 | static void atl1_restore_vlan(struct atl1_adapter *adapter) | ||
1950 | { | ||
1951 | atl1_vlan_rx_register(adapter->netdev, adapter->vlgrp); | ||
1952 | } | ||
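
atl1_vlan_rx_register is a read-modify-write of the MAC control register: re-read REG_MAC_CTRL, flip only the VLAN-strip bit, and write it back, so every other MAC setting survives. The pure-function core of that pattern, with a placeholder bit position (the real MAC_CTRL_RMV_VLAN value lives in the register header):

    #include <stdint.h>

    #define RMV_VLAN_BIT (1u << 14)  /* placeholder for MAC_CTRL_RMV_VLAN */

    /* Given the current MAC_CTRL value, return it with only the
     * VLAN tag-stripping bit changed. */
    static uint32_t set_vlan_strip(uint32_t mac_ctrl, int enable)
    {
            return enable ? (mac_ctrl | RMV_VLAN_BIT)
                          : (mac_ctrl & ~RMV_VLAN_BIT);
    }

atl1_restore_vlan then simply replays the cached vlgrp through the same path after a reset, so the hardware state is always re-derived from one place.
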
1953 | |||
1954 | int atl1_reset(struct atl1_adapter *adapter) | ||
1955 | { | ||
1956 | int ret; | ||
1957 | |||
1958 | ret = atl1_reset_hw(&adapter->hw); | ||
1959 | if (ret != ATL1_SUCCESS) | ||
1960 | return ret; | ||
1961 | return atl1_init_hw(&adapter->hw); | ||
1727 | } | 1962 | } |
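
On the left, atl1_free_ring_resources relies on all descriptor rings having been carved from one coherent DMA block tracked by ring_header: teardown is a single kfree of the shared buffer_info array plus a single pci_free_consistent, after which the per-ring pointers are nulled so stale use fails fast. A shape-only sketch, with libc free standing in for both kernel release calls:

    #include <stdlib.h>

    struct ring { void *desc; unsigned long dma; };

    static void free_rings(struct ring *rings, int nrings,
                           void *buffer_info, void *dma_block)
    {
            int i;

            free(buffer_info);  /* one allocation backs every ring's metadata */
            free(dma_block);    /* one coherent block backs all descriptors */
            for (i = 0; i < nrings; i++) {
                    rings[i].desc = NULL;  /* poison pointers after release */
                    rings[i].dma = 0;
            }
    }
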
1728 | 1963 | ||
1729 | s32 atl1_up(struct atl1_adapter *adapter) | 1964 | s32 atl1_up(struct atl1_adapter *adapter) |
@@ -1793,173 +2028,6 @@ void atl1_down(struct atl1_adapter *adapter) | |||
1793 | } | 2028 | } |
1794 | 2029 | ||
1795 | /* | 2030 | /* |
1796 | * atl1_change_mtu - Change the Maximum Transfer Unit | ||
1797 | * @netdev: network interface device structure | ||
1798 | * @new_mtu: new value for maximum frame size | ||
1799 | * | ||
1800 | * Returns 0 on success, negative on failure | ||
1801 | */ | ||
1802 | static int atl1_change_mtu(struct net_device *netdev, int new_mtu) | ||
1803 | { | ||
1804 | struct atl1_adapter *adapter = netdev_priv(netdev); | ||
1805 | int old_mtu = netdev->mtu; | ||
1806 | int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; | ||
1807 | |||
1808 | if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) || | ||
1809 | (max_frame > MAX_JUMBO_FRAME_SIZE)) { | ||
1810 | dev_warn(&adapter->pdev->dev, "invalid MTU setting\n"); | ||
1811 | return -EINVAL; | ||
1812 | } | ||
1813 | |||
1814 | adapter->hw.max_frame_size = max_frame; | ||
1815 | adapter->hw.tx_jumbo_task_th = (max_frame + 7) >> 3; | ||
1816 | adapter->rx_buffer_len = (max_frame + 7) & ~7; | ||
1817 | adapter->hw.rx_jumbo_th = adapter->rx_buffer_len / 8; | ||
1818 | |||
1819 | netdev->mtu = new_mtu; | ||
1820 | if ((old_mtu != new_mtu) && netif_running(netdev)) { | ||
1821 | atl1_down(adapter); | ||
1822 | atl1_up(adapter); | ||
1823 | } | ||
1824 | |||
1825 | return 0; | ||
1826 | } | ||
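
The MTU path above derives every hardware threshold from one number: max_frame adds the Ethernet header and FCS to the MTU, the jumbo task threshold is max_frame/8 rounded up, and the RX buffer length is max_frame rounded up to an 8-byte boundary with (x + 7) & ~7. A sketch of the arithmetic, with the framing constants assumed to match the driver's:

    /* Assumed framing sizes; the driver's real constants live in atl1.h. */
    #define ENET_HEADER_SIZE    14
    #define ETHERNET_FCS_SIZE    4

    static int rx_buffer_len_for_mtu(int new_mtu)
    {
            int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;

            return (max_frame + 7) & ~7;   /* round up to 8-byte alignment */
    }

For the default 1500-byte MTU this gives max_frame = 1518 and an RX buffer of 1520 bytes.
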
1827 | |||
1828 | /* | ||
1829 | * atl1_set_mac - Change the Ethernet Address of the NIC | ||
1830 | * @netdev: network interface device structure | ||
1831 | * @p: pointer to an address structure | ||
1832 | * | ||
1833 | * Returns 0 on success, negative on failure | ||
1834 | */ | ||
1835 | static int atl1_set_mac(struct net_device *netdev, void *p) | ||
1836 | { | ||
1837 | struct atl1_adapter *adapter = netdev_priv(netdev); | ||
1838 | struct sockaddr *addr = p; | ||
1839 | |||
1840 | if (netif_running(netdev)) | ||
1841 | return -EBUSY; | ||
1842 | |||
1843 | if (!is_valid_ether_addr(addr->sa_data)) | ||
1844 | return -EADDRNOTAVAIL; | ||
1845 | |||
1846 | memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); | ||
1847 | memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len); | ||
1848 | |||
1849 | atl1_set_mac_addr(&adapter->hw); | ||
1850 | return 0; | ||
1851 | } | ||
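
atl1_set_mac refuses to change the address while the interface is running and gates the copy on is_valid_ether_addr(). The rule that check enforces: a usable station address has the multicast/broadcast bit (bit 0 of the first octet) clear and is not all zeros. A sketch of the same test:

    #include <stdint.h>
    #include <string.h>

    static int ether_addr_usable(const uint8_t addr[6])
    {
            static const uint8_t zero[6];

            if (addr[0] & 1)                 /* multicast/broadcast bit set */
                    return 0;
            if (memcmp(addr, zero, 6) == 0)  /* address never programmed */
                    return 0;
            return 1;
    }
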
1852 | |||
1853 | /* | ||
1854 | * atl1_watchdog - Timer Call-back | ||
1855 | * @data: pointer to netdev cast into an unsigned long | ||
1856 | */ | ||
1857 | static void atl1_watchdog(unsigned long data) | ||
1858 | { | ||
1859 | struct atl1_adapter *adapter = (struct atl1_adapter *)data; | ||
1860 | |||
1861 | /* Reset the timer */ | ||
1862 | mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ); | ||
1863 | } | ||
1864 | |||
1865 | static int mdio_read(struct net_device *netdev, int phy_id, int reg_num) | ||
1866 | { | ||
1867 | struct atl1_adapter *adapter = netdev_priv(netdev); | ||
1868 | u16 result; | ||
1869 | |||
1870 | atl1_read_phy_reg(&adapter->hw, reg_num & 0x1f, &result); | ||
1871 | |||
1872 | return result; | ||
1873 | } | ||
1874 | |||
1875 | static void mdio_write(struct net_device *netdev, int phy_id, int reg_num, | ||
1876 | int val) | ||
1877 | { | ||
1878 | struct atl1_adapter *adapter = netdev_priv(netdev); | ||
1879 | |||
1880 | atl1_write_phy_reg(&adapter->hw, reg_num, val); | ||
1881 | } | ||
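
These two callbacks adapt the generic MII layer to the atl1 PHY accessors. Note the asymmetry above: mdio_read() masks reg_num with 0x1f because Clause-22 MDIO addresses only 32 registers, while mdio_write() passes the number through unmasked. A sketch of the wrappers, with phy_rd/phy_wr standing in for atl1_read_phy_reg/atl1_write_phy_reg:

    #include <stdint.h>

    static uint16_t mii_read(uint16_t (*phy_rd)(unsigned int reg), int reg_num)
    {
            return phy_rd(reg_num & 0x1f);  /* clamp to the 5-bit register space */
    }

    static void mii_write(void (*phy_wr)(unsigned int reg, uint16_t val),
                          int reg_num, uint16_t val)
    {
            phy_wr(reg_num & 0x1f, val);    /* masked here too, for symmetry */
    }
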
1882 | |||
1883 | /* | ||
1884 | * atl1_mii_ioctl - MII register access ioctl handler | ||
1885 | * @netdev: network interface device structure | ||
1886 | * @ifr: ifreq structure carrying the MII request | ||
1887 | * @cmd: SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG | ||
1888 | */ | ||
1889 | static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) | ||
1890 | { | ||
1891 | struct atl1_adapter *adapter = netdev_priv(netdev); | ||
1892 | unsigned long flags; | ||
1893 | int retval; | ||
1894 | |||
1895 | if (!netif_running(netdev)) | ||
1896 | return -EINVAL; | ||
1897 | |||
1898 | spin_lock_irqsave(&adapter->lock, flags); | ||
1899 | retval = generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL); | ||
1900 | spin_unlock_irqrestore(&adapter->lock, flags); | ||
1901 | |||
1902 | return retval; | ||
1903 | } | ||
1904 | |||
1905 | /* | ||
1906 | * atl1_ioctl - ioctl entry point; hands MII commands to atl1_mii_ioctl | ||
1907 | * @netdev: network interface device structure | ||
1908 | * @ifr: ifreq structure for the request | ||
1909 | * @cmd: ioctl command code | ||
1910 | */ | ||
1911 | static int atl1_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) | ||
1912 | { | ||
1913 | switch (cmd) { | ||
1914 | case SIOCGMIIPHY: | ||
1915 | case SIOCGMIIREG: | ||
1916 | case SIOCSMIIREG: | ||
1917 | return atl1_mii_ioctl(netdev, ifr, cmd); | ||
1918 | default: | ||
1919 | return -EOPNOTSUPP; | ||
1920 | } | ||
1921 | } | ||
1922 | |||
1923 | /* | ||
1924 | * atl1_tx_timeout - Respond to a Tx Hang | ||
1925 | * @netdev: network interface device structure | ||
1926 | */ | ||
1927 | static void atl1_tx_timeout(struct net_device *netdev) | ||
1928 | { | ||
1929 | struct atl1_adapter *adapter = netdev_priv(netdev); | ||
1930 | /* Do the reset outside of interrupt context */ | ||
1931 | schedule_work(&adapter->tx_timeout_task); | ||
1932 | } | ||
1933 | |||
1934 | /* | ||
1935 | * atl1_phy_config - Timer Call-back | ||
1936 | * @data: pointer to netdev cast into an unsigned long | ||
1937 | */ | ||
1938 | static void atl1_phy_config(unsigned long data) | ||
1939 | { | ||
1940 | struct atl1_adapter *adapter = (struct atl1_adapter *)data; | ||
1941 | struct atl1_hw *hw = &adapter->hw; | ||
1942 | unsigned long flags; | ||
1943 | |||
1944 | spin_lock_irqsave(&adapter->lock, flags); | ||
1945 | adapter->phy_timer_pending = false; | ||
1946 | atl1_write_phy_reg(hw, MII_ADVERTISE, hw->mii_autoneg_adv_reg); | ||
1947 | atl1_write_phy_reg(hw, MII_AT001_CR, hw->mii_1000t_ctrl_reg); | ||
1948 | atl1_write_phy_reg(hw, MII_BMCR, MII_CR_RESET | MII_CR_AUTO_NEG_EN); | ||
1949 | spin_unlock_irqrestore(&adapter->lock, flags); | ||
1950 | } | ||
1951 | |||
1952 | int atl1_reset(struct atl1_adapter *adapter) | ||
1953 | { | ||
1954 | int ret; | ||
1955 | |||
1956 | ret = atl1_reset_hw(&adapter->hw); | ||
1957 | if (ret != ATL1_SUCCESS) | ||
1958 | return ret; | ||
1959 | return atl1_init_hw(&adapter->hw); | ||
1960 | } | ||
1961 | |||
1962 | /* | ||
1963 | * atl1_open - Called when a network interface is made active | 2031 | * atl1_open - Called when a network interface is made active |
1964 | * @netdev: network interface device structure | 2032 | * @netdev: network interface device structure |
1965 | * | 2033 | * |
@@ -2011,82 +2079,113 @@ static int atl1_close(struct net_device *netdev) | |||
2011 | return 0; | 2079 | return 0; |
2012 | } | 2080 | } |
2013 | 2081 | ||
2014 | #ifdef CONFIG_NET_POLL_CONTROLLER | 2082 | #ifdef CONFIG_PM |
2015 | static void atl1_poll_controller(struct net_device *netdev) | 2083 | static int atl1_suspend(struct pci_dev *pdev, pm_message_t state) |
2016 | { | ||
2017 | disable_irq(netdev->irq); | ||
2018 | atl1_intr(netdev->irq, netdev); | ||
2019 | enable_irq(netdev->irq); | ||
2020 | } | ||
2021 | #endif | ||
2022 | |||
2023 | /* | ||
2024 | * Orphaned vendor comment left intact here: | ||
2025 | * <vendor comment> | ||
2026 | * If TPD Buffer size equal to 0, PCIE DMAR_TO_INT | ||
2027 | * will assert. We do soft reset <0x1400=1> according | ||
2028 | * with the SPEC. BUT, it seemes that PCIE or DMA | ||
2029 | * state-machine will not be reset. DMAR_TO_INT will | ||
2030 | * assert again and again. | ||
2031 | * </vendor comment> | ||
2032 | */ | ||
2033 | static void atl1_tx_timeout_task(struct work_struct *work) | ||
2034 | { | 2084 | { |
2035 | struct atl1_adapter *adapter = | 2085 | struct net_device *netdev = pci_get_drvdata(pdev); |
2036 | container_of(work, struct atl1_adapter, tx_timeout_task); | 2086 | struct atl1_adapter *adapter = netdev_priv(netdev); |
2037 | struct net_device *netdev = adapter->netdev; | 2087 | struct atl1_hw *hw = &adapter->hw; |
2088 | u32 ctrl = 0; | ||
2089 | u32 wufc = adapter->wol; | ||
2038 | 2090 | ||
2039 | netif_device_detach(netdev); | 2091 | netif_device_detach(netdev); |
2040 | atl1_down(adapter); | 2092 | if (netif_running(netdev)) |
2041 | atl1_up(adapter); | 2093 | atl1_down(adapter); |
2042 | netif_device_attach(netdev); | ||
2043 | } | ||
2044 | 2094 | ||
2045 | /* | 2095 | atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl); |
2046 | * atl1_link_chg_task - deal with link change event Out of interrupt context | 2096 | atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl); |
2047 | */ | 2097 | if (ctrl & BMSR_LSTATUS) |
2048 | static void atl1_link_chg_task(struct work_struct *work) | 2098 | wufc &= ~ATL1_WUFC_LNKC; |
2049 | { | ||
2050 | struct atl1_adapter *adapter = | ||
2051 | container_of(work, struct atl1_adapter, link_chg_task); | ||
2052 | unsigned long flags; | ||
2053 | 2099 | ||
2054 | spin_lock_irqsave(&adapter->lock, flags); | 2100 | /* reduce speed to 10/100M */ |
2055 | atl1_check_link(adapter); | 2101 | if (wufc) { |
2056 | spin_unlock_irqrestore(&adapter->lock, flags); | 2102 | atl1_phy_enter_power_saving(hw); |
2103 | /* on resume, let the driver re-establish the link */ | ||
2104 | hw->phy_configured = false; | ||
2105 | atl1_set_mac_addr(hw); | ||
2106 | atl1_set_multi(netdev); | ||
2107 | |||
2108 | ctrl = 0; | ||
2109 | /* turn on magic packet wol */ | ||
2110 | if (wufc & ATL1_WUFC_MAG) | ||
2111 | ctrl = WOL_MAGIC_EN | WOL_MAGIC_PME_EN; | ||
2112 | |||
2113 | /* turn on Link change WOL */ | ||
2114 | if (wufc & ATL1_WUFC_LNKC) | ||
2115 | ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN); | ||
2116 | iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL); | ||
2117 | |||
2118 | /* turn on all-multi mode if wake on multicast is enabled */ | ||
2119 | ctrl = ioread32(hw->hw_addr + REG_MAC_CTRL); | ||
2120 | ctrl &= ~MAC_CTRL_DBG; | ||
2121 | ctrl &= ~MAC_CTRL_PROMIS_EN; | ||
2122 | if (wufc & ATL1_WUFC_MC) | ||
2123 | ctrl |= MAC_CTRL_MC_ALL_EN; | ||
2124 | else | ||
2125 | ctrl &= ~MAC_CTRL_MC_ALL_EN; | ||
2126 | |||
2127 | /* turn on broadcast mode if wake-on-broadcast is enabled */ | ||
2128 | if (wufc & ATL1_WUFC_BC) | ||
2129 | ctrl |= MAC_CTRL_BC_EN; | ||
2130 | else | ||
2131 | ctrl &= ~MAC_CTRL_BC_EN; | ||
2132 | |||
2133 | /* enable RX */ | ||
2134 | ctrl |= MAC_CTRL_RX_EN; | ||
2135 | iowrite32(ctrl, hw->hw_addr + REG_MAC_CTRL); | ||
2136 | pci_enable_wake(pdev, PCI_D3hot, 1); | ||
2137 | pci_enable_wake(pdev, PCI_D3cold, 1); | ||
2138 | } else { | ||
2139 | iowrite32(0, hw->hw_addr + REG_WOL_CTRL); | ||
2140 | pci_enable_wake(pdev, PCI_D3hot, 0); | ||
2141 | pci_enable_wake(pdev, PCI_D3cold, 0); | ||
2142 | } | ||
2143 | |||
2144 | pci_save_state(pdev); | ||
2145 | pci_disable_device(pdev); | ||
2146 | |||
2147 | pci_set_power_state(pdev, PCI_D3hot); | ||
2148 | |||
2149 | return 0; | ||
2057 | } | 2150 | } |
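
Two details in the suspend path are worth calling out. MII_BMSR is read twice because the link-status bit is latched-low: the first read returns the latched (possibly stale) value and clears it, and only the second reflects the current state. If the link is already up, wake-on-link-change is pointless and is dropped from the WOL mask. A sketch of that decision, with a placeholder wake flag:

    #include <stdint.h>

    #define BMSR_LSTATUS_BIT (1u << 2)  /* standard MII link-status bit */
    #define WUFC_LNKC        (1u << 0)  /* placeholder for ATL1_WUFC_LNKC */

    static uint32_t adjust_wol_mask(uint16_t (*read_bmsr)(void), uint32_t wufc)
    {
            uint16_t bmsr;

            (void)read_bmsr();   /* first read clears the latched value */
            bmsr = read_bmsr();  /* second read shows the live link state */

            if (bmsr & BMSR_LSTATUS_BIT)
                    wufc &= ~WUFC_LNKC;  /* link is up: no point waking on it */
            return wufc;
    }
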
2058 | 2151 | ||
2059 | /* | 2152 | static int atl1_resume(struct pci_dev *pdev) |
2060 | * atl1_pcie_patch - Patch for PCIE module | ||
2061 | */ | ||
2062 | static void atl1_pcie_patch(struct atl1_adapter *adapter) | ||
2063 | { | 2153 | { |
2064 | u32 value; | 2154 | struct net_device *netdev = pci_get_drvdata(pdev); |
2155 | struct atl1_adapter *adapter = netdev_priv(netdev); | ||
2156 | u32 ret_val; | ||
2065 | 2157 | ||
2066 | /* much vendor magic here */ | 2158 | pci_set_power_state(pdev, 0); |
2067 | value = 0x6500; | 2159 | pci_restore_state(pdev); |
2068 | iowrite32(value, adapter->hw.hw_addr + 0x12FC); | 2160 | |
2069 | /* pcie flow control mode change */ | 2161 | ret_val = pci_enable_device(pdev); |
2070 | value = ioread32(adapter->hw.hw_addr + 0x1008); | 2162 | pci_enable_wake(pdev, PCI_D3hot, 0); |
2071 | value |= 0x8000; | 2163 | pci_enable_wake(pdev, PCI_D3cold, 0); |
2072 | iowrite32(value, adapter->hw.hw_addr + 0x1008); | 2164 | |
2165 | iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL); | ||
2166 | atl1_reset(adapter); | ||
2167 | |||
2168 | if (netif_running(netdev)) | ||
2169 | atl1_up(adapter); | ||
2170 | netif_device_attach(netdev); | ||
2171 | |||
2172 | atl1_via_workaround(adapter); | ||
2173 | |||
2174 | return 0; | ||
2073 | } | 2175 | } |
2176 | #else | ||
2177 | #define atl1_suspend NULL | ||
2178 | #define atl1_resume NULL | ||
2179 | #endif | ||
2074 | 2180 | ||
2075 | /* | 2181 | #ifdef CONFIG_NET_POLL_CONTROLLER |
2076 | * When ACPI resume occurs on some VIA motherboards, the Interrupt Disable bit (0x400) | 2182 | static void atl1_poll_controller(struct net_device *netdev)
2077 | * in the PCI Command register is left set, blocking INTx interrupts. | ||
2078 | * This function clears that bit. | ||
2079 | * Brackett, 2006/03/15 | ||
2080 | */ | ||
2081 | static void atl1_via_workaround(struct atl1_adapter *adapter) | ||
2082 | { | 2183 | { |
2083 | unsigned long value; | 2184 | disable_irq(netdev->irq); |
2084 | 2185 | atl1_intr(netdev->irq, netdev); | |
2085 | value = ioread16(adapter->hw.hw_addr + PCI_COMMAND); | 2186 | enable_irq(netdev->irq); |
2086 | if (value & PCI_COMMAND_INTX_DISABLE) | ||
2087 | value &= ~PCI_COMMAND_INTX_DISABLE; | ||
2088 | iowrite32(value, adapter->hw.hw_addr + PCI_COMMAND); | ||
2089 | } | 2187 | } |
2188 | #endif | ||
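
The removed atl1_via_workaround (re-added near the top of the file) clears the INTx Disable bit (0x400) that some VIA chipsets leave set in PCI_COMMAND across ACPI resume. One observation: PCI_COMMAND is a 16-bit register, yet the original pairs ioread16 with iowrite32. A width-consistent sketch of the same fixup, with rd16/wr16 standing in for the config-space accessors:

    #include <stdint.h>

    #define PCI_COMMAND_OFF       0x04        /* standard config-space offset */
    #define PCI_CMD_INTX_DISABLE  (1u << 10)  /* the 0x400 bit in question */

    static void clear_intx_disable(uint16_t (*rd16)(unsigned int off),
                                   void (*wr16)(unsigned int off, uint16_t val))
    {
            uint16_t cmd = rd16(PCI_COMMAND_OFF);

            if (cmd & PCI_CMD_INTX_DISABLE)
                    wr16(PCI_COMMAND_OFF, cmd & ~PCI_CMD_INTX_DISABLE);
    }
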
2090 | 2189 | ||
2091 | /* | 2190 | /* |
2092 | * atl1_probe - Device Initialization Routine | 2191 | * atl1_probe - Device Initialization Routine |
@@ -2320,105 +2419,6 @@ static void __devexit atl1_remove(struct pci_dev *pdev) | |||
2320 | pci_disable_device(pdev); | 2419 | pci_disable_device(pdev); |
2321 | } | 2420 | } |
2322 | 2421 | ||
2323 | #ifdef CONFIG_PM | ||
2324 | static int atl1_suspend(struct pci_dev *pdev, pm_message_t state) | ||
2325 | { | ||
2326 | struct net_device *netdev = pci_get_drvdata(pdev); | ||
2327 | struct atl1_adapter *adapter = netdev_priv(netdev); | ||
2328 | struct atl1_hw *hw = &adapter->hw; | ||
2329 | u32 ctrl = 0; | ||
2330 | u32 wufc = adapter->wol; | ||
2331 | |||
2332 | netif_device_detach(netdev); | ||
2333 | if (netif_running(netdev)) | ||
2334 | atl1_down(adapter); | ||
2335 | |||
2336 | atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl); | ||
2337 | atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl); | ||
2338 | if (ctrl & BMSR_LSTATUS) | ||
2339 | wufc &= ~ATL1_WUFC_LNKC; | ||
2340 | |||
2341 | /* reduce speed to 10/100M */ | ||
2342 | if (wufc) { | ||
2343 | atl1_phy_enter_power_saving(hw); | ||
2344 | /* on resume, let the driver re-establish the link */ | ||
2345 | hw->phy_configured = false; | ||
2346 | atl1_set_mac_addr(hw); | ||
2347 | atl1_set_multi(netdev); | ||
2348 | |||
2349 | ctrl = 0; | ||
2350 | /* turn on magic packet wol */ | ||
2351 | if (wufc & ATL1_WUFC_MAG) | ||
2352 | ctrl = WOL_MAGIC_EN | WOL_MAGIC_PME_EN; | ||
2353 | |||
2354 | /* turn on Link change WOL */ | ||
2355 | if (wufc & ATL1_WUFC_LNKC) | ||
2356 | ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN); | ||
2357 | iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL); | ||
2358 | |||
2359 | /* turn on all-multi mode if wake on multicast is enabled */ | ||
2360 | ctrl = ioread32(hw->hw_addr + REG_MAC_CTRL); | ||
2361 | ctrl &= ~MAC_CTRL_DBG; | ||
2362 | ctrl &= ~MAC_CTRL_PROMIS_EN; | ||
2363 | if (wufc & ATL1_WUFC_MC) | ||
2364 | ctrl |= MAC_CTRL_MC_ALL_EN; | ||
2365 | else | ||
2366 | ctrl &= ~MAC_CTRL_MC_ALL_EN; | ||
2367 | |||
2368 | /* turn on broadcast mode if wake-on-broadcast is enabled */ | ||
2369 | if (wufc & ATL1_WUFC_BC) | ||
2370 | ctrl |= MAC_CTRL_BC_EN; | ||
2371 | else | ||
2372 | ctrl &= ~MAC_CTRL_BC_EN; | ||
2373 | |||
2374 | /* enable RX */ | ||
2375 | ctrl |= MAC_CTRL_RX_EN; | ||
2376 | iowrite32(ctrl, hw->hw_addr + REG_MAC_CTRL); | ||
2377 | pci_enable_wake(pdev, PCI_D3hot, 1); | ||
2378 | pci_enable_wake(pdev, PCI_D3cold, 1); | ||
2379 | } else { | ||
2380 | iowrite32(0, hw->hw_addr + REG_WOL_CTRL); | ||
2381 | pci_enable_wake(pdev, PCI_D3hot, 0); | ||
2382 | pci_enable_wake(pdev, PCI_D3cold, 0); | ||
2383 | } | ||
2384 | |||
2385 | pci_save_state(pdev); | ||
2386 | pci_disable_device(pdev); | ||
2387 | |||
2388 | pci_set_power_state(pdev, PCI_D3hot); | ||
2389 | |||
2390 | return 0; | ||
2391 | } | ||
2392 | |||
2393 | static int atl1_resume(struct pci_dev *pdev) | ||
2394 | { | ||
2395 | struct net_device *netdev = pci_get_drvdata(pdev); | ||
2396 | struct atl1_adapter *adapter = netdev_priv(netdev); | ||
2397 | u32 ret_val; | ||
2398 | |||
2399 | pci_set_power_state(pdev, 0); | ||
2400 | pci_restore_state(pdev); | ||
2401 | |||
2402 | ret_val = pci_enable_device(pdev); | ||
2403 | pci_enable_wake(pdev, PCI_D3hot, 0); | ||
2404 | pci_enable_wake(pdev, PCI_D3cold, 0); | ||
2405 | |||
2406 | iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL); | ||
2407 | atl1_reset(adapter); | ||
2408 | |||
2409 | if (netif_running(netdev)) | ||
2410 | atl1_up(adapter); | ||
2411 | netif_device_attach(netdev); | ||
2412 | |||
2413 | atl1_via_workaround(adapter); | ||
2414 | |||
2415 | return 0; | ||
2416 | } | ||
2417 | #else | ||
2418 | #define atl1_suspend NULL | ||
2419 | #define atl1_resume NULL | ||
2420 | #endif | ||
2421 | |||
2422 | static struct pci_driver atl1_driver = { | 2422 | static struct pci_driver atl1_driver = { |
2423 | .name = atl1_driver_name, | 2423 | .name = atl1_driver_name, |
2424 | .id_table = atl1_pci_tbl, | 2424 | .id_table = atl1_pci_tbl, |