author | Jay Cliburn <jacliburn@bellsouth.net> | 2007-07-15 12:03:27 -0400
committer | Jeff Garzik <jeff@garzik.org> | 2007-07-16 18:29:16 -0400
commit | 53ffb42cdf3d01f7b6e2101eebed3d12e71a30f5 (patch)
tree | 68f77dd28d32ee0156420448fb0716df43406cbb /drivers
parent | 2b116145bbdbe1b13a2eb780988447eecd657a55 (diff)
atl1: cleanup atl1_main
Fix indentation, remove dead code, improve some comments, change dev_dbg to
dev_printk.
Signed-off-by: Jay Cliburn <jacliburn@bellsouth.net>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
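The dev_dbg to dev_printk conversion mentioned above follows a single pattern throughout the patch; a minimal sketch, using the driver's own adapter->pdev pointer as in the hunks below, looks like this:

    /* before: dev_dbg() compiles away to nothing unless DEBUG is defined */
    dev_dbg(&adapter->pdev->dev, "rx checksum error\n");

    /* after: dev_printk() always emits the message at the given log level */
    dev_printk(KERN_DEBUG, &adapter->pdev->dev, "rx checksum error\n");

When DEBUG is defined the two produce identical output, since both prefix the message with the driver and device name; the conversion simply makes the driver's debug messages unconditional.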
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/net/atl1/atl1_main.c | 278
1 file changed, 137 insertions, 141 deletions
diff --git a/drivers/net/atl1/atl1_main.c b/drivers/net/atl1/atl1_main.c
index 6c8cf986bee4..b40f1c7901fe 100644
--- a/drivers/net/atl1/atl1_main.c
+++ b/drivers/net/atl1/atl1_main.c
@@ -38,7 +38,7 @@ | |||
38 | * TODO: | 38 | * TODO: |
39 | * Fix TSO; tx performance is horrible with TSO enabled. | 39 | * Fix TSO; tx performance is horrible with TSO enabled. |
40 | * Wake on LAN. | 40 | * Wake on LAN. |
41 | * Add more ethtool functions, including set ring parameters. | 41 | * Add more ethtool functions. |
42 | * Fix abstruse irq enable/disable condition described here: | 42 | * Fix abstruse irq enable/disable condition described here: |
43 | * http://marc.theaimsgroup.com/?l=linux-netdev&m=116398508500553&w=2 | 43 | * http://marc.theaimsgroup.com/?l=linux-netdev&m=116398508500553&w=2 |
44 | * | 44 | * |
@@ -191,19 +191,22 @@ s32 atl1_setup_ring_resources(struct atl1_adapter *adapter) | |||
191 | goto err_nomem; | 191 | goto err_nomem; |
192 | } | 192 | } |
193 | rfd_ring->buffer_info = | 193 | rfd_ring->buffer_info = |
194 | (struct atl1_buffer *)(tpd_ring->buffer_info + tpd_ring->count); | 194 | (struct atl1_buffer *)(tpd_ring->buffer_info + tpd_ring->count); |
195 | 195 | ||
196 | /* real ring DMA buffer */ | 196 | /* real ring DMA buffer |
197 | ring_header->size = size = sizeof(struct tx_packet_desc) * | 197 | * each ring/block may need up to 8 bytes for alignment, hence the |
198 | tpd_ring->count | 198 | * additional 40 bytes tacked onto the end. |
199 | + sizeof(struct rx_free_desc) * rfd_ring->count | 199 | */ |
200 | + sizeof(struct rx_return_desc) * rrd_ring->count | 200 | ring_header->size = size = |
201 | + sizeof(struct coals_msg_block) | 201 | sizeof(struct tx_packet_desc) * tpd_ring->count |
202 | + sizeof(struct stats_msg_block) | 202 | + sizeof(struct rx_free_desc) * rfd_ring->count |
203 | + 40; /* "40: for 8 bytes align" huh? -- CHS */ | 203 | + sizeof(struct rx_return_desc) * rrd_ring->count |
204 | + sizeof(struct coals_msg_block) | ||
205 | + sizeof(struct stats_msg_block) | ||
206 | + 40; | ||
204 | 207 | ||
205 | ring_header->desc = pci_alloc_consistent(pdev, ring_header->size, | 208 | ring_header->desc = pci_alloc_consistent(pdev, ring_header->size, |
206 | &ring_header->dma); | 209 | &ring_header->dma); |
207 | if (unlikely(!ring_header->desc)) { | 210 | if (unlikely(!ring_header->desc)) { |
208 | dev_err(&pdev->dev, "pci_alloc_consistent failed\n"); | 211 | dev_err(&pdev->dev, "pci_alloc_consistent failed\n"); |
209 | goto err_nomem; | 212 | goto err_nomem; |
@@ -227,7 +230,6 @@ s32 atl1_setup_ring_resources(struct atl1_adapter *adapter) | |||
227 | rfd_ring->desc = (u8 *) tpd_ring->desc + (tpd_ring->size + offset); | 230 | rfd_ring->desc = (u8 *) tpd_ring->desc + (tpd_ring->size + offset); |
228 | rfd_ring->size = sizeof(struct rx_free_desc) * rfd_ring->count; | 231 | rfd_ring->size = sizeof(struct rx_free_desc) * rfd_ring->count; |
229 | rfd_ring->next_to_clean = 0; | 232 | rfd_ring->next_to_clean = 0; |
230 | /* rfd_ring->next_to_use = rfd_ring->count - 1; */ | ||
231 | atomic_set(&rfd_ring->next_to_use, 0); | 233 | atomic_set(&rfd_ring->next_to_use, 0); |
232 | 234 | ||
233 | /* init RRD ring */ | 235 | /* init RRD ring */ |
@@ -243,16 +245,16 @@ s32 atl1_setup_ring_resources(struct atl1_adapter *adapter) | |||
243 | adapter->cmb.dma = rrd_ring->dma + rrd_ring->size; | 245 | adapter->cmb.dma = rrd_ring->dma + rrd_ring->size; |
244 | offset = (adapter->cmb.dma & 0x7) ? (8 - (adapter->cmb.dma & 0x7)) : 0; | 246 | offset = (adapter->cmb.dma & 0x7) ? (8 - (adapter->cmb.dma & 0x7)) : 0; |
245 | adapter->cmb.dma += offset; | 247 | adapter->cmb.dma += offset; |
246 | adapter->cmb.cmb = | 248 | adapter->cmb.cmb = (struct coals_msg_block *) |
247 | (struct coals_msg_block *) ((u8 *) rrd_ring->desc + | 249 | ((u8 *) rrd_ring->desc + (rrd_ring->size + offset)); |
248 | (rrd_ring->size + offset)); | ||
249 | 250 | ||
250 | /* init SMB */ | 251 | /* init SMB */ |
251 | adapter->smb.dma = adapter->cmb.dma + sizeof(struct coals_msg_block); | 252 | adapter->smb.dma = adapter->cmb.dma + sizeof(struct coals_msg_block); |
252 | offset = (adapter->smb.dma & 0x7) ? (8 - (adapter->smb.dma & 0x7)) : 0; | 253 | offset = (adapter->smb.dma & 0x7) ? (8 - (adapter->smb.dma & 0x7)) : 0; |
253 | adapter->smb.dma += offset; | 254 | adapter->smb.dma += offset; |
254 | adapter->smb.smb = (struct stats_msg_block *) | 255 | adapter->smb.smb = (struct stats_msg_block *) |
255 | ((u8 *) adapter->cmb.cmb + (sizeof(struct coals_msg_block) + offset)); | 256 | ((u8 *) adapter->cmb.cmb + |
257 | (sizeof(struct coals_msg_block) + offset)); | ||
256 | 258 | ||
257 | return ATL1_SUCCESS; | 259 | return ATL1_SUCCESS; |
258 | 260 | ||
@@ -291,25 +293,19 @@ static void atl1_inc_smb(struct atl1_adapter *adapter) | |||
291 | adapter->soft_stats.rx_bytes += smb->rx_byte_cnt; | 293 | adapter->soft_stats.rx_bytes += smb->rx_byte_cnt; |
292 | adapter->soft_stats.tx_bytes += smb->tx_byte_cnt; | 294 | adapter->soft_stats.tx_bytes += smb->tx_byte_cnt; |
293 | adapter->soft_stats.multicast += smb->rx_mcast; | 295 | adapter->soft_stats.multicast += smb->rx_mcast; |
294 | adapter->soft_stats.collisions += (smb->tx_1_col + | 296 | adapter->soft_stats.collisions += (smb->tx_1_col + smb->tx_2_col * 2 + |
295 | smb->tx_2_col * 2 + | 297 | smb->tx_late_col + smb->tx_abort_col * adapter->hw.max_retry); |
296 | smb->tx_late_col + | ||
297 | smb->tx_abort_col * | ||
298 | adapter->hw.max_retry); | ||
299 | 298 | ||
300 | /* Rx Errors */ | 299 | /* Rx Errors */ |
301 | adapter->soft_stats.rx_errors += (smb->rx_frag + | 300 | adapter->soft_stats.rx_errors += (smb->rx_frag + smb->rx_fcs_err + |
302 | smb->rx_fcs_err + | 301 | smb->rx_len_err + smb->rx_sz_ov + smb->rx_rxf_ov + |
303 | smb->rx_len_err + | 302 | smb->rx_rrd_ov + smb->rx_align_err); |
304 | smb->rx_sz_ov + | ||
305 | smb->rx_rxf_ov + | ||
306 | smb->rx_rrd_ov + smb->rx_align_err); | ||
307 | adapter->soft_stats.rx_fifo_errors += smb->rx_rxf_ov; | 303 | adapter->soft_stats.rx_fifo_errors += smb->rx_rxf_ov; |
308 | adapter->soft_stats.rx_length_errors += smb->rx_len_err; | 304 | adapter->soft_stats.rx_length_errors += smb->rx_len_err; |
309 | adapter->soft_stats.rx_crc_errors += smb->rx_fcs_err; | 305 | adapter->soft_stats.rx_crc_errors += smb->rx_fcs_err; |
310 | adapter->soft_stats.rx_frame_errors += smb->rx_align_err; | 306 | adapter->soft_stats.rx_frame_errors += smb->rx_align_err; |
311 | adapter->soft_stats.rx_missed_errors += (smb->rx_rrd_ov + | 307 | adapter->soft_stats.rx_missed_errors += (smb->rx_rrd_ov + |
312 | smb->rx_rxf_ov); | 308 | smb->rx_rxf_ov); |
313 | 309 | ||
314 | adapter->soft_stats.rx_pause += smb->rx_pause; | 310 | adapter->soft_stats.rx_pause += smb->rx_pause; |
315 | adapter->soft_stats.rx_rrd_ov += smb->rx_rrd_ov; | 311 | adapter->soft_stats.rx_rrd_ov += smb->rx_rrd_ov; |
@@ -317,8 +313,7 @@ static void atl1_inc_smb(struct atl1_adapter *adapter) | |||
317 | 313 | ||
318 | /* Tx Errors */ | 314 | /* Tx Errors */ |
319 | adapter->soft_stats.tx_errors += (smb->tx_late_col + | 315 | adapter->soft_stats.tx_errors += (smb->tx_late_col + |
320 | smb->tx_abort_col + | 316 | smb->tx_abort_col + smb->tx_underrun + smb->tx_trunc); |
321 | smb->tx_underrun + smb->tx_trunc); | ||
322 | adapter->soft_stats.tx_fifo_errors += smb->tx_underrun; | 317 | adapter->soft_stats.tx_fifo_errors += smb->tx_underrun; |
323 | adapter->soft_stats.tx_aborted_errors += smb->tx_abort_col; | 318 | adapter->soft_stats.tx_aborted_errors += smb->tx_abort_col; |
324 | adapter->soft_stats.tx_window_errors += smb->tx_late_col; | 319 | adapter->soft_stats.tx_window_errors += smb->tx_late_col; |
@@ -340,36 +335,38 @@ static void atl1_inc_smb(struct atl1_adapter *adapter) | |||
340 | adapter->net_stats.collisions = adapter->soft_stats.collisions; | 335 | adapter->net_stats.collisions = adapter->soft_stats.collisions; |
341 | adapter->net_stats.rx_errors = adapter->soft_stats.rx_errors; | 336 | adapter->net_stats.rx_errors = adapter->soft_stats.rx_errors; |
342 | adapter->net_stats.rx_over_errors = | 337 | adapter->net_stats.rx_over_errors = |
343 | adapter->soft_stats.rx_missed_errors; | 338 | adapter->soft_stats.rx_missed_errors; |
344 | adapter->net_stats.rx_length_errors = | 339 | adapter->net_stats.rx_length_errors = |
345 | adapter->soft_stats.rx_length_errors; | 340 | adapter->soft_stats.rx_length_errors; |
346 | adapter->net_stats.rx_crc_errors = adapter->soft_stats.rx_crc_errors; | 341 | adapter->net_stats.rx_crc_errors = adapter->soft_stats.rx_crc_errors; |
347 | adapter->net_stats.rx_frame_errors = | 342 | adapter->net_stats.rx_frame_errors = |
348 | adapter->soft_stats.rx_frame_errors; | 343 | adapter->soft_stats.rx_frame_errors; |
349 | adapter->net_stats.rx_fifo_errors = adapter->soft_stats.rx_fifo_errors; | 344 | adapter->net_stats.rx_fifo_errors = adapter->soft_stats.rx_fifo_errors; |
350 | adapter->net_stats.rx_missed_errors = | 345 | adapter->net_stats.rx_missed_errors = |
351 | adapter->soft_stats.rx_missed_errors; | 346 | adapter->soft_stats.rx_missed_errors; |
352 | adapter->net_stats.tx_errors = adapter->soft_stats.tx_errors; | 347 | adapter->net_stats.tx_errors = adapter->soft_stats.tx_errors; |
353 | adapter->net_stats.tx_fifo_errors = adapter->soft_stats.tx_fifo_errors; | 348 | adapter->net_stats.tx_fifo_errors = adapter->soft_stats.tx_fifo_errors; |
354 | adapter->net_stats.tx_aborted_errors = | 349 | adapter->net_stats.tx_aborted_errors = |
355 | adapter->soft_stats.tx_aborted_errors; | 350 | adapter->soft_stats.tx_aborted_errors; |
356 | adapter->net_stats.tx_window_errors = | 351 | adapter->net_stats.tx_window_errors = |
357 | adapter->soft_stats.tx_window_errors; | 352 | adapter->soft_stats.tx_window_errors; |
358 | adapter->net_stats.tx_carrier_errors = | 353 | adapter->net_stats.tx_carrier_errors = |
359 | adapter->soft_stats.tx_carrier_errors; | 354 | adapter->soft_stats.tx_carrier_errors; |
360 | } | 355 | } |
361 | 356 | ||
362 | static void atl1_rx_checksum(struct atl1_adapter *adapter, | 357 | static void atl1_rx_checksum(struct atl1_adapter *adapter, |
363 | struct rx_return_desc *rrd, | 358 | struct rx_return_desc *rrd, struct sk_buff *skb) |
364 | struct sk_buff *skb) | ||
365 | { | 359 | { |
360 | struct pci_dev *pdev = adapter->pdev; | ||
361 | |||
366 | skb->ip_summed = CHECKSUM_NONE; | 362 | skb->ip_summed = CHECKSUM_NONE; |
367 | 363 | ||
368 | if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) { | 364 | if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) { |
369 | if (rrd->err_flg & (ERR_FLAG_CRC | ERR_FLAG_TRUNC | | 365 | if (rrd->err_flg & (ERR_FLAG_CRC | ERR_FLAG_TRUNC | |
370 | ERR_FLAG_CODE | ERR_FLAG_OV)) { | 366 | ERR_FLAG_CODE | ERR_FLAG_OV)) { |
371 | adapter->hw_csum_err++; | 367 | adapter->hw_csum_err++; |
372 | dev_dbg(&adapter->pdev->dev, "rx checksum error\n"); | 368 | dev_printk(KERN_DEBUG, &pdev->dev, |
369 | "rx checksum error\n"); | ||
373 | return; | 370 | return; |
374 | } | 371 | } |
375 | } | 372 | } |
@@ -388,7 +385,7 @@ static void atl1_rx_checksum(struct atl1_adapter *adapter, | |||
388 | } | 385 | } |
389 | 386 | ||
390 | /* IPv4, but hardware thinks its checksum is wrong */ | 387 | /* IPv4, but hardware thinks its checksum is wrong */ |
391 | dev_dbg(&adapter->pdev->dev, | 388 | dev_printk(KERN_DEBUG, &pdev->dev, |
392 | "hw csum wrong, pkt_flag:%x, err_flag:%x\n", | 389 | "hw csum wrong, pkt_flag:%x, err_flag:%x\n", |
393 | rrd->pkt_flg, rrd->err_flg); | 390 | rrd->pkt_flg, rrd->err_flg); |
394 | skb->ip_summed = CHECKSUM_COMPLETE; | 391 | skb->ip_summed = CHECKSUM_COMPLETE; |
@@ -503,13 +500,14 @@ chk_rrd: | |||
503 | /* rrd seems to be bad */ | 500 | /* rrd seems to be bad */ |
504 | if (unlikely(i-- > 0)) { | 501 | if (unlikely(i-- > 0)) { |
505 | /* rrd may not be DMAed completely */ | 502 | /* rrd may not be DMAed completely */ |
506 | dev_dbg(&adapter->pdev->dev, | 503 | dev_printk(KERN_DEBUG, &adapter->pdev->dev, |
507 | "incomplete RRD DMA transfer\n"); | 504 | "incomplete RRD DMA transfer\n"); |
508 | udelay(1); | 505 | udelay(1); |
509 | goto chk_rrd; | 506 | goto chk_rrd; |
510 | } | 507 | } |
511 | /* bad rrd */ | 508 | /* bad rrd */ |
512 | dev_dbg(&adapter->pdev->dev, "bad RRD\n"); | 509 | dev_printk(KERN_DEBUG, &adapter->pdev->dev, |
510 | "bad RRD\n"); | ||
513 | /* see if update RFD index */ | 511 | /* see if update RFD index */ |
514 | if (rrd->num_buf > 1) { | 512 | if (rrd->num_buf > 1) { |
515 | u16 num_buf; | 513 | u16 num_buf; |
@@ -697,7 +695,6 @@ static void atl1_check_for_link(struct atl1_adapter *adapter) | |||
697 | */ | 695 | */ |
698 | static irqreturn_t atl1_intr(int irq, void *data) | 696 | static irqreturn_t atl1_intr(int irq, void *data) |
699 | { | 697 | { |
700 | /*struct atl1_adapter *adapter = ((struct net_device *)data)->priv;*/ | ||
701 | struct atl1_adapter *adapter = netdev_priv(data); | 698 | struct atl1_adapter *adapter = netdev_priv(data); |
702 | u32 status; | 699 | u32 status; |
703 | u8 update_rx; | 700 | u8 update_rx; |
@@ -725,8 +722,8 @@ static irqreturn_t atl1_intr(int irq, void *data) | |||
725 | 722 | ||
726 | /* check if PCIE PHY Link down */ | 723 | /* check if PCIE PHY Link down */ |
727 | if (status & ISR_PHY_LINKDOWN) { | 724 | if (status & ISR_PHY_LINKDOWN) { |
728 | dev_dbg(&adapter->pdev->dev, "pcie phy link down %x\n", | 725 | dev_printk(KERN_DEBUG, &adapter->pdev->dev, |
729 | status); | 726 | "pcie phy link down %x\n", status); |
730 | if (netif_running(adapter->netdev)) { /* reset MAC */ | 727 | if (netif_running(adapter->netdev)) { /* reset MAC */ |
731 | iowrite32(0, adapter->hw.hw_addr + REG_IMR); | 728 | iowrite32(0, adapter->hw.hw_addr + REG_IMR); |
732 | schedule_work(&adapter->pcie_dma_to_rst_task); | 729 | schedule_work(&adapter->pcie_dma_to_rst_task); |
@@ -736,7 +733,7 @@ static irqreturn_t atl1_intr(int irq, void *data) | |||
736 | 733 | ||
737 | /* check if DMA read/write error ? */ | 734 | /* check if DMA read/write error ? */ |
738 | if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) { | 735 | if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) { |
739 | dev_dbg(&adapter->pdev->dev, | 736 | dev_printk(KERN_DEBUG, &adapter->pdev->dev, |
740 | "pcie DMA r/w error (status = 0x%x)\n", | 737 | "pcie DMA r/w error (status = 0x%x)\n", |
741 | status); | 738 | status); |
742 | iowrite32(0, adapter->hw.hw_addr + REG_IMR); | 739 | iowrite32(0, adapter->hw.hw_addr + REG_IMR); |
@@ -761,7 +758,7 @@ static irqreturn_t atl1_intr(int irq, void *data) | |||
761 | if (status & (ISR_RXF_OV | ISR_RFD_UNRUN | | 758 | if (status & (ISR_RXF_OV | ISR_RFD_UNRUN | |
762 | ISR_RRD_OV | ISR_HOST_RFD_UNRUN | | 759 | ISR_RRD_OV | ISR_HOST_RFD_UNRUN | |
763 | ISR_HOST_RRD_OV)) | 760 | ISR_HOST_RRD_OV)) |
764 | dev_dbg(&adapter->pdev->dev, | 761 | dev_printk(KERN_DEBUG, &adapter->pdev->dev, |
765 | "rx exception, ISR = 0x%x\n", status); | 762 | "rx exception, ISR = 0x%x\n", status); |
766 | atl1_intr_rx(adapter); | 763 | atl1_intr_rx(adapter); |
767 | } | 764 | } |
@@ -973,7 +970,7 @@ static void set_flow_ctrl_old(struct atl1_adapter *adapter) | |||
973 | lo = value * 7 / 8; | 970 | lo = value * 7 / 8; |
974 | 971 | ||
975 | value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) | | 972 | value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) | |
976 | ((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT); | 973 | ((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT); |
977 | iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RXF_PAUSE_THRESH); | 974 | iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RXF_PAUSE_THRESH); |
978 | 975 | ||
979 | /* RRD Flow Control */ | 976 | /* RRD Flow Control */ |
@@ -983,7 +980,7 @@ static void set_flow_ctrl_old(struct atl1_adapter *adapter) | |||
983 | if (lo < 2) | 980 | if (lo < 2) |
984 | lo = 2; | 981 | lo = 2; |
985 | value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) | | 982 | value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) | |
986 | ((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT); | 983 | ((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT); |
987 | iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RRD_PAUSE_THRESH); | 984 | iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RRD_PAUSE_THRESH); |
988 | } | 985 | } |
989 | 986 | ||
@@ -1000,7 +997,7 @@ static void set_flow_ctrl_new(struct atl1_hw *hw) | |||
1000 | if (hi < lo) | 997 | if (hi < lo) |
1001 | hi = lo + 16; | 998 | hi = lo + 16; |
1002 | value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) | | 999 | value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) | |
1003 | ((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT); | 1000 | ((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT); |
1004 | iowrite32(value, hw->hw_addr + REG_RXQ_RXF_PAUSE_THRESH); | 1001 | iowrite32(value, hw->hw_addr + REG_RXQ_RXF_PAUSE_THRESH); |
1005 | 1002 | ||
1006 | /* RRD Flow Control */ | 1003 | /* RRD Flow Control */ |
@@ -1012,7 +1009,7 @@ static void set_flow_ctrl_new(struct atl1_hw *hw) | |||
1012 | if (hi < lo) | 1009 | if (hi < lo) |
1013 | hi = lo + 3; | 1010 | hi = lo + 3; |
1014 | value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) | | 1011 | value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) | |
1015 | ((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT); | 1012 | ((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT); |
1016 | iowrite32(value, hw->hw_addr + REG_RXQ_RRD_PAUSE_THRESH); | 1013 | iowrite32(value, hw->hw_addr + REG_RXQ_RRD_PAUSE_THRESH); |
1017 | } | 1014 | } |
1018 | 1015 | ||
@@ -1069,31 +1066,31 @@ static u32 atl1_configure(struct atl1_adapter *adapter) | |||
1069 | /* config Mailbox */ | 1066 | /* config Mailbox */ |
1070 | value = ((atomic_read(&adapter->tpd_ring.next_to_use) | 1067 | value = ((atomic_read(&adapter->tpd_ring.next_to_use) |
1071 | & MB_TPD_PROD_INDX_MASK) << MB_TPD_PROD_INDX_SHIFT) | | 1068 | & MB_TPD_PROD_INDX_MASK) << MB_TPD_PROD_INDX_SHIFT) | |
1072 | ((atomic_read(&adapter->rrd_ring.next_to_clean) | 1069 | ((atomic_read(&adapter->rrd_ring.next_to_clean) |
1073 | & MB_RRD_CONS_INDX_MASK) << MB_RRD_CONS_INDX_SHIFT) | | 1070 | & MB_RRD_CONS_INDX_MASK) << MB_RRD_CONS_INDX_SHIFT) | |
1074 | ((atomic_read(&adapter->rfd_ring.next_to_use) | 1071 | ((atomic_read(&adapter->rfd_ring.next_to_use) |
1075 | & MB_RFD_PROD_INDX_MASK) << MB_RFD_PROD_INDX_SHIFT); | 1072 | & MB_RFD_PROD_INDX_MASK) << MB_RFD_PROD_INDX_SHIFT); |
1076 | iowrite32(value, hw->hw_addr + REG_MAILBOX); | 1073 | iowrite32(value, hw->hw_addr + REG_MAILBOX); |
1077 | 1074 | ||
1078 | /* config IPG/IFG */ | 1075 | /* config IPG/IFG */ |
1079 | value = (((u32) hw->ipgt & MAC_IPG_IFG_IPGT_MASK) | 1076 | value = (((u32) hw->ipgt & MAC_IPG_IFG_IPGT_MASK) |
1080 | << MAC_IPG_IFG_IPGT_SHIFT) | | 1077 | << MAC_IPG_IFG_IPGT_SHIFT) | |
1081 | (((u32) hw->min_ifg & MAC_IPG_IFG_MIFG_MASK) | 1078 | (((u32) hw->min_ifg & MAC_IPG_IFG_MIFG_MASK) |
1082 | << MAC_IPG_IFG_MIFG_SHIFT) | | 1079 | << MAC_IPG_IFG_MIFG_SHIFT) | |
1083 | (((u32) hw->ipgr1 & MAC_IPG_IFG_IPGR1_MASK) | 1080 | (((u32) hw->ipgr1 & MAC_IPG_IFG_IPGR1_MASK) |
1084 | << MAC_IPG_IFG_IPGR1_SHIFT) | | 1081 | << MAC_IPG_IFG_IPGR1_SHIFT) | |
1085 | (((u32) hw->ipgr2 & MAC_IPG_IFG_IPGR2_MASK) | 1082 | (((u32) hw->ipgr2 & MAC_IPG_IFG_IPGR2_MASK) |
1086 | << MAC_IPG_IFG_IPGR2_SHIFT); | 1083 | << MAC_IPG_IFG_IPGR2_SHIFT); |
1087 | iowrite32(value, hw->hw_addr + REG_MAC_IPG_IFG); | 1084 | iowrite32(value, hw->hw_addr + REG_MAC_IPG_IFG); |
1088 | 1085 | ||
1089 | /* config Half-Duplex Control */ | 1086 | /* config Half-Duplex Control */ |
1090 | value = ((u32) hw->lcol & MAC_HALF_DUPLX_CTRL_LCOL_MASK) | | 1087 | value = ((u32) hw->lcol & MAC_HALF_DUPLX_CTRL_LCOL_MASK) | |
1091 | (((u32) hw->max_retry & MAC_HALF_DUPLX_CTRL_RETRY_MASK) | 1088 | (((u32) hw->max_retry & MAC_HALF_DUPLX_CTRL_RETRY_MASK) |
1092 | << MAC_HALF_DUPLX_CTRL_RETRY_SHIFT) | | 1089 | << MAC_HALF_DUPLX_CTRL_RETRY_SHIFT) | |
1093 | MAC_HALF_DUPLX_CTRL_EXC_DEF_EN | | 1090 | MAC_HALF_DUPLX_CTRL_EXC_DEF_EN | |
1094 | (0xa << MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT) | | 1091 | (0xa << MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT) | |
1095 | (((u32) hw->jam_ipg & MAC_HALF_DUPLX_CTRL_JAMIPG_MASK) | 1092 | (((u32) hw->jam_ipg & MAC_HALF_DUPLX_CTRL_JAMIPG_MASK) |
1096 | << MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT); | 1093 | << MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT); |
1097 | iowrite32(value, hw->hw_addr + REG_MAC_HALF_DUPLX_CTRL); | 1094 | iowrite32(value, hw->hw_addr + REG_MAC_HALF_DUPLX_CTRL); |
1098 | 1095 | ||
1099 | /* set Interrupt Moderator Timer */ | 1096 | /* set Interrupt Moderator Timer */ |
@@ -1109,10 +1106,10 @@ static u32 atl1_configure(struct atl1_adapter *adapter) | |||
1109 | /* jumbo size & rrd retirement timer */ | 1106 | /* jumbo size & rrd retirement timer */ |
1110 | value = (((u32) hw->rx_jumbo_th & RXQ_JMBOSZ_TH_MASK) | 1107 | value = (((u32) hw->rx_jumbo_th & RXQ_JMBOSZ_TH_MASK) |
1111 | << RXQ_JMBOSZ_TH_SHIFT) | | 1108 | << RXQ_JMBOSZ_TH_SHIFT) | |
1112 | (((u32) hw->rx_jumbo_lkah & RXQ_JMBO_LKAH_MASK) | 1109 | (((u32) hw->rx_jumbo_lkah & RXQ_JMBO_LKAH_MASK) |
1113 | << RXQ_JMBO_LKAH_SHIFT) | | 1110 | << RXQ_JMBO_LKAH_SHIFT) | |
1114 | (((u32) hw->rrd_ret_timer & RXQ_RRD_TIMER_MASK) | 1111 | (((u32) hw->rrd_ret_timer & RXQ_RRD_TIMER_MASK) |
1115 | << RXQ_RRD_TIMER_SHIFT); | 1112 | << RXQ_RRD_TIMER_SHIFT); |
1116 | iowrite32(value, hw->hw_addr + REG_RXQ_JMBOSZ_RRDTIM); | 1113 | iowrite32(value, hw->hw_addr + REG_RXQ_JMBOSZ_RRDTIM); |
1117 | 1114 | ||
1118 | /* Flow Control */ | 1115 | /* Flow Control */ |
@@ -1131,35 +1128,36 @@ static u32 atl1_configure(struct atl1_adapter *adapter) | |||
1131 | /* config TXQ */ | 1128 | /* config TXQ */ |
1132 | value = (((u32) hw->tpd_burst & TXQ_CTRL_TPD_BURST_NUM_MASK) | 1129 | value = (((u32) hw->tpd_burst & TXQ_CTRL_TPD_BURST_NUM_MASK) |
1133 | << TXQ_CTRL_TPD_BURST_NUM_SHIFT) | | 1130 | << TXQ_CTRL_TPD_BURST_NUM_SHIFT) | |
1134 | (((u32) hw->txf_burst & TXQ_CTRL_TXF_BURST_NUM_MASK) | 1131 | (((u32) hw->txf_burst & TXQ_CTRL_TXF_BURST_NUM_MASK) |
1135 | << TXQ_CTRL_TXF_BURST_NUM_SHIFT) | | 1132 | << TXQ_CTRL_TXF_BURST_NUM_SHIFT) | |
1136 | (((u32) hw->tpd_fetch_th & TXQ_CTRL_TPD_FETCH_TH_MASK) | 1133 | (((u32) hw->tpd_fetch_th & TXQ_CTRL_TPD_FETCH_TH_MASK) |
1137 | << TXQ_CTRL_TPD_FETCH_TH_SHIFT) | TXQ_CTRL_ENH_MODE | TXQ_CTRL_EN; | 1134 | << TXQ_CTRL_TPD_FETCH_TH_SHIFT) | TXQ_CTRL_ENH_MODE | |
1135 | TXQ_CTRL_EN; | ||
1138 | iowrite32(value, hw->hw_addr + REG_TXQ_CTRL); | 1136 | iowrite32(value, hw->hw_addr + REG_TXQ_CTRL); |
1139 | 1137 | ||
1140 | /* min tpd fetch gap & tx jumbo packet size threshold for taskoffload */ | 1138 | /* min tpd fetch gap & tx jumbo packet size threshold for taskoffload */ |
1141 | value = (((u32) hw->tx_jumbo_task_th & TX_JUMBO_TASK_TH_MASK) | 1139 | value = (((u32) hw->tx_jumbo_task_th & TX_JUMBO_TASK_TH_MASK) |
1142 | << TX_JUMBO_TASK_TH_SHIFT) | | 1140 | << TX_JUMBO_TASK_TH_SHIFT) | |
1143 | (((u32) hw->tpd_fetch_gap & TX_TPD_MIN_IPG_MASK) | 1141 | (((u32) hw->tpd_fetch_gap & TX_TPD_MIN_IPG_MASK) |
1144 | << TX_TPD_MIN_IPG_SHIFT); | 1142 | << TX_TPD_MIN_IPG_SHIFT); |
1145 | iowrite32(value, hw->hw_addr + REG_TX_JUMBO_TASK_TH_TPD_IPG); | 1143 | iowrite32(value, hw->hw_addr + REG_TX_JUMBO_TASK_TH_TPD_IPG); |
1146 | 1144 | ||
1147 | /* config RXQ */ | 1145 | /* config RXQ */ |
1148 | value = (((u32) hw->rfd_burst & RXQ_CTRL_RFD_BURST_NUM_MASK) | 1146 | value = (((u32) hw->rfd_burst & RXQ_CTRL_RFD_BURST_NUM_MASK) |
1149 | << RXQ_CTRL_RFD_BURST_NUM_SHIFT) | | 1147 | << RXQ_CTRL_RFD_BURST_NUM_SHIFT) | |
1150 | (((u32) hw->rrd_burst & RXQ_CTRL_RRD_BURST_THRESH_MASK) | 1148 | (((u32) hw->rrd_burst & RXQ_CTRL_RRD_BURST_THRESH_MASK) |
1151 | << RXQ_CTRL_RRD_BURST_THRESH_SHIFT) | | 1149 | << RXQ_CTRL_RRD_BURST_THRESH_SHIFT) | |
1152 | (((u32) hw->rfd_fetch_gap & RXQ_CTRL_RFD_PREF_MIN_IPG_MASK) | 1150 | (((u32) hw->rfd_fetch_gap & RXQ_CTRL_RFD_PREF_MIN_IPG_MASK) |
1153 | << RXQ_CTRL_RFD_PREF_MIN_IPG_SHIFT) | | 1151 | << RXQ_CTRL_RFD_PREF_MIN_IPG_SHIFT) | RXQ_CTRL_CUT_THRU_EN | |
1154 | RXQ_CTRL_CUT_THRU_EN | RXQ_CTRL_EN; | 1152 | RXQ_CTRL_EN; |
1155 | iowrite32(value, hw->hw_addr + REG_RXQ_CTRL); | 1153 | iowrite32(value, hw->hw_addr + REG_RXQ_CTRL); |
1156 | 1154 | ||
1157 | /* config DMA Engine */ | 1155 | /* config DMA Engine */ |
1158 | value = ((((u32) hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK) | 1156 | value = ((((u32) hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK) |
1159 | << DMA_CTRL_DMAR_BURST_LEN_SHIFT) | | 1157 | << DMA_CTRL_DMAR_BURST_LEN_SHIFT) | |
1160 | ((((u32) hw->dmaw_block) & DMA_CTRL_DMAR_BURST_LEN_MASK) | 1158 | ((((u32) hw->dmaw_block) & DMA_CTRL_DMAR_BURST_LEN_MASK) |
1161 | << DMA_CTRL_DMAR_BURST_LEN_SHIFT) | | 1159 | << DMA_CTRL_DMAR_BURST_LEN_SHIFT) | DMA_CTRL_DMAR_EN | |
1162 | DMA_CTRL_DMAR_EN | DMA_CTRL_DMAW_EN; | 1160 | DMA_CTRL_DMAW_EN; |
1163 | value |= (u32) hw->dma_ord; | 1161 | value |= (u32) hw->dma_ord; |
1164 | if (atl1_rcb_128 == hw->rcb_value) | 1162 | if (atl1_rcb_128 == hw->rcb_value) |
1165 | value |= DMA_CTRL_RCB_VALUE; | 1163 | value |= DMA_CTRL_RCB_VALUE; |
@@ -1200,7 +1198,7 @@ static void atl1_irq_disable(struct atl1_adapter *adapter) | |||
1200 | } | 1198 | } |
1201 | 1199 | ||
1202 | static void atl1_vlan_rx_register(struct net_device *netdev, | 1200 | static void atl1_vlan_rx_register(struct net_device *netdev, |
1203 | struct vlan_group *grp) | 1201 | struct vlan_group *grp) |
1204 | { | 1202 | { |
1205 | struct atl1_adapter *adapter = netdev_priv(netdev); | 1203 | struct atl1_adapter *adapter = netdev_priv(netdev); |
1206 | unsigned long flags; | 1204 | unsigned long flags; |
@@ -1235,9 +1233,9 @@ static u16 tpd_avail(struct atl1_tpd_ring *tpd_ring) | |||
1235 | { | 1233 | { |
1236 | u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean); | 1234 | u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean); |
1237 | u16 next_to_use = atomic_read(&tpd_ring->next_to_use); | 1235 | u16 next_to_use = atomic_read(&tpd_ring->next_to_use); |
1238 | return ((next_to_clean > | 1236 | return ((next_to_clean > next_to_use) ? |
1239 | next_to_use) ? next_to_clean - next_to_use - | 1237 | next_to_clean - next_to_use - 1 : |
1240 | 1 : tpd_ring->count + next_to_clean - next_to_use - 1); | 1238 | tpd_ring->count + next_to_clean - next_to_use - 1); |
1241 | } | 1239 | } |
1242 | 1240 | ||
1243 | static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb, | 1241 | static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb, |
@@ -1270,7 +1268,8 @@ static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb, | |||
1270 | tso->tsopl |= (iph->ihl & | 1268 | tso->tsopl |= (iph->ihl & |
1271 | CSUM_PARAM_IPHL_MASK) << CSUM_PARAM_IPHL_SHIFT; | 1269 | CSUM_PARAM_IPHL_MASK) << CSUM_PARAM_IPHL_SHIFT; |
1272 | tso->tsopl |= (tcp_hdrlen(skb) & | 1270 | tso->tsopl |= (tcp_hdrlen(skb) & |
1273 | TSO_PARAM_TCPHDRLEN_MASK) << TSO_PARAM_TCPHDRLEN_SHIFT; | 1271 | TSO_PARAM_TCPHDRLEN_MASK) << |
1272 | TSO_PARAM_TCPHDRLEN_SHIFT; | ||
1274 | tso->tsopl |= (skb_shinfo(skb)->gso_size & | 1273 | tso->tsopl |= (skb_shinfo(skb)->gso_size & |
1275 | TSO_PARAM_MSS_MASK) << TSO_PARAM_MSS_SHIFT; | 1274 | TSO_PARAM_MSS_MASK) << TSO_PARAM_MSS_SHIFT; |
1276 | tso->tsopl |= 1 << TSO_PARAM_IPCKSUM_SHIFT; | 1275 | tso->tsopl |= 1 << TSO_PARAM_IPCKSUM_SHIFT; |
@@ -1283,7 +1282,7 @@ static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb, | |||
1283 | } | 1282 | } |
1284 | 1283 | ||
1285 | static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb, | 1284 | static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb, |
1286 | struct csum_param *csum) | 1285 | struct csum_param *csum) |
1287 | { | 1286 | { |
1288 | u8 css, cso; | 1287 | u8 css, cso; |
1289 | 1288 | ||
@@ -1291,7 +1290,7 @@ static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb, | |||
1291 | cso = skb_transport_offset(skb); | 1290 | cso = skb_transport_offset(skb); |
1292 | css = cso + skb->csum_offset; | 1291 | css = cso + skb->csum_offset; |
1293 | if (unlikely(cso & 0x1)) { | 1292 | if (unlikely(cso & 0x1)) { |
1294 | dev_dbg(&adapter->pdev->dev, | 1293 | dev_printk(KERN_DEBUG, &adapter->pdev->dev, |
1295 | "payload offset not an even number\n"); | 1294 | "payload offset not an even number\n"); |
1296 | return -1; | 1295 | return -1; |
1297 | } | 1296 | } |
@@ -1306,8 +1305,8 @@ static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb, | |||
1306 | return true; | 1305 | return true; |
1307 | } | 1306 | } |
1308 | 1307 | ||
1309 | static void atl1_tx_map(struct atl1_adapter *adapter, | 1308 | static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb, |
1310 | struct sk_buff *skb, bool tcp_seg) | 1309 | bool tcp_seg) |
1311 | { | 1310 | { |
1312 | /* We enter this function holding a spinlock. */ | 1311 | /* We enter this function holding a spinlock. */ |
1313 | struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; | 1312 | struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; |
@@ -1344,7 +1343,8 @@ static void atl1_tx_map(struct atl1_adapter *adapter, | |||
1344 | 1343 | ||
1345 | if (first_buf_len > proto_hdr_len) { | 1344 | if (first_buf_len > proto_hdr_len) { |
1346 | len12 = first_buf_len - proto_hdr_len; | 1345 | len12 = first_buf_len - proto_hdr_len; |
1347 | m = (len12 + ATL1_MAX_TX_BUF_LEN - 1) / ATL1_MAX_TX_BUF_LEN; | 1346 | m = (len12 + ATL1_MAX_TX_BUF_LEN - 1) / |
1347 | ATL1_MAX_TX_BUF_LEN; | ||
1348 | for (i = 0; i < m; i++) { | 1348 | for (i = 0; i < m; i++) { |
1349 | buffer_info = | 1349 | buffer_info = |
1350 | &tpd_ring->buffer_info[tpd_next_to_use]; | 1350 | &tpd_ring->buffer_info[tpd_next_to_use]; |
@@ -1354,16 +1354,14 @@ static void atl1_tx_map(struct atl1_adapter *adapter, | |||
1354 | len12) ? ATL1_MAX_TX_BUF_LEN : len12; | 1354 | len12) ? ATL1_MAX_TX_BUF_LEN : len12; |
1355 | len12 -= buffer_info->length; | 1355 | len12 -= buffer_info->length; |
1356 | page = virt_to_page(skb->data + | 1356 | page = virt_to_page(skb->data + |
1357 | (proto_hdr_len + | 1357 | (proto_hdr_len + |
1358 | i * ATL1_MAX_TX_BUF_LEN)); | 1358 | i * ATL1_MAX_TX_BUF_LEN)); |
1359 | offset = (unsigned long)(skb->data + | 1359 | offset = (unsigned long)(skb->data + |
1360 | (proto_hdr_len + | 1360 | (proto_hdr_len + |
1361 | i * ATL1_MAX_TX_BUF_LEN)) & | 1361 | i * ATL1_MAX_TX_BUF_LEN)) & ~PAGE_MASK; |
1362 | ~PAGE_MASK; | 1362 | buffer_info->dma = pci_map_page(adapter->pdev, |
1363 | buffer_info->dma = | 1363 | page, offset, buffer_info->length, |
1364 | pci_map_page(adapter->pdev, page, offset, | 1364 | PCI_DMA_TODEVICE); |
1365 | buffer_info->length, | ||
1366 | PCI_DMA_TODEVICE); | ||
1367 | if (++tpd_next_to_use == tpd_ring->count) | 1365 | if (++tpd_next_to_use == tpd_ring->count) |
1368 | tpd_next_to_use = 0; | 1366 | tpd_next_to_use = 0; |
1369 | } | 1367 | } |
@@ -1374,8 +1372,7 @@ static void atl1_tx_map(struct atl1_adapter *adapter, | |||
1374 | page = virt_to_page(skb->data); | 1372 | page = virt_to_page(skb->data); |
1375 | offset = (unsigned long)skb->data & ~PAGE_MASK; | 1373 | offset = (unsigned long)skb->data & ~PAGE_MASK; |
1376 | buffer_info->dma = pci_map_page(adapter->pdev, page, | 1374 | buffer_info->dma = pci_map_page(adapter->pdev, page, |
1377 | offset, first_buf_len, | 1375 | offset, first_buf_len, PCI_DMA_TODEVICE); |
1378 | PCI_DMA_TODEVICE); | ||
1379 | if (++tpd_next_to_use == tpd_ring->count) | 1376 | if (++tpd_next_to_use == tpd_ring->count) |
1380 | tpd_next_to_use = 0; | 1377 | tpd_next_to_use = 0; |
1381 | } | 1378 | } |
@@ -1393,13 +1390,13 @@ static void atl1_tx_map(struct atl1_adapter *adapter, | |||
1393 | if (unlikely(buffer_info->skb)) | 1390 | if (unlikely(buffer_info->skb)) |
1394 | BUG(); | 1391 | BUG(); |
1395 | buffer_info->skb = NULL; | 1392 | buffer_info->skb = NULL; |
1396 | buffer_info->length = | 1393 | buffer_info->length = (lenf > ATL1_MAX_TX_BUF_LEN) ? |
1397 | (lenf > ATL1_MAX_TX_BUF_LEN) ? ATL1_MAX_TX_BUF_LEN : lenf; | 1394 | ATL1_MAX_TX_BUF_LEN : lenf; |
1398 | lenf -= buffer_info->length; | 1395 | lenf -= buffer_info->length; |
1399 | buffer_info->dma = | 1396 | buffer_info->dma = pci_map_page(adapter->pdev, |
1400 | pci_map_page(adapter->pdev, frag->page, | 1397 | frag->page, |
1401 | frag->page_offset + i * ATL1_MAX_TX_BUF_LEN, | 1398 | frag->page_offset + (i * ATL1_MAX_TX_BUF_LEN), |
1402 | buffer_info->length, PCI_DMA_TODEVICE); | 1399 | buffer_info->length, PCI_DMA_TODEVICE); |
1403 | 1400 | ||
1404 | if (++tpd_next_to_use == tpd_ring->count) | 1401 | if (++tpd_next_to_use == tpd_ring->count) |
1405 | tpd_next_to_use = 0; | 1402 | tpd_next_to_use = 0; |
@@ -1411,7 +1408,7 @@ static void atl1_tx_map(struct atl1_adapter *adapter, | |||
1411 | } | 1408 | } |
1412 | 1409 | ||
1413 | static void atl1_tx_queue(struct atl1_adapter *adapter, int count, | 1410 | static void atl1_tx_queue(struct atl1_adapter *adapter, int count, |
1414 | union tpd_descr *descr) | 1411 | union tpd_descr *descr) |
1415 | { | 1412 | { |
1416 | /* We enter this function holding a spinlock. */ | 1413 | /* We enter this function holding a spinlock. */ |
1417 | struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; | 1414 | struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; |
@@ -1515,8 +1512,8 @@ static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
1515 | for (f = 0; f < nr_frags; f++) { | 1512 | for (f = 0; f < nr_frags; f++) { |
1516 | frag_size = skb_shinfo(skb)->frags[f].size; | 1513 | frag_size = skb_shinfo(skb)->frags[f].size; |
1517 | if (frag_size) | 1514 | if (frag_size) |
1518 | count += | 1515 | count += (frag_size + ATL1_MAX_TX_BUF_LEN - 1) / |
1519 | (frag_size + ATL1_MAX_TX_BUF_LEN - 1) / ATL1_MAX_TX_BUF_LEN; | 1516 | ATL1_MAX_TX_BUF_LEN; |
1520 | } | 1517 | } |
1521 | 1518 | ||
1522 | /* mss will be nonzero if we're doing segment offload (TSO/GSO) */ | 1519 | /* mss will be nonzero if we're doing segment offload (TSO/GSO) */ |
@@ -1532,7 +1529,8 @@ static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
1532 | /* need additional TPD ? */ | 1529 | /* need additional TPD ? */ |
1533 | if (proto_hdr_len != len) | 1530 | if (proto_hdr_len != len) |
1534 | count += (len - proto_hdr_len + | 1531 | count += (len - proto_hdr_len + |
1535 | ATL1_MAX_TX_BUF_LEN - 1) / ATL1_MAX_TX_BUF_LEN; | 1532 | ATL1_MAX_TX_BUF_LEN - 1) / |
1533 | ATL1_MAX_TX_BUF_LEN; | ||
1536 | } | 1534 | } |
1537 | } | 1535 | } |
1538 | 1536 | ||
@@ -1540,7 +1538,7 @@ static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
1540 | if (!spin_trylock(&adapter->lock)) { | 1538 | if (!spin_trylock(&adapter->lock)) { |
1541 | /* Can't get lock - tell upper layer to requeue */ | 1539 | /* Can't get lock - tell upper layer to requeue */ |
1542 | local_irq_restore(flags); | 1540 | local_irq_restore(flags); |
1543 | dev_dbg(&adapter->pdev->dev, "tx locked\n"); | 1541 | dev_printk(KERN_DEBUG, &adapter->pdev->dev, "tx locked\n"); |
1544 | return NETDEV_TX_LOCKED; | 1542 | return NETDEV_TX_LOCKED; |
1545 | } | 1543 | } |
1546 | 1544 | ||
@@ -1548,7 +1546,7 @@ static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
1548 | /* not enough descriptors */ | 1546 | /* not enough descriptors */ |
1549 | netif_stop_queue(netdev); | 1547 | netif_stop_queue(netdev); |
1550 | spin_unlock_irqrestore(&adapter->lock, flags); | 1548 | spin_unlock_irqrestore(&adapter->lock, flags); |
1551 | dev_dbg(&adapter->pdev->dev, "tx busy\n"); | 1549 | dev_printk(KERN_DEBUG, &adapter->pdev->dev, "tx busy\n"); |
1552 | return NETDEV_TX_BUSY; | 1550 | return NETDEV_TX_BUSY; |
1553 | } | 1551 | } |
1554 | 1552 | ||
@@ -1619,10 +1617,8 @@ static void atl1_clean_rx_ring(struct atl1_adapter *adapter) | |||
1619 | for (i = 0; i < rfd_ring->count; i++) { | 1617 | for (i = 0; i < rfd_ring->count; i++) { |
1620 | buffer_info = &rfd_ring->buffer_info[i]; | 1618 | buffer_info = &rfd_ring->buffer_info[i]; |
1621 | if (buffer_info->dma) { | 1619 | if (buffer_info->dma) { |
1622 | pci_unmap_page(pdev, | 1620 | pci_unmap_page(pdev, buffer_info->dma, |
1623 | buffer_info->dma, | 1621 | buffer_info->length, PCI_DMA_FROMDEVICE); |
1624 | buffer_info->length, | ||
1625 | PCI_DMA_FROMDEVICE); | ||
1626 | buffer_info->dma = 0; | 1622 | buffer_info->dma = 0; |
1627 | } | 1623 | } |
1628 | if (buffer_info->skb) { | 1624 | if (buffer_info->skb) { |
@@ -1661,7 +1657,7 @@ static void atl1_clean_tx_ring(struct atl1_adapter *adapter) | |||
1661 | buffer_info = &tpd_ring->buffer_info[i]; | 1657 | buffer_info = &tpd_ring->buffer_info[i]; |
1662 | if (buffer_info->dma) { | 1658 | if (buffer_info->dma) { |
1663 | pci_unmap_page(pdev, buffer_info->dma, | 1659 | pci_unmap_page(pdev, buffer_info->dma, |
1664 | buffer_info->length, PCI_DMA_TODEVICE); | 1660 | buffer_info->length, PCI_DMA_TODEVICE); |
1665 | buffer_info->dma = 0; | 1661 | buffer_info->dma = 0; |
1666 | } | 1662 | } |
1667 | } | 1663 | } |
@@ -1703,7 +1699,7 @@ void atl1_free_ring_resources(struct atl1_adapter *adapter) | |||
1703 | 1699 | ||
1704 | kfree(tpd_ring->buffer_info); | 1700 | kfree(tpd_ring->buffer_info); |
1705 | pci_free_consistent(pdev, ring_header->size, ring_header->desc, | 1701 | pci_free_consistent(pdev, ring_header->size, ring_header->desc, |
1706 | ring_header->dma); | 1702 | ring_header->dma); |
1707 | 1703 | ||
1708 | tpd_ring->buffer_info = NULL; | 1704 | tpd_ring->buffer_info = NULL; |
1709 | tpd_ring->desc = NULL; | 1705 | tpd_ring->desc = NULL; |
@@ -1752,11 +1748,6 @@ s32 atl1_up(struct atl1_adapter *adapter) | |||
1752 | atl1_check_link(adapter); | 1748 | atl1_check_link(adapter); |
1753 | return 0; | 1749 | return 0; |
1754 | 1750 | ||
1755 | /* FIXME: unreachable code! -- CHS */ | ||
1756 | /* free irq disable any interrupt */ | ||
1757 | iowrite32(0, adapter->hw.hw_addr + REG_IMR); | ||
1758 | free_irq(adapter->pdev->irq, netdev); | ||
1759 | |||
1760 | err_up: | 1751 | err_up: |
1761 | pci_disable_msi(adapter->pdev); | 1752 | pci_disable_msi(adapter->pdev); |
1762 | /* free rx_buffers */ | 1753 | /* free rx_buffers */ |
@@ -1867,7 +1858,8 @@ static int mdio_read(struct net_device *netdev, int phy_id, int reg_num) | |||
1867 | return result; | 1858 | return result; |
1868 | } | 1859 | } |
1869 | 1860 | ||
1870 | static void mdio_write(struct net_device *netdev, int phy_id, int reg_num, int val) | 1861 | static void mdio_write(struct net_device *netdev, int phy_id, int reg_num, |
1862 | int val) | ||
1871 | { | 1863 | { |
1872 | struct atl1_adapter *adapter = netdev_priv(netdev); | 1864 | struct atl1_adapter *adapter = netdev_priv(netdev); |
1873 | 1865 | ||
@@ -2015,11 +2007,14 @@ static void atl1_poll_controller(struct net_device *netdev) | |||
2015 | #endif | 2007 | #endif |
2016 | 2008 | ||
2017 | /* | 2009 | /* |
2010 | * Orphaned vendor comment left intact here: | ||
2011 | * <vendor comment> | ||
2018 | * If TPD Buffer size equal to 0, PCIE DMAR_TO_INT | 2012 | * If TPD Buffer size equal to 0, PCIE DMAR_TO_INT |
2019 | * will assert. We do soft reset <0x1400=1> according | 2013 | * will assert. We do soft reset <0x1400=1> according |
2020 | * with the SPEC. BUT, it seemes that PCIE or DMA | 2014 | * with the SPEC. BUT, it seemes that PCIE or DMA |
2021 | * state-machine will not be reset. DMAR_TO_INT will | 2015 | * state-machine will not be reset. DMAR_TO_INT will |
2022 | * assert again and again. | 2016 | * assert again and again. |
2017 | * </vendor comment> | ||
2023 | */ | 2018 | */ |
2024 | static void atl1_tx_timeout_task(struct work_struct *work) | 2019 | static void atl1_tx_timeout_task(struct work_struct *work) |
2025 | { | 2020 | { |
@@ -2053,6 +2048,8 @@ static void atl1_link_chg_task(struct work_struct *work) | |||
2053 | static void atl1_pcie_patch(struct atl1_adapter *adapter) | 2048 | static void atl1_pcie_patch(struct atl1_adapter *adapter) |
2054 | { | 2049 | { |
2055 | u32 value; | 2050 | u32 value; |
2051 | |||
2052 | /* much vendor magic here */ | ||
2056 | value = 0x6500; | 2053 | value = 0x6500; |
2057 | iowrite32(value, adapter->hw.hw_addr + 0x12FC); | 2054 | iowrite32(value, adapter->hw.hw_addr + 0x12FC); |
2058 | /* pcie flow control mode change */ | 2055 | /* pcie flow control mode change */ |
@@ -2089,7 +2086,7 @@ static void atl1_via_workaround(struct atl1_adapter *adapter) | |||
2089 | * and a hardware reset occur. | 2086 | * and a hardware reset occur. |
2090 | */ | 2087 | */ |
2091 | static int __devinit atl1_probe(struct pci_dev *pdev, | 2088 | static int __devinit atl1_probe(struct pci_dev *pdev, |
2092 | const struct pci_device_id *ent) | 2089 | const struct pci_device_id *ent) |
2093 | { | 2090 | { |
2094 | struct net_device *netdev; | 2091 | struct net_device *netdev; |
2095 | struct atl1_adapter *adapter; | 2092 | struct atl1_adapter *adapter; |
@@ -2143,7 +2140,7 @@ static int __devinit atl1_probe(struct pci_dev *pdev, | |||
2143 | } | 2140 | } |
2144 | /* get device revision number */ | 2141 | /* get device revision number */ |
2145 | adapter->hw.dev_rev = ioread16(adapter->hw.hw_addr + | 2142 | adapter->hw.dev_rev = ioread16(adapter->hw.hw_addr + |
2146 | (REG_MASTER_CTRL + 2)); | 2143 | (REG_MASTER_CTRL + 2)); |
2147 | dev_info(&pdev->dev, "version %s\n", DRIVER_VERSION); | 2144 | dev_info(&pdev->dev, "version %s\n", DRIVER_VERSION); |
2148 | 2145 | ||
2149 | /* set default ring resource counts */ | 2146 | /* set default ring resource counts */ |
@@ -2296,7 +2293,8 @@ static void __devexit atl1_remove(struct pci_dev *pdev) | |||
2296 | * address, we need to save the permanent one. | 2293 | * address, we need to save the permanent one. |
2297 | */ | 2294 | */ |
2298 | if (memcmp(adapter->hw.mac_addr, adapter->hw.perm_mac_addr, ETH_ALEN)) { | 2295 | if (memcmp(adapter->hw.mac_addr, adapter->hw.perm_mac_addr, ETH_ALEN)) { |
2299 | memcpy(adapter->hw.mac_addr, adapter->hw.perm_mac_addr, ETH_ALEN); | 2296 | memcpy(adapter->hw.mac_addr, adapter->hw.perm_mac_addr, |
2297 | ETH_ALEN); | ||
2300 | atl1_set_mac_addr(&adapter->hw); | 2298 | atl1_set_mac_addr(&adapter->hw); |
2301 | } | 2299 | } |
2302 | 2300 | ||
@@ -2363,11 +2361,11 @@ static int atl1_suspend(struct pci_dev *pdev, pm_message_t state) | |||
2363 | ctrl |= MAC_CTRL_RX_EN; | 2361 | ctrl |= MAC_CTRL_RX_EN; |
2364 | iowrite32(ctrl, hw->hw_addr + REG_MAC_CTRL); | 2362 | iowrite32(ctrl, hw->hw_addr + REG_MAC_CTRL); |
2365 | pci_enable_wake(pdev, PCI_D3hot, 1); | 2363 | pci_enable_wake(pdev, PCI_D3hot, 1); |
2366 | pci_enable_wake(pdev, PCI_D3cold, 1); /* 4 == D3 cold */ | 2364 | pci_enable_wake(pdev, PCI_D3cold, 1); |
2367 | } else { | 2365 | } else { |
2368 | iowrite32(0, hw->hw_addr + REG_WOL_CTRL); | 2366 | iowrite32(0, hw->hw_addr + REG_WOL_CTRL); |
2369 | pci_enable_wake(pdev, PCI_D3hot, 0); | 2367 | pci_enable_wake(pdev, PCI_D3hot, 0); |
2370 | pci_enable_wake(pdev, PCI_D3cold, 0); /* 4 == D3 cold */ | 2368 | pci_enable_wake(pdev, PCI_D3cold, 0); |
2371 | } | 2369 | } |
2372 | 2370 | ||
2373 | pci_save_state(pdev); | 2371 | pci_save_state(pdev); |
@@ -2412,8 +2410,6 @@ static struct pci_driver atl1_driver = { | |||
2412 | .id_table = atl1_pci_tbl, | 2410 | .id_table = atl1_pci_tbl, |
2413 | .probe = atl1_probe, | 2411 | .probe = atl1_probe, |
2414 | .remove = __devexit_p(atl1_remove), | 2412 | .remove = __devexit_p(atl1_remove), |
2415 | /* Power Managment Hooks */ | ||
2416 | /* probably broken right now -- CHS */ | ||
2417 | .suspend = atl1_suspend, | 2413 | .suspend = atl1_suspend, |
2418 | .resume = atl1_resume | 2414 | .resume = atl1_resume |
2419 | }; | 2415 | }; |