author     Linus Torvalds <torvalds@ppc970.osdl.org>   2005-05-19 18:29:23 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>   2005-05-19 18:29:23 -0400
commit     804c64ea864d0a8ee13f3de0b74158a3e9c3166d (patch)
tree       842b223e9db75ece9f4e2a5daf0f519b07b4a92a
parent     49a43876b935c811cfd29d8fe998a6912a1cc5c4 (diff)
parent     aa1c6a6f7f0518b42994d02756a41cbfdcac1916 (diff)
Merge of rsync://rsync.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6.git/
-rw-r--r--  drivers/net/tg3.c                       | 480
-rw-r--r--  drivers/net/tg3.h                       |   8
-rw-r--r--  fs/namei.c                              |   1
-rw-r--r--  include/net/act_generic.h               |   4
-rw-r--r--  net/ipv4/ip_output.c                    |   8
-rw-r--r--  net/ipv4/ipvs/ip_vs_xmit.c              |   1
-rw-r--r--  net/ipv4/netfilter/ip_conntrack_core.c  |  28
-rw-r--r--  net/ipv6/ip6_output.c                   |  14
-rw-r--r--  net/netlink/af_netlink.c                |  13
-rw-r--r--  net/unix/af_unix.c                      |  28
-rw-r--r--  net/xfrm/xfrm_algo.c                    |   2
-rw-r--r--  net/xfrm/xfrm_user.c                    |  15
12 files changed, 423 insertions(+), 179 deletions(-)
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index f79b02e80e75..4d2bdbdd34e8 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -420,7 +420,8 @@ static void tg3_enable_ints(struct tg3 *tp)
 {
 	tw32(TG3PCI_MISC_HOST_CTRL,
 	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
-	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
+	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
+		     (tp->last_tag << 24));
 	tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
 
 	tg3_cond_int(tp);
@@ -455,10 +456,16 @@ static void tg3_restart_ints(struct tg3 *tp)
 {
 	tw32(TG3PCI_MISC_HOST_CTRL,
 	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
-	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
+	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
+		     tp->last_tag << 24);
 	mmiowb();
 
-	if (tg3_has_work(tp))
+	/* When doing tagged status, this work check is unnecessary.
+	 * The last_tag we write above tells the chip which piece of
+	 * work we've completed.
+	 */
+	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
+	    tg3_has_work(tp))
 		tw32(HOSTCC_MODE, tp->coalesce_mode |
 		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
 }
@@ -2500,7 +2507,7 @@ static int tg3_setup_phy(struct tg3 *tp, int force_reset)
 	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
 		if (netif_carrier_ok(tp->dev)) {
 			tw32(HOSTCC_STAT_COAL_TICKS,
-			     DEFAULT_STAT_COAL_TICKS);
+			     tp->coal.stats_block_coalesce_usecs);
 		} else {
 			tw32(HOSTCC_STAT_COAL_TICKS, 0);
 		}
@@ -2886,7 +2893,6 @@ static int tg3_poll(struct net_device *netdev, int *budget)
 	 * All RX "locking" is done by ensuring outside
 	 * code synchronizes with dev->poll()
 	 */
-	done = 1;
 	if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
 		int orig_budget = *budget;
 		int work_done;
@@ -2898,12 +2904,14 @@ static int tg3_poll(struct net_device *netdev, int *budget)
 
 		*budget -= work_done;
 		netdev->quota -= work_done;
-
-		if (work_done >= orig_budget)
-			done = 0;
 	}
 
+	if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
+		tp->last_tag = sblk->status_tag;
+	rmb();
+
 	/* if no more work, tell net stack and NIC we're done */
+	done = !tg3_has_work(tp);
 	if (done) {
 		spin_lock_irqsave(&tp->lock, flags);
 		__netif_rx_complete(netdev);
@@ -2928,22 +2936,21 @@ static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
 	spin_lock_irqsave(&tp->lock, flags);
 
 	/*
-	 * writing any value to intr-mbox-0 clears PCI INTA# and
+	 * Writing any value to intr-mbox-0 clears PCI INTA# and
 	 * chip-internal interrupt pending events.
-	 * writing non-zero to intr-mbox-0 additional tells the
+	 * Writing non-zero to intr-mbox-0 additional tells the
 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
 	 * event coalescing.
 	 */
 	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
+	tp->last_tag = sblk->status_tag;
 	sblk->status &= ~SD_STATUS_UPDATED;
-
 	if (likely(tg3_has_work(tp)))
 		netif_rx_schedule(dev);		/* schedule NAPI poll */
 	else {
-		/* no work, re-enable interrupts
-		 */
+		/* No work, re-enable interrupts. */
 		tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
-			     0x00000000);
+			     tp->last_tag << 24);
 	}
 
 	spin_unlock_irqrestore(&tp->lock, flags);
@@ -2969,21 +2976,62 @@ static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 	if ((sblk->status & SD_STATUS_UPDATED) ||
 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
 		/*
-		 * writing any value to intr-mbox-0 clears PCI INTA# and
+		 * Writing any value to intr-mbox-0 clears PCI INTA# and
 		 * chip-internal interrupt pending events.
-		 * writing non-zero to intr-mbox-0 additional tells the
+		 * Writing non-zero to intr-mbox-0 additional tells the
 		 * NIC to stop sending us irqs, engaging "in-intr-handler"
 		 * event coalescing.
 		 */
 		tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
 			     0x00000001);
+		sblk->status &= ~SD_STATUS_UPDATED;
+		if (likely(tg3_has_work(tp)))
+			netif_rx_schedule(dev);		/* schedule NAPI poll */
+		else {
+			/* No work, shared interrupt perhaps?  re-enable
+			 * interrupts, and flush that PCI write
+			 */
+			tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
+				     0x00000000);
+			tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
+		}
+	} else {	/* shared interrupt */
+		handled = 0;
+	}
+
+	spin_unlock_irqrestore(&tp->lock, flags);
+
+	return IRQ_RETVAL(handled);
+}
+
+static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
+{
+	struct net_device *dev = dev_id;
+	struct tg3 *tp = netdev_priv(dev);
+	struct tg3_hw_status *sblk = tp->hw_status;
+	unsigned long flags;
+	unsigned int handled = 1;
+
+	spin_lock_irqsave(&tp->lock, flags);
+
+	/* In INTx mode, it is possible for the interrupt to arrive at
+	 * the CPU before the status block posted prior to the interrupt.
+	 * Reading the PCI State register will confirm whether the
+	 * interrupt is ours and will flush the status block.
+	 */
+	if ((sblk->status & SD_STATUS_UPDATED) ||
+	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
 		/*
-		 * Flush PCI write.  This also guarantees that our
-		 * status block has been flushed to host memory.
+		 * writing any value to intr-mbox-0 clears PCI INTA# and
+		 * chip-internal interrupt pending events.
+		 * writing non-zero to intr-mbox-0 additional tells the
+		 * NIC to stop sending us irqs, engaging "in-intr-handler"
+		 * event coalescing.
 		 */
-		tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
+		tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
+			     0x00000001);
+		tp->last_tag = sblk->status_tag;
 		sblk->status &= ~SD_STATUS_UPDATED;
-
 		if (likely(tg3_has_work(tp)))
 			netif_rx_schedule(dev);		/* schedule NAPI poll */
 		else {
@@ -2991,7 +3039,7 @@ static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 			 * interrupts, and flush that PCI write
 			 */
 			tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
-				     0x00000000);
+				     tp->last_tag << 24);
 			tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
 		}
 	} else {	/* shared interrupt */
@@ -5044,6 +5092,27 @@ static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
 }
 
 static void __tg3_set_rx_mode(struct net_device *);
+static void tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
+{
+	tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
+	tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
+	tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
+	tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
+	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
+		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
+		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
+	}
+	tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
+	tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
+	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
+		u32 val = ec->stats_block_coalesce_usecs;
+
+		if (!netif_carrier_ok(tp->dev))
+			val = 0;
+
+		tw32(HOSTCC_STAT_COAL_TICKS, val);
+	}
+}
 
 /* tp->lock is held. */
 static int tg3_reset_hw(struct tg3 *tp)
@@ -5366,16 +5435,7 @@ static int tg3_reset_hw(struct tg3 *tp)
 		udelay(10);
 	}
 
-	tw32(HOSTCC_RXCOL_TICKS, 0);
-	tw32(HOSTCC_TXCOL_TICKS, LOW_TXCOL_TICKS);
-	tw32(HOSTCC_RXMAX_FRAMES, 1);
-	tw32(HOSTCC_TXMAX_FRAMES, LOW_RXMAX_FRAMES);
-	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
-		tw32(HOSTCC_RXCOAL_TICK_INT, 0);
-		tw32(HOSTCC_TXCOAL_TICK_INT, 0);
-	}
-	tw32(HOSTCC_RXCOAL_MAXF_INT, 1);
-	tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
+	tg3_set_coalesce(tp, &tp->coal);
 
 	/* set status block DMA address */
 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
@@ -5388,8 +5448,6 @@ static int tg3_reset_hw(struct tg3 *tp)
 	 * the tg3_periodic_fetch_stats call there, and
 	 * tg3_get_stats to see how this works for 5705/5750 chips.
 	 */
-	tw32(HOSTCC_STAT_COAL_TICKS,
-	     DEFAULT_STAT_COAL_TICKS);
 	tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
 	     ((u64) tp->stats_mapping >> 32));
 	tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
@@ -5445,7 +5503,8 @@ static int tg3_reset_hw(struct tg3 *tp)
 	udelay(100);
 
 	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
-	tr32(MAILBOX_INTERRUPT_0);
+	tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
+	tp->last_tag = 0;
 
 	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
 		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
@@ -5723,31 +5782,33 @@ static void tg3_timer(unsigned long __opaque)
 	spin_lock_irqsave(&tp->lock, flags);
 	spin_lock(&tp->tx_lock);
 
-	/* All of this garbage is because when using non-tagged
-	 * IRQ status the mailbox/status_block protocol the chip
-	 * uses with the cpu is race prone.
-	 */
-	if (tp->hw_status->status & SD_STATUS_UPDATED) {
-		tw32(GRC_LOCAL_CTRL,
-		     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
-	} else {
-		tw32(HOSTCC_MODE, tp->coalesce_mode |
-		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
-	}
+	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
+		/* All of this garbage is because when using non-tagged
+		 * IRQ status the mailbox/status_block protocol the chip
+		 * uses with the cpu is race prone.
+		 */
+		if (tp->hw_status->status & SD_STATUS_UPDATED) {
+			tw32(GRC_LOCAL_CTRL,
+			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
+		} else {
+			tw32(HOSTCC_MODE, tp->coalesce_mode |
+			     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
+		}
 
-	if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
-		tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
-		spin_unlock(&tp->tx_lock);
-		spin_unlock_irqrestore(&tp->lock, flags);
-		schedule_work(&tp->reset_task);
-		return;
+		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
+			tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
+			spin_unlock(&tp->tx_lock);
+			spin_unlock_irqrestore(&tp->lock, flags);
+			schedule_work(&tp->reset_task);
+			return;
+		}
 	}
 
-	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
-		tg3_periodic_fetch_stats(tp);
-
 	/* This part only runs once per second. */
 	if (!--tp->timer_counter) {
+		if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
+			tg3_periodic_fetch_stats(tp);
+
 		if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
 			u32 mac_stat;
 			int phy_event;
@@ -5846,9 +5907,13 @@ static int tg3_test_interrupt(struct tg3 *tp)
 	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
 		err = request_irq(tp->pdev->irq, tg3_msi,
 				  SA_SAMPLE_RANDOM, dev->name, dev);
-	else
-		err = request_irq(tp->pdev->irq, tg3_interrupt,
+	else {
+		irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
+		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
+			fn = tg3_interrupt_tagged;
+		err = request_irq(tp->pdev->irq, fn,
 				  SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
+	}
 
 	if (err)
 		return err;
@@ -5900,9 +5965,14 @@ static int tg3_test_msi(struct tg3 *tp)
 
 	tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
 
-	err = request_irq(tp->pdev->irq, tg3_interrupt,
-			  SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
+	{
+		irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
+		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
+			fn = tg3_interrupt_tagged;
 
+		err = request_irq(tp->pdev->irq, fn,
+				  SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
+	}
 	if (err)
 		return err;
 
@@ -5948,7 +6018,13 @@ static int tg3_open(struct net_device *dev)
 	if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
 	    (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
 	    (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX)) {
-		if (pci_enable_msi(tp->pdev) == 0) {
+		/* All MSI supporting chips should support tagged
+		 * status.  Assert that this is the case.
+		 */
+		if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
+			printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
+			       "Not using MSI.\n", tp->dev->name);
+		} else if (pci_enable_msi(tp->pdev) == 0) {
 			u32 msi_mode;
 
 			msi_mode = tr32(MSGINT_MODE);
@@ -5959,9 +6035,14 @@ static int tg3_open(struct net_device *dev)
 	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
 		err = request_irq(tp->pdev->irq, tg3_msi,
 				  SA_SAMPLE_RANDOM, dev->name, dev);
-	else
-		err = request_irq(tp->pdev->irq, tg3_interrupt,
+	else {
+		irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
+		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
+			fn = tg3_interrupt_tagged;
+
+		err = request_irq(tp->pdev->irq, fn,
 				  SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
+	}
 
 	if (err) {
 		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
@@ -5980,9 +6061,16 @@ static int tg3_open(struct net_device *dev)
 		tg3_halt(tp, 1);
 		tg3_free_rings(tp);
 	} else {
-		tp->timer_offset = HZ / 10;
-		tp->timer_counter = tp->timer_multiplier = 10;
-		tp->asf_counter = tp->asf_multiplier = (10 * 120);
+		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
+			tp->timer_offset = HZ;
+		else
+			tp->timer_offset = HZ / 10;
+
+		BUG_ON(tp->timer_offset > HZ);
+		tp->timer_counter = tp->timer_multiplier =
+			(HZ / tp->timer_offset);
+		tp->asf_counter = tp->asf_multiplier =
+			((HZ / tp->timer_offset) * 120);
 
 		init_timer(&tp->timer);
 		tp->timer.expires = jiffies + tp->timer_offset;
@@ -6005,6 +6093,7 @@ static int tg3_open(struct net_device *dev)
 
 	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
 		err = tg3_test_msi(tp);
+
 		if (err) {
 			spin_lock_irq(&tp->lock);
 			spin_lock(&tp->tx_lock);
@@ -7203,6 +7292,14 @@ static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 }
 #endif
 
+static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
+{
+	struct tg3 *tp = netdev_priv(dev);
+
+	memcpy(ec, &tp->coal, sizeof(*ec));
+	return 0;
+}
+
 static struct ethtool_ops tg3_ethtool_ops = {
 	.get_settings		= tg3_get_settings,
 	.set_settings		= tg3_set_settings,
@@ -7235,6 +7332,7 @@ static struct ethtool_ops tg3_ethtool_ops = {
 	.get_strings		= tg3_get_strings,
 	.get_stats_count	= tg3_get_stats_count,
 	.get_ethtool_stats	= tg3_get_ethtool_stats,
+	.get_coalesce		= tg3_get_coalesce,
 };
 
 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
@@ -8422,15 +8520,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
 	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
 		tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
 
-	/* Only 5701 and later support tagged irq status mode.
-	 * Also, 5788 chips cannot use tagged irq status.
-	 *
-	 * However, since we are using NAPI avoid tagged irq status
-	 * because the interrupt condition is more difficult to
-	 * fully clear in that mode.
-	 */
 	tp->coalesce_mode = 0;
-
 	if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
 	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
 		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
@@ -8494,6 +8584,18 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
 	    grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
 		tp->tg3_flags2 |= TG3_FLG2_IS_5788;
 
+	if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
+	    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
+		tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
+	if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
+		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
+				      HOSTCC_MODE_CLRTICK_TXBD);
+
+		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
+		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
+				       tp->misc_host_ctrl);
+	}
+
 	/* these are limited to 10/100 only */
 	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
 	    (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
@@ -8671,6 +8773,146 @@ static int __devinit tg3_get_device_address(struct tg3 *tp)
 	return 0;
 }
 
+#define BOUNDARY_SINGLE_CACHELINE	1
+#define BOUNDARY_MULTI_CACHELINE	2
+
+static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
+{
+	int cacheline_size;
+	u8 byte;
+	int goal;
+
+	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
+	if (byte == 0)
+		cacheline_size = 1024;
+	else
+		cacheline_size = (int) byte * 4;
+
+	/* On 5703 and later chips, the boundary bits have no
+	 * effect.
+	 */
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
+	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
+	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
+		goto out;
+
+#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
+	goal = BOUNDARY_MULTI_CACHELINE;
+#else
+#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
+	goal = BOUNDARY_SINGLE_CACHELINE;
+#else
+	goal = 0;
+#endif
+#endif
+
+	if (!goal)
+		goto out;
+
+	/* PCI controllers on most RISC systems tend to disconnect
+	 * when a device tries to burst across a cache-line boundary.
+	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
+	 *
+	 * Unfortunately, for PCI-E there are only limited
+	 * write-side controls for this, and thus for reads
+	 * we will still get the disconnects.  We'll also waste
+	 * these PCI cycles for both read and write for chips
+	 * other than 5700 and 5701 which do not implement the
+	 * boundary bits.
+	 */
+	if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
+	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
+		switch (cacheline_size) {
+		case 16:
+		case 32:
+		case 64:
+		case 128:
+			if (goal == BOUNDARY_SINGLE_CACHELINE) {
+				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
+					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
+			} else {
+				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
+					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
+			}
+			break;
+
+		case 256:
+			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
+				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
+			break;
+
+		default:
+			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
+				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
+			break;
+		};
+	} else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
+		switch (cacheline_size) {
+		case 16:
+		case 32:
+		case 64:
+			if (goal == BOUNDARY_SINGLE_CACHELINE) {
+				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
+				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
+				break;
+			}
+			/* fallthrough */
+		case 128:
+		default:
+			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
+			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
+			break;
+		};
+	} else {
+		switch (cacheline_size) {
+		case 16:
+			if (goal == BOUNDARY_SINGLE_CACHELINE) {
+				val |= (DMA_RWCTRL_READ_BNDRY_16 |
+					DMA_RWCTRL_WRITE_BNDRY_16);
+				break;
+			}
+			/* fallthrough */
+		case 32:
+			if (goal == BOUNDARY_SINGLE_CACHELINE) {
+				val |= (DMA_RWCTRL_READ_BNDRY_32 |
+					DMA_RWCTRL_WRITE_BNDRY_32);
+				break;
+			}
+			/* fallthrough */
+		case 64:
+			if (goal == BOUNDARY_SINGLE_CACHELINE) {
+				val |= (DMA_RWCTRL_READ_BNDRY_64 |
+					DMA_RWCTRL_WRITE_BNDRY_64);
+				break;
+			}
+			/* fallthrough */
+		case 128:
+			if (goal == BOUNDARY_SINGLE_CACHELINE) {
+				val |= (DMA_RWCTRL_READ_BNDRY_128 |
+					DMA_RWCTRL_WRITE_BNDRY_128);
+				break;
+			}
+			/* fallthrough */
+		case 256:
+			val |= (DMA_RWCTRL_READ_BNDRY_256 |
+				DMA_RWCTRL_WRITE_BNDRY_256);
+			break;
+		case 512:
+			val |= (DMA_RWCTRL_READ_BNDRY_512 |
+				DMA_RWCTRL_WRITE_BNDRY_512);
+			break;
+		case 1024:
+		default:
+			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
+				DMA_RWCTRL_WRITE_BNDRY_1024);
+			break;
+		};
+	}
+
+out:
+	return val;
+}
+
 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
 {
 	struct tg3_internal_buffer_desc test_desc;
@@ -8757,7 +8999,7 @@ static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dm
 static int __devinit tg3_test_dma(struct tg3 *tp)
 {
 	dma_addr_t buf_dma;
-	u32 *buf;
+	u32 *buf, saved_dma_rwctrl;
 	int ret;
 
 	buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
@@ -8769,46 +9011,7 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
 	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
 			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
 
-#ifndef CONFIG_X86
-	{
-		u8 byte;
-		int cacheline_size;
-		pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
-
-		if (byte == 0)
-			cacheline_size = 1024;
-		else
-			cacheline_size = (int) byte * 4;
-
-		switch (cacheline_size) {
-		case 16:
-		case 32:
-		case 64:
-		case 128:
-			if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
-			    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
-				tp->dma_rwctrl |=
-					DMA_RWCTRL_WRITE_BNDRY_384_PCIX;
-				break;
-			} else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
-				tp->dma_rwctrl &=
-					~(DMA_RWCTRL_PCI_WRITE_CMD);
-				tp->dma_rwctrl |=
-					DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
-				break;
-			}
-			/* fallthrough */
-		case 256:
-			if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
-			    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
-				tp->dma_rwctrl |=
-					DMA_RWCTRL_WRITE_BNDRY_256;
-			else if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
-				tp->dma_rwctrl |=
-					DMA_RWCTRL_WRITE_BNDRY_256_PCIX;
-		};
-	}
-#endif
+	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
 
 	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
 		/* DMA read watermark not used on PCIE */
@@ -8827,7 +9030,7 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
 		if (ccval == 0x6 || ccval == 0x7)
 			tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
 
-		/* Set bit 23 to renable PCIX hw bug fix */
+		/* Set bit 23 to enable PCIX hw bug fix */
 		tp->dma_rwctrl |= 0x009f0000;
 	} else {
 		tp->dma_rwctrl |= 0x001b000f;
@@ -8868,6 +9071,13 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
 		goto out;
 
+	/* It is best to perform DMA test with maximum write burst size
+	 * to expose the 5700/5701 write DMA bug.
+	 */
+	saved_dma_rwctrl = tp->dma_rwctrl;
+	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
+	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
+
 	while (1) {
 		u32 *p = buf, i;
 
@@ -8906,8 +9116,9 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
 			if (p[i] == i)
 				continue;
 
-			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) ==
-			    DMA_RWCTRL_WRITE_BNDRY_DISAB) {
+			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
+			    DMA_RWCTRL_WRITE_BNDRY_16) {
+				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
 				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
 				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
 				break;
@@ -8924,6 +9135,14 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
 			break;
 		}
 	}
+	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
+	    DMA_RWCTRL_WRITE_BNDRY_16) {
+		/* DMA test passed without adjusting DMA boundary,
+		 * just restore the calculated DMA boundary
+		 */
+		tp->dma_rwctrl = saved_dma_rwctrl;
+		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
+	}
 
 out:
 	pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
@@ -9011,6 +9230,31 @@ static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp)
 	return peer;
 }
 
+static void __devinit tg3_init_coal(struct tg3 *tp)
+{
+	struct ethtool_coalesce *ec = &tp->coal;
+
+	memset(ec, 0, sizeof(*ec));
+	ec->cmd = ETHTOOL_GCOALESCE;
+	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
+	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
+	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
+	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
+	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
+	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
+	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
+	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
+	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
+
+	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
+				 HOSTCC_MODE_CLRTICK_TXBD)) {
+		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
+		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
+		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
+		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
+	}
+}
+
 static int __devinit tg3_init_one(struct pci_dev *pdev,
 				  const struct pci_device_id *ent)
 {
@@ -9256,6 +9500,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 	/* flow control autonegotiation is default behavior */
 	tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
 
+	tg3_init_coal(tp);
+
 	err = register_netdev(dev);
 	if (err) {
 		printk(KERN_ERR PFX "Cannot register net device, "
@@ -9298,6 +9544,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 		(tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
 		(tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
 		(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
+	printk(KERN_INFO "%s: dma_rwctrl[%08x]\n",
+	       dev->name, tp->dma_rwctrl);
 
 	return 0;
 
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 8de6f21037ba..993f84c93dc4 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -876,10 +876,12 @@
 #define  HOSTCC_STATUS_ERROR_ATTN	 0x00000004
 #define HOSTCC_RXCOL_TICKS		0x00003c08
 #define  LOW_RXCOL_TICKS		 0x00000032
+#define  LOW_RXCOL_TICKS_CLRTCKS	 0x00000014
 #define  DEFAULT_RXCOL_TICKS		 0x00000048
 #define  HIGH_RXCOL_TICKS		 0x00000096
 #define HOSTCC_TXCOL_TICKS		0x00003c0c
 #define  LOW_TXCOL_TICKS		 0x00000096
+#define  LOW_TXCOL_TICKS_CLRTCKS	 0x00000048
 #define  DEFAULT_TXCOL_TICKS		 0x0000012c
 #define  HIGH_TXCOL_TICKS		 0x00000145
 #define HOSTCC_RXMAX_FRAMES		0x00003c10
@@ -892,8 +894,10 @@
 #define  HIGH_TXMAX_FRAMES		 0x00000052
 #define HOSTCC_RXCOAL_TICK_INT		0x00003c18
 #define  DEFAULT_RXCOAL_TICK_INT	 0x00000019
+#define  DEFAULT_RXCOAL_TICK_INT_CLRTCKS 0x00000014
 #define HOSTCC_TXCOAL_TICK_INT		0x00003c1c
 #define  DEFAULT_TXCOAL_TICK_INT	 0x00000019
+#define  DEFAULT_TXCOAL_TICK_INT_CLRTCKS 0x00000014
 #define HOSTCC_RXCOAL_MAXF_INT		0x00003c20
 #define  DEFAULT_RXCOAL_MAXF_INT	 0x00000005
 #define HOSTCC_TXCOAL_MAXF_INT		0x00003c24
@@ -2023,6 +2027,7 @@ struct tg3 {
 
 	struct tg3_hw_status		*hw_status;
 	dma_addr_t			status_mapping;
+	u32				last_tag;
 
 	u32				msg_enable;
 
@@ -2068,6 +2073,7 @@ struct tg3 {
 
 	u32				rx_offset;
 	u32				tg3_flags;
+#define TG3_FLAG_TAGGED_STATUS		0x00000001
 #define TG3_FLAG_TXD_MBOX_HWBUG		0x00000002
 #define TG3_FLAG_RX_CHECKSUMS		0x00000004
 #define TG3_FLAG_USE_LINKCHG_REG	0x00000008
@@ -2225,7 +2231,7 @@ struct tg3 {
 
 #define SST_25VF0X0_PAGE_SIZE		4098
 
-
+	struct ethtool_coalesce		coal;
 };
 
 #endif /* !(_T3_H) */
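
The tg3.c/tg3.h changes above move the driver to the chip's tagged-status protocol: the NIC stamps each status block with an incrementing status_tag, the poll loop and interrupt handlers record it in tp->last_tag, and work is acknowledged by writing that tag into bits 31:24 of interrupt mailbox 0 instead of writing zero. The following is a minimal user-space sketch of that handshake, not the driver's code; nic_post_work, driver_ack and work_pending are invented names used only for illustration.

    #include <stdint.h>
    #include <stdio.h>

    /* Toy model of the tagged-status handshake: the hardware bumps
     * status_tag whenever it posts new work; the driver acks by writing
     * the tag it has consumed into bits 31:24 of the interrupt mailbox. */
    struct nic_model {
        uint8_t  status_tag;   /* tag of the most recently posted status block */
        uint32_t mailbox;      /* last value written to intr-mbox-0 */
    };

    static void nic_post_work(struct nic_model *nic)
    {
        nic->status_tag++;                        /* chip tags the new status block */
    }

    static void driver_ack(struct nic_model *nic, uint8_t last_tag)
    {
        nic->mailbox = (uint32_t)last_tag << 24;  /* like tp->last_tag << 24 above */
    }

    /* Work is still pending iff the chip has posted a tag the driver
     * has not yet acknowledged. */
    static int work_pending(const struct nic_model *nic)
    {
        return nic->status_tag != (uint8_t)(nic->mailbox >> 24);
    }

    int main(void)
    {
        struct nic_model nic = { 0, 0 };

        nic_post_work(&nic);                      /* interrupt fires, tag = 1 */
        uint8_t last_tag = nic.status_tag;        /* driver records the tag */
        printf("pending before ack: %d\n", work_pending(&nic));   /* 1 */

        driver_ack(&nic, last_tag);               /* re-enable: write tag << 24 */
        printf("pending after ack:  %d\n", work_pending(&nic));   /* 0 */
        return 0;
    }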
diff --git a/fs/namei.c b/fs/namei.c
index defe6781e003..dd78f01b6de8 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1580,6 +1580,7 @@ enoent:
 fail:
 	return dentry;
 }
+EXPORT_SYMBOL_GPL(lookup_create);
 
 int vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
 {
diff --git a/include/net/act_generic.h b/include/net/act_generic.h
index 95b120781c14..c9daa7e52300 100644
--- a/include/net/act_generic.h
+++ b/include/net/act_generic.h
@@ -2,8 +2,8 @@
  * include/net/act_generic.h
  *
 */
-#ifndef ACT_GENERIC_H
-#define ACT_GENERIC_H
+#ifndef _NET_ACT_GENERIC_H
+#define _NET_ACT_GENERIC_H
 static inline int tcf_defact_release(struct tcf_defact *p, int bind)
 {
 	int ret = 0;
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index daebd93fd8a0..760dc8238d65 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -490,6 +490,14 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*))
 			/* Partially cloned skb? */
 			if (skb_shared(frag))
 				goto slow_path;
+
+			BUG_ON(frag->sk);
+			if (skb->sk) {
+				sock_hold(skb->sk);
+				frag->sk = skb->sk;
+				frag->destructor = sock_wfree;
+				skb->truesize -= frag->truesize;
+			}
 		}
 
 		/* Everything is OK. Generate! */
diff --git a/net/ipv4/ipvs/ip_vs_xmit.c b/net/ipv4/ipvs/ip_vs_xmit.c
index faa6176bbeb1..de21da00057f 100644
--- a/net/ipv4/ipvs/ip_vs_xmit.c
+++ b/net/ipv4/ipvs/ip_vs_xmit.c
@@ -508,7 +508,6 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 		rc = NF_ACCEPT;
 		/* do not touch skb anymore */
 		atomic_inc(&cp->in_pkts);
-		__ip_vs_conn_put(cp);
 		goto out;
 	}
 
diff --git a/net/ipv4/netfilter/ip_conntrack_core.c b/net/ipv4/netfilter/ip_conntrack_core.c
index 28d9425d5c39..09e824622977 100644
--- a/net/ipv4/netfilter/ip_conntrack_core.c
+++ b/net/ipv4/netfilter/ip_conntrack_core.c
@@ -940,37 +940,25 @@ void ip_ct_refresh_acct(struct ip_conntrack *ct,
 struct sk_buff *
 ip_ct_gather_frags(struct sk_buff *skb, u_int32_t user)
 {
-	struct sock *sk = skb->sk;
 #ifdef CONFIG_NETFILTER_DEBUG
 	unsigned int olddebug = skb->nf_debug;
 #endif
 
-	if (sk) {
-		sock_hold(sk);
-		skb_orphan(skb);
-	}
+	skb_orphan(skb);
 
 	local_bh_disable();
 	skb = ip_defrag(skb, user);
 	local_bh_enable();
 
-	if (!skb) {
-		if (sk)
-			sock_put(sk);
-		return skb;
-	}
-
-	if (sk) {
-		skb_set_owner_w(skb, sk);
-		sock_put(sk);
-	}
-
-	ip_send_check(skb->nh.iph);
-	skb->nfcache |= NFC_ALTERED;
+	if (skb) {
+		ip_send_check(skb->nh.iph);
+		skb->nfcache |= NFC_ALTERED;
 #ifdef CONFIG_NETFILTER_DEBUG
 	/* Packet path as if nothing had happened. */
 	skb->nf_debug = olddebug;
 #endif
+	}
+
 	return skb;
 }
 
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 0f0711417c9d..b78a53586804 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -552,13 +552,17 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
 			    skb_headroom(frag) < hlen)
 				goto slow_path;
 
-			/* Correct socket ownership. */
-			if (frag->sk == NULL)
-				goto slow_path;
-
 			/* Partially cloned skb? */
 			if (skb_shared(frag))
 				goto slow_path;
+
+			BUG_ON(frag->sk);
+			if (skb->sk) {
+				sock_hold(skb->sk);
+				frag->sk = skb->sk;
+				frag->destructor = sock_wfree;
+				skb->truesize -= frag->truesize;
+			}
 		}
 
 		err = 0;
@@ -1116,12 +1120,10 @@ int ip6_push_pending_frames(struct sock *sk)
 		tail_skb = &(tmp_skb->next);
 		skb->len += tmp_skb->len;
 		skb->data_len += tmp_skb->len;
-#if 0 /* Logically correct, but useless work, ip_fragment() will have to undo */
 		skb->truesize += tmp_skb->truesize;
 		__sock_put(tmp_skb->sk);
 		tmp_skb->destructor = NULL;
 		tmp_skb->sk = NULL;
-#endif
 	}
 
 	ipv6_addr_copy(final_dst, &fl->fl6_dst);
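
Both ip_fragment() and ip6_fragment() above now transfer socket ownership onto each pre-built fragment on the fast path: the fragment takes a reference on skb->sk, gets sock_wfree as its destructor, and its truesize is subtracted from the parent skb, so the socket's send-buffer charge is released piecewise as each fragment is freed (and ip6_push_pending_frames no longer has to suppress the truesize transfer). A toy model of that accounting is sketched below; sock_model, charge and release are invented names and the byte counts are made-up example values, used only to show that the charge stays balanced.

    #include <assert.h>
    #include <stdio.h>

    /* Toy model of the ownership transfer in ip_fragment()/ip6_fragment():
     * the parent buffer's charge against the socket is split across the
     * fragments, and each buffer releases its share when it is freed. */
    struct sock_model { int wmem; };          /* bytes charged to the socket */

    static void charge(struct sock_model *sk, int truesize)  { sk->wmem += truesize; }
    static void release(struct sock_model *sk, int truesize) { sk->wmem -= truesize; }

    int main(void)
    {
        struct sock_model sk = { 0 };
        int parent_truesize = 3000;            /* example sizes only */
        int frag_truesize[2] = { 1200, 1200 };

        charge(&sk, parent_truesize);          /* whole datagram charged once */

        /* Transfer: each fragment now owns part of the charge
         * (frag->sk = skb->sk, frag->destructor = sock_wfree,
         *  skb->truesize -= frag->truesize in the patch). */
        for (int i = 0; i < 2; i++)
            parent_truesize -= frag_truesize[i];

        /* Each buffer releases exactly what it still owns when freed. */
        release(&sk, parent_truesize);         /* head fragment freed */
        for (int i = 0; i < 2; i++)
            release(&sk, frag_truesize[i]);    /* sock_wfree on each fragment */

        assert(sk.wmem == 0);                  /* accounting balances */
        printf("wmem after all frees: %d\n", sk.wmem);
        return 0;
    }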
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 733bf52cef3e..e41ce458c2a9 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -735,11 +735,15 @@ static inline int do_one_broadcast(struct sock *sk,
 
 	sock_hold(sk);
 	if (p->skb2 == NULL) {
-		if (atomic_read(&p->skb->users) != 1) {
+		if (skb_shared(p->skb)) {
 			p->skb2 = skb_clone(p->skb, p->allocation);
 		} else {
-			p->skb2 = p->skb;
-			atomic_inc(&p->skb->users);
+			p->skb2 = skb_get(p->skb);
+			/*
+			 * skb ownership may have been set when
+			 * delivered to a previous socket.
+			 */
+			skb_orphan(p->skb2);
 		}
 	}
 	if (p->skb2 == NULL) {
@@ -785,11 +789,12 @@ int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
 	sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
 		do_one_broadcast(sk, &info);
 
+	kfree_skb(skb);
+
 	netlink_unlock_table();
 
 	if (info.skb2)
 		kfree_skb(info.skb2);
-	kfree_skb(skb);
 
 	if (info.delivered) {
 		if (info.congested && (allocation & __GFP_WAIT))
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index c478fc8db776..c420eba4876b 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -770,33 +770,12 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 		err = path_lookup(sunaddr->sun_path, LOOKUP_PARENT, &nd);
 		if (err)
 			goto out_mknod_parent;
-		/*
-		 * Yucky last component or no last component at all?
-		 * (foo/., foo/.., /////)
-		 */
-		err = -EEXIST;
-		if (nd.last_type != LAST_NORM)
-			goto out_mknod;
-		/*
-		 * Lock the directory.
-		 */
-		down(&nd.dentry->d_inode->i_sem);
-		/*
-		 * Do the final lookup.
-		 */
-		dentry = lookup_hash(&nd.last, nd.dentry);
+
+		dentry = lookup_create(&nd, 0);
 		err = PTR_ERR(dentry);
 		if (IS_ERR(dentry))
 			goto out_mknod_unlock;
-		err = -ENOENT;
-		/*
-		 * Special case - lookup gave negative, but... we had foo/bar/
-		 * From the vfs_mknod() POV we just have a negative dentry -
-		 * all is fine. Let's be bastards - you had / on the end, you've
-		 * been asking for (non-existent) directory. -ENOENT for you.
-		 */
-		if (nd.last.name[nd.last.len] && !dentry->d_inode)
-			goto out_mknod_dput;
+
 		/*
 		 * All right, let's create it.
 		 */
@@ -845,7 +824,6 @@ out_mknod_dput:
 	dput(dentry);
 out_mknod_unlock:
 	up(&nd.dentry->d_inode->i_sem);
-out_mknod:
 	path_release(&nd);
 out_mknod_parent:
 	if (err==-EEXIST)
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c
index 080aae243ce0..2f4531fcaca2 100644
--- a/net/xfrm/xfrm_algo.c
+++ b/net/xfrm/xfrm_algo.c
@@ -698,7 +698,7 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
 			return -ENOMEM;
 
 		if (skb1->sk)
-			skb_set_owner_w(skb, skb1->sk);
+			skb_set_owner_w(skb2, skb1->sk);
 
 		/* Looking around. Are we still alive?
 		 * OK, link new skb, drop old one */
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 5ddda2c98af9..97509011c274 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -34,14 +34,21 @@ static int verify_one_alg(struct rtattr **xfrma, enum xfrm_attr_type_t type)
 {
 	struct rtattr *rt = xfrma[type - 1];
 	struct xfrm_algo *algp;
+	int len;
 
 	if (!rt)
 		return 0;
 
-	if ((rt->rta_len - sizeof(*rt)) < sizeof(*algp))
+	len = (rt->rta_len - sizeof(*rt)) - sizeof(*algp);
+	if (len < 0)
 		return -EINVAL;
 
 	algp = RTA_DATA(rt);
+
+	len -= (algp->alg_key_len + 7U) / 8;
+	if (len < 0)
+		return -EINVAL;
+
 	switch (type) {
 	case XFRMA_ALG_AUTH:
 		if (!algp->alg_key_len &&
@@ -162,6 +169,7 @@ static int attach_one_algo(struct xfrm_algo **algpp, u8 *props,
 	struct rtattr *rta = u_arg;
 	struct xfrm_algo *p, *ualg;
 	struct xfrm_algo_desc *algo;
+	int len;
 
 	if (!rta)
 		return 0;
@@ -173,11 +181,12 @@ static int attach_one_algo(struct xfrm_algo **algpp, u8 *props,
 		return -ENOSYS;
 	*props = algo->desc.sadb_alg_id;
 
-	p = kmalloc(sizeof(*ualg) + ualg->alg_key_len, GFP_KERNEL);
+	len = sizeof(*ualg) + (ualg->alg_key_len + 7U) / 8;
+	p = kmalloc(len, GFP_KERNEL);
 	if (!p)
 		return -ENOMEM;
 
-	memcpy(p, ualg, sizeof(*ualg) + ualg->alg_key_len);
+	memcpy(p, ualg, len);
 	*algpp = p;
 	return 0;
 }
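
The xfrm_user changes treat alg_key_len as a length in bits: verify_one_alg() now also checks that the attribute payload can hold the key after rounding the bit count up to bytes, and attach_one_algo() sizes its kmalloc/memcpy the same way, so a bogus key length can no longer read or copy past the attribute. A small stand-alone sketch of that arithmetic follows; key_bytes and verify_len are invented names, and the 160-bit key and byte counts are example values only.

    #include <stdio.h>

    /* alg_key_len is a key length in *bits*; payloads are sized in bytes,
     * so both the check and the copy round up with (bits + 7) / 8. */
    static unsigned int key_bytes(unsigned int alg_key_len_bits)
    {
        return (alg_key_len_bits + 7U) / 8;
    }

    /* Mirror of the two-step length validation: first make sure the
     * attribute can hold the xfrm_algo header, then that what remains can
     * hold the key.  sizeof_algo and payload_len stand in for
     * sizeof(*algp) and rt->rta_len - sizeof(*rt). */
    static int verify_len(int payload_len, int sizeof_algo, unsigned int key_bits)
    {
        int len = payload_len - sizeof_algo;
        if (len < 0)
            return -1;
        len -= (int)key_bytes(key_bits);
        if (len < 0)
            return -1;
        return 0;
    }

    int main(void)
    {
        unsigned int bits = 160;                /* e.g. a 160-bit HMAC key */
        printf("160-bit key needs %u bytes\n", key_bytes(bits));              /* 20 */
        printf("84-byte payload, 64-byte header: %d\n", verify_len(84, 64, bits)); /* 0, ok */
        printf("74-byte payload, 64-byte header: %d\n", verify_len(74, 64, bits)); /* -1, too short */
        return 0;
    }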