author     Linus Torvalds <torvalds@g5.osdl.org>  2006-02-20 23:03:01 -0500
committer  Linus Torvalds <torvalds@g5.osdl.org>  2006-02-20 23:03:01 -0500
commit     f24211e7b3e6d02251c53c48821003c77495efef
tree       1ac0e99cb2a8af160b852d2779eab712806381b7
parent     cf535ea52e68e3ee6f4a90cc383faa1ee857f14d
parent     a8372f035aa2f6717123eb30679a08b619321dd4
Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
 drivers/net/irda/irda-usb.c             | 90
 drivers/net/irda/irda-usb.h             |  7
 net/core/skbuff.c                       | 10
 net/ipv4/netfilter/ip_nat_core.c        | 18
 net/ipv4/netfilter/ip_nat_standalone.c  | 16
 net/xfrm/xfrm_policy.c                  |  2
 6 files changed, 96 insertions(+), 47 deletions(-)
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index fa176ffb4ad5..8936058a3cce 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -108,6 +108,7 @@ static void irda_usb_close(struct irda_usb_cb *self);
 static void speed_bulk_callback(struct urb *urb, struct pt_regs *regs);
 static void write_bulk_callback(struct urb *urb, struct pt_regs *regs);
 static void irda_usb_receive(struct urb *urb, struct pt_regs *regs);
+static void irda_usb_rx_defer_expired(unsigned long data);
 static int irda_usb_net_open(struct net_device *dev);
 static int irda_usb_net_close(struct net_device *dev);
 static int irda_usb_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
@@ -677,6 +678,12 @@ static void irda_usb_net_timeout(struct net_device *netdev)
  * on the interrupt pipe and hang the Rx URB only when an interrupt is
  * received.
  * Jean II
+ *
+ * Note : don't read the above as what we are currently doing, but as
+ * something we could do with KC dongle. Also don't forget that the
+ * interrupt pipe is not part of the original standard, so this would
+ * need to be optional...
+ * Jean II
  */
 
 /*------------------------------------------------------------------*/
@@ -704,10 +711,8 @@ static void irda_usb_submit(struct irda_usb_cb *self, struct sk_buff *skb, struc
 	/* Reinitialize URB */
 	usb_fill_bulk_urb(urb, self->usbdev, 
 		      usb_rcvbulkpipe(self->usbdev, self->bulk_in_ep),
-		      skb->data, skb->truesize,
+		      skb->data, IRDA_SKB_MAX_MTU,
 		      irda_usb_receive, skb);
-	/* Note : unlink *must* be synchronous because of the code in
-	 * irda_usb_net_close() -> free the skb - Jean II */
 	urb->status = 0;
 
 	/* Can be called from irda_usb_receive (irq handler) -> GFP_ATOMIC */
@@ -734,6 +739,7 @@ static void irda_usb_receive(struct urb *urb, struct pt_regs *regs)
 	struct irda_skb_cb *cb;
 	struct sk_buff *newskb;
 	struct sk_buff *dataskb;
+	struct urb *next_urb;
 	int docopy;
 
 	IRDA_DEBUG(2, "%s(), len=%d\n", __FUNCTION__, urb->actual_length);
@@ -755,20 +761,37 @@ static void irda_usb_receive(struct urb *urb, struct pt_regs *regs)
 	if (urb->status != 0) {
 		switch (urb->status) {
 		case -EILSEQ:
-			self->stats.rx_errors++;
 			self->stats.rx_crc_errors++;
-			break;
+			/* Also precursor to a hot-unplug on UHCI. */
+			/* Fallthrough... */
 		case -ECONNRESET:		/* -104 */
-			IRDA_DEBUG(0, "%s(), Connection Reset (-104), transfer_flags 0x%04X \n", __FUNCTION__, urb->transfer_flags);
+			/* Random error, if I remember correctly */
 			/* uhci_cleanup_unlink() is going to kill the Rx
 			 * URB just after we return. No problem, at this
 			 * point the URB will be idle ;-) - Jean II */
-			break;
+		case -ESHUTDOWN:		/* -108 */
+			/* That's usually a hot-unplug. Submit will fail... */
+		case -ETIMEDOUT:		/* -110 */
+			/* Usually precursor to a hot-unplug on OHCI. */
 		default:
-			IRDA_DEBUG(0, "%s(), RX status %d,transfer_flags 0x%04X \n", __FUNCTION__, urb->status, urb->transfer_flags);
+			self->stats.rx_errors++;
+			IRDA_DEBUG(0, "%s(), RX status %d, transfer_flags 0x%04X \n", __FUNCTION__, urb->status, urb->transfer_flags);
 			break;
 		}
-		goto done;
+		/* If we received an error, we don't want to resubmit the
+		 * Rx URB straight away but to give the USB layer a little
+		 * bit of breathing room.
+		 * We are in the USB thread context, therefore there is a
+		 * danger of recursion (new URB we submit fails, we come
+		 * back here).
+		 * With recent USB stack (2.6.15+), I'm seeing that on
+		 * hot unplug of the dongle...
+		 * Lowest effective timer is 10ms...
+		 * Jean II */
+		self->rx_defer_timer.function = &irda_usb_rx_defer_expired;
+		self->rx_defer_timer.data = (unsigned long) urb;
+		mod_timer(&self->rx_defer_timer, jiffies + (10 * HZ / 1000));
+		return;
 	}
 
 	/* Check for empty frames */
@@ -845,13 +868,45 @@ done:
 	 * idle slot....
 	 * Jean II */
 	/* Note : with this scheme, we could submit the idle URB before
-	 * processing the Rx URB. Another time... Jean II */
+	 * processing the Rx URB. I don't think it would buy us anything as
+	 * we are running in the USB thread context. Jean II */
+	next_urb = self->idle_rx_urb;
 
-	/* Submit the idle URB to replace the URB we've just received */
-	irda_usb_submit(self, skb, self->idle_rx_urb);
 	/* Recycle Rx URB : Now, the idle URB is the present one */
 	urb->context = NULL;
 	self->idle_rx_urb = urb;
+
+	/* Submit the idle URB to replace the URB we've just received.
+	 * Do it last to avoid race conditions... Jean II */
+	irda_usb_submit(self, skb, next_urb);
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * In case of errors, we want the USB layer to have time to recover.
+ * Now, it is time to resubmit our Rx URB...
+ */
+static void irda_usb_rx_defer_expired(unsigned long data)
+{
+	struct urb *urb = (struct urb *) data;
+	struct sk_buff *skb = (struct sk_buff *) urb->context;
+	struct irda_usb_cb *self;
+	struct irda_skb_cb *cb;
+	struct urb *next_urb;
+
+	IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+
+	/* Find ourselves */
+	cb = (struct irda_skb_cb *) skb->cb;
+	IRDA_ASSERT(cb != NULL, return;);
+	self = (struct irda_usb_cb *) cb->context;
+	IRDA_ASSERT(self != NULL, return;);
+
+	/* Same stuff as when Rx is done, see above... */
+	next_urb = self->idle_rx_urb;
+	urb->context = NULL;
+	self->idle_rx_urb = urb;
+	irda_usb_submit(self, skb, next_urb);
 }
 
 /*------------------------------------------------------------------*/
@@ -990,6 +1045,9 @@ static int irda_usb_net_close(struct net_device *netdev)
 	/* Stop network Tx queue */
 	netif_stop_queue(netdev);
 
+	/* Kill deferred Rx URB */
+	del_timer(&self->rx_defer_timer);
+
 	/* Deallocate all the Rx path buffers (URBs and skb) */
 	for (i = 0; i < IU_MAX_RX_URBS; i++) {
 		struct urb *urb = self->rx_urb[i];
@@ -1365,6 +1423,7 @@ static int irda_usb_probe(struct usb_interface *intf,
 	self = net->priv;
 	self->netdev = net;
 	spin_lock_init(&self->lock);
+	init_timer(&self->rx_defer_timer);
 
 	/* Create all of the needed urbs */
 	for (i = 0; i < IU_MAX_RX_URBS; i++) {
@@ -1498,6 +1557,9 @@ static void irda_usb_disconnect(struct usb_interface *intf)
 	 * This will stop/deactivate the Tx path. - Jean II */
 	self->present = 0;
 
+	/* Kill deferred Rx URB */
+	del_timer(&self->rx_defer_timer);
+
 	/* We need to have irq enabled to unlink the URBs. That's OK,
 	 * at this point the Tx path is gone - Jean II */
 	spin_unlock_irqrestore(&self->lock, flags);
@@ -1507,11 +1569,11 @@ static void irda_usb_disconnect(struct usb_interface *intf)
 		/* Accept no more transmissions */
 		/*netif_device_detach(self->netdev);*/
 		netif_stop_queue(self->netdev);
-		/* Stop all the receive URBs */
+		/* Stop all the receive URBs. Must be synchronous. */
 		for (i = 0; i < IU_MAX_RX_URBS; i++)
 			usb_kill_urb(self->rx_urb[i]);
 		/* Cancel Tx and speed URB.
-		 * Toggle flags to make sure it's synchronous. */
+		 * Make sure it's synchronous to avoid races. */
 		usb_kill_urb(self->tx_urb);
 		usb_kill_urb(self->speed_urb);
 	}
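
The deferral above is the heart of this patch: when an Rx URB completes with an error, the driver no longer resubmits it inline from the completion handler (which runs in USB context and can recurse if the fresh submit fails immediately, as seen on hot-unplug), but arms a one-shot timer and resubmits roughly 10ms later. A minimal standalone sketch of the pattern follows, using the 2.6-era timer API and hypothetical names (struct my_dev, rx_complete, rx_defer_expired are illustrative, not the driver's); the real driver additionally recycles its idle URB and skb as shown in the hunks above.

#include <linux/timer.h>
#include <linux/usb.h>

struct my_dev {					/* hypothetical driver state */
	struct timer_list rx_defer_timer;	/* init_timer()'d at probe,
						 * del_timer()'d at close and
						 * disconnect, as in the patch */
};

static void rx_defer_expired(unsigned long data)
{
	struct urb *urb = (struct urb *) data;

	/* The error has had ~10ms to clear; resubmit from timer context */
	usb_submit_urb(urb, GFP_ATOMIC);
}

static void rx_complete(struct urb *urb, struct pt_regs *regs)
{
	struct my_dev *dev = urb->context;

	if (urb->status != 0) {
		/* Don't resubmit inline: give the USB core breathing room */
		dev->rx_defer_timer.function = rx_defer_expired;
		dev->rx_defer_timer.data = (unsigned long) urb;
		mod_timer(&dev->rx_defer_timer, jiffies + (10 * HZ / 1000));
		return;
	}
	/* ... process the received data, then resubmit immediately ... */
	usb_submit_urb(urb, GFP_ATOMIC);
}
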
diff --git a/drivers/net/irda/irda-usb.h b/drivers/net/irda/irda-usb.h
index bd8f66542322..4026af42dd47 100644
--- a/drivers/net/irda/irda-usb.h
+++ b/drivers/net/irda/irda-usb.h
@@ -136,8 +136,6 @@ struct irda_usb_cb {
 	__u16 bulk_out_mtu;		/* Max Tx packet size in bytes */
 	__u8  bulk_int_ep;		/* Interrupt Endpoint assignments */
 
-	wait_queue_head_t wait_q;	/* for timeouts */
-
 	struct urb *rx_urb[IU_MAX_RX_URBS];	/* URBs used to receive data frames */
 	struct urb *idle_rx_urb;	/* Pointer to idle URB in Rx path */
 	struct urb *tx_urb;		/* URB used to send data frames */
@@ -147,17 +145,18 @@ struct irda_usb_cb {
 	struct net_device_stats stats;
 	struct irlap_cb   *irlap;	/* The link layer we are binded to */
 	struct qos_info qos;
-	hashbin_t *tx_list;		/* Queued transmit skb's */
 	char *speed_buff;		/* Buffer for speed changes */
 
 	struct timeval stamp;
 	struct timeval now;
 
-	spinlock_t lock;		/* For serializing operations */
+	spinlock_t lock;		/* For serializing Tx operations */
 
 	__u16 xbofs;			/* Current xbofs setting */
 	__s16 new_xbofs;		/* xbofs we need to set */
 	__u32 speed;			/* Current speed */
 	__s32 new_speed;		/* speed we need to set */
+
+	struct timer_list rx_defer_timer;	/* Wait for Rx error to clear */
 };
 
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 6766f118f070..2144952d1c6c 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -411,6 +411,9 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
 	C(pkt_type);
 	C(ip_summed);
 	C(priority);
+#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
+	C(ipvs_property);
+#endif
 	C(protocol);
 	n->destructor = NULL;
 #ifdef CONFIG_NETFILTER
@@ -422,13 +425,6 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
 	C(nfct_reasm);
 	nf_conntrack_get_reasm(skb->nfct_reasm);
 #endif
-#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
-	C(ipvs_property);
-#endif
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
-	C(nfct_reasm);
-	nf_conntrack_get_reasm(skb->nfct_reasm);
-#endif
 #ifdef CONFIG_BRIDGE_NETFILTER
 	C(nf_bridge);
 	nf_bridge_get(skb->nf_bridge);
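
For context on the two skb_clone() hunks above: C() is a local helper macro that copies one field from the original skb to the clone, so the deleted block was a straight duplicate that copied ipvs_property a second time and, worse, called nf_conntrack_get_reasm() twice, taking two references on nfct_reasm for a clone that only ever drops one. Roughly, the macro is:

/* Local helper inside skb_clone() in this era of net/core/skbuff.c,
 * where n is the clone and skb the original: */
#define C(x) n->x = skb->x
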
diff --git a/net/ipv4/netfilter/ip_nat_core.c b/net/ipv4/netfilter/ip_nat_core.c
index c1a61462507f..1741d555ad0d 100644
--- a/net/ipv4/netfilter/ip_nat_core.c
+++ b/net/ipv4/netfilter/ip_nat_core.c
@@ -434,6 +434,7 @@ int ip_nat_icmp_reply_translation(struct sk_buff **pskb,
 	} *inside;
 	struct ip_conntrack_tuple inner, target;
 	int hdrlen = (*pskb)->nh.iph->ihl * 4;
+	unsigned long statusbit;
 
 	if (!skb_make_writable(pskb, hdrlen + sizeof(*inside)))
 		return 0;
@@ -495,17 +496,16 @@ int ip_nat_icmp_reply_translation(struct sk_buff **pskb,
 
 	/* Change outer to look the reply to an incoming packet
 	 * (proto 0 means don't invert per-proto part). */
+	if (manip == IP_NAT_MANIP_SRC)
+		statusbit = IPS_SRC_NAT;
+	else
+		statusbit = IPS_DST_NAT;
 
-	/* Obviously, we need to NAT destination IP, but source IP
-	   should be NAT'ed only if it is from a NAT'd host.
+	/* Invert if this is reply dir. */
+	if (dir == IP_CT_DIR_REPLY)
+		statusbit ^= IPS_NAT_MASK;
 
-	   Explanation: some people use NAT for anonymizing.  Also,
-	   CERT recommends dropping all packets from private IP
-	   addresses (although ICMP errors from internal links with
-	   such addresses are not too uncommon, as Alan Cox points
-	   out) */
-	if (manip != IP_NAT_MANIP_SRC
-	    || ((*pskb)->nh.iph->saddr == ct->tuplehash[dir].tuple.src.ip)) {
+	if (ct->status & statusbit) {
 		invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);
 		if (!manip_pkt(0, pskb, 0, &target, manip))
 			return 0;
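
The rewritten condition is easier to follow once the flag algebra is spelled out: conntrack records which manipulations were actually set up on a connection via the IPS_SRC_NAT and IPS_DST_NAT status bits, and IPS_NAT_MASK is the OR of the two, so XOR-ing with the mask flips one bit into the other for the reply direction. A condensed sketch of the same logic (not a drop-in; declarations are in the hunk above):

/* Translate the outer header only if this connection really had the
 * corresponding NAT applied, as seen from this packet's direction. */
statusbit = (manip == IP_NAT_MANIP_SRC) ? IPS_SRC_NAT : IPS_DST_NAT;
if (dir == IP_CT_DIR_REPLY)
	statusbit ^= IPS_NAT_MASK;	/* IPS_NAT_MASK == IPS_SRC_NAT | IPS_DST_NAT */
if (ct->status & statusbit) {
	/* ... invert the reply tuple and rewrite the outer header ... */
}
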
diff --git a/net/ipv4/netfilter/ip_nat_standalone.c b/net/ipv4/netfilter/ip_nat_standalone.c
index 7c3f7d380240..ab1f88fa21ec 100644
--- a/net/ipv4/netfilter/ip_nat_standalone.c
+++ b/net/ipv4/netfilter/ip_nat_standalone.c
@@ -200,20 +200,14 @@ ip_nat_in(unsigned int hooknum,
 		  const struct net_device *out,
 		  int (*okfn)(struct sk_buff *))
 {
-	struct ip_conntrack *ct;
-	enum ip_conntrack_info ctinfo;
 	unsigned int ret;
+	u_int32_t daddr = (*pskb)->nh.iph->daddr;
 
 	ret = ip_nat_fn(hooknum, pskb, in, out, okfn);
 	if (ret != NF_DROP && ret != NF_STOLEN
-	    && (ct = ip_conntrack_get(*pskb, &ctinfo)) != NULL) {
-		enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
-
-		if (ct->tuplehash[dir].tuple.dst.ip !=
-		    ct->tuplehash[!dir].tuple.src.ip) {
-			dst_release((*pskb)->dst);
-			(*pskb)->dst = NULL;
-		}
+	    && daddr != (*pskb)->nh.iph->daddr) {
+		dst_release((*pskb)->dst);
+		(*pskb)->dst = NULL;
 	}
 	return ret;
 }
@@ -276,7 +270,7 @@ ip_nat_local_fn(unsigned int hooknum,
 		    ct->tuplehash[!dir].tuple.src.ip
 #ifdef CONFIG_XFRM
 		    || ct->tuplehash[dir].tuple.dst.u.all !=
-		       ct->tuplehash[dir].tuple.src.u.all
+		       ct->tuplehash[!dir].tuple.src.u.all
 #endif
 		    )
 			return ip_route_me_harder(pskb) == 0 ? ret : NF_DROP;
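
The ip_nat_in() rewrite above trades a conntrack lookup for a direct before/after check on the packet itself: snapshot the destination address, let NAT run, and invalidate the cached route only if DNAT actually rewrote that address, since the skb's dst was resolved for the pre-NAT destination. In outline (a sketch restating the logic the hunk adds, with annotations):

u_int32_t daddr = (*pskb)->nh.iph->daddr;	/* destination before NAT */

ret = ip_nat_fn(hooknum, pskb, in, out, okfn);
if (ret != NF_DROP && ret != NF_STOLEN
    && daddr != (*pskb)->nh.iph->daddr) {	/* DNAT changed it */
	dst_release((*pskb)->dst);		/* route was for the old daddr */
	(*pskb)->dst = NULL;			/* force a fresh route lookup */
}
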
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 98ec53bd3ac7..5e6b05ac1260 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -885,8 +885,6 @@ restart:
 		 * We can't enlist stable bundles either.
 		 */
 		write_unlock_bh(&policy->lock);
-
-		xfrm_pol_put(policy);
 		if (dst)
 			dst_free(dst);
 