diff options
author | Stephen Hemminger <stephen@networkplumber.org> | 2013-12-08 15:15:44 -0500 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2013-12-10 21:57:11 -0500 |
commit | 8e3bff96afa67369008153f3326fa5ce985cabab (patch) | |
tree | ee0e38aa3b976362016a9a9727c60be63aed376e /net/ipv4 | |
parent | 22a93216140e5097e8d9d2f99784cfd1c6158ee6 (diff) |
net: more spelling fixes
Various spelling fixes in networking stack
Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4')
-rw-r--r-- | net/ipv4/ip_sockglue.c | 2 | ||||
-rw-r--r-- | net/ipv4/tcp_output.c | 10 |
2 files changed, 6 insertions, 6 deletions
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index ddf32a6bc415..a9fc435dc89f 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c | |||
@@ -1051,7 +1051,7 @@ e_inval: | |||
1051 | * | 1051 | * |
1052 | * To support IP_CMSG_PKTINFO option, we store rt_iif and specific | 1052 | * To support IP_CMSG_PKTINFO option, we store rt_iif and specific |
1053 | * destination in skb->cb[] before dst drop. | 1053 | * destination in skb->cb[] before dst drop. |
1054 | * This way, receiver doesnt make cache line misses to read rtable. | 1054 | * This way, receiver doesn't make cache line misses to read rtable. |
1055 | */ | 1055 | */ |
1056 | void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb) | 1056 | void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb) |
1057 | { | 1057 | { |
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 993da005e087..2a69f42e51ca 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -408,7 +408,7 @@ struct tcp_out_options { | |||
408 | * Beware: Something in the Internet is very sensitive to the ordering of | 408 | * Beware: Something in the Internet is very sensitive to the ordering of |
409 | * TCP options, we learned this through the hard way, so be careful here. | 409 | * TCP options, we learned this through the hard way, so be careful here. |
410 | * Luckily we can at least blame others for their non-compliance but from | 410 | * Luckily we can at least blame others for their non-compliance but from |
411 | * inter-operatibility perspective it seems that we're somewhat stuck with | 411 | * inter-operability perspective it seems that we're somewhat stuck with |
412 | * the ordering which we have been using if we want to keep working with | 412 | * the ordering which we have been using if we want to keep working with |
413 | * those broken things (not that it currently hurts anybody as there isn't | 413 | * those broken things (not that it currently hurts anybody as there isn't |
414 | * particular reason why the ordering would need to be changed). | 414 | * particular reason why the ordering would need to be changed). |
@@ -681,7 +681,7 @@ static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb | |||
681 | * | 681 | * |
682 | * Its important tcp_wfree() can be replaced by sock_wfree() in the event skb | 682 | * Its important tcp_wfree() can be replaced by sock_wfree() in the event skb |
683 | * needs to be reallocated in a driver. | 683 | * needs to be reallocated in a driver. |
684 | * The invariant being skb->truesize substracted from sk->sk_wmem_alloc | 684 | * The invariant being skb->truesize subtracted from sk->sk_wmem_alloc |
685 | * | 685 | * |
686 | * Since transmit from skb destructor is forbidden, we use a tasklet | 686 | * Since transmit from skb destructor is forbidden, we use a tasklet |
687 | * to process all sockets that eventually need to send more skbs. | 687 | * to process all sockets that eventually need to send more skbs. |
@@ -701,9 +701,9 @@ static void tcp_tsq_handler(struct sock *sk) | |||
701 | tcp_write_xmit(sk, tcp_current_mss(sk), 0, 0, GFP_ATOMIC); | 701 | tcp_write_xmit(sk, tcp_current_mss(sk), 0, 0, GFP_ATOMIC); |
702 | } | 702 | } |
703 | /* | 703 | /* |
704 | * One tasklest per cpu tries to send more skbs. | 704 | * One tasklet per cpu tries to send more skbs. |
705 | * We run in tasklet context but need to disable irqs when | 705 | * We run in tasklet context but need to disable irqs when |
706 | * transfering tsq->head because tcp_wfree() might | 706 | * transferring tsq->head because tcp_wfree() might |
707 | * interrupt us (non NAPI drivers) | 707 | * interrupt us (non NAPI drivers) |
708 | */ | 708 | */ |
709 | static void tcp_tasklet_func(unsigned long data) | 709 | static void tcp_tasklet_func(unsigned long data) |
@@ -797,7 +797,7 @@ void __init tcp_tasklet_init(void) | |||
797 | 797 | ||
798 | /* | 798 | /* |
799 | * Write buffer destructor automatically called from kfree_skb. | 799 | * Write buffer destructor automatically called from kfree_skb. |
800 | * We cant xmit new skbs from this context, as we might already | 800 | * We can't xmit new skbs from this context, as we might already |
801 | * hold qdisc lock. | 801 | * hold qdisc lock. |
802 | */ | 802 | */ |
803 | void tcp_wfree(struct sk_buff *skb) | 803 | void tcp_wfree(struct sk_buff *skb) |