-rw-r--r--  net/bridge/br_netlink.c   |  4
-rw-r--r--  net/core/net-sysfs.c      |  4
-rw-r--r--  net/core/netprio_cgroup.c |  2
-rw-r--r--  net/ipv4/ip_sockglue.c    |  2
-rw-r--r--  net/ipv4/tcp_output.c     | 10
5 files changed, 11 insertions, 11 deletions
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index f75d92e4f96b..c2fd6bc5f080 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -373,7 +373,7 @@ int br_setlink(struct net_device *dev, struct nlmsghdr *nlh)
 
 	p = br_port_get_rtnl(dev);
 	/* We want to accept dev as bridge itself if the AF_SPEC
-	 * is set to see if someone is setting vlan info on the brigde
+	 * is set to see if someone is setting vlan info on the bridge
 	 */
 	if (!p && !afspec)
 		return -EINVAL;
@@ -389,7 +389,7 @@ int br_setlink(struct net_device *dev, struct nlmsghdr *nlh)
 		err = br_setport(p, tb);
 		spin_unlock_bh(&p->br->lock);
 	} else {
-		/* Binary compatability with old RSTP */
+		/* Binary compatibility with old RSTP */
 		if (nla_len(protinfo) < sizeof(u8))
 			return -EINVAL;
 
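
The length check in this hunk is what the "binary compatibility with old RSTP" comment refers to: the attribute may carry only a bare byte, so the kernel verifies the payload really holds at least one byte before reading it. A minimal userspace sketch of the same validate-before-read pattern, using a hypothetical struct attr in place of the kernel's netlink types:

#include <stdint.h>
#include <stddef.h>

struct attr {
	size_t len;		/* payload length in bytes */
	const void *data;	/* payload */
};

/* Return 0 and store the byte on success, -1 if the attribute is
 * too short -- the same rejection br_setlink() performs above. */
static int attr_get_u8(const struct attr *a, uint8_t *out)
{
	if (a->len < sizeof(uint8_t))
		return -1;	/* truncated input, do not dereference */
	*out = *(const uint8_t *)a->data;
	return 0;
}
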
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index f3edf9635e02..1a7b7b1df0d1 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -676,8 +676,8 @@ static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
 		while ((mask | (mask >> 1)) != mask)
 			mask |= (mask >> 1);
 		/* On 64 bit arches, must check mask fits in table->mask (u32),
-		 * and on 32bit arches, must check RPS_DEV_FLOW_TABLE_SIZE(mask + 1)
-		 * doesnt overflow.
+		 * and on 32bit arches, must check
+		 * RPS_DEV_FLOW_TABLE_SIZE(mask + 1) doesn't overflow.
 		 */
 #if BITS_PER_LONG > 32
 		if (mask > (unsigned long)(u32)mask)
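
The two lines above the rewritten comment round mask up to the next value of the form 2^k - 1 by repeatedly OR-ing it with itself shifted right one bit; the comment then explains why the result still needs a range check on both 32- and 64-bit builds. A standalone sketch of that rounding step, in plain C outside the kernel:

#include <assert.h>

static unsigned long round_up_mask(unsigned long mask)
{
	/* Fold set bits downward until everything below the top
	 * set bit is also set, i.e. mask == 2^k - 1. */
	while ((mask | (mask >> 1)) != mask)
		mask |= (mask >> 1);
	return mask;
}

int main(void)
{
	assert(round_up_mask(5) == 7);		/* 0b101     -> 0b111     */
	assert(round_up_mask(100) == 127);	/* 0b1100100 -> 0b1111111 */
	/* On 64-bit the result can exceed a u32 (hence the cast check
	 * in the hunk above); on 32-bit, the table byte size derived
	 * from mask + 1 is what can overflow instead. */
	return 0;
}
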
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c
index 9b7cf6c85f82..1dda50c2e705 100644
--- a/net/core/netprio_cgroup.c
+++ b/net/core/netprio_cgroup.c
@@ -30,7 +30,7 @@
 #define PRIOMAP_MIN_SZ		128
 
 /*
- * Extend @dev->priomap so that it's large enough to accomodate
+ * Extend @dev->priomap so that it's large enough to accommodate
  * @target_idx. @dev->priomap.priomap_len > @target_idx after successful
  * return. Must be called under rtnl lock.
  */
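
The comment fixed here documents a grow-to-index contract: after a successful call, priomap_len exceeds target_idx, so callers can index the map without further bounds checks. A rough userspace analogue, with realloc standing in for the kernel allocation and the rtnl-lock requirement reduced to a comment; names are illustrative, not the kernel's:

#include <stdlib.h>
#include <string.h>

struct priomap_like {
	unsigned int len;	/* number of valid slots */
	unsigned int *prio;	/* priority per index */
};

/* Grow so that target_idx is in bounds; caller serializes access. */
static int priomap_extend(struct priomap_like *map, unsigned int target_idx)
{
	unsigned int new_len = target_idx + 1;
	unsigned int *p;

	if (target_idx < map->len)
		return 0;	/* already large enough */

	p = realloc(map->prio, new_len * sizeof(*p));
	if (!p)
		return -1;
	/* Zero the newly exposed tail so unset indexes read as prio 0. */
	memset(p + map->len, 0, (new_len - map->len) * sizeof(*p));
	map->prio = p;
	map->len = new_len;
	return 0;
}
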
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index ddf32a6bc415..a9fc435dc89f 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -1051,7 +1051,7 @@ e_inval:
  *
  * To support IP_CMSG_PKTINFO option, we store rt_iif and specific
  * destination in skb->cb[] before dst drop.
- * This way, receiver doesnt make cache line misses to read rtable.
+ * This way, receiver doesn't make cache line misses to read rtable.
  */
 void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb)
 {
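
The corrected comment describes a cache-friendliness trick: the two fields the receive path will need later are copied into the packet's cb[] scratch area before the dst (and its rtable) is dropped, so the eventual reader never touches the rtable's cache lines. A schematic sketch with stand-in types, not the kernel's:

#include <stdint.h>

struct rtable_like {
	int iif;		/* input interface */
	uint32_t spec_dst;	/* specific destination address */
	/* ... many more fields, likely in cold cache lines ... */
};

struct pktinfo_cb {		/* lives in the packet's cb[] scratch */
	int iif;
	uint32_t spec_dst;
};

static void pktinfo_prepare(struct pktinfo_cb *cb,
			    const struct rtable_like *rt)
{
	/* Cache the needed fields before the route is dropped, so
	 * later readers stay within the packet's own cache lines. */
	cb->iif = rt->iif;
	cb->spec_dst = rt->spec_dst;
}
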
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 993da005e087..2a69f42e51ca 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -408,7 +408,7 @@ struct tcp_out_options {
  * Beware: Something in the Internet is very sensitive to the ordering of
  * TCP options, we learned this through the hard way, so be careful here.
  * Luckily we can at least blame others for their non-compliance but from
- * inter-operatibility perspective it seems that we're somewhat stuck with
+ * inter-operability perspective it seems that we're somewhat stuck with
  * the ordering which we have been using if we want to keep working with
  * those broken things (not that it currently hurts anybody as there isn't
  * particular reason why the ordering would need to be changed).
@@ -681,7 +681,7 @@ static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb
  *
  * Its important tcp_wfree() can be replaced by sock_wfree() in the event skb
  * needs to be reallocated in a driver.
- * The invariant being skb->truesize substracted from sk->sk_wmem_alloc
+ * The invariant being skb->truesize subtracted from sk->sk_wmem_alloc
  *
  * Since transmit from skb destructor is forbidden, we use a tasklet
  * to process all sockets that eventually need to send more skbs.
@@ -701,9 +701,9 @@ static void tcp_tsq_handler(struct sock *sk)
 	tcp_write_xmit(sk, tcp_current_mss(sk), 0, 0, GFP_ATOMIC);
 }
 /*
- * One tasklest per cpu tries to send more skbs.
+ * One tasklet per cpu tries to send more skbs.
  * We run in tasklet context but need to disable irqs when
- * transfering tsq->head because tcp_wfree() might
+ * transferring tsq->head because tcp_wfree() might
  * interrupt us (non NAPI drivers)
  */
 static void tcp_tasklet_func(unsigned long data)
@@ -797,7 +797,7 @@ void __init tcp_tasklet_init(void)
 
 /*
  * Write buffer destructor automatically called from kfree_skb.
- * We cant xmit new skbs from this context, as we might already
+ * We can't xmit new skbs from this context, as we might already
  * hold qdisc lock.
  */
 void tcp_wfree(struct sk_buff *skb)
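
Taken together, the tcp_output.c comments touched above describe the TSQ deferral scheme: tcp_wfree() can run with the qdisc lock held, so instead of transmitting it queues the socket on a per-cpu list that a tasklet later splices (with irqs disabled) and drains. A stripped-down, single-threaded sketch of that queue-then-drain pattern; every name here is illustrative, not the kernel API:

#include <stdbool.h>
#include <stddef.h>

struct sock_like {
	struct sock_like *next;	/* link on the deferred-send list */
	bool queued;		/* set once to avoid double-queueing */
};

struct tsq_like {
	struct sock_like *head;	/* sockets waiting to send more skbs */
};

/* Runs in the "destructor" context where transmitting is forbidden:
 * just record that the socket wants service. */
static void wfree_like(struct tsq_like *q, struct sock_like *sk)
{
	if (!sk->queued) {
		sk->queued = true;
		sk->next = q->head;	/* kernel disables irqs here */
		q->head = sk;
	}
}

/* Runs later from a safe context: splice the list, then send. */
static void tasklet_like(struct tsq_like *q,
			 void (*xmit)(struct sock_like *))
{
	struct sock_like *sk = q->head;	/* spliced under irq-off in the kernel */

	q->head = NULL;
	while (sk) {
		struct sock_like *next = sk->next;

		sk->queued = false;
		xmit(sk);	/* the tcp_tsq_handler() equivalent */
		sk = next;
	}
}
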