path: root/net/ipv4/af_inet.c
author    Eric W. Biederman <ebiederm@xmission.com>    2014-03-14 00:26:42 -0400
committer David S. Miller <davem@davemloft.net>        2014-03-14 22:41:36 -0400
commit    57a7744e09867ebcfa0ccf1d6d529caa7728d552 (patch)
tree      6407fee7138787a24bf9251abfeeae69a239028a /net/ipv4/af_inet.c
parent    85dcce7a73f1cc59f7a96fe52713b1630f4ca272 (diff)
net: Replace u64_stats_fetch_begin_bh to u64_stats_fetch_begin_irq
Replace the bh safe variant with the hard irq safe variant. We need a hard
irq safe variant to deal with netpoll transmitting packets from hard irq
context, and we need it in most if not all of the places using the bh safe
variant.

Except on 32bit uni-processor the code is exactly the same so don't bother
with a bh variant, just have a hard irq safe variant that everyone can use.

Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
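For readers unfamiliar with the u64_stats_sync API being converted here: on
32-bit SMP the reader side is a retry loop around a seqcount (on 64-bit it
compiles away), which is exactly the pattern in the hunk below. The following
is a minimal sketch of that reader-side pattern using the hard-irq-safe
helpers this series introduces; the struct and function names (my_pcpu_stats,
my_fold_rx_bytes, rx_bytes) are hypothetical and not taken from af_inet.c.

#include <linux/u64_stats_sync.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>

/* Hypothetical per-CPU stats block protected by a u64_stats_sync. */
struct my_pcpu_stats {
	u64			rx_bytes;
	struct u64_stats_sync	syncp;
};

/* Sum a per-CPU 64-bit counter with the hard-irq-safe fetch helpers. */
static u64 my_fold_rx_bytes(struct my_pcpu_stats __percpu *stats)
{
	u64 total = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct my_pcpu_stats *p = per_cpu_ptr(stats, cpu);
		unsigned int start;
		u64 v;

		do {
			/* Snapshot the seqcount (no-op on 64-bit). */
			start = u64_stats_fetch_begin_irq(&p->syncp);
			v = p->rx_bytes;
			/* Retry if a writer updated the counter meanwhile. */
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		total += v;
	}
	return total;
}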
Diffstat (limited to 'net/ipv4/af_inet.c')
-rw-r--r--	net/ipv4/af_inet.c	4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 19ab78aca547..8c54870db792 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1505,9 +1505,9 @@ u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t syncp_offset)
 		bhptr = per_cpu_ptr(mib[0], cpu);
 		syncp = (struct u64_stats_sync *)(bhptr + syncp_offset);
 		do {
-			start = u64_stats_fetch_begin_bh(syncp);
+			start = u64_stats_fetch_begin_irq(syncp);
 			v = *(((u64 *) bhptr) + offt);
-		} while (u64_stats_fetch_retry_bh(syncp, start));
+		} while (u64_stats_fetch_retry_irq(syncp, start));
 
 		res += v;
 	}
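For completeness, a hedged sketch of the matching writer side, continuing the
hypothetical my_pcpu_stats example above: updates are wrapped in
u64_stats_update_begin()/u64_stats_update_end(), which on 32-bit SMP bumps the
seqcount that the reader loop retries on.

/* Hypothetical update path pairing with the reader loop sketched earlier. */
static void my_count_rx(struct my_pcpu_stats __percpu *stats, unsigned int len)
{
	struct my_pcpu_stats *p = this_cpu_ptr(stats);

	u64_stats_update_begin(&p->syncp);
	p->rx_bytes += len;
	u64_stats_update_end(&p->syncp);
}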