author	Jason Gunthorpe <jgg@mellanox.com>	2019-08-21 13:12:29 -0400
committer	Jason Gunthorpe <jgg@mellanox.com>	2019-08-21 19:58:18 -0400
commit	daa138a58c802e7b4c2fb73f9b85bb082616ef43 (patch)
tree	be913e8e3745bb367d2ba371598f447649102cfc /net/openvswitch/flow.c
parent	6869b7b206595ae0e326f59719090351eb8f4f5d (diff)
parent	fba0e448a2c5b297a4ddc1ec4e48f4aa6600a1c9 (diff)
Merge branch 'odp_fixes' into hmm.git
From rdma.git Jason Gunthorpe says:

====================
This is a collection of general cleanups for ODP to clarify some of the
flows around umem creation and use of the interval tree.
====================

The branch is based on v5.3-rc5 due to dependencies, and is being taken
into hmm.git due to dependencies in the next patches.

* odp_fixes:
  RDMA/mlx5: Use odp instead of mr->umem in pagefault_mr
  RDMA/mlx5: Use ib_umem_start instead of umem.address
  RDMA/core: Make invalidate_range a device operation
  RDMA/odp: Use kvcalloc for the dma_list and page_list
  RDMA/odp: Check for overflow when computing the umem_odp end
  RDMA/odp: Provide ib_umem_odp_release() to undo the allocs
  RDMA/odp: Split creating a umem_odp from ib_umem_get
  RDMA/odp: Make the three ways to create a umem_odp clear
  RMDA/odp: Consolidate umem_odp initialization
  RDMA/odp: Make it clearer when a umem is an implicit ODP umem
  RDMA/odp: Iterate over the whole rbtree directly
  RDMA/odp: Use the common interval tree library instead of generic
  RDMA/mlx5: Fix MR npages calculation for IB_ACCESS_HUGETLB

Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
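By way of illustration, here is a minimal sketch of the kind of check the
"RDMA/odp: Check for overflow when computing the umem_odp end" title
describes. This is not the patch body; odp_compute_end() is a hypothetical
helper, though check_add_overflow(), PAGE_MASK and PAGE_ALIGN() are the
standard kernel facilities:

#include <linux/errno.h>
#include <linux/mm.h>		/* PAGE_ALIGN, PAGE_MASK */
#include <linux/overflow.h>	/* check_add_overflow() */

/*
 * Sketch: derive the [start, end) range of an ODP umem from a
 * user-supplied address and length, failing with -EOVERFLOW instead
 * of silently wrapping when addr + length exceeds ULONG_MAX.
 */
static int odp_compute_end(unsigned long addr, size_t length,
			   unsigned long *start, unsigned long *end)
{
	unsigned long upper;

	/* check_add_overflow() returns true if the sum wrapped. */
	if (check_add_overflow(addr, (unsigned long)length, &upper))
		return -EOVERFLOW;

	*start = addr & PAGE_MASK;	/* round start down to a page */
	*end   = PAGE_ALIGN(upper);	/* round end up to a page;
					 * a fuller version would also
					 * reject values this rounding
					 * could itself wrap. */
	return 0;
}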
Diffstat (limited to 'net/openvswitch/flow.c')
-rw-r--r--	net/openvswitch/flow.c	8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index dca3b1e2acf0..bc89e16e0505 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -59,7 +59,7 @@ u64 ovs_flow_used_time(unsigned long flow_jiffies)
 void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
 			   const struct sk_buff *skb)
 {
-	struct flow_stats *stats;
+	struct sw_flow_stats *stats;
 	unsigned int cpu = smp_processor_id();
 	int len = skb->len + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
 
@@ -87,7 +87,7 @@ void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
 	if (likely(flow->stats_last_writer != -1) &&
 	    likely(!rcu_access_pointer(flow->stats[cpu]))) {
 		/* Try to allocate CPU-specific stats. */
-		struct flow_stats *new_stats;
+		struct sw_flow_stats *new_stats;
 
 		new_stats =
 			kmem_cache_alloc_node(flow_stats_cache,
@@ -134,7 +134,7 @@ void ovs_flow_stats_get(const struct sw_flow *flow,
 
 	/* We open code this to make sure cpu 0 is always considered */
 	for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask)) {
-		struct flow_stats *stats = rcu_dereference_ovsl(flow->stats[cpu]);
+		struct sw_flow_stats *stats = rcu_dereference_ovsl(flow->stats[cpu]);
 
 		if (stats) {
 			/* Local CPU may write on non-local stats, so we must
@@ -158,7 +158,7 @@ void ovs_flow_stats_clear(struct sw_flow *flow)
 
 	/* We open code this to make sure cpu 0 is always considered */
 	for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask)) {
-		struct flow_stats *stats = ovsl_dereference(flow->stats[cpu]);
+		struct sw_flow_stats *stats = ovsl_dereference(flow->stats[cpu]);
 
 		if (stats) {
 			spin_lock_bh(&stats->lock);
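As an aside, the "We open code this to make sure cpu 0 is always
considered" comment in the last two hunks refers to the loop shape, not
to the rename: for_each_cpu() visits only CPUs whose bits are set in the
mask, whereas the open-coded loop evaluates cpu == 0 first no matter
what and only then walks the remaining set bits via cpumask_next(). A
minimal sketch of that control flow, with walk_used_cpus() and
read_stats() as hypothetical names (struct sw_flow itself comes from
net/openvswitch/flow.h):

#include <linux/cpumask.h>
#include "flow.h"	/* struct sw_flow, cpu_used_mask */

/* Stand-in for the per-CPU accumulation done in the hunks above. */
static void read_stats(const struct sw_flow *flow, unsigned int cpu);

static void walk_used_cpus(const struct sw_flow *flow)
{
	unsigned int cpu;

	/*
	 * for_each_cpu(cpu, &flow->cpu_used_mask) would skip CPU 0
	 * whenever its bit happens to be clear.  Starting at 0
	 * unconditionally considers CPU 0 on every walk; after that,
	 * cpumask_next(n, mask) returns the first set CPU after n,
	 * so only CPUs that actually touched the flow are visited.
	 */
	for (cpu = 0; cpu < nr_cpu_ids;
	     cpu = cpumask_next(cpu, &flow->cpu_used_mask))
		read_stats(flow, cpu);
}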