aboutsummaryrefslogtreecommitdiffstats
path: root/include
diff options
context:
space:
mode:
authorPablo Neira Ayuso <pablo@netfilter.org>2009-06-13 06:30:52 -0400
committerPatrick McHardy <kaber@trash.net>2009-06-13 06:30:52 -0400
commitdd7669a92c6066b2b31bae7e04cd787092920883 (patch)
treed06a9e18aec99c5a34a191cb3391e74ba8a8ec59 /include
parentd219dce76c64f2c883dad0537fa09a56d5ff0a10 (diff)
netfilter: conntrack: optional reliable conntrack event delivery
This patch improves ctnetlink event reliability if one broadcast listener has set the NETLINK_BROADCAST_ERROR socket option. The logic is the following: if an event delivery fails, we keep the undelivered events in the missed event cache. Once the next packet arrives, we add the new events (if any) to the missed events in the cache and we try a new delivery, and so on. Thus, if ctnetlink fails to deliver an event, we try to deliver them once we see a new packet. Therefore, we may lose state transitions but the userspace process gets in sync at some point. At worst case, if no events were delivered to userspace, we make sure that destroy events are successfully delivered. Basically, if ctnetlink fails to deliver the destroy event, we remove the conntrack entry from the hashes and we insert them in the dying list, which contains inactive entries. Then, the conntrack timer is added with an extra grace timeout of random32() % 15 seconds to trigger the event again (this grace timeout is tunable via /proc). The use of a limited random timeout value allows distributing the "destroy" resends, thus, avoiding accumulating lots "destroy" events at the same time. Event delivery may re-order but we can identify them by means of the tuple plus the conntrack ID. The maximum number of conntrack entries (active or inactive) is still handled by nf_conntrack_max. Thus, we may start dropping packets at some point if we accumulate a lot of inactive conntrack entries that did not successfully report the destroy event to userspace. During my stress tests consisting of setting a very small buffer of 2048 bytes for conntrackd and the NETLINK_BROADCAST_ERROR socket flag, and generating lots of very small connections, I noticed very few destroy entries on the fly waiting to be resend. 
A simple way to test this patch consists of creating a lot of entries, setting a very small Netlink buffer in conntrackd (+ a patch which is not in the git tree to set the BROADCAST_ERROR flag) and invoking `conntrack -F'. For expectations, no changes are introduced in this patch. Currently, event delivery is only done for new expectations (no events from expectation expiration, removal and confirmation). In that case, we would need a per-expectation event cache to implement the same idea that is exposed in this patch. This patch can be useful to provide reliable flow-accounting. We still have to add a new conntrack extension to store the creation and destroy time. Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org> Signed-off-by: Patrick McHardy <kaber@trash.net>
Diffstat (limited to 'include')
-rw-r--r--include/net/netfilter/nf_conntrack.h2
-rw-r--r--include/net/netfilter/nf_conntrack_ecache.h61
-rw-r--r--include/net/netns/conntrack.h2
3 files changed, 48 insertions, 17 deletions
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index ecc79f959076..a632689b61b4 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -201,6 +201,8 @@ extern struct nf_conntrack_tuple_hash *
201__nf_conntrack_find(struct net *net, const struct nf_conntrack_tuple *tuple); 201__nf_conntrack_find(struct net *net, const struct nf_conntrack_tuple *tuple);
202 202
203extern void nf_conntrack_hash_insert(struct nf_conn *ct); 203extern void nf_conntrack_hash_insert(struct nf_conn *ct);
204extern void nf_ct_delete_from_lists(struct nf_conn *ct);
205extern void nf_ct_insert_dying_list(struct nf_conn *ct);
204 206
205extern void nf_conntrack_flush_report(struct net *net, u32 pid, int report); 207extern void nf_conntrack_flush_report(struct net *net, u32 pid, int report);
206 208
diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h
index e7ae297ba383..4f20d58e2ab7 100644
--- a/include/net/netfilter/nf_conntrack_ecache.h
+++ b/include/net/netfilter/nf_conntrack_ecache.h
@@ -32,6 +32,8 @@ enum ip_conntrack_expect_events {
32 32
33struct nf_conntrack_ecache { 33struct nf_conntrack_ecache {
34 unsigned long cache; /* bitops want long */ 34 unsigned long cache; /* bitops want long */
35 unsigned long missed; /* missed events */
36 u32 pid; /* netlink pid of destroyer */
35}; 37};
36 38
37static inline struct nf_conntrack_ecache * 39static inline struct nf_conntrack_ecache *
@@ -84,14 +86,16 @@ nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct)
84 set_bit(event, &e->cache); 86 set_bit(event, &e->cache);
85} 87}
86 88
87static inline void 89static inline int
88nf_conntrack_eventmask_report(unsigned int eventmask, 90nf_conntrack_eventmask_report(unsigned int eventmask,
89 struct nf_conn *ct, 91 struct nf_conn *ct,
90 u32 pid, 92 u32 pid,
91 int report) 93 int report)
92{ 94{
95 int ret = 0;
93 struct net *net = nf_ct_net(ct); 96 struct net *net = nf_ct_net(ct);
94 struct nf_ct_event_notifier *notify; 97 struct nf_ct_event_notifier *notify;
98 struct nf_conntrack_ecache *e;
95 99
96 rcu_read_lock(); 100 rcu_read_lock();
97 notify = rcu_dereference(nf_conntrack_event_cb); 101 notify = rcu_dereference(nf_conntrack_event_cb);
@@ -101,29 +105,52 @@ nf_conntrack_eventmask_report(unsigned int eventmask,
101 if (!net->ct.sysctl_events) 105 if (!net->ct.sysctl_events)
102 goto out_unlock; 106 goto out_unlock;
103 107
108 e = nf_ct_ecache_find(ct);
109 if (e == NULL)
110 goto out_unlock;
111
104 if (nf_ct_is_confirmed(ct) && !nf_ct_is_dying(ct)) { 112 if (nf_ct_is_confirmed(ct) && !nf_ct_is_dying(ct)) {
105 struct nf_ct_event item = { 113 struct nf_ct_event item = {
106 .ct = ct, 114 .ct = ct,
107 .pid = pid, 115 .pid = e->pid ? e->pid : pid,
108 .report = report 116 .report = report
109 }; 117 };
110 notify->fcn(eventmask, &item); 118 /* This is a resent of a destroy event? If so, skip missed */
119 unsigned long missed = e->pid ? 0 : e->missed;
120
121 ret = notify->fcn(eventmask | missed, &item);
122 if (unlikely(ret < 0 || missed)) {
123 spin_lock_bh(&ct->lock);
124 if (ret < 0) {
125 /* This is a destroy event that has been
126 * triggered by a process, we store the PID
127 * to include it in the retransmission. */
128 if (eventmask & (1 << IPCT_DESTROY) &&
129 e->pid == 0 && pid != 0)
130 e->pid = pid;
131 else
132 e->missed |= eventmask;
133 } else
134 e->missed &= ~missed;
135 spin_unlock_bh(&ct->lock);
136 }
111 } 137 }
112out_unlock: 138out_unlock:
113 rcu_read_unlock(); 139 rcu_read_unlock();
140 return ret;
114} 141}
115 142
116static inline void 143static inline int
117nf_conntrack_event_report(enum ip_conntrack_events event, struct nf_conn *ct, 144nf_conntrack_event_report(enum ip_conntrack_events event, struct nf_conn *ct,
118 u32 pid, int report) 145 u32 pid, int report)
119{ 146{
120 nf_conntrack_eventmask_report(1 << event, ct, pid, report); 147 return nf_conntrack_eventmask_report(1 << event, ct, pid, report);
121} 148}
122 149
123static inline void 150static inline int
124nf_conntrack_event(enum ip_conntrack_events event, struct nf_conn *ct) 151nf_conntrack_event(enum ip_conntrack_events event, struct nf_conn *ct)
125{ 152{
126 nf_conntrack_eventmask_report(1 << event, ct, 0, 0); 153 return nf_conntrack_eventmask_report(1 << event, ct, 0, 0);
127} 154}
128 155
129struct nf_exp_event { 156struct nf_exp_event {
@@ -183,16 +210,16 @@ extern void nf_conntrack_ecache_fini(struct net *net);
183 210
184static inline void nf_conntrack_event_cache(enum ip_conntrack_events event, 211static inline void nf_conntrack_event_cache(enum ip_conntrack_events event,
185 struct nf_conn *ct) {} 212 struct nf_conn *ct) {}
186static inline void nf_conntrack_eventmask_report(unsigned int eventmask, 213static inline int nf_conntrack_eventmask_report(unsigned int eventmask,
187 struct nf_conn *ct, 214 struct nf_conn *ct,
188 u32 pid, 215 u32 pid,
189 int report) {} 216 int report) { return 0; }
190static inline void nf_conntrack_event(enum ip_conntrack_events event, 217static inline int nf_conntrack_event(enum ip_conntrack_events event,
191 struct nf_conn *ct) {} 218 struct nf_conn *ct) { return 0; }
192static inline void nf_conntrack_event_report(enum ip_conntrack_events event, 219static inline int nf_conntrack_event_report(enum ip_conntrack_events event,
193 struct nf_conn *ct, 220 struct nf_conn *ct,
194 u32 pid, 221 u32 pid,
195 int report) {} 222 int report) { return 0; }
196static inline void nf_ct_deliver_cached_events(const struct nf_conn *ct) {} 223static inline void nf_ct_deliver_cached_events(const struct nf_conn *ct) {}
197static inline void nf_ct_expect_event(enum ip_conntrack_expect_events event, 224static inline void nf_ct_expect_event(enum ip_conntrack_expect_events event,
198 struct nf_conntrack_expect *exp) {} 225 struct nf_conntrack_expect *exp) {}
diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
index 505a51cd8c63..ba1ba0c5efd1 100644
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -14,8 +14,10 @@ struct netns_ct {
14 struct hlist_nulls_head *hash; 14 struct hlist_nulls_head *hash;
15 struct hlist_head *expect_hash; 15 struct hlist_head *expect_hash;
16 struct hlist_nulls_head unconfirmed; 16 struct hlist_nulls_head unconfirmed;
17 struct hlist_nulls_head dying;
17 struct ip_conntrack_stat *stat; 18 struct ip_conntrack_stat *stat;
18 int sysctl_events; 19 int sysctl_events;
20 unsigned int sysctl_events_retry_timeout;
19 int sysctl_acct; 21 int sysctl_acct;
20 int sysctl_checksum; 22 int sysctl_checksum;
21 unsigned int sysctl_log_invalid; /* Log invalid packets */ 23 unsigned int sysctl_log_invalid; /* Log invalid packets */