// SPDX-License-Identifier: GPL-2.0-only
/* Event cache for netfilter. */

/*
 * (C) 2005 USAGI/WIDE Project <http://www.linux-ipv6.org>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/stddef.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/export.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_extend.h>

static DEFINE_MUTEX(nf_ct_ecache_mutex);

#define DYING_NULLS_VAL		((1 << 30) + 1)
#define ECACHE_MAX_JIFFIES	msecs_to_jiffies(10)
#define ECACHE_RETRY_JIFFIES	msecs_to_jiffies(10)

enum retry_state {
	STATE_CONGESTED,
	STATE_RESTART,
	STATE_DONE,
};

struct nf_conntrack_net_ecache *nf_conn_pernet_ecache(const struct net *net)
{
	struct nf_conntrack_net *cnet = nf_ct_pernet(net);

	return &cnet->ecache;
}
#if IS_MODULE(CONFIG_NF_CT_NETLINK)
EXPORT_SYMBOL_GPL(nf_conn_pernet_ecache);
#endif

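/* Usage sketch (illustrative, not built here): a netlink dump of not-yet
 * delivered destroy events could walk the per-netns dying list via this
 * accessor. Caller and locking below are assumptions for the sketch, not
 * the actual ctnetlink implementation:
 *
 *	struct nf_conntrack_net_ecache *ecache = nf_conn_pernet_ecache(net);
 *
 *	spin_lock_bh(&ecache->dying_lock);
 *	hlist_nulls_for_each_entry(h, n, &ecache->dying_list, hnnode) {
 *		... dump nf_ct_tuplehash_to_ctrack(h) ...
 *	}
 *	spin_unlock_bh(&ecache->dying_lock);
 */
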
static enum retry_state ecache_work_evict_list(struct nf_conntrack_net *cnet)
{
	unsigned long stop = jiffies + ECACHE_MAX_JIFFIES;
	struct hlist_nulls_head evicted_list;
	enum retry_state ret = STATE_DONE;
	struct nf_conntrack_tuple_hash *h;
	struct hlist_nulls_node *n;
	unsigned int sent;

	INIT_HLIST_NULLS_HEAD(&evicted_list, DYING_NULLS_VAL);

next:
	sent = 0;
	spin_lock_bh(&cnet->ecache.dying_lock);

	hlist_nulls_for_each_entry_safe(h, n, &cnet->ecache.dying_list, hnnode) {
		struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);

		/* The worker owns all entries, ct remains valid until nf_ct_put
		 * in the loop below.
		 */
		if (nf_conntrack_event(IPCT_DESTROY, ct)) {
			ret = STATE_CONGESTED;
			break;
		}

		/* Event delivered: unlink from the dying list and park the
		 * entry on the private evicted list; the final nf_ct_put
		 * happens after the lock is dropped.
		 */
		hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
		hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode, &evicted_list);

		if (time_after(stop, jiffies)) {
			ret = STATE_RESTART;
			break;
		}

		if (sent++ > 16) {
			spin_unlock_bh(&cnet->ecache.dying_lock);
			cond_resched();
			goto next;
		}
	}

	spin_unlock_bh(&cnet->ecache.dying_lock);

	hlist_nulls_for_each_entry_safe(h, n, &evicted_list, hnnode) {
		struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);

		hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode);
		nf_ct_put(ct);

		cond_resched();
	}

	return ret;
}

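/* Design note: eviction is two-phase. Under dying_lock, each conntrack
 * whose IPCT_DESTROY event was accepted is unlinked from the dying list
 * (via the ORIGINAL hnnode) and parked on a local nulls list that reuses
 * the REPLY hnnode; the reference drops and cond_resched() then happen
 * outside the lock. A congested notifier aborts early and the worker
 * retries later.
 */
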
static void ecache_work(struct work_struct *work)
{
	struct nf_conntrack_net *cnet = container_of(work, struct nf_conntrack_net, ecache.dwork.work);
	int ret, delay = -1;

	ret = ecache_work_evict_list(cnet);
	switch (ret) {
	case STATE_CONGESTED:
		delay = ECACHE_RETRY_JIFFIES;
		break;
	case STATE_RESTART:
		delay = 0;
		break;
	case STATE_DONE:
		break;
	}

	if (delay >= 0)
		schedule_delayed_work(&cnet->ecache.dwork, delay);
}

static int __nf_conntrack_eventmask_report(struct nf_conntrack_ecache *e,
					   const u32 events,
					   const u32 missed,
					   const struct nf_ct_event *item)
{
	struct net *net = nf_ct_net(item->ct);
	struct nf_ct_event_notifier *notify;
	u32 old, want;
	int ret;

	if (!((events | missed) & e->ctmask))
		return 0;

	rcu_read_lock();

	notify = rcu_dereference(net->ct.nf_conntrack_event_cb);
	if (!notify) {
		rcu_read_unlock();
		return 0;
	}

	ret = notify->ct_event(events | missed, item);
	rcu_read_unlock();

	if (likely(ret >= 0 && missed == 0))
		return 0;

	/* Lockless update of the missed-events bitmask: on failure, remember
	 * the events we could not deliver; on success, clear the missed bits
	 * that were just re-delivered.
	 */
	do {
		old = READ_ONCE(e->missed);
		if (ret < 0)
			want = old | events;
		else
			want = old & ~missed;
	} while (cmpxchg(&e->missed, old, want) != old);

	return ret;
}

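/* Worked example (illustrative): suppose delivery of IPCT_PROTOINFO fails
 * (ret < 0). Then e->missed gains that event bit. The next report for the
 * same conntrack passes events | missed to the notifier, and on success
 * the cmpxchg() loop clears the re-delivered bits, so listeners eventually
 * observe every event despite transient congestion.
 */
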
int nf_conntrack_eventmask_report(unsigned int events, struct nf_conn *ct,
				  u32 portid, int report)
{
	struct nf_conntrack_ecache *e;
	struct nf_ct_event item;
	unsigned int missed;
	int ret;

	if (!nf_ct_is_confirmed(ct))
		return 0;

	e = nf_ct_ecache_find(ct);
	if (!e)
		return 0;

	memset(&item, 0, sizeof(item));

	item.ct = ct;
	item.portid = e->portid ? e->portid : portid;
	item.report = report;

	/* Is this a resend of a destroy event? If so, skip the missed events. */
	missed = e->portid ? 0 : e->missed;

	ret = __nf_conntrack_eventmask_report(e, events, missed, &item);
	if (unlikely(ret < 0 && (events & (1 << IPCT_DESTROY)))) {
		/* This is a destroy event that has been triggered by a
		 * process; store the PORTID so it can be included in the
		 * retransmission.
		 */
		if (e->portid == 0 && portid != 0)
			e->portid = portid;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nf_conntrack_eventmask_report);

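/* Callers usually go through the inline helpers in
 * <net/netfilter/nf_conntrack_ecache.h>, e.g. (sketch):
 *
 *	nf_conntrack_event_report(IPCT_DESTROY, ct, portid, report);
 *
 * which boils down to
 *
 *	nf_conntrack_eventmask_report(1 << IPCT_DESTROY, ct, portid, report);
 */
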
/* Deliver cached events and clear the cache entry - must be called with
 * softirqs disabled locally.
 */
void nf_ct_deliver_cached_events(struct nf_conn *ct)
{
	struct nf_conntrack_ecache *e;
	struct nf_ct_event item;
	unsigned int events;

	if (!nf_ct_is_confirmed(ct) || nf_ct_is_dying(ct))
		return;

	e = nf_ct_ecache_find(ct);
	if (e == NULL)
		return;

	events = xchg(&e->cache, 0);

	item.ct = ct;
	item.portid = 0;
	item.report = 0;

	/* We make a copy of the missed event cache without taking
	 * the lock, thus we may send missed events twice. However,
	 * this does not harm and it happens very rarely.
	 */
	__nf_conntrack_eventmask_report(e, events, e->missed, &item);
}
EXPORT_SYMBOL_GPL(nf_ct_deliver_cached_events);

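/* Typical cache-then-deliver pattern (sketch): protocol trackers mark
 * events on the hot path with nf_conntrack_event_cache(), and the cached
 * bits are flushed as one notification later, e.g. from the confirm path:
 *
 *	nf_conntrack_event_cache(IPCT_PROTOINFO, ct);
 *	...
 *	nf_ct_deliver_cached_events(ct);
 */
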
void nf_ct_expect_event_report(enum ip_conntrack_expect_events event,
			       struct nf_conntrack_expect *exp,
			       u32 portid, int report)
{
	struct net *net = nf_ct_exp_net(exp);
	struct nf_ct_event_notifier *notify;
	struct nf_conntrack_ecache *e;

	rcu_read_lock();
	notify = rcu_dereference(net->ct.nf_conntrack_event_cb);
	if (!notify)
		goto out_unlock;

	e = nf_ct_ecache_find(exp->master);
	if (!e)
		goto out_unlock;

	if (e->expmask & (1 << event)) {
		struct nf_exp_event item = {
			.exp	= exp,
			.portid	= portid,
			.report = report
		};
		notify->exp_event(1 << event, &item);
	}
out_unlock:
	rcu_read_unlock();
}

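/* Caller sketch: when a new expectation is inserted, the expectation
 * infrastructure can announce it to userspace listeners, roughly:
 *
 *	nf_ct_expect_event_report(IPEXP_NEW, exp, portid, report);
 */
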
void nf_conntrack_register_notifier(struct net *net,
				    const struct nf_ct_event_notifier *new)
{
	struct nf_ct_event_notifier *notify;

	mutex_lock(&nf_ct_ecache_mutex);
	notify = rcu_dereference_protected(net->ct.nf_conntrack_event_cb,
					   lockdep_is_held(&nf_ct_ecache_mutex));
	/* Only one notifier per netns is supported. */
	WARN_ON_ONCE(notify);
	rcu_assign_pointer(net->ct.nf_conntrack_event_cb, new);
	mutex_unlock(&nf_ct_ecache_mutex);
}
EXPORT_SYMBOL_GPL(nf_conntrack_register_notifier);

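/* Registration sketch: ctnetlink provides the single per-netns notifier.
 * Assuming callback names ctnetlink_conntrack_event() and
 * ctnetlink_expect_event() (illustrative), registration looks like:
 *
 *	static const struct nf_ct_event_notifier ctnl_notifier = {
 *		.ct_event  = ctnetlink_conntrack_event,
 *		.exp_event = ctnetlink_expect_event,
 *	};
 *
 *	nf_conntrack_register_notifier(net, &ctnl_notifier);
 */
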
void nf_conntrack_unregister_notifier(struct net *net)
{
	mutex_lock(&nf_ct_ecache_mutex);
	RCU_INIT_POINTER(net->ct.nf_conntrack_event_cb, NULL);
	mutex_unlock(&nf_ct_ecache_mutex);
	/* synchronize_rcu() is called after netns pre_exit */
}
EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier);

void nf_conntrack_ecache_work(struct net *net, enum nf_ct_ecache_state state)
{
	struct nf_conntrack_net *cnet = nf_ct_pernet(net);

	if (state == NFCT_ECACHE_DESTROY_FAIL &&
	    !delayed_work_pending(&cnet->ecache.dwork)) {
		schedule_delayed_work(&cnet->ecache.dwork, HZ);
		net->ct.ecache_dwork_pending = true;
	} else if (state == NFCT_ECACHE_DESTROY_SENT) {
		if (!hlist_nulls_empty(&cnet->ecache.dying_list))
			mod_delayed_work(system_wq, &cnet->ecache.dwork, 0);
		else
			net->ct.ecache_dwork_pending = false;
	}
}

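/* State machine summary: a failed IPCT_DESTROY delivery arms the
 * redelivery worker with a one second delay; once a destroy event has
 * been sent, the worker is either kicked immediately (dying list still
 * non-empty) or the pending flag is cleared.
 */
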
bool nf_ct_ecache_ext_add(struct nf_conn *ct, u16 ctmask, u16 expmask, gfp_t gfp)
{
	struct net *net = nf_ct_net(ct);
	struct nf_conntrack_ecache *e;

	switch (net->ct.sysctl_events) {
	case 0:
		/* assignment via template / ruleset? ignore sysctl. */
		if (ctmask || expmask)
			break;
		return true;
	case 2: /* autodetect: no event listener, don't allocate extension. */
		if (!READ_ONCE(nf_ctnetlink_has_listener))
			return true;
		fallthrough;
	case 1:
		/* always allocate an extension. */
		if (!ctmask && !expmask) {
			ctmask = ~0;
			expmask = ~0;
		}
		break;
	default:
		WARN_ON_ONCE(1);
		return true;
	}

	e = nf_ct_ext_add(ct, NF_CT_EXT_ECACHE, gfp);
	if (e) {
		e->ctmask  = ctmask;
		e->expmask = expmask;
	}

	return e != NULL;
}
EXPORT_SYMBOL_GPL(nf_ct_ecache_ext_add);

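/* Allocation sketch: when a conntrack is created, the core requests the
 * extension with the masks taken from a CT target template, or 0/0 when
 * no template supplied any, roughly:
 *
 *	nf_ct_ecache_ext_add(ct, tmpl_ctmask, tmpl_expmask, GFP_ATOMIC);
 *
 * tmpl_ctmask/tmpl_expmask are placeholder names for this sketch.
 */
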
#define NF_CT_EVENTS_DEFAULT 2
static int nf_ct_events __read_mostly = NF_CT_EVENTS_DEFAULT;

void nf_conntrack_ecache_pernet_init(struct net *net)
{
	struct nf_conntrack_net *cnet = nf_ct_pernet(net);

	net->ct.sysctl_events = nf_ct_events;
	INIT_DELAYED_WORK(&cnet->ecache.dwork, ecache_work);
	INIT_HLIST_NULLS_HEAD(&cnet->ecache.dying_list, DYING_NULLS_VAL);
	spin_lock_init(&cnet->ecache.dying_lock);

	BUILD_BUG_ON(__IPCT_MAX >= 16);	/* e->ctmask is u16 */
}

void nf_conntrack_ecache_pernet_fini(struct net *net)
{
	struct nf_conntrack_net *cnet = nf_ct_pernet(net);

	cancel_delayed_work_sync(&cnet->ecache.dwork);
}