// SPDX-License-Identifier: GPL-2.0-only
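/*
 * NAT keepalive for IPsec states with UDP encapsulation: periodically
 * sends the one-octet 0xFF keepalive packet defined by RFC 3948 so that
 * NAT mappings on the path do not expire while an SA carries no traffic.
 */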

#include <net/inet_common.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>
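
/*
 * Keepalives are sent from per-CPU kernel control sockets, one set per
 * address family, so no socket allocation is needed in the send path.
 */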
static DEFINE_PER_CPU(struct sock *, nat_keepalive_sk_ipv4);
#if IS_ENABLED(CONFIG_IPV6)
static DEFINE_PER_CPU(struct sock *, nat_keepalive_sk_ipv6);
#endif
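
/*
 * Snapshot of everything needed to build one keepalive packet. It is
 * filled under x->lock so the packet can be built and sent after the
 * lock has been dropped.
 */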
struct nat_keepalive {
        struct net *net;
        u16 family;
        xfrm_address_t saddr;
        xfrm_address_t daddr;
        __be16 encap_sport;
        __be16 encap_dport;
        __u32 smark;
};

static void nat_keepalive_init(struct nat_keepalive *ka, struct xfrm_state *x)
{
        ka->net = xs_net(x);
        ka->family = x->props.family;
        ka->saddr = x->props.saddr;
        ka->daddr = x->id.daddr;
        ka->encap_sport = x->encap->encap_sport;
        ka->encap_dport = x->encap->encap_dport;
        ka->smark = xfrm_smark_get(0, x);
}
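
/*
 * Route the keepalive like the encapsulated ESP traffic it stands in for,
 * then transmit it from this CPU's IPv4 control socket. The socket is
 * switched into the state's netns for the duration of the send.
 */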
static int nat_keepalive_send_ipv4(struct sk_buff *skb,
                                   struct nat_keepalive *ka)
{
        struct net *net = ka->net;
        struct flowi4 fl4;
        struct rtable *rt;
        struct sock *sk;
        __u8 tos = 0;
        int err;

        flowi4_init_output(&fl4, 0 /* oif */, skb->mark, tos,
                           RT_SCOPE_UNIVERSE, IPPROTO_UDP, 0,
                           ka->daddr.a4, ka->saddr.a4, ka->encap_dport,
                           ka->encap_sport, sock_net_uid(net, NULL));

        rt = ip_route_output_key(net, &fl4);
        if (IS_ERR(rt))
                return PTR_ERR(rt);

        skb_dst_set(skb, &rt->dst);

        sk = *this_cpu_ptr(&nat_keepalive_sk_ipv4);
        sock_net_set(sk, net);
        err = ip_build_and_send_pkt(skb, sk, fl4.saddr, fl4.daddr, NULL, tos);
        sock_net_set(sk, &init_net);
        return err;
}
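
/*
 * IPv6 variant. Unlike IPv4, the UDP checksum is mandatory over IPv6,
 * so it is filled in before the packet is routed and transmitted.
 */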
#if IS_ENABLED(CONFIG_IPV6)
static int nat_keepalive_send_ipv6(struct sk_buff *skb,
                                   struct nat_keepalive *ka,
                                   struct udphdr *uh)
{
        struct net *net = ka->net;
        struct dst_entry *dst;
        struct flowi6 fl6;
        struct sock *sk;
        __wsum csum;
        int err;

        csum = skb_checksum(skb, 0, skb->len, 0);
        uh->check = csum_ipv6_magic(&ka->saddr.in6, &ka->daddr.in6,
                                    skb->len, IPPROTO_UDP, csum);
        if (uh->check == 0)
                uh->check = CSUM_MANGLED_0;

        memset(&fl6, 0, sizeof(fl6));
        fl6.flowi6_mark = skb->mark;
        fl6.saddr = ka->saddr.in6;
        fl6.daddr = ka->daddr.in6;
        fl6.flowi6_proto = IPPROTO_UDP;
        fl6.fl6_sport = ka->encap_sport;
        fl6.fl6_dport = ka->encap_dport;

        sk = *this_cpu_ptr(&nat_keepalive_sk_ipv6);
        sock_net_set(sk, net);
        dst = ipv6_stub->ipv6_dst_lookup_flow(net, sk, &fl6, NULL);
        if (IS_ERR(dst)) {
                /* Reset the control socket's netns on the error path too. */
                sock_net_set(sk, &init_net);
                return PTR_ERR(dst);
        }

        skb_dst_set(skb, dst);
        err = ipv6_stub->ip6_xmit(sk, skb, &fl6, skb->mark, NULL, 0, 0);
        sock_net_set(sk, &init_net);
        return err;
}
#endif
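
/*
 * Build and send one keepalive: a UDP datagram whose payload is the
 * single 0xFF octet defined by RFC 3948 (UDP encapsulation of ESP),
 * with headroom reserved for either an IPv4 or an IPv6 header.
 */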
static void nat_keepalive_send(struct nat_keepalive *ka)
{
        const int nat_ka_hdrs_len = max(sizeof(struct iphdr),
                                        sizeof(struct ipv6hdr)) +
                                    sizeof(struct udphdr);
        const u8 nat_ka_payload = 0xFF;
        int err = -EAFNOSUPPORT;
        struct sk_buff *skb;
        struct udphdr *uh;

        skb = alloc_skb(nat_ka_hdrs_len + sizeof(nat_ka_payload), GFP_ATOMIC);
        if (unlikely(!skb))
                return;

        skb_reserve(skb, nat_ka_hdrs_len);

        skb_put_u8(skb, nat_ka_payload);

        uh = skb_push(skb, sizeof(*uh));
        uh->source = ka->encap_sport;
        uh->dest = ka->encap_dport;
        uh->len = htons(skb->len);
        uh->check = 0;

        skb->mark = ka->smark;

        switch (ka->family) {
        case AF_INET:
                err = nat_keepalive_send_ipv4(skb, ka);
                break;
#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
                err = nat_keepalive_send_ipv6(skb, ka, uh);
                break;
#endif
        }

        if (err)
                kfree_skb(skb);
}
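
/*
 * Walk context: 'now' is sampled once per scan, and 'next_run' collects
 * the earliest upcoming keepalive deadline seen across all states.
 */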
struct nat_keepalive_work_ctx {
        time64_t next_run;
        time64_t now;
};
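
/*
 * Per-state walk callback. Under x->lock, decide whether this state has
 * been idle for at least nat_keepalive_interval seconds; if so, snapshot
 * the send parameters and emit the keepalive once the lock is dropped.
 */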
static int nat_keepalive_work_single(struct xfrm_state *x, int count, void *ptr)
{
        struct nat_keepalive_work_ctx *ctx = ptr;
        bool send_keepalive = false;
        struct nat_keepalive ka;
        time64_t next_run = 0;
        u32 interval;
        int delta;

        interval = x->nat_keepalive_interval;
        if (!interval)
                return 0;

        spin_lock(&x->lock);

        delta = (int)(ctx->now - x->lastused);
        if (delta < interval) {
                /* Traffic was seen recently: no keepalive needed yet. */
                x->nat_keepalive_expiration = ctx->now + interval - delta;
                next_run = x->nat_keepalive_expiration;
        } else if (x->nat_keepalive_expiration > ctx->now) {
                /* A keepalive is already scheduled for a later time. */
                next_run = x->nat_keepalive_expiration;
        } else {
                /* Idle past the interval: send one now and rearm. */
                next_run = ctx->now + interval;
                nat_keepalive_init(&ka, x);
                send_keepalive = true;
        }

        spin_unlock(&x->lock);

        if (send_keepalive)
                nat_keepalive_send(&ka);

        if (!ctx->next_run || next_run < ctx->next_run)
                ctx->next_run = next_run;
        return 0;
}
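
/*
 * Delayed work handler: scan all ESP states in this netns and reschedule
 * the work for the earliest pending keepalive, if any remain.
 */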
static void nat_keepalive_work(struct work_struct *work)
{
        struct nat_keepalive_work_ctx ctx;
        struct xfrm_state_walk walk;
        struct net *net;

        ctx.next_run = 0;
        ctx.now = ktime_get_real_seconds();

        net = container_of(work, struct net, xfrm.nat_keepalive_work.work);
        xfrm_state_walk_init(&walk, IPPROTO_ESP, NULL);
        xfrm_state_walk(net, &walk, nat_keepalive_work_single, &ctx);
        xfrm_state_walk_done(&walk, net);
        if (ctx.next_run)
                schedule_delayed_work(&net->xfrm.nat_keepalive_work,
                                      (ctx.next_run - ctx.now) * HZ);
}
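
/*
 * Create one control socket per possible CPU for the given family. On
 * failure, sockets created so far are destroyed; the remaining per-CPU
 * slots are still NULL, which inet_ctl_sock_destroy tolerates.
 */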
static int nat_keepalive_sk_init(struct sock * __percpu *socks,
                                 unsigned short family)
{
        struct sock *sk;
        int err, i;

        for_each_possible_cpu(i) {
                err = inet_ctl_sock_create(&sk, family, SOCK_RAW, IPPROTO_UDP,
                                           &init_net);
                if (err < 0)
                        goto err;

                *per_cpu_ptr(socks, i) = sk;
        }

        return 0;
err:
        for_each_possible_cpu(i)
                inet_ctl_sock_destroy(*per_cpu_ptr(socks, i));
        return err;
}

static void nat_keepalive_sk_fini(struct sock * __percpu *socks)
{
        int i;

        for_each_possible_cpu(i)
                inet_ctl_sock_destroy(*per_cpu_ptr(socks, i));
}
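
/*
 * Called when a state carrying a keepalive interval is installed or
 * updated: kick the per-netns work immediately so the interval takes
 * effect without waiting for the next scheduled run.
 */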
void xfrm_nat_keepalive_state_updated(struct xfrm_state *x)
{
        struct net *net;

        if (!x->nat_keepalive_interval)
                return;

        net = xs_net(x);
        schedule_delayed_work(&net->xfrm.nat_keepalive_work, 0);
}
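
/* Per-netns setup/teardown of the delayed keepalive work. */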
int __net_init xfrm_nat_keepalive_net_init(struct net *net)
{
        INIT_DELAYED_WORK(&net->xfrm.nat_keepalive_work, nat_keepalive_work);
        return 0;
}

int xfrm_nat_keepalive_net_fini(struct net *net)
{
        cancel_delayed_work_sync(&net->xfrm.nat_keepalive_work);
        return 0;
}
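
/*
 * Per-family setup/teardown of the control sockets. Exported since the
 * expected callers are modular users of UDP-encapsulated ESP (likely the
 * esp4/esp6 init paths, though that is not visible from this file alone).
 */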
int xfrm_nat_keepalive_init(unsigned short family)
{
        int err = -EAFNOSUPPORT;

        switch (family) {
        case AF_INET:
                err = nat_keepalive_sk_init(&nat_keepalive_sk_ipv4, PF_INET);
                break;
#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
                err = nat_keepalive_sk_init(&nat_keepalive_sk_ipv6, PF_INET6);
                break;
#endif
        }

        if (err)
                pr_err("xfrm nat keepalive init: failed to init err:%d\n", err);
        return err;
}
EXPORT_SYMBOL_GPL(xfrm_nat_keepalive_init);

void xfrm_nat_keepalive_fini(unsigned short family)
{
        switch (family) {
        case AF_INET:
                nat_keepalive_sk_fini(&nat_keepalive_sk_ipv4);
                break;
#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
                nat_keepalive_sk_fini(&nat_keepalive_sk_ipv6);
                break;
#endif
        }
}
EXPORT_SYMBOL_GPL(xfrm_nat_keepalive_fini);