net/netfilter/nf_queue.c
/*
 * Rusty Russell (C)2000 -- This code is GPL.
 * Patrick McHardy (c) 2006-2012
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter_bridge.h>
#include <linux/seq_file.h>
#include <linux/rcupdate.h>
#include <net/protocol.h>
#include <net/netfilter/nf_queue.h>
#include <net/dst.h>

#include "nf_internals.h"

/*
 * Hook for nfnetlink_queue to register its queue handler.
 * We do this so that most of the NFQUEUE code can be modular.
 *
 * Once the queue is registered it must reinject all packets it
 * receives, no matter what.
 */

/* Only one queueing backend (nfnetlink_queue) exists, so only one handler
 * may be registered per netns; the WARN_ON below catches a second attempt. */
void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh)
{
        /* should never happen, we only have one queueing backend in kernel */
        WARN_ON(rcu_access_pointer(net->nf.queue_handler));
        rcu_assign_pointer(net->nf.queue_handler, qh);
}
EXPORT_SYMBOL(nf_register_queue_handler);

/* The caller must flush their queue before this */
void nf_unregister_queue_handler(struct net *net)
{
        RCU_INIT_POINTER(net->nf.queue_handler, NULL);
}
EXPORT_SYMBOL(nf_unregister_queue_handler);
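
/*
 * Illustrative sketch (disabled, not part of this file): how a queueing
 * backend such as nfnetlink_queue wires itself up per netns.  The
 * "example_*" names are hypothetical; only the register/unregister calls
 * and the nf_queue_handler layout come from this file and nf_queue.h.
 */
#if 0
static int example_outfn(struct nf_queue_entry *entry, unsigned int queuenum)
{
        /* Hand the entry to the backend.  Every entry accepted here must
         * eventually come back through nf_reinject(); returning a negative
         * errno tells __nf_queue() to unwind and free the entry instead.
         */
        return -ESRCH;
}

static void example_hook_drop(struct net *net)
{
        /* Flush every entry still queued for this netns. */
}

static const struct nf_queue_handler example_qh = {
        .outfn          = example_outfn,
        .nf_hook_drop   = example_hook_drop,
};

static int __net_init example_net_init(struct net *net)
{
        nf_register_queue_handler(net, &example_qh);
        return 0;
}

static void __net_exit example_net_exit(struct net *net)
{
        /* Flush our own queue first, then clear the handler pointer. */
        nf_unregister_queue_handler(net);
}
#endif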

static void nf_queue_entry_release_br_nf_refs(struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
        struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);

        if (nf_bridge) {
                struct net_device *physdev;

                physdev = nf_bridge_get_physindev(skb);
                if (physdev)
                        dev_put(physdev);
                physdev = nf_bridge_get_physoutdev(skb);
                if (physdev)
                        dev_put(physdev);
        }
#endif
}

void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
{
        struct nf_hook_state *state = &entry->state;

        /* Release those devices we held, or Alexey will kill me. */
        if (state->in)
                dev_put(state->in);
        if (state->out)
                dev_put(state->out);
        if (state->sk)
                sock_put(state->sk);

        nf_queue_entry_release_br_nf_refs(entry->skb);
}
EXPORT_SYMBOL_GPL(nf_queue_entry_release_refs);

static void nf_queue_entry_get_br_nf_refs(struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
        struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);

        if (nf_bridge) {
                struct net_device *physdev;

                physdev = nf_bridge_get_physindev(skb);
                if (physdev)
                        dev_hold(physdev);
                physdev = nf_bridge_get_physoutdev(skb);
                if (physdev)
                        dev_hold(physdev);
        }
#endif
}

/* Bump dev refs so they don't vanish while packet is out */
void nf_queue_entry_get_refs(struct nf_queue_entry *entry)
{
        struct nf_hook_state *state = &entry->state;

        if (state->in)
                dev_hold(state->in);
        if (state->out)
                dev_hold(state->out);
        if (state->sk)
                sock_hold(state->sk);

        nf_queue_entry_get_br_nf_refs(entry->skb);
}
EXPORT_SYMBOL_GPL(nf_queue_entry_get_refs);
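
/*
 * Roughly what nfnetlink_queue's nf_queue_entry_dup() does: a backend that
 * needs its own copy of an entry must also take its own set of references,
 * since the original's refs are dropped when that entry is released.
 * Sketch only; see net/netfilter/nfnetlink_queue.c for the real thing.
 */
#if 0
static struct nf_queue_entry *example_entry_dup(struct nf_queue_entry *e)
{
        struct nf_queue_entry *entry = kmemdup(e, e->size, GFP_ATOMIC);

        if (entry)
                nf_queue_entry_get_refs(entry); /* the copy owns its refs */
        return entry;
}
#endif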

void nf_queue_nf_hook_drop(struct net *net)
{
        const struct nf_queue_handler *qh;

        rcu_read_lock();
        qh = rcu_dereference(net->nf.queue_handler);
        if (qh)
                qh->nf_hook_drop(net);
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(nf_queue_nf_hook_drop);

static void nf_ip_saveroute(const struct sk_buff *skb,
                            struct nf_queue_entry *entry)
{
        struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry);

        if (entry->state.hook == NF_INET_LOCAL_OUT) {
                const struct iphdr *iph = ip_hdr(skb);

                rt_info->tos = iph->tos;
                rt_info->daddr = iph->daddr;
                rt_info->saddr = iph->saddr;
                rt_info->mark = skb->mark;
        }
}

static void nf_ip6_saveroute(const struct sk_buff *skb,
                             struct nf_queue_entry *entry)
{
        struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry);

        if (entry->state.hook == NF_INET_LOCAL_OUT) {
                const struct ipv6hdr *iph = ipv6_hdr(skb);

                rt_info->daddr = iph->daddr;
                rt_info->saddr = iph->saddr;
                rt_info->mark = skb->mark;
        }
}
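
/*
 * The saved route key is consumed on the way back in: nf_reroute() (called
 * from nf_reinject() below) compares it against the possibly-mangled packet
 * and re-routes on mismatch.  A sketch of the IPv4 side, approximating
 * nf_ip_reroute() in net/ipv4/netfilter.c:
 */
#if 0
static int example_ip_reroute(struct sk_buff *skb,
                              const struct nf_queue_entry *entry)
{
        const struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry);

        if (entry->state.hook == NF_INET_LOCAL_OUT) {
                const struct iphdr *iph = ip_hdr(skb);

                /* Re-route only if userspace changed a routing-relevant
                 * field (tos, mark or addresses) while the packet was out.
                 */
                if (!(iph->tos == rt_info->tos &&
                      skb->mark == rt_info->mark &&
                      iph->daddr == rt_info->daddr &&
                      iph->saddr == rt_info->saddr))
                        return ip_route_me_harder(entry->state.net, skb,
                                                  RTN_UNSPEC);
        }
        return 0;
}
#endif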

static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
                      const struct nf_hook_entries *entries,
                      unsigned int index, unsigned int queuenum)
{
        int status = -ENOENT;
        struct nf_queue_entry *entry = NULL;
        const struct nf_queue_handler *qh;
        struct net *net = state->net;
        unsigned int route_key_size;

        /* QUEUE == DROP if no one is waiting, to be safe. */
        qh = rcu_dereference(net->nf.queue_handler);
        if (!qh) {
                status = -ESRCH;
                goto err;
        }

        switch (state->pf) {
        case AF_INET:
                route_key_size = sizeof(struct ip_rt_info);
                break;
        case AF_INET6:
                route_key_size = sizeof(struct ip6_rt_info);
                break;
        default:
                route_key_size = 0;
                break;
        }

        entry = kmalloc(sizeof(*entry) + route_key_size, GFP_ATOMIC);
        if (!entry) {
                status = -ENOMEM;
                goto err;
        }

        *entry = (struct nf_queue_entry) {
                .skb    = skb,
                .state  = *state,
                .hook_index = index,
                .size   = sizeof(*entry) + route_key_size,
        };

        nf_queue_entry_get_refs(entry);
        skb_dst_force(skb);

        switch (entry->state.pf) {
        case AF_INET:
                nf_ip_saveroute(skb, entry);
                break;
        case AF_INET6:
                nf_ip6_saveroute(skb, entry);
                break;
        }

        status = qh->outfn(entry, queuenum);

        if (status < 0) {
                nf_queue_entry_release_refs(entry);
                goto err;
        }

        return 0;

err:
        kfree(entry);
        return status;
}
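
/*
 * Note on the allocation above: the route key lives in the bytes allocated
 * past the entry itself, so one kmalloc() covers both.  The accessor in
 * include/net/netfilter/nf_queue.h amounts to roughly the following sketch:
 *
 *      static inline void *nf_queue_entry_reroute(const struct nf_queue_entry *entry)
 *      {
 *              return (void *)entry + sizeof(struct nf_queue_entry);
 *      }
 */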

/* Packets leaving via this function must come back through nf_reinject(). */
int nf_queue(struct sk_buff *skb, struct nf_hook_state *state,
             const struct nf_hook_entries *entries, unsigned int index,
             unsigned int verdict)
{
        int ret;

        ret = __nf_queue(skb, state, entries, index, verdict >> NF_VERDICT_QBITS);
        if (ret < 0) {
                if (ret == -ESRCH &&
                    (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
                        return 1;
                kfree_skb(skb);
        }

        return 0;
}
EXPORT_SYMBOL_GPL(nf_queue);
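
/*
 * Illustrative sketch: how a hook requests queueing.  The target queue
 * number travels in the upper 16 bits of the verdict, which is what the
 * "verdict >> NF_VERDICT_QBITS" above unpacks.  The hook is hypothetical:
 */
#if 0
static unsigned int example_hook(void *priv, struct sk_buff *skb,
                                 const struct nf_hook_state *state)
{
        /* Queue to userspace queue 3; with the bypass flag set, a missing
         * listener (the -ESRCH path above) means "accept" rather than "drop".
         */
        return NF_QUEUE_NR(3) | NF_VERDICT_FLAG_QUEUE_BYPASS;
}
#endif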

static unsigned int nf_iterate(struct sk_buff *skb,
                               struct nf_hook_state *state,
                               const struct nf_hook_entries *hooks,
                               unsigned int *index)
{
        const struct nf_hook_entry *hook;
        unsigned int verdict, i = *index;

        while (i < hooks->num_hook_entries) {
                hook = &hooks->hooks[i];
repeat:
                verdict = nf_hook_entry_hookfn(hook, skb, state);
                if (verdict != NF_ACCEPT) {
                        *index = i;
                        if (verdict != NF_REPEAT)
                                return verdict;
                        goto repeat;
                }
                i++;
        }

        *index = i;
        return NF_ACCEPT;
}
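
/*
 * The "goto repeat" above means NF_REPEAT re-runs the same hook entry
 * instead of advancing, e.g. so a hook that rewrote the packet can look at
 * its own output.  A hypothetical hook using it (it must eventually return
 * something other than NF_REPEAT, or it loops forever):
 */
#if 0
static unsigned int example_repeat_hook(void *priv, struct sk_buff *skb,
                                        const struct nf_hook_state *state)
{
        if (example_rewrite(skb))       /* hypothetical helper */
                return NF_REPEAT;       /* inspect the rewritten packet again */
        return NF_ACCEPT;               /* done, move on to the next hook */
}
#endif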

static struct nf_hook_entries *nf_hook_entries_head(const struct net *net, u8 pf, u8 hooknum)
{
        switch (pf) {
#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
        case NFPROTO_BRIDGE:
                return rcu_dereference(net->nf.hooks_bridge[hooknum]);
#endif
        case NFPROTO_IPV4:
                return rcu_dereference(net->nf.hooks_ipv4[hooknum]);
        case NFPROTO_IPV6:
                return rcu_dereference(net->nf.hooks_ipv6[hooknum]);
        default:
                WARN_ON_ONCE(1);
                return NULL;
        }
}

/* Caller must hold rcu read-side lock */
void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
{
        const struct nf_hook_entry *hook_entry;
        const struct nf_hook_entries *hooks;
        struct sk_buff *skb = entry->skb;
        const struct net *net;
        unsigned int i;
        int err;
        u8 pf;

        net = entry->state.net;
        pf = entry->state.pf;

        hooks = nf_hook_entries_head(net, pf, entry->state.hook);

        nf_queue_entry_release_refs(entry);

        i = entry->hook_index;
        if (WARN_ON_ONCE(!hooks || i >= hooks->num_hook_entries)) {
                kfree_skb(skb);
                kfree(entry);
                return;
        }

        hook_entry = &hooks->hooks[i];

        /* Continue traversal iff userspace said ok... */
        if (verdict == NF_REPEAT)
                verdict = nf_hook_entry_hookfn(hook_entry, skb, &entry->state);

        if (verdict == NF_ACCEPT) {
                if (nf_reroute(skb, entry) < 0)
                        verdict = NF_DROP;
        }

        if (verdict == NF_ACCEPT) {
next_hook:
                ++i;
                verdict = nf_iterate(skb, &entry->state, hooks, &i);
        }

        switch (verdict & NF_VERDICT_MASK) {
        case NF_ACCEPT:
        case NF_STOP:
                local_bh_disable();
                entry->state.okfn(entry->state.net, entry->state.sk, skb);
                local_bh_enable();
                break;
        case NF_QUEUE:
                err = nf_queue(skb, &entry->state, hooks, i, verdict);
                if (err == 1)
                        goto next_hook;
                break;
        case NF_STOLEN:
                break;
        default:
                kfree_skb(skb);
        }

        kfree(entry);
}
EXPORT_SYMBOL(nf_reinject);
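
/*
 * The other half of the contract, sketched: when a verdict arrives from
 * userspace, the backend looks the queued entry up and feeds it back in
 * under the RCU read lock, as the comment above nf_reinject() requires.
 * The function name is hypothetical; nfnetlink_queue's real path is its
 * verdict-message handler.
 */
#if 0
static void example_deliver_verdict(struct nf_queue_entry *entry,
                                    unsigned int verdict)
{
        rcu_read_lock();
        nf_reinject(entry, verdict);
        rcu_read_unlock();
}
#endif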