// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_api.c  Packet classifier API.
 *
 * Authors:     Alexey Kuznetsov, <[email protected]>
 *
 * Changes:
 *
 * Eduardo J. Blanco <[email protected]> :990222: kmod support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/jhash.h>
#include <linux/rculist.h>
#include <linux/rhashtable.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_police.h>
#include <net/tc_act/tc_sample.h>
#include <net/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_ct.h>
#include <net/tc_act/tc_mpls.h>
#include <net/tc_act/tc_gate.h>
#include <net/flow_offload.h>
#include <net/tc_wrapper.h>

/* The list of all installed classifier types */
static LIST_HEAD(tcf_proto_base);

/* Protects the list of registered TC modules. It is a pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);

static struct xarray tcf_exts_miss_cookies_xa;
struct tcf_exts_miss_cookie_node {
	const struct tcf_chain *chain;
	const struct tcf_proto *tp;
	const struct tcf_exts *exts;
	u32 chain_index;
	u32 tp_prio;
	u32 handle;
	u32 miss_cookie_base;
	struct rcu_head rcu;
};

/* Each tc action entry cookie is composed of a 32-bit miss_cookie_base plus
 * the action index in the exts tc actions array.
 */
union tcf_exts_miss_cookie {
	struct {
		u32 miss_cookie_base;
		u32 act_index;
	};
	u64 miss_cookie;
};

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
static int
tcf_exts_miss_cookie_base_alloc(struct tcf_exts *exts, struct tcf_proto *tp,
				u32 handle)
{
	struct tcf_exts_miss_cookie_node *n;
	static u32 next;
	int err;

	if (WARN_ON(!handle || !tp->ops->get_exts))
		return -EINVAL;

	n = kzalloc(sizeof(*n), GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	n->chain_index = tp->chain->index;
	n->chain = tp->chain;
	n->tp_prio = tp->prio;
	n->tp = tp;
	n->exts = exts;
	n->handle = handle;

	err = xa_alloc_cyclic(&tcf_exts_miss_cookies_xa, &n->miss_cookie_base,
			      n, xa_limit_32b, &next, GFP_KERNEL);
	if (err)
		goto err_xa_alloc;

	exts->miss_cookie_node = n;
	return 0;

err_xa_alloc:
	kfree(n);
	return err;
}

static void tcf_exts_miss_cookie_base_destroy(struct tcf_exts *exts)
{
	struct tcf_exts_miss_cookie_node *n;

	if (!exts->miss_cookie_node)
		return;

	n = exts->miss_cookie_node;
	xa_erase(&tcf_exts_miss_cookies_xa, n->miss_cookie_base);
	kfree_rcu(n, rcu);
}

static struct tcf_exts_miss_cookie_node *
tcf_exts_miss_cookie_lookup(u64 miss_cookie, int *act_index)
{
	union tcf_exts_miss_cookie mc = { .miss_cookie = miss_cookie, };

	*act_index = mc.act_index;
	return xa_load(&tcf_exts_miss_cookies_xa, mc.miss_cookie_base);
}
#else /* IS_ENABLED(CONFIG_NET_TC_SKB_EXT) */
static int
tcf_exts_miss_cookie_base_alloc(struct tcf_exts *exts, struct tcf_proto *tp,
				u32 handle)
{
	return 0;
}

static void tcf_exts_miss_cookie_base_destroy(struct tcf_exts *exts)
{
}
#endif /* IS_ENABLED(CONFIG_NET_TC_SKB_EXT) */

static u64 tcf_exts_miss_cookie_get(u32 miss_cookie_base, int act_index)
{
	union tcf_exts_miss_cookie mc = { .act_index = act_index, };

	if (!miss_cookie_base)
		return 0;

	mc.miss_cookie_base = miss_cookie_base;
	return mc.miss_cookie;
}
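
/* Worked example (a sketch, assuming a little-endian layout of the union
 * above): a node with miss_cookie_base 0x5 and act_index 2 encodes to the
 * 64-bit cookie 0x0000000200000005. tcf_exts_miss_cookie_lookup() splits
 * such a cookie back into the xarray key (0x5) and the action index (2).
 */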

#ifdef CONFIG_NET_CLS_ACT
DEFINE_STATIC_KEY_FALSE(tc_skb_ext_tc);
EXPORT_SYMBOL(tc_skb_ext_tc);

void tc_skb_ext_tc_enable(void)
{
	static_branch_inc(&tc_skb_ext_tc);
}
EXPORT_SYMBOL(tc_skb_ext_tc_enable);

void tc_skb_ext_tc_disable(void)
{
	static_branch_dec(&tc_skb_ext_tc);
}
EXPORT_SYMBOL(tc_skb_ext_tc_disable);
#endif

static u32 destroy_obj_hashfn(const struct tcf_proto *tp)
{
	return jhash_3words(tp->chain->index, tp->prio,
			    (__force __u32)tp->protocol, 0);
}
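
/* Editor's note on the mechanism below: the block's proto_destroy hashtable
 * tracks tcf_proto instances that have been signalled for destruction but may
 * still be visible to concurrent users. Keying by chain index, prio and
 * protocol lets tcf_proto_exists_destroying() detect an in-flight destruction
 * of an "equal" proto, so the insertion path can back off and retry instead
 * of racing with it.
 */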

static void tcf_proto_signal_destroying(struct tcf_chain *chain,
					struct tcf_proto *tp)
{
	struct tcf_block *block = chain->block;

	mutex_lock(&block->proto_destroy_lock);
	hash_add_rcu(block->proto_destroy_ht, &tp->destroy_ht_node,
		     destroy_obj_hashfn(tp));
	mutex_unlock(&block->proto_destroy_lock);
}

static bool tcf_proto_cmp(const struct tcf_proto *tp1,
			  const struct tcf_proto *tp2)
{
	return tp1->chain->index == tp2->chain->index &&
	       tp1->prio == tp2->prio &&
	       tp1->protocol == tp2->protocol;
}

static bool tcf_proto_exists_destroying(struct tcf_chain *chain,
					struct tcf_proto *tp)
{
	u32 hash = destroy_obj_hashfn(tp);
	struct tcf_proto *iter;
	bool found = false;

	rcu_read_lock();
	hash_for_each_possible_rcu(chain->block->proto_destroy_ht, iter,
				   destroy_ht_node, hash) {
		if (tcf_proto_cmp(tp, iter)) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();

	return found;
}

static void
tcf_proto_signal_destroyed(struct tcf_chain *chain, struct tcf_proto *tp)
{
	struct tcf_block *block = chain->block;

	mutex_lock(&block->proto_destroy_lock);
	if (hash_hashed(&tp->destroy_ht_node))
		hash_del_rcu(&tp->destroy_ht_node);
	mutex_unlock(&block->proto_destroy_lock);
}

/* Find classifier type by string name */

static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
{
	const struct tcf_proto_ops *t, *res = NULL;

	if (kind) {
		read_lock(&cls_mod_lock);
		list_for_each_entry(t, &tcf_proto_base, head) {
			if (strcmp(kind, t->kind) == 0) {
				if (try_module_get(t->owner))
					res = t;
				break;
			}
		}
		read_unlock(&cls_mod_lock);
	}
	return res;
}

static const struct tcf_proto_ops *
tcf_proto_lookup_ops(const char *kind, bool rtnl_held,
		     struct netlink_ext_ack *extack)
{
	const struct tcf_proto_ops *ops;

	ops = __tcf_proto_lookup_ops(kind);
	if (ops)
		return ops;
#ifdef CONFIG_MODULES
	if (rtnl_held)
		rtnl_unlock();
	request_module("cls_%s", kind);
	if (rtnl_held)
		rtnl_lock();
	ops = __tcf_proto_lookup_ops(kind);
	/* We dropped the RTNL semaphore in order to perform
	 * the module load. So, even if we succeeded in loading
	 * the module we have to replay the request. We indicate
	 * this using -EAGAIN.
	 */
	if (ops) {
		module_put(ops->owner);
		return ERR_PTR(-EAGAIN);
	}
#endif
	NL_SET_ERR_MSG(extack, "TC classifier not found");
	return ERR_PTR(-ENOENT);
}
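
/* Note for callers: -EAGAIN here is not a hard failure. Loading the module
 * required dropping RTNL, so anything observed before the lookup may be
 * stale; the filter create/change handlers are expected to replay the whole
 * netlink request when they see this code.
 */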

/* Register/unregister a new classifier type */

int register_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -EEXIST;

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head)
		if (!strcmp(ops->kind, t->kind))
			goto out;

	list_add_tail(&ops->head, &tcf_proto_base);
	rc = 0;
out:
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(register_tcf_proto_ops);
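
/* A minimal registration sketch (hypothetical "cls_example" module, not part
 * of this file): a classifier fills in struct tcf_proto_ops and registers it
 * from module init, mirroring what the in-tree classifiers do:
 *
 *	static struct tcf_proto_ops cls_example_ops __read_mostly = {
 *		.kind		= "example",
 *		.classify	= example_classify,
 *		.init		= example_init,
 *		.destroy	= example_destroy,
 *		.change		= example_change,
 *		.delete		= example_delete,
 *		.get		= example_get,
 *		.walk		= example_walk,
 *		.dump		= example_dump,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	static int __init example_init_module(void)
 *	{
 *		return register_tcf_proto_ops(&cls_example_ops);
 *	}
 *	module_init(example_init_module);
 */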

static struct workqueue_struct *tc_filter_wq;

void unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -ENOENT;

	/* Wait for outstanding call_rcu()s, if any, from a
	 * tcf_proto_ops's destroy() handler.
	 */
	rcu_barrier();
	flush_workqueue(tc_filter_wq);

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head) {
		if (t == ops) {
			list_del(&t->head);
			rc = 0;
			break;
		}
	}
	write_unlock(&cls_mod_lock);

	WARN(rc, "unregister tc filter kind(%s) failed %d\n", ops->kind, rc);
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);

bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)
{
	INIT_RCU_WORK(rwork, func);
	return queue_rcu_work(tc_filter_wq, rwork);
}
EXPORT_SYMBOL(tcf_queue_work);
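
/* Typical use (a sketch with hypothetical names): a classifier embeds a
 * struct rcu_work in its filter and frees the filter after an RCU grace
 * period on the tc workqueue:
 *
 *	tcf_queue_work(&f->rwork, example_destroy_filter_work);
 *
 * where the work function container_of()s from the rcu_work back to the
 * filter and releases it.
 */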

/* Select a new prio value from the range managed by the kernel. */

static inline u32 tcf_auto_prio(struct tcf_proto *tp)
{
	u32 first = TC_H_MAKE(0xC0000000U, 0U);

	if (tp)
		first = tp->prio - 1;

	return TC_H_MAJ(first);
}
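
/* Example: on an empty chain the first auto-prioritized filter gets prio
 * 0xC000 in tc's 16-bit pref view (49152); tp->prio - 1 masked by TC_H_MAJ()
 * then hands out 0xBFFF, 0xBFFE, ... for subsequent filters, so auto-added
 * filters stack downwards from the top of the kernel-managed range.
 */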

static bool tcf_proto_check_kind(struct nlattr *kind, char *name)
{
	if (kind)
		return nla_strscpy(name, kind, IFNAMSIZ) < 0;
	memset(name, 0, IFNAMSIZ);
	return false;
}

static bool tcf_proto_is_unlocked(const char *kind)
{
	const struct tcf_proto_ops *ops;
	bool ret;

	if (strlen(kind) == 0)
		return false;

	ops = tcf_proto_lookup_ops(kind, false, NULL);
	/* On error return false to take rtnl lock. Proto lookup/create
	 * functions will perform lookup again and properly handle errors.
	 */
	if (IS_ERR(ops))
		return false;

	ret = !!(ops->flags & TCF_PROTO_OPS_DOIT_UNLOCKED);
	module_put(ops->owner);
	return ret;
}

static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
					  u32 prio, struct tcf_chain *chain,
					  bool rtnl_held,
					  struct netlink_ext_ack *extack)
{
	struct tcf_proto *tp;
	int err;

	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(-ENOBUFS);

	tp->ops = tcf_proto_lookup_ops(kind, rtnl_held, extack);
	if (IS_ERR(tp->ops)) {
		err = PTR_ERR(tp->ops);
		goto errout;
	}
	tp->classify = tp->ops->classify;
	tp->protocol = protocol;
	tp->prio = prio;
	tp->chain = chain;
	spin_lock_init(&tp->lock);
	refcount_set(&tp->refcnt, 1);

	err = tp->ops->init(tp);
	if (err) {
		module_put(tp->ops->owner);
		goto errout;
	}
	return tp;

errout:
	kfree(tp);
	return ERR_PTR(err);
}

static void tcf_proto_get(struct tcf_proto *tp)
{
	refcount_inc(&tp->refcnt);
}

static void tcf_chain_put(struct tcf_chain *chain);

static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
			      bool sig_destroy, struct netlink_ext_ack *extack)
{
	tp->ops->destroy(tp, rtnl_held, extack);
	if (sig_destroy)
		tcf_proto_signal_destroyed(tp->chain, tp);
	tcf_chain_put(tp->chain);
	module_put(tp->ops->owner);
	kfree_rcu(tp, rcu);
}

static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held,
			  struct netlink_ext_ack *extack)
{
	if (refcount_dec_and_test(&tp->refcnt))
		tcf_proto_destroy(tp, rtnl_held, true, extack);
}

static bool tcf_proto_check_delete(struct tcf_proto *tp)
{
	if (tp->ops->delete_empty)
		return tp->ops->delete_empty(tp);

	tp->deleting = true;
	return tp->deleting;
}

static void tcf_proto_mark_delete(struct tcf_proto *tp)
{
	spin_lock(&tp->lock);
	tp->deleting = true;
	spin_unlock(&tp->lock);
}

static bool tcf_proto_is_deleting(struct tcf_proto *tp)
{
	bool deleting;

	spin_lock(&tp->lock);
	deleting = tp->deleting;
	spin_unlock(&tp->lock);

	return deleting;
}

#define ASSERT_BLOCK_LOCKED(block)					\
	lockdep_assert_held(&(block)->lock)

struct tcf_filter_chain_list_item {
	struct list_head list;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
};

static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
	if (!chain)
		return NULL;
	list_add_tail_rcu(&chain->list, &block->chain_list);
	mutex_init(&chain->filter_chain_lock);
	chain->block = block;
	chain->index = chain_index;
	chain->refcnt = 1;
	if (!chain->index)
		block->chain0.chain = chain;
	return chain;
}

static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
				       struct tcf_proto *tp_head)
{
	if (item->chain_head_change)
		item->chain_head_change(tp_head, item->chain_head_change_priv);
}

static void tcf_chain0_head_change(struct tcf_chain *chain,
				   struct tcf_proto *tp_head)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_block *block = chain->block;

	if (chain->index)
		return;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list)
		tcf_chain_head_change_item(item, tp_head);
	mutex_unlock(&block->lock);
}

/* Returns true if block can be safely freed. */

static bool tcf_chain_detach(struct tcf_chain *chain)
{
	struct tcf_block *block = chain->block;

	ASSERT_BLOCK_LOCKED(block);

	list_del_rcu(&chain->list);
	if (!chain->index)
		block->chain0.chain = NULL;

	if (list_empty(&block->chain_list) &&
	    refcount_read(&block->refcnt) == 0)
		return true;

	return false;
}

static void tcf_block_destroy(struct tcf_block *block)
{
	mutex_destroy(&block->lock);
	mutex_destroy(&block->proto_destroy_lock);
	kfree_rcu(block, rcu);
}

static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block)
{
	struct tcf_block *block = chain->block;

	mutex_destroy(&chain->filter_chain_lock);
	kfree_rcu(chain, rcu);
	if (free_block)
		tcf_block_destroy(block);
}

static void tcf_chain_hold(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	++chain->refcnt;
}

static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	/* In case all the references are action references, this
	 * chain should not be shown to the user.
	 */
	return chain->refcnt == chain->action_refcnt;
}

static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	list_for_each_entry(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
static struct tcf_chain *tcf_chain_lookup_rcu(const struct tcf_block *block,
					      u32 chain_index)
{
	struct tcf_chain *chain;

	list_for_each_entry_rcu(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}
#endif

static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
			   u32 seq, u16 flags, int event, bool unicast,
			   struct netlink_ext_ack *extack);

static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
					 u32 chain_index, bool create,
					 bool by_act)
{
	struct tcf_chain *chain = NULL;
	bool is_first_reference;

	mutex_lock(&block->lock);
	chain = tcf_chain_lookup(block, chain_index);
	if (chain) {
		tcf_chain_hold(chain);
	} else {
		if (!create)
			goto errout;
		chain = tcf_chain_create(block, chain_index);
		if (!chain)
			goto errout;
	}

	if (by_act)
		++chain->action_refcnt;
	is_first_reference = chain->refcnt - chain->action_refcnt == 1;
	mutex_unlock(&block->lock);

	/* Send notification only in case we got the first
	 * non-action reference. Until then, the chain acts only as
	 * a placeholder for actions pointing to it and the user ought
	 * not to know about them.
	 */
	if (is_first_reference && !by_act)
		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
				RTM_NEWCHAIN, false, NULL);

	return chain;

errout:
	mutex_unlock(&block->lock);
	return chain;
}

static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
				       bool create)
{
	return __tcf_chain_get(block, chain_index, create, false);
}

struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index)
{
	return __tcf_chain_get(block, chain_index, true, true);
}
EXPORT_SYMBOL(tcf_chain_get_by_act);
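
/* Sketch of the action-reference pattern: an action whose control verdict is
 * "goto chain X" pins the target chain via tcf_chain_get_by_act() and drops
 * that reference with tcf_chain_put_by_act() when the action is removed.
 * Chains held only this way stay hidden from user dumps, see
 * tcf_chain_held_by_acts_only() above.
 */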

static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
			       void *tmplt_priv);
static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
				  void *tmplt_priv, u32 chain_index,
				  struct tcf_block *block, struct sk_buff *oskb,
				  u32 seq, u16 flags, bool unicast);

static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
			    bool explicitly_created)
{
	struct tcf_block *block = chain->block;
	const struct tcf_proto_ops *tmplt_ops;
	unsigned int refcnt, non_act_refcnt;
	bool free_block = false;
	void *tmplt_priv;

	mutex_lock(&block->lock);
	if (explicitly_created) {
		if (!chain->explicitly_created) {
			mutex_unlock(&block->lock);
			return;
		}
		chain->explicitly_created = false;
	}

	if (by_act)
		chain->action_refcnt--;

	/* tc_chain_notify_delete() can't be called while holding the block
	 * lock. However, once the block is unlocked the chain can be changed
	 * concurrently, so save these to temporary variables.
	 */
	refcnt = --chain->refcnt;
	non_act_refcnt = refcnt - chain->action_refcnt;
	tmplt_ops = chain->tmplt_ops;
	tmplt_priv = chain->tmplt_priv;

	if (non_act_refcnt == chain->explicitly_created && !by_act) {
		if (non_act_refcnt == 0)
			tc_chain_notify_delete(tmplt_ops, tmplt_priv,
					       chain->index, block, NULL, 0, 0,
					       false);
		/* Last reference to chain, no need to lock. */
		chain->flushing = false;
	}

	if (refcnt == 0)
		free_block = tcf_chain_detach(chain);
	mutex_unlock(&block->lock);

	if (refcnt == 0) {
		tc_chain_tmplt_del(tmplt_ops, tmplt_priv);
		tcf_chain_destroy(chain, free_block);
	}
}

static void tcf_chain_put(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, false);
}

void tcf_chain_put_by_act(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, true, false);
}
EXPORT_SYMBOL(tcf_chain_put_by_act);

static void tcf_chain_put_explicitly_created(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, true);
}

static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held)
{
	struct tcf_proto *tp, *tp_next;

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_signal_destroying(chain, tp);
		tp = tp_next;
	}
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	RCU_INIT_POINTER(chain->filter_chain, NULL);
	tcf_chain0_head_change(chain, NULL);
	chain->flushing = true;
	mutex_unlock(&chain->filter_chain_lock);

	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_put(tp, rtnl_held, NULL);
		tp = tp_next;
	}
}

static int tcf_block_setup(struct tcf_block *block,
			   struct flow_block_offload *bo);

static void tcf_block_offload_init(struct flow_block_offload *bo,
				   struct net_device *dev, struct Qdisc *sch,
				   enum flow_block_command command,
				   enum flow_block_binder_type binder_type,
				   struct flow_block *flow_block,
				   bool shared, struct netlink_ext_ack *extack)
{
	bo->net = dev_net(dev);
	bo->command = command;
	bo->binder_type = binder_type;
	bo->block = flow_block;
	bo->block_shared = shared;
	bo->extack = extack;
	bo->sch = sch;
	bo->cb_list_head = &flow_block->cb_list;
	INIT_LIST_HEAD(&bo->cb_list);
}

static void tcf_block_unbind(struct tcf_block *block,
			     struct flow_block_offload *bo);

static void tc_block_indr_cleanup(struct flow_block_cb *block_cb)
{
	struct tcf_block *block = block_cb->indr.data;
	struct net_device *dev = block_cb->indr.dev;
	struct Qdisc *sch = block_cb->indr.sch;
	struct netlink_ext_ack extack = {};
	struct flow_block_offload bo = {};

	tcf_block_offload_init(&bo, dev, sch, FLOW_BLOCK_UNBIND,
			       block_cb->indr.binder_type,
			       &block->flow_block, tcf_block_shared(block),
			       &extack);
	rtnl_lock();
	down_write(&block->cb_lock);
	list_del(&block_cb->driver_list);
	list_move(&block_cb->list, &bo.cb_list);
	tcf_block_unbind(block, &bo);
	up_write(&block->cb_lock);
	rtnl_unlock();
}

static bool tcf_block_offload_in_use(struct tcf_block *block)
{
	return atomic_read(&block->offloadcnt);
}

static int tcf_block_offload_cmd(struct tcf_block *block,
				 struct net_device *dev, struct Qdisc *sch,
				 struct tcf_block_ext_info *ei,
				 enum flow_block_command command,
				 struct netlink_ext_ack *extack)
{
	struct flow_block_offload bo = {};

	tcf_block_offload_init(&bo, dev, sch, command, ei->binder_type,
			       &block->flow_block, tcf_block_shared(block),
			       extack);

	if (dev->netdev_ops->ndo_setup_tc) {
		int err;

		err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
		if (err < 0) {
			if (err != -EOPNOTSUPP)
				NL_SET_ERR_MSG(extack, "Driver ndo_setup_tc failed");
			return err;
		}

		return tcf_block_setup(block, &bo);
	}

	flow_indr_dev_setup_offload(dev, sch, TC_SETUP_BLOCK, block, &bo,
				    tc_block_indr_cleanup);
	tcf_block_setup(block, &bo);

	return -EOPNOTSUPP;
}

static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
				  struct tcf_block_ext_info *ei,
				  struct netlink_ext_ack *extack)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);

	/* If the tc offload feature is disabled and the block we try to bind
	 * to already has some offloaded filters, refuse to bind.
	 */
	if (dev->netdev_ops->ndo_setup_tc &&
	    !tc_can_offload(dev) &&
	    tcf_block_offload_in_use(block)) {
		NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");
		err = -EOPNOTSUPP;
		goto err_unlock;
	}

	err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_BIND, extack);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_inc;
	if (err)
		goto err_unlock;

	up_write(&block->cb_lock);
	return 0;

no_offload_dev_inc:
	if (tcf_block_offload_in_use(block))
		goto err_unlock;

	err = 0;
	block->nooffloaddevcnt++;
err_unlock:
	up_write(&block->cb_lock);
	return err;
}

static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
				     struct tcf_block_ext_info *ei)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);
	err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_UNBIND, NULL);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_dec;
	up_write(&block->cb_lock);
	return;

no_offload_dev_dec:
	WARN_ON(block->nooffloaddevcnt-- == 0);
	up_write(&block->cb_lock);
}

static int
tcf_chain0_head_change_cb_add(struct tcf_block *block,
			      struct tcf_block_ext_info *ei,
			      struct netlink_ext_ack *extack)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_chain *chain0;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item) {
		NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
		return -ENOMEM;
	}
	item->chain_head_change = ei->chain_head_change;
	item->chain_head_change_priv = ei->chain_head_change_priv;

	mutex_lock(&block->lock);
	chain0 = block->chain0.chain;
	if (chain0)
		tcf_chain_hold(chain0);
	else
		list_add(&item->list, &block->chain0.filter_chain_list);
	mutex_unlock(&block->lock);

	if (chain0) {
		struct tcf_proto *tp_head;

		mutex_lock(&chain0->filter_chain_lock);

		tp_head = tcf_chain_dereference(chain0->filter_chain, chain0);
		if (tp_head)
			tcf_chain_head_change_item(item, tp_head);

		mutex_lock(&block->lock);
		list_add(&item->list, &block->chain0.filter_chain_list);
		mutex_unlock(&block->lock);

		mutex_unlock(&chain0->filter_chain_lock);
		tcf_chain_put(chain0);
	}

	return 0;
}

static void
tcf_chain0_head_change_cb_del(struct tcf_block *block,
			      struct tcf_block_ext_info *ei)
{
	struct tcf_filter_chain_list_item *item;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list) {
		if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
		    (item->chain_head_change == ei->chain_head_change &&
		     item->chain_head_change_priv == ei->chain_head_change_priv)) {
			if (block->chain0.chain)
				tcf_chain_head_change_item(item, NULL);
			list_del(&item->list);
			mutex_unlock(&block->lock);

			kfree(item);
			return;
		}
	}
	mutex_unlock(&block->lock);
	WARN_ON(1);
}

struct tcf_net {
	spinlock_t idr_lock; /* Protects idr */
	struct idr idr;
};

static unsigned int tcf_net_id;

static int tcf_block_insert(struct tcf_block *block, struct net *net,
			    struct netlink_ext_ack *extack)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);
	int err;

	idr_preload(GFP_KERNEL);
	spin_lock(&tn->idr_lock);
	err = idr_alloc_u32(&tn->idr, block, &block->index, block->index,
			    GFP_NOWAIT);
	spin_unlock(&tn->idr_lock);
	idr_preload_end();

	return err;
}

static void tcf_block_remove(struct tcf_block *block, struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	spin_lock(&tn->idr_lock);
	idr_remove(&tn->idr, block->index);
	spin_unlock(&tn->idr_lock);
}

static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block) {
		NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
		return ERR_PTR(-ENOMEM);
	}
	mutex_init(&block->lock);
	mutex_init(&block->proto_destroy_lock);
	init_rwsem(&block->cb_lock);
	flow_block_init(&block->flow_block);
	INIT_LIST_HEAD(&block->chain_list);
	INIT_LIST_HEAD(&block->owner_list);
	INIT_LIST_HEAD(&block->chain0.filter_chain_list);

	refcount_set(&block->refcnt, 1);
	block->net = net;
	block->index = block_index;

	/* Don't store q pointer for blocks which are shared */
	if (!tcf_block_shared(block))
		block->q = q;
	return block;
}

static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	return idr_find(&tn->idr, block_index);
}

static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index)
{
	struct tcf_block *block;

	rcu_read_lock();
	block = tcf_block_lookup(net, block_index);
	if (block && !refcount_inc_not_zero(&block->refcnt))
		block = NULL;
	rcu_read_unlock();

	return block;
}

static struct tcf_chain *
__tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	mutex_lock(&block->lock);
	if (chain)
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);
	else
		chain = list_first_entry_or_null(&block->chain_list,
						 struct tcf_chain, list);

	/* skip all action-only chains */
	while (chain && tcf_chain_held_by_acts_only(chain))
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);

	if (chain)
		tcf_chain_hold(chain);
	mutex_unlock(&block->lock);

	return chain;
}

/* Function to be used by all clients that want to iterate over all chains on
 * a block. It properly obtains block->lock and takes a reference to the chain
 * before returning it. Users of this function must be tolerant to concurrent
 * chain insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that netlink dump callbacks cannot guarantee a consistent
 * dump because the rtnl lock is released each time the skb is filled with
 * data and sent to user-space.
 */

struct tcf_chain *
tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	struct tcf_chain *chain_next = __tcf_get_next_chain(block, chain);

	if (chain)
		tcf_chain_put(chain);

	return chain_next;
}
EXPORT_SYMBOL(tcf_get_next_chain);
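
/* Canonical iteration pattern (a sketch; tcf_block_flush_all_chains() below
 * uses the same shape):
 *
 *	for (chain = tcf_get_next_chain(block, NULL);
 *	     chain;
 *	     chain = tcf_get_next_chain(block, chain)) {
 *		...
 *	}
 *
 * Passing the previous chain back in both advances the cursor and drops the
 * reference taken on it, so a loop that breaks out early must put the
 * current chain itself.
 */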

static struct tcf_proto *
__tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
	u32 prio = 0;

	ASSERT_RTNL();
	mutex_lock(&chain->filter_chain_lock);

	if (!tp) {
		tp = tcf_chain_dereference(chain->filter_chain, chain);
	} else if (tcf_proto_is_deleting(tp)) {
		/* 'deleting' flag is set and chain->filter_chain_lock was
		 * unlocked, which means next pointer could be invalid. Restart
		 * search.
		 */
		prio = tp->prio + 1;
		tp = tcf_chain_dereference(chain->filter_chain, chain);

		for (; tp; tp = tcf_chain_dereference(tp->next, chain))
			if (!tp->deleting && tp->prio >= prio)
				break;
	} else {
		tp = tcf_chain_dereference(tp->next, chain);
	}

	if (tp)
		tcf_proto_get(tp);

	mutex_unlock(&chain->filter_chain_lock);

	return tp;
}

/* Function to be used by all clients that want to iterate over all tp's on
 * a chain. Users of this function must be tolerant to concurrent tp
 * insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that netlink dump callbacks cannot guarantee a consistent
 * dump because the rtnl lock is released each time the skb is filled with
 * data and sent to user-space.
 */

struct tcf_proto *
tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
	struct tcf_proto *tp_next = __tcf_get_next_proto(chain, tp);

	if (tp)
		tcf_proto_put(tp, true, NULL);

	return tp_next;
}
EXPORT_SYMBOL(tcf_get_next_proto);
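
/* Like tcf_get_next_chain(), this drives a cursor-style loop (sketch):
 *
 *	for (tp = tcf_get_next_proto(chain, NULL); tp;
 *	     tp = tcf_get_next_proto(chain, tp))
 *		...
 *
 * Each call releases the reference on the tp passed in, so again a loop that
 * exits early must drop the last reference itself via tcf_proto_put().
 */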

static void tcf_block_flush_all_chains(struct tcf_block *block, bool rtnl_held)
{
	struct tcf_chain *chain;

	/* Last reference to block. At this point chains cannot be added or
	 * removed concurrently.
	 */
	for (chain = tcf_get_next_chain(block, NULL);
	     chain;
	     chain = tcf_get_next_chain(block, chain)) {
		tcf_chain_put_explicitly_created(chain);
		tcf_chain_flush(chain, rtnl_held);
	}
}

/* Look up the Qdisc and increment its reference counter.
 * Set parent, if necessary.
 */

static int __tcf_qdisc_find(struct net *net, struct Qdisc **q,
			    u32 *parent, int ifindex, bool rtnl_held,
			    struct netlink_ext_ack *extack)
{
	const struct Qdisc_class_ops *cops;
	struct net_device *dev;
	int err = 0;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	rcu_read_lock();

	/* Find link */
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	/* Find qdisc */
	if (!*parent) {
		*q = rcu_dereference(dev->qdisc);
		*parent = (*q)->handle;
	} else {
		*q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
		if (!*q) {
			NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
			err = -EINVAL;
			goto errout_rcu;
		}
	}

	*q = qdisc_refcount_inc_nz(*q);
	if (!*q) {
		NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
		err = -EINVAL;
		goto errout_rcu;
	}

	/* Is it classful? */
	cops = (*q)->ops->cl_ops;
	if (!cops) {
		NL_SET_ERR_MSG(extack, "Qdisc not classful");
		err = -EINVAL;
		goto errout_qdisc;
	}

	if (!cops->tcf_block) {
		NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
		err = -EOPNOTSUPP;
		goto errout_qdisc;
	}

errout_rcu:
	/* At this point we know that the qdisc is not noop_qdisc,
	 * which means that the qdisc holds a reference to the net_device
	 * and we hold a reference to the qdisc, so it is safe to release
	 * the rcu read lock.
	 */
	rcu_read_unlock();
	return err;

errout_qdisc:
	rcu_read_unlock();

	if (rtnl_held)
		qdisc_put(*q);
	else
		qdisc_put_unlocked(*q);
	*q = NULL;

	return err;
}

static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl,
			       int ifindex, struct netlink_ext_ack *extack)
{
	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	/* Are we searching for a filter attached to a class? */
	if (TC_H_MIN(parent)) {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		*cl = cops->find(q, parent);
		if (*cl == 0) {
			NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
			return -ENOENT;
		}
	}

	return 0;
}

static struct tcf_block *__tcf_block_find(struct net *net, struct Qdisc *q,
					  unsigned long cl, int ifindex,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_refcnt_get(net, block_index);
		if (!block) {
			NL_SET_ERR_MSG(extack, "Block of given index was not found");
			return ERR_PTR(-EINVAL);
		}
	} else {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		block = cops->tcf_block(q, cl, extack);
		if (!block)
			return ERR_PTR(-EINVAL);

		if (tcf_block_shared(block)) {
			NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
			return ERR_PTR(-EOPNOTSUPP);
		}

		/* Always take a reference to the block in order to support
		 * execution of the rules update path of cls API without the
		 * rtnl lock. The caller must release the block when it is
		 * finished using it. The 'if' branch of this conditional
		 * obtains its reference by calling tcf_block_refcnt_get().
		 */
		refcount_inc(&block->refcnt);
	}

	return block;
}

static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
			    struct tcf_block_ext_info *ei, bool rtnl_held)
{
	if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) {
		/* Flushing/putting all chains will cause the block to be
		 * deallocated when the last chain is freed. However, if the
		 * chain_list is empty, the block has to be manually
		 * deallocated. Once the block's reference counter has reached
		 * 0, it is no longer possible to increment it or add new
		 * chains to the block.
		 */
		bool free_block = list_empty(&block->chain_list);

		mutex_unlock(&block->lock);
		if (tcf_block_shared(block))
			tcf_block_remove(block, block->net);

		if (q)
			tcf_block_offload_unbind(block, q, ei);

		if (free_block)
			tcf_block_destroy(block);
		else
			tcf_block_flush_all_chains(block, rtnl_held);
	} else if (q) {
		tcf_block_offload_unbind(block, q, ei);
	}
}

static void tcf_block_refcnt_put(struct tcf_block *block, bool rtnl_held)
{
	__tcf_block_put(block, NULL, NULL, rtnl_held);
}

/* Find tcf block.
 * Set q, parent, cl when appropriate.
 */

static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
					u32 *parent, unsigned long *cl,
					int ifindex, u32 block_index,
					struct netlink_ext_ack *extack)
{
	struct tcf_block *block;
	int err = 0;

	ASSERT_RTNL();

	err = __tcf_qdisc_find(net, q, parent, ifindex, true, extack);
	if (err)
		goto errout;

	err = __tcf_qdisc_cl_find(*q, *parent, cl, ifindex, extack);
	if (err)
		goto errout_qdisc;

	block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout_qdisc;
	}

	return block;

errout_qdisc:
	if (*q)
		qdisc_put(*q);
errout:
	*q = NULL;
	return ERR_PTR(err);
}

static void tcf_block_release(struct Qdisc *q, struct tcf_block *block,
			      bool rtnl_held)
{
	if (!IS_ERR_OR_NULL(block))
		tcf_block_refcnt_put(block, rtnl_held);

	if (q) {
		if (rtnl_held)
			qdisc_put(q);
		else
			qdisc_put_unlocked(q);
	}
}

struct tcf_block_owner_item {
	struct list_head list;
	struct Qdisc *q;
	enum flow_block_binder_type binder_type;
};

static void
tcf_block_owner_netif_keep_dst(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	if (block->keep_dst &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
		netif_keep_dst(qdisc_dev(q));
}

void tcf_block_netif_keep_dst(struct tcf_block *block)
{
	struct tcf_block_owner_item *item;

	block->keep_dst = true;
	list_for_each_entry(item, &block->owner_list, list)
		tcf_block_owner_netif_keep_dst(block, item->q,
					       item->binder_type);
}
EXPORT_SYMBOL(tcf_block_netif_keep_dst);

static int tcf_block_owner_add(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item)
		return -ENOMEM;
	item->q = q;
	item->binder_type = binder_type;
	list_add(&item->list, &block->owner_list);
	return 0;
}

static void tcf_block_owner_del(struct tcf_block *block,
				struct Qdisc *q,
				enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	list_for_each_entry(item, &block->owner_list, list) {
		if (item->q == q && item->binder_type == binder_type) {
			list_del(&item->list);
			kfree(item);
			return;
		}
	}
	WARN_ON(1);
}

int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	struct net *net = qdisc_net(q);
	struct tcf_block *block = NULL;
	int err;

	if (ei->block_index)
		/* block_index not 0 means the shared block is requested */
		block = tcf_block_refcnt_get(net, ei->block_index);

	if (!block) {
		block = tcf_block_create(net, q, ei->block_index, extack);
		if (IS_ERR(block))
			return PTR_ERR(block);
		if (tcf_block_shared(block)) {
			err = tcf_block_insert(block, net, extack);
			if (err)
				goto err_block_insert;
		}
	}

	err = tcf_block_owner_add(block, q, ei->binder_type);
	if (err)
		goto err_block_owner_add;

	tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);

	err = tcf_chain0_head_change_cb_add(block, ei, extack);
	if (err)
		goto err_chain0_head_change_cb_add;

	err = tcf_block_offload_bind(block, q, ei, extack);
	if (err)
		goto err_block_offload_bind;

	*p_block = block;
	return 0;

err_block_offload_bind:
	tcf_chain0_head_change_cb_del(block, ei);
err_chain0_head_change_cb_add:
	tcf_block_owner_del(block, q, ei->binder_type);
err_block_owner_add:
err_block_insert:
	tcf_block_refcnt_put(block, true);
	return err;
}
EXPORT_SYMBOL(tcf_block_get_ext);

static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
{
	struct tcf_proto __rcu **p_filter_chain = priv;

	rcu_assign_pointer(*p_filter_chain, tp_head);
}

int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	struct tcf_block_ext_info ei = {
		.chain_head_change = tcf_chain_head_change_dflt,
		.chain_head_change_priv = p_filter_chain,
	};

	WARN_ON(!p_filter_chain);
	return tcf_block_get_ext(p_block, q, &ei, extack);
}
EXPORT_SYMBOL(tcf_block_get);
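
/* Usage sketch (hypothetical classful qdisc): a qdisc that owns a single
 * filter list typically calls this from its ->init():
 *
 *	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
 *	if (err)
 *		return err;
 *
 * and pairs it with tcf_block_put(q->block) in ->destroy(). The default
 * chain-head callback then keeps q->filter_list pointing at the current
 * head of chain 0.
 */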

/* XXX: Standalone actions are not allowed to jump to any chain, and bound
 * actions should all be removed after flushing.
 */
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
	if (!block)
		return;
	tcf_chain0_head_change_cb_del(block, ei);
	tcf_block_owner_del(block, q, ei->binder_type);

	__tcf_block_put(block, q, ei, true);
}
EXPORT_SYMBOL(tcf_block_put_ext);

void tcf_block_put(struct tcf_block *block)
{
	struct tcf_block_ext_info ei = {0, };

	if (!block)
		return;
	tcf_block_put_ext(block, block->q, &ei);
}
EXPORT_SYMBOL(tcf_block_put);
1522
1523 static int
1524 tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
1525                             void *cb_priv, bool add, bool offload_in_use,
1526                             struct netlink_ext_ack *extack)
1527 {
1528         struct tcf_chain *chain, *chain_prev;
1529         struct tcf_proto *tp, *tp_prev;
1530         int err;
1531
1532         lockdep_assert_held(&block->cb_lock);
1533
1534         for (chain = __tcf_get_next_chain(block, NULL);
1535              chain;
1536              chain_prev = chain,
1537                      chain = __tcf_get_next_chain(block, chain),
1538                      tcf_chain_put(chain_prev)) {
1539                 for (tp = __tcf_get_next_proto(chain, NULL); tp;
1540                      tp_prev = tp,
1541                              tp = __tcf_get_next_proto(chain, tp),
1542                              tcf_proto_put(tp_prev, true, NULL)) {
1543                         if (tp->ops->reoffload) {
1544                                 err = tp->ops->reoffload(tp, add, cb, cb_priv,
1545                                                          extack);
1546                                 if (err && add)
1547                                         goto err_playback_remove;
1548                         } else if (add && offload_in_use) {
1549                                 err = -EOPNOTSUPP;
1550                                 NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support");
1551                                 goto err_playback_remove;
1552                         }
1553                 }
1554         }
1555
1556         return 0;
1557
1558 err_playback_remove:
1559         tcf_proto_put(tp, true, NULL);
1560         tcf_chain_put(chain);
1561         tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use,
1562                                     extack);
1563         return err;
1564 }
1565
1566 static int tcf_block_bind(struct tcf_block *block,
1567                           struct flow_block_offload *bo)
1568 {
1569         struct flow_block_cb *block_cb, *next;
1570         int err, i = 0;
1571
1572         lockdep_assert_held(&block->cb_lock);
1573
1574         list_for_each_entry(block_cb, &bo->cb_list, list) {
1575                 err = tcf_block_playback_offloads(block, block_cb->cb,
1576                                                   block_cb->cb_priv, true,
1577                                                   tcf_block_offload_in_use(block),
1578                                                   bo->extack);
1579                 if (err)
1580                         goto err_unroll;
1581                 if (!bo->unlocked_driver_cb)
1582                         block->lockeddevcnt++;
1583
1584                 i++;
1585         }
1586         list_splice(&bo->cb_list, &block->flow_block.cb_list);
1587
1588         return 0;
1589
1590 err_unroll:
1591         list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
1592                 list_del(&block_cb->driver_list);
1593                 if (i-- > 0) {
1594                         list_del(&block_cb->list);
1595                         tcf_block_playback_offloads(block, block_cb->cb,
1596                                                     block_cb->cb_priv, false,
1597                                                     tcf_block_offload_in_use(block),
1598                                                     NULL);
1599                         if (!bo->unlocked_driver_cb)
1600                                 block->lockeddevcnt--;
1601                 }
1602                 flow_block_cb_free(block_cb);
1603         }
1604
1605         return err;
1606 }
1607
1608 static void tcf_block_unbind(struct tcf_block *block,
1609                              struct flow_block_offload *bo)
1610 {
1611         struct flow_block_cb *block_cb, *next;
1612
1613         lockdep_assert_held(&block->cb_lock);
1614
1615         list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
1616                 tcf_block_playback_offloads(block, block_cb->cb,
1617                                             block_cb->cb_priv, false,
1618                                             tcf_block_offload_in_use(block),
1619                                             NULL);
1620                 list_del(&block_cb->list);
1621                 flow_block_cb_free(block_cb);
1622                 if (!bo->unlocked_driver_cb)
1623                         block->lockeddevcnt--;
1624         }
1625 }
1626
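/* Dispatch a flow_block_offload command: FLOW_BLOCK_BIND attaches the
 * callbacks in @bo to @block, FLOW_BLOCK_UNBIND detaches them.  Any
 * other command is a bug.
 */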
1627 static int tcf_block_setup(struct tcf_block *block,
1628                            struct flow_block_offload *bo)
1629 {
1630         int err;
1631
1632         switch (bo->command) {
1633         case FLOW_BLOCK_BIND:
1634                 err = tcf_block_bind(block, bo);
1635                 break;
1636         case FLOW_BLOCK_UNBIND:
1637                 err = 0;
1638                 tcf_block_unbind(block, bo);
1639                 break;
1640         default:
1641                 WARN_ON_ONCE(1);
1642                 err = -EOPNOTSUPP;
1643         }
1644
1645         return err;
1646 }
1647
1648 /* Main classifier routine: scans the classifier chain attached
1649  * to this qdisc, (optionally) tests for the protocol and asks
1650  * the specific classifiers.
1651  */
1652 static inline int __tcf_classify(struct sk_buff *skb,
1653                                  const struct tcf_proto *tp,
1654                                  const struct tcf_proto *orig_tp,
1655                                  struct tcf_result *res,
1656                                  bool compat_mode,
1657                                  struct tcf_exts_miss_cookie_node *n,
1658                                  int act_index,
1659                                  u32 *last_executed_chain)
1660 {
1661         u32 orig_reason = res->drop_reason;
1662 #ifdef CONFIG_NET_CLS_ACT
1663         const int max_reclassify_loop = 16;
1664         const struct tcf_proto *first_tp;
1665         int limit = 0;
1666
1667 reclassify:
1668 #endif
1669         for (; tp; tp = rcu_dereference_bh(tp->next)) {
1670                 __be16 protocol = skb_protocol(skb, false);
1671                 int err = 0;
1672
1673                 if (n) {
1674                         struct tcf_exts *exts;
1675
1676                         if (n->tp_prio != tp->prio)
1677                                 continue;
1678
1679                         /* We re-look up the tp and chain by index instead of
1680                          * holding hard refs and locks on them, so sanity-check
1681                          * whether any of tp, chain or exts was replaced by the
1682                          * time we got here with a cookie from hardware.
1683                          */
1684                         if (unlikely(n->tp != tp || n->tp->chain != n->chain ||
1685                                      !tp->ops->get_exts)) {
1686                                 tcf_set_drop_reason(res, SKB_DROP_REASON_TC_ERROR);
1687                                 return TC_ACT_SHOT;
1688                         }
1689
1690                         exts = tp->ops->get_exts(tp, n->handle);
1691                         if (unlikely(!exts || n->exts != exts)) {
1692                                 tcf_set_drop_reason(res, SKB_DROP_REASON_TC_ERROR);
1693                                 return TC_ACT_SHOT;
1694                         }
1695
1696                         n = NULL;
1697                         err = tcf_exts_exec_ex(skb, exts, act_index, res);
1698                 } else {
1699                         if (tp->protocol != protocol &&
1700                             tp->protocol != htons(ETH_P_ALL))
1701                                 continue;
1702
1703                         err = tc_classify(skb, tp, res);
1704                 }
1705 #ifdef CONFIG_NET_CLS_ACT
1706                 if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
1707                         first_tp = orig_tp;
1708                         *last_executed_chain = first_tp->chain->index;
1709                         goto reset;
1710                 } else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
1711                         first_tp = res->goto_tp;
1712                         *last_executed_chain = err & TC_ACT_EXT_VAL_MASK;
1713                         goto reset;
1714                 }
1715 #endif
1716                 if (err >= 0) {
1717                         /* The drop reason set by a policy drop may have been
1718                          * overwritten by a classifier with a bogus value (0); restore it. */
1719                         if (err == TC_ACT_SHOT &&
1720                             res->drop_reason == SKB_NOT_DROPPED_YET)
1721                                 tcf_set_drop_reason(res, orig_reason);
1722                         return err;
1723                 }
1724         }
1725
1726         if (unlikely(n)) {
1727                 tcf_set_drop_reason(res, SKB_DROP_REASON_TC_ERROR);
1728                 return TC_ACT_SHOT;
1729         }
1730
1731         return TC_ACT_UNSPEC; /* signal: continue lookup */
1732 #ifdef CONFIG_NET_CLS_ACT
1733 reset:
1734         if (unlikely(limit++ >= max_reclassify_loop)) {
1735                 net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n",
1736                                        tp->chain->block->index,
1737                                        tp->prio & 0xffff,
1738                                        ntohs(tp->protocol));
1739                 tcf_set_drop_reason(res, SKB_DROP_REASON_TC_ERROR);
1740                 return TC_ACT_SHOT;
1741         }
1742
1743         tp = first_tp;
1744         goto reclassify;
1745 #endif
1746 }
1747
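/* Public classification entry point.  With CONFIG_NET_TC_SKB_EXT the skb
 * may carry a tc_skb_ext describing where a previous (typically
 * hardware) classification run stopped: either a chain index to continue
 * from, or an act_miss cookie that tcf_exts_miss_cookie_lookup() resolves
 * back to the exact filter and action index.  The extension is consumed
 * here so clones and redirected skbs do not inherit it.  If
 * classification ends with TC_ACT_UNSPEC on a non-zero chain, a fresh
 * extension is attached so a later run can resume from the last executed
 * chain.
 */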
1748 int tcf_classify(struct sk_buff *skb,
1749                  const struct tcf_block *block,
1750                  const struct tcf_proto *tp,
1751                  struct tcf_result *res, bool compat_mode)
1752 {
1753 #if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
1754         u32 last_executed_chain = 0;
1755
1756         return __tcf_classify(skb, tp, tp, res, compat_mode, NULL, 0,
1757                               &last_executed_chain);
1758 #else
1759         u32 last_executed_chain = tp ? tp->chain->index : 0;
1760         struct tcf_exts_miss_cookie_node *n = NULL;
1761         const struct tcf_proto *orig_tp = tp;
1762         struct tc_skb_ext *ext;
1763         int act_index = 0;
1764         int ret;
1765
1766         if (block) {
1767                 ext = skb_ext_find(skb, TC_SKB_EXT);
1768
1769                 if (ext && (ext->chain || ext->act_miss)) {
1770                         struct tcf_chain *fchain;
1771                         u32 chain;
1772
1773                         if (ext->act_miss) {
1774                                 n = tcf_exts_miss_cookie_lookup(ext->act_miss_cookie,
1775                                                                 &act_index);
1776                                 if (!n) {
1777                                         tcf_set_drop_reason(res, SKB_DROP_REASON_TC_ERROR);
1778                                         return TC_ACT_SHOT;
1779                                 }
1780
1781                                 chain = n->chain_index;
1782                         } else {
1783                                 chain = ext->chain;
1784                         }
1785
1786                         fchain = tcf_chain_lookup_rcu(block, chain);
1787                         if (!fchain) {
1788                                 tcf_set_drop_reason(res, SKB_DROP_REASON_TC_ERROR);
1789                                 return TC_ACT_SHOT;
1790                         }
1791
1792                         /* Consume, so cloned/redirect skbs won't inherit ext */
1793                         skb_ext_del(skb, TC_SKB_EXT);
1794
1795                         tp = rcu_dereference_bh(fchain->filter_chain);
1796                         last_executed_chain = fchain->index;
1797                 }
1798         }
1799
1800         ret = __tcf_classify(skb, tp, orig_tp, res, compat_mode, n, act_index,
1801                              &last_executed_chain);
1802
1803         if (tc_skb_ext_tc_enabled()) {
1804                 /* If we missed on some chain */
1805                 if (ret == TC_ACT_UNSPEC && last_executed_chain) {
1806                         struct tc_skb_cb *cb = tc_skb_cb(skb);
1807
1808                         ext = tc_skb_ext_alloc(skb);
1809                         if (WARN_ON_ONCE(!ext)) {
1810                                 tcf_set_drop_reason(res, SKB_DROP_REASON_TC_ERROR);
1811                                 return TC_ACT_SHOT;
1812                         }
1813
1814                         ext->chain = last_executed_chain;
1815                         ext->mru = cb->mru;
1816                         ext->post_ct = cb->post_ct;
1817                         ext->post_ct_snat = cb->post_ct_snat;
1818                         ext->post_ct_dnat = cb->post_ct_dnat;
1819                         ext->zone = cb->zone;
1820                 }
1821         }
1822
1823         return ret;
1824 #endif
1825 }
1826 EXPORT_SYMBOL(tcf_classify);
1827
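/* Cursor into a chain's singly linked, priority ordered classifier list:
 * @pprev points at the link to rewrite on insert or remove, @next caches
 * the element that follows it.  Users must hold
 * chain->filter_chain_lock while the cursor is in use.
 */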
1828 struct tcf_chain_info {
1829         struct tcf_proto __rcu **pprev;
1830         struct tcf_proto __rcu *next;
1831 };
1832
1833 static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain *chain,
1834                                            struct tcf_chain_info *chain_info)
1835 {
1836         return tcf_chain_dereference(*chain_info->pprev, chain);
1837 }
1838
1839 static int tcf_chain_tp_insert(struct tcf_chain *chain,
1840                                struct tcf_chain_info *chain_info,
1841                                struct tcf_proto *tp)
1842 {
1843         if (chain->flushing)
1844                 return -EAGAIN;
1845
1846         RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
1847         if (*chain_info->pprev == chain->filter_chain)
1848                 tcf_chain0_head_change(chain, tp);
1849         tcf_proto_get(tp);
1850         rcu_assign_pointer(*chain_info->pprev, tp);
1851
1852         return 0;
1853 }
1854
1855 static void tcf_chain_tp_remove(struct tcf_chain *chain,
1856                                 struct tcf_chain_info *chain_info,
1857                                 struct tcf_proto *tp)
1858 {
1859         struct tcf_proto *next = tcf_chain_dereference(chain_info->next, chain);
1860
1861         tcf_proto_mark_delete(tp);
1862         if (tp == chain->filter_chain)
1863                 tcf_chain0_head_change(chain, next);
1864         RCU_INIT_POINTER(*chain_info->pprev, next);
1865 }
1866
1867 static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
1868                                            struct tcf_chain_info *chain_info,
1869                                            u32 protocol, u32 prio,
1870                                            bool prio_allocate);
1871
1872 /* Try to insert new proto.
1873  * If proto with specified priority already exists, free new proto
1874  * and return existing one.
1875  */
1876
1877 static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain,
1878                                                     struct tcf_proto *tp_new,
1879                                                     u32 protocol, u32 prio,
1880                                                     bool rtnl_held)
1881 {
1882         struct tcf_chain_info chain_info;
1883         struct tcf_proto *tp;
1884         int err = 0;
1885
1886         mutex_lock(&chain->filter_chain_lock);
1887
1888         if (tcf_proto_exists_destroying(chain, tp_new)) {
1889                 mutex_unlock(&chain->filter_chain_lock);
1890                 tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1891                 return ERR_PTR(-EAGAIN);
1892         }
1893
1894         tp = tcf_chain_tp_find(chain, &chain_info,
1895                                protocol, prio, false);
1896         if (!tp)
1897                 err = tcf_chain_tp_insert(chain, &chain_info, tp_new);
1898         mutex_unlock(&chain->filter_chain_lock);
1899
1900         if (tp) {
1901                 tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1902                 tp_new = tp;
1903         } else if (err) {
1904                 tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1905                 tp_new = ERR_PTR(err);
1906         }
1907
1908         return tp_new;
1909 }
1910
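/* Remove @tp from @chain if it no longer contains any filters.  The list
 * is re-walked under filter_chain_lock, so a tp that was concurrently
 * unlinked, or that had new filters inserted in the meantime
 * (tcf_proto_check_delete()), is left alone.
 */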
1911 static void tcf_chain_tp_delete_empty(struct tcf_chain *chain,
1912                                       struct tcf_proto *tp, bool rtnl_held,
1913                                       struct netlink_ext_ack *extack)
1914 {
1915         struct tcf_chain_info chain_info;
1916         struct tcf_proto *tp_iter;
1917         struct tcf_proto **pprev;
1918         struct tcf_proto *next;
1919
1920         mutex_lock(&chain->filter_chain_lock);
1921
1922         /* Atomically find and remove tp from chain. */
1923         for (pprev = &chain->filter_chain;
1924              (tp_iter = tcf_chain_dereference(*pprev, chain));
1925              pprev = &tp_iter->next) {
1926                 if (tp_iter == tp) {
1927                         chain_info.pprev = pprev;
1928                         chain_info.next = tp_iter->next;
1929                         WARN_ON(tp_iter->deleting);
1930                         break;
1931                 }
1932         }
1933         /* Verify that tp still exists and no new filters were inserted
1934          * concurrently.
1935          * Mark tp for deletion if it is empty.
1936          */
1937         if (!tp_iter || !tcf_proto_check_delete(tp)) {
1938                 mutex_unlock(&chain->filter_chain_lock);
1939                 return;
1940         }
1941
1942         tcf_proto_signal_destroying(chain, tp);
1943         next = tcf_chain_dereference(chain_info.next, chain);
1944         if (tp == chain->filter_chain)
1945                 tcf_chain0_head_change(chain, next);
1946         RCU_INIT_POINTER(*chain_info.pprev, next);
1947         mutex_unlock(&chain->filter_chain_lock);
1948
1949         tcf_proto_put(tp, rtnl_held, extack);
1950 }
1951
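/* Look up a classifier by (protocol, prio) on the ascending-prio list.
 * Returns the matching tp with a reference taken, NULL with *chain_info
 * set to the insertion point when no tp of that prio exists, or
 * ERR_PTR(-EINVAL) when the prio is occupied by a different protocol or
 * the caller asked to auto-allocate a prio that is already in use.
 */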
1952 static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
1953                                            struct tcf_chain_info *chain_info,
1954                                            u32 protocol, u32 prio,
1955                                            bool prio_allocate)
1956 {
1957         struct tcf_proto **pprev;
1958         struct tcf_proto *tp;
1959
1960         /* Check the chain for existence of proto-tcf with this priority */
1961         for (pprev = &chain->filter_chain;
1962              (tp = tcf_chain_dereference(*pprev, chain));
1963              pprev = &tp->next) {
1964                 if (tp->prio >= prio) {
1965                         if (tp->prio == prio) {
1966                                 if (prio_allocate ||
1967                                     (tp->protocol != protocol && protocol))
1968                                         return ERR_PTR(-EINVAL);
1969                         } else {
1970                                 tp = NULL;
1971                         }
1972                         break;
1973                 }
1974         }
1975         chain_info->pprev = pprev;
1976         if (tp) {
1977                 chain_info->next = tp->next;
1978                 tcf_proto_get(tp);
1979         } else {
1980                 chain_info->next = NULL;
1981         }
1982         return tp;
1983 }
1984
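/* Fill a single RTM_*TFILTER message.  tcm_info packs the priority into
 * the upper 16 bits and the protocol into the lower 16 via TC_H_MAKE();
 * terse dumps go through the classifier's ->terse_dump() and fail for
 * classifiers that do not provide one.
 */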
1985 static int tcf_fill_node(struct net *net, struct sk_buff *skb,
1986                          struct tcf_proto *tp, struct tcf_block *block,
1987                          struct Qdisc *q, u32 parent, void *fh,
1988                          u32 portid, u32 seq, u16 flags, int event,
1989                          bool terse_dump, bool rtnl_held,
1990                          struct netlink_ext_ack *extack)
1991 {
1992         struct tcmsg *tcm;
1993         struct nlmsghdr  *nlh;
1994         unsigned char *b = skb_tail_pointer(skb);
1995
1996         nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
1997         if (!nlh)
1998                 goto out_nlmsg_trim;
1999         tcm = nlmsg_data(nlh);
2000         tcm->tcm_family = AF_UNSPEC;
2001         tcm->tcm__pad1 = 0;
2002         tcm->tcm__pad2 = 0;
2003         if (q) {
2004                 tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
2005                 tcm->tcm_parent = parent;
2006         } else {
2007                 tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
2008                 tcm->tcm_block_index = block->index;
2009         }
2010         tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
2011         if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
2012                 goto nla_put_failure;
2013         if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
2014                 goto nla_put_failure;
2015         if (!fh) {
2016                 tcm->tcm_handle = 0;
2017         } else if (terse_dump) {
2018                 if (tp->ops->terse_dump) {
2019                         if (tp->ops->terse_dump(net, tp, fh, skb, tcm,
2020                                                 rtnl_held) < 0)
2021                                 goto nla_put_failure;
2022                 } else {
2023                         goto cls_op_not_supp;
2024                 }
2025         } else {
2026                 if (tp->ops->dump &&
2027                     tp->ops->dump(net, tp, fh, skb, tcm, rtnl_held) < 0)
2028                         goto nla_put_failure;
2029         }
2030
2031         if (extack && extack->_msg &&
2032             nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg))
2033                 goto nla_put_failure;
2034
2035         nlh->nlmsg_len = skb_tail_pointer(skb) - b;
2036
2037         return skb->len;
2038
2039 out_nlmsg_trim:
2040 nla_put_failure:
2041 cls_op_not_supp:
2042         nlmsg_trim(skb, b);
2043         return -1;
2044 }
2045
2046 static int tfilter_notify(struct net *net, struct sk_buff *oskb,
2047                           struct nlmsghdr *n, struct tcf_proto *tp,
2048                           struct tcf_block *block, struct Qdisc *q,
2049                           u32 parent, void *fh, int event, bool unicast,
2050                           bool rtnl_held, struct netlink_ext_ack *extack)
2051 {
2052         struct sk_buff *skb;
2053         u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2054         int err = 0;
2055
2056         skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2057         if (!skb)
2058                 return -ENOBUFS;
2059
2060         if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
2061                           n->nlmsg_seq, n->nlmsg_flags, event,
2062                           false, rtnl_held, extack) <= 0) {
2063                 kfree_skb(skb);
2064                 return -EINVAL;
2065         }
2066
2067         if (unicast)
2068                 err = rtnl_unicast(skb, net, portid);
2069         else
2070                 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
2071                                      n->nlmsg_flags & NLM_F_ECHO);
2072         return err;
2073 }
2074
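/* Like tfilter_notify(), but for deletion: the notification skb must be
 * built *before* ->delete() runs, because @fh is owned by the classifier
 * and is freed by the delete operation.  @last is set by the classifier
 * when the removed filter was its last one.
 */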
2075 static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
2076                               struct nlmsghdr *n, struct tcf_proto *tp,
2077                               struct tcf_block *block, struct Qdisc *q,
2078                               u32 parent, void *fh, bool unicast, bool *last,
2079                               bool rtnl_held, struct netlink_ext_ack *extack)
2080 {
2081         struct sk_buff *skb;
2082         u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2083         int err;
2084
2085         skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2086         if (!skb)
2087                 return -ENOBUFS;
2088
2089         if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
2090                           n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER,
2091                           false, rtnl_held, extack) <= 0) {
2092                 NL_SET_ERR_MSG(extack, "Failed to build del event notification");
2093                 kfree_skb(skb);
2094                 return -EINVAL;
2095         }
2096
2097         err = tp->ops->delete(tp, fh, last, rtnl_held, extack);
2098         if (err) {
2099                 kfree_skb(skb);
2100                 return err;
2101         }
2102
2103         if (unicast)
2104                 err = rtnl_unicast(skb, net, portid);
2105         else
2106                 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
2107                                      n->nlmsg_flags & NLM_F_ECHO);
2108         if (err < 0)
2109                 NL_SET_ERR_MSG(extack, "Failed to send filter delete notification");
2110
2111         return err;
2112 }
2113
2114 static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
2115                                  struct tcf_block *block, struct Qdisc *q,
2116                                  u32 parent, struct nlmsghdr *n,
2117                                  struct tcf_chain *chain, int event,
2118                                  struct netlink_ext_ack *extack)
2119 {
2120         struct tcf_proto *tp;
2121
2122         for (tp = tcf_get_next_proto(chain, NULL);
2123              tp; tp = tcf_get_next_proto(chain, tp))
2124                 tfilter_notify(net, oskb, n, tp, block, q, parent, NULL,
2125                                event, false, true, extack);
2126 }
2127
2128 static void tfilter_put(struct tcf_proto *tp, void *fh)
2129 {
2130         if (tp->ops->put && fh)
2131                 tp->ops->put(tp, fh);
2132 }
2133
2134 static bool is_qdisc_ingress(__u32 classid)
2135 {
2136         return (TC_H_MIN(classid) == TC_H_MIN(TC_H_MIN_INGRESS));
2137 }
2138
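/* RTM_NEWTFILTER handler.  The rtnl lock is taken only when something on
 * the path requires it (shared block, locked qdisc or classifier, or a
 * replay); otherwise unlocked classifiers run with TCA_ACT_FLAGS_NO_RTNL.
 * An -EAGAIN caused by a concurrent chain flush restarts the whole
 * request under rtnl via the replay label.  An illustrative userspace
 * trigger (not from this file) would be:
 *
 *   tc filter add dev eth0 ingress prio 1 protocol ip \
 *           flower ip_proto tcp action drop
 */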
2139 static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2140                           struct netlink_ext_ack *extack)
2141 {
2142         struct net *net = sock_net(skb->sk);
2143         struct nlattr *tca[TCA_MAX + 1];
2144         char name[IFNAMSIZ];
2145         struct tcmsg *t;
2146         u32 protocol;
2147         u32 prio;
2148         bool prio_allocate;
2149         u32 parent;
2150         u32 chain_index;
2151         struct Qdisc *q;
2152         struct tcf_chain_info chain_info;
2153         struct tcf_chain *chain;
2154         struct tcf_block *block;
2155         struct tcf_proto *tp;
2156         unsigned long cl;
2157         void *fh;
2158         int err;
2159         int tp_created;
2160         bool rtnl_held = false;
2161         u32 flags;
2162
2163 replay:
2164         tp_created = 0;
2165
2166         err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2167                                      rtm_tca_policy, extack);
2168         if (err < 0)
2169                 return err;
2170
2171         t = nlmsg_data(n);
2172         protocol = TC_H_MIN(t->tcm_info);
2173         prio = TC_H_MAJ(t->tcm_info);
2174         prio_allocate = false;
2175         parent = t->tcm_parent;
2176         tp = NULL;
2177         cl = 0;
2178         block = NULL;
2179         q = NULL;
2180         chain = NULL;
2181         flags = 0;
2182
2183         if (prio == 0) {
2184                 /* If no priority is provided by the user,
2185                  * we allocate one.
2186                  */
2187                 if (n->nlmsg_flags & NLM_F_CREATE) {
2188                         prio = TC_H_MAKE(0x80000000U, 0U);
2189                         prio_allocate = true;
2190                 } else {
2191                         NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
2192                         return -ENOENT;
2193                 }
2194         }
2195
2196         /* Find head of filter chain. */
2197
2198         err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2199         if (err)
2200                 return err;
2201
2202         if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2203                 NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2204                 err = -EINVAL;
2205                 goto errout;
2206         }
2207
2208         /* Take the rtnl mutex if rtnl_held was set on a previous iteration,
2209          * the block is shared (no qdisc found), the qdisc is not unlocked,
2210          * the classifier type is unspecified, or the classifier is not unlocked.
2211          */
2212         if (rtnl_held ||
2213             (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2214             !tcf_proto_is_unlocked(name)) {
2215                 rtnl_held = true;
2216                 rtnl_lock();
2217         }
2218
2219         err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2220         if (err)
2221                 goto errout;
2222
2223         block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2224                                  extack);
2225         if (IS_ERR(block)) {
2226                 err = PTR_ERR(block);
2227                 goto errout;
2228         }
2229         block->classid = parent;
2230
2231         chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2232         if (chain_index > TC_ACT_EXT_VAL_MASK) {
2233                 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2234                 err = -EINVAL;
2235                 goto errout;
2236         }
2237         chain = tcf_chain_get(block, chain_index, true);
2238         if (!chain) {
2239                 NL_SET_ERR_MSG(extack, "Cannot create specified filter chain");
2240                 err = -ENOMEM;
2241                 goto errout;
2242         }
2243
2244         mutex_lock(&chain->filter_chain_lock);
2245         tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2246                                prio, prio_allocate);
2247         if (IS_ERR(tp)) {
2248                 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2249                 err = PTR_ERR(tp);
2250                 goto errout_locked;
2251         }
2252
2253         if (tp == NULL) {
2254                 struct tcf_proto *tp_new = NULL;
2255
2256                 if (chain->flushing) {
2257                         err = -EAGAIN;
2258                         goto errout_locked;
2259                 }
2260
2261                 /* Proto-tcf does not exist, create new one */
2262
2263                 if (tca[TCA_KIND] == NULL || !protocol) {
2264                         NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified");
2265                         err = -EINVAL;
2266                         goto errout_locked;
2267                 }
2268
2269                 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2270                         NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
2271                         err = -ENOENT;
2272                         goto errout_locked;
2273                 }
2274
2275                 if (prio_allocate)
2276                         prio = tcf_auto_prio(tcf_chain_tp_prev(chain,
2277                                                                &chain_info));
2278
2279                 mutex_unlock(&chain->filter_chain_lock);
2280                 tp_new = tcf_proto_create(name, protocol, prio, chain,
2281                                           rtnl_held, extack);
2282                 if (IS_ERR(tp_new)) {
2283                         err = PTR_ERR(tp_new);
2284                         goto errout_tp;
2285                 }
2286
2287                 tp_created = 1;
2288                 tp = tcf_chain_tp_insert_unique(chain, tp_new, protocol, prio,
2289                                                 rtnl_held);
2290                 if (IS_ERR(tp)) {
2291                         err = PTR_ERR(tp);
2292                         goto errout_tp;
2293                 }
2294         } else {
2295                 mutex_unlock(&chain->filter_chain_lock);
2296         }
2297
2298         if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2299                 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2300                 err = -EINVAL;
2301                 goto errout;
2302         }
2303
2304         fh = tp->ops->get(tp, t->tcm_handle);
2305
2306         if (!fh) {
2307                 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2308                         NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
2309                         err = -ENOENT;
2310                         goto errout;
2311                 }
2312         } else if (n->nlmsg_flags & NLM_F_EXCL) {
2313                 tfilter_put(tp, fh);
2314                 NL_SET_ERR_MSG(extack, "Filter already exists");
2315                 err = -EEXIST;
2316                 goto errout;
2317         }
2318
2319         if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) {
2320                 tfilter_put(tp, fh);
2321                 NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind");
2322                 err = -EINVAL;
2323                 goto errout;
2324         }
2325
2326         if (!(n->nlmsg_flags & NLM_F_CREATE))
2327                 flags |= TCA_ACT_FLAGS_REPLACE;
2328         if (!rtnl_held)
2329                 flags |= TCA_ACT_FLAGS_NO_RTNL;
2330         if (is_qdisc_ingress(parent))
2331                 flags |= TCA_ACT_FLAGS_AT_INGRESS;
2332         err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
2333                               flags, extack);
2334         if (err == 0) {
2335                 tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2336                                RTM_NEWTFILTER, false, rtnl_held, extack);
2337                 tfilter_put(tp, fh);
2338                 /* q pointer is NULL for shared blocks */
2339                 if (q)
2340                         q->flags &= ~TCQ_F_CAN_BYPASS;
2341         }
2342
2343 errout:
2344         if (err && tp_created)
2345                 tcf_chain_tp_delete_empty(chain, tp, rtnl_held, NULL);
2346 errout_tp:
2347         if (chain) {
2348                 if (tp && !IS_ERR(tp))
2349                         tcf_proto_put(tp, rtnl_held, NULL);
2350                 if (!tp_created)
2351                         tcf_chain_put(chain);
2352         }
2353         tcf_block_release(q, block, rtnl_held);
2354
2355         if (rtnl_held)
2356                 rtnl_unlock();
2357
2358         if (err == -EAGAIN) {
2359                 /* Take rtnl lock in case EAGAIN is caused by concurrent flush
2360                  * of target chain.
2361                  */
2362                 rtnl_held = true;
2363                 /* Replay the request. */
2364                 goto replay;
2365         }
2366         return err;
2367
2368 errout_locked:
2369         mutex_unlock(&chain->filter_chain_lock);
2370         goto errout;
2371 }
2372
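/* RTM_DELTFILTER handler.  A priority of zero means "flush the whole
 * chain" and must not be combined with a protocol, handle or kind; a
 * handle of zero deletes the entire classifier instance at that
 * priority; otherwise a single filter is deleted and the classifier is
 * reaped once its last filter is gone (tcf_chain_tp_delete_empty()).
 */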
2373 static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2374                           struct netlink_ext_ack *extack)
2375 {
2376         struct net *net = sock_net(skb->sk);
2377         struct nlattr *tca[TCA_MAX + 1];
2378         char name[IFNAMSIZ];
2379         struct tcmsg *t;
2380         u32 protocol;
2381         u32 prio;
2382         u32 parent;
2383         u32 chain_index;
2384         struct Qdisc *q = NULL;
2385         struct tcf_chain_info chain_info;
2386         struct tcf_chain *chain = NULL;
2387         struct tcf_block *block = NULL;
2388         struct tcf_proto *tp = NULL;
2389         unsigned long cl = 0;
2390         void *fh = NULL;
2391         int err;
2392         bool rtnl_held = false;
2393
2394         err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2395                                      rtm_tca_policy, extack);
2396         if (err < 0)
2397                 return err;
2398
2399         t = nlmsg_data(n);
2400         protocol = TC_H_MIN(t->tcm_info);
2401         prio = TC_H_MAJ(t->tcm_info);
2402         parent = t->tcm_parent;
2403
2404         if (prio == 0 && (protocol || t->tcm_handle || tca[TCA_KIND])) {
2405                 NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set");
2406                 return -ENOENT;
2407         }
2408
2409         /* Find head of filter chain. */
2410
2411         err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2412         if (err)
2413                 return err;
2414
2415         if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2416                 NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2417                 err = -EINVAL;
2418                 goto errout;
2419         }
2420         /* Take the rtnl mutex if flushing the whole chain, the block is
2421          * shared (no qdisc found), the qdisc is not unlocked, the classifier
2422          * type is unspecified, or the classifier is not unlocked.
2423          */
2424         if (!prio ||
2425             (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2426             !tcf_proto_is_unlocked(name)) {
2427                 rtnl_held = true;
2428                 rtnl_lock();
2429         }
2430
2431         err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2432         if (err)
2433                 goto errout;
2434
2435         block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2436                                  extack);
2437         if (IS_ERR(block)) {
2438                 err = PTR_ERR(block);
2439                 goto errout;
2440         }
2441
2442         chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2443         if (chain_index > TC_ACT_EXT_VAL_MASK) {
2444                 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2445                 err = -EINVAL;
2446                 goto errout;
2447         }
2448         chain = tcf_chain_get(block, chain_index, false);
2449         if (!chain) {
2450                 /* User requested flush on non-existent chain. Nothing to do,
2451                  * so just return success.
2452                  */
2453                 if (prio == 0) {
2454                         err = 0;
2455                         goto errout;
2456                 }
2457                 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2458                 err = -ENOENT;
2459                 goto errout;
2460         }
2461
2462         if (prio == 0) {
2463                 tfilter_notify_chain(net, skb, block, q, parent, n,
2464                                      chain, RTM_DELTFILTER, extack);
2465                 tcf_chain_flush(chain, rtnl_held);
2466                 err = 0;
2467                 goto errout;
2468         }
2469
2470         mutex_lock(&chain->filter_chain_lock);
2471         tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2472                                prio, false);
2473         if (!tp || IS_ERR(tp)) {
2474                 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2475                 err = tp ? PTR_ERR(tp) : -ENOENT;
2476                 goto errout_locked;
2477         } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2478                 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2479                 err = -EINVAL;
2480                 goto errout_locked;
2481         } else if (t->tcm_handle == 0) {
2482                 tcf_proto_signal_destroying(chain, tp);
2483                 tcf_chain_tp_remove(chain, &chain_info, tp);
2484                 mutex_unlock(&chain->filter_chain_lock);
2485
2486                 tcf_proto_put(tp, rtnl_held, NULL);
2487                 tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2488                                RTM_DELTFILTER, false, rtnl_held, extack);
2489                 err = 0;
2490                 goto errout;
2491         }
2492         mutex_unlock(&chain->filter_chain_lock);
2493
2494         fh = tp->ops->get(tp, t->tcm_handle);
2495
2496         if (!fh) {
2497                 NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2498                 err = -ENOENT;
2499         } else {
2500                 bool last;
2501
2502                 err = tfilter_del_notify(net, skb, n, tp, block,
2503                                          q, parent, fh, false, &last,
2504                                          rtnl_held, extack);
2505
2506                 if (err)
2507                         goto errout;
2508                 if (last)
2509                         tcf_chain_tp_delete_empty(chain, tp, rtnl_held, extack);
2510         }
2511
2512 errout:
2513         if (chain) {
2514                 if (tp && !IS_ERR(tp))
2515                         tcf_proto_put(tp, rtnl_held, NULL);
2516                 tcf_chain_put(chain);
2517         }
2518         tcf_block_release(q, block, rtnl_held);
2519
2520         if (rtnl_held)
2521                 rtnl_unlock();
2522
2523         return err;
2524
2525 errout_locked:
2526         mutex_unlock(&chain->filter_chain_lock);
2527         goto errout;
2528 }
2529
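/* RTM_GETTFILTER handler: looks up a single filter and unicasts an
 * RTM_NEWTFILTER message describing it back to the requesting socket.
 */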
2530 static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2531                           struct netlink_ext_ack *extack)
2532 {
2533         struct net *net = sock_net(skb->sk);
2534         struct nlattr *tca[TCA_MAX + 1];
2535         char name[IFNAMSIZ];
2536         struct tcmsg *t;
2537         u32 protocol;
2538         u32 prio;
2539         u32 parent;
2540         u32 chain_index;
2541         struct Qdisc *q = NULL;
2542         struct tcf_chain_info chain_info;
2543         struct tcf_chain *chain = NULL;
2544         struct tcf_block *block = NULL;
2545         struct tcf_proto *tp = NULL;
2546         unsigned long cl = 0;
2547         void *fh = NULL;
2548         int err;
2549         bool rtnl_held = false;
2550
2551         err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2552                                      rtm_tca_policy, extack);
2553         if (err < 0)
2554                 return err;
2555
2556         t = nlmsg_data(n);
2557         protocol = TC_H_MIN(t->tcm_info);
2558         prio = TC_H_MAJ(t->tcm_info);
2559         parent = t->tcm_parent;
2560
2561         if (prio == 0) {
2562                 NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
2563                 return -ENOENT;
2564         }
2565
2566         /* Find head of filter chain. */
2567
2568         err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2569         if (err)
2570                 return err;
2571
2572         if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2573                 NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2574                 err = -EINVAL;
2575                 goto errout;
2576         }
2577         /* Take the rtnl mutex if the block is shared (no qdisc found), the
2578          * qdisc is not unlocked, the classifier type is unspecified, or the
2579          * classifier is not unlocked.
2580          */
2581         if ((q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2582             !tcf_proto_is_unlocked(name)) {
2583                 rtnl_held = true;
2584                 rtnl_lock();
2585         }
2586
2587         err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2588         if (err)
2589                 goto errout;
2590
2591         block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2592                                  extack);
2593         if (IS_ERR(block)) {
2594                 err = PTR_ERR(block);
2595                 goto errout;
2596         }
2597
2598         chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2599         if (chain_index > TC_ACT_EXT_VAL_MASK) {
2600                 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2601                 err = -EINVAL;
2602                 goto errout;
2603         }
2604         chain = tcf_chain_get(block, chain_index, false);
2605         if (!chain) {
2606                 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2607                 err = -EINVAL;
2608                 goto errout;
2609         }
2610
2611         mutex_lock(&chain->filter_chain_lock);
2612         tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2613                                prio, false);
2614         mutex_unlock(&chain->filter_chain_lock);
2615         if (!tp || IS_ERR(tp)) {
2616                 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2617                 err = tp ? PTR_ERR(tp) : -ENOENT;
2618                 goto errout;
2619         } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2620                 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2621                 err = -EINVAL;
2622                 goto errout;
2623         }
2624
2625         fh = tp->ops->get(tp, t->tcm_handle);
2626
2627         if (!fh) {
2628                 NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2629                 err = -ENOENT;
2630         } else {
2631                 err = tfilter_notify(net, skb, n, tp, block, q, parent,
2632                                      fh, RTM_NEWTFILTER, true, rtnl_held, NULL);
2633                 if (err < 0)
2634                         NL_SET_ERR_MSG(extack, "Failed to send filter notify message");
2635         }
2636
2637         tfilter_put(tp, fh);
2638 errout:
2639         if (chain) {
2640                 if (tp && !IS_ERR(tp))
2641                         tcf_proto_put(tp, rtnl_held, NULL);
2642                 tcf_chain_put(chain);
2643         }
2644         tcf_block_release(q, block, rtnl_held);
2645
2646         if (rtnl_held)
2647                 rtnl_unlock();
2648
2649         return err;
2650 }
2651
2652 struct tcf_dump_args {
2653         struct tcf_walker w;
2654         struct sk_buff *skb;
2655         struct netlink_callback *cb;
2656         struct tcf_block *block;
2657         struct Qdisc *q;
2658         u32 parent;
2659         bool terse_dump;
2660 };
2661
2662 static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
2663 {
2664         struct tcf_dump_args *a = (void *)arg;
2665         struct net *net = sock_net(a->skb->sk);
2666
2667         return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
2668                              n, NETLINK_CB(a->cb->skb).portid,
2669                              a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
2670                              RTM_NEWTFILTER, a->terse_dump, true, NULL);
2671 }
2672
2673 static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
2674                            struct sk_buff *skb, struct netlink_callback *cb,
2675                            long index_start, long *p_index, bool terse)
2676 {
2677         struct net *net = sock_net(skb->sk);
2678         struct tcf_block *block = chain->block;
2679         struct tcmsg *tcm = nlmsg_data(cb->nlh);
2680         struct tcf_proto *tp, *tp_prev;
2681         struct tcf_dump_args arg;
2682
2683         for (tp = __tcf_get_next_proto(chain, NULL);
2684              tp;
2685              tp_prev = tp,
2686                      tp = __tcf_get_next_proto(chain, tp),
2687                      tcf_proto_put(tp_prev, true, NULL),
2688                      (*p_index)++) {
2689                 if (*p_index < index_start)
2690                         continue;
2691                 if (TC_H_MAJ(tcm->tcm_info) &&
2692                     TC_H_MAJ(tcm->tcm_info) != tp->prio)
2693                         continue;
2694                 if (TC_H_MIN(tcm->tcm_info) &&
2695                     TC_H_MIN(tcm->tcm_info) != tp->protocol)
2696                         continue;
2697                 if (*p_index > index_start)
2698                         memset(&cb->args[1], 0,
2699                                sizeof(cb->args) - sizeof(cb->args[0]));
2700                 if (cb->args[1] == 0) {
2701                         if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
2702                                           NETLINK_CB(cb->skb).portid,
2703                                           cb->nlh->nlmsg_seq, NLM_F_MULTI,
2704                                           RTM_NEWTFILTER, false, true, NULL) <= 0)
2705                                 goto errout;
2706                         cb->args[1] = 1;
2707                 }
2708                 if (!tp->ops->walk)
2709                         continue;
2710                 arg.w.fn = tcf_node_dump;
2711                 arg.skb = skb;
2712                 arg.cb = cb;
2713                 arg.block = block;
2714                 arg.q = q;
2715                 arg.parent = parent;
2716                 arg.w.stop = 0;
2717                 arg.w.skip = cb->args[1] - 1;
2718                 arg.w.count = 0;
2719                 arg.w.cookie = cb->args[2];
2720                 arg.terse_dump = terse;
2721                 tp->ops->walk(tp, &arg.w, true);
2722                 cb->args[2] = arg.w.cookie;
2723                 cb->args[1] = arg.w.count + 1;
2724                 if (arg.w.stop)
2725                         goto errout;
2726         }
2727         return true;
2728
2729 errout:
2730         tcf_proto_put(tp, true, NULL);
2731         return false;
2732 }
2733
2734 static const struct nla_policy tcf_tfilter_dump_policy[TCA_MAX + 1] = {
2735         [TCA_DUMP_FLAGS] = NLA_POLICY_BITFIELD32(TCA_DUMP_FLAGS_TERSE),
2736 };
2737
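/* Dump all filters on a block or qdisc.  The netlink_callback args form
 * the resume cursor: args[0] is the index of the tp to resume from,
 * args[1] the 1-based position within the current tp (0 meaning its
 * header has not been emitted yet) and args[2] an opaque walker cookie
 * owned by the classifier.
 */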
2738 /* called with RTNL */
2739 static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
2740 {
2741         struct tcf_chain *chain, *chain_prev;
2742         struct net *net = sock_net(skb->sk);
2743         struct nlattr *tca[TCA_MAX + 1];
2744         struct Qdisc *q = NULL;
2745         struct tcf_block *block;
2746         struct tcmsg *tcm = nlmsg_data(cb->nlh);
2747         bool terse_dump = false;
2748         long index_start;
2749         long index;
2750         u32 parent;
2751         int err;
2752
2753         if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2754                 return skb->len;
2755
2756         err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
2757                                      tcf_tfilter_dump_policy, cb->extack);
2758         if (err)
2759                 return err;
2760
2761         if (tca[TCA_DUMP_FLAGS]) {
2762                 struct nla_bitfield32 flags =
2763                         nla_get_bitfield32(tca[TCA_DUMP_FLAGS]);
2764
2765                 terse_dump = flags.value & TCA_DUMP_FLAGS_TERSE;
2766         }
2767
2768         if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
2769                 block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
2770                 if (!block)
2771                         goto out;
2772                 /* If we work with a block index, q is NULL and the parent
2773                  * value will never be used in the following code. The check
2774                  * in tcf_fill_node prevents it. However, the compiler does
2775                  * not see that far, so set parent to zero to silence the
2776                  * warning about parent being uninitialized.
2777                  */
2778                 parent = 0;
2779         } else {
2780                 const struct Qdisc_class_ops *cops;
2781                 struct net_device *dev;
2782                 unsigned long cl = 0;
2783
2784                 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2785                 if (!dev)
2786                         return skb->len;
2787
2788                 parent = tcm->tcm_parent;
2789                 if (!parent)
2790                         q = rtnl_dereference(dev->qdisc);
2791                 else
2792                         q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
2793                 if (!q)
2794                         goto out;
2795                 cops = q->ops->cl_ops;
2796                 if (!cops)
2797                         goto out;
2798                 if (!cops->tcf_block)
2799                         goto out;
2800                 if (TC_H_MIN(tcm->tcm_parent)) {
2801                         cl = cops->find(q, tcm->tcm_parent);
2802                         if (cl == 0)
2803                                 goto out;
2804                 }
2805                 block = cops->tcf_block(q, cl, NULL);
2806                 if (!block)
2807                         goto out;
2808                 parent = block->classid;
2809                 if (tcf_block_shared(block))
2810                         q = NULL;
2811         }
2812
2813         index_start = cb->args[0];
2814         index = 0;
2815
2816         for (chain = __tcf_get_next_chain(block, NULL);
2817              chain;
2818              chain_prev = chain,
2819                      chain = __tcf_get_next_chain(block, chain),
2820                      tcf_chain_put(chain_prev)) {
2821                 if (tca[TCA_CHAIN] &&
2822                     nla_get_u32(tca[TCA_CHAIN]) != chain->index)
2823                         continue;
2824                 if (!tcf_chain_dump(chain, q, parent, skb, cb,
2825                                     index_start, &index, terse_dump)) {
2826                         tcf_chain_put(chain);
2827                         err = -EMSGSIZE;
2828                         break;
2829                 }
2830         }
2831
2832         if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
2833                 tcf_block_refcnt_put(block, true);
2834         cb->args[0] = index;
2835
2836 out:
2837         /* If we made no progress, the error (EMSGSIZE) is real */
2838         if (skb->len == 0 && err)
2839                 return err;
2840         return skb->len;
2841 }
2842
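/* Fill a single RTM_*CHAIN message.  Chain notifications reuse struct
 * tcmsg with tcm_handle unused (zero) and the chain index carried in the
 * TCA_CHAIN attribute; the template kind and payload are dumped when
 * template ops are set.
 */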
2843 static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops,
2844                               void *tmplt_priv, u32 chain_index,
2845                               struct net *net, struct sk_buff *skb,
2846                               struct tcf_block *block,
2847                               u32 portid, u32 seq, u16 flags, int event,
2848                               struct netlink_ext_ack *extack)
2849 {
2850         unsigned char *b = skb_tail_pointer(skb);
2851         const struct tcf_proto_ops *ops;
2852         struct nlmsghdr *nlh;
2853         struct tcmsg *tcm;
2854         void *priv;
2855
2856         ops = tmplt_ops;
2857         priv = tmplt_priv;
2858
2859         nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
2860         if (!nlh)
2861                 goto out_nlmsg_trim;
2862         tcm = nlmsg_data(nlh);
2863         tcm->tcm_family = AF_UNSPEC;
2864         tcm->tcm__pad1 = 0;
2865         tcm->tcm__pad2 = 0;
2866         tcm->tcm_handle = 0;
2867         if (block->q) {
2868                 tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex;
2869                 tcm->tcm_parent = block->q->handle;
2870         } else {
2871                 tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
2872                 tcm->tcm_block_index = block->index;
2873         }
2874
2875         if (nla_put_u32(skb, TCA_CHAIN, chain_index))
2876                 goto nla_put_failure;
2877
2878         if (ops) {
2879                 if (nla_put_string(skb, TCA_KIND, ops->kind))
2880                         goto nla_put_failure;
2881                 if (ops->tmplt_dump(skb, net, priv) < 0)
2882                         goto nla_put_failure;
2883         }
2884
2885         if (extack && extack->_msg &&
2886             nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg))
2887                 goto out_nlmsg_trim;
2888
2889         nlh->nlmsg_len = skb_tail_pointer(skb) - b;
2890
2891         return skb->len;
2892
2893 out_nlmsg_trim:
2894 nla_put_failure:
2895         nlmsg_trim(skb, b);
2896         return -EMSGSIZE;
2897 }
2898
2899 static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
2900                            u32 seq, u16 flags, int event, bool unicast,
2901                            struct netlink_ext_ack *extack)
2902 {
2903         u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2904         struct tcf_block *block = chain->block;
2905         struct net *net = block->net;
2906         struct sk_buff *skb;
2907         int err = 0;
2908
2909         skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2910         if (!skb)
2911                 return -ENOBUFS;
2912
2913         if (tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
2914                                chain->index, net, skb, block, portid,
2915                                seq, flags, event, extack) <= 0) {
2916                 kfree_skb(skb);
2917                 return -EINVAL;
2918         }
2919
2920         if (unicast)
2921                 err = rtnl_unicast(skb, net, portid);
2922         else
2923                 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
2924                                      flags & NLM_F_ECHO);
2925
2926         return err;
2927 }
2928
2929 static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
2930                                   void *tmplt_priv, u32 chain_index,
2931                                   struct tcf_block *block, struct sk_buff *oskb,
2932                                   u32 seq, u16 flags, bool unicast)
2933 {
2934         u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2935         struct net *net = block->net;
2936         struct sk_buff *skb;
2937
2938         skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2939         if (!skb)
2940                 return -ENOBUFS;
2941
2942         if (tc_chain_fill_node(tmplt_ops, tmplt_priv, chain_index, net, skb,
2943                                block, portid, seq, flags, RTM_DELCHAIN, NULL) <= 0) {
2944                 kfree_skb(skb);
2945                 return -EINVAL;
2946         }
2947
2948         if (unicast)
2949                 return rtnl_unicast(skb, net, portid);
2950
2951         return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO);
2952 }
2953
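/* Attach a template to a chain.  A template constrains every filter
 * later added to the chain and is only supported by classifiers that
 * implement the tmplt_create/tmplt_destroy/tmplt_dump trio (flower, for
 * example).  An illustrative userspace trigger (not from this file)
 * would be:
 *
 *   tc chain add dev eth0 ingress protocol ip \
 *           flower dst_mac 00:00:00:00:00:00/ff:ff:ff:ff:ff:ff
 */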
2954 static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
2955                               struct nlattr **tca,
2956                               struct netlink_ext_ack *extack)
2957 {
2958         const struct tcf_proto_ops *ops;
2959         char name[IFNAMSIZ];
2960         void *tmplt_priv;
2961
2962         /* If kind is not set, user did not specify template. */
2963         if (!tca[TCA_KIND])
2964                 return 0;
2965
2966         if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2967                 NL_SET_ERR_MSG(extack, "Specified TC chain template name too long");
2968                 return -EINVAL;
2969         }
2970
2971         ops = tcf_proto_lookup_ops(name, true, extack);
2972         if (IS_ERR(ops))
2973                 return PTR_ERR(ops);
2974         if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) {
2975                 NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
2976                 module_put(ops->owner);
2977                 return -EOPNOTSUPP;
2978         }
2979
2980         tmplt_priv = ops->tmplt_create(net, chain, tca, extack);
2981         if (IS_ERR(tmplt_priv)) {
2982                 module_put(ops->owner);
2983                 return PTR_ERR(tmplt_priv);
2984         }
2985         chain->tmplt_ops = ops;
2986         chain->tmplt_priv = tmplt_priv;
2987         return 0;
2988 }
2989
2990 static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
2991                                void *tmplt_priv)
2992 {
2993         /* If template ops are not set, there is no work for us to do. */
2994         if (!tmplt_ops)
2995                 return;
2996
2997         tmplt_ops->tmplt_destroy(tmplt_priv);
2998         module_put(tmplt_ops->owner);
2999 }
3000
3001 /* Add/delete/get a chain */
3002
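/* Shared handler for RTM_NEWCHAIN, RTM_DELCHAIN and RTM_GETCHAIN.  A
 * chain that exists only because actions reference it
 * (tcf_chain_held_by_acts_only()) is invisible to get/delete and is
 * turned into an explicitly created chain when RTM_NEWCHAIN takes an
 * extra hold on it.
 */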
3003 static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
3004                         struct netlink_ext_ack *extack)
3005 {
3006         struct net *net = sock_net(skb->sk);
3007         struct nlattr *tca[TCA_MAX + 1];
3008         struct tcmsg *t;
3009         u32 parent;
3010         u32 chain_index;
3011         struct Qdisc *q;
3012         struct tcf_chain *chain;
3013         struct tcf_block *block;
3014         unsigned long cl;
3015         int err;
3016
3017 replay:
3018         q = NULL;
3019         err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
3020                                      rtm_tca_policy, extack);
3021         if (err < 0)
3022                 return err;
3023
3024         t = nlmsg_data(n);
3025         parent = t->tcm_parent;
3026         cl = 0;
3027
3028         block = tcf_block_find(net, &q, &parent, &cl,
3029                                t->tcm_ifindex, t->tcm_block_index, extack);
3030         if (IS_ERR(block))
3031                 return PTR_ERR(block);
3032
3033         chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
3034         if (chain_index > TC_ACT_EXT_VAL_MASK) {
3035                 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
3036                 err = -EINVAL;
3037                 goto errout_block;
3038         }
3039
3040         mutex_lock(&block->lock);
3041         chain = tcf_chain_lookup(block, chain_index);
3042         if (n->nlmsg_type == RTM_NEWCHAIN) {
3043                 if (chain) {
3044                         if (tcf_chain_held_by_acts_only(chain)) {
3045                                 /* The chain exists only because there is
3046                                  * some action referencing it.
3047                                  */
3048                                 tcf_chain_hold(chain);
3049                         } else {
3050                                 NL_SET_ERR_MSG(extack, "Filter chain already exists");
3051                                 err = -EEXIST;
3052                                 goto errout_block_locked;
3053                         }
3054                 } else {
3055                         if (!(n->nlmsg_flags & NLM_F_CREATE)) {
3056                                 NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain");
3057                                 err = -ENOENT;
3058                                 goto errout_block_locked;
3059                         }
3060                         chain = tcf_chain_create(block, chain_index);
3061                         if (!chain) {
3062                                 NL_SET_ERR_MSG(extack, "Failed to create filter chain");
3063                                 err = -ENOMEM;
3064                                 goto errout_block_locked;
3065                         }
3066                 }
3067         } else {
3068                 if (!chain || tcf_chain_held_by_acts_only(chain)) {
3069                         NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
3070                         err = -EINVAL;
3071                         goto errout_block_locked;
3072                 }
3073                 tcf_chain_hold(chain);
3074         }
3075
3076         if (n->nlmsg_type == RTM_NEWCHAIN) {
3077                 /* Modifying a chain requires holding the parent block lock. As
3078                  * the chain was successfully added or looked up, take a reference
3079                  * to it. This ensures that an empty chain does not disappear at
3080                  * the end of this function.
3081                  */
3082                 tcf_chain_hold(chain);
3083                 chain->explicitly_created = true;
3084         }
3085         mutex_unlock(&block->lock);
3086
3087         switch (n->nlmsg_type) {
3088         case RTM_NEWCHAIN:
3089                 err = tc_chain_tmplt_add(chain, net, tca, extack);
3090                 if (err) {
3091                         tcf_chain_put_explicitly_created(chain);
3092                         goto errout;
3093                 }
3094
3095                 tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
3096                                 RTM_NEWCHAIN, false, extack);
3097                 break;
3098         case RTM_DELCHAIN:
3099                 tfilter_notify_chain(net, skb, block, q, parent, n,
3100                                      chain, RTM_DELTFILTER, extack);
3101                 /* Flush the chain first as the user requested chain removal. */
3102                 tcf_chain_flush(chain, true);
3103                 /* In case the chain was successfully deleted, put a reference
3104                  * to the chain previously taken during addition.
3105                  */
3106                 tcf_chain_put_explicitly_created(chain);
3107                 break;
3108         case RTM_GETCHAIN:
3109                 err = tc_chain_notify(chain, skb, n->nlmsg_seq,
3110                                       n->nlmsg_flags, n->nlmsg_type, true, extack);
3111                 if (err < 0)
3112                         NL_SET_ERR_MSG(extack, "Failed to send chain notify message");
3113                 break;
3114         default:
3115                 err = -EOPNOTSUPP;
3116                 NL_SET_ERR_MSG(extack, "Unsupported message type");
3117                 goto errout;
3118         }
3119
3120 errout:
3121         tcf_chain_put(chain);
3122 errout_block:
3123         tcf_block_release(q, block, true);
3124         if (err == -EAGAIN)
3125                 /* Replay the request. */
3126                 goto replay;
3127         return err;
3128
3129 errout_block_locked:
3130         mutex_unlock(&block->lock);
3131         goto errout_block;
3132 }
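
/* For illustration, the userspace view of the handler above; the device,
 * chain number, and template options are arbitrary examples (see tc(8) in
 * iproute2 for the exact syntax):
 *
 *	tc chain add dev eth0 ingress chain 1 \
 *		protocol ip flower dst_ip 192.0.2.0/24	# RTM_NEWCHAIN
 *	tc chain show dev eth0 ingress			# RTM_GETCHAIN
 *	tc chain del dev eth0 ingress chain 1		# RTM_DELCHAIN
 */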
3133
3134 /* called with RTNL */
3135 static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
3136 {
3137         struct net *net = sock_net(skb->sk);
3138         struct nlattr *tca[TCA_MAX + 1];
3139         struct Qdisc *q = NULL;
3140         struct tcf_block *block;
3141         struct tcmsg *tcm = nlmsg_data(cb->nlh);
3142         struct tcf_chain *chain;
3143         long index_start;
3144         long index;
3145         int err;
3146
3147         if (nlmsg_len(cb->nlh) < sizeof(*tcm))
3148                 return skb->len;
3149
3150         err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
3151                                      rtm_tca_policy, cb->extack);
3152         if (err)
3153                 return err;
3154
3155         if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
3156                 block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
3157                 if (!block)
3158                         goto out;
3159         } else {
3160                 const struct Qdisc_class_ops *cops;
3161                 struct net_device *dev;
3162                 unsigned long cl = 0;
3163
3164                 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
3165                 if (!dev)
3166                         return skb->len;
3167
3168                 if (!tcm->tcm_parent)
3169                         q = rtnl_dereference(dev->qdisc);
3170                 else
3171                         q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
3172
3173                 if (!q)
3174                         goto out;
3175                 cops = q->ops->cl_ops;
3176                 if (!cops)
3177                         goto out;
3178                 if (!cops->tcf_block)
3179                         goto out;
3180                 if (TC_H_MIN(tcm->tcm_parent)) {
3181                         cl = cops->find(q, tcm->tcm_parent);
3182                         if (cl == 0)
3183                                 goto out;
3184                 }
3185                 block = cops->tcf_block(q, cl, NULL);
3186                 if (!block)
3187                         goto out;
3188                 if (tcf_block_shared(block))
3189                         q = NULL;
3190         }
3191
3192         index_start = cb->args[0];
3193         index = 0;
3194
3195         mutex_lock(&block->lock);
3196         list_for_each_entry(chain, &block->chain_list, list) {
3197                 if (tca[TCA_CHAIN] &&
3198                     nla_get_u32(tca[TCA_CHAIN]) != chain->index)
3199                         continue;
3200                 if (index < index_start) {
3201                         index++;
3202                         continue;
3203                 }
3204                 if (tcf_chain_held_by_acts_only(chain))
3205                         continue;
3206                 err = tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
3207                                          chain->index, net, skb, block,
3208                                          NETLINK_CB(cb->skb).portid,
3209                                          cb->nlh->nlmsg_seq, NLM_F_MULTI,
3210                                          RTM_NEWCHAIN, NULL);
3211                 if (err <= 0)
3212                         break;
3213                 index++;
3214         }
3215         mutex_unlock(&block->lock);
3216
3217         if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
3218                 tcf_block_refcnt_put(block, true);
3219         cb->args[0] = index;
3220
3221 out:
3222         /* If we made no progress, the error (EMSGSIZE) is real */
3223         if (skb->len == 0 && err)
3224                 return err;
3225         return skb->len;
3226 }
3227
3228 int tcf_exts_init_ex(struct tcf_exts *exts, struct net *net, int action,
3229                      int police, struct tcf_proto *tp, u32 handle,
3230                      bool use_action_miss)
3231 {
3232         int err = 0;
3233
3234 #ifdef CONFIG_NET_CLS_ACT
3235         exts->type = 0;
3236         exts->nr_actions = 0;
3237         exts->miss_cookie_node = NULL;
3238         /* Note: we do not yet own a reference on net.
3239          * This reference might be taken later from tcf_exts_get_net().
3240          */
3241         exts->net = net;
3242         exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
3243                                 GFP_KERNEL);
3244         if (!exts->actions)
3245                 return -ENOMEM;
3246 #endif
3247
3248         exts->action = action;
3249         exts->police = police;
3250
3251         if (!use_action_miss)
3252                 return 0;
3253
3254         err = tcf_exts_miss_cookie_base_alloc(exts, tp, handle);
3255         if (err)
3256                 goto err_miss_alloc;
3257
3258         return 0;
3259
3260 err_miss_alloc:
3261         tcf_exts_destroy(exts);
3262 #ifdef CONFIG_NET_CLS_ACT
3263         exts->actions = NULL;
3264 #endif
3265         return err;
3266 }
3267 EXPORT_SYMBOL(tcf_exts_init_ex);
3268
3269 void tcf_exts_destroy(struct tcf_exts *exts)
3270 {
3271         tcf_exts_miss_cookie_base_destroy(exts);
3272
3273 #ifdef CONFIG_NET_CLS_ACT
3274         if (exts->actions) {
3275                 tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
3276                 kfree(exts->actions);
3277         }
3278         exts->nr_actions = 0;
3279 #endif
3280 }
3281 EXPORT_SYMBOL(tcf_exts_destroy);
3282
3283 int tcf_exts_validate_ex(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
3284                          struct nlattr *rate_tlv, struct tcf_exts *exts,
3285                          u32 flags, u32 fl_flags, struct netlink_ext_ack *extack)
3286 {
3287 #ifdef CONFIG_NET_CLS_ACT
3288         {
3289                 int init_res[TCA_ACT_MAX_PRIO] = {};
3290                 struct tc_action *act;
3291                 size_t attr_size = 0;
3292
3293                 if (exts->police && tb[exts->police]) {
3294                         struct tc_action_ops *a_o;
3295
3296                         a_o = tc_action_load_ops(tb[exts->police], true,
3297                                                  !(flags & TCA_ACT_FLAGS_NO_RTNL),
3298                                                  extack);
3299                         if (IS_ERR(a_o))
3300                                 return PTR_ERR(a_o);
3301                         flags |= TCA_ACT_FLAGS_POLICE | TCA_ACT_FLAGS_BIND;
3302                         act = tcf_action_init_1(net, tp, tb[exts->police],
3303                                                 rate_tlv, a_o, init_res, flags,
3304                                                 extack);
3305                         module_put(a_o->owner);
3306                         if (IS_ERR(act))
3307                                 return PTR_ERR(act);
3308
3309                         act->type = exts->type = TCA_OLD_COMPAT;
3310                         exts->actions[0] = act;
3311                         exts->nr_actions = 1;
3312                         tcf_idr_insert_many(exts->actions);
3313                 } else if (exts->action && tb[exts->action]) {
3314                         int err;
3315
3316                         flags |= TCA_ACT_FLAGS_BIND;
3317                         err = tcf_action_init(net, tp, tb[exts->action],
3318                                               rate_tlv, exts->actions, init_res,
3319                                               &attr_size, flags, fl_flags,
3320                                               extack);
3321                         if (err < 0)
3322                                 return err;
3323                         exts->nr_actions = err;
3324                 }
3325         }
3326 #else
3327         if ((exts->action && tb[exts->action]) ||
3328             (exts->police && tb[exts->police])) {
3329                 NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)");
3330                 return -EOPNOTSUPP;
3331         }
3332 #endif
3333
3334         return 0;
3335 }
3336 EXPORT_SYMBOL(tcf_exts_validate_ex);
3337
3338 int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
3339                       struct nlattr *rate_tlv, struct tcf_exts *exts,
3340                       u32 flags, struct netlink_ext_ack *extack)
3341 {
3342         return tcf_exts_validate_ex(net, tp, tb, rate_tlv, exts,
3343                                     flags, 0, extack);
3344 }
3345 EXPORT_SYMBOL(tcf_exts_validate);
3346
3347 void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
3348 {
3349 #ifdef CONFIG_NET_CLS_ACT
3350         struct tcf_exts old = *dst;
3351
3352         *dst = *src;
3353         tcf_exts_destroy(&old);
3354 #endif
3355 }
3356 EXPORT_SYMBOL(tcf_exts_change);
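
/* A minimal sketch of how a classifier typically strings the helpers above
 * together in its ->change() callback; the "foo" names and attribute indices
 * are hypothetical and error unwinding is abbreviated:
 *
 *	struct tcf_exts e;
 *	int err;
 *
 *	err = tcf_exts_init(&e, net, TCA_FOO_ACT, TCA_FOO_POLICE);
 *	if (err)
 *		return err;
 *	err = tcf_exts_validate(net, tp, tb, est, &e, flags, extack);
 *	if (err) {
 *		tcf_exts_destroy(&e);
 *		return err;
 *	}
 *	tcf_exts_change(&f->exts, &e);	// commit; the old exts are destroyed
 */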
3357
3358 #ifdef CONFIG_NET_CLS_ACT
3359 static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
3360 {
3361         if (exts->nr_actions == 0)
3362                 return NULL;
3363         else
3364                 return exts->actions[0];
3365 }
3366 #endif
3367
3368 int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
3369 {
3370 #ifdef CONFIG_NET_CLS_ACT
3371         struct nlattr *nest;
3372
3373         if (exts->action && tcf_exts_has_actions(exts)) {
3374                 /*
3375                  * Again, for backward-compatible mode: we want to work
3376                  * with both old and new modes of entering tc data even
3377                  * if iproute2 is newer - jhs
3378                  */
3379                 if (exts->type != TCA_OLD_COMPAT) {
3380                         nest = nla_nest_start_noflag(skb, exts->action);
3381                         if (nest == NULL)
3382                                 goto nla_put_failure;
3383
3384                         if (tcf_action_dump(skb, exts->actions, 0, 0,
3385                                             false) < 0)
3386                                 goto nla_put_failure;
3387                         nla_nest_end(skb, nest);
3388                 } else if (exts->police) {
3389                         struct tc_action *act = tcf_exts_first_act(exts);
3390                         nest = nla_nest_start_noflag(skb, exts->police);
3391                         if (nest == NULL || !act)
3392                                 goto nla_put_failure;
3393                         if (tcf_action_dump_old(skb, act, 0, 0) < 0)
3394                                 goto nla_put_failure;
3395                         nla_nest_end(skb, nest);
3396                 }
3397         }
3398         return 0;
3399
3400 nla_put_failure:
3401         nla_nest_cancel(skb, nest);
3402         return -1;
3403 #else
3404         return 0;
3405 #endif
3406 }
3407 EXPORT_SYMBOL(tcf_exts_dump);
3408
3409 int tcf_exts_terse_dump(struct sk_buff *skb, struct tcf_exts *exts)
3410 {
3411 #ifdef CONFIG_NET_CLS_ACT
3412         struct nlattr *nest;
3413
3414         if (!exts->action || !tcf_exts_has_actions(exts))
3415                 return 0;
3416
3417         nest = nla_nest_start_noflag(skb, exts->action);
3418         if (!nest)
3419                 goto nla_put_failure;
3420
3421         if (tcf_action_dump(skb, exts->actions, 0, 0, true) < 0)
3422                 goto nla_put_failure;
3423         nla_nest_end(skb, nest);
3424         return 0;
3425
3426 nla_put_failure:
3427         nla_nest_cancel(skb, nest);
3428         return -1;
3429 #else
3430         return 0;
3431 #endif
3432 }
3433 EXPORT_SYMBOL(tcf_exts_terse_dump);
3434
3435 int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
3436 {
3437 #ifdef CONFIG_NET_CLS_ACT
3438         struct tc_action *a = tcf_exts_first_act(exts);
3439         if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
3440                 return -1;
3441 #endif
3442         return 0;
3443 }
3444 EXPORT_SYMBOL(tcf_exts_dump_stats);
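
/* Sketched placement of the dump helpers above in a classifier ->dump()
 * callback; nest setup is abbreviated and "f" is a hypothetical filter. Note
 * that the stats helper runs after the options nest is closed, as in-tree
 * classifiers do:
 *
 *	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
 *	if (!nest)
 *		goto nla_put_failure;
 *	// ... classifier-specific attributes ...
 *	if (tcf_exts_dump(skb, &f->exts) < 0)
 *		goto nla_put_failure;
 *	nla_nest_end(skb, nest);
 *	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
 *		goto nla_put_failure;
 */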
3445
3446 static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
3447 {
3448         if (*flags & TCA_CLS_FLAGS_IN_HW)
3449                 return;
3450         *flags |= TCA_CLS_FLAGS_IN_HW;
3451         atomic_inc(&block->offloadcnt);
3452 }
3453
3454 static void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
3455 {
3456         if (!(*flags & TCA_CLS_FLAGS_IN_HW))
3457                 return;
3458         *flags &= ~TCA_CLS_FLAGS_IN_HW;
3459         atomic_dec(&block->offloadcnt);
3460 }
3461
3462 static void tc_cls_offload_cnt_update(struct tcf_block *block,
3463                                       struct tcf_proto *tp, u32 *cnt,
3464                                       u32 *flags, u32 diff, bool add)
3465 {
3466         lockdep_assert_held(&block->cb_lock);
3467
3468         spin_lock(&tp->lock);
3469         if (add) {
3470                 if (!*cnt)
3471                         tcf_block_offload_inc(block, flags);
3472                 *cnt += diff;
3473         } else {
3474                 *cnt -= diff;
3475                 if (!*cnt)
3476                         tcf_block_offload_dec(block, flags);
3477         }
3478         spin_unlock(&tp->lock);
3479 }
3480
3481 static void
3482 tc_cls_offload_cnt_reset(struct tcf_block *block, struct tcf_proto *tp,
3483                          u32 *cnt, u32 *flags)
3484 {
3485         lockdep_assert_held(&block->cb_lock);
3486
3487         spin_lock(&tp->lock);
3488         tcf_block_offload_dec(block, flags);
3489         *cnt = 0;
3490         spin_unlock(&tp->lock);
3491 }
3492
3493 static int
3494 __tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3495                    void *type_data, bool err_stop)
3496 {
3497         struct flow_block_cb *block_cb;
3498         int ok_count = 0;
3499         int err;
3500
3501         list_for_each_entry(block_cb, &block->flow_block.cb_list, list) {
3502                 err = block_cb->cb(type, type_data, block_cb->cb_priv);
3503                 if (err) {
3504                         if (err_stop)
3505                                 return err;
3506                 } else {
3507                         ok_count++;
3508                 }
3509         }
3510         return ok_count;
3511 }
3512
3513 int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3514                      void *type_data, bool err_stop, bool rtnl_held)
3515 {
3516         bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3517         int ok_count;
3518
3519 retry:
3520         if (take_rtnl)
3521                 rtnl_lock();
3522         down_read(&block->cb_lock);
3523         /* Need to obtain rtnl lock if block is bound to devs that require it.
3524          * In block bind code cb_lock is obtained while holding rtnl, so we must
3525          * obtain the locks in the same order here.
3526          */
3527         if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3528                 up_read(&block->cb_lock);
3529                 take_rtnl = true;
3530                 goto retry;
3531         }
3532
3533         ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3534
3535         up_read(&block->cb_lock);
3536         if (take_rtnl)
3537                 rtnl_unlock();
3538         return ok_count;
3539 }
3540 EXPORT_SYMBOL(tc_setup_cb_call);
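
/* An illustrative driver-side callback of the kind that the loop in
 * __tc_setup_cb_call() above invokes; the "foo" names are hypothetical:
 *
 *	static int foo_setup_tc_block_cb(enum tc_setup_type type,
 *					 void *type_data, void *cb_priv)
 *	{
 *		struct foo_priv *priv = cb_priv;
 *
 *		switch (type) {
 *		case TC_SETUP_CLSFLOWER:
 *			return foo_setup_flower(priv, type_data);
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */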
3541
3542 /* Non-destructive filter add. If a filter that wasn't already in hardware is
3543  * successfully offloaded, increment the block offloads counter. On failure,
3544  * a previously offloaded filter is considered to be intact and the offloads
3545  * counter is not decremented.
3546  */
3547
3548 int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
3549                     enum tc_setup_type type, void *type_data, bool err_stop,
3550                     u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
3551 {
3552         bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3553         int ok_count;
3554
3555 retry:
3556         if (take_rtnl)
3557                 rtnl_lock();
3558         down_read(&block->cb_lock);
3559         /* Need to obtain rtnl lock if block is bound to devs that require it.
3560          * In block bind code cb_lock is obtained while holding rtnl, so we must
3561          * obtain the locks in the same order here.
3562          */
3563         if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3564                 up_read(&block->cb_lock);
3565                 take_rtnl = true;
3566                 goto retry;
3567         }
3568
3569         /* Make sure all netdevs sharing this block are offload-capable. */
3570         if (block->nooffloaddevcnt && err_stop) {
3571                 ok_count = -EOPNOTSUPP;
3572                 goto err_unlock;
3573         }
3574
3575         ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3576         if (ok_count < 0)
3577                 goto err_unlock;
3578
3579         if (tp->ops->hw_add)
3580                 tp->ops->hw_add(tp, type_data);
3581         if (ok_count > 0)
3582                 tc_cls_offload_cnt_update(block, tp, in_hw_count, flags,
3583                                           ok_count, true);
3584 err_unlock:
3585         up_read(&block->cb_lock);
3586         if (take_rtnl)
3587                 rtnl_unlock();
3588         return min(ok_count, 0);
3589 }
3590 EXPORT_SYMBOL(tc_setup_cb_add);
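
/* A condensed sketch of a classifier offloading a new filter through the
 * helper above, loosely modeled on cls_flower; building the offload request
 * is elided and the "f" filter struct is hypothetical:
 *
 *	struct flow_cls_offload cls_offload = { ... };
 *
 *	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_offload,
 *			      skip_sw, &f->flags, &f->in_hw_count, rtnl_held);
 *	if (err)
 *		goto errout;		// nothing new was left offloaded
 *	if (skip_sw && !tc_in_hw(f->flags))
 *		err = -EINVAL;		// hw-only filter did not land anywhere
 */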
3591
3592 /* Destructive filter replace. If a filter that wasn't already in hardware is
3593  * successfully offloaded, increment the block offload counter. On failure,
3594  * the previously offloaded filter is considered to be destroyed and the
3595  * offload counter is decremented.
3596  */
3597
3598 int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
3599                         enum tc_setup_type type, void *type_data, bool err_stop,
3600                         u32 *old_flags, unsigned int *old_in_hw_count,
3601                         u32 *new_flags, unsigned int *new_in_hw_count,
3602                         bool rtnl_held)
3603 {
3604         bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3605         int ok_count;
3606
3607 retry:
3608         if (take_rtnl)
3609                 rtnl_lock();
3610         down_read(&block->cb_lock);
3611         /* Need to obtain rtnl lock if block is bound to devs that require it.
3612          * In block bind code cb_lock is obtained while holding rtnl, so we must
3613          * obtain the locks in the same order here.
3614          */
3615         if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3616                 up_read(&block->cb_lock);
3617                 take_rtnl = true;
3618                 goto retry;
3619         }
3620
3621         /* Make sure all netdevs sharing this block are offload-capable. */
3622         if (block->nooffloaddevcnt && err_stop) {
3623                 ok_count = -EOPNOTSUPP;
3624                 goto err_unlock;
3625         }
3626
3627         tc_cls_offload_cnt_reset(block, tp, old_in_hw_count, old_flags);
3628         if (tp->ops->hw_del)
3629                 tp->ops->hw_del(tp, type_data);
3630
3631         ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3632         if (ok_count < 0)
3633                 goto err_unlock;
3634
3635         if (tp->ops->hw_add)
3636                 tp->ops->hw_add(tp, type_data);
3637         if (ok_count > 0)
3638                 tc_cls_offload_cnt_update(block, tp, new_in_hw_count,
3639                                           new_flags, ok_count, true);
3640 err_unlock:
3641         up_read(&block->cb_lock);
3642         if (take_rtnl)
3643                 rtnl_unlock();
3644         return min(ok_count, 0);
3645 }
3646 EXPORT_SYMBOL(tc_setup_cb_replace);
3647
3648 /* Destroy the filter and decrement the block offload counter if the filter
3649  * was previously offloaded.
3650  */
3651
3652 int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
3653                         enum tc_setup_type type, void *type_data, bool err_stop,
3654                         u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
3655 {
3656         bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3657         int ok_count;
3658
3659 retry:
3660         if (take_rtnl)
3661                 rtnl_lock();
3662         down_read(&block->cb_lock);
3663         /* Need to obtain rtnl lock if block is bound to devs that require it.
3664          * In block bind code cb_lock is obtained while holding rtnl, so we must
3665          * obtain the locks in the same order here.
3666          */
3667         if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3668                 up_read(&block->cb_lock);
3669                 take_rtnl = true;
3670                 goto retry;
3671         }
3672
3673         ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3674
3675         tc_cls_offload_cnt_reset(block, tp, in_hw_count, flags);
3676         if (tp->ops->hw_del)
3677                 tp->ops->hw_del(tp, type_data);
3678
3679         up_read(&block->cb_lock);
3680         if (take_rtnl)
3681                 rtnl_unlock();
3682         return min(ok_count, 0);
3683 }
3684 EXPORT_SYMBOL(tc_setup_cb_destroy);
3685
3686 int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
3687                           bool add, flow_setup_cb_t *cb,
3688                           enum tc_setup_type type, void *type_data,
3689                           void *cb_priv, u32 *flags, unsigned int *in_hw_count)
3690 {
3691         int err = cb(type, type_data, cb_priv);
3692
3693         if (err) {
3694                 if (add && tc_skip_sw(*flags))
3695                         return err;
3696         } else {
3697                 tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 1,
3698                                           add);
3699         }
3700
3701         return 0;
3702 }
3703 EXPORT_SYMBOL(tc_setup_cb_reoffload);
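
/* An abbreviated sketch of a classifier ->reoffload() walk feeding each
 * filter through the helper above, as cls_flower does per hashtable entry;
 * building the offload request is elided:
 *
 *	// for each filter f known to the classifier:
 *	struct flow_cls_offload cls_offload = { ... };
 *
 *	err = tc_setup_cb_reoffload(block, tp, add, cb, TC_SETUP_CLSFLOWER,
 *				    &cls_offload, cb_priv, &f->flags,
 *				    &f->in_hw_count);
 *	if (err)
 *		return err;
 */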
3704
3705 static int tcf_act_get_user_cookie(struct flow_action_entry *entry,
3706                                    const struct tc_action *act)
3707 {
3708         struct tc_cookie *user_cookie;
3709         int err = 0;
3710
3711         rcu_read_lock();
3712         user_cookie = rcu_dereference(act->user_cookie);
3713         if (user_cookie) {
3714                 entry->user_cookie = flow_action_cookie_create(user_cookie->data,
3715                                                                user_cookie->len,
3716                                                                GFP_ATOMIC);
3717                 if (!entry->user_cookie)
3718                         err = -ENOMEM;
3719         }
3720         rcu_read_unlock();
3721         return err;
3722 }
3723
3724 static void tcf_act_put_user_cookie(struct flow_action_entry *entry)
3725 {
3726         flow_action_cookie_destroy(entry->user_cookie);
3727 }
3728
3729 void tc_cleanup_offload_action(struct flow_action *flow_action)
3730 {
3731         struct flow_action_entry *entry;
3732         int i;
3733
3734         flow_action_for_each(i, entry, flow_action) {
3735                 tcf_act_put_user_cookie(entry);
3736                 if (entry->destructor)
3737                         entry->destructor(entry->destructor_priv);
3738         }
3739 }
3740 EXPORT_SYMBOL(tc_cleanup_offload_action);
3741
3742 static int tc_setup_offload_act(struct tc_action *act,
3743                                 struct flow_action_entry *entry,
3744                                 u32 *index_inc,
3745                                 struct netlink_ext_ack *extack)
3746 {
3747 #ifdef CONFIG_NET_CLS_ACT
3748         if (act->ops->offload_act_setup) {
3749                 return act->ops->offload_act_setup(act, entry, index_inc, true,
3750                                                    extack);
3751         } else {
3752                 NL_SET_ERR_MSG(extack, "Action does not support offload");
3753                 return -EOPNOTSUPP;
3754         }
3755 #else
3756         return 0;
3757 #endif
3758 }
3759
3760 int tc_setup_action(struct flow_action *flow_action,
3761                     struct tc_action *actions[],
3762                     u32 miss_cookie_base,
3763                     struct netlink_ext_ack *extack)
3764 {
3765         int i, j, k, index, err = 0;
3766         struct tc_action *act;
3767
3768         BUILD_BUG_ON(TCA_ACT_HW_STATS_ANY != FLOW_ACTION_HW_STATS_ANY);
3769         BUILD_BUG_ON(TCA_ACT_HW_STATS_IMMEDIATE != FLOW_ACTION_HW_STATS_IMMEDIATE);
3770         BUILD_BUG_ON(TCA_ACT_HW_STATS_DELAYED != FLOW_ACTION_HW_STATS_DELAYED);
3771
3772         if (!actions)
3773                 return 0;
3774
3775         j = 0;
3776         tcf_act_for_each_action(i, act, actions) {
3777                 struct flow_action_entry *entry;
3778
3779                 entry = &flow_action->entries[j];
3780                 spin_lock_bh(&act->tcfa_lock);
3781                 err = tcf_act_get_user_cookie(entry, act);
3782                 if (err)
3783                         goto err_out_locked;
3784
3785                 index = 0;
3786                 err = tc_setup_offload_act(act, entry, &index, extack);
3787                 if (err)
3788                         goto err_out_locked;
3789
3790                 for (k = 0; k < index; k++) {
3791                         entry[k].hw_stats = tc_act_hw_stats(act->hw_stats);
3792                         entry[k].hw_index = act->tcfa_index;
3793                         entry[k].cookie = (unsigned long)act;
3794                         entry[k].miss_cookie =
3795                                 tcf_exts_miss_cookie_get(miss_cookie_base, i);
3796                 }
3797
3798                 j += index;
3799
3800                 spin_unlock_bh(&act->tcfa_lock);
3801         }
3802
3803 err_out:
3804         if (err)
3805                 tc_cleanup_offload_action(flow_action);
3806
3807         return err;
3808 err_out_locked:
3809         spin_unlock_bh(&act->tcfa_lock);
3810         goto err_out;
3811 }
3812
3813 int tc_setup_offload_action(struct flow_action *flow_action,
3814                             const struct tcf_exts *exts,
3815                             struct netlink_ext_ack *extack)
3816 {
3817 #ifdef CONFIG_NET_CLS_ACT
3818         u32 miss_cookie_base;
3819
3820         if (!exts)
3821                 return 0;
3822
3823         miss_cookie_base = exts->miss_cookie_node ?
3824                            exts->miss_cookie_node->miss_cookie_base : 0;
3825         return tc_setup_action(flow_action, exts->actions, miss_cookie_base,
3826                                extack);
3827 #else
3828         return 0;
3829 #endif
3830 }
3831 EXPORT_SYMBOL(tc_setup_offload_action);
3832
3833 unsigned int tcf_exts_num_actions(struct tcf_exts *exts)
3834 {
3835         unsigned int num_acts = 0;
3836         struct tc_action *act;
3837         int i;
3838
3839         tcf_exts_for_each_action(i, act, exts) {
3840                 if (is_tcf_pedit(act))
3841                         num_acts += tcf_pedit_nkeys(act);
3842                 else
3843                         num_acts++;
3844         }
3845         return num_acts;
3846 }
3847 EXPORT_SYMBOL(tcf_exts_num_actions);
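
/* tcf_exts_num_actions() lets callers size a flow_rule before translating
 * the actions (pedit contributes one entry per key); a typical sketched
 * pairing with tc_setup_offload_action() above, "f" being hypothetical:
 *
 *	struct flow_rule *rule;
 *
 *	rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts));
 *	if (!rule)
 *		return -ENOMEM;
 *	err = tc_setup_offload_action(&rule->action, &f->exts, extack);
 */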
3848
3849 #ifdef CONFIG_NET_CLS_ACT
3850 static int tcf_qevent_parse_block_index(struct nlattr *block_index_attr,
3851                                         u32 *p_block_index,
3852                                         struct netlink_ext_ack *extack)
3853 {
3854         *p_block_index = nla_get_u32(block_index_attr);
3855         if (!*p_block_index) {
3856                 NL_SET_ERR_MSG(extack, "Block number may not be zero");
3857                 return -EINVAL;
3858         }
3859
3860         return 0;
3861 }
3862
3863 int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
3864                     enum flow_block_binder_type binder_type,
3865                     struct nlattr *block_index_attr,
3866                     struct netlink_ext_ack *extack)
3867 {
3868         u32 block_index;
3869         int err;
3870
3871         if (!block_index_attr)
3872                 return 0;
3873
3874         err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack);
3875         if (err)
3876                 return err;
3877
3878         qe->info.binder_type = binder_type;
3879         qe->info.chain_head_change = tcf_chain_head_change_dflt;
3880         qe->info.chain_head_change_priv = &qe->filter_chain;
3881         qe->info.block_index = block_index;
3882
3883         return tcf_block_get_ext(&qe->block, sch, &qe->info, extack);
3884 }
3885 EXPORT_SYMBOL(tcf_qevent_init);
3886
3887 void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch)
3888 {
3889         if (qe->info.block_index)
3890                 tcf_block_put_ext(qe->block, sch, &qe->info);
3891 }
3892 EXPORT_SYMBOL(tcf_qevent_destroy);
3893
3894 int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
3895                                struct netlink_ext_ack *extack)
3896 {
3897         u32 block_index;
3898         int err;
3899
3900         if (!block_index_attr)
3901                 return 0;
3902
3903         err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack);
3904         if (err)
3905                 return err;
3906
3907         /* Bounce a newly-configured block index or a change of block. */
3908         if (block_index != qe->info.block_index) {
3909                 NL_SET_ERR_MSG(extack, "Change of blocks is not supported");
3910                 return -EINVAL;
3911         }
3912
3913         return 0;
3914 }
3915 EXPORT_SYMBOL(tcf_qevent_validate_change);
3916
3917 struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
3918                                   struct sk_buff **to_free, int *ret)
3919 {
3920         struct tcf_result cl_res;
3921         struct tcf_proto *fl;
3922
3923         if (!qe->info.block_index)
3924                 return skb;
3925
3926         fl = rcu_dereference_bh(qe->filter_chain);
3927
3928         switch (tcf_classify(skb, NULL, fl, &cl_res, false)) {
3929         case TC_ACT_SHOT:
3930                 qdisc_qstats_drop(sch);
3931                 __qdisc_drop(skb, to_free);
3932                 *ret = __NET_XMIT_BYPASS;
3933                 return NULL;
3934         case TC_ACT_STOLEN:
3935         case TC_ACT_QUEUED:
3936         case TC_ACT_TRAP:
3937                 __qdisc_drop(skb, to_free);
3938                 *ret = __NET_XMIT_STOLEN;
3939                 return NULL;
3940         case TC_ACT_REDIRECT:
3941                 skb_do_redirect(skb);
3942                 *ret = __NET_XMIT_STOLEN;
3943                 return NULL;
3944         }
3945
3946         return skb;
3947 }
3948 EXPORT_SYMBOL(tcf_qevent_handle);
3949
3950 int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe)
3951 {
3952         if (!qe->info.block_index)
3953                 return 0;
3954         return nla_put_u32(skb, attr_name, qe->info.block_index);
3955 }
3956 EXPORT_SYMBOL(tcf_qevent_dump);
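
/* A compressed sketch of the qevent lifecycle from a qdisc's point of view,
 * patterned after sch_red, the in-tree user; the surrounding qdisc code is
 * elided:
 *
 *	// in ->init()/->change():
 *	err = tcf_qevent_init(&q->qe_early_drop, sch,
 *			      FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP,
 *			      tb[TCA_RED_EARLY_DROP_BLOCK], extack);
 *
 *	// at the event point in the datapath:
 *	skb = tcf_qevent_handle(&q->qe_early_drop, sch, skb, to_free, &ret);
 *	if (!skb)
 *		return ret;	// a classifier consumed or dropped the skb
 *
 *	// in ->dump() and ->destroy():
 *	tcf_qevent_dump(skb, TCA_RED_EARLY_DROP_BLOCK, &q->qe_early_drop);
 *	tcf_qevent_destroy(&q->qe_early_drop, sch);
 */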
3957 #endif
3958
3959 static __net_init int tcf_net_init(struct net *net)
3960 {
3961         struct tcf_net *tn = net_generic(net, tcf_net_id);
3962
3963         spin_lock_init(&tn->idr_lock);
3964         idr_init(&tn->idr);
3965         return 0;
3966 }
3967
3968 static void __net_exit tcf_net_exit(struct net *net)
3969 {
3970         struct tcf_net *tn = net_generic(net, tcf_net_id);
3971
3972         idr_destroy(&tn->idr);
3973 }
3974
3975 static struct pernet_operations tcf_net_ops = {
3976         .init = tcf_net_init,
3977         .exit = tcf_net_exit,
3978         .id   = &tcf_net_id,
3979         .size = sizeof(struct tcf_net),
3980 };
3981
3982 static int __init tc_filter_init(void)
3983 {
3984         int err;
3985
3986         tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
3987         if (!tc_filter_wq)
3988                 return -ENOMEM;
3989
3990         err = register_pernet_subsys(&tcf_net_ops);
3991         if (err)
3992                 goto err_register_pernet_subsys;
3993
3994         xa_init_flags(&tcf_exts_miss_cookies_xa, XA_FLAGS_ALLOC1);
3995
3996         rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL,
3997                       RTNL_FLAG_DOIT_UNLOCKED);
3998         rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL,
3999                       RTNL_FLAG_DOIT_UNLOCKED);
4000         rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter,
4001                       tc_dump_tfilter, RTNL_FLAG_DOIT_UNLOCKED);
4002         rtnl_register(PF_UNSPEC, RTM_NEWCHAIN, tc_ctl_chain, NULL, 0);
4003         rtnl_register(PF_UNSPEC, RTM_DELCHAIN, tc_ctl_chain, NULL, 0);
4004         rtnl_register(PF_UNSPEC, RTM_GETCHAIN, tc_ctl_chain,
4005                       tc_dump_chain, 0);
4006
4007         return 0;
4008
4009 err_register_pernet_subsys:
4010         destroy_workqueue(tc_filter_wq);
4011         return err;
4012 }
4013
4014 subsys_initcall(tc_filter_init);