/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

#include <linux/tc_act/tc_vlan.h>
#include <net/tc_act/tc_vlan.h>

static unsigned int vlan_net_id;
static struct tc_action_ops act_vlan_ops;
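
/* Per-packet handler: apply the configured pop/push/modify operation to the
 * skb and return the action's verdict (TC_ACT_SHOT if the operation fails).
 */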
static int tcf_vlan_act(struct sk_buff *skb, const struct tc_action *a,
			struct tcf_result *res)
{
	struct tcf_vlan *v = to_vlan(a);
	struct tcf_vlan_params *p;
	int action;
	int err;
	u16 tci;

	tcf_lastuse_update(&v->tcf_tm);
	bstats_cpu_update(this_cpu_ptr(v->common.cpu_bstats), skb);

	/* Ensure 'data' points at mac_header prior to calling VLAN-manipulating
	 * functions.
	 */
	if (skb_at_tc_ingress(skb))
		skb_push_rcsum(skb, skb->mac_len);

	action = READ_ONCE(v->tcf_action);

	p = rcu_dereference_bh(v->vlan_p);

	switch (p->tcfv_action) {
	case TCA_VLAN_ACT_POP:
		err = skb_vlan_pop(skb);
		if (err)
			goto drop;
		break;
	case TCA_VLAN_ACT_PUSH:
		err = skb_vlan_push(skb, p->tcfv_push_proto, p->tcfv_push_vid |
				    (p->tcfv_push_prio << VLAN_PRIO_SHIFT));
		if (err)
			goto drop;
		break;
	case TCA_VLAN_ACT_MODIFY:
		/* No-op if no vlan tag (either hw-accel or in-payload) */
		if (!skb_vlan_tagged(skb))
			goto out;
		/* extract existing tag (and guarantee no hw-accel tag) */
		if (skb_vlan_tag_present(skb)) {
			tci = skb_vlan_tag_get(skb);
			__vlan_hwaccel_clear_tag(skb);
		} else {
			/* in-payload vlan tag, pop it */
			err = __skb_vlan_pop(skb, &tci);
			if (err)
				goto drop;
		}
		/* replace the vid */
		tci = (tci & ~VLAN_VID_MASK) | p->tcfv_push_vid;
		/* replace prio bits, if tcfv_push_prio specified */
		if (p->tcfv_push_prio) {
			tci &= ~VLAN_PRIO_MASK;
			tci |= p->tcfv_push_prio << VLAN_PRIO_SHIFT;
		}
		/* put updated tci as hwaccel tag */
		__vlan_hwaccel_put_tag(skb, p->tcfv_push_proto, tci);
		break;
	default:
		BUG();
	}

out:
	if (skb_at_tc_ingress(skb))
		skb_pull_rcsum(skb, skb->mac_len);

	return action;

drop:
	qstats_drop_inc(this_cpu_ptr(v->common.cpu_qstats));
	return TC_ACT_SHOT;
}
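
/* Netlink policy for the TCA_VLAN_* configuration attributes. */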
static const struct nla_policy vlan_policy[TCA_VLAN_MAX + 1] = {
	[TCA_VLAN_PARMS] = { .len = sizeof(struct tc_vlan) },
	[TCA_VLAN_PUSH_VLAN_ID] = { .type = NLA_U16 },
	[TCA_VLAN_PUSH_VLAN_PROTOCOL] = { .type = NLA_U16 },
	[TCA_VLAN_PUSH_VLAN_PRIORITY] = { .type = NLA_U8 },
};
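
/* Parse and validate the netlink configuration, then publish a new
 * tcf_vlan_params block under tcf_lock; the previous block, if any, is
 * freed after an RCU grace period.
 */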
static int tcf_vlan_init(struct net *net, struct nlattr *nla,
			 struct nlattr *est, struct tc_action **a,
			 int ovr, int bind, bool rtnl_held,
			 struct tcf_proto *tp, struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, vlan_net_id);
	struct nlattr *tb[TCA_VLAN_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	struct tcf_vlan_params *p;
	struct tc_vlan *parm;
	struct tcf_vlan *v;
	int action;
	u16 push_vid = 0;
	__be16 push_proto = 0;
	u8 push_prio = 0;
	bool exists = false;
	int ret = 0, err;

	if (!nla)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_VLAN_MAX, nla, vlan_policy,
					  NULL);
	if (err < 0)
		return err;

	if (!tb[TCA_VLAN_PARMS])
		return -EINVAL;
	parm = nla_data(tb[TCA_VLAN_PARMS]);
	err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
	if (err < 0)
		return err;
	exists = err;
	if (exists && bind)
		return 0;

	switch (parm->v_action) {
	case TCA_VLAN_ACT_POP:
		break;
	case TCA_VLAN_ACT_PUSH:
	case TCA_VLAN_ACT_MODIFY:
		if (!tb[TCA_VLAN_PUSH_VLAN_ID]) {
			if (exists)
				tcf_idr_release(*a, bind);
			else
				tcf_idr_cleanup(tn, parm->index);
			return -EINVAL;
		}
		push_vid = nla_get_u16(tb[TCA_VLAN_PUSH_VLAN_ID]);
		if (push_vid >= VLAN_VID_MASK) {
			if (exists)
				tcf_idr_release(*a, bind);
			else
				tcf_idr_cleanup(tn, parm->index);
			return -ERANGE;
		}
		if (tb[TCA_VLAN_PUSH_VLAN_PROTOCOL]) {
			push_proto = nla_get_be16(tb[TCA_VLAN_PUSH_VLAN_PROTOCOL]);
			switch (push_proto) {
			case htons(ETH_P_8021Q):
			case htons(ETH_P_8021AD):
				break;
			default:
				if (exists)
					tcf_idr_release(*a, bind);
				else
					tcf_idr_cleanup(tn, parm->index);
				return -EPROTONOSUPPORT;
			}
		} else {
			push_proto = htons(ETH_P_8021Q);
		}
		if (tb[TCA_VLAN_PUSH_VLAN_PRIORITY])
			push_prio = nla_get_u8(tb[TCA_VLAN_PUSH_VLAN_PRIORITY]);
		break;
	default:
		if (exists)
			tcf_idr_release(*a, bind);
		else
			tcf_idr_cleanup(tn, parm->index);
		return -EINVAL;
	}
	action = parm->v_action;

	if (!exists) {
		ret = tcf_idr_create(tn, parm->index, est, a,
				     &act_vlan_ops, bind, true);
		if (ret) {
			tcf_idr_cleanup(tn, parm->index);
			return ret;
		}
		ret = ACT_P_CREATED;
	} else if (!ovr) {
		tcf_idr_release(*a, bind);
		return -EEXIST;
	}

	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto release_idr;

	v = to_vlan(*a);
	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p) {
		err = -ENOMEM;
		goto put_chain;
	}
	p->tcfv_action = action;
	p->tcfv_push_vid = push_vid;
	p->tcfv_push_prio = push_prio;
	p->tcfv_push_proto = push_proto;

	spin_lock_bh(&v->tcf_lock);
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	rcu_swap_protected(v->vlan_p, p, lockdep_is_held(&v->tcf_lock));
	spin_unlock_bh(&v->tcf_lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	if (p)
		kfree_rcu(p, rcu);

	if (ret == ACT_P_CREATED)
		tcf_idr_insert(tn, *a);
	return ret;
put_chain:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
release_idr:
	tcf_idr_release(*a, bind);
	return err;
}
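
/* Free the parameter block when the action itself is destroyed. */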
static void tcf_vlan_cleanup(struct tc_action *a)
{
	struct tcf_vlan *v = to_vlan(a);
	struct tcf_vlan_params *p;
	p = rcu_dereference_protected(v->vlan_p, 1);
	if (p)
		kfree_rcu(p, rcu);
}
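
/* Report the current configuration and timestamps back over netlink. */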
static int tcf_vlan_dump(struct sk_buff *skb, struct tc_action *a,
			 int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_vlan *v = to_vlan(a);
	struct tcf_vlan_params *p;
	struct tc_vlan opt = {
		.index = v->tcf_index,
		.refcnt = refcount_read(&v->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&v->tcf_bindcnt) - bind,
	};
	struct tcf_t t;

	spin_lock_bh(&v->tcf_lock);
	opt.action = v->tcf_action;
	p = rcu_dereference_protected(v->vlan_p, lockdep_is_held(&v->tcf_lock));
	opt.v_action = p->tcfv_action;
	if (nla_put(skb, TCA_VLAN_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	if ((p->tcfv_action == TCA_VLAN_ACT_PUSH ||
	     p->tcfv_action == TCA_VLAN_ACT_MODIFY) &&
	    (nla_put_u16(skb, TCA_VLAN_PUSH_VLAN_ID, p->tcfv_push_vid) ||
	     nla_put_be16(skb, TCA_VLAN_PUSH_VLAN_PROTOCOL,
			  p->tcfv_push_proto) ||
	     nla_put_u8(skb, TCA_VLAN_PUSH_VLAN_PRIORITY,
			p->tcfv_push_prio)))
		goto nla_put_failure;

	tcf_tm_dump(&t, &v->tcf_tm);
	if (nla_put_64bit(skb, TCA_VLAN_TM, sizeof(t), &t, TCA_VLAN_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&v->tcf_lock);

	return skb->len;

nla_put_failure:
	spin_unlock_bh(&v->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}
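
/* Remaining hooks are thin wrappers around the generic action machinery. */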
static int tcf_vlan_walker(struct net *net, struct sk_buff *skb,
			   struct netlink_callback *cb, int type,
			   const struct tc_action_ops *ops,
			   struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, vlan_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_vlan_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, vlan_net_id);

	return tcf_idr_search(tn, a, index);
}
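
/* Action ops registered with the tc action core. */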
static struct tc_action_ops act_vlan_ops = {
	.kind = "vlan",
	.id = TCA_ID_VLAN,
	.owner = THIS_MODULE,
	.act = tcf_vlan_act,
	.dump = tcf_vlan_dump,
	.init = tcf_vlan_init,
	.cleanup = tcf_vlan_cleanup,
	.walk = tcf_vlan_walker,
	.lookup = tcf_vlan_search,
	.size = sizeof(struct tcf_vlan),
};
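
/* Per-network-namespace setup and teardown of the action table. */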
static __net_init int vlan_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, vlan_net_id);

	return tc_action_net_init(tn, &act_vlan_ops);
}

static void __net_exit vlan_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, vlan_net_id);
}

static struct pernet_operations vlan_net_ops = {
	.init = vlan_init_net,
	.exit_batch = vlan_exit_net,
	.id = &vlan_net_id,
	.size = sizeof(struct tc_action_net),
};

static int __init vlan_init_module(void)
{
	return tcf_register_action(&act_vlan_ops, &vlan_net_ops);
}

static void __exit vlan_cleanup_module(void)
{
	tcf_unregister_action(&act_vlan_ops, &vlan_net_ops);
}

module_init(vlan_init_module);
module_exit(vlan_cleanup_module);

MODULE_DESCRIPTION("vlan manipulation actions");
MODULE_LICENSE("GPL v2");