// SPDX-License-Identifier: GPL-2.0+
/*
 *  IPv6 IOAM implementation
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/net.h>
#include <linux/ioam6.h>
#include <linux/ioam6_genl.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>

#include <net/addrconf.h>
#include <net/genetlink.h>
#include <net/ioam6.h>
#include <net/sch_generic.h>
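/*
 * In-situ OAM (IOAM, RFC 9197) support for IPv6.  This file implements the
 * Generic Netlink interface used to manage IOAM namespaces and schemas, and
 * the helpers that fill the pre-allocated trace data of transiting packets.
 */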
static void ioam6_ns_release(struct ioam6_namespace *ns)
{
	kfree_rcu(ns, rcu);
}

static void ioam6_sc_release(struct ioam6_schema *sc)
{
	kfree_rcu(sc, rcu);
}

static void ioam6_free_ns(void *ptr, void *arg)
{
	struct ioam6_namespace *ns = (struct ioam6_namespace *)ptr;

	if (ns)
		ioam6_ns_release(ns);
}

static void ioam6_free_sc(void *ptr, void *arg)
{
	struct ioam6_schema *sc = (struct ioam6_schema *)ptr;

	if (sc)
		ioam6_sc_release(sc);
}
static int ioam6_ns_cmpfn(struct rhashtable_compare_arg *arg, const void *obj)
{
	const struct ioam6_namespace *ns = obj;

	return (ns->id != *(__be16 *)arg->key);
}

static int ioam6_sc_cmpfn(struct rhashtable_compare_arg *arg, const void *obj)
{
	const struct ioam6_schema *sc = obj;

	return (sc->id != *(u32 *)arg->key);
}
static const struct rhashtable_params rht_ns_params = {
	.key_len = sizeof(__be16),
	.key_offset = offsetof(struct ioam6_namespace, id),
	.head_offset = offsetof(struct ioam6_namespace, head),
	.automatic_shrinking = true,
	.obj_cmpfn = ioam6_ns_cmpfn,
};

static const struct rhashtable_params rht_sc_params = {
	.key_len = sizeof(u32),
	.key_offset = offsetof(struct ioam6_schema, id),
	.head_offset = offsetof(struct ioam6_schema, head),
	.automatic_shrinking = true,
	.obj_cmpfn = ioam6_sc_cmpfn,
};
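/*
 * Namespaces and schemas are kept in per-netns rhashtables.  Note that the
 * namespace key is stored big-endian (__be16), so data-path lookups can use
 * the identifier exactly as it appears on the wire, without a byte swap.
 */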
static struct genl_family ioam6_genl_family;

static const struct nla_policy ioam6_genl_policy_addns[] = {
	[IOAM6_ATTR_NS_ID] = { .type = NLA_U16 },
	[IOAM6_ATTR_NS_DATA] = { .type = NLA_U32 },
	[IOAM6_ATTR_NS_DATA_WIDE] = { .type = NLA_U64 },
};

static const struct nla_policy ioam6_genl_policy_delns[] = {
	[IOAM6_ATTR_NS_ID] = { .type = NLA_U16 },
};

static const struct nla_policy ioam6_genl_policy_addsc[] = {
	[IOAM6_ATTR_SC_ID] = { .type = NLA_U32 },
	[IOAM6_ATTR_SC_DATA] = { .type = NLA_BINARY,
				 .len = IOAM6_MAX_SCHEMA_DATA_LEN },
};

static const struct nla_policy ioam6_genl_policy_delsc[] = {
	[IOAM6_ATTR_SC_ID] = { .type = NLA_U32 },
};

static const struct nla_policy ioam6_genl_policy_ns_sc[] = {
	[IOAM6_ATTR_NS_ID] = { .type = NLA_U16 },
	[IOAM6_ATTR_SC_ID] = { .type = NLA_U32 },
	[IOAM6_ATTR_SC_NONE] = { .type = NLA_FLAG },
};
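/*
 * These commands are normally driven from userspace through the "ip ioam"
 * subcommand of iproute2.  A minimal configuration sketch (syntax from
 * memory, see ip-ioam(8) for the authoritative form):
 *
 *   ip ioam namespace add 123 data 0xdeadbeef
 *   ip ioam schema add 7 "some opaque bytes"
 *   ip ioam namespace set 123 schema 7
 *   ip ioam namespace show
 */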
static int ioam6_genl_addns(struct sk_buff *skb, struct genl_info *info)
{
	struct ioam6_pernet_data *nsdata;
	struct ioam6_namespace *ns;
	u64 data64;
	u32 data32;
	__be16 id;
	int err;

	if (!info->attrs[IOAM6_ATTR_NS_ID])
		return -EINVAL;

	id = cpu_to_be16(nla_get_u16(info->attrs[IOAM6_ATTR_NS_ID]));
	nsdata = ioam6_pernet(genl_info_net(info));

	mutex_lock(&nsdata->lock);

	/* an existing entry with the same id means the request is rejected */
	ns = rhashtable_lookup_fast(&nsdata->namespaces, &id, rht_ns_params);

	ns = kzalloc(sizeof(*ns), GFP_KERNEL);

	/* both data fields are optional and default to "unavailable" */
	if (!info->attrs[IOAM6_ATTR_NS_DATA])
		data32 = IOAM6_U32_UNAVAILABLE;
	else
		data32 = nla_get_u32(info->attrs[IOAM6_ATTR_NS_DATA]);

	if (!info->attrs[IOAM6_ATTR_NS_DATA_WIDE])
		data64 = IOAM6_U64_UNAVAILABLE;
	else
		data64 = nla_get_u64(info->attrs[IOAM6_ATTR_NS_DATA_WIDE]);

	ns->data = cpu_to_be32(data32);
	ns->data_wide = cpu_to_be64(data64);

	err = rhashtable_lookup_insert_fast(&nsdata->namespaces, &ns->head,
					    rht_ns_params);

	mutex_unlock(&nsdata->lock);
	return err;
}
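/*
 * Deleting a namespace also detaches it from its schema, if one is attached:
 * the schema's back-pointer is cleared before the namespace is released, so
 * RCU readers see either the old pairing or none, never a dangling pointer.
 */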
static int ioam6_genl_delns(struct sk_buff *skb, struct genl_info *info)
{
	struct ioam6_pernet_data *nsdata;
	struct ioam6_namespace *ns;
	struct ioam6_schema *sc;
	__be16 id;
	int err;

	if (!info->attrs[IOAM6_ATTR_NS_ID])
		return -EINVAL;

	id = cpu_to_be16(nla_get_u16(info->attrs[IOAM6_ATTR_NS_ID]));
	nsdata = ioam6_pernet(genl_info_net(info));

	mutex_lock(&nsdata->lock);

	ns = rhashtable_lookup_fast(&nsdata->namespaces, &id, rht_ns_params);

	sc = rcu_dereference_protected(ns->schema,
				       lockdep_is_held(&nsdata->lock));

	err = rhashtable_remove_fast(&nsdata->namespaces, &ns->head,
				     rht_ns_params);

	if (sc)
		rcu_assign_pointer(sc->ns, NULL);

	ioam6_ns_release(ns);

	mutex_unlock(&nsdata->lock);
	return err;
}
static int __ioam6_genl_dumpns_element(struct ioam6_namespace *ns,
				       u32 portid, u32 seq, u32 flags,
				       struct sk_buff *skb, u8 cmd)
{
	struct ioam6_schema *sc;
	u64 data64;
	u32 data32;
	void *hdr;

	hdr = genlmsg_put(skb, portid, seq, &ioam6_genl_family, flags, cmd);
	if (!hdr)
		return -ENOMEM;

	data32 = be32_to_cpu(ns->data);
	data64 = be64_to_cpu(ns->data_wide);

	if (nla_put_u16(skb, IOAM6_ATTR_NS_ID, be16_to_cpu(ns->id)) ||
	    (data32 != IOAM6_U32_UNAVAILABLE &&
	     nla_put_u32(skb, IOAM6_ATTR_NS_DATA, data32)) ||
	    (data64 != IOAM6_U64_UNAVAILABLE &&
	     nla_put_u64_64bit(skb, IOAM6_ATTR_NS_DATA_WIDE,
			       data64, IOAM6_ATTR_PAD)))
		goto nla_put_failure;

	rcu_read_lock();

	sc = rcu_dereference(ns->schema);
	if (sc && nla_put_u32(skb, IOAM6_ATTR_SC_ID, sc->id)) {
		rcu_read_unlock();
		goto nla_put_failure;
	}

	rcu_read_unlock();

	genlmsg_end(skb, hdr);
	return 0;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}
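/*
 * Dump support: a struct rhashtable_iter is allocated in the ->start()
 * callback and stashed in cb->args[0], so that a dump spanning several
 * netlink messages can resume where it left off; -EAGAIN from
 * rhashtable_walk_next() only means the walk must be retried.
 */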
static int ioam6_genl_dumpns_start(struct netlink_callback *cb)
{
	struct ioam6_pernet_data *nsdata = ioam6_pernet(sock_net(cb->skb->sk));
	struct rhashtable_iter *iter = (struct rhashtable_iter *)cb->args[0];

	if (!iter) {
		iter = kmalloc(sizeof(*iter), GFP_KERNEL);
		if (!iter)
			return -ENOMEM;

		cb->args[0] = (long)iter;
	}

	rhashtable_walk_enter(&nsdata->namespaces, iter);
	return 0;
}

static int ioam6_genl_dumpns_done(struct netlink_callback *cb)
{
	struct rhashtable_iter *iter = (struct rhashtable_iter *)cb->args[0];

	rhashtable_walk_exit(iter);
	kfree(iter);
	return 0;
}
static int ioam6_genl_dumpns(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct rhashtable_iter *iter;
	struct ioam6_namespace *ns;
	int err;

	iter = (struct rhashtable_iter *)cb->args[0];
	rhashtable_walk_start(iter);

	for (;;) {
		ns = rhashtable_walk_next(iter);
		if (IS_ERR(ns)) {
			if (PTR_ERR(ns) == -EAGAIN)
				continue;
			err = PTR_ERR(ns);
			goto done;
		} else if (!ns) {
			break;
		}

		err = __ioam6_genl_dumpns_element(ns,
						  NETLINK_CB(cb->skb).portid,
						  cb->nlh->nlmsg_seq,
						  NLM_F_MULTI,
						  skb,
						  IOAM6_CMD_DUMP_NAMESPACES);
		if (err)
			goto done;
	}

	err = skb->len;

done:
	rhashtable_walk_stop(iter);
	return err;
}
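/*
 * Schema data is stored padded to a multiple of 4 octets, and sc->hdr
 * pre-computes the Opaque State Snapshot header (RFC 9197): the length in
 * 4-octet units goes in the top byte, the schema id in the low 24 bits.
 * For example, 10 bytes of data give len_aligned = 12 and a length field
 * of 3.
 */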
static int ioam6_genl_addsc(struct sk_buff *skb, struct genl_info *info)
{
	struct ioam6_pernet_data *nsdata;
	int len, len_aligned, err;
	struct ioam6_schema *sc;
	u32 id;

	if (!info->attrs[IOAM6_ATTR_SC_ID] || !info->attrs[IOAM6_ATTR_SC_DATA])
		return -EINVAL;

	id = nla_get_u32(info->attrs[IOAM6_ATTR_SC_ID]);
	nsdata = ioam6_pernet(genl_info_net(info));

	mutex_lock(&nsdata->lock);

	/* an existing entry with the same id means the request is rejected */
	sc = rhashtable_lookup_fast(&nsdata->schemas, &id, rht_sc_params);

	len = nla_len(info->attrs[IOAM6_ATTR_SC_DATA]);
	len_aligned = ALIGN(len, 4);

	sc = kzalloc(sizeof(*sc) + len_aligned, GFP_KERNEL);

	sc->id = id;
	sc->len = len_aligned;
	sc->hdr = cpu_to_be32(sc->id | ((u8)(sc->len / 4) << 24));
	nla_memcpy(sc->data, info->attrs[IOAM6_ATTR_SC_DATA], len);

	err = rhashtable_lookup_insert_fast(&nsdata->schemas, &sc->head,
					    rht_sc_params);

	mutex_unlock(&nsdata->lock);
	return err;
}
static int ioam6_genl_delsc(struct sk_buff *skb, struct genl_info *info)
{
	struct ioam6_pernet_data *nsdata;
	struct ioam6_namespace *ns;
	struct ioam6_schema *sc;
	u32 id;
	int err;

	if (!info->attrs[IOAM6_ATTR_SC_ID])
		return -EINVAL;

	id = nla_get_u32(info->attrs[IOAM6_ATTR_SC_ID]);
	nsdata = ioam6_pernet(genl_info_net(info));

	mutex_lock(&nsdata->lock);

	sc = rhashtable_lookup_fast(&nsdata->schemas, &id, rht_sc_params);

	ns = rcu_dereference_protected(sc->ns, lockdep_is_held(&nsdata->lock));

	err = rhashtable_remove_fast(&nsdata->schemas, &sc->head,
				     rht_sc_params);

	if (ns)
		rcu_assign_pointer(ns->schema, NULL);

	ioam6_sc_release(sc);

	mutex_unlock(&nsdata->lock);
	return err;
}
static int __ioam6_genl_dumpsc_element(struct ioam6_schema *sc,
				       u32 portid, u32 seq, u32 flags,
				       struct sk_buff *skb, u8 cmd)
{
	struct ioam6_namespace *ns;
	void *hdr;

	hdr = genlmsg_put(skb, portid, seq, &ioam6_genl_family, flags, cmd);
	if (!hdr)
		return -ENOMEM;

	if (nla_put_u32(skb, IOAM6_ATTR_SC_ID, sc->id) ||
	    nla_put(skb, IOAM6_ATTR_SC_DATA, sc->len, sc->data))
		goto nla_put_failure;

	rcu_read_lock();

	ns = rcu_dereference(sc->ns);
	if (ns && nla_put_u16(skb, IOAM6_ATTR_NS_ID, be16_to_cpu(ns->id))) {
		rcu_read_unlock();
		goto nla_put_failure;
	}

	rcu_read_unlock();

	genlmsg_end(skb, hdr);
	return 0;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}
static int ioam6_genl_dumpsc_start(struct netlink_callback *cb)
{
	struct ioam6_pernet_data *nsdata = ioam6_pernet(sock_net(cb->skb->sk));
	struct rhashtable_iter *iter = (struct rhashtable_iter *)cb->args[0];

	if (!iter) {
		iter = kmalloc(sizeof(*iter), GFP_KERNEL);
		if (!iter)
			return -ENOMEM;

		cb->args[0] = (long)iter;
	}

	rhashtable_walk_enter(&nsdata->schemas, iter);
	return 0;
}

static int ioam6_genl_dumpsc_done(struct netlink_callback *cb)
{
	struct rhashtable_iter *iter = (struct rhashtable_iter *)cb->args[0];

	rhashtable_walk_exit(iter);
	kfree(iter);
	return 0;
}
static int ioam6_genl_dumpsc(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct rhashtable_iter *iter;
	struct ioam6_schema *sc;
	int err;

	iter = (struct rhashtable_iter *)cb->args[0];
	rhashtable_walk_start(iter);

	for (;;) {
		sc = rhashtable_walk_next(iter);
		if (IS_ERR(sc)) {
			if (PTR_ERR(sc) == -EAGAIN)
				continue;
			err = PTR_ERR(sc);
			goto done;
		} else if (!sc) {
			break;
		}

		err = __ioam6_genl_dumpsc_element(sc,
						  NETLINK_CB(cb->skb).portid,
						  cb->nlh->nlmsg_seq,
						  NLM_F_MULTI,
						  skb,
						  IOAM6_CMD_DUMP_SCHEMAS);
		if (err)
			goto done;
	}

	err = skb->len;

done:
	rhashtable_walk_stop(iter);
	return err;
}
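/*
 * A namespace and a schema point at each other (ns->schema / sc->ns).  Both
 * pointers are only updated under the per-netns mutex and read under RCU;
 * the IOAM6_ATTR_SC_NONE flag detaches the namespace from any schema.
 */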
static int ioam6_genl_ns_set_schema(struct sk_buff *skb, struct genl_info *info)
{
	struct ioam6_namespace *ns, *ns_ref;
	struct ioam6_schema *sc, *sc_ref;
	struct ioam6_pernet_data *nsdata;
	__be16 ns_id;
	u32 sc_id;
	int err;

	if (!info->attrs[IOAM6_ATTR_NS_ID] ||
	    (!info->attrs[IOAM6_ATTR_SC_ID] &&
	     !info->attrs[IOAM6_ATTR_SC_NONE]))
		return -EINVAL;

	ns_id = cpu_to_be16(nla_get_u16(info->attrs[IOAM6_ATTR_NS_ID]));
	nsdata = ioam6_pernet(genl_info_net(info));

	mutex_lock(&nsdata->lock);

	ns = rhashtable_lookup_fast(&nsdata->namespaces, &ns_id, rht_ns_params);

	if (info->attrs[IOAM6_ATTR_SC_NONE]) {
		sc = NULL;
	} else {
		sc_id = nla_get_u32(info->attrs[IOAM6_ATTR_SC_ID]);
		sc = rhashtable_lookup_fast(&nsdata->schemas, &sc_id,
					    rht_sc_params);
	}

	/* unlink the previous pairing on both sides, then link the new one */
	sc_ref = rcu_dereference_protected(ns->schema,
					   lockdep_is_held(&nsdata->lock));
	if (sc_ref)
		rcu_assign_pointer(sc_ref->ns, NULL);
	rcu_assign_pointer(ns->schema, sc);

	if (sc) {
		ns_ref = rcu_dereference_protected(sc->ns,
						   lockdep_is_held(&nsdata->lock));
		if (ns_ref)
			rcu_assign_pointer(ns_ref->schema, NULL);
		rcu_assign_pointer(sc->ns, ns);
	}

	err = 0;

	mutex_unlock(&nsdata->lock);
	return err;
}
static const struct genl_ops ioam6_genl_ops[] = {
	{
		.cmd = IOAM6_CMD_ADD_NAMESPACE,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = ioam6_genl_addns,
		.flags = GENL_ADMIN_PERM,
		.policy = ioam6_genl_policy_addns,
		.maxattr = ARRAY_SIZE(ioam6_genl_policy_addns) - 1,
	},
	{
		.cmd = IOAM6_CMD_DEL_NAMESPACE,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = ioam6_genl_delns,
		.flags = GENL_ADMIN_PERM,
		.policy = ioam6_genl_policy_delns,
		.maxattr = ARRAY_SIZE(ioam6_genl_policy_delns) - 1,
	},
	{
		.cmd = IOAM6_CMD_DUMP_NAMESPACES,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.start = ioam6_genl_dumpns_start,
		.dumpit = ioam6_genl_dumpns,
		.done = ioam6_genl_dumpns_done,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = IOAM6_CMD_ADD_SCHEMA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = ioam6_genl_addsc,
		.flags = GENL_ADMIN_PERM,
		.policy = ioam6_genl_policy_addsc,
		.maxattr = ARRAY_SIZE(ioam6_genl_policy_addsc) - 1,
	},
	{
		.cmd = IOAM6_CMD_DEL_SCHEMA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = ioam6_genl_delsc,
		.flags = GENL_ADMIN_PERM,
		.policy = ioam6_genl_policy_delsc,
		.maxattr = ARRAY_SIZE(ioam6_genl_policy_delsc) - 1,
	},
	{
		.cmd = IOAM6_CMD_DUMP_SCHEMAS,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.start = ioam6_genl_dumpsc_start,
		.dumpit = ioam6_genl_dumpsc,
		.done = ioam6_genl_dumpsc_done,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = IOAM6_CMD_NS_SET_SCHEMA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = ioam6_genl_ns_set_schema,
		.flags = GENL_ADMIN_PERM,
		.policy = ioam6_genl_policy_ns_sc,
		.maxattr = ARRAY_SIZE(ioam6_genl_policy_ns_sc) - 1,
	},
};
#define IOAM6_GENL_EV_GRP_OFFSET 0

static const struct genl_multicast_group ioam6_mcgrps[] = {
	[IOAM6_GENL_EV_GRP_OFFSET] = { .name = IOAM6_GENL_EV_GRP_NAME,
				       .flags = GENL_MCAST_CAP_NET_ADMIN },
};
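/*
 * ioam6_event() feeds this multicast group with IOAM6_EVENT_TRACE
 * notifications; GENL_MCAST_CAP_NET_ADMIN restricts subscription to
 * listeners with CAP_NET_ADMIN.
 */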
static int ioam6_event_put_trace(struct sk_buff *skb,
				 struct ioam6_trace_hdr *trace,
				 unsigned int len)
{
	if (nla_put_u16(skb, IOAM6_EVENT_ATTR_TRACE_NAMESPACE,
			be16_to_cpu(trace->namespace_id)) ||
	    nla_put_u8(skb, IOAM6_EVENT_ATTR_TRACE_NODELEN, trace->nodelen) ||
	    nla_put_u32(skb, IOAM6_EVENT_ATTR_TRACE_TYPE,
			be32_to_cpu(trace->type_be32)) ||
	    nla_put(skb, IOAM6_EVENT_ATTR_TRACE_DATA,
		    len - sizeof(struct ioam6_trace_hdr) - trace->remlen * 4,
		    trace->data + trace->remlen * 4))
		return 1;

	return 0;
}
void ioam6_event(enum ioam6_event_type type, struct net *net, gfp_t gfp,
		 void *opt, unsigned int opt_len)
{
	struct nlmsghdr *nlh;
	struct sk_buff *skb;

	if (!genl_has_listeners(&ioam6_genl_family, net,
				IOAM6_GENL_EV_GRP_OFFSET))
		return;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
	if (!skb)
		return;

	nlh = genlmsg_put(skb, 0, 0, &ioam6_genl_family, 0, type);
	if (!nlh)
		goto nla_put_failure;

	switch (type) {
	case IOAM6_EVENT_UNSPEC:
		WARN_ON_ONCE(1);
		break;
	case IOAM6_EVENT_TRACE:
		if (ioam6_event_put_trace(skb, (struct ioam6_trace_hdr *)opt,
					  opt_len))
			goto nla_put_failure;
		break;
	}

	genlmsg_end(skb, nlh);
	genlmsg_multicast_netns(&ioam6_genl_family, net, skb, 0,
				IOAM6_GENL_EV_GRP_OFFSET, gfp);
	return;

nla_put_failure:
	nlmsg_free(skb);
}
static struct genl_family ioam6_genl_family __ro_after_init = {
	.name = IOAM6_GENL_NAME,
	.version = IOAM6_GENL_VERSION,
	.parallel_ops = true,
	.ops = ioam6_genl_ops,
	.n_ops = ARRAY_SIZE(ioam6_genl_ops),
	.resv_start_op = IOAM6_CMD_NS_SET_SCHEMA + 1,
	.mcgrps = ioam6_mcgrps,
	.n_mcgrps = ARRAY_SIZE(ioam6_mcgrps),
	.module = THIS_MODULE,
};
struct ioam6_namespace *ioam6_namespace(struct net *net, __be16 id)
{
	struct ioam6_pernet_data *nsdata = ioam6_pernet(net);

	return rhashtable_lookup_fast(&nsdata->namespaces, &id, rht_ns_params);
}
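/*
 * The pre-allocated trace area fills backwards: remlen (in 4-octet units)
 * is the free space left at the start of trace->data, and the node data
 * added by previous hops sits right after it.  This node therefore writes
 * its own node data (nodelen * 4 bytes, plus sclen * 4 bytes of opaque
 * snapshot) just before that, at:
 *
 *   data = trace->data + remlen * 4 - nodelen * 4 - sclen * 4
 */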
static void __ioam6_fill_trace_data(struct sk_buff *skb,
				    struct ioam6_namespace *ns,
				    struct ioam6_trace_hdr *trace,
				    struct ioam6_schema *sc,
				    u8 sclen, bool is_input)
{
	struct timespec64 ts;
	ktime_t tstamp;
	u64 raw64;
	u32 raw32;
	u16 raw16;
	u8 *data;
	u8 byte;

	data = trace->data + trace->remlen * 4 - trace->nodelen * 4 - sclen * 4;

	/* hop_lim and node_id */
	if (trace->type.bit0) {
		byte = ipv6_hdr(skb)->hop_limit;
		if (is_input)
			byte--;

		raw32 = dev_net(skb_dst(skb)->dev)->ipv6.sysctl.ioam6_id;

		*(__be32 *)data = cpu_to_be32((byte << 24) | raw32);
		data += sizeof(__be32);
	}
	/* ingress_if_id and egress_if_id */
	if (trace->type.bit1) {
		if (!skb->dev)
			raw16 = IOAM6_U16_UNAVAILABLE;
		else
			raw16 = (__force u16)READ_ONCE(__in6_dev_get(skb->dev)->cnf.ioam6_id);

		*(__be16 *)data = cpu_to_be16(raw16);
		data += sizeof(__be16);

		if (skb_dst(skb)->dev->flags & IFF_LOOPBACK)
			raw16 = IOAM6_U16_UNAVAILABLE;
		else
			raw16 = (__force u16)READ_ONCE(__in6_dev_get(skb_dst(skb)->dev)->cnf.ioam6_id);

		*(__be16 *)data = cpu_to_be16(raw16);
		data += sizeof(__be16);
	}

	/* timestamp seconds */
	if (trace->type.bit2) {
		if (!skb->dev) {
			*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
		} else {
			tstamp = skb_tstamp_cond(skb, true);
			ts = ktime_to_timespec64(tstamp);

			*(__be32 *)data = cpu_to_be32((u32)ts.tv_sec);
		}
		data += sizeof(__be32);
	}
	/* timestamp subseconds */
	if (trace->type.bit3) {
		if (!skb->dev) {
			*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
		} else {
			if (!trace->type.bit2) {
				tstamp = skb_tstamp_cond(skb, true);
				ts = ktime_to_timespec64(tstamp);
			}

			*(__be32 *)data = cpu_to_be32((u32)(ts.tv_nsec / NSEC_PER_USEC));
		}
		data += sizeof(__be32);
	}

	/* transit delay */
	if (trace->type.bit4) {
		*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
		data += sizeof(__be32);
	}

	/* namespace data */
	if (trace->type.bit5) {
		*(__be32 *)data = ns->data;
		data += sizeof(__be32);
	}
	/* queue depth */
	if (trace->type.bit6) {
		struct netdev_queue *queue;
		struct Qdisc *qdisc;
		__u32 qlen, backlog;

		if (skb_dst(skb)->dev->flags & IFF_LOOPBACK) {
			*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
		} else {
			queue = skb_get_tx_queue(skb_dst(skb)->dev, skb);
			qdisc = rcu_dereference(queue->qdisc);
			qdisc_qstats_qlen_backlog(qdisc, &qlen, &backlog);

			*(__be32 *)data = cpu_to_be32(backlog);
		}
		data += sizeof(__be32);
	}

	/* checksum complement */
	if (trace->type.bit7) {
		*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
		data += sizeof(__be32);
	}

	/* hop_lim and node_id (wide) */
	if (trace->type.bit8) {
		byte = ipv6_hdr(skb)->hop_limit;
		if (is_input)
			byte--;

		raw64 = dev_net(skb_dst(skb)->dev)->ipv6.sysctl.ioam6_id_wide;

		*(__be64 *)data = cpu_to_be64(((u64)byte << 56) | raw64);
		data += sizeof(__be64);
	}
	/* ingress_if_id and egress_if_id (wide) */
	if (trace->type.bit9) {
		if (!skb->dev)
			raw32 = IOAM6_U32_UNAVAILABLE;
		else
			raw32 = READ_ONCE(__in6_dev_get(skb->dev)->cnf.ioam6_id_wide);

		*(__be32 *)data = cpu_to_be32(raw32);
		data += sizeof(__be32);

		if (skb_dst(skb)->dev->flags & IFF_LOOPBACK)
			raw32 = IOAM6_U32_UNAVAILABLE;
		else
			raw32 = READ_ONCE(__in6_dev_get(skb_dst(skb)->dev)->cnf.ioam6_id_wide);

		*(__be32 *)data = cpu_to_be32(raw32);
		data += sizeof(__be32);
	}

	/* namespace data (wide) */
	if (trace->type.bit10) {
		*(__be64 *)data = ns->data_wide;
		data += sizeof(__be64);
	}

	/* buffer occupancy */
	if (trace->type.bit11) {
		*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
		data += sizeof(__be32);
	}
	/* bit12 undefined: filled with empty value */
	if (trace->type.bit12) {
		*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
		data += sizeof(__be32);
	}

	/* bit13 undefined: filled with empty value */
	if (trace->type.bit13) {
		*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
		data += sizeof(__be32);
	}

	/* bit14 undefined: filled with empty value */
	if (trace->type.bit14) {
		*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
		data += sizeof(__be32);
	}

	/* bit15 undefined: filled with empty value */
	if (trace->type.bit15) {
		*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
		data += sizeof(__be32);
	}

	/* bit16 undefined: filled with empty value */
	if (trace->type.bit16) {
		*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
		data += sizeof(__be32);
	}

	/* bit17 undefined: filled with empty value */
	if (trace->type.bit17) {
		*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
		data += sizeof(__be32);
	}

	/* bit18 undefined: filled with empty value */
	if (trace->type.bit18) {
		*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
		data += sizeof(__be32);
	}

	/* bit19 undefined: filled with empty value */
	if (trace->type.bit19) {
		*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
		data += sizeof(__be32);
	}

	/* bit20 undefined: filled with empty value */
	if (trace->type.bit20) {
		*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
		data += sizeof(__be32);
	}

	/* bit21 undefined: filled with empty value */
	if (trace->type.bit21) {
		*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
		data += sizeof(__be32);
	}
	/* opaque state snapshot */
	if (trace->type.bit22) {
		if (!sc) {
			*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE >> 8);
		} else {
			*(__be32 *)data = sc->hdr;
			data += sizeof(__be32);

			memcpy(data, sc->data, sc->len);
		}
	}
}
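/*
 * sclen accounts for the Opaque State Snapshot when bit22 is set: one
 * 4-octet unit for the snapshot header plus the schema length.  For
 * example, with an 8-byte schema attached, sclen = 1 + 8 / 4 = 3.
 */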
/* called with rcu_read_lock() */
void ioam6_fill_trace_data(struct sk_buff *skb,
			   struct ioam6_namespace *ns,
			   struct ioam6_trace_hdr *trace,
			   bool is_input)
{
	struct ioam6_schema *sc;
	u8 sclen = 0;

	/* Skip if Overflow flag is set
	 */
	if (trace->overflow)
		return;

	/* NodeLen does not include Opaque State Snapshot length. We need to
	 * take it into account if the corresponding bit is set (bit 22) and
	 * if the current IOAM namespace has an active schema attached to it
	 */
	sc = rcu_dereference(ns->schema);
	if (trace->type.bit22) {
		sclen = sizeof_field(struct ioam6_schema, hdr) / 4;
		if (sc)
			sclen += sc->len / 4;
	}

	/* If there is no space remaining, we set the Overflow flag and we
	 * skip without filling the trace
	 */
	if (!trace->remlen || trace->remlen < trace->nodelen + sclen) {
		trace->overflow = 1;
		return;
	}

	__ioam6_fill_trace_data(skb, ns, trace, sc, sclen, is_input);
	trace->remlen -= trace->nodelen + sclen;
}
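/*
 * Per-netns state is just the mutex and the two rhashtables; on netns
 * teardown, both tables are destroyed together with any remaining
 * namespaces and schemas.
 */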
static int __net_init ioam6_net_init(struct net *net)
{
	struct ioam6_pernet_data *nsdata;
	int err;

	nsdata = kzalloc(sizeof(*nsdata), GFP_KERNEL);
	if (!nsdata)
		return -ENOMEM;

	mutex_init(&nsdata->lock);
	net->ipv6.ioam6_data = nsdata;

	err = rhashtable_init(&nsdata->namespaces, &rht_ns_params);
	if (err)
		goto free_nsdata;

	err = rhashtable_init(&nsdata->schemas, &rht_sc_params);
	if (err)
		goto free_rht_ns;

	return 0;

free_rht_ns:
	rhashtable_destroy(&nsdata->namespaces);
free_nsdata:
	kfree(nsdata);
	net->ipv6.ioam6_data = NULL;
	return err;
}
static void __net_exit ioam6_net_exit(struct net *net)
{
	struct ioam6_pernet_data *nsdata = ioam6_pernet(net);

	rhashtable_free_and_destroy(&nsdata->namespaces, ioam6_free_ns, NULL);
	rhashtable_free_and_destroy(&nsdata->schemas, ioam6_free_sc, NULL);

	kfree(nsdata);
	net->ipv6.ioam6_data = NULL;
}
static struct pernet_operations ioam6_net_ops = {
	.init = ioam6_net_init,
	.exit = ioam6_net_exit,
};
int __init ioam6_init(void)
{
	int err = register_pernet_subsys(&ioam6_net_ops);

	if (err)
		goto out;

	err = genl_register_family(&ioam6_genl_family);
	if (err)
		goto out_unregister_pernet_subsys;

#ifdef CONFIG_IPV6_IOAM6_LWTUNNEL
	err = ioam6_iptunnel_init();
	if (err)
		goto out_unregister_genl;
#endif

	pr_info("In-situ OAM (IOAM) with IPv6\n");

out:
	return err;

#ifdef CONFIG_IPV6_IOAM6_LWTUNNEL
out_unregister_genl:
	genl_unregister_family(&ioam6_genl_family);
#endif

out_unregister_pernet_subsys:
	unregister_pernet_subsys(&ioam6_net_ops);
	goto out;
}
void ioam6_exit(void)
{
#ifdef CONFIG_IPV6_IOAM6_LWTUNNEL
	ioam6_iptunnel_exit();
#endif
	genl_unregister_family(&ioam6_genl_family);
	unregister_pernet_subsys(&ioam6_net_ops);
}