// SPDX-License-Identifier: GPL-2.0
/*
 * Management Component Transport Protocol (MCTP) - routing
 * implementation.
 *
 * This is currently based on a simple routing table, with no dst cache. The
 * number of routes should stay fairly small, so the lookup cost is small.
 *
 * Copyright (c) 2021 Code Construct
 * Copyright (c) 2021 Google
 */

#include <linux/idr.h>
#include <linux/mctp.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>

#include <uapi/linux/if_arp.h>

#include <net/mctp.h>
#include <net/mctpdevice.h>
#include <net/netlink.h>
#include <net/sock.h>

static const unsigned int mctp_message_maxlen = 64 * 1024;

/* route output callbacks */
static int mctp_route_discard(struct mctp_route *route, struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}

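/* Find the socket bound to this (net, type, dest EID) triple. Called from
 * packet input under the RCU read lock; returns NULL if no bind matches.
 */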
static struct mctp_sock *mctp_lookup_bind(struct net *net, struct sk_buff *skb)
{
	struct mctp_skb_cb *cb = mctp_cb(skb);
	struct mctp_hdr *mh;
	struct sock *sk;
	u8 type;

	WARN_ON(!rcu_read_lock_held());

	/* TODO: look up in skb->cb? */
	mh = mctp_hdr(skb);

	if (!skb_headlen(skb))
		return NULL;

	type = (*(u8 *)skb->data) & 0x7f;

	sk_for_each_rcu(sk, &net->mctp.binds) {
		struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk);

		if (msk->bind_net != MCTP_NET_ANY && msk->bind_net != cb->net)
			continue;

		if (msk->bind_type != type)
			continue;

		if (msk->bind_addr != MCTP_ADDR_ANY &&
		    msk->bind_addr != mh->dest)
			continue;

		return msk;
	}

	return NULL;
}

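/* A key matches a packet when the (local EID, peer EID, tag) tuple is
 * identical; keys track in-flight tags and any in-progress reassembly.
 */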
static bool mctp_key_match(struct mctp_sk_key *key, mctp_eid_t local,
			   mctp_eid_t peer, u8 tag)
{
	if (key->local_addr != local)
		return false;

	if (key->peer_addr != peer)
		return false;

	if (key->tag != tag)
		return false;

	return true;
}

static struct mctp_sk_key *mctp_lookup_key(struct net *net, struct sk_buff *skb,
					   mctp_eid_t peer)
{
	struct mctp_sk_key *key, *ret;
	struct mctp_hdr *mh;
	u8 tag;

	WARN_ON(!rcu_read_lock_held());

	mh = mctp_hdr(skb);
	tag = mh->flags_seq_tag & (MCTP_HDR_TAG_MASK | MCTP_HDR_FLAG_TO);

	ret = NULL;

	hlist_for_each_entry_rcu(key, &net->mctp.keys, hlist) {
		if (mctp_key_match(key, mh->dest, peer, tag)) {
			ret = key;
			break;
		}
	}

	return ret;
}

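/* Allocate a new key for (local, peer, tag), associated with the socket msk.
 * The key is not yet added to the net or per-socket lists.
 */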
static struct mctp_sk_key *mctp_key_alloc(struct mctp_sock *msk,
					  mctp_eid_t local, mctp_eid_t peer,
					  u8 tag, gfp_t gfp)
{
	struct mctp_sk_key *key;

	key = kzalloc(sizeof(*key), gfp);
	if (!key)
		return NULL;

	key->peer_addr = peer;
	key->local_addr = local;
	key->tag = tag;
	key->sk = &msk->sk;
	spin_lock_init(&key->reasm_lock);

	return key;
}

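/* Insert a key into the net and per-socket lists, failing with -EEXIST if an
 * identical (local, peer, tag) key is already present.
 */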
static int mctp_key_add(struct mctp_sk_key *key, struct mctp_sock *msk)
{
	struct net *net = sock_net(&msk->sk);
	struct mctp_sk_key *tmp;
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&net->mctp.keys_lock, flags);

	hlist_for_each_entry(tmp, &net->mctp.keys, hlist) {
		if (mctp_key_match(tmp, key->local_addr, key->peer_addr,
				   key->tag)) {
			rc = -EEXIST;
			break;
		}
	}

	if (!rc) {
		hlist_add_head(&key->hlist, &net->mctp.keys);
		hlist_add_head(&key->sklist, &msk->keys);
	}

	spin_unlock_irqrestore(&net->mctp.keys_lock, flags);

	return rc;
}

/* Must be called with key->reasm_lock, which it will release. Will schedule
 * the key for an RCU free.
 */
static void __mctp_key_unlock_drop(struct mctp_sk_key *key, struct net *net,
				   unsigned long flags)
	__releases(&key->reasm_lock)
{
	struct sk_buff *skb;

	skb = key->reasm_head;
	key->reasm_head = NULL;
	key->reasm_dead = true;
	spin_unlock_irqrestore(&key->reasm_lock, flags);

	spin_lock_irqsave(&net->mctp.keys_lock, flags);
	hlist_del_rcu(&key->hlist);
	hlist_del_rcu(&key->sklist);
	spin_unlock_irqrestore(&net->mctp.keys_lock, flags);
	kfree_rcu(key, rcu);
	kfree_skb(skb);
}

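/* Append a fragment to the reassembly chain on @key, checking the packet
 * sequence number and the overall message size limit.
 */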
static int mctp_frag_queue(struct mctp_sk_key *key, struct sk_buff *skb)
{
	struct mctp_hdr *hdr = mctp_hdr(skb);
	u8 exp_seq, this_seq;

	this_seq = (hdr->flags_seq_tag >> MCTP_HDR_SEQ_SHIFT)
		& MCTP_HDR_SEQ_MASK;

	if (!key->reasm_head) {
		key->reasm_head = skb;
		key->reasm_tailp = &(skb_shinfo(skb)->frag_list);
		key->last_seq = this_seq;
		return 0;
	}

	exp_seq = (key->last_seq + 1) & MCTP_HDR_SEQ_MASK;

	if (this_seq != exp_seq)
		return -EINVAL;

	if (key->reasm_head->len + skb->len > mctp_message_maxlen)
		return -EINVAL;

	skb->next = NULL;
	skb->sk = NULL;
	*key->reasm_tailp = skb;
	key->reasm_tailp = &skb->next;

	key->last_seq = this_seq;

	key->reasm_head->data_len += skb->len;
	key->reasm_head->len += skb->len;
	key->reasm_head->truesize += skb->truesize;

	return 0;
}

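/* Input path for local routes: match the packet to a socket (either via an
 * existing tag key or a bound socket), deliver single-packet messages, and
 * start or continue reassembly for fragmented messages.
 */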
static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	struct mctp_sk_key *key;
	struct mctp_sock *msk;
	struct mctp_hdr *mh;
	unsigned long f;
	u8 tag, flags;
	int rc;

	msk = NULL;
	rc = -EINVAL;

	/* we may be receiving a locally-routed packet; drop source sk
	 * accounting
	 */
	skb_orphan(skb);

	/* ensure we have enough data for a header and a type */
	if (skb->len < sizeof(struct mctp_hdr) + 1)
		goto out;

	/* grab header, advance data ptr */
	mh = mctp_hdr(skb);
	skb_pull(skb, sizeof(struct mctp_hdr));

	flags = mh->flags_seq_tag & (MCTP_HDR_FLAG_SOM | MCTP_HDR_FLAG_EOM);
	tag = mh->flags_seq_tag & (MCTP_HDR_TAG_MASK | MCTP_HDR_FLAG_TO);

	rcu_read_lock();

	/* lookup socket / reasm context, exactly matching (src,dest,tag) */
	key = mctp_lookup_key(net, skb, mh->src);

	if (flags & MCTP_HDR_FLAG_SOM) {
		if (key) {
			msk = container_of(key->sk, struct mctp_sock, sk);
		} else if (!(tag & MCTP_HDR_FLAG_TO)) {
			/* first response to a broadcast? do a more general
			 * key lookup to find the socket, but don't use this
			 * key for reassembly - we'll create a more specific
			 * one for future packets if required (ie, !EOM).
			 */
			key = mctp_lookup_key(net, skb, MCTP_ADDR_ANY);
			if (key) {
				msk = container_of(key->sk,
						   struct mctp_sock, sk);
				key = NULL;
			}
		}

		if (!key && !msk && (tag & MCTP_HDR_FLAG_TO))
			msk = mctp_lookup_bind(net, skb);

		if (!msk) {
			rc = -ENOENT;
			goto out_unlock;
		}

		/* single-packet message? deliver to socket, clean up any
		 * pending key.
		 */
		if (flags & MCTP_HDR_FLAG_EOM) {
			sock_queue_rcv_skb(&msk->sk, skb);
			if (key) {
				spin_lock_irqsave(&key->reasm_lock, f);
				/* we've hit a pending reassembly; not much we
				 * can do but drop it
				 */
				__mctp_key_unlock_drop(key, net, f);
			}
			rc = 0;
			goto out_unlock;
		}

		/* broadcast response or a bind() - create a key for further
		 * packets for this message
		 */
		if (!key) {
			key = mctp_key_alloc(msk, mh->dest, mh->src,
					     tag, GFP_ATOMIC);
			if (!key) {
				rc = -ENOMEM;
				goto out_unlock;
			}

			/* we can queue without the reasm lock here, as the
			 * key isn't observable yet
			 */
			mctp_frag_queue(key, skb);

			/* if the key_add fails, we've raced with another
			 * SOM packet with the same src, dest and tag. There's
			 * no way to distinguish future packets, so all we
			 * can do is drop; we'll free the skb on exit from
			 * this function.
			 */
			rc = mctp_key_add(key, msk);
			if (rc)
				kfree(key);
		} else {
			/* existing key: start reassembly */
			spin_lock_irqsave(&key->reasm_lock, f);

			if (key->reasm_head || key->reasm_dead) {
				/* duplicate start? drop everything */
				__mctp_key_unlock_drop(key, net, f);
				rc = -EEXIST;
			} else {
				rc = mctp_frag_queue(key, skb);
				spin_unlock_irqrestore(&key->reasm_lock, f);
			}
		}

	} else if (key) {
		/* this packet continues a previous message; reassemble
		 * using the message-specific key
		 */

		spin_lock_irqsave(&key->reasm_lock, f);

		/* we need to be continuing an existing reassembly... */
		if (!key->reasm_head)
			rc = -EINVAL;
		else
			rc = mctp_frag_queue(key, skb);

		/* end of message? deliver to socket, and we're done with
		 * the reassembly/response key
		 */
		if (!rc && flags & MCTP_HDR_FLAG_EOM) {
			sock_queue_rcv_skb(key->sk, key->reasm_head);
			key->reasm_head = NULL;
			__mctp_key_unlock_drop(key, net, f);
		} else {
			spin_unlock_irqrestore(&key->reasm_lock, f);
		}

	} else {
		/* not a start, no matching key */
		rc = -ENOENT;
	}

out_unlock:
	rcu_read_unlock();
out:
	if (rc)
		kfree_skb(skb);

	return rc;
}

static unsigned int mctp_route_mtu(struct mctp_route *rt)
{
	return rt->mtu ?: READ_ONCE(rt->dev->dev->mtu);
}

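/* Output path for unicast routes: resolve the destination's link-layer
 * address via the neighbour table, add the hardware header, and hand the
 * skb to the device.
 */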
static int mctp_route_output(struct mctp_route *route, struct sk_buff *skb)
{
	struct mctp_hdr *hdr = mctp_hdr(skb);
	char daddr_buf[MAX_ADDR_LEN];
	char *daddr = NULL;
	unsigned int mtu;
	int rc;

	skb->protocol = htons(ETH_P_MCTP);

	mtu = READ_ONCE(skb->dev->mtu);
	if (skb->len > mtu) {
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	/* If lookup fails let the device handle daddr==NULL */
	if (mctp_neigh_lookup(route->dev, hdr->dest, daddr_buf) == 0)
		daddr = daddr_buf;

	rc = dev_hard_header(skb, skb->dev, ntohs(skb->protocol),
			     daddr, skb->dev->dev_addr, skb->len);
	if (rc < 0) {
		kfree_skb(skb);
		return -EHOSTUNREACH;
	}

	rc = dev_queue_xmit(skb);
	if (rc)
		rc = net_xmit_errno(rc);

	return rc;
}

/* route alloc/release */
static void mctp_route_release(struct mctp_route *rt)
{
	if (refcount_dec_and_test(&rt->refs)) {
		dev_put(rt->dev->dev);
		kfree_rcu(rt, rcu);
	}
}

/* returns a route with the refcount at 1 */
static struct mctp_route *mctp_route_alloc(void)
{
	struct mctp_route *rt;

	rt = kzalloc(sizeof(*rt), GFP_KERNEL);
	if (!rt)
		return NULL;

	INIT_LIST_HEAD(&rt->list);
	refcount_set(&rt->refs, 1);
	rt->output = mctp_route_discard;

	return rt;
}

unsigned int mctp_default_net(struct net *net)
{
	return READ_ONCE(net->mctp.default_net);
}

int mctp_default_net_set(struct net *net, unsigned int index)
{
	if (index == 0)
		return -EINVAL;
	WRITE_ONCE(net->mctp.default_net, index);
	return 0;
}

static void mctp_reserve_tag(struct net *net, struct mctp_sk_key *key,
			     struct mctp_sock *msk)
{
	struct netns_mctp *mns = &net->mctp;

	lockdep_assert_held(&mns->keys_lock);

	/* we hold the net->key_lock here, allowing updates to both
	 * socket and net lists
	 */
	hlist_add_head_rcu(&key->hlist, &mns->keys);
	hlist_add_head_rcu(&key->sklist, &msk->keys);
}

/* Allocate a locally-owned tag value for (saddr, daddr), and reserve
 * it for the socket msk
 */
static int mctp_alloc_local_tag(struct mctp_sock *msk,
				mctp_eid_t saddr, mctp_eid_t daddr, u8 *tagp)
{
	struct net *net = sock_net(&msk->sk);
	struct netns_mctp *mns = &net->mctp;
	struct mctp_sk_key *key, *tmp;
	unsigned long flags;
	int rc = -EAGAIN;
	u8 tagbits;

	/* be optimistic, alloc now */
	key = mctp_key_alloc(msk, saddr, daddr, 0, GFP_KERNEL);
	if (!key)
		return -ENOMEM;

	/* 8 possible tag values */
	tagbits = 0xff;

	spin_lock_irqsave(&mns->keys_lock, flags);

	/* Walk through the existing keys, looking for potential conflicting
	 * tags. If we find a conflict, clear that bit from tagbits
	 */
	hlist_for_each_entry(tmp, &mns->keys, hlist) {
		/* if we don't own the tag, it can't conflict */
		if (tmp->tag & MCTP_HDR_FLAG_TO)
			continue;

		if ((tmp->peer_addr == daddr ||
		     tmp->peer_addr == MCTP_ADDR_ANY) &&
		    tmp->local_addr == saddr)
			tagbits &= ~(1 << tmp->tag);

		if (!tagbits)
			break;
	}

	if (tagbits) {
		key->tag = __ffs(tagbits);
		mctp_reserve_tag(net, key, msk);
		*tagp = key->tag;
		rc = 0;
	}

	spin_unlock_irqrestore(&mns->keys_lock, flags);

	if (!tagbits)
		kfree(key);

	return rc;
}

/* routing lookups */
static bool mctp_rt_match_eid(struct mctp_route *rt,
			      unsigned int net, mctp_eid_t eid)
{
	return READ_ONCE(rt->dev->net) == net &&
		rt->min <= eid && rt->max >= eid;
}

/* compares match, used for duplicate prevention */
static bool mctp_rt_compare_exact(struct mctp_route *rt1,
				  struct mctp_route *rt2)
{
	ASSERT_RTNL();
	return rt1->dev->net == rt2->dev->net &&
		rt1->min == rt2->min &&
		rt1->max == rt2->max;
}

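/* Look up a route for (dnet, daddr) under RCU; on success, returns the route
 * with its refcount incremented, to be released via mctp_route_release().
 */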
struct mctp_route *mctp_route_lookup(struct net *net, unsigned int dnet,
				     mctp_eid_t daddr)
{
	struct mctp_route *tmp, *rt = NULL;

	list_for_each_entry_rcu(tmp, &net->mctp.routes, list) {
		/* TODO: add metrics */
		if (mctp_rt_match_eid(tmp, dnet, daddr)) {
			if (refcount_inc_not_zero(&tmp->refs)) {
				rt = tmp;
				break;
			}
		}
	}

	return rt;
}

/* sends a skb to rt and releases the route. */
int mctp_do_route(struct mctp_route *rt, struct sk_buff *skb)
{
	int rc;

	rc = rt->output(rt, skb);
	mctp_route_release(rt);
	return rc;
}

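/* Fragment a message into MTU-sized packets, each with its own MCTP header
 * carrying the SOM/EOM flags and a 2-bit sequence number, and transmit them
 * via the route's output function.
 */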
static int mctp_do_fragment_route(struct mctp_route *rt, struct sk_buff *skb,
				  unsigned int mtu, u8 tag)
{
	const unsigned int hlen = sizeof(struct mctp_hdr);
	struct mctp_hdr *hdr, *hdr2;
	unsigned int pos, size;
	struct sk_buff *skb2;
	int rc;
	u8 seq;

	hdr = mctp_hdr(skb);
	seq = 0;
	rc = 0;

	if (mtu < hlen + 1) {
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	/* we've got the header */
	skb_pull(skb, hlen);

	for (pos = 0; pos < skb->len;) {
		/* size of message payload */
		size = min(mtu - hlen, skb->len - pos);

		skb2 = alloc_skb(MCTP_HEADER_MAXLEN + hlen + size, GFP_KERNEL);
		if (!skb2) {
			rc = -ENOMEM;
			break;
		}

		/* generic skb copy */
		skb2->protocol = skb->protocol;
		skb2->priority = skb->priority;
		skb2->dev = skb->dev;
		memcpy(skb2->cb, skb->cb, sizeof(skb2->cb));

		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);

		/* establish packet */
		skb_reserve(skb2, MCTP_HEADER_MAXLEN);
		skb_reset_network_header(skb2);
		skb_put(skb2, hlen + size);
		skb2->transport_header = skb2->network_header + hlen;

		/* copy header fields, calculate SOM/EOM flags & seq */
		hdr2 = mctp_hdr(skb2);
		hdr2->ver = hdr->ver;
		hdr2->dest = hdr->dest;
		hdr2->src = hdr->src;
		hdr2->flags_seq_tag = tag &
			(MCTP_HDR_TAG_MASK | MCTP_HDR_FLAG_TO);

		if (pos == 0)
			hdr2->flags_seq_tag |= MCTP_HDR_FLAG_SOM;

		if (pos + size == skb->len)
			hdr2->flags_seq_tag |= MCTP_HDR_FLAG_EOM;

		hdr2->flags_seq_tag |= seq << MCTP_HDR_SEQ_SHIFT;

		/* copy message payload */
		skb_copy_bits(skb, pos, skb_transport_header(skb2), size);

		/* do route, but don't drop the rt reference */
		rc = rt->output(rt, skb2);
		if (rc)
			break;

		seq = (seq + 1) & MCTP_HDR_SEQ_MASK;
		pos += size;
	}

	mctp_route_release(rt);
	consume_skb(skb);
	return rc;
}

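/* Transmit path from the socket layer: pick a source EID from the outbound
 * interface, allocate an owned tag if requested, build the MCTP header, and
 * either send directly or fragment to the route MTU.
 */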
int mctp_local_output(struct sock *sk, struct mctp_route *rt,
		      struct sk_buff *skb, mctp_eid_t daddr, u8 req_tag)
{
	struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk);
	struct mctp_skb_cb *cb = mctp_cb(skb);
	struct mctp_hdr *hdr;
	unsigned long flags;
	unsigned int mtu;
	mctp_eid_t saddr;
	int rc;
	u8 tag;

	if (WARN_ON(!rt->dev))
		return -EINVAL;

	spin_lock_irqsave(&rt->dev->addrs_lock, flags);
	if (rt->dev->num_addrs == 0) {
		rc = -EHOSTUNREACH;
	} else {
		/* use the outbound interface's first address as our source */
		saddr = rt->dev->addrs[0];
		rc = 0;
	}
	spin_unlock_irqrestore(&rt->dev->addrs_lock, flags);

	if (rc)
		return rc;

	if (req_tag & MCTP_HDR_FLAG_TO) {
		rc = mctp_alloc_local_tag(msk, saddr, daddr, &tag);
		if (rc)
			return rc;
		tag |= MCTP_HDR_FLAG_TO;
	} else {
		tag = req_tag;
	}

	skb->protocol = htons(ETH_P_MCTP);
	skb->priority = 0;
	skb_reset_transport_header(skb);
	skb_push(skb, sizeof(struct mctp_hdr));
	skb_reset_network_header(skb);
	skb->dev = rt->dev->dev;

	/* cb->net will have been set on initial ingress */
	cb->src = saddr;

	/* set up common header fields */
	hdr = mctp_hdr(skb);
	hdr->ver = 1;
	hdr->dest = daddr;
	hdr->src = saddr;

	mtu = mctp_route_mtu(rt);

	if (skb->len + sizeof(struct mctp_hdr) <= mtu) {
		hdr->flags_seq_tag = MCTP_HDR_FLAG_SOM | MCTP_HDR_FLAG_EOM |
			tag;
		return mctp_do_route(rt, skb);
	} else {
		return mctp_do_fragment_route(rt, skb, mtu, tag);
	}
}

/* route management */
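/* Add a route covering [daddr_start, daddr_start + daddr_extent] on mdev;
 * RTN_LOCAL routes deliver to local sockets, RTN_UNICAST routes transmit on
 * the device. Duplicate routes are rejected.
 */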
static int mctp_route_add(struct mctp_dev *mdev, mctp_eid_t daddr_start,
			  unsigned int daddr_extent, unsigned int mtu,
			  unsigned char type)
{
	int (*rtfn)(struct mctp_route *rt, struct sk_buff *skb);
	struct net *net = dev_net(mdev->dev);
	struct mctp_route *rt, *ert;

	if (!mctp_address_ok(daddr_start))
		return -EINVAL;

	if (daddr_extent > 0xff || daddr_start + daddr_extent >= 255)
		return -EINVAL;

	switch (type) {
	case RTN_LOCAL:
		rtfn = mctp_route_input;
		break;
	case RTN_UNICAST:
		rtfn = mctp_route_output;
		break;
	default:
		return -EINVAL;
	}

	rt = mctp_route_alloc();
	if (!rt)
		return -ENOMEM;

	rt->min = daddr_start;
	rt->max = daddr_start + daddr_extent;
	rt->mtu = mtu;
	rt->dev = mdev;
	dev_hold(rt->dev->dev);
	rt->type = type;
	rt->output = rtfn;

	ASSERT_RTNL();
	/* Prevent duplicate identical routes. */
	list_for_each_entry(ert, &net->mctp.routes, list) {
		if (mctp_rt_compare_exact(rt, ert)) {
			mctp_route_release(rt);
			return -EEXIST;
		}
	}

	list_add_rcu(&rt->list, &net->mctp.routes);

	return 0;
}

static int mctp_route_remove(struct mctp_dev *mdev, mctp_eid_t daddr_start,
			     unsigned int daddr_extent)
{
	struct net *net = dev_net(mdev->dev);
	struct mctp_route *rt, *tmp;
	mctp_eid_t daddr_end;
	bool dropped;

	if (daddr_extent > 0xff || daddr_start + daddr_extent >= 255)
		return -EINVAL;

	daddr_end = daddr_start + daddr_extent;
	dropped = false;

	ASSERT_RTNL();

	list_for_each_entry_safe(rt, tmp, &net->mctp.routes, list) {
		if (rt->dev == mdev &&
		    rt->min == daddr_start && rt->max == daddr_end) {
			list_del_rcu(&rt->list);
			/* TODO: immediate RTM_DELROUTE */
			mctp_route_release(rt);
			dropped = true;
		}
	}

	return dropped ? 0 : -ENOENT;
}

int mctp_route_add_local(struct mctp_dev *mdev, mctp_eid_t addr)
{
	return mctp_route_add(mdev, addr, 0, 0, RTN_LOCAL);
}

int mctp_route_remove_local(struct mctp_dev *mdev, mctp_eid_t addr)
{
	return mctp_route_remove(mdev, addr, 0);
}

/* removes all entries for a given device */
void mctp_route_remove_dev(struct mctp_dev *mdev)
{
	struct net *net = dev_net(mdev->dev);
	struct mctp_route *rt, *tmp;

	ASSERT_RTNL();
	list_for_each_entry_safe(rt, tmp, &net->mctp.routes, list) {
		if (rt->dev == mdev) {
			list_del_rcu(&rt->list);
			/* TODO: immediate RTM_DELROUTE */
			mctp_route_release(rt);
		}
	}
}

/* Incoming packet-handling */

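/* packet_type handler: sanity-check the link type and MCTP header version,
 * record the device's net in the skb control block, then route the packet.
 */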
static int mctp_pkttype_receive(struct sk_buff *skb, struct net_device *dev,
				struct packet_type *pt,
				struct net_device *orig_dev)
{
	struct net *net = dev_net(dev);
	struct mctp_skb_cb *cb;
	struct mctp_route *rt;
	struct mctp_hdr *mh;

	/* basic non-data sanity checks */
	if (dev->type != ARPHRD_MCTP)
		goto err_drop;

	if (!pskb_may_pull(skb, sizeof(struct mctp_hdr)))
		goto err_drop;

	skb_reset_transport_header(skb);
	skb_reset_network_header(skb);

	/* We have enough for a header; decode and route */
	mh = mctp_hdr(skb);
	if (mh->ver < MCTP_VER_MIN || mh->ver > MCTP_VER_MAX)
		goto err_drop;

	cb = __mctp_cb(skb);
	cb->net = READ_ONCE(__mctp_dev_get(dev)->net);

	rt = mctp_route_lookup(net, cb->net, mh->dest);
	if (!rt)
		goto err_drop;

	mctp_do_route(rt, skb);

	return NET_RX_SUCCESS;

err_drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}

static struct packet_type mctp_packet_type = {
	.type = cpu_to_be16(ETH_P_MCTP),
	.func = mctp_pkttype_receive,
};

/* netlink interface */

static const struct nla_policy rta_mctp_policy[RTA_MAX + 1] = {
	[RTA_DST]	= { .type = NLA_U8 },
	[RTA_METRICS]	= { .type = NLA_NESTED },
	[RTA_OIF]	= { .type = NLA_U32 },
};

/* Common part for RTM_NEWROUTE and RTM_DELROUTE parsing.
 * tb must hold RTA_MAX+1 elements.
 */
static int mctp_route_nlparse(struct sk_buff *skb, struct nlmsghdr *nlh,
			      struct netlink_ext_ack *extack,
			      struct nlattr **tb, struct rtmsg **rtm,
			      struct mctp_dev **mdev, mctp_eid_t *daddr_start)
{
	struct net *net = sock_net(skb->sk);
	struct net_device *dev;
	unsigned int ifindex;
	int rc;

	rc = nlmsg_parse(nlh, sizeof(struct rtmsg), tb, RTA_MAX,
			 rta_mctp_policy, extack);
	if (rc < 0) {
		NL_SET_ERR_MSG(extack, "incorrect format");
		return rc;
	}

	if (!tb[RTA_DST]) {
		NL_SET_ERR_MSG(extack, "dst EID missing");
		return -EINVAL;
	}
	*daddr_start = nla_get_u8(tb[RTA_DST]);

	if (!tb[RTA_OIF]) {
		NL_SET_ERR_MSG(extack, "ifindex missing");
		return -EINVAL;
	}
	ifindex = nla_get_u32(tb[RTA_OIF]);

	*rtm = nlmsg_data(nlh);
	if ((*rtm)->rtm_family != AF_MCTP) {
		NL_SET_ERR_MSG(extack, "route family must be AF_MCTP");
		return -EINVAL;
	}

	dev = __dev_get_by_index(net, ifindex);
	if (!dev) {
		NL_SET_ERR_MSG(extack, "bad ifindex");
		return -ENODEV;
	}
	*mdev = mctp_dev_get_rtnl(dev);
	if (!*mdev)
		return -ENODEV;

	if (dev->flags & IFF_LOOPBACK) {
		NL_SET_ERR_MSG(extack, "no routes to loopback");
		return -EINVAL;
	}

	return 0;
}

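/* RTM_NEWROUTE handler: only unicast routes may be created from userspace. */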
static int mctp_newroute(struct sk_buff *skb, struct nlmsghdr *nlh,
			 struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RTA_MAX + 1];
	mctp_eid_t daddr_start;
	struct mctp_dev *mdev;
	struct rtmsg *rtm;
	unsigned int mtu;
	int rc;

	rc = mctp_route_nlparse(skb, nlh, extack, tb,
				&rtm, &mdev, &daddr_start);
	if (rc < 0)
		return rc;

	if (rtm->rtm_type != RTN_UNICAST) {
		NL_SET_ERR_MSG(extack, "rtm_type must be RTN_UNICAST");
		return -EINVAL;
	}

	/* TODO: parse mtu from nlparse */
	mtu = 0;

	if (rtm->rtm_type != RTN_UNICAST)
		return -EINVAL;

	rc = mctp_route_add(mdev, daddr_start, rtm->rtm_dst_len, mtu,
			    rtm->rtm_type);
	return rc;
}

static int mctp_delroute(struct sk_buff *skb, struct nlmsghdr *nlh,
			 struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RTA_MAX + 1];
	mctp_eid_t daddr_start;
	struct mctp_dev *mdev;
	struct rtmsg *rtm;
	int rc;

	rc = mctp_route_nlparse(skb, nlh, extack, tb,
				&rtm, &mdev, &daddr_start);
	if (rc < 0)
		return rc;

	/* we only have unicast routes */
	if (rtm->rtm_type != RTN_UNICAST)
		return -EINVAL;

	rc = mctp_route_remove(mdev, daddr_start, rtm->rtm_dst_len);
	return rc;
}

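/* Fill a RTM_NEWROUTE message for @rt; rtm_dst_len carries the EID range
 * extent rather than a prefix length.
 */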
static int mctp_fill_rtinfo(struct sk_buff *skb, struct mctp_route *rt,
			    u32 portid, u32 seq, int event, unsigned int flags)
{
	struct nlmsghdr *nlh;
	struct rtmsg *hdr;
	void *metrics;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*hdr), flags);
	if (!nlh)
		return -EMSGSIZE;

	hdr = nlmsg_data(nlh);
	hdr->rtm_family = AF_MCTP;

	/* we use the _len fields as a number of EIDs, rather than
	 * a number of bits in the address
	 */
	hdr->rtm_dst_len = rt->max - rt->min;
	hdr->rtm_src_len = 0;
	hdr->rtm_tos = 0;
	hdr->rtm_table = RT_TABLE_DEFAULT;
	hdr->rtm_protocol = RTPROT_STATIC; /* everything is user-defined */
	hdr->rtm_scope = RT_SCOPE_LINK; /* TODO: scope in mctp_route? */
	hdr->rtm_type = rt->type;

	if (nla_put_u8(skb, RTA_DST, rt->min))
		goto cancel;

	metrics = nla_nest_start_noflag(skb, RTA_METRICS);
	if (!metrics)
		goto cancel;

	if (rt->mtu) {
		if (nla_put_u32(skb, RTAX_MTU, rt->mtu))
			goto cancel;
	}

	nla_nest_end(skb, metrics);

	if (rt->dev) {
		if (nla_put_u32(skb, RTA_OIF, rt->dev->dev->ifindex))
			goto cancel;
	}

	/* TODO: conditional neighbour physaddr? */

	nlmsg_end(skb, nlh);

	return 0;

cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

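/* Dump all routes in the namespace, resuming from the index stored in
 * cb->args[0].
 */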
static int mctp_dump_rtinfo(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct mctp_route *rt;
	int s_idx, idx;

	/* TODO: allow filtering on route data, possibly under
	 * RTM_F_CLONED
	 */

	/* TODO: change to struct overlay */
	s_idx = cb->args[0];
	idx = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(rt, &net->mctp.routes, list) {
		if (idx++ < s_idx)
			continue;
		if (mctp_fill_rtinfo(skb, rt,
				     NETLINK_CB(cb->skb).portid,
				     cb->nlh->nlmsg_seq,
				     RTM_NEWROUTE, NLM_F_MULTI) < 0)
			break;
	}

	rcu_read_unlock();
	cb->args[0] = idx;

	return skb->len;
}

/* net namespace implementation */
static int __net_init mctp_routes_net_init(struct net *net)
{
	struct netns_mctp *ns = &net->mctp;

	INIT_LIST_HEAD(&ns->routes);
	INIT_HLIST_HEAD(&ns->binds);
	mutex_init(&ns->bind_lock);
	INIT_HLIST_HEAD(&ns->keys);
	spin_lock_init(&ns->keys_lock);
	WARN_ON(mctp_default_net_set(net, MCTP_INITIAL_DEFAULT_NET));
	return 0;
}

static void __net_exit mctp_routes_net_exit(struct net *net)
{
	struct mctp_route *rt;

	list_for_each_entry_rcu(rt, &net->mctp.routes, list)
		mctp_route_release(rt);
}

static struct pernet_operations mctp_net_ops = {
	.init = mctp_routes_net_init,
	.exit = mctp_routes_net_exit,
};

int __init mctp_routes_init(void)
{
	dev_add_pack(&mctp_packet_type);

	rtnl_register_module(THIS_MODULE, PF_MCTP, RTM_GETROUTE,
			     NULL, mctp_dump_rtinfo, 0);
	rtnl_register_module(THIS_MODULE, PF_MCTP, RTM_NEWROUTE,
			     mctp_newroute, NULL, 0);
	rtnl_register_module(THIS_MODULE, PF_MCTP, RTM_DELROUTE,
			     mctp_delroute, NULL, 0);

	return register_pernet_subsys(&mctp_net_ops);
}

void __exit mctp_routes_exit(void)
{
	unregister_pernet_subsys(&mctp_net_ops);
	rtnl_unregister(PF_MCTP, RTM_DELROUTE);
	rtnl_unregister(PF_MCTP, RTM_NEWROUTE);
	rtnl_unregister(PF_MCTP, RTM_GETROUTE);
	dev_remove_pack(&mctp_packet_type);
}