1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright 2023 Bootlin
10 #include <linux/phy_link_topology.h>
11 #include <linux/sfp.h>
/* Request context for PHY_GET: embeds the generic ethnl request header
 * plus the resolved PHY topology node the request targets.
 * NOTE(review): the "struct phy_req_info {" opening and closing lines
 * are not visible in this chunk.
 */
14 struct ethnl_req_info base;
15 struct phy_device_node *pdn;

/* Upcast from the embedded generic header back to the containing
 * phy_req_info (classic container_of pattern). */
18 #define PHY_REQINFO(__req_base) \
19 container_of(__req_base, struct phy_req_info, base)
/* Netlink attribute policy for PHY_GET requests: only the nested
 * request header attribute is accepted. */
21 const struct nla_policy ethnl_phy_get_policy[ETHTOOL_A_PHY_HEADER + 1] = {
22 [ETHTOOL_A_PHY_HEADER] = NLA_POLICY_NESTED(ethnl_header_policy),
25 /* Caller holds rtnl */
/* Compute the worst-case payload size of a PHY_GET reply for the PHY
 * resolved in req_info->pdn.  Every attribute ethnl_phy_fill_reply()
 * may emit must be accounted for here, so the two must stay in sync.
 * NOTE(review): the return-type line, opening brace and the final
 * "return size;" are not visible in this chunk.
 */
27 ethnl_phy_reply_size(const struct ethnl_req_info *req_base,
28 struct netlink_ext_ack *extack)
30 struct phy_req_info *req_info = PHY_REQINFO(req_base);
31 struct phy_device_node *pdn = req_info->pdn;
32 struct phy_device *phydev = pdn->phy;
37 /* ETHTOOL_A_PHY_INDEX */
38 size += nla_total_size(sizeof(u32));
40 /* ETHTOOL_A_DRVNAME */
/* +1 accounts for the NUL terminator nla_put_string() copies */
42 size += nla_total_size(strlen(phydev->drv->name) + 1);
/* ETHTOOL_A_PHY_NAME: the mdio device name */
45 size += nla_total_size(strlen(dev_name(&phydev->mdio.dev)) + 1);
47 /* ETHTOOL_A_PHY_UPSTREAM_TYPE */
48 size += nla_total_size(sizeof(u32));
/* Upstream attributes only exist when this PHY sits behind an SFP */
50 if (phy_on_sfp(phydev)) {
51 const char *upstream_sfp_name = sfp_get_name(pdn->parent_sfp_bus);
53 /* ETHTOOL_A_PHY_UPSTREAM_SFP_NAME */
54 if (upstream_sfp_name)
55 size += nla_total_size(strlen(upstream_sfp_name) + 1);
57 /* ETHTOOL_A_PHY_UPSTREAM_INDEX */
58 size += nla_total_size(sizeof(u32));
61 /* ETHTOOL_A_PHY_DOWNSTREAM_SFP_NAME */
62 if (phydev->sfp_bus) {
63 const char *sfp_name = sfp_get_name(phydev->sfp_bus);
66 size += nla_total_size(strlen(sfp_name) + 1);
/* Emit the PHY_GET reply attributes for the PHY in req_info->pdn into
 * @skb.  Must stay in sync with ethnl_phy_reply_size() above.
 * NOTE(review): the return-type line, opening brace, error labels and
 * final return are not visible in this chunk.
 */
73 ethnl_phy_fill_reply(const struct ethnl_req_info *req_base, struct sk_buff *skb)
75 struct phy_req_info *req_info = PHY_REQINFO(req_base);
76 struct phy_device_node *pdn = req_info->pdn;
77 struct phy_device *phydev = pdn->phy;
78 enum phy_upstream ptype;
80 ptype = pdn->upstream_type;
/* Mandatory attributes: PHY index, device name, upstream type */
82 if (nla_put_u32(skb, ETHTOOL_A_PHY_INDEX, phydev->phyindex) ||
83 nla_put_string(skb, ETHTOOL_A_PHY_NAME, dev_name(&phydev->mdio.dev)) ||
84 nla_put_u32(skb, ETHTOOL_A_PHY_UPSTREAM_TYPE, ptype))
/* Driver name attribute — presumably guarded by a phydev->drv check on
 * a line not visible in this chunk; confirm against the full source */
88 nla_put_string(skb, ETHTOOL_A_PHY_DRVNAME, phydev->drv->name))
/* When the upstream is another PHY, report its index and — if it sits
 * on an SFP bus — that bus name as well */
91 if (ptype == PHY_UPSTREAM_PHY) {
92 struct phy_device *upstream = pdn->upstream.phydev;
93 const char *sfp_upstream_name;
96 if (nla_put_u32(skb, ETHTOOL_A_PHY_UPSTREAM_INDEX, upstream->phyindex))
99 if (pdn->parent_sfp_bus) {
100 sfp_upstream_name = sfp_get_name(pdn->parent_sfp_bus);
101 if (sfp_upstream_name &&
102 nla_put_string(skb, ETHTOOL_A_PHY_UPSTREAM_SFP_NAME,
/* Downstream SFP bus driven by this PHY, if any */
108 if (phydev->sfp_bus) {
109 const char *sfp_name = sfp_get_name(phydev->sfp_bus);
112 nla_put_string(skb, ETHTOOL_A_PHY_DOWNSTREAM_SFP_NAME,
/* Resolve the PHY targeted by the request header and cache its
 * topology node in req_info->pdn.  Caller holds rtnl (the topology
 * xarray is walked without extra locking here).
 * NOTE(review): the error-handling branches and the "return 0" are not
 * visible in this chunk.
 */
120 static int ethnl_phy_parse_request(struct ethnl_req_info *req_base,
122 struct netlink_ext_ack *extack)
124 struct phy_link_topology *topo = req_base->dev->link_topo;
125 struct phy_req_info *req_info = PHY_REQINFO(req_base);
126 struct phy_device *phydev;
128 phydev = ethnl_req_get_phydev(req_base, tb[ETHTOOL_A_PHY_HEADER],
/* ethnl_req_get_phydev() reported failure via ERR_PTR */
134 return PTR_ERR(phydev);
/* Look up the topology node tracking this PHY by its index */
139 req_info->pdn = xa_load(&topo->phys, phydev->phyindex);
/* PHY_GET "do" (single-shot) handler: resolve the target device and
 * PHY, size and build one reply message, then send it back.
 * NOTE(review): the rtnl lock/unlock calls and parts of the error-path
 * labels are not visible in this chunk.
 */
144 int ethnl_phy_doit(struct sk_buff *skb, struct genl_info *info)
146 struct phy_req_info req_info = {};
147 struct nlattr **tb = info->attrs;
148 struct sk_buff *rskb;
/* Parse the request header; takes a reference on the target netdev
 * that must be dropped via ethnl_parse_header_dev_put() on all paths */
153 ret = ethnl_parse_header_dev_get(&req_info.base,
154 tb[ETHTOOL_A_PHY_HEADER],
155 genl_info_net(info), info->extack,
162 ret = ethnl_phy_parse_request(&req_info.base, tb, info->extack);
164 goto err_unlock_rtnl;
166 /* No PHY, return early */
168 goto err_unlock_rtnl;
/* Compute the reply size before allocating the reply skb */
170 ret = ethnl_phy_reply_size(&req_info.base, info->extack);
172 goto err_unlock_rtnl;
173 reply_len = ret + ethnl_reply_header_size();
175 rskb = ethnl_reply_init(reply_len, req_info.base.dev,
176 ETHTOOL_MSG_PHY_GET_REPLY,
177 ETHTOOL_A_PHY_HEADER,
178 info, &reply_payload);
181 goto err_unlock_rtnl;
184 ret = ethnl_phy_fill_reply(&req_info.base, rskb);
/* Success path: drop the netdev reference, finalize and send */
189 ethnl_parse_header_dev_put(&req_info.base);
190 genlmsg_end(rskb, reply_payload);
192 return genlmsg_reply(rskb, info);
/* Error path: release the netdev reference taken at parse time */
198 ethnl_parse_header_dev_put(&req_info.base);
/* Per-dump iteration state stored inside netlink_callback->ctx: the
 * parsed request, plus resume cursors for the netdev walk (ifindex)
 * and the per-device PHY walk (phy_index).
 * NOTE(review): the closing "};" is not visible in this chunk. */
202 struct ethnl_phy_dump_ctx {
203 struct phy_req_info *phy_req_info;
204 unsigned long ifindex;
205 unsigned long phy_index;
/* Dump start: allocate the per-dump request context and parse the
 * request header once for the whole dump sequence.
 * NOTE(review): the return statements and part of the error check are
 * not visible in this chunk.
 */
208 int ethnl_phy_start(struct netlink_callback *cb)
210 const struct genl_info *info = genl_info_dump(cb);
211 struct ethnl_phy_dump_ctx *ctx = (void *)cb->ctx;
/* The dump context must fit in the space netlink reserves in cb->ctx */
214 BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));
216 ctx->phy_req_info = kzalloc(sizeof(*ctx->phy_req_info), GFP_KERNEL);
217 if (!ctx->phy_req_info)
220 ret = ethnl_parse_header_dev_get(&ctx->phy_req_info->base,
221 info->attrs[ETHTOOL_A_PHY_HEADER],
222 sock_net(cb->skb->sk), cb->extack,
/* Header parsing failed: free the context allocated above */
228 kfree(ctx->phy_req_info);
/* Dump teardown: drop the netdev reference (only held when the request
 * header named a specific device) and free the per-dump context. */
233 int ethnl_phy_done(struct netlink_callback *cb)
235 struct ethnl_phy_dump_ctx *ctx = (void *)cb->ctx;
237 if (ctx->phy_req_info->base.dev)
238 ethnl_parse_header_dev_put(&ctx->phy_req_info->base);
240 kfree(ctx->phy_req_info);
/* Emit one PHY_GET reply message per PHY attached to @dev, resuming
 * from ctx->phy_index so an interrupted dump continues where it
 * stopped.  NOTE(review): the loop close, -EMSGSIZE handling and the
 * final return are not visible in this chunk.
 */
245 static int ethnl_phy_dump_one_dev(struct sk_buff *skb, struct net_device *dev,
246 struct netlink_callback *cb)
248 struct ethnl_phy_dump_ctx *ctx = (void *)cb->ctx;
249 struct phy_req_info *pri = ctx->phy_req_info;
250 struct phy_device_node *pdn;
/* Walk the device's PHY topology; ctx->phy_index serves as both the
 * iteration variable and the resume cursor for the next dump pass */
257 xa_for_each_start(&dev->link_topo->phys, ctx->phy_index, pdn, ctx->phy_index) {
258 ehdr = ethnl_dump_put(skb, cb, ETHTOOL_MSG_PHY_GET_REPLY);
264 ret = ethnl_fill_reply_header(skb, dev, ETHTOOL_A_PHY_HEADER);
/* Roll back the partially-written message on failure */
266 genlmsg_cancel(skb, ehdr);
271 ret = ethnl_phy_fill_reply(&pri->base, skb);
273 genlmsg_cancel(skb, ehdr);
277 genlmsg_end(skb, ehdr);
283 int ethnl_phy_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
285 struct ethnl_phy_dump_ctx *ctx = (void *)cb->ctx;
286 struct net *net = sock_net(skb->sk);
287 struct net_device *dev;
292 if (ctx->phy_req_info->base.dev) {
293 ret = ethnl_phy_dump_one_dev(skb, ctx->phy_req_info->base.dev, cb);
295 for_each_netdev_dump(net, dev, ctx->ifindex) {
296 ret = ethnl_phy_dump_one_dev(skb, dev, cb);