1 // SPDX-License-Identifier: GPL-2.0-only
5 * Copyright (c) 2020, Red Hat. All rights reserved.
10 #include <linux/module.h>
11 #include <linux/idr.h>
12 #include <linux/slab.h>
13 #include <linux/vdpa.h>
14 #include <uapi/linux/vdpa.h>
15 #include <net/genetlink.h>
16 #include <linux/mod_devicetable.h>
17 #include <linux/virtio_ids.h>
/* List of every registered vdpa management device (struct vdpa_mgmt_dev). */
19 static LIST_HEAD(mdev_head);
20 /* A global mutex that protects vdpa management device and device level operations. */
21 static DECLARE_RWSEM(vdpa_dev_lock);
/* Allocator of unique per-device indexes, used for the default "vdpa%u" name. */
22 static DEFINE_IDA(vdpa_index_ida);
/* vdpa_set_status - set the virtio status byte of a vdpa device.
 * @vdev: device to update
 * @status: new status value
 * Write-locks cf_lock so the status change is serialized against config
 * space readers/writers (see vdpa_get_config()/vdpa_set_config()).
 */
24 void vdpa_set_status(struct vdpa_device *vdev, u8 status)
26 down_write(&vdev->cf_lock);
27 vdev->config->set_status(vdev, status);
28 up_write(&vdev->cf_lock);
30 EXPORT_SYMBOL(vdpa_set_status);
/* Forward declaration; the family itself is defined near the end of the file. */
32 static struct genl_family vdpa_nl_family;
/* Bus probe callback: sanity-check the device's virtqueue size range
 * (max reported by get_vq_num_max must be >= min, default min 1), then
 * delegate to the matched vdpa driver's probe op if it has one.
 */
34 static int vdpa_dev_probe(struct device *d)
36 struct vdpa_device *vdev = dev_to_vdpa(d);
37 struct vdpa_driver *drv = drv_to_vdpa(vdev->dev.driver);
38 const struct vdpa_config_ops *ops = vdev->config;
39 u32 max_num, min_num = 1;
42 max_num = ops->get_vq_num_max(vdev);
43 if (ops->get_vq_num_min) /* optional op; keep default of 1 when absent */
44 min_num = ops->get_vq_num_min(vdev);
45 if (max_num < min_num)
48 if (drv && drv->probe)
49 ret = drv->probe(vdev);
/* Bus remove callback: forward to the bound driver's remove op, if any. */
54 static void vdpa_dev_remove(struct device *d)
56 struct vdpa_device *vdev = dev_to_vdpa(d);
57 struct vdpa_driver *drv = drv_to_vdpa(vdev->dev.driver);
59 if (drv && drv->remove)
/* Bus match callback: pair devices with drivers.
 * A non-NULL driver_override restricts the device to exactly that driver.
 */
63 static int vdpa_dev_match(struct device *dev, struct device_driver *drv)
65 struct vdpa_device *vdev = dev_to_vdpa(dev);
67 /* Check override first, and if set, only use the named driver */
68 if (vdev->driver_override)
69 return strcmp(vdev->driver_override, drv->name) == 0;
71 /* Currently devices must be supported by all vDPA bus drivers */
/* sysfs "driver_override" store: record the driver name that
 * vdpa_dev_match() must honor (driver core helper does the copy).
 */
75 static ssize_t driver_override_store(struct device *dev,
76 struct device_attribute *attr,
77 const char *buf, size_t count)
79 struct vdpa_device *vdev = dev_to_vdpa(dev);
82 ret = driver_set_override(dev, &vdev->driver_override, buf, count);
/* sysfs "driver_override" show: print the currently set override name. */
89 static ssize_t driver_override_show(struct device *dev,
90 struct device_attribute *attr, char *buf)
92 struct vdpa_device *vdev = dev_to_vdpa(dev);
96 len = snprintf(buf, PAGE_SIZE, "%s\n", vdev->driver_override);
101 static DEVICE_ATTR_RW(driver_override);
/* Default sysfs attribute group for every vdpa device on the bus. */
103 static struct attribute *vdpa_dev_attrs[] = {
104 &dev_attr_driver_override.attr,
108 static const struct attribute_group vdpa_dev_group = {
109 .attrs = vdpa_dev_attrs,
111 __ATTRIBUTE_GROUPS(vdpa_dev);
/* The vDPA bus type tying together the match/probe/remove callbacks above. */
113 static struct bus_type vdpa_bus = {
115 .dev_groups = vdpa_dev_groups,
116 .match = vdpa_dev_match,
117 .probe = vdpa_dev_probe,
118 .remove = vdpa_dev_remove,
/* Final device release (refcount hits zero): return the index to the IDA
 * and free the driver_override string allocated via sysfs.
 */
121 static void vdpa_release_dev(struct device *d)
123 struct vdpa_device *vdev = dev_to_vdpa(d);
124 const struct vdpa_config_ops *ops = vdev->config;
129 ida_simple_remove(&vdpa_index_ida, vdev->index);
130 kfree(vdev->driver_override);
135 * __vdpa_alloc_device - allocate and initialize a vDPA device
136 * This allows the driver to do some preparation after the device is
137 * initialized but before it is registered.
138 * @parent: the parent device
139 * @config: the bus operations that is supported by this device
140 * @ngroups: number of groups supported by this device
141 * @nas: number of address spaces supported by this device
142 * @size: size of the parent structure that contains private data
143 * @name: name of the vdpa device; optional.
144 * @use_va: indicate whether virtual address must be used by this device
146 * Driver should use vdpa_alloc_device() wrapper macro instead of
147 * using this directly.
149 * Return: Returns an error when parent/config/dma_dev is not set or fail to get
152 struct vdpa_device *__vdpa_alloc_device(struct device *parent,
153 const struct vdpa_config_ops *config,
154 unsigned int ngroups, unsigned int nas,
155 size_t size, const char *name,
158 struct vdpa_device *vdev;
164 if (!!config->dma_map != !!config->dma_unmap) /* map/unmap must be paired */
167 /* It should only work for the device that use on-chip IOMMU */
168 if (use_va && !(config->dma_map || config->set_map))
172 vdev = kzalloc(size, GFP_KERNEL);
176 err = ida_alloc(&vdpa_index_ida, GFP_KERNEL); /* unique device index */
180 vdev->dev.bus = &vdpa_bus;
181 vdev->dev.parent = parent;
182 vdev->dev.release = vdpa_release_dev;
184 vdev->config = config;
185 vdev->features_valid = false;
186 vdev->use_va = use_va;
187 vdev->ngroups = ngroups;
191 err = dev_set_name(&vdev->dev, "%s", name); /* caller-supplied name */
193 err = dev_set_name(&vdev->dev, "vdpa%u", vdev->index); /* default name */
197 init_rwsem(&vdev->cf_lock);
198 device_initialize(&vdev->dev);
/* error path: undo the index allocation */
203 ida_simple_remove(&vdpa_index_ida, vdev->index);
209 EXPORT_SYMBOL_GPL(__vdpa_alloc_device);
/* bus_find_device() predicate: match a vdpa device by its dev_name(). */
211 static int vdpa_name_match(struct device *dev, const void *data)
213 struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);
215 return (strcmp(dev_name(&vdev->dev), data) == 0);
/* Add the device to the vDPA bus; caller holds vdpa_dev_lock.
 * Rejects duplicate names by probing the bus first with vdpa_name_match.
 */
218 static int __vdpa_register_device(struct vdpa_device *vdev, u32 nvqs)
224 lockdep_assert_held(&vdpa_dev_lock);
225 dev = bus_find_device(&vdpa_bus, NULL, dev_name(&vdev->dev), vdpa_name_match);
230 return device_add(&vdev->dev);
234 * _vdpa_register_device - register a vDPA device with vdpa lock held
235 * Caller must have a succeed call of vdpa_alloc_device() before.
236 * Caller must invoke this routine in the management device dev_add()
237 * callback after setting up valid mgmtdev for this vdpa device.
238 * @vdev: the vdpa device to be registered to vDPA bus
239 * @nvqs: number of virtqueues supported by this device
241 * Return: Returns an error when fail to add device to vDPA bus
243 int _vdpa_register_device(struct vdpa_device *vdev, u32 nvqs)
/* Locked variant: vdpa_dev_lock is already held by the netlink path. */
248 return __vdpa_register_device(vdev, nvqs);
250 EXPORT_SYMBOL_GPL(_vdpa_register_device);
253 * vdpa_register_device - register a vDPA device
254 * Callers must have a succeed call of vdpa_alloc_device() before.
255 * @vdev: the vdpa device to be registered to vDPA bus
256 * @nvqs: number of virtqueues supported by this device
258 * Return: Returns an error when fail to add to vDPA bus
260 int vdpa_register_device(struct vdpa_device *vdev, u32 nvqs)
/* Unlocked variant: take vdpa_dev_lock around the common helper. */
264 down_write(&vdpa_dev_lock);
265 err = __vdpa_register_device(vdev, nvqs);
266 up_write(&vdpa_dev_lock);
269 EXPORT_SYMBOL_GPL(vdpa_register_device);
272 * _vdpa_unregister_device - unregister a vDPA device
273 * Caller must invoke this routine as part of management device dev_del()
275 * @vdev: the vdpa device to be unregisted from vDPA bus
277 void _vdpa_unregister_device(struct vdpa_device *vdev)
279 lockdep_assert_held(&vdpa_dev_lock);
280 WARN_ON(!vdev->mdev); /* only management-device-owned devices use this path */
281 device_unregister(&vdev->dev);
283 EXPORT_SYMBOL_GPL(_vdpa_unregister_device);
286 * vdpa_unregister_device - unregister a vDPA device
287 * @vdev: the vdpa device to be unregisted from vDPA bus
289 void vdpa_unregister_device(struct vdpa_device *vdev)
/* Unlocked variant: serialize against management/netlink operations. */
291 down_write(&vdpa_dev_lock);
292 device_unregister(&vdev->dev);
293 up_write(&vdpa_dev_lock);
295 EXPORT_SYMBOL_GPL(vdpa_unregister_device);
298 * __vdpa_register_driver - register a vDPA device driver
299 * @drv: the vdpa device driver to be registered
300 * @owner: module owner of the driver
302 * Return: Returns an err when fail to do the registration
304 int __vdpa_register_driver(struct vdpa_driver *drv, struct module *owner)
/* Attach the driver to the vDPA bus before handing to the driver core. */
306 drv->driver.bus = &vdpa_bus;
307 drv->driver.owner = owner;
309 return driver_register(&drv->driver);
311 EXPORT_SYMBOL_GPL(__vdpa_register_driver);
314 * vdpa_unregister_driver - unregister a vDPA device driver
315 * @drv: the vdpa device driver to be unregistered
317 void vdpa_unregister_driver(struct vdpa_driver *drv)
319 driver_unregister(&drv->driver);
321 EXPORT_SYMBOL_GPL(vdpa_unregister_driver);
324 * vdpa_mgmtdev_register - register a vdpa management device
326 * @mdev: Pointer to vdpa management device
327 * vdpa_mgmtdev_register() register a vdpa management device which supports
328 * vdpa device management.
329 * Return: Returns 0 on success or failure when required callback ops are not
332 int vdpa_mgmtdev_register(struct vdpa_mgmt_dev *mdev)
/* dev_add/dev_del are mandatory: the netlink interface relies on them. */
334 if (!mdev->device || !mdev->ops || !mdev->ops->dev_add || !mdev->ops->dev_del)
337 INIT_LIST_HEAD(&mdev->list);
338 down_write(&vdpa_dev_lock);
339 list_add_tail(&mdev->list, &mdev_head);
340 up_write(&vdpa_dev_lock);
343 EXPORT_SYMBOL_GPL(vdpa_mgmtdev_register);
/* bus_for_each_dev() helper: delete a vdpa device through its owning
 * management device's dev_del op (used on mgmtdev teardown).
 */
345 static int vdpa_match_remove(struct device *dev, void *data)
347 struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);
348 struct vdpa_mgmt_dev *mdev = vdev->mdev;
351 mdev->ops->dev_del(mdev, vdev);
/* Unregister a management device: drop it from mdev_head and tear down
 * every vdpa device it still owns, all under vdpa_dev_lock.
 */
355 void vdpa_mgmtdev_unregister(struct vdpa_mgmt_dev *mdev)
357 down_write(&vdpa_dev_lock);
359 list_del(&mdev->list);
361 /* Filter out all the entries belong to this management device and delete it. */
362 bus_for_each_dev(&vdpa_bus, NULL, mdev, vdpa_match_remove);
364 up_write(&vdpa_dev_lock);
366 EXPORT_SYMBOL_GPL(vdpa_mgmtdev_unregister);
/* Read config space without taking cf_lock (caller must hold it).
 * Forces a legacy (features = 0) negotiation if features were never set.
 */
368 static void vdpa_get_config_unlocked(struct vdpa_device *vdev,
370 void *buf, unsigned int len)
372 const struct vdpa_config_ops *ops = vdev->config;
375 * Config accesses aren't supposed to trigger before features are set.
376 * If it does happen we assume a legacy guest.
378 if (!vdev->features_valid)
379 vdpa_set_features_unlocked(vdev, 0);
380 ops->get_config(vdev, offset, buf, len);
384 * vdpa_get_config - Get one or more device configuration fields.
385 * @vdev: vdpa device to operate on
386 * @offset: starting byte offset of the field
387 * @buf: buffer pointer to read to
388 * @len: length of the configuration fields in bytes
390 void vdpa_get_config(struct vdpa_device *vdev, unsigned int offset,
391 void *buf, unsigned int len)
/* Reader side: shared cf_lock is enough for a config read. */
393 down_read(&vdev->cf_lock);
394 vdpa_get_config_unlocked(vdev, offset, buf, len);
395 up_read(&vdev->cf_lock);
397 EXPORT_SYMBOL_GPL(vdpa_get_config);
400 * vdpa_set_config - Set one or more device configuration fields.
401 * @vdev: vdpa device to operate on
402 * @offset: starting byte offset of the field
403 * @buf: buffer pointer to read from
404 * @length: length of the configuration fields in bytes
406 void vdpa_set_config(struct vdpa_device *vdev, unsigned int offset,
407 const void *buf, unsigned int length)
/* Writer side: exclusive cf_lock while mutating config space. */
409 down_write(&vdev->cf_lock);
410 vdev->config->set_config(vdev, offset, buf, length);
411 up_write(&vdev->cf_lock);
413 EXPORT_SYMBOL_GPL(vdpa_set_config);
/* Match a management device against an optional bus name plus a device
 * name. A busname argument must agree with whether mdev has a bus.
 */
415 static bool mgmtdev_handle_match(const struct vdpa_mgmt_dev *mdev,
416 const char *busname, const char *devname)
418 /* Bus name is optional for simulated management device, so ignore the
419 * device with bus if bus attribute is provided.
421 if ((busname && !mdev->device->bus) || (!busname && mdev->device->bus))
424 if (!busname && strcmp(dev_name(mdev->device), devname) == 0)
427 if (busname && (strcmp(mdev->device->bus->name, busname) == 0) &&
428 (strcmp(dev_name(mdev->device), devname) == 0))
/* Resolve the mgmtdev named by netlink attributes (DEV_NAME required,
 * BUS_NAME optional). Caller holds vdpa_dev_lock to walk mdev_head.
 * Returns ERR_PTR(-EINVAL/-ENODEV) on missing attribute / no match.
 */
434 static struct vdpa_mgmt_dev *vdpa_mgmtdev_get_from_attr(struct nlattr **attrs)
436 struct vdpa_mgmt_dev *mdev;
437 const char *busname = NULL;
440 if (!attrs[VDPA_ATTR_MGMTDEV_DEV_NAME])
441 return ERR_PTR(-EINVAL);
442 devname = nla_data(attrs[VDPA_ATTR_MGMTDEV_DEV_NAME]);
443 if (attrs[VDPA_ATTR_MGMTDEV_BUS_NAME])
444 busname = nla_data(attrs[VDPA_ATTR_MGMTDEV_BUS_NAME]);
446 list_for_each_entry(mdev, &mdev_head, list) {
447 if (mgmtdev_handle_match(mdev, busname, devname))
450 return ERR_PTR(-ENODEV);
/* Emit the mgmtdev "handle" (optional bus name + device name) attributes. */
453 static int vdpa_nl_mgmtdev_handle_fill(struct sk_buff *msg, const struct vdpa_mgmt_dev *mdev)
455 if (mdev->device->bus &&
456 nla_put_string(msg, VDPA_ATTR_MGMTDEV_BUS_NAME, mdev->device->bus->name))
458 if (nla_put_string(msg, VDPA_ATTR_MGMTDEV_DEV_NAME, dev_name(mdev->device)))
/* Build one MGMTDEV_NEW netlink message describing a management device:
 * handle, supported virtio device-class bitmap (from its id_table),
 * max supported vqs and supported features.
 */
463 static int vdpa_mgmtdev_fill(const struct vdpa_mgmt_dev *mdev, struct sk_buff *msg,
464 u32 portid, u32 seq, int flags)
466 u64 supported_classes = 0;
471 hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags, VDPA_CMD_MGMTDEV_NEW);
474 err = vdpa_nl_mgmtdev_handle_fill(msg, mdev);
478 while (mdev->id_table[i].device) {
479 if (mdev->id_table[i].device <= 63) /* ids above 63 don't fit in the u64 bitmap */
480 supported_classes |= BIT_ULL(mdev->id_table[i].device);
484 if (nla_put_u64_64bit(msg, VDPA_ATTR_MGMTDEV_SUPPORTED_CLASSES,
485 supported_classes, VDPA_ATTR_UNSPEC)) {
489 if (nla_put_u32(msg, VDPA_ATTR_DEV_MGMTDEV_MAX_VQS,
490 mdev->max_supported_vqs)) {
494 if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_SUPPORTED_FEATURES,
495 mdev->supported_features, VDPA_ATTR_PAD)) {
500 genlmsg_end(msg, hdr);
/* error path: discard the partially built message */
504 genlmsg_cancel(msg, hdr);
/* VDPA_CMD_MGMTDEV_GET (doit): look up one mgmtdev by attributes and
 * reply with a single MGMTDEV_NEW message.
 */
508 static int vdpa_nl_cmd_mgmtdev_get_doit(struct sk_buff *skb, struct genl_info *info)
510 struct vdpa_mgmt_dev *mdev;
514 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
518 down_read(&vdpa_dev_lock);
519 mdev = vdpa_mgmtdev_get_from_attr(info->attrs);
521 up_read(&vdpa_dev_lock);
522 NL_SET_ERR_MSG_MOD(info->extack, "Fail to find the specified mgmt device");
527 err = vdpa_mgmtdev_fill(mdev, msg, info->snd_portid, info->snd_seq, 0);
528 up_read(&vdpa_dev_lock);
531 err = genlmsg_reply(msg, info);
/* VDPA_CMD_MGMTDEV_GET (dumpit): stream every registered mgmtdev,
 * resuming from cb->args[0] across successive dump calls.
 */
540 vdpa_nl_cmd_mgmtdev_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
542 struct vdpa_mgmt_dev *mdev;
543 int start = cb->args[0];
547 down_read(&vdpa_dev_lock);
548 list_for_each_entry(mdev, &mdev_head, list) {
553 err = vdpa_mgmtdev_fill(mdev, msg, NETLINK_CB(cb->skb).portid,
554 cb->nlh->nlmsg_seq, NLM_F_MULTI);
560 up_read(&vdpa_dev_lock);
/* Bitmask of the net-specific config attributes a user may set at
 * device creation; used to gate CAP_NET_ADMIN in dev_add.
 */
565 #define VDPA_DEV_NET_ATTRS_MASK (BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR) | \
566 BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MTU) | \
567 BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP))
/* VDPA_CMD_DEV_NEW: create a vdpa device on a management device.
 * Parses optional net config (mac/mtu/max_vqp) and device features into
 * a vdpa_dev_set_config, checks capability and mgmtdev support for the
 * requested attributes, then calls the mgmtdev's dev_add op.
 */
569 static int vdpa_nl_cmd_dev_add_set_doit(struct sk_buff *skb, struct genl_info *info)
571 struct vdpa_dev_set_config config = {};
572 struct nlattr **nl_attrs = info->attrs;
573 struct vdpa_mgmt_dev *mdev;
578 if (!info->attrs[VDPA_ATTR_DEV_NAME])
581 name = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
583 if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MACADDR]) {
584 macaddr = nla_data(nl_attrs[VDPA_ATTR_DEV_NET_CFG_MACADDR]);
585 memcpy(config.net.mac, macaddr, sizeof(config.net.mac));
586 config.mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR);
588 if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MTU]) {
590 nla_get_u16(nl_attrs[VDPA_ATTR_DEV_NET_CFG_MTU]);
591 config.mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MTU);
593 if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MAX_VQP]) {
594 config.net.max_vq_pairs =
595 nla_get_u16(nl_attrs[VDPA_ATTR_DEV_NET_CFG_MAX_VQP]);
596 if (!config.net.max_vq_pairs) {
597 NL_SET_ERR_MSG_MOD(info->extack,
598 "At least one pair of VQs is required");
601 config.mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP);
603 if (nl_attrs[VDPA_ATTR_DEV_FEATURES]) {
604 config.device_features =
605 nla_get_u64(nl_attrs[VDPA_ATTR_DEV_FEATURES]);
606 config.mask |= BIT_ULL(VDPA_ATTR_DEV_FEATURES);
609 /* Skip checking capability if user didn't prefer to configure any
610 * device networking attributes. It is likely that user might have used
611 * a device specific method to configure such attributes or using device
612 * default attributes.
614 if ((config.mask & VDPA_DEV_NET_ATTRS_MASK) &&
615 !netlink_capable(skb, CAP_NET_ADMIN))
618 down_write(&vdpa_dev_lock);
619 mdev = vdpa_mgmtdev_get_from_attr(info->attrs);
621 NL_SET_ERR_MSG_MOD(info->extack, "Fail to find the specified management device");
625 if ((config.mask & mdev->config_attr_mask) != config.mask) {
626 NL_SET_ERR_MSG_MOD(info->extack,
627 "All provided attributes are not supported");
632 err = mdev->ops->dev_add(mdev, name, &config);
634 up_write(&vdpa_dev_lock);
/* VDPA_CMD_DEV_DEL: delete a user-created vdpa device by name through
 * its owning management device's dev_del op.
 */
638 static int vdpa_nl_cmd_dev_del_set_doit(struct sk_buff *skb, struct genl_info *info)
640 struct vdpa_mgmt_dev *mdev;
641 struct vdpa_device *vdev;
646 if (!info->attrs[VDPA_ATTR_DEV_NAME])
648 name = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
650 down_write(&vdpa_dev_lock);
651 dev = bus_find_device(&vdpa_bus, NULL, name, vdpa_name_match);
653 NL_SET_ERR_MSG_MOD(info->extack, "device not found");
657 vdev = container_of(dev, struct vdpa_device, dev);
/* devices without an mdev were not created via this interface */
659 NL_SET_ERR_MSG_MOD(info->extack, "Only user created device can be deleted by user");
664 mdev->ops->dev_del(mdev, vdev);
668 up_write(&vdpa_dev_lock);
/* Build one DEV_NEW netlink message for a vdpa device: mgmtdev handle,
 * name, device/vendor id, vq count and min/max vq sizes.
 */
673 vdpa_dev_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid, u32 seq,
674 int flags, struct netlink_ext_ack *extack)
683 hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags, VDPA_CMD_DEV_NEW);
687 err = vdpa_nl_mgmtdev_handle_fill(msg, vdev->mdev);
691 device_id = vdev->config->get_device_id(vdev);
692 vendor_id = vdev->config->get_vendor_id(vdev);
693 max_vq_size = vdev->config->get_vq_num_max(vdev);
694 if (vdev->config->get_vq_num_min) /* optional op */
695 min_vq_size = vdev->config->get_vq_num_min(vdev);
698 if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev)))
700 if (nla_put_u32(msg, VDPA_ATTR_DEV_ID, device_id))
702 if (nla_put_u32(msg, VDPA_ATTR_DEV_VENDOR_ID, vendor_id))
704 if (nla_put_u32(msg, VDPA_ATTR_DEV_MAX_VQS, vdev->nvqs))
706 if (nla_put_u16(msg, VDPA_ATTR_DEV_MAX_VQ_SIZE, max_vq_size))
708 if (nla_put_u16(msg, VDPA_ATTR_DEV_MIN_VQ_SIZE, min_vq_size))
711 genlmsg_end(msg, hdr);
/* error path: discard the partially built message */
715 genlmsg_cancel(msg, hdr);
/* VDPA_CMD_DEV_GET (doit): look up one vdpa device by name and reply
 * with its description (vdpa_dev_fill).
 */
719 static int vdpa_nl_cmd_dev_get_doit(struct sk_buff *skb, struct genl_info *info)
721 struct vdpa_device *vdev;
727 if (!info->attrs[VDPA_ATTR_DEV_NAME])
729 devname = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
730 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
734 down_read(&vdpa_dev_lock);
735 dev = bus_find_device(&vdpa_bus, NULL, devname, vdpa_name_match);
737 NL_SET_ERR_MSG_MOD(info->extack, "device not found");
741 vdev = container_of(dev, struct vdpa_device, dev);
746 err = vdpa_dev_fill(vdev, msg, info->snd_portid, info->snd_seq, 0, info->extack);
750 err = genlmsg_reply(msg, info);
752 up_read(&vdpa_dev_lock);
/* error path also drops the lock before returning */
758 up_read(&vdpa_dev_lock);
/* Shared cursor/context for the dumpit callbacks below (msg, netlink cb,
 * start index and running index).
 */
763 struct vdpa_dev_dump_info {
765 struct netlink_callback *cb;
/* bus_for_each_dev() helper: emit one device description per device,
 * skipping entries before the dump's resume index.
 */
770 static int vdpa_dev_dump(struct device *dev, void *data)
772 struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);
773 struct vdpa_dev_dump_info *info = data;
778 if (info->idx < info->start_idx) {
782 err = vdpa_dev_fill(vdev, info->msg, NETLINK_CB(info->cb->skb).portid,
783 info->cb->nlh->nlmsg_seq, NLM_F_MULTI, info->cb->extack);
/* VDPA_CMD_DEV_GET (dumpit): walk every device on the bus, saving the
 * resume position in cb->args[0].
 */
791 static int vdpa_nl_cmd_dev_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
793 struct vdpa_dev_dump_info info;
797 info.start_idx = cb->args[0];
800 down_read(&vdpa_dev_lock);
801 bus_for_each_dev(&vdpa_bus, NULL, &info, vdpa_dev_dump);
802 up_read(&vdpa_dev_lock);
803 cb->args[0] = info.idx;
/* Report max_virtqueue_pairs only when the device offers MQ or RSS;
 * the field is meaningless otherwise.
 */
807 static int vdpa_dev_net_mq_config_fill(struct sk_buff *msg, u64 features,
808 const struct virtio_net_config *config)
812 if ((features & BIT_ULL(VIRTIO_NET_F_MQ)) == 0 &&
813 (features & BIT_ULL(VIRTIO_NET_F_RSS)) == 0)
816 val_u16 = __virtio16_to_cpu(true, config->max_virtqueue_pairs);
818 return nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MAX_VQP, val_u16);
/* Report the config-space MTU only when VIRTIO_NET_F_MTU is offered. */
821 static int vdpa_dev_net_mtu_config_fill(struct sk_buff *msg, u64 features,
822 const struct virtio_net_config *config)
826 if ((features & BIT_ULL(VIRTIO_NET_F_MTU)) == 0)
829 val_u16 = __virtio16_to_cpu(true, config->mtu);
831 return nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MTU, val_u16);
/* Report the config-space MAC only when VIRTIO_NET_F_MAC is offered. */
834 static int vdpa_dev_net_mac_config_fill(struct sk_buff *msg, u64 features,
835 const struct virtio_net_config *config)
837 if ((features & BIT_ULL(VIRTIO_NET_F_MAC)) == 0)
840 return nla_put(msg, VDPA_ATTR_DEV_NET_CFG_MACADDR,
841 sizeof(config->mac), config->mac);
/* Fill net-class config attributes: link status, device features, then
 * the feature-gated mtu/mac/mq fields via the helpers above.
 */
844 static int vdpa_dev_net_config_fill(struct vdpa_device *vdev, struct sk_buff *msg)
846 struct virtio_net_config config = {};
850 vdev->config->get_config(vdev, 0, &config, sizeof(config));
852 val_u16 = __virtio16_to_cpu(true, config.status);
853 if (nla_put_u16(msg, VDPA_ATTR_DEV_NET_STATUS, val_u16))
856 features_device = vdev->config->get_device_features(vdev);
858 if (nla_put_u64_64bit(msg, VDPA_ATTR_VDPA_DEV_SUPPORTED_FEATURES, features_device,
862 if (vdpa_dev_net_mtu_config_fill(msg, features_device, &config))
865 if (vdpa_dev_net_mac_config_fill(msg, features_device, &config))
868 return vdpa_dev_net_mq_config_fill(msg, features_device, &config);
/* Build one DEV_CONFIG_GET message under the device's cf_lock: name,
 * device id, negotiated driver features (only after FEATURES_OK), and
 * class-specific config (currently net).
 */
872 vdpa_dev_config_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid, u32 seq,
873 int flags, struct netlink_ext_ack *extack)
881 down_read(&vdev->cf_lock);
882 hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags,
883 VDPA_CMD_DEV_CONFIG_GET);
889 if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev))) {
894 device_id = vdev->config->get_device_id(vdev);
895 if (nla_put_u32(msg, VDPA_ATTR_DEV_ID, device_id)) {
900 /* only read driver features after the feature negotiation is done */
901 status = vdev->config->get_status(vdev);
902 if (status & VIRTIO_CONFIG_S_FEATURES_OK) {
903 features_driver = vdev->config->get_driver_features(vdev);
904 if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_NEGOTIATED_FEATURES, features_driver,
913 err = vdpa_dev_net_config_fill(vdev, msg);
/* success path: drop lock, close the message */
922 up_read(&vdev->cf_lock);
923 genlmsg_end(msg, hdr);
/* error path: cancel the message and drop the lock */
927 genlmsg_cancel(msg, hdr);
929 up_read(&vdev->cf_lock);
/* Fill one vstats record for queue @index: requires feature negotiation
 * to be complete, emits max_vqp, negotiated features, the queue index
 * and finally the vendor-specific stats via get_vendor_vq_stats.
 */
933 static int vdpa_fill_stats_rec(struct vdpa_device *vdev, struct sk_buff *msg,
934 struct genl_info *info, u32 index)
936 struct virtio_net_config config = {};
942 status = vdev->config->get_status(vdev);
943 if (!(status & VIRTIO_CONFIG_S_FEATURES_OK)) {
944 NL_SET_ERR_MSG_MOD(info->extack, "feature negotiation not complete");
947 vdpa_get_config_unlocked(vdev, 0, &config, sizeof(config)); /* caller holds cf_lock */
949 max_vqp = __virtio16_to_cpu(true, config.max_virtqueue_pairs);
950 if (nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MAX_VQP, max_vqp))
953 features = vdev->config->get_driver_features(vdev);
954 if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_NEGOTIATED_FEATURES,
955 features, VDPA_ATTR_PAD))
958 if (nla_put_u32(msg, VDPA_ATTR_DEV_QUEUE_INDEX, index))
961 err = vdev->config->get_vendor_vq_stats(vdev, index, msg, info->extack);
/* Wrapper around vdpa_fill_stats_rec(): takes cf_lock and bails out if
 * the device has no get_vendor_vq_stats op.
 */
968 static int vendor_stats_fill(struct vdpa_device *vdev, struct sk_buff *msg,
969 struct genl_info *info, u32 index)
973 down_read(&vdev->cf_lock);
974 if (!vdev->config->get_vendor_vq_stats) {
979 err = vdpa_fill_stats_rec(vdev, msg, info, index);
981 up_read(&vdev->cf_lock);
/* Build one DEV_VSTATS_GET message: name, device id, then per-class
 * validation of the queue index before the vendor stats record.
 */
985 static int vdpa_dev_vendor_stats_fill(struct vdpa_device *vdev,
987 struct genl_info *info, u32 index)
992 u32 portid = info->snd_portid;
993 u32 seq = info->snd_seq;
996 hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags,
997 VDPA_CMD_DEV_VSTATS_GET);
1001 if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev))) {
1006 device_id = vdev->config->get_device_id(vdev);
1007 if (nla_put_u32(msg, VDPA_ATTR_DEV_ID, device_id)) {
1012 switch (device_id) {
/* net device: bound the queue index by the spec maximum */
1014 if (index > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX) {
1015 NL_SET_ERR_MSG_MOD(info->extack, "queue index excceeds max value");
1020 err = vendor_stats_fill(vdev, msg, info, index);
1026 genlmsg_end(msg, hdr);
/* error path: discard the partially built message */
1031 genlmsg_cancel(msg, hdr);
/* VDPA_CMD_DEV_CONFIG_GET (doit): look up a managed device by name and
 * reply with its configuration (vdpa_dev_config_fill).
 */
1035 static int vdpa_nl_cmd_dev_config_get_doit(struct sk_buff *skb, struct genl_info *info)
1037 struct vdpa_device *vdev;
1038 struct sk_buff *msg;
1039 const char *devname;
1043 if (!info->attrs[VDPA_ATTR_DEV_NAME])
1045 devname = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
1046 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1050 down_read(&vdpa_dev_lock);
1051 dev = bus_find_device(&vdpa_bus, NULL, devname, vdpa_name_match);
1053 NL_SET_ERR_MSG_MOD(info->extack, "device not found");
1057 vdev = container_of(dev, struct vdpa_device, dev);
/* only devices owned by a management device are reported here */
1059 NL_SET_ERR_MSG_MOD(info->extack, "unmanaged vdpa device");
1063 err = vdpa_dev_config_fill(vdev, msg, info->snd_portid, info->snd_seq,
1066 err = genlmsg_reply(msg, info);
1071 up_read(&vdpa_dev_lock);
/* bus_for_each_dev() helper for the config dump: one config message per
 * device, skipping entries before the resume index.
 */
1077 static int vdpa_dev_config_dump(struct device *dev, void *data)
1079 struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);
1080 struct vdpa_dev_dump_info *info = data;
1085 if (info->idx < info->start_idx) {
1089 err = vdpa_dev_config_fill(vdev, info->msg, NETLINK_CB(info->cb->skb).portid,
1090 info->cb->nlh->nlmsg_seq, NLM_F_MULTI,
/* VDPA_CMD_DEV_CONFIG_GET (dumpit): walk every device on the bus,
 * saving the resume position in cb->args[0].
 */
1100 vdpa_nl_cmd_dev_config_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
1102 struct vdpa_dev_dump_info info;
1106 info.start_idx = cb->args[0];
1109 down_read(&vdpa_dev_lock);
1110 bus_for_each_dev(&vdpa_bus, NULL, &info, vdpa_dev_config_dump);
1111 up_read(&vdpa_dev_lock);
1112 cb->args[0] = info.idx;
/* VDPA_CMD_DEV_VSTATS_GET: reply with vendor queue statistics for the
 * named managed device and the requested queue index.
 */
1116 static int vdpa_nl_cmd_dev_stats_get_doit(struct sk_buff *skb,
1117 struct genl_info *info)
1119 struct vdpa_device *vdev;
1120 struct sk_buff *msg;
1121 const char *devname;
1126 if (!info->attrs[VDPA_ATTR_DEV_NAME])
1129 if (!info->attrs[VDPA_ATTR_DEV_QUEUE_INDEX])
1132 devname = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
1133 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1137 index = nla_get_u32(info->attrs[VDPA_ATTR_DEV_QUEUE_INDEX]);
1138 down_read(&vdpa_dev_lock);
1139 dev = bus_find_device(&vdpa_bus, NULL, devname, vdpa_name_match);
1141 NL_SET_ERR_MSG_MOD(info->extack, "device not found");
1145 vdev = container_of(dev, struct vdpa_device, dev);
/* only management-device-owned devices expose stats here */
1147 NL_SET_ERR_MSG_MOD(info->extack, "unmanaged vdpa device");
1151 err = vdpa_dev_vendor_stats_fill(vdev, msg, info, index);
1155 err = genlmsg_reply(msg, info);
1158 up_read(&vdpa_dev_lock);
/* error path also drops the lock before returning */
1166 up_read(&vdpa_dev_lock);
/* Netlink attribute validation policy for the vdpa family. */
1170 static const struct nla_policy vdpa_nl_policy[VDPA_ATTR_MAX + 1] = {
1171 [VDPA_ATTR_MGMTDEV_BUS_NAME] = { .type = NLA_NUL_STRING },
1172 [VDPA_ATTR_MGMTDEV_DEV_NAME] = { .type = NLA_STRING },
1173 [VDPA_ATTR_DEV_NAME] = { .type = NLA_STRING },
1174 [VDPA_ATTR_DEV_NET_CFG_MACADDR] = NLA_POLICY_ETH_ADDR,
1175 /* virtio spec 1.1 section 5.1.4.1 for valid MTU range */
1176 [VDPA_ATTR_DEV_NET_CFG_MTU] = NLA_POLICY_MIN(NLA_U16, 68),
/* Command table: mutating commands (DEV_NEW/DEV_DEL/VSTATS_GET) require
 * GENL_ADMIN_PERM; GET commands provide both doit and dumpit.
 */
1179 static const struct genl_ops vdpa_nl_ops[] = {
1181 .cmd = VDPA_CMD_MGMTDEV_GET,
1182 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
1183 .doit = vdpa_nl_cmd_mgmtdev_get_doit,
1184 .dumpit = vdpa_nl_cmd_mgmtdev_get_dumpit,
1187 .cmd = VDPA_CMD_DEV_NEW,
1188 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
1189 .doit = vdpa_nl_cmd_dev_add_set_doit,
1190 .flags = GENL_ADMIN_PERM,
1193 .cmd = VDPA_CMD_DEV_DEL,
1194 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
1195 .doit = vdpa_nl_cmd_dev_del_set_doit,
1196 .flags = GENL_ADMIN_PERM,
1199 .cmd = VDPA_CMD_DEV_GET,
1200 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
1201 .doit = vdpa_nl_cmd_dev_get_doit,
1202 .dumpit = vdpa_nl_cmd_dev_get_dumpit,
1205 .cmd = VDPA_CMD_DEV_CONFIG_GET,
1206 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
1207 .doit = vdpa_nl_cmd_dev_config_get_doit,
1208 .dumpit = vdpa_nl_cmd_dev_config_get_dumpit,
1211 .cmd = VDPA_CMD_DEV_VSTATS_GET,
1212 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
1213 .doit = vdpa_nl_cmd_dev_stats_get_doit,
1214 .flags = GENL_ADMIN_PERM,
/* The "vdpa" generic netlink family definition. */
1218 static struct genl_family vdpa_nl_family __ro_after_init = {
1219 .name = VDPA_GENL_NAME,
1220 .version = VDPA_GENL_VERSION,
1221 .maxattr = VDPA_ATTR_MAX,
1222 .policy = vdpa_nl_policy,
1224 .module = THIS_MODULE,
1226 .n_ops = ARRAY_SIZE(vdpa_nl_ops),
1227 .resv_start_op = VDPA_CMD_DEV_VSTATS_GET + 1,
/* Module init: register the bus, then the netlink family; unwind the
 * bus registration if the family fails.
 */
1230 static int vdpa_init(void)
1234 err = bus_register(&vdpa_bus);
1237 err = genl_register_family(&vdpa_nl_family);
1243 bus_unregister(&vdpa_bus);
/* Module exit: tear down in reverse order of vdpa_init(), then destroy
 * the index allocator.
 */
1247 static void __exit vdpa_exit(void)
1249 genl_unregister_family(&vdpa_nl_family);
1250 bus_unregister(&vdpa_bus);
1251 ida_destroy(&vdpa_index_ida);
1253 core_initcall(vdpa_init);
1254 module_exit(vdpa_exit);
1257 MODULE_LICENSE("GPL v2");