// SPDX-License-Identifier: GPL-2.0
/*
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2018-2020, Intel Corporation.
 */
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ids.h>
#include <linux/atomic.h>

#include "mei_dev.h"
#include "hbm.h"
#include "client.h"

#define MEI_VIRTIO_RPM_TIMEOUT 500
/* ACRN virtio device types */
#ifndef VIRTIO_ID_MEI
#define VIRTIO_ID_MEI 0xFFFE /* virtio mei */
#endif

/**
 * struct mei_virtio_cfg - settings passed from the virtio backend
 * @buf_depth: read buffer depth in slots (4 bytes each)
 * @hw_ready: hw is ready for operation
 * @host_reset: synchronize reset with virtio backend
 * @reserved: reserved for alignment
 * @fw_status: FW status
 */
struct mei_virtio_cfg {
	u32 buf_depth;
	u8 hw_ready;
	u8 host_reset;
	u8 reserved[2];
	u32 fw_status[MEI_FW_STATUS_MAX];
} __packed;

struct mei_virtio_hw {
	struct mei_device mdev;

	struct virtqueue *in;
	struct virtqueue *out;

	bool host_ready;
	struct work_struct intr_handler;

	/* receive buffer, posted to the backend via the "in" virtqueue */
	u32 *recv_buf;
	u8 recv_rdy;
	u32 recv_idx;
	u32 recv_len;

	/* in-flight write buffers, freed on "out" virtqueue completion */
	atomic_t hbuf_ready;
	const void *send_hdr;
	const void *send_buf;

	struct mei_virtio_cfg cfg;
};

#define to_virtio_hw(_dev) container_of(_dev, struct mei_virtio_hw, mdev)

/**
 * mei_virtio_fw_status() - read status register of mei
 * @dev: mei device
 * @fw_status: fw status register values
 *
 * Return: always 0
 */
static int mei_virtio_fw_status(struct mei_device *dev,
				struct mei_fw_status *fw_status)
{
	struct virtio_device *vdev = dev_to_virtio(dev->dev);

	fw_status->count = MEI_FW_STATUS_MAX;
	virtio_cread_bytes(vdev, offsetof(struct mei_virtio_cfg, fw_status),
			   fw_status->status, sizeof(fw_status->status));
	return 0;
}

/**
 * mei_virtio_pg_state() - translate internal pg state
 *	to the mei power gating state
 *	There is no power management in ACRN mode; always return OFF.
 * @dev: mei device
 *
 * Return:
 * * MEI_PG_OFF - if aliveness is on (always)
 * * MEI_PG_ON - (never)
 */
static inline enum mei_pg_state mei_virtio_pg_state(struct mei_device *dev)
{
	return MEI_PG_OFF;
}

/**
 * mei_virtio_hw_config() - configure hw dependent settings
 * @dev: mei device
 *
 * Return: always 0
 */
static int mei_virtio_hw_config(struct mei_device *dev)
{
	return 0;
}

/**
 * mei_virtio_hbuf_empty_slots() - counts write empty slots.
 * @dev: the device structure
 *
 * Return: the frontend buffer size in slots if the buffer is ready, 0 otherwise
 */
static int mei_virtio_hbuf_empty_slots(struct mei_device *dev)
{
	struct mei_virtio_hw *hw = to_virtio_hw(dev);

	return (atomic_read(&hw->hbuf_ready) == 1) ? hw->cfg.buf_depth : 0;
}

/**
 * mei_virtio_hbuf_is_ready() - checks if write buffer is ready
 * @dev: the device structure
 *
 * Return: true if hbuf is ready
 */
static bool mei_virtio_hbuf_is_ready(struct mei_device *dev)
{
	struct mei_virtio_hw *hw = to_virtio_hw(dev);

	return atomic_read(&hw->hbuf_ready) == 1;
}

/**
 * mei_virtio_hbuf_depth() - returns depth of FE write buffer.
 * @dev: the device structure
 *
 * Return: depth of frontend write buffer in slots
 */
static u32 mei_virtio_hbuf_depth(const struct mei_device *dev)
{
	struct mei_virtio_hw *hw = to_virtio_hw(dev);

	return hw->cfg.buf_depth;
}

/**
 * mei_virtio_intr_clear() - clear and stop interrupts
 * @dev: the device structure
 */
static void mei_virtio_intr_clear(struct mei_device *dev)
{
	/*
	 * In our virtio solution there are two types of interrupts:
	 * the vq interrupt and the config change interrupt.
	 * 1) start/reset relies on the virtio config changed interrupt;
	 * 2) send/recv relies on the virtio virtqueue interrupts.
	 * They are all virtual interrupts, so there is no corresponding
	 * operation to do here.
	 */
}

/**
 * mei_virtio_intr_enable() - enables mei BE virtqueues callbacks
 * @dev: the device structure
 */
static void mei_virtio_intr_enable(struct mei_device *dev)
{
	struct mei_virtio_hw *hw = to_virtio_hw(dev);
	struct virtio_device *vdev = dev_to_virtio(dev->dev);

	virtio_config_enable(vdev);

	virtqueue_enable_cb(hw->in);
	virtqueue_enable_cb(hw->out);
}

/**
 * mei_virtio_intr_disable() - disables mei BE virtqueues callbacks
 * @dev: the device structure
 */
static void mei_virtio_intr_disable(struct mei_device *dev)
{
	struct mei_virtio_hw *hw = to_virtio_hw(dev);
	struct virtio_device *vdev = dev_to_virtio(dev->dev);

	virtio_config_disable(vdev);

	virtqueue_disable_cb(hw->in);
	virtqueue_disable_cb(hw->out);
}

/**
 * mei_virtio_synchronize_irq() - wait for pending IRQ handlers for all
 *	virtqueues
 * @dev: the device structure
 */
static void mei_virtio_synchronize_irq(struct mei_device *dev)
{
	struct mei_virtio_hw *hw = to_virtio_hw(dev);

	/*
	 * All IRQ handlers are routed through a workqueue, so
	 * synchronizing the IRQs amounts to flushing this work.
	 */
	flush_work(&hw->intr_handler);
}

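/**
 * mei_virtio_free_outbufs() - free the buffers of an in-flight write
 * @hw: the virtio mei hardware structure
 *
 * Releases the header and payload copies handed to the "out" virtqueue
 * once the backend has consumed them, or on error and teardown paths.
 */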
static void mei_virtio_free_outbufs(struct mei_virtio_hw *hw)
{
	kfree(hw->send_hdr);
	kfree(hw->send_buf);
	hw->send_hdr = NULL;
	hw->send_buf = NULL;
}

/**
 * mei_virtio_write_message() - writes a message to mei virtio back-end service.
 * @dev: the device structure
 * @hdr: mei header of message
 * @hdr_len: header length
 * @data: message payload to be written
 * @data_len: message payload length
 *
 * Return:
 * *  0: on success
 * * -EIO: if write has failed
 * * -ENOMEM: on memory allocation failure
 */
static int mei_virtio_write_message(struct mei_device *dev,
				    const void *hdr, size_t hdr_len,
				    const void *data, size_t data_len)
{
	struct mei_virtio_hw *hw = to_virtio_hw(dev);
	struct scatterlist sg[2];
	const void *hbuf, *dbuf;
	int ret;

	if (WARN_ON(!atomic_add_unless(&hw->hbuf_ready, -1, 0)))
		return -EIO;

	hbuf = kmemdup(hdr, hdr_len, GFP_KERNEL);
	hw->send_hdr = hbuf;

	dbuf = kmemdup(data, data_len, GFP_KERNEL);
	hw->send_buf = dbuf;

	if (!hbuf || !dbuf) {
		ret = -ENOMEM;
		goto fail;
	}

	sg_init_table(sg, 2);
	sg_set_buf(&sg[0], hbuf, hdr_len);
	sg_set_buf(&sg[1], dbuf, data_len);

	ret = virtqueue_add_outbuf(hw->out, sg, 2, hw, GFP_KERNEL);
	if (ret) {
		dev_err(dev->dev, "failed to add outbuf\n");
		goto fail;
	}

	virtqueue_kick(hw->out);
	return 0;

fail:
	mei_virtio_free_outbufs(hw);

	return ret;
}

/**
 * mei_virtio_count_full_read_slots() - counts read full slots.
 * @dev: the device structure
 *
 * Return: -EOVERFLOW if overflow, otherwise filled slots count
 */
static int mei_virtio_count_full_read_slots(struct mei_device *dev)
{
	struct mei_virtio_hw *hw = to_virtio_hw(dev);

	if (hw->recv_idx > hw->recv_len)
		return -EOVERFLOW;

	return hw->recv_len - hw->recv_idx;
}

/**
 * mei_virtio_read_hdr() - Reads 32bit dword from mei virtio receive buffer
 * @dev: the device structure
 *
 * Return: 32bit dword of receive buffer (u32)
 */
static inline u32 mei_virtio_read_hdr(const struct mei_device *dev)
{
	struct mei_virtio_hw *hw = to_virtio_hw(dev);

	WARN_ON(hw->cfg.buf_depth < hw->recv_idx + 1);

	return hw->recv_buf[hw->recv_idx++];
}

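/**
 * mei_virtio_read() - copy a message payload out of the receive buffer
 * @dev: the device structure
 * @buffer: destination buffer
 * @len: payload length in bytes
 *
 * Return: 0 on success, -EOVERFLOW if the read would pass the buffer end
 */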
static int mei_virtio_read(struct mei_device *dev, unsigned char *buffer,
			   unsigned long len)
{
	struct mei_virtio_hw *hw = to_virtio_hw(dev);
	u32 slots = mei_data2slots(len);

	if (WARN_ON(hw->cfg.buf_depth < hw->recv_idx + slots))
		return -EOVERFLOW;

	/*
	 * Assumption: There is only one MEI message in recv_buf each time.
	 * The backend service must follow this rule too.
	 */
	memcpy(buffer, hw->recv_buf + hw->recv_idx, len);
	hw->recv_idx += slots;

	return 0;
}

static bool mei_virtio_pg_is_enabled(struct mei_device *dev)
{
	return false;
}

static bool mei_virtio_pg_in_transition(struct mei_device *dev)
{
	return false;
}

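/**
 * mei_virtio_add_recv_buf() - post the receive buffer to the backend
 * @hw: the virtio mei hardware structure
 *
 * Refills the "in" virtqueue with the single receive buffer so the
 * backend can deliver the next message; a no-op if it is already queued.
 */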
static void mei_virtio_add_recv_buf(struct mei_virtio_hw *hw)
{
	struct scatterlist sg;

	if (hw->recv_rdy) /* not needed */
		return;

	/* refill the recv_buf to IN virtqueue to get next message */
	sg_init_one(&sg, hw->recv_buf, mei_slots2data(hw->cfg.buf_depth));
	hw->recv_len = 0;
	hw->recv_idx = 0;
	hw->recv_rdy = 1;
	virtqueue_add_inbuf(hw->in, &sg, 1, hw->recv_buf, GFP_KERNEL);
	virtqueue_kick(hw->in);
}

/**
 * mei_virtio_hw_is_ready() - check whether the BE(hw) has turned ready
 * @dev: mei device
 *
 * Return: bool
 */
static bool mei_virtio_hw_is_ready(struct mei_device *dev)
{
	struct mei_virtio_hw *hw = to_virtio_hw(dev);
	struct virtio_device *vdev = dev_to_virtio(dev->dev);

	virtio_cread(vdev, struct mei_virtio_cfg,
		     hw_ready, &hw->cfg.hw_ready);

	dev_dbg(dev->dev, "hw ready %d\n", hw->cfg.hw_ready);

	return hw->cfg.hw_ready;
}

/**
 * mei_virtio_hw_reset - resets virtio hw.
 * @dev: the device structure
 * @intr_enable: virtio use data/config callbacks
 *
 * Return: 0 on success, an error code otherwise
 */
static int mei_virtio_hw_reset(struct mei_device *dev, bool intr_enable)
{
	struct mei_virtio_hw *hw = to_virtio_hw(dev);
	struct virtio_device *vdev = dev_to_virtio(dev->dev);

	dev_dbg(dev->dev, "hw reset\n");

	dev->recvd_hw_ready = false;
	hw->host_ready = false;
	atomic_set(&hw->hbuf_ready, 0);
	hw->recv_len = 0;
	hw->recv_idx = 0;

	hw->cfg.host_reset = 1;
	virtio_cwrite(vdev, struct mei_virtio_cfg,
		      host_reset, &hw->cfg.host_reset);

	mei_virtio_hw_is_ready(dev);

	if (intr_enable)
		mei_virtio_intr_enable(dev);

	return 0;
}

/**
 * mei_virtio_hw_reset_release() - release device from the reset
 * @dev: the device structure
 */
static void mei_virtio_hw_reset_release(struct mei_device *dev)
{
	struct mei_virtio_hw *hw = to_virtio_hw(dev);
	struct virtio_device *vdev = dev_to_virtio(dev->dev);

	dev_dbg(dev->dev, "hw reset release\n");
	hw->cfg.host_reset = 0;
	virtio_cwrite(vdev, struct mei_virtio_cfg,
		      host_reset, &hw->cfg.host_reset);
}

/**
 * mei_virtio_hw_ready_wait() - wait until the virtio(hw) has turned ready
 *	or timeout is reached
 * @dev: mei device
 *
 * Return: 0 on success, error otherwise
 */
static int mei_virtio_hw_ready_wait(struct mei_device *dev)
{
	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_hw_ready,
			   dev->recvd_hw_ready,
			   mei_secs_to_jiffies(MEI_HW_READY_TIMEOUT));
	mutex_lock(&dev->device_lock);
	if (!dev->recvd_hw_ready) {
		dev_err(dev->dev, "wait hw ready failed\n");
		return -ETIMEDOUT;
	}

	dev->recvd_hw_ready = false;
	return 0;
}

/**
 * mei_virtio_hw_start() - hw start routine
 * @dev: mei device
 *
 * Return: 0 on success, error otherwise
 */
static int mei_virtio_hw_start(struct mei_device *dev)
{
	struct mei_virtio_hw *hw = to_virtio_hw(dev);
	int ret;

	dev_dbg(dev->dev, "hw start\n");
	mei_virtio_hw_reset_release(dev);

	ret = mei_virtio_hw_ready_wait(dev);
	if (ret)
		return ret;

	mei_virtio_add_recv_buf(hw);
	atomic_set(&hw->hbuf_ready, 1);
	dev_dbg(dev->dev, "hw is ready\n");
	hw->host_ready = true;

	return 0;
}

/**
 * mei_virtio_host_is_ready() - check whether the FE has turned ready
 * @dev: mei device
 *
 * Return: bool
 */
static bool mei_virtio_host_is_ready(struct mei_device *dev)
{
	struct mei_virtio_hw *hw = to_virtio_hw(dev);

	dev_dbg(dev->dev, "host ready %d\n", hw->host_ready);

	return hw->host_ready;
}

/**
 * mei_virtio_data_in() - The callback of recv virtqueue of virtio mei
 * @vq: receiving virtqueue
 */
static void mei_virtio_data_in(struct virtqueue *vq)
{
	struct mei_virtio_hw *hw = vq->vdev->priv;

	/* disable interrupts (enabled again from the interrupt worker) */
	virtqueue_disable_cb(hw->in);

	schedule_work(&hw->intr_handler);
}

/**
 * mei_virtio_data_out() - The callback of send virtqueue of virtio mei
 * @vq: transmitting virtqueue
 */
static void mei_virtio_data_out(struct virtqueue *vq)
{
	struct mei_virtio_hw *hw = vq->vdev->priv;

	schedule_work(&hw->intr_handler);
}

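/**
 * mei_virtio_intr_handler() - the common bottom half of both virtqueues
 * @work: the interrupt handler work
 *
 * Handles hw-ready transitions, reclaims completed receive and transmit
 * buffers, and runs the mei read/write/completion handlers under the
 * device lock.
 */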
static void mei_virtio_intr_handler(struct work_struct *work)
{
	struct mei_virtio_hw *hw =
		container_of(work, struct mei_virtio_hw, intr_handler);
	struct mei_device *dev = &hw->mdev;
	LIST_HEAD(complete_list);
	s32 slots;
	int rets = 0;
	void *data;
	unsigned int len;

	mutex_lock(&dev->device_lock);

	if (dev->dev_state == MEI_DEV_DISABLED) {
		dev_warn(dev->dev, "Interrupt in disabled state.\n");
		mei_virtio_intr_disable(dev);
		goto end;
	}

	/* check if ME wants a reset */
	if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) {
		dev_warn(dev->dev, "BE service not ready: resetting.\n");
		schedule_work(&dev->reset_work);
		goto end;
	}

	/* check if we need to start the dev */
	if (!mei_host_is_ready(dev)) {
		if (mei_hw_is_ready(dev)) {
			dev_dbg(dev->dev, "we need to start the dev.\n");
			dev->recvd_hw_ready = true;
			wake_up(&dev->wait_hw_ready);
		} else {
			dev_warn(dev->dev, "Spurious Interrupt\n");
		}
		goto end;
	}

	/* read */
	if (hw->recv_rdy) {
		data = virtqueue_get_buf(hw->in, &len);
		if (!data || !len) {
			dev_dbg(dev->dev, "No data %d\n", len);
		} else {
			dev_dbg(dev->dev, "data_in %d\n", len);
			WARN_ON(data != hw->recv_buf);
			hw->recv_len = mei_data2slots(len);
			hw->recv_rdy = 0;
		}
	}

	/* write */
	if (!atomic_read(&hw->hbuf_ready)) {
		if (!virtqueue_get_buf(hw->out, &len)) {
			dev_warn(dev->dev, "Failed to getbuf\n");
		} else {
			mei_virtio_free_outbufs(hw);
			atomic_inc(&hw->hbuf_ready);
		}
	}

	/* check slots available for reading */
	slots = mei_count_full_read_slots(dev);
	while (slots > 0) {
		dev_dbg(dev->dev, "slots to read = %08x\n", slots);
		rets = mei_irq_read_handler(dev, &complete_list, &slots);

		if (rets &&
		    (dev->dev_state != MEI_DEV_RESETTING &&
		     dev->dev_state != MEI_DEV_POWER_DOWN)) {
			dev_err(dev->dev, "mei_irq_read_handler ret = %d.\n",
				rets);
			schedule_work(&dev->reset_work);
			goto end;
		}
	}

	dev->hbuf_is_ready = mei_hbuf_is_ready(dev);

	mei_irq_write_handler(dev, &complete_list);

	dev->hbuf_is_ready = mei_hbuf_is_ready(dev);

	mei_irq_compl_handler(dev, &complete_list);

	mei_virtio_add_recv_buf(hw);

end:
	if (dev->dev_state != MEI_DEV_DISABLED) {
		if (!virtqueue_enable_cb(hw->in))
			schedule_work(&hw->intr_handler);
	}

	mutex_unlock(&dev->device_lock);
}

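/**
 * mei_virtio_config_changed() - virtio config change callback
 * @vdev: the virtio device
 *
 * Re-reads the hw_ready flag and kicks the interrupt worker once so a
 * reset notification from the backend is handled.
 */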
static void mei_virtio_config_changed(struct virtio_device *vdev)
{
	struct mei_virtio_hw *hw = vdev->priv;
	struct mei_device *dev = &hw->mdev;

	virtio_cread(vdev, struct mei_virtio_cfg,
		     hw_ready, &hw->cfg.hw_ready);

	if (dev->dev_state == MEI_DEV_DISABLED) {
		dev_dbg(dev->dev, "disabled state don't start\n");
		return;
	}

	/* Run intr handler once to handle reset notify */
	schedule_work(&hw->intr_handler);
}

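/**
 * mei_virtio_remove_vqs() - detach unused buffers and delete the virtqueues
 * @vdev: the virtio device
 */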
static void mei_virtio_remove_vqs(struct virtio_device *vdev)
{
	struct mei_virtio_hw *hw = vdev->priv;

	virtqueue_detach_unused_buf(hw->in);
	hw->recv_len = 0;
	hw->recv_idx = 0;
	hw->recv_rdy = 0;

	virtqueue_detach_unused_buf(hw->out);

	mei_virtio_free_outbufs(hw);

	vdev->config->del_vqs(vdev);
}

/*
 * There are two virtqueues, one for send and one for recv.
 */
static int mei_virtio_init_vqs(struct mei_virtio_hw *hw,
			       struct virtio_device *vdev)
{
	struct virtqueue *vqs[2];
	vq_callback_t *cbs[] = {
		mei_virtio_data_in,
		mei_virtio_data_out,
	};
	static const char * const names[] = {
		"in",
		"out",
	};
	int ret;

	ret = virtio_find_vqs(vdev, 2, vqs, cbs, names, NULL);
	if (ret)
		return ret;

	hw->in = vqs[0];
	hw->out = vqs[1];

	return 0;
}

static const struct mei_hw_ops mei_virtio_ops = {
	.fw_status = mei_virtio_fw_status,
	.pg_state = mei_virtio_pg_state,

	.host_is_ready = mei_virtio_host_is_ready,

	.hw_is_ready = mei_virtio_hw_is_ready,
	.hw_reset = mei_virtio_hw_reset,
	.hw_config = mei_virtio_hw_config,
	.hw_start = mei_virtio_hw_start,

	.pg_in_transition = mei_virtio_pg_in_transition,
	.pg_is_enabled = mei_virtio_pg_is_enabled,

	.intr_clear = mei_virtio_intr_clear,
	.intr_enable = mei_virtio_intr_enable,
	.intr_disable = mei_virtio_intr_disable,
	.synchronize_irq = mei_virtio_synchronize_irq,

	.hbuf_free_slots = mei_virtio_hbuf_empty_slots,
	.hbuf_is_ready = mei_virtio_hbuf_is_ready,
	.hbuf_depth = mei_virtio_hbuf_depth,

	.write = mei_virtio_write_message,

	.rdbuf_full_slots = mei_virtio_count_full_read_slots,
	.read_hdr = mei_virtio_read_hdr,
	.read = mei_virtio_read,
};

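/**
 * mei_virtio_probe() - probe routine: set up the virtqueues, the receive
 *	buffer and runtime PM, then start and register the mei device
 * @vdev: the virtio device
 *
 * Return: 0 on success, an error code otherwise
 */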
static int mei_virtio_probe(struct virtio_device *vdev)
{
	struct mei_virtio_hw *hw;
	int ret;

	hw = devm_kzalloc(&vdev->dev, sizeof(*hw), GFP_KERNEL);
	if (!hw)
		return -ENOMEM;

	vdev->priv = hw;

	INIT_WORK(&hw->intr_handler, mei_virtio_intr_handler);

	ret = mei_virtio_init_vqs(hw, vdev);
	if (ret)
		goto vqs_failed;

	virtio_cread(vdev, struct mei_virtio_cfg,
		     buf_depth, &hw->cfg.buf_depth);

	hw->recv_buf = kzalloc(mei_slots2data(hw->cfg.buf_depth), GFP_KERNEL);
	if (!hw->recv_buf) {
		ret = -ENOMEM;
		goto hbuf_failed;
	}
	atomic_set(&hw->hbuf_ready, 0);

	virtio_device_ready(vdev);

	mei_device_init(&hw->mdev, &vdev->dev, &mei_virtio_ops);

	pm_runtime_get_noresume(&vdev->dev);
	pm_runtime_set_active(&vdev->dev);
	pm_runtime_enable(&vdev->dev);

	ret = mei_start(&hw->mdev);
	if (ret)
		goto mei_start_failed;

	pm_runtime_set_autosuspend_delay(&vdev->dev, MEI_VIRTIO_RPM_TIMEOUT);
	pm_runtime_use_autosuspend(&vdev->dev);

	ret = mei_register(&hw->mdev, &vdev->dev);
	if (ret)
		goto mei_failed;

	pm_runtime_put(&vdev->dev);

	return 0;

mei_failed:
	mei_stop(&hw->mdev);
mei_start_failed:
	mei_cancel_work(&hw->mdev);
	mei_disable_interrupts(&hw->mdev);
	kfree(hw->recv_buf);
hbuf_failed:
	vdev->config->del_vqs(vdev);
vqs_failed:
	return ret;
}

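/**
 * mei_virtio_pm_runtime_idle() - runtime PM idle callback
 * @device: the device structure
 *
 * Requests autosuspend when no write is pending.
 *
 * Return: -ENODEV if not initialized, -EBUSY otherwise
 */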
static int __maybe_unused mei_virtio_pm_runtime_idle(struct device *device)
{
	struct virtio_device *vdev = dev_to_virtio(device);
	struct mei_virtio_hw *hw = vdev->priv;

	dev_dbg(&vdev->dev, "rpm: mei_virtio : runtime_idle\n");

	if (!hw)
		return -ENODEV;

	if (mei_write_is_idle(&hw->mdev))
		pm_runtime_autosuspend(device);

	return -EBUSY;
}

static int __maybe_unused mei_virtio_pm_runtime_suspend(struct device *device)
{
	return 0;
}

static int __maybe_unused mei_virtio_pm_runtime_resume(struct device *device)
{
	return 0;
}

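/**
 * mei_virtio_freeze() - stop the device and tear down the virtqueues
 *	before hibernation
 * @vdev: the virtio device
 *
 * Return: 0 on success, -ENODEV if the device is not initialized
 */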
static int __maybe_unused mei_virtio_freeze(struct virtio_device *vdev)
{
	struct mei_virtio_hw *hw = vdev->priv;

	dev_dbg(&vdev->dev, "freeze\n");

	if (!hw)
		return -ENODEV;

	mei_stop(&hw->mdev);
	mei_disable_interrupts(&hw->mdev);
	cancel_work_sync(&hw->intr_handler);
	vdev->config->reset(vdev);
	mei_virtio_remove_vqs(vdev);

	return 0;
}

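/**
 * mei_virtio_restore() - re-create the virtqueues and restart the device
 *	after hibernation
 * @vdev: the virtio device
 *
 * Return: 0 on success, an error code otherwise
 */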
static int __maybe_unused mei_virtio_restore(struct virtio_device *vdev)
{
	struct mei_virtio_hw *hw = vdev->priv;
	int ret;

	dev_dbg(&vdev->dev, "restore\n");

	if (!hw)
		return -ENODEV;

	ret = mei_virtio_init_vqs(hw, vdev);
	if (ret)
		return ret;

	virtio_device_ready(vdev);

	ret = mei_restart(&hw->mdev);
	if (ret)
		return ret;

	/* Start timer if stopped in suspend */
	schedule_delayed_work(&hw->mdev.timer_work, HZ);

	return 0;
}

static const struct dev_pm_ops mei_virtio_pm_ops = {
	SET_RUNTIME_PM_OPS(mei_virtio_pm_runtime_suspend,
			   mei_virtio_pm_runtime_resume,
			   mei_virtio_pm_runtime_idle)
};

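/**
 * mei_virtio_remove() - remove routine
 * @vdev: the virtio device
 *
 * Stops the device, tears down the virtqueues and disables runtime PM.
 */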
static void mei_virtio_remove(struct virtio_device *vdev)
{
	struct mei_virtio_hw *hw = vdev->priv;

	mei_stop(&hw->mdev);
	mei_disable_interrupts(&hw->mdev);
	cancel_work_sync(&hw->intr_handler);
	mei_deregister(&hw->mdev);
	vdev->config->reset(vdev);
	mei_virtio_remove_vqs(vdev);
	kfree(hw->recv_buf);
	pm_runtime_disable(&vdev->dev);
}

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_MEI, VIRTIO_DEV_ANY_ID },
	{ }
};

static struct virtio_driver mei_virtio_driver = {
	.id_table = id_table,
	.probe = mei_virtio_probe,
	.remove = mei_virtio_remove,
	.config_changed = mei_virtio_config_changed,
	.driver = {
		.name = KBUILD_MODNAME,
		.owner = THIS_MODULE,
		.pm = &mei_virtio_pm_ops,
	},
#ifdef CONFIG_PM_SLEEP
	.freeze = mei_virtio_freeze,
	.restore = mei_virtio_restore,
#endif
};

module_virtio_driver(mei_virtio_driver);
MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio MEI frontend driver");
MODULE_LICENSE("GPL v2");