// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2023 Intel Corporation. */
#define dev_fmt(fmt) "Telemetry: " fmt

#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/dev_printk.h>
#include <linux/dma-mapping.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/workqueue.h>

#include "adf_admin.h"
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_telemetry.h"

#define TL_IS_ZERO(input)	((input) == 0)
static bool is_tl_supported(struct adf_accel_dev *accel_dev)
{
	u16 fw_caps = GET_HW_DATA(accel_dev)->fw_capabilities;

	return fw_caps & TL_CAPABILITY_BIT;
}
static int validate_tl_data(struct adf_tl_hw_data *tl_data)
{
	if (!tl_data->dev_counters ||
	    TL_IS_ZERO(tl_data->num_dev_counters) ||
	    !tl_data->sl_util_counters ||
	    !tl_data->sl_exec_counters ||
	    !tl_data->rp_counters ||
	    TL_IS_ZERO(tl_data->num_rp_counters))
		return -EOPNOTSUPP;

	return 0;
}
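
/*
 * Allocate the telemetry context: the DMA-coherent region shared with the
 * device (regs_data), the array of kernel-side history buffers used to
 * snapshot that region, and the ring pair index table.
 */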
static int adf_tl_alloc_mem(struct adf_accel_dev *accel_dev)
{
	struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev);
	struct device *dev = &GET_DEV(accel_dev);
	size_t regs_sz = tl_data->layout_sz;
	struct adf_telemetry *telemetry;
	int node = dev_to_node(dev);
	void *tl_data_regs;
	unsigned int i;

	telemetry = kzalloc_node(sizeof(*telemetry), GFP_KERNEL, node);
	if (!telemetry)
		return -ENOMEM;

	telemetry->rp_num_indexes = kmalloc_array(tl_data->max_rp,
						  sizeof(*telemetry->rp_num_indexes),
						  GFP_KERNEL);
	if (!telemetry->rp_num_indexes)
		goto err_free_tl;

	telemetry->regs_hist_buff = kmalloc_array(tl_data->num_hbuff,
						  sizeof(*telemetry->regs_hist_buff),
						  GFP_KERNEL);
	if (!telemetry->regs_hist_buff)
		goto err_free_rp_indexes;

	telemetry->regs_data = dma_alloc_coherent(dev, regs_sz,
						  &telemetry->regs_data_p,
						  GFP_KERNEL);
	if (!telemetry->regs_data)
		goto err_free_regs_hist_buff;

	for (i = 0; i < tl_data->num_hbuff; i++) {
		tl_data_regs = kzalloc_node(regs_sz, GFP_KERNEL, node);
		if (!tl_data_regs)
			goto err_free_dma;

		telemetry->regs_hist_buff[i] = tl_data_regs;
	}

	accel_dev->telemetry = telemetry;

	return 0;

err_free_dma:
	dma_free_coherent(dev, regs_sz, telemetry->regs_data,
			  telemetry->regs_data_p);
	while (i--)
		kfree(telemetry->regs_hist_buff[i]);
err_free_regs_hist_buff:
	kfree(telemetry->regs_hist_buff);
err_free_rp_indexes:
	kfree(telemetry->rp_num_indexes);
err_free_tl:
	kfree(telemetry);

	return -ENOMEM;
}
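
/* Release everything allocated by adf_tl_alloc_mem() and detach the context. */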
static void adf_tl_free_mem(struct adf_accel_dev *accel_dev)
{
	struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev);
	struct adf_telemetry *telemetry = accel_dev->telemetry;
	struct device *dev = &GET_DEV(accel_dev);
	size_t regs_sz = tl_data->layout_sz;
	unsigned int i;

	for (i = 0; i < tl_data->num_hbuff; i++)
		kfree(telemetry->regs_hist_buff[i]);

	dma_free_coherent(dev, regs_sz, telemetry->regs_data,
			  telemetry->regs_data_p);

	kfree(telemetry->regs_hist_buff);
	kfree(telemetry->rp_num_indexes);
	kfree(telemetry);

	accel_dev->telemetry = NULL;
}
static unsigned long get_next_timeout(void)
{
	return msecs_to_jiffies(ADF_TL_TIMER_INT_MS);
}
static void snapshot_regs(struct adf_telemetry *telemetry, size_t size)
{
	void *dst = telemetry->regs_hist_buff[telemetry->hb_num];
	void *src = telemetry->regs_data;

	memcpy(dst, src, size);
}
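
/*
 * Periodic worker: if the device-side message counter has advanced since the
 * last run, copy the shared telemetry region into the next history buffer.
 * The snapshot is repeated once if the counter changed mid-copy, then the
 * work re-arms itself with the next timeout.
 */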
static void tl_work_handler(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct adf_telemetry *telemetry;
	struct adf_tl_hw_data *tl_data;
	u32 msg_cnt, old_msg_cnt;
	size_t layout_sz;
	u32 *regs_data;
	size_t id;

	delayed_work = to_delayed_work(work);
	telemetry = container_of(delayed_work, struct adf_telemetry, work_ctx);
	tl_data = &GET_TL_DATA(telemetry->accel_dev);
	regs_data = telemetry->regs_data;

	id = tl_data->msg_cnt_off / sizeof(*regs_data);
	layout_sz = tl_data->layout_sz;

	if (!atomic_read(&telemetry->state)) {
		cancel_delayed_work_sync(&telemetry->work_ctx);
		return;
	}

	msg_cnt = regs_data[id];
	old_msg_cnt = msg_cnt;
	if (msg_cnt == telemetry->msg_cnt)
		goto out;

	mutex_lock(&telemetry->regs_hist_lock);

	snapshot_regs(telemetry, layout_sz);

	/* Check if data changed while updating it */
	msg_cnt = regs_data[id];
	if (old_msg_cnt != msg_cnt)
		snapshot_regs(telemetry, layout_sz);

	telemetry->msg_cnt = msg_cnt;
	telemetry->hb_num++;
	telemetry->hb_num %= telemetry->hbuffs;

	mutex_unlock(&telemetry->regs_hist_lock);

out:
	adf_misc_wq_queue_delayed_work(&telemetry->work_ctx, get_next_timeout());
}
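
/* Stop the snapshot worker and tell the firmware, via an admin message, to stop telemetry. */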
int adf_tl_halt(struct adf_accel_dev *accel_dev)
{
	struct adf_telemetry *telemetry = accel_dev->telemetry;
	struct device *dev = &GET_DEV(accel_dev);
	int ret;

	cancel_delayed_work_sync(&telemetry->work_ctx);
	atomic_set(&telemetry->state, 0);

	ret = adf_send_admin_tl_stop(accel_dev);
	if (ret)
		dev_err(dev, "failed to stop telemetry\n");

	return ret;
}
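
/*
 * Start telemetry: pass the DMA address of the shared region and the ring
 * pair selection to the firmware, then arm the periodic snapshot worker.
 * The requested state also sets the number of history buffers (hbuffs) to
 * rotate through; the slice count reported back is stored in slice_cnt.
 */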
int adf_tl_run(struct adf_accel_dev *accel_dev, int state)
{
	struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev);
	struct adf_telemetry *telemetry = accel_dev->telemetry;
	struct device *dev = &GET_DEV(accel_dev);
	size_t layout_sz = tl_data->layout_sz;
	int ret;

	ret = adf_send_admin_tl_start(accel_dev, telemetry->regs_data_p,
				      layout_sz, telemetry->rp_num_indexes,
				      &telemetry->slice_cnt);
	if (ret) {
		dev_err(dev, "failed to start telemetry\n");
		return ret;
	}

	telemetry->hbuffs = state;
	atomic_set(&telemetry->state, state);

	adf_misc_wq_queue_delayed_work(&telemetry->work_ctx, get_next_timeout());

	return 0;
}
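
/*
 * Validate the per-device telemetry layout, allocate the context and
 * initialize the locks, the delayed work and the ring pair index table
 * (all ring pairs start disabled).
 */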
int adf_tl_init(struct adf_accel_dev *accel_dev)
{
	struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev);
	u8 max_rp = GET_TL_DATA(accel_dev).max_rp;
	struct device *dev = &GET_DEV(accel_dev);
	struct adf_telemetry *telemetry;
	unsigned int i;
	int ret;

	ret = validate_tl_data(tl_data);
	if (ret)
		return ret;

	ret = adf_tl_alloc_mem(accel_dev);
	if (ret) {
		dev_err(dev, "failed to initialize: %d\n", ret);
		return ret;
	}

	telemetry = accel_dev->telemetry;
	telemetry->accel_dev = accel_dev;

	mutex_init(&telemetry->wr_lock);
	mutex_init(&telemetry->regs_hist_lock);
	INIT_DELAYED_WORK(&telemetry->work_ctx, tl_work_handler);

	for (i = 0; i < max_rp; i++)
		telemetry->rp_num_indexes[i] = ADF_TL_RP_REGS_DISABLED;

	return 0;
}
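
/* Keep the telemetry context only if the loaded firmware reports support for it. */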
int adf_tl_start(struct adf_accel_dev *accel_dev)
{
	struct device *dev = &GET_DEV(accel_dev);

	if (!accel_dev->telemetry)
		return -EOPNOTSUPP;

	if (!is_tl_supported(accel_dev)) {
		dev_info(dev, "feature not supported by FW\n");
		adf_tl_free_mem(accel_dev);
		return -EOPNOTSUPP;
	}

	return 0;
}
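
/* Halt telemetry if it is currently running. */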
void adf_tl_stop(struct adf_accel_dev *accel_dev)
{
	if (!accel_dev->telemetry)
		return;

	if (atomic_read(&accel_dev->telemetry->state))
		adf_tl_halt(accel_dev);
}
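
/* Free the telemetry context, if one was allocated. */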
void adf_tl_shutdown(struct adf_accel_dev *accel_dev)
{
	if (!accel_dev->telemetry)
		return;

	adf_tl_free_mem(accel_dev);
}