/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/ceph/ceph_debug.h>

#include <linux/types.h>
#include <linux/percpu_counter.h>
#include <linux/math64.h>

#include "metric.h"
#include "mds_client.h"
static void ktime_to_ceph_timespec(struct ceph_timespec *ts, ktime_t val)
{
	struct timespec64 t = ktime_to_timespec64(val);
	ceph_encode_timespec64(ts, &t);
}
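/*
 * Build a CEPH_MSG_CLIENT_METRICS message containing every metric record
 * this client tracks and send it to the given MDS session.  Returns false
 * if the MDS rank is not active yet or the message could not be allocated.
 */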
static bool ceph_mdsc_send_metrics(struct ceph_mds_client *mdsc,
				   struct ceph_mds_session *s)
{
	struct ceph_metric_head *head;
	struct ceph_metric_cap *cap;
	struct ceph_metric_read_latency *read;
	struct ceph_metric_write_latency *write;
	struct ceph_metric_metadata_latency *meta;
	struct ceph_metric_dlease *dlease;
	struct ceph_opened_files *files;
	struct ceph_pinned_icaps *icaps;
	struct ceph_opened_inodes *inodes;
	struct ceph_read_io_size *rsize;
	struct ceph_write_io_size *wsize;
	struct ceph_client_metric *m = &mdsc->metric;
	u64 nr_caps = atomic64_read(&m->total_caps);
	u32 header_len = sizeof(struct ceph_metric_header);
	struct ceph_client *cl = mdsc->fsc->client;
	struct ceph_msg *msg;
	s64 sum;
	s32 items = 0;
	s32 len;
	/* Do not send the metrics until the MDS rank is ready */
	mutex_lock(&mdsc->mutex);
	if (ceph_mdsmap_get_state(mdsc->mdsmap, s->s_mds) != CEPH_MDS_STATE_ACTIVE) {
		mutex_unlock(&mdsc->mutex);
		return false;
	}
	mutex_unlock(&mdsc->mutex);
	len = sizeof(*head) + sizeof(*cap) + sizeof(*read) + sizeof(*write)
	      + sizeof(*meta) + sizeof(*dlease) + sizeof(*files)
	      + sizeof(*icaps) + sizeof(*inodes) + sizeof(*rsize)
	      + sizeof(*wsize);
	msg = ceph_msg_new(CEPH_MSG_CLIENT_METRICS, len, GFP_NOFS, true);
	if (!msg) {
		pr_err_client(cl, "to mds%d, failed to allocate message\n",
			      s->s_mds);
		return false;
	}

	head = msg->front.iov_base;
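	/*
	 * The metric records are packed back-to-back right after the head.
	 * Each record starts with a ceph_metric_header whose data_len covers
	 * only the payload after the header; items counts how many records
	 * were encoded so head->num can be filled in at the end.
	 */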
	/* encode the cap metric */
	cap = (struct ceph_metric_cap *)(head + 1);
	cap->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_CAP_INFO);
	cap->header.ver = 1;
	cap->header.compat = 1;
	cap->header.data_len = cpu_to_le32(sizeof(*cap) - header_len);
	cap->hit = cpu_to_le64(percpu_counter_sum(&m->i_caps_hit));
	cap->mis = cpu_to_le64(percpu_counter_sum(&m->i_caps_mis));
	cap->total = cpu_to_le64(nr_caps);
	items++;

	/* encode the read latency metric */
	read = (struct ceph_metric_read_latency *)(cap + 1);
	read->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_READ_LATENCY);
	read->header.ver = 2;
	read->header.compat = 1;
	read->header.data_len = cpu_to_le32(sizeof(*read) - header_len);
	sum = m->metric[METRIC_READ].latency_sum;
	ktime_to_ceph_timespec(&read->lat, sum);
	ktime_to_ceph_timespec(&read->avg, m->metric[METRIC_READ].latency_avg);
	read->sq_sum = cpu_to_le64(m->metric[METRIC_READ].latency_sq_sum);
	read->count = cpu_to_le64(m->metric[METRIC_READ].total);
	items++;

	/* encode the write latency metric */
	write = (struct ceph_metric_write_latency *)(read + 1);
	write->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_WRITE_LATENCY);
	write->header.ver = 2;
	write->header.compat = 1;
	write->header.data_len = cpu_to_le32(sizeof(*write) - header_len);
	sum = m->metric[METRIC_WRITE].latency_sum;
	ktime_to_ceph_timespec(&write->lat, sum);
	ktime_to_ceph_timespec(&write->avg, m->metric[METRIC_WRITE].latency_avg);
	write->sq_sum = cpu_to_le64(m->metric[METRIC_WRITE].latency_sq_sum);
	write->count = cpu_to_le64(m->metric[METRIC_WRITE].total);
	items++;

	/* encode the metadata latency metric */
	meta = (struct ceph_metric_metadata_latency *)(write + 1);
	meta->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_METADATA_LATENCY);
	meta->header.ver = 2;
	meta->header.compat = 1;
	meta->header.data_len = cpu_to_le32(sizeof(*meta) - header_len);
	sum = m->metric[METRIC_METADATA].latency_sum;
	ktime_to_ceph_timespec(&meta->lat, sum);
	ktime_to_ceph_timespec(&meta->avg, m->metric[METRIC_METADATA].latency_avg);
	meta->sq_sum = cpu_to_le64(m->metric[METRIC_METADATA].latency_sq_sum);
	meta->count = cpu_to_le64(m->metric[METRIC_METADATA].total);
	items++;

	/* encode the dentry lease metric */
	dlease = (struct ceph_metric_dlease *)(meta + 1);
	dlease->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_DENTRY_LEASE);
	dlease->header.ver = 1;
	dlease->header.compat = 1;
	dlease->header.data_len = cpu_to_le32(sizeof(*dlease) - header_len);
	dlease->hit = cpu_to_le64(percpu_counter_sum(&m->d_lease_hit));
	dlease->mis = cpu_to_le64(percpu_counter_sum(&m->d_lease_mis));
	dlease->total = cpu_to_le64(atomic64_read(&m->total_dentries));
	items++;

	sum = percpu_counter_sum(&m->total_inodes);

	/* encode the opened files metric */
	files = (struct ceph_opened_files *)(dlease + 1);
	files->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_OPENED_FILES);
	files->header.ver = 1;
	files->header.compat = 1;
	files->header.data_len = cpu_to_le32(sizeof(*files) - header_len);
	files->opened_files = cpu_to_le64(atomic64_read(&m->opened_files));
	files->total = cpu_to_le64(sum);
	items++;

	/* encode the pinned icaps metric */
	icaps = (struct ceph_pinned_icaps *)(files + 1);
	icaps->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_PINNED_ICAPS);
	icaps->header.ver = 1;
	icaps->header.compat = 1;
	icaps->header.data_len = cpu_to_le32(sizeof(*icaps) - header_len);
	icaps->pinned_icaps = cpu_to_le64(nr_caps);
	icaps->total = cpu_to_le64(sum);
	items++;

	/* encode the opened inodes metric */
	inodes = (struct ceph_opened_inodes *)(icaps + 1);
	inodes->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_OPENED_INODES);
	inodes->header.ver = 1;
	inodes->header.compat = 1;
	inodes->header.data_len = cpu_to_le32(sizeof(*inodes) - header_len);
	inodes->opened_inodes = cpu_to_le64(percpu_counter_sum(&m->opened_inodes));
	inodes->total = cpu_to_le64(sum);
	items++;

	/* encode the read io size metric */
	rsize = (struct ceph_read_io_size *)(inodes + 1);
	rsize->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_READ_IO_SIZES);
	rsize->header.ver = 1;
	rsize->header.compat = 1;
	rsize->header.data_len = cpu_to_le32(sizeof(*rsize) - header_len);
	rsize->total_ops = cpu_to_le64(m->metric[METRIC_READ].total);
	rsize->total_size = cpu_to_le64(m->metric[METRIC_READ].size_sum);
	items++;

	/* encode the write io size metric */
	wsize = (struct ceph_write_io_size *)(rsize + 1);
	wsize->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_WRITE_IO_SIZES);
	wsize->header.ver = 1;
	wsize->header.compat = 1;
	wsize->header.data_len = cpu_to_le32(sizeof(*wsize) - header_len);
	wsize->total_ops = cpu_to_le64(m->metric[METRIC_WRITE].total);
	wsize->total_size = cpu_to_le64(m->metric[METRIC_WRITE].size_sum);
	items++;

	put_unaligned_le32(items, &head->num);
	msg->front.iov_len = len;
	msg->hdr.version = cpu_to_le16(1);
	msg->hdr.compat_version = cpu_to_le16(1);
	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
	ceph_con_send(&s->s_con, msg);

	return true;
}

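/*
 * Find an active session whose MDS advertises CEPHFS_FEATURE_METRIC_COLLECT
 * and cache it in mdsc->metric.session; sessions that cannot receive metrics
 * are skipped and their references dropped.
 */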
static void metric_get_session(struct ceph_mds_client *mdsc)
{
	struct ceph_mds_session *s;
	int i;

	mutex_lock(&mdsc->mutex);
	for (i = 0; i < mdsc->max_sessions; i++) {
		s = __ceph_lookup_mds_session(mdsc, i);
		if (!s)
			continue;

		/*
		 * Skip the session if the MDS doesn't support metric
		 * collection, otherwise the MDS will close the session's
		 * socket connection as soon as it receives this message.
		 */
		if (check_session_state(s) &&
		    test_bit(CEPHFS_FEATURE_METRIC_COLLECT, &s->s_features)) {
			mdsc->metric.session = s;
			break;
		}

		ceph_put_mds_session(s);
	}
	mutex_unlock(&mdsc->mutex);
}

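/*
 * Periodic worker: make sure we still hold a usable session that supports
 * metric collection, push the current metrics to it, and re-arm the work
 * for the next interval.
 */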
static void metric_delayed_work(struct work_struct *work)
{
	struct ceph_client_metric *m =
		container_of(work, struct ceph_client_metric, delayed_work.work);
	struct ceph_mds_client *mdsc =
		container_of(m, struct ceph_mds_client, metric);

	if (mdsc->stopping || disable_send_metrics)
		return;

	if (!m->session || !check_session_state(m->session)) {
		if (m->session) {
			ceph_put_mds_session(m->session);
			m->session = NULL;
		}
		metric_get_session(mdsc);
	}
	if (m->session) {
		ceph_mdsc_send_metrics(mdsc, m->session);
		metric_schedule_delayed(m);
	}
}

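/*
 * Initialize every counter and latency bucket for a client.  The delayed
 * work is only set up here; it is armed separately via
 * metric_schedule_delayed().
 */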
int ceph_metric_init(struct ceph_client_metric *m)
{
	struct ceph_metric *metric;
	int ret, i;

	if (!m)
		return -EINVAL;

	atomic64_set(&m->total_dentries, 0);
	ret = percpu_counter_init(&m->d_lease_hit, 0, GFP_KERNEL);
	if (ret)
		return ret;

	ret = percpu_counter_init(&m->d_lease_mis, 0, GFP_KERNEL);
	if (ret)
		goto err_d_lease_mis;

	atomic64_set(&m->total_caps, 0);
	ret = percpu_counter_init(&m->i_caps_hit, 0, GFP_KERNEL);
	if (ret)
		goto err_i_caps_hit;

	ret = percpu_counter_init(&m->i_caps_mis, 0, GFP_KERNEL);
	if (ret)
		goto err_i_caps_mis;

	for (i = 0; i < METRIC_MAX; i++) {
		metric = &m->metric[i];
		spin_lock_init(&metric->lock);
		metric->size_sum = 0;
		metric->size_min = U64_MAX;
		metric->size_max = 0;
		metric->total = 0;
		metric->latency_sum = 0;
		metric->latency_avg = 0;
		metric->latency_sq_sum = 0;
		metric->latency_min = KTIME_MAX;
		metric->latency_max = 0;
	}

	atomic64_set(&m->opened_files, 0);
	ret = percpu_counter_init(&m->opened_inodes, 0, GFP_KERNEL);
	if (ret)
		goto err_opened_inodes;
	ret = percpu_counter_init(&m->total_inodes, 0, GFP_KERNEL);
	if (ret)
		goto err_total_inodes;

	m->session = NULL;
	INIT_DELAYED_WORK(&m->delayed_work, metric_delayed_work);

	return 0;

err_total_inodes:
	percpu_counter_destroy(&m->opened_inodes);
err_opened_inodes:
	percpu_counter_destroy(&m->i_caps_mis);
err_i_caps_mis:
	percpu_counter_destroy(&m->i_caps_hit);
err_i_caps_hit:
	percpu_counter_destroy(&m->d_lease_mis);
err_d_lease_mis:
	percpu_counter_destroy(&m->d_lease_hit);

	return ret;
}

void ceph_metric_destroy(struct ceph_client_metric *m)
{
	if (!m)
		return;

	cancel_delayed_work_sync(&m->delayed_work);

	percpu_counter_destroy(&m->total_inodes);
	percpu_counter_destroy(&m->opened_inodes);
	percpu_counter_destroy(&m->i_caps_mis);
	percpu_counter_destroy(&m->i_caps_hit);
	percpu_counter_destroy(&m->d_lease_mis);
	percpu_counter_destroy(&m->d_lease_hit);

	ceph_put_mds_session(m->session);
}

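/* Fold a new sample into the running min/max; "new" is evaluated more than once. */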
#define METRIC_UPDATE_MIN_MAX(min, max, new)	\
{						\
	if (unlikely(new < min))		\
		min = new;			\
	if (unlikely(new > max))		\
		max = new;			\
}

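/*
 * Welford-style online update: fold one latency sample into the running
 * mean and the running sum of squared deviations, so the standard deviation
 * can be derived later without storing individual samples.
 */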
static inline void __update_mean_and_stdev(ktime_t total, ktime_t *lavg,
					   ktime_t *sq_sump, ktime_t lat)
{
	ktime_t avg;

	if (unlikely(total == 1)) {
		*lavg = lat;
	} else {
		/* the sq is (lat - old_avg) * (lat - new_avg) */
		avg = *lavg + div64_s64(lat - *lavg, total);
		*sq_sump += (lat - *lavg)*(lat - avg);
		*lavg = avg;
	}
}

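/*
 * Record one completed operation in the given metric bucket: bump the op
 * count and size/latency sums, refresh the min/max trackers, and update the
 * running mean and squared-deviation sum.  Failures other than -ENOENT and
 * -ETIMEDOUT are not counted.
 */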
void ceph_update_metrics(struct ceph_metric *m,
			 ktime_t r_start, ktime_t r_end,
			 unsigned int size, int rc)
{
	ktime_t lat = ktime_sub(r_end, r_start);
	ktime_t total;

	if (unlikely(rc < 0 && rc != -ENOENT && rc != -ETIMEDOUT))
		return;

	spin_lock(&m->lock);
	total = ++m->total;
	m->size_sum += size;
	METRIC_UPDATE_MIN_MAX(m->size_min, m->size_max, size);
	m->latency_sum += lat;
	METRIC_UPDATE_MIN_MAX(m->latency_min, m->latency_max, lat);
	__update_mean_and_stdev(total, &m->latency_avg, &m->latency_sq_sum,
				lat);
	spin_unlock(&m->lock);
}