// SPDX-License-Identifier: GPL-2.0-only
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * SMC statistics netlink routines
 *
 * Copyright IBM Corp. 2021
 *
 * Author(s):  Guvenc Gulce
 */
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/ctype.h>
#include <linux/smc.h>
#include <net/genetlink.h>
#include <net/sock.h>

#include "smc_netlink.h"
#include "smc_stats.h"
21 int smc_stats_init(struct net *net)
23 net->smc.fback_rsn = kzalloc(sizeof(*net->smc.fback_rsn), GFP_KERNEL);
24 if (!net->smc.fback_rsn)
26 net->smc.smc_stats = alloc_percpu(struct smc_stats);
27 if (!net->smc.smc_stats)
29 mutex_init(&net->smc.mutex_fback_rsn);
33 kfree(net->smc.fback_rsn);
38 void smc_stats_exit(struct net *net)
40 kfree(net->smc.fback_rsn);
41 if (net->smc.smc_stats)
42 free_percpu(net->smc.smc_stats);
45 static int smc_nl_fill_stats_rmb_data(struct sk_buff *skb,
46 struct smc_stats *stats, int tech,
49 struct smc_stats_rmbcnt *stats_rmb_cnt;
52 if (type == SMC_NLA_STATS_T_TX_RMB_STATS)
53 stats_rmb_cnt = &stats->smc[tech].rmb_tx;
55 stats_rmb_cnt = &stats->smc[tech].rmb_rx;
57 attrs = nla_nest_start(skb, type);
60 if (nla_put_u64_64bit(skb, SMC_NLA_STATS_RMB_REUSE_CNT,
61 stats_rmb_cnt->reuse_cnt,
62 SMC_NLA_STATS_RMB_PAD))
64 if (nla_put_u64_64bit(skb, SMC_NLA_STATS_RMB_SIZE_SM_PEER_CNT,
65 stats_rmb_cnt->buf_size_small_peer_cnt,
66 SMC_NLA_STATS_RMB_PAD))
68 if (nla_put_u64_64bit(skb, SMC_NLA_STATS_RMB_SIZE_SM_CNT,
69 stats_rmb_cnt->buf_size_small_cnt,
70 SMC_NLA_STATS_RMB_PAD))
72 if (nla_put_u64_64bit(skb, SMC_NLA_STATS_RMB_FULL_PEER_CNT,
73 stats_rmb_cnt->buf_full_peer_cnt,
74 SMC_NLA_STATS_RMB_PAD))
76 if (nla_put_u64_64bit(skb, SMC_NLA_STATS_RMB_FULL_CNT,
77 stats_rmb_cnt->buf_full_cnt,
78 SMC_NLA_STATS_RMB_PAD))
80 if (nla_put_u64_64bit(skb, SMC_NLA_STATS_RMB_ALLOC_CNT,
81 stats_rmb_cnt->alloc_cnt,
82 SMC_NLA_STATS_RMB_PAD))
84 if (nla_put_u64_64bit(skb, SMC_NLA_STATS_RMB_DGRADE_CNT,
85 stats_rmb_cnt->dgrade_cnt,
86 SMC_NLA_STATS_RMB_PAD))
89 nla_nest_end(skb, attrs);
93 nla_nest_cancel(skb, attrs);
98 static int smc_nl_fill_stats_bufsize_data(struct sk_buff *skb,
99 struct smc_stats *stats, int tech,
102 struct smc_stats_memsize *stats_pload;
103 struct nlattr *attrs;
105 if (type == SMC_NLA_STATS_T_TXPLOAD_SIZE)
106 stats_pload = &stats->smc[tech].tx_pd;
107 else if (type == SMC_NLA_STATS_T_RXPLOAD_SIZE)
108 stats_pload = &stats->smc[tech].rx_pd;
109 else if (type == SMC_NLA_STATS_T_TX_RMB_SIZE)
110 stats_pload = &stats->smc[tech].tx_rmbsize;
111 else if (type == SMC_NLA_STATS_T_RX_RMB_SIZE)
112 stats_pload = &stats->smc[tech].rx_rmbsize;
116 attrs = nla_nest_start(skb, type);
119 if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_8K,
120 stats_pload->buf[SMC_BUF_8K],
121 SMC_NLA_STATS_PLOAD_PAD))
123 if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_16K,
124 stats_pload->buf[SMC_BUF_16K],
125 SMC_NLA_STATS_PLOAD_PAD))
127 if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_32K,
128 stats_pload->buf[SMC_BUF_32K],
129 SMC_NLA_STATS_PLOAD_PAD))
131 if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_64K,
132 stats_pload->buf[SMC_BUF_64K],
133 SMC_NLA_STATS_PLOAD_PAD))
135 if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_128K,
136 stats_pload->buf[SMC_BUF_128K],
137 SMC_NLA_STATS_PLOAD_PAD))
139 if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_256K,
140 stats_pload->buf[SMC_BUF_256K],
141 SMC_NLA_STATS_PLOAD_PAD))
143 if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_512K,
144 stats_pload->buf[SMC_BUF_512K],
145 SMC_NLA_STATS_PLOAD_PAD))
147 if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_1024K,
148 stats_pload->buf[SMC_BUF_1024K],
149 SMC_NLA_STATS_PLOAD_PAD))
151 if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_G_1024K,
152 stats_pload->buf[SMC_BUF_G_1024K],
153 SMC_NLA_STATS_PLOAD_PAD))
156 nla_nest_end(skb, attrs);
160 nla_nest_cancel(skb, attrs);
165 static int smc_nl_fill_stats_tech_data(struct sk_buff *skb,
166 struct smc_stats *stats, int tech)
168 struct smc_stats_tech *smc_tech;
169 struct nlattr *attrs;
171 smc_tech = &stats->smc[tech];
172 if (tech == SMC_TYPE_D)
173 attrs = nla_nest_start(skb, SMC_NLA_STATS_SMCD_TECH);
175 attrs = nla_nest_start(skb, SMC_NLA_STATS_SMCR_TECH);
179 if (smc_nl_fill_stats_rmb_data(skb, stats, tech,
180 SMC_NLA_STATS_T_TX_RMB_STATS))
182 if (smc_nl_fill_stats_rmb_data(skb, stats, tech,
183 SMC_NLA_STATS_T_RX_RMB_STATS))
185 if (smc_nl_fill_stats_bufsize_data(skb, stats, tech,
186 SMC_NLA_STATS_T_TXPLOAD_SIZE))
188 if (smc_nl_fill_stats_bufsize_data(skb, stats, tech,
189 SMC_NLA_STATS_T_RXPLOAD_SIZE))
191 if (smc_nl_fill_stats_bufsize_data(skb, stats, tech,
192 SMC_NLA_STATS_T_TX_RMB_SIZE))
194 if (smc_nl_fill_stats_bufsize_data(skb, stats, tech,
195 SMC_NLA_STATS_T_RX_RMB_SIZE))
197 if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_CLNT_V1_SUCC,
198 smc_tech->clnt_v1_succ_cnt,
201 if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_CLNT_V2_SUCC,
202 smc_tech->clnt_v2_succ_cnt,
205 if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_SRV_V1_SUCC,
206 smc_tech->srv_v1_succ_cnt,
209 if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_SRV_V2_SUCC,
210 smc_tech->srv_v2_succ_cnt,
213 if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_RX_BYTES,
217 if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_TX_BYTES,
221 if (nla_put_uint(skb, SMC_NLA_STATS_T_RX_RMB_USAGE,
222 smc_tech->rx_rmbuse))
224 if (nla_put_uint(skb, SMC_NLA_STATS_T_TX_RMB_USAGE,
225 smc_tech->tx_rmbuse))
227 if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_RX_CNT,
231 if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_TX_CNT,
235 if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_SENDPAGE_CNT,
239 if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_CORK_CNT,
243 if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_NDLY_CNT,
247 if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_SPLICE_CNT,
248 smc_tech->splice_cnt,
251 if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_URG_DATA_CNT,
252 smc_tech->urg_data_cnt,
256 nla_nest_end(skb, attrs);
260 nla_nest_cancel(skb, attrs);
265 int smc_nl_get_stats(struct sk_buff *skb,
266 struct netlink_callback *cb)
268 struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
269 struct net *net = sock_net(skb->sk);
270 struct smc_stats *stats;
271 struct nlattr *attrs;
279 nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
280 &smc_gen_nl_family, NLM_F_MULTI,
281 SMC_NETLINK_GET_STATS);
285 attrs = nla_nest_start(skb, SMC_GEN_STATS);
288 stats = kzalloc(sizeof(*stats), GFP_KERNEL);
291 size = sizeof(*stats) / sizeof(u64);
292 for_each_possible_cpu(cpu) {
293 src = (u64 *)per_cpu_ptr(net->smc.smc_stats, cpu);
295 for (i = 0; i < size; i++)
296 *(sum++) += *(src++);
298 if (smc_nl_fill_stats_tech_data(skb, stats, SMC_TYPE_D))
300 if (smc_nl_fill_stats_tech_data(skb, stats, SMC_TYPE_R))
302 if (nla_put_u64_64bit(skb, SMC_NLA_STATS_CLNT_HS_ERR_CNT,
303 stats->clnt_hshake_err_cnt,
306 if (nla_put_u64_64bit(skb, SMC_NLA_STATS_SRV_HS_ERR_CNT,
307 stats->srv_hshake_err_cnt,
311 nla_nest_end(skb, attrs);
312 genlmsg_end(skb, nlh);
320 nla_nest_cancel(skb, attrs);
322 genlmsg_cancel(skb, nlh);
327 static int smc_nl_get_fback_details(struct sk_buff *skb,
328 struct netlink_callback *cb, int pos,
331 struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
332 struct net *net = sock_net(skb->sk);
333 int cnt_reported = cb_ctx->pos[2];
334 struct smc_stats_fback *trgt_arr;
335 struct nlattr *attrs;
340 trgt_arr = &net->smc.fback_rsn->srv[0];
342 trgt_arr = &net->smc.fback_rsn->clnt[0];
343 if (!trgt_arr[pos].fback_code)
345 nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
346 &smc_gen_nl_family, NLM_F_MULTI,
347 SMC_NETLINK_GET_FBACK_STATS);
350 attrs = nla_nest_start(skb, SMC_GEN_FBACK_STATS);
353 if (nla_put_u8(skb, SMC_NLA_FBACK_STATS_TYPE, is_srv))
356 if (nla_put_u64_64bit(skb, SMC_NLA_FBACK_STATS_SRV_CNT,
357 net->smc.fback_rsn->srv_fback_cnt,
358 SMC_NLA_FBACK_STATS_PAD))
360 if (nla_put_u64_64bit(skb, SMC_NLA_FBACK_STATS_CLNT_CNT,
361 net->smc.fback_rsn->clnt_fback_cnt,
362 SMC_NLA_FBACK_STATS_PAD))
367 if (nla_put_u32(skb, SMC_NLA_FBACK_STATS_RSN_CODE,
368 trgt_arr[pos].fback_code))
370 if (nla_put_u16(skb, SMC_NLA_FBACK_STATS_RSN_CNT,
371 trgt_arr[pos].count))
374 cb_ctx->pos[2] = cnt_reported;
375 nla_nest_end(skb, attrs);
376 genlmsg_end(skb, nlh);
380 nla_nest_cancel(skb, attrs);
382 genlmsg_cancel(skb, nlh);
387 int smc_nl_get_fback_stats(struct sk_buff *skb, struct netlink_callback *cb)
389 struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
390 struct net *net = sock_net(skb->sk);
391 int rc_srv = 0, rc_clnt = 0, k;
392 int skip_serv = cb_ctx->pos[1];
393 int snum = cb_ctx->pos[0];
396 mutex_lock(&net->smc.mutex_fback_rsn);
397 for (k = 0; k < SMC_MAX_FBACK_RSN_CNT; k++) {
401 rc_srv = smc_nl_get_fback_details(skb, cb, k, is_srv);
402 if (rc_srv && rc_srv != -ENODATA)
407 rc_clnt = smc_nl_get_fback_details(skb, cb, k, !is_srv);
408 if (rc_clnt && rc_clnt != -ENODATA) {
412 if (rc_clnt == -ENODATA && rc_srv == -ENODATA)
415 mutex_unlock(&net->smc.mutex_fback_rsn);
416 cb_ctx->pos[1] = skip_serv;