net/smc/smc_stats.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * SMC statistics netlink routines
 *
 * Copyright IBM Corp. 2021
 *
 * Author(s):  Guvenc Gulce
 */
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/ctype.h>
#include <linux/smc.h>
#include <net/genetlink.h>
#include <net/sock.h>
#include "smc_netlink.h"
#include "smc_stats.h"

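/* Allocate the per-netns fallback reason table and the per-CPU statistics
 * counters, and set up the mutex protecting the fallback reason table.
 */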
int smc_stats_init(struct net *net)
{
        net->smc.fback_rsn = kzalloc(sizeof(*net->smc.fback_rsn), GFP_KERNEL);
        if (!net->smc.fback_rsn)
                goto err_fback;
        net->smc.smc_stats = alloc_percpu(struct smc_stats);
        if (!net->smc.smc_stats)
                goto err_stats;
        mutex_init(&net->smc.mutex_fback_rsn);
        return 0;

err_stats:
        kfree(net->smc.fback_rsn);
err_fback:
        return -ENOMEM;
}

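/* Free the fallback reason table and the per-CPU statistics counters. */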
void smc_stats_exit(struct net *net)
{
        kfree(net->smc.fback_rsn);
        if (net->smc.smc_stats)
                free_percpu(net->smc.smc_stats);
}

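/* Nest the TX or RX RMB counters of one SMC technology (SMC-R or SMC-D)
 * into the netlink message.
 */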
static int smc_nl_fill_stats_rmb_data(struct sk_buff *skb,
                                      struct smc_stats *stats, int tech,
                                      int type)
{
        struct smc_stats_rmbcnt *stats_rmb_cnt;
        struct nlattr *attrs;

        if (type == SMC_NLA_STATS_T_TX_RMB_STATS)
                stats_rmb_cnt = &stats->smc[tech].rmb_tx;
        else
                stats_rmb_cnt = &stats->smc[tech].rmb_rx;

        attrs = nla_nest_start(skb, type);
        if (!attrs)
                goto errout;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_RMB_REUSE_CNT,
                              stats_rmb_cnt->reuse_cnt,
                              SMC_NLA_STATS_RMB_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_RMB_SIZE_SM_PEER_CNT,
                              stats_rmb_cnt->buf_size_small_peer_cnt,
                              SMC_NLA_STATS_RMB_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_RMB_SIZE_SM_CNT,
                              stats_rmb_cnt->buf_size_small_cnt,
                              SMC_NLA_STATS_RMB_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_RMB_FULL_PEER_CNT,
                              stats_rmb_cnt->buf_full_peer_cnt,
                              SMC_NLA_STATS_RMB_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_RMB_FULL_CNT,
                              stats_rmb_cnt->buf_full_cnt,
                              SMC_NLA_STATS_RMB_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_RMB_ALLOC_CNT,
                              stats_rmb_cnt->alloc_cnt,
                              SMC_NLA_STATS_RMB_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_RMB_DGRADE_CNT,
                              stats_rmb_cnt->dgrade_cnt,
                              SMC_NLA_STATS_RMB_PAD))
                goto errattr;

        nla_nest_end(skb, attrs);
        return 0;

errattr:
        nla_nest_cancel(skb, attrs);
errout:
        return -EMSGSIZE;
}

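/* Nest one buffer size histogram (payload or RMB size, TX or RX) into the
 * netlink message, one 64-bit counter per size bucket from 8K up to >1024K.
 */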
static int smc_nl_fill_stats_bufsize_data(struct sk_buff *skb,
                                          struct smc_stats *stats, int tech,
                                          int type)
{
        struct smc_stats_memsize *stats_pload;
        struct nlattr *attrs;

        if (type == SMC_NLA_STATS_T_TXPLOAD_SIZE)
                stats_pload = &stats->smc[tech].tx_pd;
        else if (type == SMC_NLA_STATS_T_RXPLOAD_SIZE)
                stats_pload = &stats->smc[tech].rx_pd;
        else if (type == SMC_NLA_STATS_T_TX_RMB_SIZE)
                stats_pload = &stats->smc[tech].tx_rmbsize;
        else if (type == SMC_NLA_STATS_T_RX_RMB_SIZE)
                stats_pload = &stats->smc[tech].rx_rmbsize;
        else
                goto errout;

        attrs = nla_nest_start(skb, type);
        if (!attrs)
                goto errout;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_8K,
                              stats_pload->buf[SMC_BUF_8K],
                              SMC_NLA_STATS_PLOAD_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_16K,
                              stats_pload->buf[SMC_BUF_16K],
                              SMC_NLA_STATS_PLOAD_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_32K,
                              stats_pload->buf[SMC_BUF_32K],
                              SMC_NLA_STATS_PLOAD_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_64K,
                              stats_pload->buf[SMC_BUF_64K],
                              SMC_NLA_STATS_PLOAD_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_128K,
                              stats_pload->buf[SMC_BUF_128K],
                              SMC_NLA_STATS_PLOAD_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_256K,
                              stats_pload->buf[SMC_BUF_256K],
                              SMC_NLA_STATS_PLOAD_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_512K,
                              stats_pload->buf[SMC_BUF_512K],
                              SMC_NLA_STATS_PLOAD_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_1024K,
                              stats_pload->buf[SMC_BUF_1024K],
                              SMC_NLA_STATS_PLOAD_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_G_1024K,
                              stats_pload->buf[SMC_BUF_G_1024K],
                              SMC_NLA_STATS_PLOAD_PAD))
                goto errattr;

        nla_nest_end(skb, attrs);
        return 0;

errattr:
        nla_nest_cancel(skb, attrs);
errout:
        return -EMSGSIZE;
}

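/* Nest all counters of one SMC technology under SMC_NLA_STATS_SMCD_TECH or
 * SMC_NLA_STATS_SMCR_TECH: RMB and buffer size statistics, connection
 * success counters and the various I/O counters.
 */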
static int smc_nl_fill_stats_tech_data(struct sk_buff *skb,
                                       struct smc_stats *stats, int tech)
{
        struct smc_stats_tech *smc_tech;
        struct nlattr *attrs;

        smc_tech = &stats->smc[tech];
        if (tech == SMC_TYPE_D)
                attrs = nla_nest_start(skb, SMC_NLA_STATS_SMCD_TECH);
        else
                attrs = nla_nest_start(skb, SMC_NLA_STATS_SMCR_TECH);

        if (!attrs)
                goto errout;
        if (smc_nl_fill_stats_rmb_data(skb, stats, tech,
                                       SMC_NLA_STATS_T_TX_RMB_STATS))
                goto errattr;
        if (smc_nl_fill_stats_rmb_data(skb, stats, tech,
                                       SMC_NLA_STATS_T_RX_RMB_STATS))
                goto errattr;
        if (smc_nl_fill_stats_bufsize_data(skb, stats, tech,
                                           SMC_NLA_STATS_T_TXPLOAD_SIZE))
                goto errattr;
        if (smc_nl_fill_stats_bufsize_data(skb, stats, tech,
                                           SMC_NLA_STATS_T_RXPLOAD_SIZE))
                goto errattr;
        if (smc_nl_fill_stats_bufsize_data(skb, stats, tech,
                                           SMC_NLA_STATS_T_TX_RMB_SIZE))
                goto errattr;
        if (smc_nl_fill_stats_bufsize_data(skb, stats, tech,
                                           SMC_NLA_STATS_T_RX_RMB_SIZE))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_CLNT_V1_SUCC,
                              smc_tech->clnt_v1_succ_cnt,
                              SMC_NLA_STATS_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_CLNT_V2_SUCC,
                              smc_tech->clnt_v2_succ_cnt,
                              SMC_NLA_STATS_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_SRV_V1_SUCC,
                              smc_tech->srv_v1_succ_cnt,
                              SMC_NLA_STATS_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_SRV_V2_SUCC,
                              smc_tech->srv_v2_succ_cnt,
                              SMC_NLA_STATS_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_RX_BYTES,
                              smc_tech->rx_bytes,
                              SMC_NLA_STATS_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_TX_BYTES,
                              smc_tech->tx_bytes,
                              SMC_NLA_STATS_PAD))
                goto errattr;
        if (nla_put_uint(skb, SMC_NLA_STATS_T_RX_RMB_USAGE,
                         smc_tech->rx_rmbuse))
                goto errattr;
        if (nla_put_uint(skb, SMC_NLA_STATS_T_TX_RMB_USAGE,
                         smc_tech->tx_rmbuse))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_RX_CNT,
                              smc_tech->rx_cnt,
                              SMC_NLA_STATS_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_TX_CNT,
                              smc_tech->tx_cnt,
                              SMC_NLA_STATS_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_SENDPAGE_CNT,
                              0,
                              SMC_NLA_STATS_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_CORK_CNT,
                              smc_tech->cork_cnt,
                              SMC_NLA_STATS_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_NDLY_CNT,
                              smc_tech->ndly_cnt,
                              SMC_NLA_STATS_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_SPLICE_CNT,
                              smc_tech->splice_cnt,
                              SMC_NLA_STATS_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_URG_DATA_CNT,
                              smc_tech->urg_data_cnt,
                              SMC_NLA_STATS_PAD))
                goto errattr;

        nla_nest_end(skb, attrs);
        return 0;

errattr:
        nla_nest_cancel(skb, attrs);
errout:
        return -EMSGSIZE;
}

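/* Netlink dump handler for SMC_NETLINK_GET_STATS: sum the per-CPU counters
 * into a temporary struct smc_stats and emit them as a single message;
 * cb_ctx->pos[0] marks that the dump has already been delivered.
 */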
int smc_nl_get_stats(struct sk_buff *skb,
                     struct netlink_callback *cb)
{
        struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
        struct net *net = sock_net(skb->sk);
        struct smc_stats *stats;
        struct nlattr *attrs;
        int cpu, i, size;
        void *nlh;
        u64 *src;
        u64 *sum;

        if (cb_ctx->pos[0])
                goto errmsg;
        nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
                          &smc_gen_nl_family, NLM_F_MULTI,
                          SMC_NETLINK_GET_STATS);
        if (!nlh)
                goto errmsg;

        attrs = nla_nest_start(skb, SMC_GEN_STATS);
        if (!attrs)
                goto errnest;
        stats = kzalloc(sizeof(*stats), GFP_KERNEL);
        if (!stats)
                goto erralloc;
        size = sizeof(*stats) / sizeof(u64);
        for_each_possible_cpu(cpu) {
                src = (u64 *)per_cpu_ptr(net->smc.smc_stats, cpu);
                sum = (u64 *)stats;
                for (i = 0; i < size; i++)
                        *(sum++) += *(src++);
        }
        if (smc_nl_fill_stats_tech_data(skb, stats, SMC_TYPE_D))
                goto errattr;
        if (smc_nl_fill_stats_tech_data(skb, stats, SMC_TYPE_R))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_CLNT_HS_ERR_CNT,
                              stats->clnt_hshake_err_cnt,
                              SMC_NLA_STATS_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_SRV_HS_ERR_CNT,
                              stats->srv_hshake_err_cnt,
                              SMC_NLA_STATS_PAD))
                goto errattr;

        nla_nest_end(skb, attrs);
        genlmsg_end(skb, nlh);
        cb_ctx->pos[0] = 1;
        kfree(stats);
        return skb->len;

errattr:
        kfree(stats);
erralloc:
        nla_nest_cancel(skb, attrs);
errnest:
        genlmsg_cancel(skb, nlh);
errmsg:
        return skb->len;
}

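/* Emit one fallback reason entry (reason code and count) from the server or
 * client array; the aggregate fallback counters are sent only once per dump,
 * tracked via cb_ctx->pos[2]. Returns -ENODATA for an empty slot.
 */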
static int smc_nl_get_fback_details(struct sk_buff *skb,
                                    struct netlink_callback *cb, int pos,
                                    bool is_srv)
{
        struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
        struct net *net = sock_net(skb->sk);
        int cnt_reported = cb_ctx->pos[2];
        struct smc_stats_fback *trgt_arr;
        struct nlattr *attrs;
        int rc = 0;
        void *nlh;

        if (is_srv)
                trgt_arr = &net->smc.fback_rsn->srv[0];
        else
                trgt_arr = &net->smc.fback_rsn->clnt[0];
        if (!trgt_arr[pos].fback_code)
                return -ENODATA;
        nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
                          &smc_gen_nl_family, NLM_F_MULTI,
                          SMC_NETLINK_GET_FBACK_STATS);
        if (!nlh)
                goto errmsg;
        attrs = nla_nest_start(skb, SMC_GEN_FBACK_STATS);
        if (!attrs)
                goto errout;
        if (nla_put_u8(skb, SMC_NLA_FBACK_STATS_TYPE, is_srv))
                goto errattr;
        if (!cnt_reported) {
                if (nla_put_u64_64bit(skb, SMC_NLA_FBACK_STATS_SRV_CNT,
                                      net->smc.fback_rsn->srv_fback_cnt,
                                      SMC_NLA_FBACK_STATS_PAD))
                        goto errattr;
                if (nla_put_u64_64bit(skb, SMC_NLA_FBACK_STATS_CLNT_CNT,
                                      net->smc.fback_rsn->clnt_fback_cnt,
                                      SMC_NLA_FBACK_STATS_PAD))
                        goto errattr;
                cnt_reported = 1;
        }

        if (nla_put_u32(skb, SMC_NLA_FBACK_STATS_RSN_CODE,
                        trgt_arr[pos].fback_code))
                goto errattr;
        if (nla_put_u16(skb, SMC_NLA_FBACK_STATS_RSN_CNT,
                        trgt_arr[pos].count))
                goto errattr;

        cb_ctx->pos[2] = cnt_reported;
        nla_nest_end(skb, attrs);
        genlmsg_end(skb, nlh);
        return rc;

errattr:
        nla_nest_cancel(skb, attrs);
errout:
        genlmsg_cancel(skb, nlh);
errmsg:
        return -EMSGSIZE;
}

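/* Netlink dump handler for SMC_NETLINK_GET_FBACK_STATS: walk the server and
 * client fallback reason arrays under mutex_fback_rsn, resuming from the
 * position saved in the dump callback context when the buffer runs full.
 */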
int smc_nl_get_fback_stats(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
        struct net *net = sock_net(skb->sk);
        int rc_srv = 0, rc_clnt = 0, k;
        int skip_serv = cb_ctx->pos[1];
        int snum = cb_ctx->pos[0];
        bool is_srv = true;

        mutex_lock(&net->smc.mutex_fback_rsn);
        for (k = 0; k < SMC_MAX_FBACK_RSN_CNT; k++) {
                if (k < snum)
                        continue;
                if (!skip_serv) {
                        rc_srv = smc_nl_get_fback_details(skb, cb, k, is_srv);
                        if (rc_srv && rc_srv != -ENODATA)
                                break;
                } else {
                        skip_serv = 0;
                }
                rc_clnt = smc_nl_get_fback_details(skb, cb, k, !is_srv);
                if (rc_clnt && rc_clnt != -ENODATA) {
                        skip_serv = 1;
                        break;
                }
                if (rc_clnt == -ENODATA && rc_srv == -ENODATA)
                        break;
        }
        mutex_unlock(&net->smc.mutex_fback_rsn);
        cb_ctx->pos[1] = skip_serv;
        cb_ctx->pos[0] = k;
        return skb->len;
}