/*
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/rhashtable.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs_helpers.h>
#include <linux/mlx5/fs.h>
#include <linux/rbtree.h>

#include "mlx5_core.h"
#include "fs_cmd.h"
#include "fpga/ipsec.h"
#include "fpga/sdk.h"
#include "fpga/core.h"

enum mlx5_fpga_ipsec_cmd_status {
	MLX5_FPGA_IPSEC_CMD_PENDING,
	MLX5_FPGA_IPSEC_CMD_SEND_FAIL,
	MLX5_FPGA_IPSEC_CMD_COMPLETE,
};

struct mlx5_fpga_ipsec_cmd_context {
	struct mlx5_fpga_dma_buf buf;
	enum mlx5_fpga_ipsec_cmd_status status;
	struct mlx5_ifc_fpga_ipsec_cmd_resp resp;
	int status_code;
	struct completion complete;
	struct mlx5_fpga_device *dev;
	struct list_head list; /* Item in pending_cmds */
	u8 command[0];
};

struct mlx5_fpga_esp_xfrm;

struct mlx5_fpga_ipsec_sa_ctx {
	struct rhash_head hash;
	struct mlx5_ifc_fpga_ipsec_sa hw_sa;
	struct mlx5_core_dev *dev;
	struct mlx5_fpga_esp_xfrm *fpga_xfrm;
};

struct mlx5_fpga_esp_xfrm {
	unsigned int num_rules;
	struct mlx5_fpga_ipsec_sa_ctx *sa_ctx;
	struct mutex lock; /* xfrm lock */
	struct mlx5_accel_esp_xfrm accel_xfrm;
};

struct mlx5_fpga_ipsec_rule {
	struct rb_node node;
	struct fs_fte *fte;
	struct mlx5_fpga_ipsec_sa_ctx *ctx;
};

static const struct rhashtable_params rhash_sa = {
	.key_len = FIELD_SIZEOF(struct mlx5_fpga_ipsec_sa_ctx, hw_sa),
	.key_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hw_sa),
	.head_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hash),
	.automatic_shrinking = true,
	.min_size = 1,
};

struct mlx5_fpga_ipsec {
	struct mlx5_fpga_device *fdev;
	struct list_head pending_cmds;
	spinlock_t pending_cmds_lock; /* Protects pending_cmds */
	u32 caps[MLX5_ST_SZ_DW(ipsec_extended_cap)];
	struct mlx5_fpga_conn *conn;

	struct notifier_block fs_notifier_ingress_bypass;
	struct notifier_block fs_notifier_egress;

	/* Map hardware SA           -->  SA context
	 *     (mlx5_fpga_ipsec_sa)       (mlx5_fpga_ipsec_sa_ctx)
	 * We will use this hash to avoid SAs duplication in fpga which
	 * aren't allowed
	 */
	struct rhashtable sa_hash;	/* hw_sa -> mlx5_fpga_ipsec_sa_ctx */
	struct mutex sa_hash_lock;

	/* Tree holding all rules for this fpga device
	 * Key for searching a rule (mlx5_fpga_ipsec_rule) is (ft, id)
	 */
	struct rb_root rules_rb;
	struct mutex rules_rb_lock; /* rules lock */
};

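/* The IPSec sandbox is identified by the Mellanox sandbox vendor ID
 * together with the IPSec sandbox product ID in the FPGA capabilities.
 */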
static bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev)
{
	if (!mdev->fpga || !MLX5_CAP_GEN(mdev, fpga))
		return false;

	if (MLX5_CAP_FPGA(mdev, ieee_vendor_id) !=
	    MLX5_FPGA_CAP_SANDBOX_VENDOR_ID_MLNX)
		return false;

	if (MLX5_CAP_FPGA(mdev, sandbox_product_id) !=
	    MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_IPSEC)
		return false;

	return true;
}

static void mlx5_fpga_ipsec_send_complete(struct mlx5_fpga_conn *conn,
					  struct mlx5_fpga_device *fdev,
					  struct mlx5_fpga_dma_buf *buf,
					  u8 status)
{
	struct mlx5_fpga_ipsec_cmd_context *context;

	if (status) {
		context = container_of(buf, struct mlx5_fpga_ipsec_cmd_context,
				       buf);
		mlx5_fpga_warn(fdev, "IPSec command send failed with status %u\n",
			       status);
		context->status = MLX5_FPGA_IPSEC_CMD_SEND_FAIL;
		complete(&context->complete);
	}
}

static inline
int syndrome_to_errno(enum mlx5_ifc_fpga_ipsec_response_syndrome syndrome)
{
	switch (syndrome) {
	case MLX5_FPGA_IPSEC_RESPONSE_SUCCESS:
		return 0;
	case MLX5_FPGA_IPSEC_RESPONSE_SADB_ISSUE:
		return -EEXIST;
	case MLX5_FPGA_IPSEC_RESPONSE_ILLEGAL_REQUEST:
		return -EINVAL;
	case MLX5_FPGA_IPSEC_RESPONSE_WRITE_RESPONSE_ISSUE:
		return -EIO;
	}
	return -EIO;
}

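/* Responses carry no command identifier: they are matched to commands by
 * arrival order, so the head of pending_cmds owns the incoming message.
 */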
static void mlx5_fpga_ipsec_recv(void *cb_arg, struct mlx5_fpga_dma_buf *buf)
{
	struct mlx5_ifc_fpga_ipsec_cmd_resp *resp = buf->sg[0].data;
	struct mlx5_fpga_ipsec_cmd_context *context;
	enum mlx5_ifc_fpga_ipsec_response_syndrome syndrome;
	struct mlx5_fpga_device *fdev = cb_arg;
	unsigned long flags;

	if (buf->sg[0].size < sizeof(*resp)) {
		mlx5_fpga_warn(fdev, "Short receive from FPGA IPSec: %u < %zu bytes\n",
			       buf->sg[0].size, sizeof(*resp));
		return;
	}

	mlx5_fpga_dbg(fdev, "mlx5_ipsec recv_cb syndrome %08x\n",
		      ntohl(resp->syndrome));

	spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags);
	context = list_first_entry_or_null(&fdev->ipsec->pending_cmds,
					   struct mlx5_fpga_ipsec_cmd_context,
					   list);
	if (context)
		list_del(&context->list);
	spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags);

	if (!context) {
		mlx5_fpga_warn(fdev, "Received IPSec offload response without pending command request\n");
		return;
	}
	mlx5_fpga_dbg(fdev, "Handling response for %p\n", context);

	syndrome = ntohl(resp->syndrome);
	context->status_code = syndrome_to_errno(syndrome);
	context->status = MLX5_FPGA_IPSEC_CMD_COMPLETE;
	memcpy(&context->resp, resp, sizeof(*resp));

	if (context->status_code)
		mlx5_fpga_warn(fdev, "IPSec command failed with syndrome %08x\n",
			       syndrome);

	complete(&context->complete);
}

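/* Allocate a command context and queue it on the SBU connection; the
 * caller owns the returned context and frees it after completion.
 */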
static void *mlx5_fpga_ipsec_cmd_exec(struct mlx5_core_dev *mdev,
				      const void *cmd, int cmd_size)
{
	struct mlx5_fpga_ipsec_cmd_context *context;
	struct mlx5_fpga_device *fdev = mdev->fpga;
	unsigned long flags;
	int res;

	if (!fdev || !fdev->ipsec)
		return ERR_PTR(-EOPNOTSUPP);

	if (cmd_size & 3)
		return ERR_PTR(-EINVAL);

	context = kzalloc(sizeof(*context) + cmd_size, GFP_ATOMIC);
	if (!context)
		return ERR_PTR(-ENOMEM);

	context->status = MLX5_FPGA_IPSEC_CMD_PENDING;
	context->dev = fdev;
	context->buf.complete = mlx5_fpga_ipsec_send_complete;
	init_completion(&context->complete);
	memcpy(&context->command, cmd, cmd_size);
	context->buf.sg[0].size = cmd_size;
	context->buf.sg[0].data = &context->command;

	spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags);
	res = mlx5_fpga_sbu_conn_sendmsg(fdev->ipsec->conn, &context->buf);
	if (!res)
		list_add_tail(&context->list, &fdev->ipsec->pending_cmds);
	spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags);

	if (res) {
		mlx5_fpga_warn(fdev, "Failed to send IPSec command: %d\n", res);
		kfree(context);
		return ERR_PTR(res);
	}

	/* Context should be freed by the caller after completion. */
	return context;
}

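/* Wait for the FPGA to answer and fold the completion status and response
 * syndrome into a single errno value.
 */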
static int mlx5_fpga_ipsec_cmd_wait(void *ctx)
{
	struct mlx5_fpga_ipsec_cmd_context *context = ctx;
	unsigned long timeout =
		msecs_to_jiffies(MLX5_FPGA_CMD_TIMEOUT_MSEC);
	int res;

	res = wait_for_completion_timeout(&context->complete, timeout);
	if (!res) {
		mlx5_fpga_warn(context->dev, "Failure waiting for IPSec command response\n");
		return -ETIMEDOUT;
	}

	if (context->status == MLX5_FPGA_IPSEC_CMD_COMPLETE)
		res = context->status_code;
	else
		res = -EIO;

	return res;
}

static inline bool is_v2_sadb_supported(struct mlx5_fpga_ipsec *fipsec)
{
	if (MLX5_GET(ipsec_extended_cap, fipsec->caps, v2_command))
		return true;
	return false;
}

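/* Push one SA to the FPGA SADB. v2-capable devices consume the whole
 * command (including ESN fields); v1 devices only the ipsec_sa_v1 part.
 */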
static int mlx5_fpga_ipsec_update_hw_sa(struct mlx5_fpga_device *fdev,
					struct mlx5_ifc_fpga_ipsec_sa *hw_sa,
					int opcode)
{
	struct mlx5_core_dev *dev = fdev->mdev;
	struct mlx5_ifc_fpga_ipsec_sa *sa;
	struct mlx5_fpga_ipsec_cmd_context *cmd_context;
	size_t sa_cmd_size;
	int err;

	hw_sa->ipsec_sa_v1.cmd = htonl(opcode);
	if (is_v2_sadb_supported(fdev->ipsec))
		sa_cmd_size = sizeof(*hw_sa);
	else
		sa_cmd_size = sizeof(hw_sa->ipsec_sa_v1);

	cmd_context = (struct mlx5_fpga_ipsec_cmd_context *)
			mlx5_fpga_ipsec_cmd_exec(dev, hw_sa, sa_cmd_size);
	if (IS_ERR(cmd_context))
		return PTR_ERR(cmd_context);

	err = mlx5_fpga_ipsec_cmd_wait(cmd_context);
	if (err)
		goto out;

	sa = (struct mlx5_ifc_fpga_ipsec_sa *)&cmd_context->command;
	if (sa->ipsec_sa_v1.sw_sa_handle != cmd_context->resp.sw_sa_handle) {
		mlx5_fpga_err(fdev, "mismatch SA handle. cmd 0x%08x vs resp 0x%08x\n",
			      ntohl(sa->ipsec_sa_v1.sw_sa_handle),
			      ntohl(cmd_context->resp.sw_sa_handle));
		err = -EIO;
	}

out:
	kfree(cmd_context);
	return err;
}

u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev)
{
	struct mlx5_fpga_device *fdev = mdev->fpga;
	u32 ret = 0;

	if (mlx5_fpga_is_ipsec_device(mdev)) {
		ret |= MLX5_ACCEL_IPSEC_CAP_DEVICE;
		ret |= MLX5_ACCEL_IPSEC_CAP_REQUIRED_METADATA;
	} else {
		return ret;
	}

	if (!fdev->ipsec)
		return ret;

	if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, esp))
		ret |= MLX5_ACCEL_IPSEC_CAP_ESP;

	if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, ipv6))
		ret |= MLX5_ACCEL_IPSEC_CAP_IPV6;

	if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, lso))
		ret |= MLX5_ACCEL_IPSEC_CAP_LSO;

	if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, rx_no_trailer))
		ret |= MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER;

	if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, esn)) {
		ret |= MLX5_ACCEL_IPSEC_CAP_ESN;
		ret |= MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN;
	}

	return ret;
}

unsigned int mlx5_fpga_ipsec_counters_count(struct mlx5_core_dev *mdev)
{
	struct mlx5_fpga_device *fdev = mdev->fpga;

	if (!fdev || !fdev->ipsec)
		return 0;

	return MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps,
			number_of_ipsec_counters);
}

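/* Counters live in FPGA memory as consecutive pairs of big-endian 32-bit
 * words, low word first, at the address advertised in the extended caps.
 */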
int mlx5_fpga_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
				  unsigned int counters_count)
{
	struct mlx5_fpga_device *fdev = mdev->fpga;
	unsigned int i;
	__be32 *data;
	u32 count;
	u64 addr;
	int ret;

	if (!fdev || !fdev->ipsec)
		return 0;

	addr = (u64)MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps,
			     ipsec_counters_addr_low) +
	       ((u64)MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps,
			     ipsec_counters_addr_high) << 32);

	count = mlx5_fpga_ipsec_counters_count(mdev);

	data = kzalloc(array3_size(sizeof(*data), count, 2), GFP_KERNEL);
	if (!data) {
		ret = -ENOMEM;
		goto out;
	}

	ret = mlx5_fpga_mem_read(fdev, count * sizeof(u64), addr, data,
				 MLX5_FPGA_ACCESS_TYPE_DONTCARE);
	if (ret < 0) {
		mlx5_fpga_err(fdev, "Failed to read IPSec counters from HW: %d\n",
			      ret);
		goto out;
	}
	ret = 0;

	if (count > counters_count)
		count = counters_count;

	/* Each counter is low word, then high. But each word is big-endian */
	for (i = 0; i < count; i++)
		counters[i] = (u64)ntohl(data[i * 2]) |
			      ((u64)ntohl(data[i * 2 + 1]) << 32);

out:
	kfree(data);
	return ret;
}

static int mlx5_fpga_ipsec_set_caps(struct mlx5_core_dev *mdev, u32 flags)
{
	struct mlx5_fpga_ipsec_cmd_context *context;
	struct mlx5_ifc_fpga_ipsec_cmd_cap cmd = {0};
	int err;

	cmd.cmd = htonl(MLX5_FPGA_IPSEC_CMD_OP_SET_CAP);
	cmd.flags = htonl(flags);
	context = mlx5_fpga_ipsec_cmd_exec(mdev, &cmd, sizeof(cmd));
	if (IS_ERR(context))
		return PTR_ERR(context);

	err = mlx5_fpga_ipsec_cmd_wait(context);
	if (err)
		goto out;

	if ((context->resp.flags & cmd.flags) != cmd.flags) {
		mlx5_fpga_err(context->dev, "Failed to set capabilities. cmd 0x%08x vs resp 0x%08x\n",
			      cmd.flags,
			      context->resp.flags);
		err = -EIO;
	}

out:
	kfree(context);
	return err;
}

static int mlx5_fpga_ipsec_enable_supported_caps(struct mlx5_core_dev *mdev)
{
	u32 dev_caps = mlx5_fpga_ipsec_device_caps(mdev);
	u32 flags = 0;

	if (dev_caps & MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER)
		flags |= MLX5_FPGA_IPSEC_CAP_NO_TRAILER;

	return mlx5_fpga_ipsec_set_caps(mdev, flags);
}

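/* Translate the generic xfrm attributes (key material, ESN state, SA
 * handle, direction) into the hardware SA layout used by the FPGA.
 */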
static void
mlx5_fpga_ipsec_build_hw_xfrm(struct mlx5_core_dev *mdev,
			      const struct mlx5_accel_esp_xfrm_attrs *xfrm_attrs,
			      struct mlx5_ifc_fpga_ipsec_sa *hw_sa)
{
	const struct aes_gcm_keymat *aes_gcm = &xfrm_attrs->keymat.aes_gcm;

	/* key */
	memcpy(&hw_sa->ipsec_sa_v1.key_enc, aes_gcm->aes_key,
	       aes_gcm->key_len / 8);
	/* Duplicate 128 bit key twice according to HW layout */
	if (aes_gcm->key_len == 128)
		memcpy(&hw_sa->ipsec_sa_v1.key_enc[16],
		       aes_gcm->aes_key, aes_gcm->key_len / 8);

	/* salt and seq_iv */
	memcpy(&hw_sa->ipsec_sa_v1.gcm.salt_iv, &aes_gcm->seq_iv,
	       sizeof(aes_gcm->seq_iv));
	memcpy(&hw_sa->ipsec_sa_v1.gcm.salt, &aes_gcm->salt,
	       sizeof(aes_gcm->salt));

	/* esn */
	if (xfrm_attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) {
		hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_ESN_EN;
		hw_sa->ipsec_sa_v1.flags |=
				(xfrm_attrs->flags &
				 MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP) ?
				MLX5_FPGA_IPSEC_SA_ESN_OVERLAP : 0;
		hw_sa->esn = htonl(xfrm_attrs->esn);
	} else {
		hw_sa->ipsec_sa_v1.flags &= ~MLX5_FPGA_IPSEC_SA_ESN_EN;
		hw_sa->ipsec_sa_v1.flags &=
				~(xfrm_attrs->flags &
				  MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP) ?
				MLX5_FPGA_IPSEC_SA_ESN_OVERLAP : 0;
		hw_sa->esn = 0;
	}

	/* rx handle */
	hw_sa->ipsec_sa_v1.sw_sa_handle = htonl(xfrm_attrs->sa_handle);

	/* enc mode */
	switch (aes_gcm->key_len) {
	case 128:
		hw_sa->ipsec_sa_v1.enc_mode =
			MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_128_AUTH_128;
		break;
	case 256:
		hw_sa->ipsec_sa_v1.enc_mode =
			MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_256_AUTH_128;
		break;
	}

	/* flags */
	hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_SA_VALID |
			MLX5_FPGA_IPSEC_SA_SPI_EN |
			MLX5_FPGA_IPSEC_SA_IP_ESP;

	if (xfrm_attrs->action & MLX5_ACCEL_ESP_ACTION_ENCRYPT)
		hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_DIR_SX;
	else
		hw_sa->ipsec_sa_v1.flags &= ~MLX5_FPGA_IPSEC_SA_DIR_SX;
}

static void
mlx5_fpga_ipsec_build_hw_sa(struct mlx5_core_dev *mdev,
			    struct mlx5_accel_esp_xfrm_attrs *xfrm_attrs,
			    const __be32 saddr[4],
			    const __be32 daddr[4],
			    const __be32 spi, bool is_ipv6,
			    struct mlx5_ifc_fpga_ipsec_sa *hw_sa)
{
	mlx5_fpga_ipsec_build_hw_xfrm(mdev, xfrm_attrs, hw_sa);

	/* IPs */
	memcpy(hw_sa->ipsec_sa_v1.sip, saddr, sizeof(hw_sa->ipsec_sa_v1.sip));
	memcpy(hw_sa->ipsec_sa_v1.dip, daddr, sizeof(hw_sa->ipsec_sa_v1.dip));

	/* SPI */
	hw_sa->ipsec_sa_v1.spi = spi;

	/* flags */
	if (is_ipv6)
		hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_IPV6;
}

static bool is_full_mask(const void *p, size_t len)
{
	WARN_ON(len % 4);

	return !memchr_inv(p, 0xff, len);
}

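/* The FPGA matches SAs on exact addresses and SPI, so the rule's source
 * and destination IP masks and the ESP SPI mask must all be all-ones.
 */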
static bool validate_fpga_full_mask(struct mlx5_core_dev *dev,
				    const u32 *match_c,
				    const u32 *match_v)
{
	const void *misc_params_c = MLX5_ADDR_OF(fte_match_param,
						 match_c,
						 misc_parameters);
	const void *headers_c = MLX5_ADDR_OF(fte_match_param,
					     match_c,
					     outer_headers);
	const void *headers_v = MLX5_ADDR_OF(fte_match_param,
					     match_v,
					     outer_headers);

	if (mlx5_fs_is_outer_ipv4_flow(dev, headers_c, headers_v)) {
		const void *s_ipv4_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4,
						    headers_c,
						    src_ipv4_src_ipv6.ipv4_layout.ipv4);
		const void *d_ipv4_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4,
						    headers_c,
						    dst_ipv4_dst_ipv6.ipv4_layout.ipv4);

		if (!is_full_mask(s_ipv4_c, MLX5_FLD_SZ_BYTES(ipv4_layout,
							      ipv4)) ||
		    !is_full_mask(d_ipv4_c, MLX5_FLD_SZ_BYTES(ipv4_layout,
							      ipv4)))
			return false;
	} else {
		const void *s_ipv6_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4,
						    headers_c,
						    src_ipv4_src_ipv6.ipv6_layout.ipv6);
		const void *d_ipv6_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4,
						    headers_c,
						    dst_ipv4_dst_ipv6.ipv6_layout.ipv6);

		if (!is_full_mask(s_ipv6_c, MLX5_FLD_SZ_BYTES(ipv6_layout,
							      ipv6)) ||
		    !is_full_mask(d_ipv6_c, MLX5_FLD_SZ_BYTES(ipv6_layout,
							      ipv6)))
			return false;
	}

	if (!is_full_mask(MLX5_ADDR_OF(fte_match_set_misc, misc_params_c,
				       outer_esp_spi),
			  MLX5_FLD_SZ_BYTES(fte_match_set_misc, outer_esp_spi)))
		return false;

	return true;
}

static bool mlx5_is_fpga_ipsec_rule(struct mlx5_core_dev *dev,
				    u8 match_criteria_enable,
				    const u32 *match_c,
				    const u32 *match_v)
{
	u32 ipsec_dev_caps = mlx5_accel_ipsec_device_caps(dev);
	bool ipv6_flow;

	ipv6_flow = mlx5_fs_is_outer_ipv6_flow(dev, match_c, match_v);

	if (!(match_criteria_enable & MLX5_MATCH_OUTER_HEADERS) ||
	    mlx5_fs_is_outer_udp_flow(match_c, match_v) ||
	    mlx5_fs_is_outer_tcp_flow(match_c, match_v) ||
	    mlx5_fs_is_vxlan_flow(match_c) ||
	    !(mlx5_fs_is_outer_ipv4_flow(dev, match_c, match_v) ||
	      ipv6_flow))
		return false;

	if (!(ipsec_dev_caps & MLX5_ACCEL_IPSEC_CAP_DEVICE))
		return false;

	if (!(ipsec_dev_caps & MLX5_ACCEL_IPSEC_CAP_ESP) &&
	    mlx5_fs_is_outer_ipsec_flow(match_c))
		return false;

	if (!(ipsec_dev_caps & MLX5_ACCEL_IPSEC_CAP_IPV6) &&
	    ipv6_flow)
		return false;

	if (!validate_fpga_full_mask(dev, match_c, match_v))
		return false;

	return true;
}

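/* Egress rules must additionally not match on MAC addresses, may only
 * encrypt/allow, and must not carry a flow tag.
 */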
static bool mlx5_is_fpga_egress_ipsec_rule(struct mlx5_core_dev *dev,
					   u8 match_criteria_enable,
					   const u32 *match_c,
					   const u32 *match_v,
					   struct mlx5_flow_act *flow_act)
{
	const void *outer_c = MLX5_ADDR_OF(fte_match_param, match_c,
					   outer_headers);
	bool is_dmac = MLX5_GET(fte_match_set_lyr_2_4, outer_c, dmac_47_16) ||
		       MLX5_GET(fte_match_set_lyr_2_4, outer_c, dmac_15_0);
	bool is_smac = MLX5_GET(fte_match_set_lyr_2_4, outer_c, smac_47_16) ||
		       MLX5_GET(fte_match_set_lyr_2_4, outer_c, smac_15_0);
	int ret;

	ret = mlx5_is_fpga_ipsec_rule(dev, match_criteria_enable, match_c,
				      match_v);
	if (!ret)
		return ret;

	if (is_dmac || is_smac ||
	    (match_criteria_enable &
	     ~(MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS)) ||
	    (flow_act->action & ~(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
				  MLX5_FLOW_CONTEXT_ACTION_ALLOW)) ||
	    (flow_act->flags & FLOW_ACT_HAS_TAG))
		return false;

	return true;
}

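/* Create a hardware SA context, or reuse the one already bound to this
 * accel_xfrm. Two different xfrms may not share an identical hardware SA;
 * the hash insert then fails and -EEXIST is returned.
 */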
void *mlx5_fpga_ipsec_create_sa_ctx(struct mlx5_core_dev *mdev,
				    struct mlx5_accel_esp_xfrm *accel_xfrm,
				    const __be32 saddr[4],
				    const __be32 daddr[4],
				    const __be32 spi, bool is_ipv6)
{
	struct mlx5_fpga_ipsec_sa_ctx *sa_ctx;
	struct mlx5_fpga_esp_xfrm *fpga_xfrm =
			container_of(accel_xfrm, typeof(*fpga_xfrm),
				     accel_xfrm);
	struct mlx5_fpga_device *fdev = mdev->fpga;
	struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
	int opcode, err;
	void *context;

	/* alloc SA */
	sa_ctx = kzalloc(sizeof(*sa_ctx), GFP_KERNEL);
	if (!sa_ctx)
		return ERR_PTR(-ENOMEM);

	sa_ctx->dev = mdev;

	/* build candidate SA */
	mlx5_fpga_ipsec_build_hw_sa(mdev, &accel_xfrm->attrs,
				    saddr, daddr, spi, is_ipv6,
				    &sa_ctx->hw_sa);

	mutex_lock(&fpga_xfrm->lock);

	if (fpga_xfrm->sa_ctx) { /* multiple rules for same accel_xfrm */
		/* all rules must be with same IPs and SPI */
		if (memcmp(&sa_ctx->hw_sa, &fpga_xfrm->sa_ctx->hw_sa,
			   sizeof(sa_ctx->hw_sa))) {
			context = ERR_PTR(-EINVAL);
			goto exists;
		}

		++fpga_xfrm->num_rules;
		context = fpga_xfrm->sa_ctx;
		goto exists;
	}

	/* This is unbounded fpga_xfrm, try to add to hash */
	mutex_lock(&fipsec->sa_hash_lock);

	err = rhashtable_lookup_insert_fast(&fipsec->sa_hash, &sa_ctx->hash,
					    rhash_sa);
	if (err) {
		/* Can't bind a different accel_xfrm to an already existing
		 * sa_ctx, because we can't support multiple keymats for the
		 * same sa_ctx.
		 */
		context = ERR_PTR(-EEXIST);
		goto unlock_hash;
	}

	/* Bind accel_xfrm to sa_ctx */
	opcode = is_v2_sadb_supported(fdev->ipsec) ?
			MLX5_FPGA_IPSEC_CMD_OP_ADD_SA_V2 :
			MLX5_FPGA_IPSEC_CMD_OP_ADD_SA;
	err = mlx5_fpga_ipsec_update_hw_sa(fdev, &sa_ctx->hw_sa, opcode);
	sa_ctx->hw_sa.ipsec_sa_v1.cmd = 0;
	if (err) {
		context = ERR_PTR(err);
		goto delete_hash;
	}

	mutex_unlock(&fipsec->sa_hash_lock);

	++fpga_xfrm->num_rules;
	fpga_xfrm->sa_ctx = sa_ctx;
	sa_ctx->fpga_xfrm = fpga_xfrm;

	mutex_unlock(&fpga_xfrm->lock);

	return sa_ctx;

delete_hash:
	WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash, &sa_ctx->hash,
				       rhash_sa));
unlock_hash:
	mutex_unlock(&fipsec->sa_hash_lock);

exists:
	mutex_unlock(&fpga_xfrm->lock);
	kfree(sa_ctx);
	return context;
}

static void *
mlx5_fpga_ipsec_fs_create_sa_ctx(struct mlx5_core_dev *mdev,
				 struct fs_fte *fte,
				 bool is_egress)
{
	struct mlx5_accel_esp_xfrm *accel_xfrm;
	__be32 saddr[4], daddr[4], spi;
	struct mlx5_flow_group *fg;
	bool is_ipv6 = false;

	fs_get_obj(fg, fte->node.parent);

	/* validate */
	if (is_egress &&
	    !mlx5_is_fpga_egress_ipsec_rule(mdev,
					    fg->mask.match_criteria_enable,
					    fg->mask.match_criteria,
					    fte->val,
					    &fte->action))
		return ERR_PTR(-EINVAL);
	else if (!mlx5_is_fpga_ipsec_rule(mdev,
					  fg->mask.match_criteria_enable,
					  fg->mask.match_criteria,
					  fte->val))
		return ERR_PTR(-EINVAL);

	/* get xfrm context */
	accel_xfrm =
		(struct mlx5_accel_esp_xfrm *)fte->action.esp_id;

	/* IPs */
	if (mlx5_fs_is_outer_ipv4_flow(mdev, fg->mask.match_criteria,
				       fte->val)) {
		memcpy(&saddr[3],
		       MLX5_ADDR_OF(fte_match_set_lyr_2_4,
				    fte->val,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       sizeof(saddr[3]));
		memcpy(&daddr[3],
		       MLX5_ADDR_OF(fte_match_set_lyr_2_4,
				    fte->val,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       sizeof(daddr[3]));
	} else {
		memcpy(saddr,
		       MLX5_ADDR_OF(fte_match_param,
				    fte->val,
				    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       sizeof(saddr));
		memcpy(daddr,
		       MLX5_ADDR_OF(fte_match_param,
				    fte->val,
				    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       sizeof(daddr));
		is_ipv6 = true;
	}

	/* SPI */
	spi = MLX5_GET_BE(typeof(spi),
			  fte_match_param, fte->val,
			  misc_parameters.outer_esp_spi);

	/* create */
	return mlx5_fpga_ipsec_create_sa_ctx(mdev, accel_xfrm,
					     saddr, daddr,
					     spi, is_ipv6);
}

static void
mlx5_fpga_ipsec_release_sa_ctx(struct mlx5_fpga_ipsec_sa_ctx *sa_ctx)
{
	struct mlx5_fpga_device *fdev = sa_ctx->dev->fpga;
	struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
	int opcode = is_v2_sadb_supported(fdev->ipsec) ?
			MLX5_FPGA_IPSEC_CMD_OP_DEL_SA_V2 :
			MLX5_FPGA_IPSEC_CMD_OP_DEL_SA;
	int err;

	err = mlx5_fpga_ipsec_update_hw_sa(fdev, &sa_ctx->hw_sa, opcode);
	sa_ctx->hw_sa.ipsec_sa_v1.cmd = 0;
	if (err) {
		WARN_ON(err);
		return;
	}

	mutex_lock(&fipsec->sa_hash_lock);
	WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash, &sa_ctx->hash,
				       rhash_sa));
	mutex_unlock(&fipsec->sa_hash_lock);
}

void mlx5_fpga_ipsec_delete_sa_ctx(void *context)
{
	struct mlx5_fpga_esp_xfrm *fpga_xfrm =
			((struct mlx5_fpga_ipsec_sa_ctx *)context)->fpga_xfrm;

	mutex_lock(&fpga_xfrm->lock);
	if (!--fpga_xfrm->num_rules) {
		mlx5_fpga_ipsec_release_sa_ctx(fpga_xfrm->sa_ctx);
		fpga_xfrm->sa_ctx = NULL;
	}
	mutex_unlock(&fpga_xfrm->lock);
}

static inline struct mlx5_fpga_ipsec_rule *
_rule_search(struct rb_root *root, struct fs_fte *fte)
{
	struct rb_node *node = root->rb_node;

	while (node) {
		struct mlx5_fpga_ipsec_rule *rule =
				container_of(node, struct mlx5_fpga_ipsec_rule,
					     node);

		if (rule->fte < fte)
			node = node->rb_left;
		else if (rule->fte > fte)
			node = node->rb_right;
		else
			return rule;
	}
	return NULL;
}

static struct mlx5_fpga_ipsec_rule *
rule_search(struct mlx5_fpga_ipsec *ipsec_dev, struct fs_fte *fte)
{
	struct mlx5_fpga_ipsec_rule *rule;

	mutex_lock(&ipsec_dev->rules_rb_lock);
	rule = _rule_search(&ipsec_dev->rules_rb, fte);
	mutex_unlock(&ipsec_dev->rules_rb_lock);

	return rule;
}

static inline int _rule_insert(struct rb_root *root,
			       struct mlx5_fpga_ipsec_rule *rule)
{
	struct rb_node **new = &root->rb_node, *parent = NULL;

	/* Figure out where to put new node */
	while (*new) {
		struct mlx5_fpga_ipsec_rule *this =
				container_of(*new, struct mlx5_fpga_ipsec_rule,
					     node);

		parent = *new;
		if (rule->fte < this->fte)
			new = &((*new)->rb_left);
		else if (rule->fte > this->fte)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	/* Add new node and rebalance tree. */
	rb_link_node(&rule->node, parent, new);
	rb_insert_color(&rule->node, root);

	return 0;
}

static int rule_insert(struct mlx5_fpga_ipsec *ipsec_dev,
		       struct mlx5_fpga_ipsec_rule *rule)
{
	int ret;

	mutex_lock(&ipsec_dev->rules_rb_lock);
	ret = _rule_insert(&ipsec_dev->rules_rb, rule);
	mutex_unlock(&ipsec_dev->rules_rb_lock);

	return ret;
}

static inline void _rule_delete(struct mlx5_fpga_ipsec *ipsec_dev,
				struct mlx5_fpga_ipsec_rule *rule)
{
	struct rb_root *root = &ipsec_dev->rules_rb;

	mutex_lock(&ipsec_dev->rules_rb_lock);
	rb_erase(&rule->node, root);
	mutex_unlock(&ipsec_dev->rules_rb_lock);
}

static void rule_delete(struct mlx5_fpga_ipsec *ipsec_dev,
			struct mlx5_fpga_ipsec_rule *rule)
{
	_rule_delete(ipsec_dev, rule);
	kfree(rule);
}

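/* While an FTE is handed to the firmware, the IPSec specifics of its flow
 * context (esp_id, crypto actions and, on devices without outer_esp_spi
 * match support, the SPI value) are stashed here and restored afterwards.
 */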
struct mailbox_mod {
	uintptr_t saved_esp_id;
	u32 saved_action;
	u32 saved_outer_esp_spi_value;
};

static void restore_spec_mailbox(struct fs_fte *fte,
				 struct mailbox_mod *mbox_mod)
{
	char *misc_params_v = MLX5_ADDR_OF(fte_match_param,
					   fte->val,
					   misc_parameters);

	MLX5_SET(fte_match_set_misc, misc_params_v, outer_esp_spi,
		 mbox_mod->saved_outer_esp_spi_value);
	fte->action.action |= mbox_mod->saved_action;
	fte->action.esp_id = (uintptr_t)mbox_mod->saved_esp_id;
}

static void modify_spec_mailbox(struct mlx5_core_dev *mdev,
				struct fs_fte *fte,
				struct mailbox_mod *mbox_mod)
{
	char *misc_params_v = MLX5_ADDR_OF(fte_match_param,
					   fte->val,
					   misc_parameters);

	mbox_mod->saved_esp_id = fte->action.esp_id;
	mbox_mod->saved_action = fte->action.action &
			(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
			 MLX5_FLOW_CONTEXT_ACTION_DECRYPT);
	mbox_mod->saved_outer_esp_spi_value =
			MLX5_GET(fte_match_set_misc, misc_params_v,
				 outer_esp_spi);

	fte->action.esp_id = 0;
	fte->action.action &= ~(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
				MLX5_FLOW_CONTEXT_ACTION_DECRYPT);
	if (!MLX5_CAP_FLOWTABLE(mdev,
				flow_table_properties_nic_receive.ft_field_support.outer_esp_spi))
		MLX5_SET(fte_match_set_misc, misc_params_v, outer_esp_spi, 0);
}

static enum fs_flow_table_type egress_to_fs_ft(bool egress)
{
	return egress ? FS_FT_NIC_TX : FS_FT_NIC_RX;
}

static int fpga_ipsec_fs_create_flow_group(struct mlx5_core_dev *dev,
					   struct mlx5_flow_table *ft,
					   u32 *in,
					   unsigned int *group_id,
					   bool is_egress)
{
	int (*create_flow_group)(struct mlx5_core_dev *dev,
				 struct mlx5_flow_table *ft, u32 *in,
				 unsigned int *group_id) =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->create_flow_group;
	char *misc_params_c = MLX5_ADDR_OF(create_flow_group_in, in,
					   match_criteria.misc_parameters);
	u32 saved_outer_esp_spi_mask;
	u8 match_criteria_enable;
	int ret;

	if (MLX5_CAP_FLOWTABLE(dev,
			       flow_table_properties_nic_receive.ft_field_support.outer_esp_spi))
		return create_flow_group(dev, ft, in, group_id);

	match_criteria_enable =
		MLX5_GET(create_flow_group_in, in, match_criteria_enable);
	saved_outer_esp_spi_mask =
		MLX5_GET(fte_match_set_misc, misc_params_c, outer_esp_spi);
	if (!match_criteria_enable || !saved_outer_esp_spi_mask)
		return create_flow_group(dev, ft, in, group_id);

	MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi, 0);

	if (!(*misc_params_c) &&
	    !memcmp(misc_params_c, misc_params_c + 1,
		    MLX5_ST_SZ_BYTES(fte_match_set_misc) - 1))
		MLX5_SET(create_flow_group_in, in, match_criteria_enable,
			 match_criteria_enable & ~MLX5_MATCH_MISC_PARAMETERS);

	ret = create_flow_group(dev, ft, in, group_id);

	MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi,
		 saved_outer_esp_spi_mask);
	MLX5_SET(create_flow_group_in, in, match_criteria_enable,
		 match_criteria_enable);

	return ret;
}

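/* For crypto FTEs: install the SA in the FPGA, track the rule in the
 * rb-tree, then strip the IPSec specifics from the mailbox before calling
 * the default create_fte command (and undo everything on failure).
 */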
static int fpga_ipsec_fs_create_fte(struct mlx5_core_dev *dev,
				    struct mlx5_flow_table *ft,
				    struct mlx5_flow_group *fg,
				    struct fs_fte *fte,
				    bool is_egress)
{
	int (*create_fte)(struct mlx5_core_dev *dev,
			  struct mlx5_flow_table *ft,
			  struct mlx5_flow_group *fg,
			  struct fs_fte *fte) =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->create_fte;
	struct mlx5_fpga_device *fdev = dev->fpga;
	struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
	struct mlx5_fpga_ipsec_rule *rule;
	bool is_esp = fte->action.esp_id;
	struct mailbox_mod mbox_mod;
	int ret;

	if (!is_esp ||
	    !(fte->action.action &
	      (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
	       MLX5_FLOW_CONTEXT_ACTION_DECRYPT)))
		return create_fte(dev, ft, fg, fte);

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return -ENOMEM;

	rule->ctx = mlx5_fpga_ipsec_fs_create_sa_ctx(dev, fte, is_egress);
	if (IS_ERR(rule->ctx)) {
		int err = PTR_ERR(rule->ctx);

		kfree(rule);
		return err;
	}

	rule->fte = fte;
	WARN_ON(rule_insert(fipsec, rule));

	modify_spec_mailbox(dev, fte, &mbox_mod);
	ret = create_fte(dev, ft, fg, fte);
	restore_spec_mailbox(fte, &mbox_mod);
	if (ret) {
		_rule_delete(fipsec, rule);
		mlx5_fpga_ipsec_delete_sa_ctx(rule->ctx);
		kfree(rule);
	}

	return ret;
}

static int fpga_ipsec_fs_update_fte(struct mlx5_core_dev *dev,
				    struct mlx5_flow_table *ft,
				    unsigned int group_id,
				    int modify_mask,
				    struct fs_fte *fte,
				    bool is_egress)
{
	int (*update_fte)(struct mlx5_core_dev *dev,
			  struct mlx5_flow_table *ft,
			  unsigned int group_id,
			  int modify_mask,
			  struct fs_fte *fte) =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->update_fte;
	bool is_esp = fte->action.esp_id;
	struct mailbox_mod mbox_mod;
	int ret;

	if (!is_esp ||
	    !(fte->action.action &
	      (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
	       MLX5_FLOW_CONTEXT_ACTION_DECRYPT)))
		return update_fte(dev, ft, group_id, modify_mask, fte);

	modify_spec_mailbox(dev, fte, &mbox_mod);
	ret = update_fte(dev, ft, group_id, modify_mask, fte);
	restore_spec_mailbox(fte, &mbox_mod);

	return ret;
}

static int fpga_ipsec_fs_delete_fte(struct mlx5_core_dev *dev,
				    struct mlx5_flow_table *ft,
				    struct fs_fte *fte,
				    bool is_egress)
{
	int (*delete_fte)(struct mlx5_core_dev *dev,
			  struct mlx5_flow_table *ft,
			  struct fs_fte *fte) =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->delete_fte;
	struct mlx5_fpga_device *fdev = dev->fpga;
	struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
	struct mlx5_fpga_ipsec_rule *rule;
	bool is_esp = fte->action.esp_id;
	struct mailbox_mod mbox_mod;
	int ret;

	if (!is_esp ||
	    !(fte->action.action &
	      (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
	       MLX5_FLOW_CONTEXT_ACTION_DECRYPT)))
		return delete_fte(dev, ft, fte);

	rule = rule_search(fipsec, fte);
	if (!rule)
		return -ENOENT;

	mlx5_fpga_ipsec_delete_sa_ctx(rule->ctx);
	rule_delete(fipsec, rule);

	modify_spec_mailbox(dev, fte, &mbox_mod);
	ret = delete_fte(dev, ft, fte);
	restore_spec_mailbox(fte, &mbox_mod);

	return ret;
}

static int
mlx5_fpga_ipsec_fs_create_flow_group_egress(struct mlx5_core_dev *dev,
					    struct mlx5_flow_table *ft,
					    u32 *in,
					    unsigned int *group_id)
{
	return fpga_ipsec_fs_create_flow_group(dev, ft, in, group_id, true);
}

static int
mlx5_fpga_ipsec_fs_create_fte_egress(struct mlx5_core_dev *dev,
				     struct mlx5_flow_table *ft,
				     struct mlx5_flow_group *fg,
				     struct fs_fte *fte)
{
	return fpga_ipsec_fs_create_fte(dev, ft, fg, fte, true);
}

static int
mlx5_fpga_ipsec_fs_update_fte_egress(struct mlx5_core_dev *dev,
				     struct mlx5_flow_table *ft,
				     unsigned int group_id,
				     int modify_mask,
				     struct fs_fte *fte)
{
	return fpga_ipsec_fs_update_fte(dev, ft, group_id, modify_mask, fte,
					true);
}

static int
mlx5_fpga_ipsec_fs_delete_fte_egress(struct mlx5_core_dev *dev,
				     struct mlx5_flow_table *ft,
				     struct fs_fte *fte)
{
	return fpga_ipsec_fs_delete_fte(dev, ft, fte, true);
}

static int
mlx5_fpga_ipsec_fs_create_flow_group_ingress(struct mlx5_core_dev *dev,
					     struct mlx5_flow_table *ft,
					     u32 *in,
					     unsigned int *group_id)
{
	return fpga_ipsec_fs_create_flow_group(dev, ft, in, group_id, false);
}

static int
mlx5_fpga_ipsec_fs_create_fte_ingress(struct mlx5_core_dev *dev,
				      struct mlx5_flow_table *ft,
				      struct mlx5_flow_group *fg,
				      struct fs_fte *fte)
{
	return fpga_ipsec_fs_create_fte(dev, ft, fg, fte, false);
}

static int
mlx5_fpga_ipsec_fs_update_fte_ingress(struct mlx5_core_dev *dev,
				      struct mlx5_flow_table *ft,
				      unsigned int group_id,
				      int modify_mask,
				      struct fs_fte *fte)
{
	return fpga_ipsec_fs_update_fte(dev, ft, group_id, modify_mask, fte,
					false);
}

static int
mlx5_fpga_ipsec_fs_delete_fte_ingress(struct mlx5_core_dev *dev,
				      struct mlx5_flow_table *ft,
				      struct fs_fte *fte)
{
	return fpga_ipsec_fs_delete_fte(dev, ft, fte, false);
}

static struct mlx5_flow_cmds fpga_ipsec_ingress;
static struct mlx5_flow_cmds fpga_ipsec_egress;

const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type)
{
	switch (type) {
	case FS_FT_NIC_RX:
		return &fpga_ipsec_ingress;
	case FS_FT_NIC_TX:
		return &fpga_ipsec_egress;
	default:
		WARN_ON(true);
		return NULL;
	}
}

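/* Probe-time setup: command connection to the SBU, SA hash, rule tree and
 * negotiation of the extended capabilities.
 */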
int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev)
{
	struct mlx5_fpga_conn_attr init_attr = {0};
	struct mlx5_fpga_device *fdev = mdev->fpga;
	struct mlx5_fpga_conn *conn;
	int err;

	if (!mlx5_fpga_is_ipsec_device(mdev))
		return 0;

	fdev->ipsec = kzalloc(sizeof(*fdev->ipsec), GFP_KERNEL);
	if (!fdev->ipsec)
		return -ENOMEM;

	fdev->ipsec->fdev = fdev;

	err = mlx5_fpga_get_sbu_caps(fdev, sizeof(fdev->ipsec->caps),
				     fdev->ipsec->caps);
	if (err) {
		mlx5_fpga_err(fdev, "Failed to retrieve IPSec extended capabilities: %d\n",
			      err);
		goto error;
	}

	INIT_LIST_HEAD(&fdev->ipsec->pending_cmds);
	spin_lock_init(&fdev->ipsec->pending_cmds_lock);

	init_attr.rx_size = SBU_QP_QUEUE_SIZE;
	init_attr.tx_size = SBU_QP_QUEUE_SIZE;
	init_attr.recv_cb = mlx5_fpga_ipsec_recv;
	init_attr.cb_arg = fdev;
	conn = mlx5_fpga_sbu_conn_create(fdev, &init_attr);
	if (IS_ERR(conn)) {
		err = PTR_ERR(conn);
		mlx5_fpga_err(fdev, "Error creating IPSec command connection %d\n",
			      err);
		goto error;
	}
	fdev->ipsec->conn = conn;

	err = rhashtable_init(&fdev->ipsec->sa_hash, &rhash_sa);
	if (err)
		goto err_destroy_conn;
	mutex_init(&fdev->ipsec->sa_hash_lock);

	fdev->ipsec->rules_rb = RB_ROOT;
	mutex_init(&fdev->ipsec->rules_rb_lock);

	err = mlx5_fpga_ipsec_enable_supported_caps(mdev);
	if (err) {
		mlx5_fpga_err(fdev, "Failed to enable IPSec extended capabilities: %d\n",
			      err);
		goto err_destroy_hash;
	}

	return 0;

err_destroy_hash:
	rhashtable_destroy(&fdev->ipsec->sa_hash);

err_destroy_conn:
	mlx5_fpga_sbu_conn_destroy(conn);

error:
	kfree(fdev->ipsec);
	fdev->ipsec = NULL;
	return err;
}

static void destroy_rules_rb(struct rb_root *root)
{
	struct mlx5_fpga_ipsec_rule *r, *tmp;

	rbtree_postorder_for_each_entry_safe(r, tmp, root, node) {
		rb_erase(&r->node, root);
		mlx5_fpga_ipsec_delete_sa_ctx(r->ctx);
		kfree(r);
	}
}

void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev)
{
	struct mlx5_fpga_device *fdev = mdev->fpga;

	if (!mlx5_fpga_is_ipsec_device(mdev))
		return;

	destroy_rules_rb(&fdev->ipsec->rules_rb);
	rhashtable_destroy(&fdev->ipsec->sa_hash);

	mlx5_fpga_sbu_conn_destroy(fdev->ipsec->conn);
	kfree(fdev->ipsec);
	fdev->ipsec = NULL;
}

void mlx5_fpga_ipsec_build_fs_cmds(void)
{
	/* ingress */
	fpga_ipsec_ingress.create_flow_table =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->create_flow_table;
	fpga_ipsec_ingress.destroy_flow_table =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->destroy_flow_table;
	fpga_ipsec_ingress.modify_flow_table =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->modify_flow_table;
	fpga_ipsec_ingress.create_flow_group =
		mlx5_fpga_ipsec_fs_create_flow_group_ingress;
	fpga_ipsec_ingress.destroy_flow_group =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->destroy_flow_group;
	fpga_ipsec_ingress.create_fte =
		mlx5_fpga_ipsec_fs_create_fte_ingress;
	fpga_ipsec_ingress.update_fte =
		mlx5_fpga_ipsec_fs_update_fte_ingress;
	fpga_ipsec_ingress.delete_fte =
		mlx5_fpga_ipsec_fs_delete_fte_ingress;
	fpga_ipsec_ingress.update_root_ft =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->update_root_ft;

	/* egress */
	fpga_ipsec_egress.create_flow_table =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->create_flow_table;
	fpga_ipsec_egress.destroy_flow_table =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->destroy_flow_table;
	fpga_ipsec_egress.modify_flow_table =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->modify_flow_table;
	fpga_ipsec_egress.create_flow_group =
		mlx5_fpga_ipsec_fs_create_flow_group_egress;
	fpga_ipsec_egress.destroy_flow_group =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->destroy_flow_group;
	fpga_ipsec_egress.create_fte =
		mlx5_fpga_ipsec_fs_create_fte_egress;
	fpga_ipsec_egress.update_fte =
		mlx5_fpga_ipsec_fs_update_fte_egress;
	fpga_ipsec_egress.delete_fte =
		mlx5_fpga_ipsec_fs_delete_fte_egress;
	fpga_ipsec_egress.update_root_ft =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->update_root_ft;
}

static int
mlx5_fpga_esp_validate_xfrm_attrs(struct mlx5_core_dev *mdev,
				  const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
	if (attrs->tfc_pad) {
		mlx5_core_err(mdev, "Cannot offload xfrm states with tfc padding\n");
		return -EOPNOTSUPP;
	}

	if (attrs->replay_type != MLX5_ACCEL_ESP_REPLAY_NONE) {
		mlx5_core_err(mdev, "Cannot offload xfrm states with anti replay\n");
		return -EOPNOTSUPP;
	}

	if (attrs->keymat_type != MLX5_ACCEL_ESP_KEYMAT_AES_GCM) {
		mlx5_core_err(mdev, "Only aes gcm keymat is supported\n");
		return -EOPNOTSUPP;
	}

	if (attrs->keymat.aes_gcm.iv_algo !=
	    MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ) {
		mlx5_core_err(mdev, "Only iv sequence algo is supported\n");
		return -EOPNOTSUPP;
	}

	if (attrs->keymat.aes_gcm.icv_len != 128) {
		mlx5_core_err(mdev, "Cannot offload xfrm states with AEAD ICV length other than 128bit\n");
		return -EOPNOTSUPP;
	}

	if (attrs->keymat.aes_gcm.key_len != 128 &&
	    attrs->keymat.aes_gcm.key_len != 256) {
		mlx5_core_err(mdev, "Cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
		return -EOPNOTSUPP;
	}

	if ((attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) &&
	    (!MLX5_GET(ipsec_extended_cap, mdev->fpga->ipsec->caps,
		       v2_command))) {
		mlx5_core_err(mdev, "Cannot offload xfrm states with ESN triggered\n");
		return -EOPNOTSUPP;
	}

	return 0;
}

struct mlx5_accel_esp_xfrm *
mlx5_fpga_esp_create_xfrm(struct mlx5_core_dev *mdev,
			  const struct mlx5_accel_esp_xfrm_attrs *attrs,
			  u32 flags)
{
	struct mlx5_fpga_esp_xfrm *fpga_xfrm;

	if (!(flags & MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA)) {
		mlx5_core_warn(mdev, "Tried to create an esp action without metadata\n");
		return ERR_PTR(-EINVAL);
	}

	if (mlx5_fpga_esp_validate_xfrm_attrs(mdev, attrs)) {
		mlx5_core_warn(mdev, "Tried to create an esp with unsupported attrs\n");
		return ERR_PTR(-EOPNOTSUPP);
	}

	fpga_xfrm = kzalloc(sizeof(*fpga_xfrm), GFP_KERNEL);
	if (!fpga_xfrm)
		return ERR_PTR(-ENOMEM);

	mutex_init(&fpga_xfrm->lock);
	memcpy(&fpga_xfrm->accel_xfrm.attrs, attrs,
	       sizeof(fpga_xfrm->accel_xfrm.attrs));

	return &fpga_xfrm->accel_xfrm;
}

void mlx5_fpga_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm)
{
	struct mlx5_fpga_esp_xfrm *fpga_xfrm =
			container_of(xfrm, struct mlx5_fpga_esp_xfrm,
				     accel_xfrm);

	/* assuming no sa_ctx are connected to this xfrm_ctx */
	kfree(fpga_xfrm);
}

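/* Modifying a bound xfrm rewrites the hardware SA in place: the old SA is
 * unhashed, the updated one is inserted and pushed to the device with
 * MOD_SA_V2, and the original is restored on any failure.
 */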
int mlx5_fpga_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
			      const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
	struct mlx5_core_dev *mdev = xfrm->mdev;
	struct mlx5_fpga_device *fdev = mdev->fpga;
	struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
	struct mlx5_fpga_esp_xfrm *fpga_xfrm;
	struct mlx5_ifc_fpga_ipsec_sa org_hw_sa;
	int err = 0;

	if (!memcmp(&xfrm->attrs, attrs, sizeof(xfrm->attrs)))
		return 0;

	if (mlx5_fpga_esp_validate_xfrm_attrs(mdev, attrs)) {
		mlx5_core_warn(mdev, "Tried to modify an esp with unsupported attrs\n");
		return -EOPNOTSUPP;
	}

	if (!is_v2_sadb_supported(fipsec)) {
		mlx5_core_warn(mdev, "Modify esp is not supported\n");
		return -EOPNOTSUPP;
	}

	fpga_xfrm = container_of(xfrm, struct mlx5_fpga_esp_xfrm, accel_xfrm);

	mutex_lock(&fpga_xfrm->lock);

	if (!fpga_xfrm->sa_ctx)
		/* Unbounded xfrm, change only sw attrs */
		goto change_sw_xfrm_attrs;

	/* copy original hw sa */
	memcpy(&org_hw_sa, &fpga_xfrm->sa_ctx->hw_sa, sizeof(org_hw_sa));
	mutex_lock(&fipsec->sa_hash_lock);
	/* remove original hw sa from hash */
	WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash,
				       &fpga_xfrm->sa_ctx->hash, rhash_sa));
	/* update hw_sa with new xfrm attrs */
	mlx5_fpga_ipsec_build_hw_xfrm(xfrm->mdev, attrs,
				      &fpga_xfrm->sa_ctx->hw_sa);
	/* try to insert new hw_sa to hash */
	err = rhashtable_insert_fast(&fipsec->sa_hash,
				     &fpga_xfrm->sa_ctx->hash, rhash_sa);
	if (err)
		goto rollback_sa;

	/* modify device with new hw_sa */
	err = mlx5_fpga_ipsec_update_hw_sa(fdev, &fpga_xfrm->sa_ctx->hw_sa,
					   MLX5_FPGA_IPSEC_CMD_OP_MOD_SA_V2);
	fpga_xfrm->sa_ctx->hw_sa.ipsec_sa_v1.cmd = 0;
	if (err)
		WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash,
					       &fpga_xfrm->sa_ctx->hash,
					       rhash_sa));
rollback_sa:
	if (err) {
		/* return original hw_sa to hash */
		memcpy(&fpga_xfrm->sa_ctx->hw_sa, &org_hw_sa,
		       sizeof(org_hw_sa));
		WARN_ON(rhashtable_insert_fast(&fipsec->sa_hash,
					       &fpga_xfrm->sa_ctx->hash,
					       rhash_sa));
	}
	mutex_unlock(&fipsec->sa_hash_lock);

change_sw_xfrm_attrs:
	if (!err)
		memcpy(&xfrm->attrs, attrs, sizeof(xfrm->attrs));
	mutex_unlock(&fpga_xfrm->lock);
	return err;
}