// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
#include <linux/security.h>
#include <linux/debugfs.h>
#include <linux/ktime.h>
#include <linux/mutex.h>
#include <asm/unaligned.h>
#include <cxlpci.h>
#include <cxlmem.h>
#include <cxl.h>

#include "core.h"
#include "trace.h"

static bool cxl_raw_allow_all;

/**
 * DOC: cxl mbox
 *
 * Core implementation of the CXL 2.0 Type-3 Memory Device Mailbox. The
 * implementation is used by the cxl_pci driver to initialize the device
 * and implement the cxl_mem.h IOCTL UAPI. It also implements the
 * backend of the cxl_pmem_ctl() transport for LIBNVDIMM.
 */

#define cxl_for_each_cmd(cmd)                                                  \
        for ((cmd) = &cxl_mem_commands[0];                                     \
             ((cmd) - cxl_mem_commands) < ARRAY_SIZE(cxl_mem_commands); (cmd)++)

#define CXL_CMD(_id, sin, sout, _flags)                                        \
        [CXL_MEM_COMMAND_ID_##_id] = {                                         \
        .info = {                                                              \
                        .id = CXL_MEM_COMMAND_ID_##_id,                        \
                        .size_in = sin,                                        \
                        .size_out = sout,                                      \
                },                                                             \
        .opcode = CXL_MBOX_OP_##_id,                                           \
        .flags = _flags,                                                       \
        }

#define CXL_VARIABLE_PAYLOAD    ~0U
/*
 * This table defines the supported mailbox commands for the driver. This table
 * is made up of a UAPI structure. Non-negative values as parameters in the
 * table will be validated against the user's input. For example, if size_in is
 * 0, and the user passed in 1, it is an error.
 */
static struct cxl_mem_command cxl_mem_commands[CXL_MEM_COMMAND_ID_MAX] = {
        CXL_CMD(IDENTIFY, 0, 0x43, CXL_CMD_FLAG_FORCE_ENABLE),
#ifdef CONFIG_CXL_MEM_RAW_COMMANDS
        CXL_CMD(RAW, CXL_VARIABLE_PAYLOAD, CXL_VARIABLE_PAYLOAD, 0),
#endif
        CXL_CMD(GET_SUPPORTED_LOGS, 0, CXL_VARIABLE_PAYLOAD, CXL_CMD_FLAG_FORCE_ENABLE),
        CXL_CMD(GET_FW_INFO, 0, 0x50, 0),
        CXL_CMD(GET_PARTITION_INFO, 0, 0x20, 0),
        CXL_CMD(GET_LSA, 0x8, CXL_VARIABLE_PAYLOAD, 0),
        CXL_CMD(GET_HEALTH_INFO, 0, 0x12, 0),
        CXL_CMD(GET_LOG, 0x18, CXL_VARIABLE_PAYLOAD, CXL_CMD_FLAG_FORCE_ENABLE),
        CXL_CMD(GET_LOG_CAPS, 0x10, 0x4, 0),
        CXL_CMD(CLEAR_LOG, 0x10, 0, 0),
        CXL_CMD(GET_SUP_LOG_SUBLIST, 0x2, CXL_VARIABLE_PAYLOAD, 0),
        CXL_CMD(SET_PARTITION_INFO, 0x0a, 0, 0),
        CXL_CMD(SET_LSA, CXL_VARIABLE_PAYLOAD, 0, 0),
        CXL_CMD(GET_ALERT_CONFIG, 0, 0x10, 0),
        CXL_CMD(SET_ALERT_CONFIG, 0xc, 0, 0),
        CXL_CMD(GET_SHUTDOWN_STATE, 0, 0x1, 0),
        CXL_CMD(SET_SHUTDOWN_STATE, 0x1, 0, 0),
        CXL_CMD(GET_SCAN_MEDIA_CAPS, 0x10, 0x4, 0),
        CXL_CMD(GET_TIMESTAMP, 0, 0x8, 0),
};
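
/*
 * For reference, the IDENTIFY entry above,
 * CXL_CMD(IDENTIFY, 0, 0x43, CXL_CMD_FLAG_FORCE_ENABLE), expands to
 * roughly:
 *
 *        [CXL_MEM_COMMAND_ID_IDENTIFY] = {
 *                .info = {
 *                        .id = CXL_MEM_COMMAND_ID_IDENTIFY,
 *                        .size_in = 0,
 *                        .size_out = 0x43,
 *                },
 *                .opcode = CXL_MBOX_OP_IDENTIFY,
 *                .flags = CXL_CMD_FLAG_FORCE_ENABLE,
 *        },
 */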

/*
 * Commands that RAW doesn't permit. The rationale for each:
 *
 * CXL_MBOX_OP_ACTIVATE_FW: Firmware activation requires adjustment /
 * coordination of transaction timeout values at the root bridge level.
 *
 * CXL_MBOX_OP_SET_PARTITION_INFO: The device memory map may change live
 * and needs to be coordinated with HDM updates.
 *
 * CXL_MBOX_OP_SET_LSA: The label storage area may be cached by the
 * driver and any writes from userspace invalidate those contents.
 *
 * CXL_MBOX_OP_SET_SHUTDOWN_STATE: Set shutdown state assumes no writes
 * to the device after it is marked clean; userspace cannot make that
 * assertion.
 *
 * CXL_MBOX_OP_[GET_]SCAN_MEDIA: The kernel provides a native error list that
 * is kept up to date with patrol notifications and error management.
 *
 * CXL_MBOX_OP_[GET_,INJECT_,CLEAR_]POISON: These commands require kernel
 * driver orchestration for safety.
 */
static u16 cxl_disabled_raw_commands[] = {
        CXL_MBOX_OP_ACTIVATE_FW,
        CXL_MBOX_OP_SET_PARTITION_INFO,
        CXL_MBOX_OP_SET_LSA,
        CXL_MBOX_OP_SET_SHUTDOWN_STATE,
        CXL_MBOX_OP_SCAN_MEDIA,
        CXL_MBOX_OP_GET_SCAN_MEDIA,
        CXL_MBOX_OP_GET_POISON,
        CXL_MBOX_OP_INJECT_POISON,
        CXL_MBOX_OP_CLEAR_POISON,
};

/*
 * Command sets that RAW doesn't permit. All opcodes in this set are
 * disabled because they pass plain text security payloads over the
 * user/kernel boundary. This functionality is intended to be wrapped
 * behind the keys ABI, which allows for encrypted payloads in the UAPI.
 */
static u8 security_command_sets[] = {
        0x44, /* Sanitize */
        0x45, /* Persistent Memory Data-at-rest Security */
        0x46, /* Security Passthrough */
};

static bool cxl_is_security_command(u16 opcode)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(security_command_sets); i++)
                if (security_command_sets[i] == (opcode >> 8))
                        return true;
        return false;
}
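
/*
 * Example: CXL_MBOX_OP_SANITIZE is opcode 0x4400, so (0x4400 >> 8) ==
 * 0x44 matches the "Sanitize" command set above and the opcode is
 * classified as a security command.
 */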

static void cxl_set_security_cmd_enabled(struct cxl_security_state *security,
                                         u16 opcode)
{
        switch (opcode) {
        case CXL_MBOX_OP_SANITIZE:
                set_bit(CXL_SEC_ENABLED_SANITIZE, security->enabled_cmds);
                break;
        case CXL_MBOX_OP_SECURE_ERASE:
                set_bit(CXL_SEC_ENABLED_SECURE_ERASE,
                        security->enabled_cmds);
                break;
        case CXL_MBOX_OP_GET_SECURITY_STATE:
                set_bit(CXL_SEC_ENABLED_GET_SECURITY_STATE,
                        security->enabled_cmds);
                break;
        case CXL_MBOX_OP_SET_PASSPHRASE:
                set_bit(CXL_SEC_ENABLED_SET_PASSPHRASE,
                        security->enabled_cmds);
                break;
        case CXL_MBOX_OP_DISABLE_PASSPHRASE:
                set_bit(CXL_SEC_ENABLED_DISABLE_PASSPHRASE,
                        security->enabled_cmds);
                break;
        case CXL_MBOX_OP_UNLOCK:
                set_bit(CXL_SEC_ENABLED_UNLOCK, security->enabled_cmds);
                break;
        case CXL_MBOX_OP_FREEZE_SECURITY:
                set_bit(CXL_SEC_ENABLED_FREEZE_SECURITY,
                        security->enabled_cmds);
                break;
        case CXL_MBOX_OP_PASSPHRASE_SECURE_ERASE:
                set_bit(CXL_SEC_ENABLED_PASSPHRASE_SECURE_ERASE,
                        security->enabled_cmds);
                break;
        default:
                break;
        }
}

static bool cxl_is_poison_command(u16 opcode)
{
#define CXL_MBOX_OP_POISON_CMDS 0x43

        if ((opcode >> 8) == CXL_MBOX_OP_POISON_CMDS)
                return true;

        return false;
}
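
/*
 * Example: CXL_MBOX_OP_GET_POISON is opcode 0x4300, so (0x4300 >> 8) ==
 * CXL_MBOX_OP_POISON_CMDS and the opcode is classified as a poison
 * command.
 */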

static void cxl_set_poison_cmd_enabled(struct cxl_poison_state *poison,
                                       u16 opcode)
{
        switch (opcode) {
        case CXL_MBOX_OP_GET_POISON:
                set_bit(CXL_POISON_ENABLED_LIST, poison->enabled_cmds);
                break;
        case CXL_MBOX_OP_INJECT_POISON:
                set_bit(CXL_POISON_ENABLED_INJECT, poison->enabled_cmds);
                break;
        case CXL_MBOX_OP_CLEAR_POISON:
                set_bit(CXL_POISON_ENABLED_CLEAR, poison->enabled_cmds);
                break;
        case CXL_MBOX_OP_GET_SCAN_MEDIA_CAPS:
                set_bit(CXL_POISON_ENABLED_SCAN_CAPS, poison->enabled_cmds);
                break;
        case CXL_MBOX_OP_SCAN_MEDIA:
                set_bit(CXL_POISON_ENABLED_SCAN_MEDIA, poison->enabled_cmds);
                break;
        case CXL_MBOX_OP_GET_SCAN_MEDIA:
                set_bit(CXL_POISON_ENABLED_SCAN_RESULTS, poison->enabled_cmds);
                break;
        default:
                break;
        }
}

static struct cxl_mem_command *cxl_mem_find_command(u16 opcode)
{
        struct cxl_mem_command *c;

        cxl_for_each_cmd(c)
                if (c->opcode == opcode)
                        return c;

        return NULL;
}

static const char *cxl_mem_opcode_to_name(u16 opcode)
{
        struct cxl_mem_command *c;

        c = cxl_mem_find_command(opcode);
        if (!c)
                return NULL;

        return cxl_command_names[c->info.id].name;
}

/**
 * cxl_internal_send_cmd() - Kernel internal interface to send a mailbox command
 * @cxl_mbox: CXL mailbox context
 * @mbox_cmd: initialized command to execute
 *
 * Context: Any context.
 * Return:
 *  * %0        - Command completed successfully.
 *  * %-E2BIG   - Payload is too large for hardware.
 *  * %-EBUSY   - Couldn't acquire exclusive mailbox access.
 *  * %-EFAULT  - Hardware error occurred.
 *  * %-ENXIO   - Command completed, but device reported an error.
 *  * %-EIO     - Unexpected output size.
 *
 * A mailbox command may execute successfully at the transport level yet the
 * device itself may report an error. While this distinction can be useful for
 * commands from userspace, the kernel will only be able to use results when
 * both are successful.
 */
int cxl_internal_send_cmd(struct cxl_mailbox *cxl_mbox,
                          struct cxl_mbox_cmd *mbox_cmd)
{
        size_t out_size, min_out;
        int rc;

        if (mbox_cmd->size_in > cxl_mbox->payload_size ||
            mbox_cmd->size_out > cxl_mbox->payload_size)
                return -E2BIG;

        out_size = mbox_cmd->size_out;
        min_out = mbox_cmd->min_out;
        rc = cxl_mbox->mbox_send(cxl_mbox, mbox_cmd);
        /*
         * EIO is reserved for a payload size mismatch and mbox_send()
         * may not return this error.
         */
        if (WARN_ONCE(rc == -EIO, "Bad return code: -EIO"))
                return -ENXIO;
        if (rc)
                return rc;

        if (mbox_cmd->return_code != CXL_MBOX_CMD_RC_SUCCESS &&
            mbox_cmd->return_code != CXL_MBOX_CMD_RC_BACKGROUND)
                return cxl_mbox_cmd_rc2errno(mbox_cmd);

        if (!out_size)
                return 0;

        /*
         * Variable sized output needs to at least satisfy the caller's
         * minimum if not the fully requested size.
         */
        if (min_out == 0)
                min_out = out_size;

        if (mbox_cmd->size_out < min_out)
                return -EIO;
        return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_internal_send_cmd, CXL);
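
/*
 * Usage sketch (illustrative, not part of the upstream file): a
 * kernel-internal caller reading the device timestamp through
 * cxl_internal_send_cmd(). The helper name is hypothetical; the opcode
 * and the 8-byte output payload match the GET_TIMESTAMP entry in the
 * command table above.
 */
static int __maybe_unused cxl_example_read_timestamp(struct cxl_mailbox *cxl_mbox,
                                                     u64 *ts)
{
        __le64 payload;
        struct cxl_mbox_cmd mbox_cmd = (struct cxl_mbox_cmd) {
                .opcode = CXL_MBOX_OP_GET_TIMESTAMP,
                .size_out = sizeof(payload),
                .payload_out = &payload,
        };
        int rc;

        rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
        if (rc)
                return rc;

        *ts = le64_to_cpu(payload);
        return 0;
}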

static bool cxl_mem_raw_command_allowed(u16 opcode)
{
        int i;

        if (!IS_ENABLED(CONFIG_CXL_MEM_RAW_COMMANDS))
                return false;

        if (security_locked_down(LOCKDOWN_PCI_ACCESS))
                return false;

        if (cxl_raw_allow_all)
                return true;

        if (cxl_is_security_command(opcode))
                return false;

        for (i = 0; i < ARRAY_SIZE(cxl_disabled_raw_commands); i++)
                if (cxl_disabled_raw_commands[i] == opcode)
                        return false;

        return true;
}

/**
 * cxl_payload_from_user_allowed() - Check contents of in_payload.
 * @opcode: The mailbox command opcode.
 * @payload_in: Pointer to the input payload passed in from user space.
 *
 * Return:
 *  * true      - payload_in passes check for @opcode.
 *  * false     - payload_in contains invalid or unsupported values.
 *
 * The driver may inspect payload contents before sending a mailbox
 * command from user space to the device. The intent is to reject
 * commands with input payloads that are known to be unsafe. This
 * check is not intended to replace the user's careful selection of
 * mailbox command parameters and makes no guarantee that the user
 * command will succeed, nor that it is appropriate.
 *
 * The specific checks are determined by the opcode.
 */
static bool cxl_payload_from_user_allowed(u16 opcode, void *payload_in)
{
        switch (opcode) {
        case CXL_MBOX_OP_SET_PARTITION_INFO: {
                struct cxl_mbox_set_partition_info *pi = payload_in;

                if (pi->flags & CXL_SET_PARTITION_IMMEDIATE_FLAG)
                        return false;
                break;
        }
        case CXL_MBOX_OP_CLEAR_LOG: {
                const uuid_t *uuid = (uuid_t *)payload_in;

                /*
                 * Restrict the 'Clear Log' action to only apply to
                 * Vendor debug logs.
                 */
                return uuid_equal(uuid, &DEFINE_CXL_VENDOR_DEBUG_UUID);
        }
        default:
                break;
        }
        return true;
}

static int cxl_mbox_cmd_ctor(struct cxl_mbox_cmd *mbox,
                             struct cxl_memdev_state *mds, u16 opcode,
                             size_t in_size, size_t out_size, u64 in_payload)
{
        struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;

        *mbox = (struct cxl_mbox_cmd) {
                .opcode = opcode,
                .size_in = in_size,
        };

        if (in_size) {
                mbox->payload_in = vmemdup_user(u64_to_user_ptr(in_payload),
                                                in_size);
                if (IS_ERR(mbox->payload_in))
                        return PTR_ERR(mbox->payload_in);

                if (!cxl_payload_from_user_allowed(opcode, mbox->payload_in)) {
                        dev_dbg(mds->cxlds.dev, "%s: input payload not allowed\n",
                                cxl_mem_opcode_to_name(opcode));
                        kvfree(mbox->payload_in);
                        return -EBUSY;
                }
        }

        /* Prepare to handle a full payload for variable sized output */
        if (out_size == CXL_VARIABLE_PAYLOAD)
                mbox->size_out = cxl_mbox->payload_size;
        else
                mbox->size_out = out_size;

        if (mbox->size_out) {
                mbox->payload_out = kvzalloc(mbox->size_out, GFP_KERNEL);
                if (!mbox->payload_out) {
                        kvfree(mbox->payload_in);
                        return -ENOMEM;
                }
        }
        return 0;
}

static void cxl_mbox_cmd_dtor(struct cxl_mbox_cmd *mbox)
{
        kvfree(mbox->payload_in);
        kvfree(mbox->payload_out);
}

static int cxl_to_mem_cmd_raw(struct cxl_mem_command *mem_cmd,
                              const struct cxl_send_command *send_cmd,
                              struct cxl_memdev_state *mds)
{
        struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;

        if (send_cmd->raw.rsvd)
                return -EINVAL;

        /*
         * Unlike supported commands, the output size of RAW commands
         * gets passed along without further checking, so it must be
         * validated here.
         */
        if (send_cmd->out.size > cxl_mbox->payload_size)
                return -EINVAL;

        if (!cxl_mem_raw_command_allowed(send_cmd->raw.opcode))
                return -EPERM;

        dev_WARN_ONCE(mds->cxlds.dev, true, "raw command path used\n");

        *mem_cmd = (struct cxl_mem_command) {
                .info = {
                        .id = CXL_MEM_COMMAND_ID_RAW,
                        .size_in = send_cmd->in.size,
                        .size_out = send_cmd->out.size,
                },
                .opcode = send_cmd->raw.opcode
        };

        return 0;
}

static int cxl_to_mem_cmd(struct cxl_mem_command *mem_cmd,
                          const struct cxl_send_command *send_cmd,
                          struct cxl_memdev_state *mds)
{
        struct cxl_mem_command *c = &cxl_mem_commands[send_cmd->id];
        const struct cxl_command_info *info = &c->info;

        if (send_cmd->flags & ~CXL_MEM_COMMAND_FLAG_MASK)
                return -EINVAL;

        if (send_cmd->rsvd)
                return -EINVAL;

        if (send_cmd->in.rsvd || send_cmd->out.rsvd)
                return -EINVAL;

        /* Check that the command is enabled for hardware */
        if (!test_bit(info->id, mds->enabled_cmds))
                return -ENOTTY;

        /* Check that the command is not claimed for exclusive kernel use */
        if (test_bit(info->id, mds->exclusive_cmds))
                return -EBUSY;

        /* Check the input buffer is the expected size */
        if ((info->size_in != CXL_VARIABLE_PAYLOAD) &&
            (info->size_in != send_cmd->in.size))
                return -ENOMEM;

        /* Check the output buffer is at least large enough */
        if ((info->size_out != CXL_VARIABLE_PAYLOAD) &&
            (send_cmd->out.size < info->size_out))
                return -ENOMEM;

        *mem_cmd = (struct cxl_mem_command) {
                .info = {
                        .id = info->id,
                        .flags = info->flags,
                        .size_in = send_cmd->in.size,
                        .size_out = send_cmd->out.size,
                },
                .opcode = c->opcode
        };

        return 0;
}

/**
 * cxl_validate_cmd_from_user() - Check fields for CXL_MEM_SEND_COMMAND.
 * @mbox_cmd: Sanitized and populated &struct cxl_mbox_cmd.
 * @mds: The driver data for the operation
 * @send_cmd: &struct cxl_send_command copied in from userspace.
 *
 * Return:
 *  * %0        - @mbox_cmd is ready to send.
 *  * %-ENOTTY  - Invalid command specified.
 *  * %-EINVAL  - Reserved fields or invalid values were used.
 *  * %-ENOMEM  - Input or output buffer wasn't sized properly.
 *  * %-EPERM   - Attempted to use a protected command.
 *  * %-EBUSY   - Kernel has claimed exclusive access to this opcode.
 *
 * The result of this command is a fully validated command in @mbox_cmd that is
 * safe to send to the hardware.
 */
static int cxl_validate_cmd_from_user(struct cxl_mbox_cmd *mbox_cmd,
                                      struct cxl_memdev_state *mds,
                                      const struct cxl_send_command *send_cmd)
{
        struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
        struct cxl_mem_command mem_cmd;
        int rc;

        if (send_cmd->id == 0 || send_cmd->id >= CXL_MEM_COMMAND_ID_MAX)
                return -ENOTTY;

        /*
         * The user can never specify an input payload larger than what hardware
         * supports, but output can be arbitrarily large (simply write out as
         * much data as the hardware provides).
         */
        if (send_cmd->in.size > cxl_mbox->payload_size)
                return -EINVAL;

        /* Sanitize and construct a cxl_mem_command */
        if (send_cmd->id == CXL_MEM_COMMAND_ID_RAW)
                rc = cxl_to_mem_cmd_raw(&mem_cmd, send_cmd, mds);
        else
                rc = cxl_to_mem_cmd(&mem_cmd, send_cmd, mds);

        if (rc)
                return rc;

        /* Sanitize and construct a cxl_mbox_cmd */
        return cxl_mbox_cmd_ctor(mbox_cmd, mds, mem_cmd.opcode,
                                 mem_cmd.info.size_in, mem_cmd.info.size_out,
                                 send_cmd->in.payload);
}

int cxl_query_cmd(struct cxl_memdev *cxlmd,
                  struct cxl_mem_query_commands __user *q)
{
        struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
        struct device *dev = &cxlmd->dev;
        struct cxl_mem_command *cmd;
        u32 n_commands;
        int j = 0;

        dev_dbg(dev, "Query IOCTL\n");

        if (get_user(n_commands, &q->n_commands))
                return -EFAULT;

        /* Return the total number of commands if 0 elements are requested. */
        if (n_commands == 0)
                return put_user(ARRAY_SIZE(cxl_mem_commands), &q->n_commands);

        /*
         * Otherwise, return min(n_commands, total commands) cxl_command_info
         * structures.
         */
        cxl_for_each_cmd(cmd) {
                struct cxl_command_info info = cmd->info;

                if (test_bit(info.id, mds->enabled_cmds))
                        info.flags |= CXL_MEM_COMMAND_FLAG_ENABLED;
                if (test_bit(info.id, mds->exclusive_cmds))
                        info.flags |= CXL_MEM_COMMAND_FLAG_EXCLUSIVE;

                if (copy_to_user(&q->commands[j++], &info, sizeof(info)))
                        return -EFAULT;

                if (j == n_commands)
                        break;
        }

        return 0;
}
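
/*
 * Userspace usage sketch (illustrative): the query ioctl is typically
 * issued twice: once with n_commands == 0 to learn the total, then
 * again with room for that many cxl_command_info entries. Assumes an
 * open /dev/cxl/memN file descriptor and <linux/cxl_mem.h>:
 *
 *        struct cxl_mem_query_commands *q;
 *        __u32 n;
 *
 *        q = calloc(1, sizeof(*q));
 *        ioctl(fd, CXL_MEM_QUERY_COMMANDS, q);
 *        n = q->n_commands;
 *        q = realloc(q, sizeof(*q) + n * sizeof(q->commands[0]));
 *        q->n_commands = n;
 *        ioctl(fd, CXL_MEM_QUERY_COMMANDS, q);
 */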

/**
 * handle_mailbox_cmd_from_user() - Dispatch a mailbox command for userspace.
 * @mds: The driver data for the operation
 * @mbox_cmd: The validated mailbox command.
 * @out_payload: Pointer to userspace's output payload.
 * @size_out: (Input) Max payload size to copy out.
 *            (Output) Payload size hardware generated.
 * @retval: Hardware generated return code from the operation.
 *
 * Return:
 *  * %0        - Mailbox transaction succeeded. This implies the mailbox
 *                protocol completed successfully, not that the operation
 *                itself was successful.
 *  * %-ENOMEM  - Couldn't allocate a bounce buffer.
 *  * %-EFAULT  - Something happened with copy_to/from_user.
 *  * %-EINTR   - Mailbox acquisition interrupted.
 *  * %-EXXX    - Transaction level failures.
 *
 * Dispatches a mailbox command on behalf of a userspace request.
 * The output payload is copied to userspace.
 *
 * See cxl_send_cmd().
 */
static int handle_mailbox_cmd_from_user(struct cxl_memdev_state *mds,
                                        struct cxl_mbox_cmd *mbox_cmd,
                                        u64 out_payload, s32 *size_out,
                                        u32 *retval)
{
        struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
        struct device *dev = mds->cxlds.dev;
        int rc;

        dev_dbg(dev,
                "Submitting %s command for user\n"
                "\topcode: %x\n"
                "\tsize: %zx\n",
                cxl_mem_opcode_to_name(mbox_cmd->opcode),
                mbox_cmd->opcode, mbox_cmd->size_in);

        rc = cxl_mbox->mbox_send(cxl_mbox, mbox_cmd);
        if (rc)
                goto out;

        /*
         * @size_out contains the max size that's allowed to be written back out
         * to userspace. While the command may have generated more output than
         * this, any excess has to be ignored.
         */
        if (mbox_cmd->size_out) {
                dev_WARN_ONCE(dev, mbox_cmd->size_out > *size_out,
                              "Invalid return size\n");
                if (copy_to_user(u64_to_user_ptr(out_payload),
                                 mbox_cmd->payload_out, mbox_cmd->size_out)) {
                        rc = -EFAULT;
                        goto out;
                }
        }

        *size_out = mbox_cmd->size_out;
        *retval = mbox_cmd->return_code;

out:
        cxl_mbox_cmd_dtor(mbox_cmd);
        return rc;
}

int cxl_send_cmd(struct cxl_memdev *cxlmd, struct cxl_send_command __user *s)
{
        struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
        struct device *dev = &cxlmd->dev;
        struct cxl_send_command send;
        struct cxl_mbox_cmd mbox_cmd;
        int rc;

        dev_dbg(dev, "Send IOCTL\n");

        if (copy_from_user(&send, s, sizeof(send)))
                return -EFAULT;

        rc = cxl_validate_cmd_from_user(&mbox_cmd, mds, &send);
        if (rc)
                return rc;

        rc = handle_mailbox_cmd_from_user(mds, &mbox_cmd, send.out.payload,
                                          &send.out.size, &send.retval);
        if (rc)
                return rc;

        if (copy_to_user(s, &send, sizeof(send)))
                return -EFAULT;

        return 0;
}
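
/*
 * Userspace usage sketch (illustrative): issuing IDENTIFY through
 * CXL_MEM_SEND_COMMAND. The sizes follow the command table above (no
 * input payload, 0x43 output bytes); "buf" is caller-provided storage:
 *
 *        struct cxl_send_command cmd = {
 *                .id = CXL_MEM_COMMAND_ID_IDENTIFY,
 *                .out.size = 0x43,
 *                .out.payload = (__u64)(uintptr_t)buf,
 *        };
 *
 *        rc = ioctl(fd, CXL_MEM_SEND_COMMAND, &cmd);
 *
 * On success, cmd.retval holds the device return code and cmd.out.size
 * is updated to the number of bytes the device actually produced.
 */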

static int cxl_xfer_log(struct cxl_memdev_state *mds, uuid_t *uuid,
                        u32 *size, u8 *out)
{
        struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
        u32 remaining = *size;
        u32 offset = 0;

        while (remaining) {
                u32 xfer_size = min_t(u32, remaining, cxl_mbox->payload_size);
                struct cxl_mbox_cmd mbox_cmd;
                struct cxl_mbox_get_log log;
                int rc;

                log = (struct cxl_mbox_get_log) {
                        .uuid = *uuid,
                        .offset = cpu_to_le32(offset),
                        .length = cpu_to_le32(xfer_size),
                };

                mbox_cmd = (struct cxl_mbox_cmd) {
                        .opcode = CXL_MBOX_OP_GET_LOG,
                        .size_in = sizeof(log),
                        .payload_in = &log,
                        .size_out = xfer_size,
                        .payload_out = out,
                };

                rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);

                /*
                 * The output payload length that indicates the number
                 * of valid bytes can be smaller than the Log buffer
                 * size.
                 */
                if (rc == -EIO && mbox_cmd.size_out < xfer_size) {
                        offset += mbox_cmd.size_out;
                        break;
                }

                if (rc < 0)
                        return rc;

                out += xfer_size;
                remaining -= xfer_size;
                offset += xfer_size;
        }

        *size = offset;

        return 0;
}

/**
 * cxl_walk_cel() - Walk through the Command Effects Log.
 * @mds: The driver data for the operation
 * @size: Length of the Command Effects Log.
 * @cel: Buffer containing the Command Effects Log entries.
 *
 * Iterate over each entry in the CEL and determine if the driver supports the
 * command. If so, the command is enabled for the device and can be used later.
 */
static void cxl_walk_cel(struct cxl_memdev_state *mds, size_t size, u8 *cel)
{
        struct cxl_cel_entry *cel_entry;
        const int cel_entries = size / sizeof(*cel_entry);
        struct device *dev = mds->cxlds.dev;
        int i;

        cel_entry = (struct cxl_cel_entry *) cel;

        for (i = 0; i < cel_entries; i++) {
                u16 opcode = le16_to_cpu(cel_entry[i].opcode);
                struct cxl_mem_command *cmd = cxl_mem_find_command(opcode);
                int enabled = 0;

                if (cmd) {
                        set_bit(cmd->info.id, mds->enabled_cmds);
                        enabled++;
                }

                if (cxl_is_poison_command(opcode)) {
                        cxl_set_poison_cmd_enabled(&mds->poison, opcode);
                        enabled++;
                }

                if (cxl_is_security_command(opcode)) {
                        cxl_set_security_cmd_enabled(&mds->security, opcode);
                        enabled++;
                }

                dev_dbg(dev, "Opcode 0x%04x %s\n", opcode,
                        enabled ? "enabled" : "unsupported by driver");
        }
}
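
/*
 * Example: a CEL entry with opcode 0x4303 (Get Scan Media Capabilities)
 * both enables the matching UAPI command in cxl_mem_commands and sets
 * CXL_POISON_ENABLED_SCAN_CAPS via cxl_set_poison_cmd_enabled().
 */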

static struct cxl_mbox_get_supported_logs *cxl_get_gsl(struct cxl_memdev_state *mds)
{
        struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
        struct cxl_mbox_get_supported_logs *ret;
        struct cxl_mbox_cmd mbox_cmd;
        int rc;

        ret = kvmalloc(cxl_mbox->payload_size, GFP_KERNEL);
        if (!ret)
                return ERR_PTR(-ENOMEM);

        mbox_cmd = (struct cxl_mbox_cmd) {
                .opcode = CXL_MBOX_OP_GET_SUPPORTED_LOGS,
                .size_out = cxl_mbox->payload_size,
                .payload_out = ret,
                /* At least the record number field must be valid */
                .min_out = 2,
        };
        rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
        if (rc < 0) {
                kvfree(ret);
                return ERR_PTR(rc);
        }

        return ret;
}

enum {
        CEL_UUID,
        VENDOR_DEBUG_UUID,
};

/* See CXL 2.0 Table 170. Get Log Input Payload */
static const uuid_t log_uuid[] = {
        [CEL_UUID] = DEFINE_CXL_CEL_UUID,
        [VENDOR_DEBUG_UUID] = DEFINE_CXL_VENDOR_DEBUG_UUID,
};

/**
 * cxl_enumerate_cmds() - Enumerate commands for a device.
 * @mds: The driver data for the operation
 *
 * Returns 0 if enumerate completed successfully.
 *
 * CXL devices have optional support for certain commands. This function will
 * determine the set of supported commands for the hardware and update the
 * enabled_cmds bitmap in the @mds.
 */
int cxl_enumerate_cmds(struct cxl_memdev_state *mds)
{
        struct cxl_mbox_get_supported_logs *gsl;
        struct device *dev = mds->cxlds.dev;
        struct cxl_mem_command *cmd;
        int i, rc;

        gsl = cxl_get_gsl(mds);
        if (IS_ERR(gsl))
                return PTR_ERR(gsl);

        rc = -ENOENT;
        for (i = 0; i < le16_to_cpu(gsl->entries); i++) {
                u32 size = le32_to_cpu(gsl->entry[i].size);
                uuid_t uuid = gsl->entry[i].uuid;
                u8 *log;

                dev_dbg(dev, "Found LOG type %pU of size %d", &uuid, size);

                if (!uuid_equal(&uuid, &log_uuid[CEL_UUID]))
                        continue;

                log = kvmalloc(size, GFP_KERNEL);
                if (!log) {
                        rc = -ENOMEM;
                        goto out;
                }

                rc = cxl_xfer_log(mds, &uuid, &size, log);
                if (rc) {
                        kvfree(log);
                        goto out;
                }

                cxl_walk_cel(mds, size, log);
                kvfree(log);

                /* In case CEL was bogus, enable some default commands. */
                cxl_for_each_cmd(cmd)
                        if (cmd->flags & CXL_CMD_FLAG_FORCE_ENABLE)
                                set_bit(cmd->info.id, mds->enabled_cmds);

                /* Found the required CEL */
                rc = 0;
        }
out:
        kvfree(gsl);
        return rc;
}
EXPORT_SYMBOL_NS_GPL(cxl_enumerate_cmds, CXL);

void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
                            enum cxl_event_log_type type,
                            enum cxl_event_type event_type,
                            const uuid_t *uuid, union cxl_event *evt)
{
        if (event_type == CXL_CPER_EVENT_MEM_MODULE) {
                trace_cxl_memory_module(cxlmd, type, &evt->mem_module);
                return;
        }
        if (event_type == CXL_CPER_EVENT_GENERIC) {
                trace_cxl_generic_event(cxlmd, type, uuid, &evt->generic);
                return;
        }

        if (trace_cxl_general_media_enabled() || trace_cxl_dram_enabled()) {
                u64 dpa, hpa = ULLONG_MAX;
                struct cxl_region *cxlr;

                /*
                 * These trace points are annotated with HPA and region
                 * translations. Take topology mutation locks and lookup
                 * { HPA, REGION } from { DPA, MEMDEV } in the event record.
                 */
                guard(rwsem_read)(&cxl_region_rwsem);
                guard(rwsem_read)(&cxl_dpa_rwsem);

                dpa = le64_to_cpu(evt->media_hdr.phys_addr) & CXL_DPA_MASK;
                cxlr = cxl_dpa_to_region(cxlmd, dpa);
                if (cxlr)
                        hpa = cxl_dpa_to_hpa(cxlr, cxlmd, dpa);

                if (event_type == CXL_CPER_EVENT_GEN_MEDIA)
                        trace_cxl_general_media(cxlmd, type, cxlr, hpa,
                                                &evt->gen_media);
                else if (event_type == CXL_CPER_EVENT_DRAM)
                        trace_cxl_dram(cxlmd, type, cxlr, hpa, &evt->dram);
        }
}
EXPORT_SYMBOL_NS_GPL(cxl_event_trace_record, CXL);

static void __cxl_event_trace_record(const struct cxl_memdev *cxlmd,
                                     enum cxl_event_log_type type,
                                     struct cxl_event_record_raw *record)
{
        enum cxl_event_type ev_type = CXL_CPER_EVENT_GENERIC;
        const uuid_t *uuid = &record->id;

        if (uuid_equal(uuid, &CXL_EVENT_GEN_MEDIA_UUID))
                ev_type = CXL_CPER_EVENT_GEN_MEDIA;
        else if (uuid_equal(uuid, &CXL_EVENT_DRAM_UUID))
                ev_type = CXL_CPER_EVENT_DRAM;
        else if (uuid_equal(uuid, &CXL_EVENT_MEM_MODULE_UUID))
                ev_type = CXL_CPER_EVENT_MEM_MODULE;

        cxl_event_trace_record(cxlmd, type, ev_type, uuid, &record->event);
}

static int cxl_clear_event_record(struct cxl_memdev_state *mds,
                                  enum cxl_event_log_type log,
                                  struct cxl_get_event_payload *get_pl)
{
        struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
        struct cxl_mbox_clear_event_payload *payload;
        u16 total = le16_to_cpu(get_pl->record_count);
        u8 max_handles = CXL_CLEAR_EVENT_MAX_HANDLES;
        size_t pl_size = struct_size(payload, handles, max_handles);
        struct cxl_mbox_cmd mbox_cmd;
        u16 cnt;
        int rc = 0;
        int i;

        /* Payload size may limit the max handles */
        if (pl_size > cxl_mbox->payload_size) {
                max_handles = (cxl_mbox->payload_size - sizeof(*payload)) /
                              sizeof(__le16);
                pl_size = struct_size(payload, handles, max_handles);
        }

        payload = kvzalloc(pl_size, GFP_KERNEL);
        if (!payload)
                return -ENOMEM;

        *payload = (struct cxl_mbox_clear_event_payload) {
                .event_log = log,
        };

        mbox_cmd = (struct cxl_mbox_cmd) {
                .opcode = CXL_MBOX_OP_CLEAR_EVENT_RECORD,
                .payload_in = payload,
                .size_in = pl_size,
        };

        /*
         * Clear Event Records uses a u8 for the handle count, while Get
         * Event Records can return up to 0xffff records.
         */
        i = 0;
        for (cnt = 0; cnt < total; cnt++) {
                struct cxl_event_record_raw *raw = &get_pl->records[cnt];
                struct cxl_event_generic *gen = &raw->event.generic;

                payload->handles[i++] = gen->hdr.handle;
                dev_dbg(mds->cxlds.dev, "Event log '%d': Clearing %u\n", log,
                        le16_to_cpu(payload->handles[i - 1]));

                if (i == max_handles) {
                        payload->nr_recs = i;
                        rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
                        if (rc)
                                goto free_pl;
                        i = 0;
                }
        }

        /* Clear whatever is left, if anything */
        if (i) {
                payload->nr_recs = i;
                mbox_cmd.size_in = struct_size(payload, handles, i);
                rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
                if (rc)
                        goto free_pl;
        }

free_pl:
        kvfree(payload);
        return rc;
}

static void cxl_mem_get_records_log(struct cxl_memdev_state *mds,
                                    enum cxl_event_log_type type)
{
        struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
        struct cxl_memdev *cxlmd = mds->cxlds.cxlmd;
        struct device *dev = mds->cxlds.dev;
        struct cxl_get_event_payload *payload;
        u8 log_type = type;
        u16 nr_rec;

        mutex_lock(&mds->event.log_lock);
        payload = mds->event.buf;

        do {
                int rc, i;
                struct cxl_mbox_cmd mbox_cmd = (struct cxl_mbox_cmd) {
                        .opcode = CXL_MBOX_OP_GET_EVENT_RECORD,
                        .payload_in = &log_type,
                        .size_in = sizeof(log_type),
                        .payload_out = payload,
                        .size_out = cxl_mbox->payload_size,
                        .min_out = struct_size(payload, records, 0),
                };

                rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
                if (rc) {
                        dev_err_ratelimited(dev,
                                "Event log '%d': Failed to query event records : %d",
                                type, rc);
                        break;
                }

                nr_rec = le16_to_cpu(payload->record_count);
                if (!nr_rec)
                        break;

                for (i = 0; i < nr_rec; i++)
                        __cxl_event_trace_record(cxlmd, type,
                                                 &payload->records[i]);

                if (payload->flags & CXL_GET_EVENT_FLAG_OVERFLOW)
                        trace_cxl_overflow(cxlmd, type, payload);

                rc = cxl_clear_event_record(mds, type, payload);
                if (rc) {
                        dev_err_ratelimited(dev,
                                "Event log '%d': Failed to clear events : %d",
                                type, rc);
                        break;
                }
        } while (nr_rec);

        mutex_unlock(&mds->event.log_lock);
}

/**
 * cxl_mem_get_event_records() - Get Event Records from the device
 * @mds: The driver data for the operation
 * @status: Event Status register value identifying which events are available.
 *
 * Retrieve all event records available on the device, report them as trace
 * events, and clear them.
 *
 * See CXL rev 3.0 @8.2.9.2.2 Get Event Records
 * See CXL rev 3.0 @8.2.9.2.3 Clear Event Records
 */
void cxl_mem_get_event_records(struct cxl_memdev_state *mds, u32 status)
{
        dev_dbg(mds->cxlds.dev, "Reading event logs: %x\n", status);

        if (status & CXLDEV_EVENT_STATUS_FATAL)
                cxl_mem_get_records_log(mds, CXL_EVENT_TYPE_FATAL);
        if (status & CXLDEV_EVENT_STATUS_FAIL)
                cxl_mem_get_records_log(mds, CXL_EVENT_TYPE_FAIL);
        if (status & CXLDEV_EVENT_STATUS_WARN)
                cxl_mem_get_records_log(mds, CXL_EVENT_TYPE_WARN);
        if (status & CXLDEV_EVENT_STATUS_INFO)
                cxl_mem_get_records_log(mds, CXL_EVENT_TYPE_INFO);
}
EXPORT_SYMBOL_NS_GPL(cxl_mem_get_event_records, CXL);

/**
 * cxl_mem_get_partition_info() - Get partition info
 * @mds: The driver data for the operation
 *
 * Retrieve the current partition info for the device specified.  The active
 * values are the current capacity in bytes.  If not 0, the 'next' values are
 * the pending values, in bytes, which take effect on next cold reset.
 *
 * Return: 0 if no error; otherwise the result of the mailbox command.
 *
 * See CXL @8.2.9.5.2.1 Get Partition Info
 */
static int cxl_mem_get_partition_info(struct cxl_memdev_state *mds)
{
        struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
        struct cxl_mbox_get_partition_info pi;
        struct cxl_mbox_cmd mbox_cmd;
        int rc;

        mbox_cmd = (struct cxl_mbox_cmd) {
                .opcode = CXL_MBOX_OP_GET_PARTITION_INFO,
                .size_out = sizeof(pi),
                .payload_out = &pi,
        };
        rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
        if (rc)
                return rc;

        mds->active_volatile_bytes =
                le64_to_cpu(pi.active_volatile_cap) * CXL_CAPACITY_MULTIPLIER;
        mds->active_persistent_bytes =
                le64_to_cpu(pi.active_persistent_cap) * CXL_CAPACITY_MULTIPLIER;
        mds->next_volatile_bytes =
                le64_to_cpu(pi.next_volatile_cap) * CXL_CAPACITY_MULTIPLIER;
        mds->next_persistent_bytes =
                le64_to_cpu(pi.next_persistent_cap) * CXL_CAPACITY_MULTIPLIER;

        return 0;
}
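
/*
 * Example: the partition capacity fields are encoded in
 * CXL_CAPACITY_MULTIPLIER units (256MB per the CXL capacity
 * granularity), so an active_volatile_cap of 4 reports
 * 4 * 256MB = 1GB of active volatile capacity.
 */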

/**
 * cxl_dev_state_identify() - Send the IDENTIFY command to the device.
 * @mds: The driver data for the operation
 *
 * Return: 0 if identify was executed successfully or media not ready.
 *
 * This will dispatch the identify command to the device and on success populate
 * structures to be exported to sysfs.
 */
int cxl_dev_state_identify(struct cxl_memdev_state *mds)
{
        struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
        /* See CXL 2.0 Table 175 Identify Memory Device Output Payload */
        struct cxl_mbox_identify id;
        struct cxl_mbox_cmd mbox_cmd;
        u32 val;
        int rc;

        if (!mds->cxlds.media_ready)
                return 0;

        mbox_cmd = (struct cxl_mbox_cmd) {
                .opcode = CXL_MBOX_OP_IDENTIFY,
                .size_out = sizeof(id),
                .payload_out = &id,
        };
        rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
        if (rc < 0)
                return rc;

        mds->total_bytes =
                le64_to_cpu(id.total_capacity) * CXL_CAPACITY_MULTIPLIER;
        mds->volatile_only_bytes =
                le64_to_cpu(id.volatile_capacity) * CXL_CAPACITY_MULTIPLIER;
        mds->persistent_only_bytes =
                le64_to_cpu(id.persistent_capacity) * CXL_CAPACITY_MULTIPLIER;
        mds->partition_align_bytes =
                le64_to_cpu(id.partition_align) * CXL_CAPACITY_MULTIPLIER;

        mds->lsa_size = le32_to_cpu(id.lsa_size);
        memcpy(mds->firmware_version, id.fw_revision,
               sizeof(id.fw_revision));

        if (test_bit(CXL_POISON_ENABLED_LIST, mds->poison.enabled_cmds)) {
                val = get_unaligned_le24(id.poison_list_max_mer);
                mds->poison.max_errors = min_t(u32, val, CXL_POISON_LIST_MAX);
        }

        return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_dev_state_identify, CXL);

static int __cxl_mem_sanitize(struct cxl_memdev_state *mds, u16 cmd)
{
        struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
        int rc;
        u32 sec_out = 0;
        struct cxl_get_security_output {
                __le32 flags;
        } out;
        struct cxl_mbox_cmd sec_cmd = {
                .opcode = CXL_MBOX_OP_GET_SECURITY_STATE,
                .payload_out = &out,
                .size_out = sizeof(out),
        };
        struct cxl_mbox_cmd mbox_cmd = { .opcode = cmd };

        if (cmd != CXL_MBOX_OP_SANITIZE && cmd != CXL_MBOX_OP_SECURE_ERASE)
                return -EINVAL;

        rc = cxl_internal_send_cmd(cxl_mbox, &sec_cmd);
        if (rc < 0) {
                dev_err(cxl_mbox->host, "Failed to get security state: %d", rc);
                return rc;
        }

        /*
         * Prior to using these commands, any security applied to
         * the user data areas of the device shall be DISABLED (or
         * UNLOCKED for the secure erase case).
         */
        sec_out = le32_to_cpu(out.flags);
        if (sec_out & CXL_PMEM_SEC_STATE_USER_PASS_SET)
                return -EINVAL;

        if (cmd == CXL_MBOX_OP_SECURE_ERASE &&
            sec_out & CXL_PMEM_SEC_STATE_LOCKED)
                return -EINVAL;

        rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
        if (rc < 0) {
                dev_err(cxl_mbox->host, "Failed to sanitize device: %d", rc);
                return rc;
        }

        return 0;
}

/**
 * cxl_mem_sanitize() - Send a sanitization command to the device.
 * @cxlmd: The device for the operation
 * @cmd: The specific sanitization command opcode
 *
 * Return: 0 if the command was executed successfully, regardless of
 * whether or not the actual security operation is done in the background,
 * as is the case for Sanitize.
 * Error return values can be the result of the mailbox command, -EINVAL
 * when security requirements are not met or the context is invalid, or
 * -EBUSY if the sanitize operation is already in flight.
 *
 * See CXL 3.0 @8.2.9.8.5.1 Sanitize and @8.2.9.8.5.2 Secure Erase.
 */
int cxl_mem_sanitize(struct cxl_memdev *cxlmd, u16 cmd)
{
        struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
        struct cxl_port *endpoint;
        int rc;

        /* synchronize with cxl_mem_probe() and decoder write operations */
        guard(device)(&cxlmd->dev);
        endpoint = cxlmd->endpoint;
        down_read(&cxl_region_rwsem);
        /*
         * Require an endpoint to be safe; otherwise the driver cannot
         * be sure that the device is unmapped.
         */
        if (endpoint && cxl_num_decoders_committed(endpoint) == 0)
                rc = __cxl_mem_sanitize(mds, cmd);
        else
                rc = -EBUSY;
        up_read(&cxl_region_rwsem);

        return rc;
}

static int add_dpa_res(struct device *dev, struct resource *parent,
                       struct resource *res, resource_size_t start,
                       resource_size_t size, const char *type)
{
        int rc;

        res->name = type;
        res->start = start;
        res->end = start + size - 1;
        res->flags = IORESOURCE_MEM;
        if (resource_size(res) == 0) {
                dev_dbg(dev, "DPA(%s): no capacity\n", res->name);
                return 0;
        }
        rc = request_resource(parent, res);
        if (rc) {
                dev_err(dev, "DPA(%s): failed to track %pr (%d)\n", res->name,
                        res, rc);
                return rc;
        }

        dev_dbg(dev, "DPA(%s): %pr\n", res->name, res);

        return 0;
}
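
/*
 * Illustrative layout (hypothetical device): 256MB of ram followed by
 * 256MB of pmem produces a DPA resource tree roughly like:
 *
 *        dpa_res: [mem 0x00000000-0x1fffffff]
 *           ram : [mem 0x00000000-0x0fffffff]
 *           pmem: [mem 0x10000000-0x1fffffff]
 */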

int cxl_mem_create_range_info(struct cxl_memdev_state *mds)
{
        struct cxl_dev_state *cxlds = &mds->cxlds;
        struct device *dev = cxlds->dev;
        int rc;

        if (!cxlds->media_ready) {
                cxlds->dpa_res = DEFINE_RES_MEM(0, 0);
                cxlds->ram_res = DEFINE_RES_MEM(0, 0);
                cxlds->pmem_res = DEFINE_RES_MEM(0, 0);
                return 0;
        }

        cxlds->dpa_res = DEFINE_RES_MEM(0, mds->total_bytes);

        if (mds->partition_align_bytes == 0) {
                rc = add_dpa_res(dev, &cxlds->dpa_res, &cxlds->ram_res, 0,
                                 mds->volatile_only_bytes, "ram");
                if (rc)
                        return rc;
                return add_dpa_res(dev, &cxlds->dpa_res, &cxlds->pmem_res,
                                   mds->volatile_only_bytes,
                                   mds->persistent_only_bytes, "pmem");
        }

        rc = cxl_mem_get_partition_info(mds);
        if (rc) {
                dev_err(dev, "Failed to query partition information\n");
                return rc;
        }

        rc = add_dpa_res(dev, &cxlds->dpa_res, &cxlds->ram_res, 0,
                         mds->active_volatile_bytes, "ram");
        if (rc)
                return rc;
        return add_dpa_res(dev, &cxlds->dpa_res, &cxlds->pmem_res,
                           mds->active_volatile_bytes,
                           mds->active_persistent_bytes, "pmem");
}
EXPORT_SYMBOL_NS_GPL(cxl_mem_create_range_info, CXL);

int cxl_set_timestamp(struct cxl_memdev_state *mds)
{
        struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
        struct cxl_mbox_cmd mbox_cmd;
        struct cxl_mbox_set_timestamp_in pi;
        int rc;

        pi.timestamp = cpu_to_le64(ktime_get_real_ns());
        mbox_cmd = (struct cxl_mbox_cmd) {
                .opcode = CXL_MBOX_OP_SET_TIMESTAMP,
                .size_in = sizeof(pi),
                .payload_in = &pi,
        };

        rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
        /*
         * The command is optional. Devices may have another way of providing
         * a timestamp, or may return all 0s in timestamp fields.
         * Don't report an error if this command isn't supported.
         */
        if (rc && (mbox_cmd.return_code != CXL_MBOX_CMD_RC_UNSUPPORTED))
                return rc;

        return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_set_timestamp, CXL);

int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
                       struct cxl_region *cxlr)
{
        struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
        struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox;
        struct cxl_mbox_poison_out *po;
        struct cxl_mbox_poison_in pi;
        int nr_records = 0;
        int rc;

        rc = mutex_lock_interruptible(&mds->poison.lock);
        if (rc)
                return rc;

        po = mds->poison.list_out;
        pi.offset = cpu_to_le64(offset);
        pi.length = cpu_to_le64(len / CXL_POISON_LEN_MULT);

        do {
                struct cxl_mbox_cmd mbox_cmd = (struct cxl_mbox_cmd){
                        .opcode = CXL_MBOX_OP_GET_POISON,
                        .size_in = sizeof(pi),
                        .payload_in = &pi,
                        .size_out = cxl_mbox->payload_size,
                        .payload_out = po,
                        .min_out = struct_size(po, record, 0),
                };

                rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
                if (rc)
                        break;

                for (int i = 0; i < le16_to_cpu(po->count); i++)
                        trace_cxl_poison(cxlmd, cxlr, &po->record[i],
                                         po->flags, po->overflow_ts,
                                         CXL_POISON_TRACE_LIST);

                /* Protect against an uncleared _FLAG_MORE */
                nr_records = nr_records + le16_to_cpu(po->count);
                if (nr_records >= mds->poison.max_errors) {
                        dev_dbg(&cxlmd->dev, "Max Error Records reached: %d\n",
                                nr_records);
                        break;
                }
        } while (po->flags & CXL_POISON_FLAG_MORE);

        mutex_unlock(&mds->poison.lock);
        return rc;
}
EXPORT_SYMBOL_NS_GPL(cxl_mem_get_poison, CXL);

static void free_poison_buf(void *buf)
{
        kvfree(buf);
}

/* Get Poison List output buffer is protected by mds->poison.lock */
static int cxl_poison_alloc_buf(struct cxl_memdev_state *mds)
{
        struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;

        mds->poison.list_out = kvmalloc(cxl_mbox->payload_size, GFP_KERNEL);
        if (!mds->poison.list_out)
                return -ENOMEM;

        return devm_add_action_or_reset(mds->cxlds.dev, free_poison_buf,
                                        mds->poison.list_out);
}

int cxl_poison_state_init(struct cxl_memdev_state *mds)
{
        int rc;

        if (!test_bit(CXL_POISON_ENABLED_LIST, mds->poison.enabled_cmds))
                return 0;

        rc = cxl_poison_alloc_buf(mds);
        if (rc) {
                clear_bit(CXL_POISON_ENABLED_LIST, mds->poison.enabled_cmds);
                return rc;
        }

        mutex_init(&mds->poison.lock);
        return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_poison_state_init, CXL);

int cxl_mailbox_init(struct cxl_mailbox *cxl_mbox, struct device *host)
{
        if (!cxl_mbox || !host)
                return -EINVAL;

        cxl_mbox->host = host;
        mutex_init(&cxl_mbox->mbox_mutex);
        rcuwait_init(&cxl_mbox->mbox_wait);

        return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_mailbox_init, CXL);

struct cxl_memdev_state *cxl_memdev_state_create(struct device *dev)
{
        struct cxl_memdev_state *mds;

        mds = devm_kzalloc(dev, sizeof(*mds), GFP_KERNEL);
        if (!mds) {
                dev_err(dev, "No memory available\n");
                return ERR_PTR(-ENOMEM);
        }

        mutex_init(&mds->event.log_lock);
        mds->cxlds.dev = dev;
        mds->cxlds.reg_map.host = dev;
        mds->cxlds.reg_map.resource = CXL_RESOURCE_NONE;
        mds->cxlds.type = CXL_DEVTYPE_CLASSMEM;
        mds->ram_perf.qos_class = CXL_QOS_CLASS_INVALID;
        mds->pmem_perf.qos_class = CXL_QOS_CLASS_INVALID;

        return mds;
}
EXPORT_SYMBOL_NS_GPL(cxl_memdev_state_create, CXL);

void __init cxl_mbox_init(void)
{
        struct dentry *mbox_debugfs;

        mbox_debugfs = cxl_debugfs_create_dir("mbox");
        debugfs_create_bool("raw_allow_all", 0600, mbox_debugfs,
                            &cxl_raw_allow_all);
}