Merge branch 'for-6.5/cxl-type-2' into for-6.5/cxl
author Dan Williams <[email protected]>
Mon, 26 Jun 2023 00:16:51 +0000 (17:16 -0700)
committer Dan Williams <[email protected]>
Mon, 26 Jun 2023 00:16:51 +0000 (17:16 -0700)
Pick up the driver cleanups identified in preparation for CXL "type-2"
(accelerator) device support. The major change here, from a
conflict-generation perspective, is the split of 'struct cxl_memdev_state'
out of the core 'struct cxl_dev_state', since an accelerator may not care
about all the optional features that are standard on a CXL "type-3"
(host-only memory expander) device.

A silent conflict also occurs with the move of the endpoint port to be a
formal property of a 'struct cxl_memdev' rather than drvdata.
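
Concretely, the endpoint lookup changes from a drvdata fetch to a direct
member access; this is the pattern the memdev.c hunks apply:

	/* before: endpoint port stashed as drvdata on the memdev */
	struct cxl_port *port = dev_get_drvdata(&cxlmd->dev);

	/* after: endpoint is a first-class member of struct cxl_memdev */
	struct cxl_port *port = cxlmd->endpoint;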

drivers/cxl/core/mbox.c
drivers/cxl/core/memdev.c
drivers/cxl/cxl.h
drivers/cxl/cxlmem.h
drivers/cxl/pci.c
drivers/cxl/security.c
tools/testing/cxl/test/mem.c

diff --combined drivers/cxl/core/mbox.c
index 31b1ac4c206d0c56467c5fefac2376c58c704f65,1990a5940b7c102648e660fbbfb1b9e2f012a08e..d6d067fbee970e49670e92746145566ed6925332
@@@ -182,7 -182,7 +182,7 @@@ static const char *cxl_mem_opcode_to_na
  
  /**
   * cxl_internal_send_cmd() - Kernel internal interface to send a mailbox command
-  * @cxlds: The device data for the operation
+  * @mds: The driver data for the operation
   * @mbox_cmd: initialized command to execute
   *
   * Context: Any context.
   * error. While this distinction can be useful for commands from userspace, the
   * kernel will only be able to use results when both are successful.
   */
- int cxl_internal_send_cmd(struct cxl_dev_state *cxlds,
+ int cxl_internal_send_cmd(struct cxl_memdev_state *mds,
                          struct cxl_mbox_cmd *mbox_cmd)
  {
        size_t out_size, min_out;
        int rc;
  
-       if (mbox_cmd->size_in > cxlds->payload_size ||
-           mbox_cmd->size_out > cxlds->payload_size)
+       if (mbox_cmd->size_in > mds->payload_size ||
+           mbox_cmd->size_out > mds->payload_size)
                return -E2BIG;
  
        out_size = mbox_cmd->size_out;
        min_out = mbox_cmd->min_out;
-       rc = cxlds->mbox_send(cxlds, mbox_cmd);
+       rc = mds->mbox_send(mds, mbox_cmd);
        /*
         * EIO is reserved for a payload size mismatch and mbox_send()
         * may not return this error.
        if (rc)
                return rc;
  
 -      if (mbox_cmd->return_code != CXL_MBOX_CMD_RC_SUCCESS)
 +      if (mbox_cmd->return_code != CXL_MBOX_CMD_RC_SUCCESS &&
 +          mbox_cmd->return_code != CXL_MBOX_CMD_RC_BACKGROUND)
                return cxl_mbox_cmd_rc2errno(mbox_cmd);
  
        if (!out_size)
@@@ -298,7 -297,7 +298,7 @@@ static bool cxl_payload_from_user_allow
  }
  
  static int cxl_mbox_cmd_ctor(struct cxl_mbox_cmd *mbox,
-                            struct cxl_dev_state *cxlds, u16 opcode,
+                            struct cxl_memdev_state *mds, u16 opcode,
                             size_t in_size, size_t out_size, u64 in_payload)
  {
        *mbox = (struct cxl_mbox_cmd) {
                        return PTR_ERR(mbox->payload_in);
  
                if (!cxl_payload_from_user_allowed(opcode, mbox->payload_in)) {
-                       dev_dbg(cxlds->dev, "%s: input payload not allowed\n",
+                       dev_dbg(mds->cxlds.dev, "%s: input payload not allowed\n",
                                cxl_mem_opcode_to_name(opcode));
                        kvfree(mbox->payload_in);
                        return -EBUSY;
  
        /* Prepare to handle a full payload for variable sized output */
        if (out_size == CXL_VARIABLE_PAYLOAD)
-               mbox->size_out = cxlds->payload_size;
+               mbox->size_out = mds->payload_size;
        else
                mbox->size_out = out_size;
  
@@@ -344,7 -343,7 +344,7 @@@ static void cxl_mbox_cmd_dtor(struct cx
  
  static int cxl_to_mem_cmd_raw(struct cxl_mem_command *mem_cmd,
                              const struct cxl_send_command *send_cmd,
-                             struct cxl_dev_state *cxlds)
+                             struct cxl_memdev_state *mds)
  {
        if (send_cmd->raw.rsvd)
                return -EINVAL;
         * gets passed along without further checking, so it must be
         * validated here.
         */
-       if (send_cmd->out.size > cxlds->payload_size)
+       if (send_cmd->out.size > mds->payload_size)
                return -EINVAL;
  
        if (!cxl_mem_raw_command_allowed(send_cmd->raw.opcode))
                return -EPERM;
  
-       dev_WARN_ONCE(cxlds->dev, true, "raw command path used\n");
+       dev_WARN_ONCE(mds->cxlds.dev, true, "raw command path used\n");
  
        *mem_cmd = (struct cxl_mem_command) {
                .info = {
  
  static int cxl_to_mem_cmd(struct cxl_mem_command *mem_cmd,
                          const struct cxl_send_command *send_cmd,
-                         struct cxl_dev_state *cxlds)
+                         struct cxl_memdev_state *mds)
  {
        struct cxl_mem_command *c = &cxl_mem_commands[send_cmd->id];
        const struct cxl_command_info *info = &c->info;
                return -EINVAL;
  
        /* Check that the command is enabled for hardware */
-       if (!test_bit(info->id, cxlds->enabled_cmds))
+       if (!test_bit(info->id, mds->enabled_cmds))
                return -ENOTTY;
  
        /* Check that the command is not claimed for exclusive kernel use */
-       if (test_bit(info->id, cxlds->exclusive_cmds))
+       if (test_bit(info->id, mds->exclusive_cmds))
                return -EBUSY;
  
        /* Check the input buffer is the expected size */
  /**
   * cxl_validate_cmd_from_user() - Check fields for CXL_MEM_SEND_COMMAND.
   * @mbox_cmd: Sanitized and populated &struct cxl_mbox_cmd.
-  * @cxlds: The device data for the operation
+  * @mds: The driver data for the operation
   * @send_cmd: &struct cxl_send_command copied in from userspace.
   *
   * Return:
   * safe to send to the hardware.
   */
  static int cxl_validate_cmd_from_user(struct cxl_mbox_cmd *mbox_cmd,
-                                     struct cxl_dev_state *cxlds,
+                                     struct cxl_memdev_state *mds,
                                      const struct cxl_send_command *send_cmd)
  {
        struct cxl_mem_command mem_cmd;
         * supports, but output can be arbitrarily large (simply write out as
         * much data as the hardware provides).
         */
-       if (send_cmd->in.size > cxlds->payload_size)
+       if (send_cmd->in.size > mds->payload_size)
                return -EINVAL;
  
        /* Sanitize and construct a cxl_mem_command */
        if (send_cmd->id == CXL_MEM_COMMAND_ID_RAW)
-               rc = cxl_to_mem_cmd_raw(&mem_cmd, send_cmd, cxlds);
+               rc = cxl_to_mem_cmd_raw(&mem_cmd, send_cmd, mds);
        else
-               rc = cxl_to_mem_cmd(&mem_cmd, send_cmd, cxlds);
+               rc = cxl_to_mem_cmd(&mem_cmd, send_cmd, mds);
  
        if (rc)
                return rc;
  
        /* Sanitize and construct a cxl_mbox_cmd */
-       return cxl_mbox_cmd_ctor(mbox_cmd, cxlds, mem_cmd.opcode,
+       return cxl_mbox_cmd_ctor(mbox_cmd, mds, mem_cmd.opcode,
                                 mem_cmd.info.size_in, mem_cmd.info.size_out,
                                 send_cmd->in.payload);
  }
  int cxl_query_cmd(struct cxl_memdev *cxlmd,
                  struct cxl_mem_query_commands __user *q)
  {
+       struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
        struct device *dev = &cxlmd->dev;
        struct cxl_mem_command *cmd;
        u32 n_commands;
        cxl_for_each_cmd(cmd) {
                struct cxl_command_info info = cmd->info;
  
-               if (test_bit(info.id, cxlmd->cxlds->enabled_cmds))
+               if (test_bit(info.id, mds->enabled_cmds))
                        info.flags |= CXL_MEM_COMMAND_FLAG_ENABLED;
-               if (test_bit(info.id, cxlmd->cxlds->exclusive_cmds))
+               if (test_bit(info.id, mds->exclusive_cmds))
                        info.flags |= CXL_MEM_COMMAND_FLAG_EXCLUSIVE;
  
                if (copy_to_user(&q->commands[j++], &info, sizeof(info)))
  
  /**
   * handle_mailbox_cmd_from_user() - Dispatch a mailbox command for userspace.
-  * @cxlds: The device data for the operation
+  * @mds: The driver data for the operation
   * @mbox_cmd: The validated mailbox command.
   * @out_payload: Pointer to userspace's output payload.
   * @size_out: (Input) Max payload size to copy out.
   *
   * See cxl_send_cmd().
   */
- static int handle_mailbox_cmd_from_user(struct cxl_dev_state *cxlds,
+ static int handle_mailbox_cmd_from_user(struct cxl_memdev_state *mds,
                                        struct cxl_mbox_cmd *mbox_cmd,
                                        u64 out_payload, s32 *size_out,
                                        u32 *retval)
  {
-       struct device *dev = cxlds->dev;
+       struct device *dev = mds->cxlds.dev;
        int rc;
  
        dev_dbg(dev,
                cxl_mem_opcode_to_name(mbox_cmd->opcode),
                mbox_cmd->opcode, mbox_cmd->size_in);
  
-       rc = cxlds->mbox_send(cxlds, mbox_cmd);
+       rc = mds->mbox_send(mds, mbox_cmd);
        if (rc)
                goto out;
  
@@@ -577,7 -577,7 +578,7 @@@ out
  
  int cxl_send_cmd(struct cxl_memdev *cxlmd, struct cxl_send_command __user *s)
  {
-       struct cxl_dev_state *cxlds = cxlmd->cxlds;
+       struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
        struct device *dev = &cxlmd->dev;
        struct cxl_send_command send;
        struct cxl_mbox_cmd mbox_cmd;
        if (copy_from_user(&send, s, sizeof(send)))
                return -EFAULT;
  
-       rc = cxl_validate_cmd_from_user(&mbox_cmd, cxlmd->cxlds, &send);
+       rc = cxl_validate_cmd_from_user(&mbox_cmd, mds, &send);
        if (rc)
                return rc;
  
-       rc = handle_mailbox_cmd_from_user(cxlds, &mbox_cmd, send.out.payload,
+       rc = handle_mailbox_cmd_from_user(mds, &mbox_cmd, send.out.payload,
                                          &send.out.size, &send.retval);
        if (rc)
                return rc;
        return 0;
  }
  
- static int cxl_xfer_log(struct cxl_dev_state *cxlds, uuid_t *uuid, u32 *size, u8 *out)
+ static int cxl_xfer_log(struct cxl_memdev_state *mds, uuid_t *uuid,
+                       u32 *size, u8 *out)
  {
        u32 remaining = *size;
        u32 offset = 0;
  
        while (remaining) {
-               u32 xfer_size = min_t(u32, remaining, cxlds->payload_size);
+               u32 xfer_size = min_t(u32, remaining, mds->payload_size);
                struct cxl_mbox_cmd mbox_cmd;
                struct cxl_mbox_get_log log;
                int rc;
                        .payload_out = out,
                };
  
-               rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
+               rc = cxl_internal_send_cmd(mds, &mbox_cmd);
  
                /*
                 * The output payload length that indicates the number
  
  /**
   * cxl_walk_cel() - Walk through the Command Effects Log.
-  * @cxlds: The device data for the operation
+  * @mds: The driver data for the operation
   * @size: Length of the Command Effects Log.
   * @cel: CEL
   *
   * Iterate over each entry in the CEL and determine if the driver supports the
   * command. If so, the command is enabled for the device and can be used later.
   */
- static void cxl_walk_cel(struct cxl_dev_state *cxlds, size_t size, u8 *cel)
+ static void cxl_walk_cel(struct cxl_memdev_state *mds, size_t size, u8 *cel)
  {
        struct cxl_cel_entry *cel_entry;
        const int cel_entries = size / sizeof(*cel_entry);
+       struct device *dev = mds->cxlds.dev;
        int i;
  
        cel_entry = (struct cxl_cel_entry *) cel;
                struct cxl_mem_command *cmd = cxl_mem_find_command(opcode);
  
                if (!cmd && !cxl_is_poison_command(opcode)) {
-                       dev_dbg(cxlds->dev,
+                       dev_dbg(dev,
                                "Opcode 0x%04x unsupported by driver\n", opcode);
                        continue;
                }
  
                if (cmd)
-                       set_bit(cmd->info.id, cxlds->enabled_cmds);
+                       set_bit(cmd->info.id, mds->enabled_cmds);
  
                if (cxl_is_poison_command(opcode))
-                       cxl_set_poison_cmd_enabled(&cxlds->poison, opcode);
+                       cxl_set_poison_cmd_enabled(&mds->poison, opcode);
  
-               dev_dbg(cxlds->dev, "Opcode 0x%04x enabled\n", opcode);
+               dev_dbg(dev, "Opcode 0x%04x enabled\n", opcode);
        }
  }
  
- static struct cxl_mbox_get_supported_logs *cxl_get_gsl(struct cxl_dev_state *cxlds)
+ static struct cxl_mbox_get_supported_logs *cxl_get_gsl(struct cxl_memdev_state *mds)
  {
        struct cxl_mbox_get_supported_logs *ret;
        struct cxl_mbox_cmd mbox_cmd;
        int rc;
  
-       ret = kvmalloc(cxlds->payload_size, GFP_KERNEL);
+       ret = kvmalloc(mds->payload_size, GFP_KERNEL);
        if (!ret)
                return ERR_PTR(-ENOMEM);
  
        mbox_cmd = (struct cxl_mbox_cmd) {
                .opcode = CXL_MBOX_OP_GET_SUPPORTED_LOGS,
-               .size_out = cxlds->payload_size,
+               .size_out = mds->payload_size,
                .payload_out = ret,
                /* At least the record number field must be valid */
                .min_out = 2,
        };
-       rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
+       rc = cxl_internal_send_cmd(mds, &mbox_cmd);
        if (rc < 0) {
                kvfree(ret);
                return ERR_PTR(rc);
@@@ -730,22 -732,22 +733,22 @@@ static const uuid_t log_uuid[] = 
  
  /**
   * cxl_enumerate_cmds() - Enumerate commands for a device.
-  * @cxlds: The device data for the operation
+  * @mds: The driver data for the operation
   *
   * Returns 0 if enumerate completed successfully.
   *
   * CXL devices have optional support for certain commands. This function will
   * determine the set of supported commands for the hardware and update the
-  * enabled_cmds bitmap in the @cxlds.
+  * enabled_cmds bitmap in the @mds.
   */
- int cxl_enumerate_cmds(struct cxl_dev_state *cxlds)
+ int cxl_enumerate_cmds(struct cxl_memdev_state *mds)
  {
        struct cxl_mbox_get_supported_logs *gsl;
-       struct device *dev = cxlds->dev;
+       struct device *dev = mds->cxlds.dev;
        struct cxl_mem_command *cmd;
        int i, rc;
  
-       gsl = cxl_get_gsl(cxlds);
+       gsl = cxl_get_gsl(mds);
        if (IS_ERR(gsl))
                return PTR_ERR(gsl);
  
                        goto out;
                }
  
-               rc = cxl_xfer_log(cxlds, &uuid, &size, log);
+               rc = cxl_xfer_log(mds, &uuid, &size, log);
                if (rc) {
                        kvfree(log);
                        goto out;
                }
  
-               cxl_walk_cel(cxlds, size, log);
+               cxl_walk_cel(mds, size, log);
                kvfree(log);
  
                /* In case CEL was bogus, enable some default commands. */
                cxl_for_each_cmd(cmd)
                        if (cmd->flags & CXL_CMD_FLAG_FORCE_ENABLE)
-                               set_bit(cmd->info.id, cxlds->enabled_cmds);
+                               set_bit(cmd->info.id, mds->enabled_cmds);
  
                /* Found the required CEL */
                rc = 0;
@@@ -839,7 -841,7 +842,7 @@@ static void cxl_event_trace_record(cons
        }
  }
  
- static int cxl_clear_event_record(struct cxl_dev_state *cxlds,
+ static int cxl_clear_event_record(struct cxl_memdev_state *mds,
                                  enum cxl_event_log_type log,
                                  struct cxl_get_event_payload *get_pl)
  {
        int i;
  
        /* Payload size may limit the max handles */
-       if (pl_size > cxlds->payload_size) {
-               max_handles = (cxlds->payload_size - sizeof(*payload)) /
-                               sizeof(__le16);
+       if (pl_size > mds->payload_size) {
+               max_handles = (mds->payload_size - sizeof(*payload)) /
+                             sizeof(__le16);
                pl_size = struct_size(payload, handles, max_handles);
        }
  
        i = 0;
        for (cnt = 0; cnt < total; cnt++) {
                payload->handles[i++] = get_pl->records[cnt].hdr.handle;
-               dev_dbg(cxlds->dev, "Event log '%d': Clearing %u\n",
-                       log, le16_to_cpu(payload->handles[i]));
+               dev_dbg(mds->cxlds.dev, "Event log '%d': Clearing %u\n", log,
+                       le16_to_cpu(payload->handles[i]));
  
                if (i == max_handles) {
                        payload->nr_recs = i;
-                       rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
+                       rc = cxl_internal_send_cmd(mds, &mbox_cmd);
                        if (rc)
                                goto free_pl;
                        i = 0;
        if (i) {
                payload->nr_recs = i;
                mbox_cmd.size_in = struct_size(payload, handles, i);
-               rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
+               rc = cxl_internal_send_cmd(mds, &mbox_cmd);
                if (rc)
                        goto free_pl;
        }
@@@ -906,32 -908,34 +909,34 @@@ free_pl
        return rc;
  }
  
- static void cxl_mem_get_records_log(struct cxl_dev_state *cxlds,
+ static void cxl_mem_get_records_log(struct cxl_memdev_state *mds,
                                    enum cxl_event_log_type type)
  {
+       struct cxl_memdev *cxlmd = mds->cxlds.cxlmd;
+       struct device *dev = mds->cxlds.dev;
        struct cxl_get_event_payload *payload;
        struct cxl_mbox_cmd mbox_cmd;
        u8 log_type = type;
        u16 nr_rec;
  
-       mutex_lock(&cxlds->event.log_lock);
-       payload = cxlds->event.buf;
+       mutex_lock(&mds->event.log_lock);
+       payload = mds->event.buf;
  
        mbox_cmd = (struct cxl_mbox_cmd) {
                .opcode = CXL_MBOX_OP_GET_EVENT_RECORD,
                .payload_in = &log_type,
                .size_in = sizeof(log_type),
                .payload_out = payload,
-               .size_out = cxlds->payload_size,
+               .size_out = mds->payload_size,
                .min_out = struct_size(payload, records, 0),
        };
  
        do {
                int rc, i;
  
-               rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
+               rc = cxl_internal_send_cmd(mds, &mbox_cmd);
                if (rc) {
-                       dev_err_ratelimited(cxlds->dev,
+                       dev_err_ratelimited(dev,
                                "Event log '%d': Failed to query event records : %d",
                                type, rc);
                        break;
                        break;
  
                for (i = 0; i < nr_rec; i++)
-                       cxl_event_trace_record(cxlds->cxlmd, type,
+                       cxl_event_trace_record(cxlmd, type,
                                               &payload->records[i]);
  
                if (payload->flags & CXL_GET_EVENT_FLAG_OVERFLOW)
-                       trace_cxl_overflow(cxlds->cxlmd, type, payload);
+                       trace_cxl_overflow(cxlmd, type, payload);
  
-               rc = cxl_clear_event_record(cxlds, type, payload);
+               rc = cxl_clear_event_record(mds, type, payload);
                if (rc) {
-                       dev_err_ratelimited(cxlds->dev,
+                       dev_err_ratelimited(dev,
                                "Event log '%d': Failed to clear events : %d",
                                type, rc);
                        break;
                }
        } while (nr_rec);
  
-       mutex_unlock(&cxlds->event.log_lock);
+       mutex_unlock(&mds->event.log_lock);
  }
  
  /**
   * cxl_mem_get_event_records - Get Event Records from the device
-  * @cxlds: The device data for the operation
+  * @mds: The driver data for the operation
   * @status: Event Status register value identifying which events are available.
   *
   * Retrieve all event records available on the device, report them as trace
   * See CXL rev 3.0 @8.2.9.2.2 Get Event Records
   * See CXL rev 3.0 @8.2.9.2.3 Clear Event Records
   */
- void cxl_mem_get_event_records(struct cxl_dev_state *cxlds, u32 status)
+ void cxl_mem_get_event_records(struct cxl_memdev_state *mds, u32 status)
  {
-       dev_dbg(cxlds->dev, "Reading event logs: %x\n", status);
+       dev_dbg(mds->cxlds.dev, "Reading event logs: %x\n", status);
  
        if (status & CXLDEV_EVENT_STATUS_FATAL)
-               cxl_mem_get_records_log(cxlds, CXL_EVENT_TYPE_FATAL);
+               cxl_mem_get_records_log(mds, CXL_EVENT_TYPE_FATAL);
        if (status & CXLDEV_EVENT_STATUS_FAIL)
-               cxl_mem_get_records_log(cxlds, CXL_EVENT_TYPE_FAIL);
+               cxl_mem_get_records_log(mds, CXL_EVENT_TYPE_FAIL);
        if (status & CXLDEV_EVENT_STATUS_WARN)
-               cxl_mem_get_records_log(cxlds, CXL_EVENT_TYPE_WARN);
+               cxl_mem_get_records_log(mds, CXL_EVENT_TYPE_WARN);
        if (status & CXLDEV_EVENT_STATUS_INFO)
-               cxl_mem_get_records_log(cxlds, CXL_EVENT_TYPE_INFO);
+               cxl_mem_get_records_log(mds, CXL_EVENT_TYPE_INFO);
  }
  EXPORT_SYMBOL_NS_GPL(cxl_mem_get_event_records, CXL);
  
  /**
   * cxl_mem_get_partition_info - Get partition info
-  * @cxlds: The device data for the operation
+  * @mds: The driver data for the operation
   *
   * Retrieve the current partition info for the device specified.  The active
   * values are the current capacity in bytes.  If not 0, the 'next' values are
   *
   * See CXL @8.2.9.5.2.1 Get Partition Info
   */
- static int cxl_mem_get_partition_info(struct cxl_dev_state *cxlds)
+ static int cxl_mem_get_partition_info(struct cxl_memdev_state *mds)
  {
        struct cxl_mbox_get_partition_info pi;
        struct cxl_mbox_cmd mbox_cmd;
                .size_out = sizeof(pi),
                .payload_out = &pi,
        };
-       rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
+       rc = cxl_internal_send_cmd(mds, &mbox_cmd);
        if (rc)
                return rc;
  
-       cxlds->active_volatile_bytes =
+       mds->active_volatile_bytes =
                le64_to_cpu(pi.active_volatile_cap) * CXL_CAPACITY_MULTIPLIER;
-       cxlds->active_persistent_bytes =
+       mds->active_persistent_bytes =
                le64_to_cpu(pi.active_persistent_cap) * CXL_CAPACITY_MULTIPLIER;
-       cxlds->next_volatile_bytes =
+       mds->next_volatile_bytes =
                le64_to_cpu(pi.next_volatile_cap) * CXL_CAPACITY_MULTIPLIER;
-       cxlds->next_persistent_bytes =
+       mds->next_persistent_bytes =
                le64_to_cpu(pi.next_volatile_cap) * CXL_CAPACITY_MULTIPLIER;
  
        return 0;
  
  /**
   * cxl_dev_state_identify() - Send the IDENTIFY command to the device.
-  * @cxlds: The device data for the operation
+  * @mds: The driver data for the operation
   *
   * Return: 0 if identify was executed successfully or media not ready.
   *
   * This will dispatch the identify command to the device and on success populate
   * structures to be exported to sysfs.
   */
- int cxl_dev_state_identify(struct cxl_dev_state *cxlds)
+ int cxl_dev_state_identify(struct cxl_memdev_state *mds)
  {
        /* See CXL 2.0 Table 175 Identify Memory Device Output Payload */
        struct cxl_mbox_identify id;
        u32 val;
        int rc;
  
-       if (!cxlds->media_ready)
+       if (!mds->cxlds.media_ready)
                return 0;
  
        mbox_cmd = (struct cxl_mbox_cmd) {
                .size_out = sizeof(id),
                .payload_out = &id,
        };
-       rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
+       rc = cxl_internal_send_cmd(mds, &mbox_cmd);
        if (rc < 0)
                return rc;
  
-       cxlds->total_bytes =
+       mds->total_bytes =
                le64_to_cpu(id.total_capacity) * CXL_CAPACITY_MULTIPLIER;
-       cxlds->volatile_only_bytes =
+       mds->volatile_only_bytes =
                le64_to_cpu(id.volatile_capacity) * CXL_CAPACITY_MULTIPLIER;
-       cxlds->persistent_only_bytes =
+       mds->persistent_only_bytes =
                le64_to_cpu(id.persistent_capacity) * CXL_CAPACITY_MULTIPLIER;
-       cxlds->partition_align_bytes =
+       mds->partition_align_bytes =
                le64_to_cpu(id.partition_align) * CXL_CAPACITY_MULTIPLIER;
  
-       cxlds->lsa_size = le32_to_cpu(id.lsa_size);
-       memcpy(cxlds->firmware_version, id.fw_revision, sizeof(id.fw_revision));
+       mds->lsa_size = le32_to_cpu(id.lsa_size);
+       memcpy(mds->firmware_version, id.fw_revision,
+              sizeof(id.fw_revision));
  
-       if (test_bit(CXL_POISON_ENABLED_LIST, cxlds->poison.enabled_cmds)) {
+       if (test_bit(CXL_POISON_ENABLED_LIST, mds->poison.enabled_cmds)) {
                val = get_unaligned_le24(id.poison_list_max_mer);
-               cxlds->poison.max_errors = min_t(u32, val, CXL_POISON_LIST_MAX);
+               mds->poison.max_errors = min_t(u32, val, CXL_POISON_LIST_MAX);
        }
  
        return 0;
  }
  EXPORT_SYMBOL_NS_GPL(cxl_dev_state_identify, CXL);
  
-  * @cxlds: The device data for the operation
 +/**
 + * cxl_mem_sanitize() - Send a sanitization command to the device.
- int cxl_mem_sanitize(struct cxl_dev_state *cxlds, u16 cmd)
++ * @mds: The device data for the operation
 + * @cmd: The specific sanitization command opcode
 + *
 + * Return: 0 if the command was executed successfully, regardless of
 + * whether or not the actual security operation is done in the background,
 + * such as for the Sanitize case.
 + * Error return values can be the result of the mailbox command, or -EINVAL
 + * when security requirements are not met or the context is invalid.
 + *
 + * See CXL 3.0 @8.2.9.8.5.1 Sanitize and @8.2.9.8.5.2 Secure Erase.
 + */
-       rc = cxl_internal_send_cmd(cxlds, &sec_cmd);
++int cxl_mem_sanitize(struct cxl_memdev_state *mds, u16 cmd)
 +{
 +      int rc;
 +      u32 sec_out = 0;
 +      struct cxl_get_security_output {
 +              __le32 flags;
 +      } out;
 +      struct cxl_mbox_cmd sec_cmd = {
 +              .opcode = CXL_MBOX_OP_GET_SECURITY_STATE,
 +              .payload_out = &out,
 +              .size_out = sizeof(out),
 +      };
 +      struct cxl_mbox_cmd mbox_cmd = { .opcode = cmd };
++      struct cxl_dev_state *cxlds = &mds->cxlds;
 +
 +      if (cmd != CXL_MBOX_OP_SANITIZE && cmd != CXL_MBOX_OP_SECURE_ERASE)
 +              return -EINVAL;
 +
-       rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
++      rc = cxl_internal_send_cmd(mds, &sec_cmd);
 +      if (rc < 0) {
 +              dev_err(cxlds->dev, "Failed to get security state : %d", rc);
 +              return rc;
 +      }
 +
 +      /*
 +       * Prior to using these commands, any security applied to
 +       * the user data areas of the device shall be DISABLED (or
 +       * UNLOCKED for secure erase case).
 +       */
 +      sec_out = le32_to_cpu(out.flags);
 +      if (sec_out & CXL_PMEM_SEC_STATE_USER_PASS_SET)
 +              return -EINVAL;
 +
 +      if (cmd == CXL_MBOX_OP_SECURE_ERASE &&
 +          sec_out & CXL_PMEM_SEC_STATE_LOCKED)
 +              return -EINVAL;
 +
++      rc = cxl_internal_send_cmd(mds, &mbox_cmd);
 +      if (rc < 0) {
 +              dev_err(cxlds->dev, "Failed to sanitize device : %d", rc);
 +              return rc;
 +      }
 +
 +      return 0;
 +}
 +EXPORT_SYMBOL_NS_GPL(cxl_mem_sanitize, CXL);
 +
  static int add_dpa_res(struct device *dev, struct resource *parent,
                       struct resource *res, resource_size_t start,
                       resource_size_t size, const char *type)
        return 0;
  }
  
- int cxl_mem_create_range_info(struct cxl_dev_state *cxlds)
+ int cxl_mem_create_range_info(struct cxl_memdev_state *mds)
  {
+       struct cxl_dev_state *cxlds = &mds->cxlds;
        struct device *dev = cxlds->dev;
        int rc;
  
        }
  
        cxlds->dpa_res =
-               (struct resource)DEFINE_RES_MEM(0, cxlds->total_bytes);
+               (struct resource)DEFINE_RES_MEM(0, mds->total_bytes);
  
-       if (cxlds->partition_align_bytes == 0) {
+       if (mds->partition_align_bytes == 0) {
                rc = add_dpa_res(dev, &cxlds->dpa_res, &cxlds->ram_res, 0,
-                                cxlds->volatile_only_bytes, "ram");
+                                mds->volatile_only_bytes, "ram");
                if (rc)
                        return rc;
                return add_dpa_res(dev, &cxlds->dpa_res, &cxlds->pmem_res,
-                                  cxlds->volatile_only_bytes,
-                                  cxlds->persistent_only_bytes, "pmem");
+                                  mds->volatile_only_bytes,
+                                  mds->persistent_only_bytes, "pmem");
        }
  
-       rc = cxl_mem_get_partition_info(cxlds);
+       rc = cxl_mem_get_partition_info(mds);
        if (rc) {
                dev_err(dev, "Failed to query partition information\n");
                return rc;
        }
  
        rc = add_dpa_res(dev, &cxlds->dpa_res, &cxlds->ram_res, 0,
-                        cxlds->active_volatile_bytes, "ram");
+                        mds->active_volatile_bytes, "ram");
        if (rc)
                return rc;
        return add_dpa_res(dev, &cxlds->dpa_res, &cxlds->pmem_res,
-                          cxlds->active_volatile_bytes,
-                          cxlds->active_persistent_bytes, "pmem");
+                          mds->active_volatile_bytes,
+                          mds->active_persistent_bytes, "pmem");
  }
  EXPORT_SYMBOL_NS_GPL(cxl_mem_create_range_info, CXL);
  
- int cxl_set_timestamp(struct cxl_dev_state *cxlds)
+ int cxl_set_timestamp(struct cxl_memdev_state *mds)
  {
        struct cxl_mbox_cmd mbox_cmd;
        struct cxl_mbox_set_timestamp_in pi;
                .payload_in = &pi,
        };
  
-       rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
+       rc = cxl_internal_send_cmd(mds, &mbox_cmd);
        /*
         * Command is optional. Devices may have another way of providing
         * a timestamp, or may return all 0s in timestamp fields.
@@@ -1230,18 -1177,18 +1238,18 @@@ EXPORT_SYMBOL_NS_GPL(cxl_set_timestamp
  int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
                       struct cxl_region *cxlr)
  {
-       struct cxl_dev_state *cxlds = cxlmd->cxlds;
+       struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
        struct cxl_mbox_poison_out *po;
        struct cxl_mbox_poison_in pi;
        struct cxl_mbox_cmd mbox_cmd;
        int nr_records = 0;
        int rc;
  
-       rc = mutex_lock_interruptible(&cxlds->poison.lock);
+       rc = mutex_lock_interruptible(&mds->poison.lock);
        if (rc)
                return rc;
  
-       po = cxlds->poison.list_out;
+       po = mds->poison.list_out;
        pi.offset = cpu_to_le64(offset);
        pi.length = cpu_to_le64(len / CXL_POISON_LEN_MULT);
  
                .opcode = CXL_MBOX_OP_GET_POISON,
                .size_in = sizeof(pi),
                .payload_in = &pi,
-               .size_out = cxlds->payload_size,
+               .size_out = mds->payload_size,
                .payload_out = po,
                .min_out = struct_size(po, record, 0),
        };
  
        do {
-               rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
+               rc = cxl_internal_send_cmd(mds, &mbox_cmd);
                if (rc)
                        break;
  
  
                /* Protect against an uncleared _FLAG_MORE */
                nr_records = nr_records + le16_to_cpu(po->count);
-               if (nr_records >= cxlds->poison.max_errors) {
+               if (nr_records >= mds->poison.max_errors) {
                        dev_dbg(&cxlmd->dev, "Max Error Records reached: %d\n",
                                nr_records);
                        break;
                }
        } while (po->flags & CXL_POISON_FLAG_MORE);
  
-       mutex_unlock(&cxlds->poison.lock);
+       mutex_unlock(&mds->poison.lock);
        return rc;
  }
  EXPORT_SYMBOL_NS_GPL(cxl_mem_get_poison, CXL);
@@@ -1283,52 -1230,53 +1291,53 @@@ static void free_poison_buf(void *buf
        kvfree(buf);
  }
  
- /* Get Poison List output buffer is protected by cxlds->poison.lock */
- static int cxl_poison_alloc_buf(struct cxl_dev_state *cxlds)
+ /* Get Poison List output buffer is protected by mds->poison.lock */
+ static int cxl_poison_alloc_buf(struct cxl_memdev_state *mds)
  {
-       cxlds->poison.list_out = kvmalloc(cxlds->payload_size, GFP_KERNEL);
-       if (!cxlds->poison.list_out)
+       mds->poison.list_out = kvmalloc(mds->payload_size, GFP_KERNEL);
+       if (!mds->poison.list_out)
                return -ENOMEM;
  
-       return devm_add_action_or_reset(cxlds->dev, free_poison_buf,
-                                       cxlds->poison.list_out);
+       return devm_add_action_or_reset(mds->cxlds.dev, free_poison_buf,
+                                       mds->poison.list_out);
  }
  
- int cxl_poison_state_init(struct cxl_dev_state *cxlds)
+ int cxl_poison_state_init(struct cxl_memdev_state *mds)
  {
        int rc;
  
-       if (!test_bit(CXL_POISON_ENABLED_LIST, cxlds->poison.enabled_cmds))
+       if (!test_bit(CXL_POISON_ENABLED_LIST, mds->poison.enabled_cmds))
                return 0;
  
-       rc = cxl_poison_alloc_buf(cxlds);
+       rc = cxl_poison_alloc_buf(mds);
        if (rc) {
-               clear_bit(CXL_POISON_ENABLED_LIST, cxlds->poison.enabled_cmds);
+               clear_bit(CXL_POISON_ENABLED_LIST, mds->poison.enabled_cmds);
                return rc;
        }
  
-       mutex_init(&cxlds->poison.lock);
+       mutex_init(&mds->poison.lock);
        return 0;
  }
  EXPORT_SYMBOL_NS_GPL(cxl_poison_state_init, CXL);
  
- struct cxl_dev_state *cxl_dev_state_create(struct device *dev)
+ struct cxl_memdev_state *cxl_memdev_state_create(struct device *dev)
  {
-       struct cxl_dev_state *cxlds;
+       struct cxl_memdev_state *mds;
  
-       cxlds = devm_kzalloc(dev, sizeof(*cxlds), GFP_KERNEL);
-       if (!cxlds) {
+       mds = devm_kzalloc(dev, sizeof(*mds), GFP_KERNEL);
+       if (!mds) {
                dev_err(dev, "No memory available\n");
                return ERR_PTR(-ENOMEM);
        }
  
-       mutex_init(&cxlds->mbox_mutex);
-       mutex_init(&cxlds->event.log_lock);
-       cxlds->dev = dev;
+       mutex_init(&mds->mbox_mutex);
+       mutex_init(&mds->event.log_lock);
+       mds->cxlds.dev = dev;
+       mds->cxlds.type = CXL_DEVTYPE_CLASSMEM;
  
-       return cxlds;
+       return mds;
  }
- EXPORT_SYMBOL_NS_GPL(cxl_dev_state_create, CXL);
+ EXPORT_SYMBOL_NS_GPL(cxl_memdev_state_create, CXL);
  
  void __init cxl_mbox_init(void)
  {
diff --combined drivers/cxl/core/memdev.c
index fd2e6b0f79c038daf927e87862845d73dafa5295,65a685e5616f78e0d52abc8d02ec9131ecbb4e27..90237b9487a761c7e22f1073c7a4244741210ba4
@@@ -1,8 -1,6 +1,8 @@@
  // SPDX-License-Identifier: GPL-2.0-only
  /* Copyright(c) 2020 Intel Corporation. */
  
 +#include <linux/io-64-nonatomic-lo-hi.h>
 +#include <linux/firmware.h>
  #include <linux/device.h>
  #include <linux/slab.h>
  #include <linux/idr.h>
@@@ -41,8 -39,11 +41,11 @@@ static ssize_t firmware_version_show(st
  {
        struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
        struct cxl_dev_state *cxlds = cxlmd->cxlds;
+       struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
  
-       return sysfs_emit(buf, "%.16s\n", cxlds->firmware_version);
+       if (!mds)
+               return sysfs_emit(buf, "\n");
+       return sysfs_emit(buf, "%.16s\n", mds->firmware_version);
  }
  static DEVICE_ATTR_RO(firmware_version);
  
@@@ -51,8 -52,11 +54,11 @@@ static ssize_t payload_max_show(struct 
  {
        struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
        struct cxl_dev_state *cxlds = cxlmd->cxlds;
+       struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
  
-       return sysfs_emit(buf, "%zu\n", cxlds->payload_size);
+       if (!mds)
+               return sysfs_emit(buf, "\n");
+       return sysfs_emit(buf, "%zu\n", mds->payload_size);
  }
  static DEVICE_ATTR_RO(payload_max);
  
@@@ -61,8 -65,11 +67,11 @@@ static ssize_t label_storage_size_show(
  {
        struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
        struct cxl_dev_state *cxlds = cxlmd->cxlds;
+       struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
  
-       return sysfs_emit(buf, "%zu\n", cxlds->lsa_size);
+       if (!mds)
+               return sysfs_emit(buf, "\n");
+       return sysfs_emit(buf, "%zu\n", mds->lsa_size);
  }
  static DEVICE_ATTR_RO(label_storage_size);
  
@@@ -109,88 -116,6 +118,89 @@@ static ssize_t numa_node_show(struct de
  }
  static DEVICE_ATTR_RO(numa_node);
  
-       unsigned long state = cxlds->security.state;
 +static ssize_t security_state_show(struct device *dev,
 +                                 struct device_attribute *attr,
 +                                 char *buf)
 +{
 +      struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
 +      struct cxl_dev_state *cxlds = cxlmd->cxlds;
-       struct cxl_dev_state *cxlds = cxlmd->cxlds;
-       struct cxl_port *port = dev_get_drvdata(&cxlmd->dev);
-       ssize_t rc;
++      struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
 +      u64 reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_BG_CMD_STATUS_OFFSET);
 +      u32 pct = FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_PCT_MASK, reg);
 +      u16 cmd = FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_OPCODE_MASK, reg);
++      unsigned long state = mds->security.state;
 +
 +      if (cmd == CXL_MBOX_OP_SANITIZE && pct != 100)
 +              return sysfs_emit(buf, "sanitize\n");
 +
 +      if (!(state & CXL_PMEM_SEC_STATE_USER_PASS_SET))
 +              return sysfs_emit(buf, "disabled\n");
 +      if (state & CXL_PMEM_SEC_STATE_FROZEN ||
 +          state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT ||
 +          state & CXL_PMEM_SEC_STATE_USER_PLIMIT)
 +              return sysfs_emit(buf, "frozen\n");
 +      if (state & CXL_PMEM_SEC_STATE_LOCKED)
 +              return sysfs_emit(buf, "locked\n");
 +      else
 +              return sysfs_emit(buf, "unlocked\n");
 +}
 +static struct device_attribute dev_attr_security_state =
 +      __ATTR(state, 0444, security_state_show, NULL);
 +
 +static ssize_t security_sanitize_store(struct device *dev,
 +                                     struct device_attribute *attr,
 +                                     const char *buf, size_t len)
 +{
 +      struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
-       rc = cxl_mem_sanitize(cxlds, CXL_MBOX_OP_SANITIZE);
++      struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
++      struct cxl_port *port = cxlmd->endpoint;
 +      bool sanitize;
++      ssize_t rc;
 +
 +      if (kstrtobool(buf, &sanitize) || !sanitize)
 +              return -EINVAL;
 +
 +      if (!port || !is_cxl_endpoint(port))
 +              return -EINVAL;
 +
 +      /* ensure no regions are mapped to this memdev */
 +      if (port->commit_end != -1)
 +              return -EBUSY;
 +
-       struct cxl_dev_state *cxlds = cxlmd->cxlds;
-       struct cxl_port *port = dev_get_drvdata(&cxlmd->dev);
++      rc = cxl_mem_sanitize(mds, CXL_MBOX_OP_SANITIZE);
 +
 +      return rc ? rc : len;
 +}
 +static struct device_attribute dev_attr_security_sanitize =
 +      __ATTR(sanitize, 0200, NULL, security_sanitize_store);
 +
 +static ssize_t security_erase_store(struct device *dev,
 +                                  struct device_attribute *attr,
 +                                  const char *buf, size_t len)
 +{
 +      struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
-       rc = cxl_mem_sanitize(cxlds, CXL_MBOX_OP_SECURE_ERASE);
++      struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
++      struct cxl_port *port = cxlmd->endpoint;
 +      ssize_t rc;
 +      bool erase;
 +
 +      if (kstrtobool(buf, &erase) || !erase)
 +              return -EINVAL;
 +
 +      if (!port || !is_cxl_endpoint(port))
 +              return -EINVAL;
 +
 +      /* ensure no regions are mapped to this memdev */
 +      if (port->commit_end != -1)
 +              return -EBUSY;
 +
++      rc = cxl_mem_sanitize(mds, CXL_MBOX_OP_SECURE_ERASE);
 +
 +      return rc ? rc : len;
 +}
 +static struct device_attribute dev_attr_security_erase =
 +      __ATTR(erase, 0200, NULL, security_erase_store);
 +
  static int cxl_get_poison_by_memdev(struct cxl_memdev *cxlmd)
  {
        struct cxl_dev_state *cxlds = cxlmd->cxlds;
@@@ -224,7 -149,7 +234,7 @@@ int cxl_trigger_poison_list(struct cxl_
        struct cxl_port *port;
        int rc;
  
-       port = dev_get_drvdata(&cxlmd->dev);
+       port = cxlmd->endpoint;
        if (!port || !is_cxl_endpoint(port))
                return -EINVAL;
  
@@@ -282,7 -207,7 +292,7 @@@ static struct cxl_region *cxl_dpa_to_re
        ctx = (struct cxl_dpa_to_region_context) {
                .dpa = dpa,
        };
-       port = dev_get_drvdata(&cxlmd->dev);
+       port = cxlmd->endpoint;
        if (port && is_cxl_endpoint(port) && port->commit_end != -1)
                device_for_each_child(&port->dev, &ctx, __cxl_dpa_to_region);
  
@@@ -315,7 -240,7 +325,7 @@@ static int cxl_validate_poison_dpa(stru
  
  int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa)
  {
-       struct cxl_dev_state *cxlds = cxlmd->cxlds;
+       struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
        struct cxl_mbox_inject_poison inject;
        struct cxl_poison_record record;
        struct cxl_mbox_cmd mbox_cmd;
                .size_in = sizeof(inject),
                .payload_in = &inject,
        };
-       rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
+       rc = cxl_internal_send_cmd(mds, &mbox_cmd);
        if (rc)
                goto out;
  
        cxlr = cxl_dpa_to_region(cxlmd, dpa);
        if (cxlr)
-               dev_warn_once(cxlds->dev,
+               dev_warn_once(mds->cxlds.dev,
                              "poison inject dpa:%#llx region: %s\n", dpa,
                              dev_name(&cxlr->dev));
  
@@@ -363,7 -288,7 +373,7 @@@ EXPORT_SYMBOL_NS_GPL(cxl_inject_poison
  
  int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
  {
-       struct cxl_dev_state *cxlds = cxlmd->cxlds;
+       struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
        struct cxl_mbox_clear_poison clear;
        struct cxl_poison_record record;
        struct cxl_mbox_cmd mbox_cmd;
                .payload_in = &clear,
        };
  
-       rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
+       rc = cxl_internal_send_cmd(mds, &mbox_cmd);
        if (rc)
                goto out;
  
        cxlr = cxl_dpa_to_region(cxlmd, dpa);
        if (cxlr)
-               dev_warn_once(cxlds->dev, "poison clear dpa:%#llx region: %s\n",
-                             dpa, dev_name(&cxlr->dev));
+               dev_warn_once(mds->cxlds.dev,
+                             "poison clear dpa:%#llx region: %s\n", dpa,
+                             dev_name(&cxlr->dev));
  
        record = (struct cxl_poison_record) {
                .address = cpu_to_le64(dpa),
@@@ -436,13 -362,6 +447,13 @@@ static struct attribute *cxl_memdev_ram
        NULL,
  };
  
 +static struct attribute *cxl_memdev_security_attributes[] = {
 +      &dev_attr_security_state.attr,
 +      &dev_attr_security_sanitize.attr,
 +      &dev_attr_security_erase.attr,
 +      NULL,
 +};
 +
  static umode_t cxl_memdev_visible(struct kobject *kobj, struct attribute *a,
                                  int n)
  {
@@@ -466,16 -385,10 +477,16 @@@ static struct attribute_group cxl_memde
        .attrs = cxl_memdev_pmem_attributes,
  };
  
 +static struct attribute_group cxl_memdev_security_attribute_group = {
 +      .name = "security",
 +      .attrs = cxl_memdev_security_attributes,
 +};
 +
  static const struct attribute_group *cxl_memdev_attribute_groups[] = {
        &cxl_memdev_attribute_group,
        &cxl_memdev_ram_attribute_group,
        &cxl_memdev_pmem_attribute_group,
 +      &cxl_memdev_security_attribute_group,
        NULL,
  };
  
@@@ -494,17 -407,18 +505,18 @@@ EXPORT_SYMBOL_NS_GPL(is_cxl_memdev, CXL
  
  /**
   * set_exclusive_cxl_commands() - atomically disable user cxl commands
-  * @cxlds: The device state to operate on
+  * @mds: The device state to operate on
   * @cmds: bitmap of commands to mark exclusive
   *
   * Grab the cxl_memdev_rwsem in write mode to flush in-flight
   * invocations of the ioctl path and then disable future execution of
   * commands with the command ids set in @cmds.
   */
- void set_exclusive_cxl_commands(struct cxl_dev_state *cxlds, unsigned long *cmds)
+ void set_exclusive_cxl_commands(struct cxl_memdev_state *mds,
+                               unsigned long *cmds)
  {
        down_write(&cxl_memdev_rwsem);
-       bitmap_or(cxlds->exclusive_cmds, cxlds->exclusive_cmds, cmds,
+       bitmap_or(mds->exclusive_cmds, mds->exclusive_cmds, cmds,
                  CXL_MEM_COMMAND_ID_MAX);
        up_write(&cxl_memdev_rwsem);
  }
@@@ -512,33 -426,24 +524,34 @@@ EXPORT_SYMBOL_NS_GPL(set_exclusive_cxl_
  
  /**
   * clear_exclusive_cxl_commands() - atomically enable user cxl commands
-  * @cxlds: The device state to modify
+  * @mds: The device state to modify
   * @cmds: bitmap of commands to mark available for userspace
   */
- void clear_exclusive_cxl_commands(struct cxl_dev_state *cxlds, unsigned long *cmds)
+ void clear_exclusive_cxl_commands(struct cxl_memdev_state *mds,
+                                 unsigned long *cmds)
  {
        down_write(&cxl_memdev_rwsem);
-       bitmap_andnot(cxlds->exclusive_cmds, cxlds->exclusive_cmds, cmds,
+       bitmap_andnot(mds->exclusive_cmds, mds->exclusive_cmds, cmds,
                      CXL_MEM_COMMAND_ID_MAX);
        up_write(&cxl_memdev_rwsem);
  }
  EXPORT_SYMBOL_NS_GPL(clear_exclusive_cxl_commands, CXL);
  
-       struct cxl_dev_state *cxlds = cxlmd->cxlds;
 +static void cxl_memdev_security_shutdown(struct device *dev)
 +{
 +      struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
-       if (cxlds->security.poll)
-               cancel_delayed_work_sync(&cxlds->security.poll_dwork);
++      struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
 +
++      if (mds->security.poll)
++              cancel_delayed_work_sync(&mds->security.poll_dwork);
 +}
 +
  static void cxl_memdev_shutdown(struct device *dev)
  {
        struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
  
        down_write(&cxl_memdev_rwsem);
 +      cxl_memdev_security_shutdown(dev);
        cxlmd->cxlds = NULL;
        up_write(&cxl_memdev_rwsem);
  }
@@@ -618,10 -523,12 +631,12 @@@ static long cxl_memdev_ioctl(struct fil
                             unsigned long arg)
  {
        struct cxl_memdev *cxlmd = file->private_data;
+       struct cxl_dev_state *cxlds;
        int rc = -ENXIO;
  
        down_read(&cxl_memdev_rwsem);
-       if (cxlmd->cxlds)
+       cxlds = cxlmd->cxlds;
+       if (cxlds && cxlds->type == CXL_DEVTYPE_CLASSMEM)
                rc = __cxl_memdev_ioctl(cxlmd, cmd, arg);
        up_read(&cxl_memdev_rwsem);
  
@@@ -649,313 -556,6 +664,316 @@@ static int cxl_memdev_release_file(stru
        return 0;
  }
  
- static int cxl_mem_get_fw_info(struct cxl_dev_state *cxlds)
 +/**
 + * cxl_mem_get_fw_info - Get Firmware info
 + * @cxlds: The device data for the operation
 + *
 + * Retrieve firmware info for the device specified.
 + *
 + * Return: 0 if no error, or the result of the mailbox command.
 + *
 + * See CXL-3.0 8.2.9.3.1 Get FW Info
 + */
-       rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
++static int cxl_mem_get_fw_info(struct cxl_memdev_state *mds)
 +{
 +      struct cxl_mbox_get_fw_info info;
 +      struct cxl_mbox_cmd mbox_cmd;
 +      int rc;
 +
 +      mbox_cmd = (struct cxl_mbox_cmd) {
 +              .opcode = CXL_MBOX_OP_GET_FW_INFO,
 +              .size_out = sizeof(info),
 +              .payload_out = &info,
 +      };
 +
-       cxlds->fw.num_slots = info.num_slots;
-       cxlds->fw.cur_slot = FIELD_GET(CXL_FW_INFO_SLOT_INFO_CUR_MASK,
++      rc = cxl_internal_send_cmd(mds, &mbox_cmd);
 +      if (rc < 0)
 +              return rc;
 +
-  * @cxlds: The device data for the operation
++      mds->fw.num_slots = info.num_slots;
++      mds->fw.cur_slot = FIELD_GET(CXL_FW_INFO_SLOT_INFO_CUR_MASK,
 +                                     info.slot_info);
 +
 +      return 0;
 +}
 +
 +/**
 + * cxl_mem_activate_fw - Activate Firmware
- static int cxl_mem_activate_fw(struct cxl_dev_state *cxlds, int slot)
++ * @mds: The device data for the operation
 + * @slot: slot number to activate
 + *
 + * Activate firmware in a given slot for the device specified.
 + *
 + * Return: 0 if no error, or the result of the mailbox command.
 + *
 + * See CXL-3.0 8.2.9.3.3 Activate FW
 + */
-       if (slot == 0 || slot > cxlds->fw.num_slots)
++static int cxl_mem_activate_fw(struct cxl_memdev_state *mds, int slot)
 +{
 +      struct cxl_mbox_activate_fw activate;
 +      struct cxl_mbox_cmd mbox_cmd;
 +
-       return cxl_internal_send_cmd(cxlds, &mbox_cmd);
++      if (slot == 0 || slot > mds->fw.num_slots)
 +              return -EINVAL;
 +
 +      mbox_cmd = (struct cxl_mbox_cmd) {
 +              .opcode = CXL_MBOX_OP_ACTIVATE_FW,
 +              .size_in = sizeof(activate),
 +              .payload_in = &activate,
 +      };
 +
 +      /* Only offline activation supported for now */
 +      activate.action = CXL_FW_ACTIVATE_OFFLINE;
 +      activate.slot = slot;
 +
-  * @cxlds: The device data for the operation
++      return cxl_internal_send_cmd(mds, &mbox_cmd);
 +}
 +
 +/**
 + * cxl_mem_abort_fw_xfer - Abort an in-progress FW transfer
- static int cxl_mem_abort_fw_xfer(struct cxl_dev_state *cxlds)
++ * @mds: The device data for the operation
 + *
 + * Abort an in-progress firmware transfer for the device specified.
 + *
 + * Return: 0 if no error, or the result of the mailbox command.
 + *
 + * See CXL-3.0 8.2.9.3.2 Transfer FW
 + */
-       rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
++static int cxl_mem_abort_fw_xfer(struct cxl_memdev_state *mds)
 +{
 +      struct cxl_mbox_transfer_fw *transfer;
 +      struct cxl_mbox_cmd mbox_cmd;
 +      int rc;
 +
 +      transfer = kzalloc(struct_size(transfer, data, 0), GFP_KERNEL);
 +      if (!transfer)
 +              return -ENOMEM;
 +
 +      /* Set a 1s poll interval and a total wait time of 30s */
 +      mbox_cmd = (struct cxl_mbox_cmd) {
 +              .opcode = CXL_MBOX_OP_TRANSFER_FW,
 +              .size_in = sizeof(*transfer),
 +              .payload_in = transfer,
 +              .poll_interval_ms = 1000,
 +              .poll_count = 30,
 +      };
 +
 +      transfer->action = CXL_FW_TRANSFER_ACTION_ABORT;
 +
-       struct cxl_dev_state *cxlds = fwl->dd_handle;
++      rc = cxl_internal_send_cmd(mds, &mbox_cmd);
 +      kfree(transfer);
 +      return rc;
 +}
 +
 +static void cxl_fw_cleanup(struct fw_upload *fwl)
 +{
-       cxlds->fw.next_slot = 0;
++      struct cxl_memdev_state *mds = fwl->dd_handle;
 +
-       struct cxl_dev_state *cxlds = fwl->dd_handle;
++      mds->fw.next_slot = 0;
 +}
 +
 +static int cxl_fw_do_cancel(struct fw_upload *fwl)
 +{
-       rc = cxl_mem_abort_fw_xfer(cxlds);
++      struct cxl_memdev_state *mds = fwl->dd_handle;
++      struct cxl_dev_state *cxlds = &mds->cxlds;
 +      struct cxl_memdev *cxlmd = cxlds->cxlmd;
 +      int rc;
 +
-       struct cxl_dev_state *cxlds = fwl->dd_handle;
++      rc = cxl_mem_abort_fw_xfer(mds);
 +      if (rc < 0)
 +              dev_err(&cxlmd->dev, "Error aborting FW transfer: %d\n", rc);
 +
 +      return FW_UPLOAD_ERR_CANCELED;
 +}
 +
 +static enum fw_upload_err cxl_fw_prepare(struct fw_upload *fwl, const u8 *data,
 +                                       u32 size)
 +{
-       cxlds->fw.oneshot = struct_size(transfer, data, size) <
-                           cxlds->payload_size;
++      struct cxl_memdev_state *mds = fwl->dd_handle;
 +      struct cxl_mbox_transfer_fw *transfer;
 +
 +      if (!size)
 +              return FW_UPLOAD_ERR_INVALID_SIZE;
 +
-       if (cxl_mem_get_fw_info(cxlds))
++      mds->fw.oneshot = struct_size(transfer, data, size) <
++                          mds->payload_size;
 +
-       if (test_and_clear_bit(CXL_FW_CANCEL, cxlds->fw.state))
++      if (cxl_mem_get_fw_info(mds))
 +              return FW_UPLOAD_ERR_HW_ERROR;
 +
 +      /*
 +       * So far no state has been changed, hence no other cleanup is
 +       * necessary. Simply return the cancelled status.
 +       */
-       struct cxl_dev_state *cxlds = fwl->dd_handle;
++      if (test_and_clear_bit(CXL_FW_CANCEL, mds->fw.state))
 +              return FW_UPLOAD_ERR_CANCELED;
 +
 +      return FW_UPLOAD_ERR_NONE;
 +}
 +
 +static enum fw_upload_err cxl_fw_write(struct fw_upload *fwl, const u8 *data,
 +                                     u32 offset, u32 size, u32 *written)
 +{
-        * Pick transfer size based on cxlds->payload_size
-        * @size must bw 128-byte aligned, ->payload_size is a power of 2
-        * starting at 256 bytes, and sizeof(*transfer) is 128.
-        * These constraints imply that @cur_size will always be 128b aligned.
++      struct cxl_memdev_state *mds = fwl->dd_handle;
++      struct cxl_dev_state *cxlds = &mds->cxlds;
 +      struct cxl_memdev *cxlmd = cxlds->cxlmd;
 +      struct cxl_mbox_transfer_fw *transfer;
 +      struct cxl_mbox_cmd mbox_cmd;
 +      u32 cur_size, remaining;
 +      size_t size_in;
 +      int rc;
 +
 +      *written = 0;
 +
 +      /* Offset has to be aligned to 128B (CXL-3.0 8.2.9.3.2 Table 8-57) */
 +      if (!IS_ALIGNED(offset, CXL_FW_TRANSFER_ALIGNMENT)) {
 +              dev_err(&cxlmd->dev,
 +                      "misaligned offset for FW transfer slice (%u)\n",
 +                      offset);
 +              return FW_UPLOAD_ERR_RW_ERROR;
 +      }
 +
 +      /*
-       cur_size = min_t(size_t, size, cxlds->payload_size - sizeof(*transfer));
++       * Pick transfer size based on mds->payload_size. @size must be 128-byte
++       * aligned, ->payload_size is a power of 2 starting at 256 bytes, and
++       * sizeof(*transfer) is 128.  These constraints imply that @cur_size
++       * will always be 128b aligned.
 +       */
-       if (test_and_clear_bit(CXL_FW_CANCEL, cxlds->fw.state))
++      cur_size = min_t(size_t, size, mds->payload_size - sizeof(*transfer));
 +
 +      remaining = size - cur_size;
 +      size_in = struct_size(transfer, data, cur_size);
 +
-       cxlds->fw.next_slot = (cxlds->fw.cur_slot % cxlds->fw.num_slots) + 1;
++      if (test_and_clear_bit(CXL_FW_CANCEL, mds->fw.state))
 +              return cxl_fw_do_cancel(fwl);
 +
 +      /*
 +       * Slot numbers are 1-indexed
 +       * cur_slot is the 0-indexed next_slot (i.e. 'cur_slot - 1 + 1')
 +       * Check for rollover using modulo, and 1-index it by adding 1
 +       */
-       if (cxlds->fw.oneshot) {
++      mds->fw.next_slot = (mds->fw.cur_slot % mds->fw.num_slots) + 1;
 +
 +      /* Do the transfer via mailbox cmd */
 +      transfer = kzalloc(size_in, GFP_KERNEL);
 +      if (!transfer)
 +              return FW_UPLOAD_ERR_RW_ERROR;
 +
 +      transfer->offset = cpu_to_le32(offset / CXL_FW_TRANSFER_ALIGNMENT);
 +      memcpy(transfer->data, data + offset, cur_size);
-               transfer->slot = cxlds->fw.next_slot;
++      if (mds->fw.oneshot) {
 +              transfer->action = CXL_FW_TRANSFER_ACTION_FULL;
-                       transfer->slot = cxlds->fw.next_slot;
++              transfer->slot = mds->fw.next_slot;
 +      } else {
 +              if (offset == 0) {
 +                      transfer->action = CXL_FW_TRANSFER_ACTION_INITIATE;
 +              } else if (remaining == 0) {
 +                      transfer->action = CXL_FW_TRANSFER_ACTION_END;
-       rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
++                      transfer->slot = mds->fw.next_slot;
 +              } else {
 +                      transfer->action = CXL_FW_TRANSFER_ACTION_CONTINUE;
 +              }
 +      }
 +
 +      mbox_cmd = (struct cxl_mbox_cmd) {
 +              .opcode = CXL_MBOX_OP_TRANSFER_FW,
 +              .size_in = size_in,
 +              .payload_in = transfer,
 +              .poll_interval_ms = 1000,
 +              .poll_count = 30,
 +      };
 +
-       if (cxlds->fw.oneshot || remaining == 0) {
++      rc = cxl_internal_send_cmd(mds, &mbox_cmd);
 +      if (rc < 0) {
 +              rc = FW_UPLOAD_ERR_RW_ERROR;
 +              goto out_free;
 +      }
 +
 +      *written = cur_size;
 +
 +      /* Activate FW if oneshot or if the last slice was written */
-                       cxlds->fw.next_slot);
-               rc = cxl_mem_activate_fw(cxlds, cxlds->fw.next_slot);
++      if (mds->fw.oneshot || remaining == 0) {
 +              dev_dbg(&cxlmd->dev, "Activating firmware slot: %d\n",
-       struct cxl_dev_state *cxlds = fwl->dd_handle;
++                      mds->fw.next_slot);
++              rc = cxl_mem_activate_fw(mds, mds->fw.next_slot);
 +              if (rc < 0) {
 +                      dev_err(&cxlmd->dev, "Error activating firmware: %d\n",
 +                              rc);
 +                      rc = FW_UPLOAD_ERR_HW_ERROR;
 +                      goto out_free;
 +              }
 +      }
 +
 +      rc = FW_UPLOAD_ERR_NONE;
 +
 +out_free:
 +      kfree(transfer);
 +      return rc;
 +}
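
The slice sizing above leans on three invariants: ->payload_size is a power
of two of at least 256 bytes, the transfer header is exactly 128 bytes, and
@offset/@size arrive 128-byte aligned. A minimal userspace sketch with
hypothetical sizes (not part of the patch) showing why @cur_size stays
aligned, plus the 1-indexed slot rollover:

    #include <assert.h>
    #include <stddef.h>
    #include <stdio.h>

    #define CXL_FW_TRANSFER_ALIGNMENT 128

    int main(void)
    {
    	/* hypothetical values: ->payload_size is a power of 2, >= 256 */
    	size_t payload_size = 1 << 20;	/* 1 MiB mailbox */
    	size_t hdr = 128;		/* sizeof(struct cxl_mbox_transfer_fw) */
    	size_t size = 300 * 1024;	/* bytes left to send, 128b aligned */

    	/* a power of 2 minus 128 is still a multiple of 128 */
    	size_t max_slice = payload_size - hdr;
    	size_t cur_size = size < max_slice ? size : max_slice;

    	assert(cur_size % CXL_FW_TRANSFER_ALIGNMENT == 0);

    	/* 1-indexed slot rollover: with 3 slots, active slot 3 stages slot 1 */
    	int num_slots = 3, cur_slot = 3;
    	int next_slot = (cur_slot % num_slots) + 1;

    	printf("cur_size=%zu next_slot=%d\n", cur_size, next_slot);
    	return 0;
    }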
 +
 +static enum fw_upload_err cxl_fw_poll_complete(struct fw_upload *fwl)
 +{
-       if (test_and_clear_bit(CXL_FW_CANCEL, cxlds->fw.state))
++      struct cxl_memdev_state *mds = fwl->dd_handle;
 +
 +      /*
 +       * cxl_internal_send_cmd() handles background operations synchronously.
 +       * No need to wait for completions here - any errors would've been
 +       * reported and handled during the ->write() call(s).
 +       * Just check if a cancel request was received, and return success.
 +       */
-       struct cxl_dev_state *cxlds = fwl->dd_handle;
++      if (test_and_clear_bit(CXL_FW_CANCEL, mds->fw.state))
 +              return cxl_fw_do_cancel(fwl);
 +
 +      return FW_UPLOAD_ERR_NONE;
 +}
 +
 +static void cxl_fw_cancel(struct fw_upload *fwl)
 +{
-       set_bit(CXL_FW_CANCEL, cxlds->fw.state);
++      struct cxl_memdev_state *mds = fwl->dd_handle;
 +
- int cxl_memdev_setup_fw_upload(struct cxl_dev_state *cxlds)
++      set_bit(CXL_FW_CANCEL, mds->fw.state);
 +}
 +
 +static const struct fw_upload_ops cxl_memdev_fw_ops = {
 +        .prepare = cxl_fw_prepare,
 +        .write = cxl_fw_write,
 +        .poll_complete = cxl_fw_poll_complete,
 +        .cancel = cxl_fw_cancel,
 +        .cleanup = cxl_fw_cleanup,
 +};
 +
 +static void devm_cxl_remove_fw_upload(void *fwl)
 +{
 +      firmware_upload_unregister(fwl);
 +}
 +
-       if (!test_bit(CXL_MEM_COMMAND_ID_GET_FW_INFO, cxlds->enabled_cmds))
++int cxl_memdev_setup_fw_upload(struct cxl_memdev_state *mds)
 +{
++      struct cxl_dev_state *cxlds = &mds->cxlds;
 +      struct device *dev = &cxlds->cxlmd->dev;
 +      struct fw_upload *fwl;
 +      int rc;
 +
-                                      &cxl_memdev_fw_ops, cxlds);
++      if (!test_bit(CXL_MEM_COMMAND_ID_GET_FW_INFO, mds->enabled_cmds))
 +              return 0;
 +
 +      fwl = firmware_upload_register(THIS_MODULE, dev, dev_name(dev),
++                                     &cxl_memdev_fw_ops, mds);
 +      if (IS_ERR(fwl))
 +              return dev_err_probe(dev, PTR_ERR(fwl),
 +                                   "Failed to register firmware loader\n");
 +
 +      rc = devm_add_action_or_reset(cxlds->dev, devm_cxl_remove_fw_upload,
 +                                    fwl);
 +      if (rc)
 +              dev_err(dev,
 +                      "Failed to add firmware loader remove action: %d\n",
 +                      rc);
 +
 +      return rc;
 +}
 +EXPORT_SYMBOL_NS_GPL(cxl_memdev_setup_fw_upload, CXL);
 +
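For context, once registered the firmware-upload core drives these ops from
sysfs writes: ->prepare() once, ->write() in a loop until the image is
consumed, then ->poll_complete() and ->cleanup() (see
Documentation/driver-api/firmware/fw_upload.rst). A rough userspace sketch
of that write loop under those assumptions; fake_write() and all sizes here
are illustrative stand-ins, not kernel API:

    #include <stddef.h>
    #include <stdio.h>

    /* stand-in for a driver ->write() op; the device picks the slice size */
    static int fake_write(const char *data, unsigned int off,
    		      unsigned int size, unsigned int *written)
    {
    	*written = size < 4096 ? size : 4096;
    	printf("wrote %u bytes at offset %u\n", *written, off);
    	return 0;
    }

    int main(void)
    {
    	const char fw[16384] = { 0 };	/* pretend firmware image */
    	unsigned int offset = 0, size = sizeof(fw), written;

    	while (size) {		/* the core loops until the image is consumed */
    		if (fake_write(fw + offset, offset, size, &written))
    			return 1;
    		offset += written;
    		size -= written;
    	}
    	return 0;	/* then ->poll_complete() and ->cleanup() run */
    }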
  static const struct file_operations cxl_memdev_fops = {
        .owner = THIS_MODULE,
        .unlocked_ioctl = cxl_memdev_ioctl,
        .llseek = noop_llseek,
  };
  
-       struct cxl_dev_state *cxlds = data;
 +static void put_sanitize(void *data)
 +{
-       sysfs_put(cxlds->security.sanitize_node);
++      struct cxl_memdev_state *mds = data;
 +
-       cxlds->security.sanitize_node = sysfs_get_dirent(sec, "state");
++      sysfs_put(mds->security.sanitize_node);
 +}
 +
 +static int cxl_memdev_security_init(struct cxl_memdev *cxlmd)
 +{
 +      struct cxl_dev_state *cxlds = cxlmd->cxlds;
++      struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
 +      struct device *dev = &cxlmd->dev;
 +      struct kernfs_node *sec;
 +
 +      sec = sysfs_get_dirent(dev->kobj.sd, "security");
 +      if (!sec) {
 +              dev_err(dev, "sysfs_get_dirent 'security' failed\n");
 +              return -ENODEV;
 +      }
-       if (!cxlds->security.sanitize_node) {
++      mds->security.sanitize_node = sysfs_get_dirent(sec, "state");
 +      sysfs_put(sec);
-       return devm_add_action_or_reset(cxlds->dev, put_sanitize, cxlds);
++      if (!mds->security.sanitize_node) {
 +              dev_err(dev, "sysfs_get_dirent 'state' failed\n");
 +              return -ENODEV;
 +      }
 +
++      return devm_add_action_or_reset(cxlds->dev, put_sanitize, mds);
 +}
 +
  struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds)
  {
        struct cxl_memdev *cxlmd;
        if (rc)
                goto err;
  
 +      rc = cxl_memdev_security_init(cxlmd);
 +      if (rc)
 +              goto err;
 +
        rc = devm_add_action_or_reset(cxlds->dev, cxl_memdev_unregister, cxlmd);
        if (rc)
                return ERR_PTR(rc);
diff --combined drivers/cxl/cxl.h
index ec69bda93aeebe5588849814e1b88cb33fb9a673,f0c428cb9a7164eda5a5922588ccf9b2a9ae4d29..690dbcda02e5bea102c6a3a3bc4931477ce639eb
@@@ -56,7 -56,7 +56,7 @@@
  #define   CXL_HDM_DECODER0_CTRL_COMMIT BIT(9)
  #define   CXL_HDM_DECODER0_CTRL_COMMITTED BIT(10)
  #define   CXL_HDM_DECODER0_CTRL_COMMIT_ERROR BIT(11)
- #define   CXL_HDM_DECODER0_CTRL_TYPE BIT(12)
+ #define   CXL_HDM_DECODER0_CTRL_HOSTONLY BIT(12)
  #define CXL_HDM_DECODER0_TL_LOW(i) (0x20 * (i) + 0x24)
  #define CXL_HDM_DECODER0_TL_HIGH(i) (0x20 * (i) + 0x28)
  #define CXL_HDM_DECODER0_SKIP_LOW(i) CXL_HDM_DECODER0_TL_LOW(i)
@@@ -176,22 -176,14 +176,22 @@@ static inline int ways_to_eiw(unsigned 
  /* CXL 2.0 8.2.8.4 Mailbox Registers */
  #define CXLDEV_MBOX_CAPS_OFFSET 0x00
  #define   CXLDEV_MBOX_CAP_PAYLOAD_SIZE_MASK GENMASK(4, 0)
 +#define   CXLDEV_MBOX_CAP_BG_CMD_IRQ BIT(6)
 +#define   CXLDEV_MBOX_CAP_IRQ_MSGNUM_MASK GENMASK(10, 7)
  #define CXLDEV_MBOX_CTRL_OFFSET 0x04
  #define   CXLDEV_MBOX_CTRL_DOORBELL BIT(0)
 +#define   CXLDEV_MBOX_CTRL_BG_CMD_IRQ BIT(2)
  #define CXLDEV_MBOX_CMD_OFFSET 0x08
  #define   CXLDEV_MBOX_CMD_COMMAND_OPCODE_MASK GENMASK_ULL(15, 0)
  #define   CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK GENMASK_ULL(36, 16)
  #define CXLDEV_MBOX_STATUS_OFFSET 0x10
 +#define   CXLDEV_MBOX_STATUS_BG_CMD BIT(0)
  #define   CXLDEV_MBOX_STATUS_RET_CODE_MASK GENMASK_ULL(47, 32)
  #define CXLDEV_MBOX_BG_CMD_STATUS_OFFSET 0x18
 +#define   CXLDEV_MBOX_BG_CMD_COMMAND_OPCODE_MASK GENMASK_ULL(15, 0)
 +#define   CXLDEV_MBOX_BG_CMD_COMMAND_PCT_MASK GENMASK_ULL(22, 16)
 +#define   CXLDEV_MBOX_BG_CMD_COMMAND_RC_MASK GENMASK_ULL(47, 32)
 +#define   CXLDEV_MBOX_BG_CMD_COMMAND_VENDOR_MASK GENMASK_ULL(63, 48)
  #define CXLDEV_MBOX_PAYLOAD_OFFSET 0x20
  
  /*
@@@ -262,10 -254,10 +262,10 @@@ void cxl_probe_component_regs(struct de
  void cxl_probe_device_regs(struct device *dev, void __iomem *base,
                           struct cxl_device_reg_map *map);
  int cxl_map_component_regs(struct device *dev, struct cxl_component_regs *regs,
-                          struct cxl_register_map *map,
+                          const struct cxl_register_map *map,
                           unsigned long map_mask);
  int cxl_map_device_regs(struct device *dev, struct cxl_device_regs *regs,
-                       struct cxl_register_map *map);
+                       const struct cxl_register_map *map);
  
  enum cxl_regloc_type;
  int cxl_find_regblock(struct pci_dev *pdev, enum cxl_regloc_type type,
@@@ -298,8 -290,8 +298,8 @@@ resource_size_t cxl_rcrb_to_component(s
  #define CXL_DECODER_F_MASK  GENMASK(5, 0)
  
  enum cxl_decoder_type {
-        CXL_DECODER_ACCELERATOR = 2,
-        CXL_DECODER_EXPANDER = 3,
+       CXL_DECODER_DEVMEM = 2,
+       CXL_DECODER_HOSTONLYMEM = 3,
  };
  
  /*
@@@ -718,7 -710,6 +718,6 @@@ struct cxl_endpoint_dvsec_info 
  struct cxl_hdm;
  struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
                                   struct cxl_endpoint_dvsec_info *info);
- int devm_cxl_enable_hdm(struct cxl_port *port, struct cxl_hdm *cxlhdm);
  int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
                                struct cxl_endpoint_dvsec_info *info);
  int devm_cxl_add_passthrough_decoder(struct cxl_port *port);
diff --combined drivers/cxl/cxlmem.h
index ce6f085e5ba87ae589900fab09565a0b957a4509,9aa8876a4eeac8b563ed0a48e8fac984d2a08162..25234a491371c094241d0d0b9ed0571661e260fc
@@@ -5,7 -5,6 +5,7 @@@
  #include <uapi/linux/cxl_mem.h>
  #include <linux/cdev.h>
  #include <linux/uuid.h>
 +#include <linux/rcuwait.h>
  #include "cxl.h"
  
  /* CXL 2.0 8.2.8.5.1.1 Memory Device Status Register */
@@@ -39,6 -38,7 +39,7 @@@
   * @detach_work: active memdev lost a port in its ancestry
   * @cxl_nvb: coordinate removal of @cxl_nvd if present
   * @cxl_nvd: optional bridge to an nvdimm if the device supports pmem
+  * @endpoint: connection to the CXL port topology for this memory device
   * @id: id number of this memdev instance.
   * @depth: endpoint port depth
   */
@@@ -49,6 -49,7 +50,7 @@@ struct cxl_memdev 
        struct work_struct detach_work;
        struct cxl_nvdimm_bridge *cxl_nvb;
        struct cxl_nvdimm *cxl_nvd;
+       struct cxl_port *endpoint;
        int id;
        int depth;
  };
@@@ -83,7 -84,6 +85,8 @@@ static inline bool is_cxl_endpoint(stru
  }
  
  struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds);
- int cxl_memdev_setup_fw_upload(struct cxl_dev_state *cxlds);
++struct cxl_memdev_state;
++int cxl_memdev_setup_fw_upload(struct cxl_memdev_state *mds);
  int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
                         resource_size_t base, resource_size_t len,
                         resource_size_t skipped);
@@@ -110,9 -110,6 +113,9 @@@ static inline struct cxl_ep *cxl_ep_loa
   *            variable sized output commands, it tells the exact number of bytes
   *            written.
   * @min_out: (input) internal command output payload size validation
 + * @poll_count: (input) Number of timeouts to attempt.
 + * @poll_interval_ms: (input) Time between mailbox background command polling
 + *                    interval timeouts.
   * @return_code: (output) Error code returned from hardware.
   *
   * This is the primary mechanism used to send commands to the hardware.
@@@ -128,8 -125,6 +131,8 @@@ struct cxl_mbox_cmd 
        size_t size_in;
        size_t size_out;
        size_t min_out;
 +      int poll_count;
 +      int poll_interval_ms;
        u16 return_code;
  };
  
@@@ -202,7 -197,7 +205,7 @@@ static inline int cxl_mbox_cmd_rc2errno
   */
  #define CXL_CAPACITY_MULTIPLIER SZ_256M
  
- /**
+ /*
   * Event Interrupt Policy
   *
   * CXL rev 3.0 section 8.2.9.2.4; Table 8-52
@@@ -222,8 -217,8 +225,8 @@@ struct cxl_event_interrupt_policy 
  /**
   * struct cxl_event_state - Event log driver state
   *
-  * @event_buf: Buffer to receive event data
-  * @event_log_lock: Serialize event_buf and log use
+  * @buf: Buffer to receive event data
+  * @log_lock: Serialize @buf and log use
   */
  struct cxl_event_state {
        struct cxl_get_event_payload *buf;
@@@ -261,101 -256,20 +264,115 @@@ struct cxl_poison_state 
        struct mutex lock;  /* Protect reads of poison list */
  };
  
 +/*
 + * Get FW Info
 + * CXL rev 3.0 section 8.2.9.3.1; Table 8-56
 + */
 +struct cxl_mbox_get_fw_info {
 +      u8 num_slots;
 +      u8 slot_info;
 +      u8 activation_cap;
 +      u8 reserved[13];
 +      char slot_1_revision[16];
 +      char slot_2_revision[16];
 +      char slot_3_revision[16];
 +      char slot_4_revision[16];
 +} __packed;
 +
 +#define CXL_FW_INFO_SLOT_INFO_CUR_MASK                        GENMASK(2, 0)
 +#define CXL_FW_INFO_SLOT_INFO_NEXT_MASK                       GENMASK(5, 3)
 +#define CXL_FW_INFO_SLOT_INFO_NEXT_SHIFT              3
 +#define CXL_FW_INFO_ACTIVATION_CAP_HAS_LIVE_ACTIVATE  BIT(0)
 +
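The slot_info byte above packs the active slot into bits 2:0 and the staged
slot into bits 5:3. A standalone sketch of the decode with a hypothetical
register value (plain shifts standing in for FIELD_GET()):

    #include <stdio.h>

    /* plain-shift equivalents of the slot_info masks above */
    #define SLOT_CUR_MASK	0x07	/* bits 2:0: currently active slot */
    #define SLOT_NEXT_MASK	0x38	/* bits 5:3: slot staged for activation */
    #define SLOT_NEXT_SHIFT	3

    int main(void)
    {
    	unsigned char slot_info = 0x0a;	/* hypothetical: active=2, staged=1 */

    	printf("active slot: %d\n", slot_info & SLOT_CUR_MASK);
    	printf("staged slot: %d\n",
    	       (slot_info & SLOT_NEXT_MASK) >> SLOT_NEXT_SHIFT);
    	return 0;
    }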
 +/*
 + * Transfer FW Input Payload
 + * CXL rev 3.0 section 8.2.9.3.2; Table 8-57
 + */
 +struct cxl_mbox_transfer_fw {
 +      u8 action;
 +      u8 slot;
 +      u8 reserved[2];
 +      __le32 offset;
 +      u8 reserved2[0x78];
 +      u8 data[];
 +} __packed;
 +
 +#define CXL_FW_TRANSFER_ACTION_FULL   0x0
 +#define CXL_FW_TRANSFER_ACTION_INITIATE       0x1
 +#define CXL_FW_TRANSFER_ACTION_CONTINUE       0x2
 +#define CXL_FW_TRANSFER_ACTION_END    0x3
 +#define CXL_FW_TRANSFER_ACTION_ABORT  0x4
 +
 +/*
 + * CXL rev 3.0 section 8.2.9.3.2 mandates 128-byte alignment for FW packages
 + * and for each part transferred in a Transfer FW command.
 + */
 +#define CXL_FW_TRANSFER_ALIGNMENT     128
 +
 +/*
 + * Activate FW Input Payload
 + * CXL rev 3.0 section 8.2.9.3.3; Table 8-58
 + */
 +struct cxl_mbox_activate_fw {
 +      u8 action;
 +      u8 slot;
 +} __packed;
 +
 +#define CXL_FW_ACTIVATE_ONLINE                0x0
 +#define CXL_FW_ACTIVATE_OFFLINE               0x1
 +
 +/* FW state bits */
 +#define CXL_FW_STATE_BITS             32
 +#define CXL_FW_CANCEL         BIT(0)
 +
 +/**
 + * struct cxl_fw_state - Firmware upload / activation state
 + *
 + * @state: fw_uploader state bitmask
 + * @oneshot: whether the fw upload fits in a single transfer
 + * @num_slots: Number of FW slots available
 + * @cur_slot: Slot number currently active
 + * @next_slot: Slot number for the new firmware
 + */
 +struct cxl_fw_state {
 +      DECLARE_BITMAP(state, CXL_FW_STATE_BITS);
 +      bool oneshot;
 +      int num_slots;
 +      int cur_slot;
 +      int next_slot;
 +};
 +
 +/**
 + * struct cxl_security_state - Device security state
 + *
 + * @state: state of last security operation
 + * @poll: polling for sanitization is enabled, device has no mbox irq support
 + * @poll_tmo_secs: polling timeout
 + * @poll_dwork: polling work item
 + * @sanitize_node: sanitization sysfs file to notify
 + */
 +struct cxl_security_state {
 +      unsigned long state;
 +      bool poll;
 +      int poll_tmo_secs;
 +      struct delayed_work poll_dwork;
 +      struct kernfs_node *sanitize_node;
 +};
 +
+ /*
+  * enum cxl_devtype - delineate type-2 from a generic type-3 device
+  * @CXL_DEVTYPE_DEVMEM - Vendor specific CXL Type-2 device implementing HDM-D or
+  *                     HDM-DB, no requirement that this device implements a
+  *                     mailbox, or other memory-device-standard manageability
+  *                     flows.
+  * @CXL_DEVTYPE_CLASSMEM - Common class definition of a CXL Type-3 device with
+  *                       HDM-H and class-mandatory memory device registers
+  */
+ enum cxl_devtype {
+       CXL_DEVTYPE_DEVMEM,
+       CXL_DEVTYPE_CLASSMEM,
+ };
  /**
   * struct cxl_dev_state - The driver device state
   *
   * @cxl_dvsec: Offset to the PCIe device DVSEC
   * @rcd: operating in RCD mode (CXL 3.0 9.11.8 CXL Devices Attached to an RCH)
   * @media_ready: Indicate whether the device media is usable
+  * @dpa_res: Overall DPA resource tree for the device
+  * @pmem_res: Active Persistent memory capacity configuration
+  * @ram_res: Active Volatile memory capacity configuration
+  * @component_reg_phys: register base of component registers
+  * @serial: PCIe Device Serial Number
+  * @type: Generic Memory Class device or Vendor Specific Memory device
+  */
+ struct cxl_dev_state {
+       struct device *dev;
+       struct cxl_memdev *cxlmd;
+       struct cxl_regs regs;
+       int cxl_dvsec;
+       bool rcd;
+       bool media_ready;
+       struct resource dpa_res;
+       struct resource pmem_res;
+       struct resource ram_res;
+       resource_size_t component_reg_phys;
+       u64 serial;
+       enum cxl_devtype type;
+ };
+ /**
+  * struct cxl_memdev_state - Generic Type-3 Memory Device Class driver data
+  *
+  * CXL 8.1.12.1 PCI Header - Class Code Register Memory Device defines
+  * common memory device functionality like the presence of a mailbox and
+  * the functionality related to that, like Identify Memory Device and Get
+  * Partition Info.
+  * @cxlds: Core driver state common across Type-2 and Type-3 devices
   * @payload_size: Size of space for payload
   *                (CXL 2.0 8.2.8.4.3 Mailbox Capabilities Register)
   * @lsa_size: Size of Label Storage Area
   * @firmware_version: Firmware version for the memory device.
   * @enabled_cmds: Hardware commands found enabled in CEL.
   * @exclusive_cmds: Commands that are kernel-internal only
-  * @dpa_res: Overall DPA resource tree for the device
-  * @pmem_res: Active Persistent memory capacity configuration
-  * @ram_res: Active Volatile memory capacity configuration
   * @total_bytes: sum of all possible capacities
   * @volatile_only_bytes: hard volatile capacity
   * @persistent_only_bytes: hard persistent capacity
   * @active_persistent_bytes: sum of hard + soft persistent
   * @next_volatile_bytes: volatile capacity change pending device reset
   * @next_persistent_bytes: persistent capacity change pending device reset
-  * @component_reg_phys: register base of component registers
-  * @info: Cached DVSEC information about the device.
-  * @serial: PCIe Device Serial Number
   * @event: event log driver state
   * @poison: poison driver state info
 + * @fw: firmware upload / activation state
   * @mbox_send: @dev specific transport for transmitting mailbox commands
   *
-  * See section 8.2.9.5.2 Capacity Configuration and Label Storage for
+  * See CXL 3.0 8.2.9.8.2 Capacity Configuration and Label Storage for
   * details on capacity parameters.
   */
- struct cxl_dev_state {
-       struct device *dev;
-       struct cxl_memdev *cxlmd;
-       struct cxl_regs regs;
-       int cxl_dvsec;
-       bool rcd;
-       bool media_ready;
+ struct cxl_memdev_state {
+       struct cxl_dev_state cxlds;
        size_t payload_size;
        size_t lsa_size;
        struct mutex mbox_mutex; /* Protects device mailbox and firmware */
        char firmware_version[0x10];
        DECLARE_BITMAP(enabled_cmds, CXL_MEM_COMMAND_ID_MAX);
        DECLARE_BITMAP(exclusive_cmds, CXL_MEM_COMMAND_ID_MAX);
-       struct resource dpa_res;
-       struct resource pmem_res;
-       struct resource ram_res;
        u64 total_bytes;
        u64 volatile_only_bytes;
        u64 persistent_only_bytes;
        u64 partition_align_bytes;
        u64 active_volatile_bytes;
        u64 active_persistent_bytes;
        u64 next_volatile_bytes;
        u64 next_persistent_bytes;
-       resource_size_t component_reg_phys;
-       u64 serial;
        struct cxl_event_state event;
        struct cxl_poison_state poison;
-       int (*mbox_send)(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd);
 +      struct cxl_security_state security;
 +      struct cxl_fw_state fw;
 +
 +      struct rcuwait mbox_wait;
+       int (*mbox_send)(struct cxl_memdev_state *mds,
+                        struct cxl_mbox_cmd *cmd);
  };
  
+ static inline struct cxl_memdev_state *
+ to_cxl_memdev_state(struct cxl_dev_state *cxlds)
+ {
+       if (cxlds->type != CXL_DEVTYPE_CLASSMEM)
+               return NULL;
+       return container_of(cxlds, struct cxl_memdev_state, cxlds);
+ }
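
to_cxl_memdev_state() is a guarded container_of() downcast: a
CXL_DEVTYPE_DEVMEM (type-2) device never yields memdev-only state. A
self-contained userspace sketch of the same pattern, with simplified
stand-in types:

    #include <stddef.h>
    #include <stdio.h>

    /* simplified stand-ins for the kernel types */
    enum devtype { DEVTYPE_DEVMEM, DEVTYPE_CLASSMEM };

    struct dev_state { enum devtype type; };
    struct memdev_state { struct dev_state cxlds; int payload_size; };

    #define container_of(ptr, type, member) \
    	((type *)((char *)(ptr) - offsetof(type, member)))

    static struct memdev_state *to_memdev_state(struct dev_state *cxlds)
    {
    	if (cxlds->type != DEVTYPE_CLASSMEM)
    		return NULL;	/* type-2: no memdev-only state to find */
    	return container_of(cxlds, struct memdev_state, cxlds);
    }

    int main(void)
    {
    	struct memdev_state mds = {
    		.cxlds = { .type = DEVTYPE_CLASSMEM },
    		.payload_size = 256,
    	};

    	printf("payload_size=%d\n", to_memdev_state(&mds.cxlds)->payload_size);
    	return 0;
    }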
  enum cxl_opcode {
        CXL_MBOX_OP_INVALID             = 0x0000,
        CXL_MBOX_OP_RAW                 = CXL_MBOX_OP_INVALID,
        CXL_MBOX_OP_GET_EVT_INT_POLICY  = 0x0102,
        CXL_MBOX_OP_SET_EVT_INT_POLICY  = 0x0103,
        CXL_MBOX_OP_GET_FW_INFO         = 0x0200,
 +      CXL_MBOX_OP_TRANSFER_FW         = 0x0201,
        CXL_MBOX_OP_ACTIVATE_FW         = 0x0202,
        CXL_MBOX_OP_SET_TIMESTAMP       = 0x0301,
        CXL_MBOX_OP_GET_SUPPORTED_LOGS  = 0x0400,
        CXL_MBOX_OP_GET_SCAN_MEDIA_CAPS = 0x4303,
        CXL_MBOX_OP_SCAN_MEDIA          = 0x4304,
        CXL_MBOX_OP_GET_SCAN_MEDIA      = 0x4305,
 +      CXL_MBOX_OP_SANITIZE            = 0x4400,
 +      CXL_MBOX_OP_SECURE_ERASE        = 0x4401,
        CXL_MBOX_OP_GET_SECURITY_STATE  = 0x4500,
        CXL_MBOX_OP_SET_PASSPHRASE      = 0x4501,
        CXL_MBOX_OP_DISABLE_PASSPHRASE  = 0x4502,
@@@ -801,18 -724,20 +835,20 @@@ enum 
        CXL_PMEM_SEC_PASS_USER,
  };
  
- int cxl_internal_send_cmd(struct cxl_dev_state *cxlds,
+ int cxl_internal_send_cmd(struct cxl_memdev_state *mds,
                          struct cxl_mbox_cmd *cmd);
- int cxl_dev_state_identify(struct cxl_dev_state *cxlds);
+ int cxl_dev_state_identify(struct cxl_memdev_state *mds);
  int cxl_await_media_ready(struct cxl_dev_state *cxlds);
- int cxl_enumerate_cmds(struct cxl_dev_state *cxlds);
- int cxl_mem_create_range_info(struct cxl_dev_state *cxlds);
- struct cxl_dev_state *cxl_dev_state_create(struct device *dev);
- void set_exclusive_cxl_commands(struct cxl_dev_state *cxlds, unsigned long *cmds);
- void clear_exclusive_cxl_commands(struct cxl_dev_state *cxlds, unsigned long *cmds);
- void cxl_mem_get_event_records(struct cxl_dev_state *cxlds, u32 status);
- int cxl_set_timestamp(struct cxl_dev_state *cxlds);
- int cxl_poison_state_init(struct cxl_dev_state *cxlds);
+ int cxl_enumerate_cmds(struct cxl_memdev_state *mds);
+ int cxl_mem_create_range_info(struct cxl_memdev_state *mds);
+ struct cxl_memdev_state *cxl_memdev_state_create(struct device *dev);
+ void set_exclusive_cxl_commands(struct cxl_memdev_state *mds,
+                               unsigned long *cmds);
+ void clear_exclusive_cxl_commands(struct cxl_memdev_state *mds,
+                                 unsigned long *cmds);
+ void cxl_mem_get_event_records(struct cxl_memdev_state *mds, u32 status);
+ int cxl_set_timestamp(struct cxl_memdev_state *mds);
+ int cxl_poison_state_init(struct cxl_memdev_state *mds);
  int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
                       struct cxl_region *cxlr);
  int cxl_trigger_poison_list(struct cxl_memdev *cxlmd);
@@@ -831,8 -756,6 +867,8 @@@ static inline void cxl_mem_active_dec(v
  }
  #endif
  
- int cxl_mem_sanitize(struct cxl_dev_state *cxlds, u16 cmd);
++int cxl_mem_sanitize(struct cxl_memdev_state *mds, u16 cmd);
 +
  struct cxl_hdm {
        struct cxl_component_regs regs;
        unsigned int decoder_count;
diff --combined drivers/cxl/pci.c
index 4468f53ba5a89b1074cc198ea258b7a5d076162c,3f78082014cce9de37cc5fd3f3c03aef4aed9ef3..18cfb7ae17a3782ff783a031e21d59dd30eb0fec
@@@ -84,92 -84,9 +84,92 @@@ static int cxl_pci_mbox_wait_for_doorbe
                            status & CXLMDEV_DEV_FATAL ? " fatal" : "",        \
                            status & CXLMDEV_FW_HALT ? " firmware-halt" : "")
  
-               if (cxlds->security.sanitize_node)
-                       sysfs_notify_dirent(cxlds->security.sanitize_node);
 +struct cxl_dev_id {
 +      struct cxl_dev_state *cxlds;
 +};
 +
 +static int cxl_request_irq(struct cxl_dev_state *cxlds, int irq,
 +                         irq_handler_t handler, irq_handler_t thread_fn)
 +{
 +      struct device *dev = cxlds->dev;
 +      struct cxl_dev_id *dev_id;
 +
 +      /* dev_id must be globally unique and must contain the cxlds */
 +      dev_id = devm_kzalloc(dev, sizeof(*dev_id), GFP_KERNEL);
 +      if (!dev_id)
 +              return -ENOMEM;
 +      dev_id->cxlds = cxlds;
 +
 +      return devm_request_threaded_irq(dev, irq, handler, thread_fn,
 +                                       IRQF_SHARED | IRQF_ONESHOT,
 +                                       NULL, dev_id);
 +}
 +
 +static bool cxl_mbox_background_complete(struct cxl_dev_state *cxlds)
 +{
 +      u64 reg;
 +
 +      reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_BG_CMD_STATUS_OFFSET);
 +      return FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_PCT_MASK, reg) == 100;
 +}
 +
 +static irqreturn_t cxl_pci_mbox_irq(int irq, void *id)
 +{
 +      u64 reg;
 +      u16 opcode;
 +      struct cxl_dev_id *dev_id = id;
 +      struct cxl_dev_state *cxlds = dev_id->cxlds;
++      struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
 +
 +      if (!cxl_mbox_background_complete(cxlds))
 +              return IRQ_NONE;
 +
 +      reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_BG_CMD_STATUS_OFFSET);
 +      opcode = FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_OPCODE_MASK, reg);
 +      if (opcode == CXL_MBOX_OP_SANITIZE) {
-               rcuwait_wake_up(&cxlds->mbox_wait);
++              if (mds->security.sanitize_node)
++                      sysfs_notify_dirent(mds->security.sanitize_node);
 +
 +              dev_dbg(cxlds->dev, "Sanitization operation ended\n");
 +      } else {
 +              /* short-circuit the wait in __cxl_pci_mbox_send_cmd() */
-       struct cxl_dev_state *cxlds;
-       cxlds = container_of(work,
-                            struct cxl_dev_state, security.poll_dwork.work);
++              rcuwait_wake_up(&mds->mbox_wait);
 +      }
 +
 +      return IRQ_HANDLED;
 +}
 +
 +/*
 + * Sanitization operation polling mode.
 + */
 +static void cxl_mbox_sanitize_work(struct work_struct *work)
 +{
-       mutex_lock(&cxlds->mbox_mutex);
++      struct cxl_memdev_state *mds =
++              container_of(work, typeof(*mds), security.poll_dwork.work);
++      struct cxl_dev_state *cxlds = &mds->cxlds;
 +
-               cxlds->security.poll_tmo_secs = 0;
++      mutex_lock(&mds->mbox_mutex);
 +      if (cxl_mbox_background_complete(cxlds)) {
-               if (cxlds->security.sanitize_node)
-                       sysfs_notify_dirent(cxlds->security.sanitize_node);
++              mds->security.poll_tmo_secs = 0;
 +              put_device(cxlds->dev);
 +
-               int timeout = cxlds->security.poll_tmo_secs + 10;
++              if (mds->security.sanitize_node)
++                      sysfs_notify_dirent(mds->security.sanitize_node);
 +
 +              dev_dbg(cxlds->dev, "Sanitization operation ended\n");
 +      } else {
-               cxlds->security.poll_tmo_secs = min(15 * 60, timeout);
-               queue_delayed_work(system_wq, &cxlds->security.poll_dwork,
++              int timeout = mds->security.poll_tmo_secs + 10;
 +
-       mutex_unlock(&cxlds->mbox_mutex);
++              mds->security.poll_tmo_secs = min(15 * 60, timeout);
++              queue_delayed_work(system_wq, &mds->security.poll_dwork,
 +                                 timeout * HZ);
 +      }
++      mutex_unlock(&mds->mbox_mutex);
 +}
 +
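The polling worker rearms itself with a linear backoff: each pass adds 10
seconds to the previous interval, capped at 15 minutes (the initial 1 second
comes from the submission path further down). A standalone sketch of the
resulting schedule:

    #include <stdio.h>

    int main(void)
    {
    	/* 1s on submit, then +10s per pass, capped at 15 minutes */
    	int poll_tmo_secs = 1;

    	for (int pass = 1; pass <= 5; pass++) {
    		int timeout = poll_tmo_secs + 10;

    		poll_tmo_secs = timeout < 15 * 60 ? timeout : 15 * 60;
    		printf("pass %d: next poll in %d seconds\n",
    		       pass, poll_tmo_secs);
    	}
    	return 0;	/* prints 11, 21, 31, 41, 51 */
    }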
  /**
   * __cxl_pci_mbox_send_cmd() - Execute a mailbox command
-  * @cxlds: The device state to communicate with.
+  * @mds: The memory device driver data
   * @mbox_cmd: Command to send to the memory device.
   *
   * Context: Any context. Expects mbox_mutex to be held.
   * not need to coordinate with each other. The driver only uses the primary
   * mailbox.
   */
- static int __cxl_pci_mbox_send_cmd(struct cxl_dev_state *cxlds,
+ static int __cxl_pci_mbox_send_cmd(struct cxl_memdev_state *mds,
                                   struct cxl_mbox_cmd *mbox_cmd)
  {
+       struct cxl_dev_state *cxlds = &mds->cxlds;
        void __iomem *payload = cxlds->regs.mbox + CXLDEV_MBOX_PAYLOAD_OFFSET;
        struct device *dev = cxlds->dev;
        u64 cmd_reg, status_reg;
        size_t out_len;
        int rc;
  
-       lockdep_assert_held(&cxlds->mbox_mutex);
+       lockdep_assert_held(&mds->mbox_mutex);
  
        /*
         * Here are the steps from 8.2.8.4 of the CXL 2.0 spec.
                return -EBUSY;
        }
  
-       if (cxlds->security.poll_tmo_secs > 0) {
 +      /*
 +       * With sanitize polling, hardware might be done and the poller still
 +       * not be in sync. Ensure no new command comes in until then. Keep the
 +       * hardware semantics and only allow device health status.
 +       */
++      if (mds->security.poll_tmo_secs > 0) {
 +              if (mbox_cmd->opcode != CXL_MBOX_OP_GET_HEALTH_INFO)
 +                      return -EBUSY;
 +      }
 +
        cmd_reg = FIELD_PREP(CXLDEV_MBOX_CMD_COMMAND_OPCODE_MASK,
                             mbox_cmd->opcode);
        if (mbox_cmd->size_in) {
        mbox_cmd->return_code =
                FIELD_GET(CXLDEV_MBOX_STATUS_RET_CODE_MASK, status_reg);
  
-                       if (cxlds->security.poll_tmo_secs != -1) {
 +      /*
 +       * Handle the background command in a synchronous manner.
 +       *
 +       * All other mailbox commands will serialize/queue on the mbox_mutex,
 +       * which we currently hold. Furthermore this also guarantees that
 +       * cxl_mbox_background_complete() checks are safe amongst each other,
 +       * in that no new bg operation can occur in between.
 +       *
 +       * Background operations are timesliced in accordance with the nature
 +       * of the command. In the event of timeout, the mailbox state is
 +       * indeterminate until the next successful command submission and the
 +       * driver can get back in sync with the hardware state.
 +       */
 +      if (mbox_cmd->return_code == CXL_MBOX_CMD_RC_BACKGROUND) {
 +              u64 bg_status_reg;
 +              int i, timeout;
 +
 +              /*
 +               * Sanitization is a special case which monopolizes the device
 +               * and cannot be timesliced. Handle asynchronously instead,
 +               * and allow userspace to poll(2) for completion.
 +               */
 +              if (mbox_cmd->opcode == CXL_MBOX_OP_SANITIZE) {
-                               cxlds->security.poll_tmo_secs = timeout;
++                      if (mds->security.poll_tmo_secs != -1) {
 +                              /* hold the device throughout */
 +                              get_device(cxlds->dev);
 +
 +                              /* give first timeout a second */
 +                              timeout = 1;
-                                                  &cxlds->security.poll_dwork,
++                              mds->security.poll_tmo_secs = timeout;
 +                              queue_delayed_work(system_wq,
-                       if (rcuwait_wait_event_timeout(&cxlds->mbox_wait,
++                                                 &mds->security.poll_dwork,
 +                                                 timeout * HZ);
 +                      }
 +
 +                      dev_dbg(dev, "Sanitization operation started\n");
 +                      goto success;
 +              }
 +
 +              dev_dbg(dev, "Mailbox background operation (0x%04x) started\n",
 +                      mbox_cmd->opcode);
 +
 +              timeout = mbox_cmd->poll_interval_ms;
 +              for (i = 0; i < mbox_cmd->poll_count; i++) {
++                      if (rcuwait_wait_event_timeout(&mds->mbox_wait,
 +                                     cxl_mbox_background_complete(cxlds),
 +                                     TASK_UNINTERRUPTIBLE,
 +                                     msecs_to_jiffies(timeout)) > 0)
 +                              break;
 +              }
 +
 +              if (!cxl_mbox_background_complete(cxlds)) {
 +                      dev_err(dev, "timeout waiting for background (%d ms)\n",
 +                              timeout * mbox_cmd->poll_count);
 +                      return -ETIMEDOUT;
 +              }
 +
 +              bg_status_reg = readq(cxlds->regs.mbox +
 +                                    CXLDEV_MBOX_BG_CMD_STATUS_OFFSET);
 +              mbox_cmd->return_code =
 +                      FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_RC_MASK,
 +                                bg_status_reg);
 +              dev_dbg(dev,
 +                      "Mailbox background operation (0x%04x) completed\n",
 +                      mbox_cmd->opcode);
 +      }
 +
        if (mbox_cmd->return_code != CXL_MBOX_CMD_RC_SUCCESS) {
                dev_dbg(dev, "Mailbox operation had an error: %s\n",
                        cxl_mbox_cmd_rc2str(mbox_cmd));
                return 0; /* completed but caller must check return_code */
        }
  
 +success:
        /* #7 */
        cmd_reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_CMD_OFFSET);
        out_len = FIELD_GET(CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK, cmd_reg);
                 * have requested less data than the hardware supplied even
                 * within spec.
                 */
-               size_t n = min3(mbox_cmd->size_out, cxlds->payload_size, out_len);
+               size_t n;
  
+               n = min3(mbox_cmd->size_out, mds->payload_size, out_len);
                memcpy_fromio(mbox_cmd->payload_out, payload, n);
                mbox_cmd->size_out = n;
        } else {
        return 0;
  }
  
- static int cxl_pci_mbox_send(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
+ static int cxl_pci_mbox_send(struct cxl_memdev_state *mds,
+                            struct cxl_mbox_cmd *cmd)
  {
        int rc;
  
-       mutex_lock_io(&cxlds->mbox_mutex);
-       rc = __cxl_pci_mbox_send_cmd(cxlds, cmd);
-       mutex_unlock(&cxlds->mbox_mutex);
+       mutex_lock_io(&mds->mbox_mutex);
+       rc = __cxl_pci_mbox_send_cmd(mds, cmd);
+       mutex_unlock(&mds->mbox_mutex);
  
        return rc;
  }
  
- static int cxl_pci_setup_mailbox(struct cxl_dev_state *cxlds)
+ static int cxl_pci_setup_mailbox(struct cxl_memdev_state *mds)
  {
+       struct cxl_dev_state *cxlds = &mds->cxlds;
        const int cap = readl(cxlds->regs.mbox + CXLDEV_MBOX_CAPS_OFFSET);
+       struct device *dev = cxlds->dev;
        unsigned long timeout;
        u64 md_status;
  
        } while (!time_after(jiffies, timeout));
  
        if (!(md_status & CXLMDEV_MBOX_IF_READY)) {
-               cxl_err(cxlds->dev, md_status,
-                       "timeout awaiting mailbox ready");
+               cxl_err(dev, md_status, "timeout awaiting mailbox ready");
                return -ETIMEDOUT;
        }
  
         * source for future doorbell busy events.
         */
        if (cxl_pci_mbox_wait_for_doorbell(cxlds) != 0) {
-               cxl_err(cxlds->dev, md_status, "timeout awaiting mailbox idle");
+               cxl_err(dev, md_status, "timeout awaiting mailbox idle");
                return -ETIMEDOUT;
        }
  
-       cxlds->mbox_send = cxl_pci_mbox_send;
-       cxlds->payload_size =
+       mds->mbox_send = cxl_pci_mbox_send;
+       mds->payload_size =
                1 << FIELD_GET(CXLDEV_MBOX_CAP_PAYLOAD_SIZE_MASK, cap);
  
        /*
         * there's no point in going forward. If the size is too large, there's
         * no harm in soft limiting it.
         */
-       cxlds->payload_size = min_t(size_t, cxlds->payload_size, SZ_1M);
-       if (cxlds->payload_size < 256) {
-               dev_err(cxlds->dev, "Mailbox is too small (%zub)",
-                       cxlds->payload_size);
+       mds->payload_size = min_t(size_t, mds->payload_size, SZ_1M);
+       if (mds->payload_size < 256) {
+               dev_err(dev, "Mailbox is too small (%zub)",
+                       mds->payload_size);
                return -ENXIO;
        }
  
-       dev_dbg(cxlds->dev, "Mailbox payload sized %zu",
-               cxlds->payload_size);
+       dev_dbg(dev, "Mailbox payload sized %zu", mds->payload_size);
  
-       rcuwait_init(&cxlds->mbox_wait);
++      rcuwait_init(&mds->mbox_wait);
 +
 +      if (cap & CXLDEV_MBOX_CAP_BG_CMD_IRQ) {
 +              u32 ctrl;
 +              int irq, msgnum;
 +              struct pci_dev *pdev = to_pci_dev(cxlds->dev);
 +
 +              msgnum = FIELD_GET(CXLDEV_MBOX_CAP_IRQ_MSGNUM_MASK, cap);
 +              irq = pci_irq_vector(pdev, msgnum);
 +              if (irq < 0)
 +                      goto mbox_poll;
 +
 +              if (cxl_request_irq(cxlds, irq, cxl_pci_mbox_irq, NULL))
 +                      goto mbox_poll;
 +
 +              /* enable background command mbox irq support */
 +              ctrl = readl(cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);
 +              ctrl |= CXLDEV_MBOX_CTRL_BG_CMD_IRQ;
 +              writel(ctrl, cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);
 +
 +              return 0;
 +      }
 +
 +mbox_poll:
-       cxlds->security.poll = true;
-       INIT_DELAYED_WORK(&cxlds->security.poll_dwork, cxl_mbox_sanitize_work);
++      mds->security.poll = true;
++      INIT_DELAYED_WORK(&mds->security.poll_dwork, cxl_mbox_sanitize_work);
 +
 +      dev_dbg(cxlds->dev, "Mailbox interrupts are unsupported");
        return 0;
  }
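
The payload sizing above treats the capability field as an exponent: a
field value of n gives a 2^n-byte mailbox, soft-limited to 1 MiB and
rejected below 256 bytes. A standalone sketch with a hypothetical register
value:

    #include <stddef.h>
    #include <stdio.h>

    #define PAYLOAD_SIZE_MASK 0x1f	/* bits 4:0 of the capabilities register */

    int main(void)
    {
    	unsigned int cap = 0x14;	/* hypothetical: exponent field = 20 */
    	size_t payload = (size_t)1 << (cap & PAYLOAD_SIZE_MASK);

    	if (payload > (1ul << 20))	/* soft limit at 1 MiB */
    		payload = 1ul << 20;

    	printf("payload: %zu bytes, %s\n", payload,
    	       payload >= 256 ? "usable" : "too small (-ENXIO)");
    	return 0;
    }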
  
@@@ -554,19 -368,6 +557,6 @@@ static bool is_cxl_restricted(struct pc
        return pci_pcie_type(pdev) == PCI_EXP_TYPE_RC_END;
  }
  
- /*
-  * CXL v3.0 6.2.3 Table 6-4
-  * The table indicates that if PCIe Flit Mode is set, then CXL is in 256B flits
-  * mode, otherwise it's 68B flits mode.
-  */
- static bool cxl_pci_flit_256(struct pci_dev *pdev)
- {
-       u16 lnksta2;
-       pcie_capability_read_word(pdev, PCI_EXP_LNKSTA2, &lnksta2);
-       return lnksta2 & PCI_EXP_LNKSTA2_FLIT;
- }
  static int cxl_pci_ras_unmask(struct pci_dev *pdev)
  {
        struct pci_host_bridge *host_bridge = pci_find_host_bridge(pdev->bus);
                addr = cxlds->regs.ras + CXL_RAS_UNCORRECTABLE_MASK_OFFSET;
                orig_val = readl(addr);
  
-               mask = CXL_RAS_UNCORRECTABLE_MASK_MASK;
-               if (!cxl_pci_flit_256(pdev))
-                       mask &= ~CXL_RAS_UNCORRECTABLE_MASK_F256B_MASK;
+               mask = CXL_RAS_UNCORRECTABLE_MASK_MASK |
+                      CXL_RAS_UNCORRECTABLE_MASK_F256B_MASK;
                val = orig_val & ~mask;
                writel(val, addr);
                dev_dbg(&pdev->dev,
@@@ -622,18 -422,18 +611,18 @@@ static void free_event_buf(void *buf
  
  /*
   * There is a single buffer for reading event logs from the mailbox.  All logs
-  * share this buffer protected by the cxlds->event_log_lock.
+  * share this buffer protected by the mds->event.log_lock.
   */
- static int cxl_mem_alloc_event_buf(struct cxl_dev_state *cxlds)
+ static int cxl_mem_alloc_event_buf(struct cxl_memdev_state *mds)
  {
        struct cxl_get_event_payload *buf;
  
-       buf = kvmalloc(cxlds->payload_size, GFP_KERNEL);
+       buf = kvmalloc(mds->payload_size, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;
-       cxlds->event.buf = buf;
+       mds->event.buf = buf;
  
-       return devm_add_action_or_reset(cxlds->dev, free_event_buf, buf);
+       return devm_add_action_or_reset(mds->cxlds.dev, free_event_buf, buf);
  }
  
  static int cxl_alloc_irq_vectors(struct pci_dev *pdev)
        return 0;
  }
  
 -struct cxl_dev_id {
 -      struct cxl_dev_state *cxlds;
 -};
 -
  static irqreturn_t cxl_event_thread(int irq, void *id)
  {
        struct cxl_dev_id *dev_id = id;
        struct cxl_dev_state *cxlds = dev_id->cxlds;
+       struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
        u32 status;
  
        do {
                status &= CXLDEV_EVENT_STATUS_ALL;
                if (!status)
                        break;
-               cxl_mem_get_event_records(cxlds, status);
+               cxl_mem_get_event_records(mds, status);
                cond_resched();
        } while (status);
  
  
  static int cxl_event_req_irq(struct cxl_dev_state *cxlds, u8 setting)
  {
 -      struct device *dev = cxlds->dev;
 -      struct pci_dev *pdev = to_pci_dev(dev);
 -      struct cxl_dev_id *dev_id;
 +      struct pci_dev *pdev = to_pci_dev(cxlds->dev);
        int irq;
  
        if (FIELD_GET(CXLDEV_EVENT_INT_MODE_MASK, setting) != CXL_INT_MSI_MSIX)
                return -ENXIO;
  
 -      /* dev_id must be globally unique and must contain the cxlds */
 -      dev_id = devm_kzalloc(dev, sizeof(*dev_id), GFP_KERNEL);
 -      if (!dev_id)
 -              return -ENOMEM;
 -      dev_id->cxlds = cxlds;
 -
        irq =  pci_irq_vector(pdev,
                              FIELD_GET(CXLDEV_EVENT_INT_MSGNUM_MASK, setting));
        if (irq < 0)
                return irq;
  
 -      return devm_request_threaded_irq(dev, irq, NULL, cxl_event_thread,
 -                                       IRQF_SHARED | IRQF_ONESHOT, NULL,
 -                                       dev_id);
 +      return cxl_request_irq(cxlds, irq, NULL, cxl_event_thread);
  }
  
- static int cxl_event_get_int_policy(struct cxl_dev_state *cxlds,
+ static int cxl_event_get_int_policy(struct cxl_memdev_state *mds,
                                    struct cxl_event_interrupt_policy *policy)
  {
        struct cxl_mbox_cmd mbox_cmd = {
        };
        int rc;
  
-       rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
+       rc = cxl_internal_send_cmd(mds, &mbox_cmd);
        if (rc < 0)
-               dev_err(cxlds->dev, "Failed to get event interrupt policy : %d",
-                       rc);
+               dev_err(mds->cxlds.dev,
+                       "Failed to get event interrupt policy : %d", rc);
  
        return rc;
  }
  
- static int cxl_event_config_msgnums(struct cxl_dev_state *cxlds,
+ static int cxl_event_config_msgnums(struct cxl_memdev_state *mds,
                                    struct cxl_event_interrupt_policy *policy)
  {
        struct cxl_mbox_cmd mbox_cmd;
                .size_in = sizeof(*policy),
        };
  
-       rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
+       rc = cxl_internal_send_cmd(mds, &mbox_cmd);
        if (rc < 0) {
-               dev_err(cxlds->dev, "Failed to set event interrupt policy : %d",
+               dev_err(mds->cxlds.dev, "Failed to set event interrupt policy : %d",
                        rc);
                return rc;
        }
  
        /* Retrieve final interrupt settings */
-       return cxl_event_get_int_policy(cxlds, policy);
+       return cxl_event_get_int_policy(mds, policy);
  }
  
- static int cxl_event_irqsetup(struct cxl_dev_state *cxlds)
+ static int cxl_event_irqsetup(struct cxl_memdev_state *mds)
  {
+       struct cxl_dev_state *cxlds = &mds->cxlds;
        struct cxl_event_interrupt_policy policy;
        int rc;
  
-       rc = cxl_event_config_msgnums(cxlds, &policy);
+       rc = cxl_event_config_msgnums(mds, &policy);
        if (rc)
                return rc;
  
@@@ -789,7 -605,7 +780,7 @@@ static bool cxl_event_int_is_fw(u8 sett
  }
  
  static int cxl_event_config(struct pci_host_bridge *host_bridge,
-                           struct cxl_dev_state *cxlds)
+                           struct cxl_memdev_state *mds)
  {
        struct cxl_event_interrupt_policy policy;
        int rc;
        if (!host_bridge->native_cxl_error)
                return 0;
  
-       rc = cxl_mem_alloc_event_buf(cxlds);
+       rc = cxl_mem_alloc_event_buf(mds);
        if (rc)
                return rc;
  
-       rc = cxl_event_get_int_policy(cxlds, &policy);
+       rc = cxl_event_get_int_policy(mds, &policy);
        if (rc)
                return rc;
  
            cxl_event_int_is_fw(policy.warn_settings) ||
            cxl_event_int_is_fw(policy.failure_settings) ||
            cxl_event_int_is_fw(policy.fatal_settings)) {
-               dev_err(cxlds->dev, "FW still in control of Event Logs despite _OSC settings\n");
+               dev_err(mds->cxlds.dev,
+                       "FW still in control of Event Logs despite _OSC settings\n");
                return -EBUSY;
        }
  
-       rc = cxl_event_irqsetup(cxlds);
+       rc = cxl_event_irqsetup(mds);
        if (rc)
                return rc;
  
-       cxl_mem_get_event_records(cxlds, CXLDEV_EVENT_STATUS_ALL);
+       cxl_mem_get_event_records(mds, CXLDEV_EVENT_STATUS_ALL);
  
        return 0;
  }
  static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
  {
        struct pci_host_bridge *host_bridge = pci_find_host_bridge(pdev->bus);
+       struct cxl_memdev_state *mds;
+       struct cxl_dev_state *cxlds;
        struct cxl_register_map map;
        struct cxl_memdev *cxlmd;
-       struct cxl_dev_state *cxlds;
        int rc;
  
        /*
                return rc;
        pci_set_master(pdev);
  
-       cxlds = cxl_dev_state_create(&pdev->dev);
-       if (IS_ERR(cxlds))
-               return PTR_ERR(cxlds);
+       mds = cxl_memdev_state_create(&pdev->dev);
+       if (IS_ERR(mds))
+               return PTR_ERR(mds);
+       cxlds = &mds->cxlds;
        pci_set_drvdata(pdev, cxlds);
  
        cxlds->rcd = is_cxl_restricted(pdev);
        else
                dev_warn(&pdev->dev, "Media not active (%d)\n", rc);
  
-       rc = cxl_pci_setup_mailbox(cxlds);
 +      rc = cxl_alloc_irq_vectors(pdev);
 +      if (rc)
 +              return rc;
 +
+       rc = cxl_pci_setup_mailbox(mds);
        if (rc)
                return rc;
  
-       rc = cxl_enumerate_cmds(cxlds);
+       rc = cxl_enumerate_cmds(mds);
        if (rc)
                return rc;
  
-       rc = cxl_set_timestamp(cxlds);
+       rc = cxl_set_timestamp(mds);
        if (rc)
                return rc;
  
-       rc = cxl_poison_state_init(cxlds);
+       rc = cxl_poison_state_init(mds);
        if (rc)
                return rc;
  
-       rc = cxl_dev_state_identify(cxlds);
+       rc = cxl_dev_state_identify(mds);
        if (rc)
                return rc;
  
-       rc = cxl_mem_create_range_info(cxlds);
+       rc = cxl_mem_create_range_info(mds);
        if (rc)
                return rc;
  
 -      rc = cxl_alloc_irq_vectors(pdev);
 -      if (rc)
 -              return rc;
 -
        cxlmd = devm_cxl_add_memdev(cxlds);
        if (IS_ERR(cxlmd))
                return PTR_ERR(cxlmd);
  
-       rc = cxl_memdev_setup_fw_upload(cxlds);
++      rc = cxl_memdev_setup_fw_upload(mds);
 +      if (rc)
 +              return rc;
 +
-       rc = cxl_event_config(host_bridge, cxlds);
+       rc = cxl_event_config(host_bridge, mds);
        if (rc)
                return rc;
  
diff --combined drivers/cxl/security.c
index 9da6785dfd315e5fcf16e66ce08ad08ec78eb714,8c98fc674fa761d77b190ffca6f08cd1499b1ece..21856a3f408eee530c69da2748e7f1a0c311d9d8
@@@ -14,7 -14,7 +14,7 @@@ static unsigned long cxl_pmem_get_secur
  {
        struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
        struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
-       struct cxl_dev_state *cxlds = cxlmd->cxlds;
+       struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
        unsigned long security_flags = 0;
        struct cxl_get_security_output {
                __le32 flags;
                .payload_out = &out,
        };
  
-       rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
+       rc = cxl_internal_send_cmd(mds, &mbox_cmd);
        if (rc < 0)
                return 0;
  
        sec_out = le32_to_cpu(out.flags);
-       cxlds->security.state = sec_out;
 +      /* cache security state */
++      mds->security.state = sec_out;
 +
        if (ptype == NVDIMM_MASTER) {
                if (sec_out & CXL_PMEM_SEC_STATE_MASTER_PASS_SET)
                        set_bit(NVDIMM_SECURITY_UNLOCKED, &security_flags);
@@@ -70,7 -67,7 +70,7 @@@ static int cxl_pmem_security_change_key
  {
        struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
        struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
-       struct cxl_dev_state *cxlds = cxlmd->cxlds;
+       struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
        struct cxl_mbox_cmd mbox_cmd;
        struct cxl_set_pass set_pass;
  
@@@ -87,7 -84,7 +87,7 @@@
                .payload_in = &set_pass,
        };
  
-       return cxl_internal_send_cmd(cxlds, &mbox_cmd);
+       return cxl_internal_send_cmd(mds, &mbox_cmd);
  }
  
  static int __cxl_pmem_security_disable(struct nvdimm *nvdimm,
@@@ -96,7 -93,7 +96,7 @@@
  {
        struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
        struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
-       struct cxl_dev_state *cxlds = cxlmd->cxlds;
+       struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
        struct cxl_disable_pass dis_pass;
        struct cxl_mbox_cmd mbox_cmd;
  
                .payload_in = &dis_pass,
        };
  
-       return cxl_internal_send_cmd(cxlds, &mbox_cmd);
+       return cxl_internal_send_cmd(mds, &mbox_cmd);
  }
  
  static int cxl_pmem_security_disable(struct nvdimm *nvdimm,
@@@ -131,12 -128,12 +131,12 @@@ static int cxl_pmem_security_freeze(str
  {
        struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
        struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
-       struct cxl_dev_state *cxlds = cxlmd->cxlds;
+       struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
        struct cxl_mbox_cmd mbox_cmd = {
                .opcode = CXL_MBOX_OP_FREEZE_SECURITY,
        };
  
-       return cxl_internal_send_cmd(cxlds, &mbox_cmd);
+       return cxl_internal_send_cmd(mds, &mbox_cmd);
  }
  
  static int cxl_pmem_security_unlock(struct nvdimm *nvdimm,
  {
        struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
        struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
-       struct cxl_dev_state *cxlds = cxlmd->cxlds;
+       struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
        u8 pass[NVDIMM_PASSPHRASE_LEN];
        struct cxl_mbox_cmd mbox_cmd;
        int rc;
                .payload_in = pass,
        };
  
-       rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
+       rc = cxl_internal_send_cmd(mds, &mbox_cmd);
        if (rc < 0)
                return rc;
  
@@@ -169,7 -166,7 +169,7 @@@ static int cxl_pmem_security_passphrase
  {
        struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
        struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
-       struct cxl_dev_state *cxlds = cxlmd->cxlds;
+       struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
        struct cxl_mbox_cmd mbox_cmd;
        struct cxl_pass_erase erase;
        int rc;
                .payload_in = &erase,
        };
  
-       rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
+       rc = cxl_internal_send_cmd(mds, &mbox_cmd);
        if (rc < 0)
                return rc;
  
diff --combined tools/testing/cxl/test/mem.c
index 420f01106b52c52e34fc2a08f23706f5bc90ddb4,6fb5718588f372387dfe7ad3cc1006a21072f1a7..464fc39ed2776b5ea1f89d2e82b7d072fd21424c
@@@ -8,14 -8,11 +8,14 @@@
  #include <linux/sizes.h>
  #include <linux/bits.h>
  #include <asm/unaligned.h>
 +#include <crypto/sha2.h>
  #include <cxlmem.h>
  
  #include "trace.h"
  
  #define LSA_SIZE SZ_128K
 +#define FW_SIZE SZ_64M
 +#define FW_SLOTS 3
  #define DEV_SIZE SZ_2G
  #define EFFECT(x) (1U << x)
  
  
  static unsigned int poison_inject_dev_max = MOCK_INJECT_DEV_MAX;
  
 +enum cxl_command_effects {
 +      CONF_CHANGE_COLD_RESET = 0,
 +      CONF_CHANGE_IMMEDIATE,
 +      DATA_CHANGE_IMMEDIATE,
 +      POLICY_CHANGE_IMMEDIATE,
 +      LOG_CHANGE_IMMEDIATE,
 +      SECURITY_CHANGE_IMMEDIATE,
 +      BACKGROUND_OP,
 +      SECONDARY_MBOX_SUPPORTED,
 +};
 +
 +#define CXL_CMD_EFFECT_NONE cpu_to_le16(0)
 +
  static struct cxl_cel_entry mock_cel[] = {
        {
                .opcode = cpu_to_le16(CXL_MBOX_OP_GET_SUPPORTED_LOGS),
 -              .effect = cpu_to_le16(0),
 +              .effect = CXL_CMD_EFFECT_NONE,
        },
        {
                .opcode = cpu_to_le16(CXL_MBOX_OP_IDENTIFY),
 -              .effect = cpu_to_le16(0),
 +              .effect = CXL_CMD_EFFECT_NONE,
        },
        {
                .opcode = cpu_to_le16(CXL_MBOX_OP_GET_LSA),
 -              .effect = cpu_to_le16(0),
 +              .effect = CXL_CMD_EFFECT_NONE,
        },
        {
                .opcode = cpu_to_le16(CXL_MBOX_OP_GET_PARTITION_INFO),
 -              .effect = cpu_to_le16(0),
 +              .effect = CXL_CMD_EFFECT_NONE,
        },
        {
                .opcode = cpu_to_le16(CXL_MBOX_OP_SET_LSA),
 -              .effect = cpu_to_le16(EFFECT(1) | EFFECT(2)),
 +              .effect = cpu_to_le16(EFFECT(CONF_CHANGE_IMMEDIATE) |
 +                                    EFFECT(DATA_CHANGE_IMMEDIATE)),
        },
        {
                .opcode = cpu_to_le16(CXL_MBOX_OP_GET_HEALTH_INFO),
 -              .effect = cpu_to_le16(0),
 +              .effect = CXL_CMD_EFFECT_NONE,
        },
        {
                .opcode = cpu_to_le16(CXL_MBOX_OP_GET_POISON),
 -              .effect = cpu_to_le16(0),
 +              .effect = CXL_CMD_EFFECT_NONE,
        },
        {
                .opcode = cpu_to_le16(CXL_MBOX_OP_INJECT_POISON),
 -              .effect = cpu_to_le16(0),
 +              .effect = cpu_to_le16(EFFECT(DATA_CHANGE_IMMEDIATE)),
        },
        {
                .opcode = cpu_to_le16(CXL_MBOX_OP_CLEAR_POISON),
 -              .effect = cpu_to_le16(0),
 +              .effect = cpu_to_le16(EFFECT(DATA_CHANGE_IMMEDIATE)),
 +      },
 +      {
 +              .opcode = cpu_to_le16(CXL_MBOX_OP_GET_FW_INFO),
 +              .effect = CXL_CMD_EFFECT_NONE,
 +      },
 +      {
 +              .opcode = cpu_to_le16(CXL_MBOX_OP_TRANSFER_FW),
 +              .effect = cpu_to_le16(EFFECT(CONF_CHANGE_COLD_RESET) |
 +                                    EFFECT(BACKGROUND_OP)),
 +      },
 +      {
 +              .opcode = cpu_to_le16(CXL_MBOX_OP_ACTIVATE_FW),
 +              .effect = cpu_to_le16(EFFECT(CONF_CHANGE_COLD_RESET) |
 +                                    EFFECT(CONF_CHANGE_IMMEDIATE)),
        },
  };
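
These mock CEL entries build Command Effects Log masks with
EFFECT(x) = 1 << x over the cxl_command_effects enum above. A quick
standalone check of the Activate FW entry's mask (illustrative decode
only):

    #include <stdio.h>

    #define EFFECT(x) (1U << (x))

    /* subset of the cxl_command_effects enum above */
    enum { CONF_CHANGE_COLD_RESET = 0, CONF_CHANGE_IMMEDIATE, DATA_CHANGE_IMMEDIATE };

    int main(void)
    {
    	/* the mock Activate FW entry's effect mask */
    	unsigned int effect = EFFECT(CONF_CHANGE_COLD_RESET) |
    			      EFFECT(CONF_CHANGE_IMMEDIATE);

    	printf("cold-reset config change: %d\n",
    	       !!(effect & EFFECT(CONF_CHANGE_COLD_RESET)));
    	printf("immediate config change:  %d\n",
    	       !!(effect & EFFECT(CONF_CHANGE_IMMEDIATE)));
    	printf("immediate data change:    %d\n",
    	       !!(effect & EFFECT(DATA_CHANGE_IMMEDIATE)));
    	return 0;
    }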
  
@@@ -133,17 -102,13 +133,17 @@@ struct mock_event_log 
  };
  
  struct mock_event_store {
-       struct cxl_dev_state *cxlds;
+       struct cxl_memdev_state *mds;
        struct mock_event_log mock_logs[CXL_EVENT_TYPE_MAX];
        u32 ev_status;
  };
  
  struct cxl_mockmem_data {
        void *lsa;
 +      void *fw;
 +      int fw_slot;
 +      int fw_staged;
 +      size_t fw_size;
        u32 security_state;
        u8 user_pass[NVDIMM_PASSPHRASE_LEN];
        u8 master_pass[NVDIMM_PASSPHRASE_LEN];
@@@ -215,8 -180,7 +215,7 @@@ static void mes_add_event(struct mock_e
        log->nr_events++;
  }
  
- static int mock_get_event(struct cxl_dev_state *cxlds,
-                         struct cxl_mbox_cmd *cmd)
+ static int mock_get_event(struct device *dev, struct cxl_mbox_cmd *cmd)
  {
        struct cxl_get_event_payload *pl;
        struct mock_event_log *log;
  
        memset(cmd->payload_out, 0, cmd->size_out);
  
-       log = event_find_log(cxlds->dev, log_type);
+       log = event_find_log(dev, log_type);
        if (!log || event_log_empty(log))
                return 0;
  
        return 0;
  }
  
- static int mock_clear_event(struct cxl_dev_state *cxlds,
-                           struct cxl_mbox_cmd *cmd)
+ static int mock_clear_event(struct device *dev, struct cxl_mbox_cmd *cmd)
  {
        struct cxl_mbox_clear_event_payload *pl = cmd->payload_in;
        struct mock_event_log *log;
        if (log_type >= CXL_EVENT_TYPE_MAX)
                return -EINVAL;
  
-       log = event_find_log(cxlds->dev, log_type);
+       log = event_find_log(dev, log_type);
        if (!log)
                return 0; /* No mock data in this log */
  
          * However, this is not good behavior for the host, so test it.
         */
        if (log->clear_idx + pl->nr_recs > log->cur_idx) {
-               dev_err(cxlds->dev,
+               dev_err(dev,
                        "Attempting to clear more events than returned!\n");
                return -EINVAL;
        }
             nr < pl->nr_recs;
             nr++, handle++) {
                if (handle != le16_to_cpu(pl->handles[nr])) {
-                       dev_err(cxlds->dev, "Clearing events out of order\n");
+                       dev_err(dev, "Clearing events out of order\n");
                        return -EINVAL;
                }
        }
@@@ -328,7 -291,7 +326,7 @@@ static void cxl_mock_event_trigger(stru
                        event_reset_log(log);
        }
  
-       cxl_mem_get_event_records(mes->cxlds, mes->ev_status);
+       cxl_mem_get_event_records(mes->mds, mes->ev_status);
  }
  
  struct cxl_event_record_raw maint_needed = {
@@@ -488,7 -451,7 +486,7 @@@ static int mock_gsl(struct cxl_mbox_cm
        return 0;
  }
  
- static int mock_get_log(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
+ static int mock_get_log(struct cxl_memdev_state *mds, struct cxl_mbox_cmd *cmd)
  {
        struct cxl_mbox_get_log *gl = cmd->payload_in;
        u32 offset = le32_to_cpu(gl->offset);
  
        if (cmd->size_in < sizeof(*gl))
                return -EINVAL;
-       if (length > cxlds->payload_size)
+       if (length > mds->payload_size)
                return -EINVAL;
        if (offset + length > sizeof(mock_cel))
                return -EINVAL;
        return 0;
  }
  
- static int mock_rcd_id(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
+ static int mock_rcd_id(struct cxl_mbox_cmd *cmd)
  {
        struct cxl_mbox_identify id = {
                .fw_revision = { "mock fw v1 " },
        return 0;
  }
  
- static int mock_id(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
+ static int mock_id(struct cxl_mbox_cmd *cmd)
  {
        struct cxl_mbox_identify id = {
                .fw_revision = { "mock fw v1 " },
        return 0;
  }
  
- static int mock_partition_info(struct cxl_dev_state *cxlds,
-                              struct cxl_mbox_cmd *cmd)
+ static int mock_partition_info(struct cxl_mbox_cmd *cmd)
  {
        struct cxl_mbox_get_partition_info pi = {
                .active_volatile_cap =
        return 0;
  }
  
- static int mock_sanitize(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
++static int mock_sanitize(struct cxl_mockmem_data *mdata,
++                       struct cxl_mbox_cmd *cmd)
 +{
-       struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
 +      if (cmd->size_in != 0)
 +              return -EINVAL;
 +
 +      if (cmd->size_out != 0)
 +              return -EINVAL;
 +
 +      if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
 +              cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
 +              return -ENXIO;
 +      }
 +      if (mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED) {
 +              cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
 +              return -ENXIO;
 +      }
 +
 +      return 0; /* assume less than 2 secs, no bg */
 +}
 +
- static int mock_secure_erase(struct cxl_dev_state *cxlds,
++static int mock_secure_erase(struct cxl_mockmem_data *mdata,
 +                           struct cxl_mbox_cmd *cmd)
 +{
-       struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
 +      if (cmd->size_in != 0)
 +              return -EINVAL;
 +
 +      if (cmd->size_out != 0)
 +              return -EINVAL;
 +
 +      if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
 +              cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
 +              return -ENXIO;
 +      }
 +
 +      if (mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED) {
 +              cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
 +              return -ENXIO;
 +      }
 +
 +      return 0;
 +}
 +
- static int mock_get_security_state(struct cxl_dev_state *cxlds,
+ static int mock_get_security_state(struct cxl_mockmem_data *mdata,
                                   struct cxl_mbox_cmd *cmd)
  {
-       struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
        if (cmd->size_in)
                return -EINVAL;
  
@@@ -650,9 -564,9 +642,9 @@@ static void user_plimit_check(struct cx
                mdata->security_state |= CXL_PMEM_SEC_STATE_USER_PLIMIT;
  }
  
- static int mock_set_passphrase(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
+ static int mock_set_passphrase(struct cxl_mockmem_data *mdata,
+                              struct cxl_mbox_cmd *cmd)
  {
-       struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
        struct cxl_set_pass *set_pass;
  
        if (cmd->size_in != sizeof(*set_pass))
        return -EINVAL;
  }
  
- static int mock_disable_passphrase(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
+ static int mock_disable_passphrase(struct cxl_mockmem_data *mdata,
+                                  struct cxl_mbox_cmd *cmd)
  {
-       struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
        struct cxl_disable_pass *dis_pass;
  
        if (cmd->size_in != sizeof(*dis_pass))
        return 0;
  }
  
- static int mock_freeze_security(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
+ static int mock_freeze_security(struct cxl_mockmem_data *mdata,
+                               struct cxl_mbox_cmd *cmd)
  {
-       struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
        if (cmd->size_in != 0)
                return -EINVAL;
  
        return 0;
  }
  
- static int mock_unlock_security(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
+ static int mock_unlock_security(struct cxl_mockmem_data *mdata,
+                               struct cxl_mbox_cmd *cmd)
  {
-       struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
        if (cmd->size_in != NVDIMM_PASSPHRASE_LEN)
                return -EINVAL;
  
        return 0;
  }
  
- static int mock_passphrase_secure_erase(struct cxl_dev_state *cxlds,
+ static int mock_passphrase_secure_erase(struct cxl_mockmem_data *mdata,
                                        struct cxl_mbox_cmd *cmd)
  {
-       struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
        struct cxl_pass_erase *erase;
  
        if (cmd->size_in != sizeof(*erase))
        return 0;
  }
  
- static int mock_get_lsa(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
+ static int mock_get_lsa(struct cxl_mockmem_data *mdata,
+                       struct cxl_mbox_cmd *cmd)
  {
        struct cxl_mbox_get_lsa *get_lsa = cmd->payload_in;
-       struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
        void *lsa = mdata->lsa;
        u32 offset, length;
  
        return 0;
  }
  
- static int mock_set_lsa(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
+ static int mock_set_lsa(struct cxl_mockmem_data *mdata,
+                       struct cxl_mbox_cmd *cmd)
  {
        struct cxl_mbox_set_lsa *set_lsa = cmd->payload_in;
-       struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
        void *lsa = mdata->lsa;
        u32 offset, length;
  
        return 0;
  }
  
- static int mock_health_info(struct cxl_dev_state *cxlds,
-                           struct cxl_mbox_cmd *cmd)
+ static int mock_health_info(struct cxl_mbox_cmd *cmd)
  {
        struct cxl_mbox_health_info health_info = {
                /* set flags for maint needed, perf degraded, hw replacement */
@@@ -1195,90 -1105,12 +1183,90 @@@ static struct attribute *cxl_mock_mem_c
  };
  ATTRIBUTE_GROUPS(cxl_mock_mem_core);
  
- static int mock_fw_info(struct cxl_dev_state *cxlds,
-                           struct cxl_mbox_cmd *cmd)
++static int mock_fw_info(struct cxl_mockmem_data *mdata,
++                      struct cxl_mbox_cmd *cmd)
 +{
-       struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
 +      struct cxl_mbox_get_fw_info fw_info = {
 +              .num_slots = FW_SLOTS,
 +              .slot_info = (mdata->fw_slot & 0x7) |
 +                           ((mdata->fw_staged & 0x7) << 3),
 +              .activation_cap = 0,
 +      };
 +
 +      strcpy(fw_info.slot_1_revision, "cxl_test_fw_001");
 +      strcpy(fw_info.slot_2_revision, "cxl_test_fw_002");
 +      strcpy(fw_info.slot_3_revision, "cxl_test_fw_003");
 +      strcpy(fw_info.slot_4_revision, "");
 +
 +      if (cmd->size_out < sizeof(fw_info))
 +              return -EINVAL;
 +
 +      memcpy(cmd->payload_out, &fw_info, sizeof(fw_info));
 +      return 0;
 +}
 +
- static int mock_transfer_fw(struct cxl_dev_state *cxlds,
++static int mock_transfer_fw(struct cxl_mockmem_data *mdata,
 +                          struct cxl_mbox_cmd *cmd)
 +{
 +      struct cxl_mbox_transfer_fw *transfer = cmd->payload_in;
-       struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
 +      void *fw = mdata->fw;
 +      size_t offset, length;
 +
 +      offset = le32_to_cpu(transfer->offset) * CXL_FW_TRANSFER_ALIGNMENT;
 +      length = cmd->size_in - sizeof(*transfer);
 +      if (offset + length > FW_SIZE)
 +              return -EINVAL;
 +
 +      switch (transfer->action) {
 +      case CXL_FW_TRANSFER_ACTION_FULL:
 +              if (offset != 0)
 +                      return -EINVAL;
 +              fallthrough;
 +      case CXL_FW_TRANSFER_ACTION_END:
 +              if (transfer->slot == 0 || transfer->slot > FW_SLOTS)
 +                      return -EINVAL;
 +              mdata->fw_size = offset + length;
 +              break;
 +      case CXL_FW_TRANSFER_ACTION_INITIATE:
 +      case CXL_FW_TRANSFER_ACTION_CONTINUE:
 +              break;
 +      case CXL_FW_TRANSFER_ACTION_ABORT:
 +              return 0;
 +      default:
 +              return -EINVAL;
 +      }
 +
 +      memcpy(fw + offset, transfer->data, length);
 +      return 0;
 +}
 +
- static int mock_activate_fw(struct cxl_dev_state *cxlds,
++static int mock_activate_fw(struct cxl_mockmem_data *mdata,
 +                          struct cxl_mbox_cmd *cmd)
 +{
 +      struct cxl_mbox_activate_fw *activate = cmd->payload_in;
-       struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
 +
 +      if (activate->slot == 0 || activate->slot > FW_SLOTS)
 +              return -EINVAL;
 +
 +      switch (activate->action) {
 +      case CXL_FW_ACTIVATE_ONLINE:
 +              mdata->fw_slot = activate->slot;
 +              mdata->fw_staged = 0;
 +              return 0;
 +      case CXL_FW_ACTIVATE_OFFLINE:
 +              mdata->fw_staged = activate->slot;
 +              return 0;
 +      }
 +
 +      return -EINVAL;
 +}
 +
- static int cxl_mock_mbox_send(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
+ static int cxl_mock_mbox_send(struct cxl_memdev_state *mds,
+                             struct cxl_mbox_cmd *cmd)
  {
+       struct cxl_dev_state *cxlds = &mds->cxlds;
        struct device *dev = cxlds->dev;
+       struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
        int rc = -EIO;
  
        switch (cmd->opcode) {
                rc = mock_gsl(cmd);
                break;
        case CXL_MBOX_OP_GET_LOG:
-               rc = mock_get_log(cxlds, cmd);
+               rc = mock_get_log(mds, cmd);
                break;
        case CXL_MBOX_OP_IDENTIFY:
                if (cxlds->rcd)
-                       rc = mock_rcd_id(cxlds, cmd);
+                       rc = mock_rcd_id(cmd);
                else
-                       rc = mock_id(cxlds, cmd);
+                       rc = mock_id(cmd);
                break;
        case CXL_MBOX_OP_GET_LSA:
-               rc = mock_get_lsa(cxlds, cmd);
+               rc = mock_get_lsa(mdata, cmd);
                break;
        case CXL_MBOX_OP_GET_PARTITION_INFO:
-               rc = mock_partition_info(cxlds, cmd);
+               rc = mock_partition_info(cmd);
                break;
        case CXL_MBOX_OP_GET_EVENT_RECORD:
-               rc = mock_get_event(cxlds, cmd);
+               rc = mock_get_event(dev, cmd);
                break;
        case CXL_MBOX_OP_CLEAR_EVENT_RECORD:
-               rc = mock_clear_event(cxlds, cmd);
+               rc = mock_clear_event(dev, cmd);
                break;
        case CXL_MBOX_OP_SET_LSA:
-               rc = mock_set_lsa(cxlds, cmd);
+               rc = mock_set_lsa(mdata, cmd);
                break;
        case CXL_MBOX_OP_GET_HEALTH_INFO:
-               rc = mock_health_info(cxlds, cmd);
+               rc = mock_health_info(cmd);
                break;
-               rc = mock_sanitize(cxlds, cmd);
 +      case CXL_MBOX_OP_SANITIZE:
-               rc = mock_secure_erase(cxlds, cmd);
++              rc = mock_sanitize(mdata, cmd);
 +              break;
 +      case CXL_MBOX_OP_SECURE_ERASE:
++              rc = mock_secure_erase(mdata, cmd);
 +              break;
        case CXL_MBOX_OP_GET_SECURITY_STATE:
-               rc = mock_get_security_state(cxlds, cmd);
+               rc = mock_get_security_state(mdata, cmd);
                break;
        case CXL_MBOX_OP_SET_PASSPHRASE:
-               rc = mock_set_passphrase(cxlds, cmd);
+               rc = mock_set_passphrase(mdata, cmd);
                break;
        case CXL_MBOX_OP_DISABLE_PASSPHRASE:
-               rc = mock_disable_passphrase(cxlds, cmd);
+               rc = mock_disable_passphrase(mdata, cmd);
                break;
        case CXL_MBOX_OP_FREEZE_SECURITY:
-               rc = mock_freeze_security(cxlds, cmd);
+               rc = mock_freeze_security(mdata, cmd);
                break;
        case CXL_MBOX_OP_UNLOCK:
-               rc = mock_unlock_security(cxlds, cmd);
+               rc = mock_unlock_security(mdata, cmd);
                break;
        case CXL_MBOX_OP_PASSPHRASE_SECURE_ERASE:
-               rc = mock_passphrase_secure_erase(cxlds, cmd);
+               rc = mock_passphrase_secure_erase(mdata, cmd);
                break;
        case CXL_MBOX_OP_GET_POISON:
                rc = mock_get_poison(cxlds, cmd);
        case CXL_MBOX_OP_CLEAR_POISON:
                rc = mock_clear_poison(cxlds, cmd);
                break;
-               rc = mock_fw_info(cxlds, cmd);
 +      case CXL_MBOX_OP_GET_FW_INFO:
-               rc = mock_transfer_fw(cxlds, cmd);
++              rc = mock_fw_info(mdata, cmd);
 +              break;
 +      case CXL_MBOX_OP_TRANSFER_FW:
-               rc = mock_activate_fw(cxlds, cmd);
++              rc = mock_transfer_fw(mdata, cmd);
 +              break;
 +      case CXL_MBOX_OP_ACTIVATE_FW:
++              rc = mock_activate_fw(mdata, cmd);
 +              break;
        default:
                break;
        }
@@@ -1372,11 -1189,6 +1360,11 @@@ static void label_area_release(void *ls
        vfree(lsa);
  }
  
 +static void fw_buf_release(void *buf)
 +{
 +      vfree(buf);
 +}
 +
  static bool is_rcd(struct platform_device *pdev)
  {
        const struct platform_device_id *id = platform_get_device_id(pdev);
@@@ -1397,6 -1209,7 +1385,7 @@@ static int cxl_mock_mem_probe(struct pl
  {
        struct device *dev = &pdev->dev;
        struct cxl_memdev *cxlmd;
+       struct cxl_memdev_state *mds;
        struct cxl_dev_state *cxlds;
        struct cxl_mockmem_data *mdata;
        int rc;
        mdata->lsa = vmalloc(LSA_SIZE);
        if (!mdata->lsa)
                return -ENOMEM;
 +      mdata->fw = vmalloc(FW_SIZE);
 +      if (!mdata->fw)
 +              return -ENOMEM;
 +      mdata->fw_slot = 2;
 +
        rc = devm_add_action_or_reset(dev, label_area_release, mdata->lsa);
        if (rc)
                return rc;
  
-       cxlds = cxl_dev_state_create(dev);
-       if (IS_ERR(cxlds))
-               return PTR_ERR(cxlds);
 +      rc = devm_add_action_or_reset(dev, fw_buf_release, mdata->fw);
 +      if (rc)
 +              return rc;
 +
+       mds = cxl_memdev_state_create(dev);
+       if (IS_ERR(mds))
+               return PTR_ERR(mds);
+       mds->mbox_send = cxl_mock_mbox_send;
+       mds->payload_size = SZ_4K;
+       mds->event.buf = (struct cxl_get_event_payload *) mdata->event_buf;
  
+       cxlds = &mds->cxlds;
        cxlds->serial = pdev->id;
-       cxlds->mbox_send = cxl_mock_mbox_send;
-       cxlds->payload_size = SZ_4K;
-       cxlds->event.buf = (struct cxl_get_event_payload *) mdata->event_buf;
        if (is_rcd(pdev)) {
                cxlds->rcd = true;
                cxlds->component_reg_phys = CXL_RESOURCE_NONE;
        }
  
-       rc = cxl_enumerate_cmds(cxlds);
+       rc = cxl_enumerate_cmds(mds);
        if (rc)
                return rc;
  
-       rc = cxl_poison_state_init(cxlds);
+       rc = cxl_poison_state_init(mds);
        if (rc)
                return rc;
  
-       rc = cxl_set_timestamp(cxlds);
+       rc = cxl_set_timestamp(mds);
        if (rc)
                return rc;
  
        cxlds->media_ready = true;
-       rc = cxl_dev_state_identify(cxlds);
+       rc = cxl_dev_state_identify(mds);
        if (rc)
                return rc;
  
-       rc = cxl_mem_create_range_info(cxlds);
+       rc = cxl_mem_create_range_info(mds);
        if (rc)
                return rc;
  
-       mdata->mes.cxlds = cxlds;
+       mdata->mes.mds = mds;
        cxl_mock_add_event_logs(&mdata->mes);
  
        cxlmd = devm_cxl_add_memdev(cxlds);
        if (IS_ERR(cxlmd))
                return PTR_ERR(cxlmd);
  
-       rc = cxl_memdev_setup_fw_upload(cxlds);
++      rc = cxl_memdev_setup_fw_upload(mds);
 +      if (rc)
 +              return rc;
 +
-       cxl_mem_get_event_records(cxlds, CXLDEV_EVENT_STATUS_ALL);
+       cxl_mem_get_event_records(mds, CXLDEV_EVENT_STATUS_ALL);
  
        return 0;
  }
@@@ -1505,40 -1307,9 +1496,40 @@@ static ssize_t security_lock_store(stru
  
  static DEVICE_ATTR_RW(security_lock);
  
 +static ssize_t fw_buf_checksum_show(struct device *dev,
 +                                  struct device_attribute *attr, char *buf)
 +{
 +      struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
 +      u8 hash[SHA256_DIGEST_SIZE];
 +      unsigned char *hstr, *hptr;
 +      struct sha256_state sctx;
 +      ssize_t written = 0;
 +      int i;
 +
 +      sha256_init(&sctx);
 +      sha256_update(&sctx, mdata->fw, mdata->fw_size);
 +      sha256_final(&sctx, hash);
 +
 +      hstr = kzalloc((SHA256_DIGEST_SIZE * 2) + 1, GFP_KERNEL);
 +      if (!hstr)
 +              return -ENOMEM;
 +
 +      hptr = hstr;
 +      for (i = 0; i < SHA256_DIGEST_SIZE; i++)
 +              hptr += sprintf(hptr, "%02x", hash[i]);
 +
 +      written = sysfs_emit(buf, "%s\n", hstr);
 +
 +      kfree(hstr);
 +      return written;
 +}
 +
 +static DEVICE_ATTR_RO(fw_buf_checksum);
 +
  static struct attribute *cxl_mock_mem_attrs[] = {
        &dev_attr_security_lock.attr,
        &dev_attr_event_trigger.attr,
 +      &dev_attr_fw_buf_checksum.attr,
        NULL
  };
  ATTRIBUTE_GROUPS(cxl_mock_mem);