#include "sysemu/block-backend.h"
#include "qemu/log.h"
+#include "qemu/module.h"
#include "qemu/cutils.h"
#include "trace.h"
#include "nvme.h"
return NVME_INVALID_FIELD | NVME_DNR;
}
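+
+/*
+ * Transfer data described by a PRP pair from guest memory (or the CMB)
+ * into the controller-side buffer 'ptr'; used for host-to-controller
+ * payloads such as the Set Features (Timestamp) data.
+ */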
+static uint16_t nvme_dma_write_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
+    uint64_t prp1, uint64_t prp2)
+{
+    QEMUSGList qsg;
+    QEMUIOVector iov;
+    uint16_t status = NVME_SUCCESS;
+
+    if (nvme_map_prp(&qsg, &iov, prp1, prp2, len, n)) {
+        return NVME_INVALID_FIELD | NVME_DNR;
+    }
+    if (qsg.nsg > 0) {
+        if (dma_buf_write(ptr, len, &qsg)) {
+            status = NVME_INVALID_FIELD | NVME_DNR;
+        }
+        qemu_sglist_destroy(&qsg);
+    } else {
+        if (qemu_iovec_to_buf(&iov, 0, ptr, len) != len) {
+            status = NVME_INVALID_FIELD | NVME_DNR;
+        }
+        qemu_iovec_destroy(&iov);
+    }
+    return status;
+}
+
static uint16_t nvme_dma_read_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
uint64_t prp1, uint64_t prp2)
{
}
qemu_sglist_destroy(&qsg);
} else {
- if (unlikely(qemu_iovec_to_buf(&iov, 0, ptr, len) != len)) {
+ if (unlikely(qemu_iovec_from_buf(&iov, 0, ptr, len) != len)) {
trace_nvme_err_invalid_dma();
status = NVME_INVALID_FIELD | NVME_DNR;
}
const uint8_t data_shift = ns->id_ns.lbaf[lba_index].ds;
uint64_t slba = le64_to_cpu(rw->slba);
uint32_t nlb = le16_to_cpu(rw->nlb) + 1;
- uint64_t aio_slba = slba << (data_shift - BDRV_SECTOR_BITS);
- uint32_t aio_nlb = nlb << (data_shift - BDRV_SECTOR_BITS);
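+    /* blk_aio_pwrite_zeroes() expects a byte offset and byte count */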
+    uint64_t offset = slba << data_shift;
+    uint32_t count = nlb << data_shift;
if (unlikely(slba + nlb > ns->id_ns.nsze)) {
trace_nvme_err_invalid_lba_range(slba, nlb, ns->id_ns.nsze);
req->has_sg = false;
block_acct_start(blk_get_stats(n->conf.blk), &req->acct, 0,
BLOCK_ACCT_WRITE);
- req->aiocb = blk_aio_pwrite_zeroes(n->conf.blk, aio_slba, aio_nlb,
+ req->aiocb = blk_aio_pwrite_zeroes(n->conf.blk, offset, count,
BDRV_REQ_MAY_UNMAP, nvme_rw_cb, req);
return NVME_NO_COMPLETE;
}
return ret;
}
-
static uint16_t nvme_identify(NvmeCtrl *n, NvmeCmd *cmd)
{
NvmeIdentify *c = (NvmeIdentify *)cmd;
}
}
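+
+/*
+ * Record the timestamp written by the host along with the virtual clock
+ * time at which it was set.
+ */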
+static inline void nvme_set_timestamp(NvmeCtrl *n, uint64_t ts)
+{
+    trace_nvme_setfeat_timestamp(ts);
+
+    n->host_timestamp = le64_to_cpu(ts);
+    n->timestamp_set_qemu_clock_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
+}
+
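+/*
+ * Return the current timestamp: the host-supplied value plus the virtual
+ * clock time elapsed since it was set, as a little-endian value.
+ */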
+static inline uint64_t nvme_get_timestamp(const NvmeCtrl *n)
+{
+    uint64_t current_time = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
+    uint64_t elapsed_time = current_time - n->timestamp_set_qemu_clock_ms;
+
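+    /*
+     * Timestamp feature data layout: a 48-bit millisecond counter plus
+     * the synch and timestamp-origin attribute fields.
+     */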
+    union nvme_timestamp {
+        struct {
+            uint64_t timestamp:48;
+            uint64_t sync:1;
+            uint64_t origin:3;
+            uint64_t rsvd1:12;
+        };
+        uint64_t all;
+    };
+
+    union nvme_timestamp ts;
+    ts.all = 0;
+
+    /*
+     * If the sum of the Timestamp value set by the host and the elapsed
+     * time exceeds 2^48, the value returned should be reduced modulo 2^48.
+     */
+    ts.timestamp = (n->host_timestamp + elapsed_time) & 0xffffffffffff;
+
+    /* If the host timestamp is non-zero, set the timestamp origin */
+    ts.origin = n->host_timestamp ? 0x01 : 0x00;
+
+    trace_nvme_getfeat_timestamp(ts.all);
+
+    return cpu_to_le64(ts.all);
+}
+
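+/* Get Features (Timestamp): return the current timestamp to the host */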
+static uint16_t nvme_get_feature_timestamp(NvmeCtrl *n, NvmeCmd *cmd)
+{
+    uint64_t prp1 = le64_to_cpu(cmd->prp1);
+    uint64_t prp2 = le64_to_cpu(cmd->prp2);
+
+    uint64_t timestamp = nvme_get_timestamp(n);
+
+    return nvme_dma_read_prp(n, (uint8_t *)&timestamp,
+                             sizeof(timestamp), prp1, prp2);
+}
+
static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
{
uint32_t dw10 = le32_to_cpu(cmd->cdw10);
result = cpu_to_le32((n->num_queues - 2) | ((n->num_queues - 2) << 16));
trace_nvme_getfeat_numq(result);
break;
+    case NVME_TIMESTAMP:
+        return nvme_get_feature_timestamp(n, cmd);
default:
trace_nvme_err_invalid_getfeat(dw10);
return NVME_INVALID_FIELD | NVME_DNR;
return NVME_SUCCESS;
}
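+
+/* Set Features (Timestamp): fetch the 8-byte timestamp value from the host */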
+static uint16_t nvme_set_feature_timestamp(NvmeCtrl *n, NvmeCmd *cmd)
+{
+    uint16_t ret;
+    uint64_t timestamp;
+    uint64_t prp1 = le64_to_cpu(cmd->prp1);
+    uint64_t prp2 = le64_to_cpu(cmd->prp2);
+
+    ret = nvme_dma_write_prp(n, (uint8_t *)&timestamp,
+                             sizeof(timestamp), prp1, prp2);
+    if (ret != NVME_SUCCESS) {
+        return ret;
+    }
+
+    nvme_set_timestamp(n, timestamp);
+
+    return NVME_SUCCESS;
+}
+
static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
{
uint32_t dw10 = le32_to_cpu(cmd->cdw10);
req->cqe.result =
cpu_to_le32((n->num_queues - 2) | ((n->num_queues - 2) << 16));
break;
+
+    case NVME_TIMESTAMP:
+        return nvme_set_feature_timestamp(n, cmd);
+
default:
trace_nvme_err_invalid_setfeat(dw10);
return NVME_INVALID_FIELD | NVME_DNR;
nvme_init_sq(&n->admin_sq, n, n->bar.asq, 0, 0,
NVME_AQA_ASQS(n->bar.aqa) + 1);
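+
+    /* Timestamp starts at zero; the host can program it via Set Features */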
+    nvme_set_timestamp(n, 0ULL);
+
return 0;
}
pci_conf[PCI_INTERRUPT_PIN] = 1;
pci_config_set_prog_interface(pci_dev->config, 0x2);
pci_config_set_class(pci_dev->config, PCI_CLASS_STORAGE_EXPRESS);
- pcie_endpoint_cap_init(&n->parent_obj, 0x80);
+ pcie_endpoint_cap_init(pci_dev, 0x80);
n->num_namespaces = 1;
n->reg_size = pow2ceil(0x1004 + 2 * (n->num_queues + 1) * 4);
memory_region_init_io(&n->iomem, OBJECT(n), &nvme_mmio_ops, n,
"nvme", n->reg_size);
- pci_register_bar(&n->parent_obj, 0,
+ pci_register_bar(pci_dev, 0,
PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_64,
&n->iomem);
- msix_init_exclusive_bar(&n->parent_obj, n->num_queues, 4, NULL);
+ msix_init_exclusive_bar(pci_dev, n->num_queues, 4, NULL);
id->vid = cpu_to_le16(pci_get_word(pci_conf + PCI_VENDOR_ID));
id->ssvid = cpu_to_le16(pci_get_word(pci_conf + PCI_SUBSYSTEM_VENDOR_ID));
id->sqes = (0x6 << 4) | 0x6;
id->cqes = (0x4 << 4) | 0x4;
id->nn = cpu_to_le32(n->num_namespaces);
- id->oncs = cpu_to_le16(NVME_ONCS_WRITE_ZEROS);
+ id->oncs = cpu_to_le16(NVME_ONCS_WRITE_ZEROS | NVME_ONCS_TIMESTAMP);
id->psd[0].mp = cpu_to_le16(0x9c4);
id->psd[0].enlat = cpu_to_le32(0x10);
id->psd[0].exlat = cpu_to_le32(0x4);
n->bar.cap = 0;
NVME_CAP_SET_MQES(n->bar.cap, 0x7ff);
NVME_CAP_SET_CQR(n->bar.cap, 1);
- NVME_CAP_SET_AMS(n->bar.cap, 1);
NVME_CAP_SET_TO(n->bar.cap, 0xf);
NVME_CAP_SET_CSS(n->bar.cap, 1);
NVME_CAP_SET_MPSMAX(n->bar.cap, 4);
n->cmbuf = g_malloc0(NVME_CMBSZ_GETSIZE(n->bar.cmbsz));
memory_region_init_io(&n->ctrl_mem, OBJECT(n), &nvme_cmb_ops, n,
"nvme-cmb", NVME_CMBSZ_GETSIZE(n->bar.cmbsz));
- pci_register_bar(&n->parent_obj, NVME_CMBLOC_BIR(n->bar.cmbloc),
+ pci_register_bar(pci_dev, NVME_CMBLOC_BIR(n->bar.cmbloc),
PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_64 |
PCI_BASE_ADDRESS_MEM_PREFETCH, &n->ctrl_mem);