/*
 * NVDIMM ACPI Implementation
 *
 * Copyright(C) 2015 Intel Corporation.
 *
 * Author:
 *  Xiao Guangrong <[email protected]>
 *
 * NFIT is defined in ACPI 6.0: 5.2.25 NVDIMM Firmware Interface Table (NFIT)
 * and the DSM specification can be found at:
 *       http://pmem.io/documents/NVDIMM_DSM_Interface_Example.pdf
 *
 * Currently, it only supports PMEM Virtualization.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>
 */

#include "qemu/osdep.h"
#include "hw/acpi/acpi.h"
#include "hw/acpi/aml-build.h"
#include "hw/mem/nvdimm.h"

static int nvdimm_plugged_device_list(Object *obj, void *opaque)
{
    GSList **list = opaque;

    if (object_dynamic_cast(obj, TYPE_NVDIMM)) {
        DeviceState *dev = DEVICE(obj);

        if (dev->realized) { /* only realized NVDIMMs matter */
            *list = g_slist_append(*list, DEVICE(obj));
        }
    }

    object_child_foreach(obj, nvdimm_plugged_device_list, opaque);
    return 0;
}

/*
 * Inquire about the plugged NVDIMM devices and link them into the list
 * which is returned to the caller.
 *
 * Note: it is the caller's responsibility to free the list to avoid a
 * memory leak.
 */
static GSList *nvdimm_get_plugged_device_list(void)
{
    GSList *list = NULL;

    object_child_foreach(qdev_get_machine(), nvdimm_plugged_device_list,
                         &list);
    return list;
}

#define NVDIMM_UUID_LE(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7)             \
   { (a) & 0xff, ((a) >> 8) & 0xff, ((a) >> 16) & 0xff, ((a) >> 24) & 0xff, \
     (b) & 0xff, ((b) >> 8) & 0xff, (c) & 0xff, ((c) >> 8) & 0xff,          \
     (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }

/*
 * define Byte Addressable Persistent Memory (PM) Region according to
 * ACPI 6.0: 5.2.25.1 System Physical Address Range Structure.
 */
static const uint8_t nvdimm_nfit_spa_uuid[] =
      NVDIMM_UUID_LE(0x66f0d379, 0xb4f3, 0x4074, 0xac, 0x43, 0x0d, 0x33,
                     0x18, 0xb7, 0x8c, 0xdb);
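
/*
 * For reference: the byte array above is the little-endian encoding of the
 * GUID 66F0D379-B4F3-4074-AC43-0D3318B78CDB, which ACPI 6.0 assigns to
 * byte-addressable persistent memory regions. It is copied verbatim into
 * the type_guid field of each SPA structure built below.
 */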

/*
 * NVDIMM Firmware Interface Table
 * @signature: "NFIT"
 *
 * It provides information that allows OSPM to enumerate the NVDIMMs present
 * in the platform and to associate the system physical address ranges
 * created by the NVDIMMs with the devices that back them.
 *
 * It is defined in ACPI 6.0: 5.2.25 NVDIMM Firmware Interface Table (NFIT)
 */
struct NvdimmNfitHeader {
    ACPI_TABLE_HEADER_DEF
    uint32_t reserved;
} QEMU_PACKED;
typedef struct NvdimmNfitHeader NvdimmNfitHeader;

/*
 * define NFIT structures according to ACPI 6.0: 5.2.25 NVDIMM Firmware
 * Interface Table (NFIT).
 */

/*
 * System Physical Address Range Structure
 *
 * It describes the system physical address ranges occupied by NVDIMMs and
 * the types of the regions.
 */
struct NvdimmNfitSpa {
    uint16_t type;
    uint16_t length;
    uint16_t spa_index;
    uint16_t flags;
    uint32_t reserved;
    uint32_t proximity_domain;
    uint8_t type_guid[16];
    uint64_t spa_base;
    uint64_t spa_length;
    uint64_t mem_attr;
} QEMU_PACKED;
typedef struct NvdimmNfitSpa NvdimmNfitSpa;

/*
 * Memory Device to System Physical Address Range Mapping Structure
 *
 * It enables identifying each NVDIMM region and the corresponding SPA
 * describing the memory interleave.
 */
struct NvdimmNfitMemDev {
    uint16_t type;
    uint16_t length;
    uint32_t nfit_handle;
    uint16_t phys_id;
    uint16_t region_id;
    uint16_t spa_index;
    uint16_t dcr_index;
    uint64_t region_len;
    uint64_t region_offset;
    uint64_t region_dpa;
    uint16_t interleave_index;
    uint16_t interleave_ways;
    uint16_t flags;
    uint16_t reserved;
} QEMU_PACKED;
typedef struct NvdimmNfitMemDev NvdimmNfitMemDev;

/*
 * NVDIMM Control Region Structure
 *
 * It describes the NVDIMM and, if applicable, the Block Control Window.
 */
struct NvdimmNfitControlRegion {
    uint16_t type;
    uint16_t length;
    uint16_t dcr_index;
    uint16_t vendor_id;
    uint16_t device_id;
    uint16_t revision_id;
    uint16_t sub_vendor_id;
    uint16_t sub_device_id;
    uint16_t sub_revision_id;
    uint8_t reserved[6];
    uint32_t serial_number;
    uint16_t fic;
    uint16_t num_bcw;
    uint64_t bcw_size;
    uint64_t cmd_offset;
    uint64_t cmd_size;
    uint64_t status_offset;
    uint64_t status_size;
    uint16_t flags;
    uint8_t reserved2[6];
} QEMU_PACKED;
typedef struct NvdimmNfitControlRegion NvdimmNfitControlRegion;

/*
 * Module serial number is a unique number for each device. We use the
 * slot id of the NVDIMM device to generate this number so that each device
 * is associated with a different number.
 *
 * 0x123456 is a magic number we arbitrarily chose.
 */
static uint32_t nvdimm_slot_to_sn(int slot)
{
    return 0x123456 + slot;
}

/*
 * The handle is used to uniquely associate the nfit_memdev structure with
 * the NVDIMM ACPI device - nfit_memdev.nfit_handle matches the value
 * returned by the ACPI device's _ADR method.
 *
 * We generate the handle from the slot id of the NVDIMM device and reserve
 * 0 for the NVDIMM root device.
 */
static uint32_t nvdimm_slot_to_handle(int slot)
{
    return slot + 1;
}
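
/*
 * For example, the NVDIMM in slot 0 gets handle 1 and slot 1 gets handle 2,
 * while handle 0 stays reserved for the NVDIMM root device.
 */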

/*
 * The index uniquely identifies a structure; 0 is reserved and indicates
 * that the structure is not valid or the associated structure is not
 * present.
 *
 * Each NVDIMM device needs two indexes, one for nfit_spa and another for
 * nfit_dcr, both generated from the slot id of the NVDIMM device.
 */
static uint16_t nvdimm_slot_to_spa_index(int slot)
{
    return (slot + 1) << 1;
}

/* See the comments of nvdimm_slot_to_spa_index(). */
static uint32_t nvdimm_slot_to_dcr_index(int slot)
{
    return nvdimm_slot_to_spa_index(slot) + 1;
}
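
/*
 * Worked example of the index scheme above: slot 0 -> spa_index 2,
 * dcr_index 3; slot 1 -> spa_index 4, dcr_index 5. Index 0 is never handed
 * out, so it keeps its "not valid / not present" meaning.
 */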

/* ACPI 6.0: 5.2.25.1 System Physical Address Range Structure */
static void
nvdimm_build_structure_spa(GArray *structures, DeviceState *dev)
{
    NvdimmNfitSpa *nfit_spa;
    uint64_t addr = object_property_get_int(OBJECT(dev), PC_DIMM_ADDR_PROP,
                                            NULL);
    uint64_t size = object_property_get_int(OBJECT(dev), PC_DIMM_SIZE_PROP,
                                            NULL);
    uint32_t node = object_property_get_int(OBJECT(dev), PC_DIMM_NODE_PROP,
                                            NULL);
    int slot = object_property_get_int(OBJECT(dev), PC_DIMM_SLOT_PROP,
                                       NULL);

    nfit_spa = acpi_data_push(structures, sizeof(*nfit_spa));

    nfit_spa->type = cpu_to_le16(0 /* System Physical Address Range
                                      Structure */);
    nfit_spa->length = cpu_to_le16(sizeof(*nfit_spa));
    nfit_spa->spa_index = cpu_to_le16(nvdimm_slot_to_spa_index(slot));

    /*
     * The control region is strictly tied to the slot, as all the device
     * info, such as the serial number and the indexes, is generated from
     * the slot id.
     */
    nfit_spa->flags = cpu_to_le16(1 /* Control region is strictly for
                                       management during hot add/online
                                       operation */ |
                                  2 /* Data in Proximity Domain field is
                                       valid */);

    /* NUMA node. */
    nfit_spa->proximity_domain = cpu_to_le32(node);
    /* The region is reported as PMEM. */
    memcpy(nfit_spa->type_guid, nvdimm_nfit_spa_uuid,
           sizeof(nvdimm_nfit_spa_uuid));

    nfit_spa->spa_base = cpu_to_le64(addr);
    nfit_spa->spa_length = cpu_to_le64(size);

    /* It is PMEM and can be cached as write-back. */
    nfit_spa->mem_attr = cpu_to_le64(0x8ULL /* EFI_MEMORY_WB */ |
                                     0x8000ULL /* EFI_MEMORY_NV */);
}
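
/*
 * For example (the values here are hypothetical, not generated output): an
 * NVDIMM in slot 0, mapped at guest physical address 0x100000000 with a
 * size of 4GB on NUMA node 0, produces a SPA structure with spa_index = 2,
 * proximity_domain = 0, spa_base = 0x100000000, spa_length = 0x100000000,
 * flags = 0x3 and mem_attr = 0x8008 (EFI_MEMORY_WB | EFI_MEMORY_NV).
 */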

/*
 * ACPI 6.0: 5.2.25.2 Memory Device to System Physical Address Range Mapping
 * Structure
 */
static void
nvdimm_build_structure_memdev(GArray *structures, DeviceState *dev)
{
    NvdimmNfitMemDev *nfit_memdev;
    uint64_t addr = object_property_get_int(OBJECT(dev), PC_DIMM_ADDR_PROP,
                                            NULL);
    uint64_t size = object_property_get_int(OBJECT(dev), PC_DIMM_SIZE_PROP,
                                            NULL);
    int slot = object_property_get_int(OBJECT(dev), PC_DIMM_SLOT_PROP,
                                       NULL);
    uint32_t handle = nvdimm_slot_to_handle(slot);

    nfit_memdev = acpi_data_push(structures, sizeof(*nfit_memdev));

    nfit_memdev->type = cpu_to_le16(1 /* Memory Device to System Address
                                         Range Map Structure */);
    nfit_memdev->length = cpu_to_le16(sizeof(*nfit_memdev));
    nfit_memdev->nfit_handle = cpu_to_le32(handle);

    /*
     * Associate the memory device with the System Physical Address Range
     * Structure.
     */
    nfit_memdev->spa_index = cpu_to_le16(nvdimm_slot_to_spa_index(slot));
    /* Associate the memory device with the Control Region Structure. */
    nfit_memdev->dcr_index = cpu_to_le16(nvdimm_slot_to_dcr_index(slot));

    /* The memory region on the device. */
    nfit_memdev->region_len = cpu_to_le64(size);
    nfit_memdev->region_dpa = cpu_to_le64(addr);

    /* Only one interleave for PMEM. */
    nfit_memdev->interleave_ways = cpu_to_le16(1);
}

/*
 * ACPI 6.0: 5.2.25.5 NVDIMM Control Region Structure.
 */
static void nvdimm_build_structure_dcr(GArray *structures, DeviceState *dev)
{
    NvdimmNfitControlRegion *nfit_dcr;
    int slot = object_property_get_int(OBJECT(dev), PC_DIMM_SLOT_PROP,
                                       NULL);
    uint32_t sn = nvdimm_slot_to_sn(slot);

    nfit_dcr = acpi_data_push(structures, sizeof(*nfit_dcr));

    nfit_dcr->type = cpu_to_le16(4 /* NVDIMM Control Region Structure */);
    nfit_dcr->length = cpu_to_le16(sizeof(*nfit_dcr));
    nfit_dcr->dcr_index = cpu_to_le16(nvdimm_slot_to_dcr_index(slot));

    /* vendor: Intel. */
    nfit_dcr->vendor_id = cpu_to_le16(0x8086);
    nfit_dcr->device_id = cpu_to_le16(1);

    /* The _DSM method follows Intel's DSM specification. */
    nfit_dcr->revision_id = cpu_to_le16(1 /* Current Revision supported
                                             in ACPI 6.0 is 1. */);
    nfit_dcr->serial_number = cpu_to_le32(sn);
    nfit_dcr->fic = cpu_to_le16(0x201 /* Format Interface Code. See Chapter
                                         2: NVDIMM Device Specific Method
                                         (DSM) in DSM Spec Rev1. */);
}

static GArray *nvdimm_build_device_structure(GSList *device_list)
{
    GArray *structures = g_array_new(false, true /* clear */, 1);

    for (; device_list; device_list = device_list->next) {
        DeviceState *dev = device_list->data;

        /* build System Physical Address Range Structure. */
        nvdimm_build_structure_spa(structures, dev);

        /*
         * build Memory Device to System Physical Address Range Mapping
         * Structure.
         */
        nvdimm_build_structure_memdev(structures, dev);

        /* build NVDIMM Control Region Structure. */
        nvdimm_build_structure_dcr(structures, dev);
    }

    return structures;
}

static void nvdimm_build_nfit(GSList *device_list, GArray *table_offsets,
                              GArray *table_data, GArray *linker)
{
    GArray *structures = nvdimm_build_device_structure(device_list);
    unsigned int header;

    acpi_add_table(table_offsets, table_data);

    /* NFIT header. */
    header = table_data->len;
    acpi_data_push(table_data, sizeof(NvdimmNfitHeader));
    /* NVDIMM device structures. */
    g_array_append_vals(table_data, structures->data, structures->len);

    build_header(linker, table_data,
                 (void *)(table_data->data + header), "NFIT",
                 sizeof(NvdimmNfitHeader) + structures->len, 1, NULL, NULL);
    g_array_free(structures, true);
}
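
/*
 * The NFIT appended to table_data above is laid out as the table header
 * followed by one SPA, one memdev and one DCR structure per plugged NVDIMM,
 * in device-list order:
 *
 *   [ NFIT header | SPA(a) memdev(a) DCR(a) | SPA(b) memdev(b) DCR(b) | ... ]
 */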

#define NVDIMM_COMMON_DSM "NCAL"

static void nvdimm_build_common_dsm(Aml *dev)
{
    Aml *method, *ifctx, *function;
    uint8_t byte_list[1];

    method = aml_method(NVDIMM_COMMON_DSM, 4, AML_NOTSERIALIZED);
    function = aml_arg(2);

    /*
     * Function 0 is called by OSPM to inquire which functions the device
     * supports.
     */
    ifctx = aml_if(aml_equal(function, aml_int(0)));
    byte_list[0] = 0 /* No function supported */;
    aml_append(ifctx, aml_return(aml_buffer(1, byte_list)));
    aml_append(method, ifctx);

    /* No function is supported yet, so return the "Not Supported" status. */
    byte_list[0] = 1 /* Not Supported */;
    aml_append(method, aml_return(aml_buffer(1, byte_list)));

    aml_append(dev, method);
}
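
/*
 * For illustration only: the AML built above corresponds to roughly the
 * following ASL (a decompiler may print it slightly differently):
 *
 *   Method (NCAL, 4, NotSerialized)
 *   {
 *       If (LEqual (Arg2, Zero)) {
 *           Return (Buffer (One) { 0x00 })   // function 0: nothing supported
 *       }
 *       Return (Buffer (One) { 0x01 })       // anything else: "Not Supported"
 *   }
 */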

static void nvdimm_build_device_dsm(Aml *dev)
{
    Aml *method;

    method = aml_method("_DSM", 4, AML_NOTSERIALIZED);
    aml_append(method, aml_return(aml_call4(NVDIMM_COMMON_DSM, aml_arg(0),
               aml_arg(1), aml_arg(2), aml_arg(3))));
    aml_append(dev, method);
}
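
/*
 * Roughly equivalent ASL for the method built above (illustration only):
 *
 *   Method (_DSM, 4, NotSerialized)
 *   {
 *       Return (NCAL (Arg0, Arg1, Arg2, Arg3))
 *   }
 */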

static void nvdimm_build_nvdimm_devices(GSList *device_list, Aml *root_dev)
{
    for (; device_list; device_list = device_list->next) {
        DeviceState *dev = device_list->data;
        int slot = object_property_get_int(OBJECT(dev), PC_DIMM_SLOT_PROP,
                                           NULL);
        uint32_t handle = nvdimm_slot_to_handle(slot);
        Aml *nvdimm_dev;

        nvdimm_dev = aml_device("NV%02X", slot);

        /*
         * ACPI 6.0: 9.20 NVDIMM Devices:
         *
         * _ADR object that is used to supply OSPM with unique address
         * of the NVDIMM device. This is done by returning the NFIT Device
         * handle that is used to identify the associated entries in ACPI
         * table NFIT or _FIT.
         */
        aml_append(nvdimm_dev, aml_name_decl("_ADR", aml_int(handle)));

        nvdimm_build_device_dsm(nvdimm_dev);
        aml_append(root_dev, nvdimm_dev);
    }
}
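
/*
 * Illustration only: for an NVDIMM plugged into slot 0, the loop above emits
 * roughly the following ASL under the root device (the handle, 1, comes from
 * nvdimm_slot_to_handle(0)):
 *
 *   Device (NV00)
 *   {
 *       Name (_ADR, One)
 *       Method (_DSM, 4, NotSerialized)
 *       {
 *           Return (NCAL (Arg0, Arg1, Arg2, Arg3))
 *       }
 *   }
 */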

static void nvdimm_build_ssdt(GSList *device_list, GArray *table_offsets,
                              GArray *table_data, GArray *linker)
{
    Aml *ssdt, *sb_scope, *dev;

    acpi_add_table(table_offsets, table_data);

    ssdt = init_aml_allocator();
    acpi_data_push(ssdt->buf, sizeof(AcpiTableHeader));

    sb_scope = aml_scope("\\_SB");

    dev = aml_device("NVDR");

    /*
     * ACPI 6.0: 9.20 NVDIMM Devices:
     *
     * The ACPI Name Space device uses _HID of ACPI0012 to identify the root
     * NVDIMM interface device. Platform firmware is required to contain one
     * such device in _SB scope if NVDIMM support is exposed by the platform
     * to OSPM.
     * For each NVDIMM present or intended to be supported by the platform,
     * platform firmware also exposes an ACPI Namespace Device under the
     * root device.
     */
    aml_append(dev, aml_name_decl("_HID", aml_string("ACPI0012")));

    nvdimm_build_common_dsm(dev);
    nvdimm_build_device_dsm(dev);

    nvdimm_build_nvdimm_devices(device_list, dev);

    aml_append(sb_scope, dev);

    aml_append(ssdt, sb_scope);
    /* copy AML table into ACPI tables blob and patch header there */
    g_array_append_vals(table_data, ssdt->buf->data, ssdt->buf->len);
    build_header(linker, table_data,
        (void *)(table_data->data + table_data->len - ssdt->buf->len),
        "SSDT", ssdt->buf->len, 1, NULL, "NVDIMM");
    free_aml_allocator();
}
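
/*
 * Putting it together, the SSDT namespace built above looks roughly like
 * this (the number of NVxx devices and their names depend on the plugged
 * NVDIMMs; illustration only, NCAL being the expansion of
 * NVDIMM_COMMON_DSM):
 *
 *   Scope (\_SB)
 *   {
 *       Device (NVDR)
 *       {
 *           Name (_HID, "ACPI0012")
 *           Method (NCAL, 4, NotSerialized) { ... }
 *           Method (_DSM, 4, NotSerialized) { ... }
 *           Device (NV00) { ... }
 *           Device (NV01) { ... }
 *       }
 *   }
 */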

void nvdimm_build_acpi(GArray *table_offsets, GArray *table_data,
                       GArray *linker)
{
    GSList *device_list;

    device_list = nvdimm_get_plugged_device_list();
    if (!device_list) {
        /* no NVDIMM device is plugged. */
        return;
    }
    nvdimm_build_nfit(device_list, table_offsets, table_data, linker);
    nvdimm_build_ssdt(device_list, table_offsets, table_data, linker);
    g_slist_free(device_list);
}
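
/*
 * A minimal usage sketch, assuming the machine's ACPI build code keeps the
 * usual table_offsets/tables_blob/linker triple (the names here are
 * illustrative, not a fixed API):
 *
 *   GArray *table_offsets = ...;
 *   GArray *tables_blob = ...;
 *   GArray *linker = ...;
 *
 *   nvdimm_build_acpi(table_offsets, tables_blob, linker);
 *
 * The call is a no-op when no NVDIMM device has been realized, so callers do
 * not need to check for NVDIMM support themselves.
 */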