// SPDX-License-Identifier: GPL-2.0
/*
 * acpi.c - Architecture-Specific Low-Level ACPI Boot Support
 *
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/serial_core.h>
#include <asm/io.h>
#include <asm/numa.h>
#include <asm/loongson.h>
/* Global ACPI state flags; exported for drivers that key off ACPI availability. */
int acpi_disabled;
EXPORT_SYMBOL(acpi_disabled);
int acpi_noirq;
int acpi_pci_disabled;
EXPORT_SYMBOL(acpi_pci_disabled);
int acpi_strict = 1; /* We have no workarounds on LoongArch */

/* CPU accounting maintained by set_processor_mask() during MADT parsing. */
int num_processors;
int disabled_cpus;

/* Upper bound on CORE_PIC (per-core interrupt controller) MADT entries. */
#define MAX_CORE_PIC 256

#define PREFIX			"ACPI: "
36 void __init __iomem * __acpi_map_table(unsigned long phys, unsigned long size)
42 return early_memremap(phys, size);
44 void __init __acpi_unmap_table(void __iomem *map, unsigned long size)
49 early_memunmap(map, size);
52 void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
54 if (!memblock_is_memory(phys))
55 return ioremap(phys, size);
57 return ioremap_cache(phys, size);
61 static int set_processor_mask(u32 id, u32 flags)
66 if (num_processors >= nr_cpu_ids) {
67 pr_warn(PREFIX "nr_cpus/possible_cpus limit of %i reached."
68 " processor 0x%x ignored.\n", nr_cpu_ids, cpuid);
73 if (cpuid == loongson_sysconf.boot_cpu_id)
76 cpu = cpumask_next_zero(-1, cpu_present_mask);
78 if (flags & ACPI_MADT_ENABLED) {
80 set_cpu_possible(cpu, true);
81 set_cpu_present(cpu, true);
82 __cpu_number_map[cpuid] = cpu;
83 __cpu_logical_map[cpu] = cpuid;
92 acpi_parse_processor(union acpi_subtable_headers *header, const unsigned long end)
94 struct acpi_madt_core_pic *processor = NULL;
96 processor = (struct acpi_madt_core_pic *)header;
97 if (BAD_MADT_ENTRY(processor, end))
100 acpi_table_print_madt_entry(&header->common);
102 set_processor_mask(processor->core_id, processor->flags);
109 acpi_parse_eio_master(union acpi_subtable_headers *header, const unsigned long end)
112 struct acpi_madt_eio_pic *eiointc = NULL;
114 eiointc = (struct acpi_madt_eio_pic *)header;
115 if (BAD_MADT_ENTRY(eiointc, end))
118 core = eiointc->node * CORES_PER_EIO_NODE;
119 set_bit(core, &(loongson_sysconf.cores_io_master));
124 static void __init acpi_process_madt(void)
129 for (i = 0; i < NR_CPUS; i++) {
130 __cpu_number_map[i] = -1;
131 __cpu_logical_map[i] = -1;
134 acpi_table_parse_madt(ACPI_MADT_TYPE_CORE_PIC,
135 acpi_parse_processor, MAX_CORE_PIC);
137 acpi_table_parse_madt(ACPI_MADT_TYPE_EIO_PIC,
138 acpi_parse_eio_master, MAX_IO_PICS);
140 loongson_sysconf.nr_cpus = num_processors;
/* Low-level suspend entry point; only wired up when suspend is built in. */
#ifndef CONFIG_SUSPEND
int (*acpi_suspend_lowlevel)(void);
#else
int (*acpi_suspend_lowlevel)(void) = loongarch_acpi_suspend;
#endif
149 void __init acpi_boot_table_init(void)
152 * If acpi_disabled, bail out
158 * Initialize the ACPI boot-time table parser.
160 if (acpi_table_init()) {
165 loongson_sysconf.boot_cpu_id = read_csr_cpuid();
168 * Process the Multiple APIC Description Table (MADT), if present
172 /* Do not enable ACPI SPCR console by default */
173 acpi_parse_spcr(earlycon_acpi_spcr_enable, false);
178 if (earlycon_acpi_spcr_enable)
179 early_init_dt_scan_chosen_stdout();
182 #ifdef CONFIG_ACPI_NUMA
184 static __init int setup_node(int pxm)
186 return acpi_map_pxm_to_node(pxm);
190 * Callback for SLIT parsing. pxm_to_node() returns NUMA_NO_NODE for
191 * I/O localities since SRAT does not list them. I/O localities are
192 * not supported at this point.
194 unsigned int numa_distance_cnt;
196 static inline unsigned int get_numa_distances_cnt(struct acpi_table_slit *slit)
198 return slit->locality_count;
201 void __init numa_set_distance(int from, int to, int distance)
203 if ((u8)distance != distance || (from == to && distance != LOCAL_DISTANCE)) {
204 pr_warn_once("Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
209 node_distances[from][to] = distance;
212 /* Callback for Proximity Domain -> CPUID mapping */
214 acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
220 if (pa->header.length != sizeof(struct acpi_srat_cpu_affinity)) {
224 if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0)
226 pxm = pa->proximity_domain_lo;
227 if (acpi_srat_revision >= 2) {
228 pxm |= (pa->proximity_domain_hi[0] << 8);
229 pxm |= (pa->proximity_domain_hi[1] << 16);
230 pxm |= (pa->proximity_domain_hi[2] << 24);
232 node = setup_node(pxm);
234 pr_err("SRAT: Too many proximity domains %x\n", pxm);
239 if (pa->apic_id >= CONFIG_NR_CPUS) {
240 pr_info("SRAT: PXM %u -> CPU 0x%02x -> Node %u skipped apicid that is too big\n",
241 pxm, pa->apic_id, node);
245 early_numa_add_cpu(pa->apic_id, node);
247 set_cpuid_to_node(pa->apic_id, node);
248 node_set(node, numa_nodes_parsed);
249 pr_info("SRAT: PXM %u -> CPU 0x%02x -> Node %u\n", pxm, pa->apic_id, node);
252 void __init acpi_numa_arch_fixup(void) {}
255 void __init arch_reserve_mem_area(acpi_physical_address addr, size_t size)
257 memblock_reserve(addr, size);
260 #ifdef CONFIG_ACPI_HOTPLUG_CPU
262 #include <acpi/processor.h>
264 static int __ref acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
266 #ifdef CONFIG_ACPI_NUMA
269 nid = acpi_get_node(handle);
270 if (nid != NUMA_NO_NODE) {
271 set_cpuid_to_node(physid, nid);
272 node_set(nid, numa_nodes_parsed);
273 set_cpu_numa_node(cpu, nid);
274 cpumask_set_cpu(cpu, cpumask_of_node(nid));
280 int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id, int *pcpu)
284 cpu = set_processor_mask(physid, ACPI_MADT_ENABLED);
286 pr_info(PREFIX "Unable to map lapic to logical cpu number\n");
290 acpi_map_cpu2node(handle, cpu, physid);
296 EXPORT_SYMBOL(acpi_map_cpu);
298 int acpi_unmap_cpu(int cpu)
300 #ifdef CONFIG_ACPI_NUMA
301 set_cpuid_to_node(cpu_logical_map(cpu), NUMA_NO_NODE);
303 set_cpu_present(cpu, false);
306 pr_info("cpu%d hot remove!\n", cpu);
310 EXPORT_SYMBOL(acpi_unmap_cpu);
312 #endif /* CONFIG_ACPI_HOTPLUG_CPU */