// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#include <linux/syscalls.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/hwprobe.h>
#include <asm/switch_to.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm-generic/mman-common.h>
#include <vdso/vsyscall.h>

static long riscv_sys_mmap(unsigned long addr, unsigned long len,
			   unsigned long prot, unsigned long flags,
			   unsigned long fd, off_t offset,
			   unsigned long page_shift_offset)
{
	if (unlikely(offset & (~PAGE_MASK >> page_shift_offset)))
		return -EINVAL;

	return ksys_mmap_pgoff(addr, len, prot, flags, fd,
			       offset >> (PAGE_SHIFT - page_shift_offset));
}
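
/*
 * Worked example for the check above (illustrative): with 4 KiB pages,
 * ~PAGE_MASK is 0xfff. For sys_mmap (page_shift_offset == 0) the test
 * rejects any byte offset that isn't page aligned. For sys_mmap2
 * (page_shift_offset == 12) the mask shifts down to 0, so any offset is
 * accepted and passed through as a 4096-byte page number. With 64 KiB
 * pages, mmap2's mask becomes 0xf: the 4096-byte offset must be a
 * multiple of 16, i.e. the byte offset must still be page aligned.
 */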

#ifdef CONFIG_64BIT
SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
	unsigned long, prot, unsigned long, flags,
	unsigned long, fd, off_t, offset)
{
	return riscv_sys_mmap(addr, len, prot, flags, fd, offset, 0);
}
#endif

#if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT)
SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len,
	unsigned long, prot, unsigned long, flags,
	unsigned long, fd, off_t, offset)
{
	/*
	 * Note that the shift for mmap2 is constant (12),
	 * regardless of PAGE_SIZE
	 */
	return riscv_sys_mmap(addr, len, prot, flags, fd, offset, 12);
}
#endif
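
/*
 * Illustrative userspace sketch (not part of this file): a 32-bit
 * caller can map a file at byte offset 8 GiB, which doesn't fit in its
 * 32-bit off_t, by passing the offset in 4096-byte units instead:
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	void *p = (void *)syscall(SYS_mmap2, NULL, length, PROT_READ,
 *				  MAP_SHARED, fd, (8ULL << 30) / 4096);
 */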

/*
 * Allows the instruction cache to be flushed from userspace. Despite RISC-V
 * having a direct 'fence.i' instruction available to userspace (which we
 * can't trap!), that's not actually viable when running on Linux because the
 * kernel might schedule a process on another hart. There is no way for
 * userspace to handle this without invoking the kernel (as it doesn't know the
 * thread->hart mappings), so we've defined a RISC-V specific system call to
 * flush the instruction cache.
 *
 * sys_riscv_flush_icache() is defined to flush the instruction cache over an
 * address range, with the flush applying to either all threads or just the
 * caller. We don't currently do anything with the address range, that's just
 * in there for forwards compatibility.
 */
SYSCALL_DEFINE3(riscv_flush_icache, uintptr_t, start, uintptr_t, end,
	uintptr_t, flags)
{
	/* Check the reserved flags. */
	if (unlikely(flags & ~SYS_RISCV_FLUSH_ICACHE_ALL))
		return -EINVAL;

	flush_icache_mm(current->mm, flags & SYS_RISCV_FLUSH_ICACHE_LOCAL);

	return 0;
}
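
/*
 * Illustrative userspace sketch (not part of this file; glibc is
 * believed to expose a __riscv_flush_icache() wrapper for this
 * syscall): after writing JIT-generated instructions, make them
 * visible on every hart the thread may migrate to by passing
 * flags == 0 ("all threads"):
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	copy_jit_code(buf, size);	// hypothetical code emitter
 *	syscall(__NR_riscv_flush_icache, buf, (char *)buf + size, 0UL);
 */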

/*
 * The hwprobe interface, for allowing userspace to probe to see which features
 * are supported by the hardware. See Documentation/riscv/hwprobe.rst for more
 * details.
 */
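
/*
 * Illustrative userspace sketch (not part of this file): querying two
 * keys for all online CPUs by using the NULL/0 cpumask shortcut:
 *
 *	#include <asm/hwprobe.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	struct riscv_hwprobe pairs[2] = {
 *		{ .key = RISCV_HWPROBE_KEY_BASE_BEHAVIOR },
 *		{ .key = RISCV_HWPROBE_KEY_IMA_EXT_0 },
 *	};
 *
 *	syscall(__NR_riscv_hwprobe, pairs, 2, 0, NULL, 0);
 *
 * On success each pairs[i].value holds the answer; a key the kernel
 * doesn't recognize comes back with its key set to -1 and value 0.
 */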

static void hwprobe_arch_id(struct riscv_hwprobe *pair,
			    const struct cpumask *cpus)
{
	u64 id = -1ULL;
	bool first = true;
	int cpu;

	for_each_cpu(cpu, cpus) {
		u64 cpu_id;

		switch (pair->key) {
		case RISCV_HWPROBE_KEY_MVENDORID:
			cpu_id = riscv_cached_mvendorid(cpu);
			break;
		case RISCV_HWPROBE_KEY_MIMPID:
			cpu_id = riscv_cached_mimpid(cpu);
			break;
		case RISCV_HWPROBE_KEY_MARCHID:
			cpu_id = riscv_cached_marchid(cpu);
			break;
		}

		if (first) {
			id = cpu_id;
			first = false;
		}

		/*
		 * If there's a mismatch for the given set, return -1 in the
		 * value.
		 */
		if (id != cpu_id) {
			id = -1ULL;
			break;
		}
	}

	pair->value = id;
}
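
/*
 * Example of the reduction above (ID values illustrative): if every
 * hart in the set reports mvendorid 0x489, the pair's value is 0x489.
 * If one hart reports 0x489 and another 0x5b7, the IDs disagree and
 * the pair's value is -1.
 */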

static u64 hwprobe_misaligned(const struct cpumask *cpus)
{
	int cpu;
	u64 perf = -1ULL;

	for_each_cpu(cpu, cpus) {
		int this_perf = per_cpu(misaligned_access_speed, cpu);

		if (perf == -1ULL)
			perf = this_perf;

		if (perf != this_perf) {
			perf = RISCV_HWPROBE_MISALIGNED_UNKNOWN;
			break;
		}
	}

	if (perf == -1ULL)
		return RISCV_HWPROBE_MISALIGNED_UNKNOWN;

	return perf;
}
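
/*
 * Example of the reduction above: if every CPU in the set reports
 * RISCV_HWPROBE_MISALIGNED_FAST, the result is FAST. Mixing FAST and
 * SLOW CPUs, or passing an empty set (perf stays -1ULL), collapses to
 * RISCV_HWPROBE_MISALIGNED_UNKNOWN.
 */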

static void hwprobe_one_pair(struct riscv_hwprobe *pair,
			     const struct cpumask *cpus)
{
	switch (pair->key) {
	case RISCV_HWPROBE_KEY_MVENDORID:
	case RISCV_HWPROBE_KEY_MARCHID:
	case RISCV_HWPROBE_KEY_MIMPID:
		hwprobe_arch_id(pair, cpus);
		break;
	/*
	 * The kernel already assumes that the base single-letter ISA
	 * extensions are supported on all harts, and only supports the
	 * IMA base, so just cheat a bit here and tell that to
	 * userspace.
	 */
	case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
		pair->value = RISCV_HWPROBE_BASE_BEHAVIOR_IMA;
		break;

	case RISCV_HWPROBE_KEY_IMA_EXT_0:
		pair->value = 0;
		if (has_fpu())
			pair->value |= RISCV_HWPROBE_IMA_FD;

		if (riscv_isa_extension_available(NULL, c))
			pair->value |= RISCV_HWPROBE_IMA_C;

		break;

	case RISCV_HWPROBE_KEY_CPUPERF_0:
		pair->value = hwprobe_misaligned(cpus);
		break;

	/*
	 * For forward compatibility, unknown keys don't fail the whole
	 * call, but get their element key set to -1 and value set to 0
	 * indicating they're unrecognized.
	 */
	default:
		pair->key = -1;
		pair->value = 0;
		break;
	}
}
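
/*
 * Example: probing { .key = 0x7fffffff } on a kernel that doesn't know
 * that key returns { .key = -1, .value = 0 } instead of failing the
 * whole syscall, so newer userspace keeps working on older kernels.
 */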

static int do_riscv_hwprobe(struct riscv_hwprobe __user *pairs,
			    size_t pair_count, size_t cpu_count,
			    unsigned long __user *cpus_user,
			    unsigned int flags)
{
	size_t out;
	int ret;
	cpumask_t cpus;

	/* Check the reserved flags. */
	if (flags != 0)
		return -EINVAL;

	/*
	 * The interface supports taking in a CPU mask, and returns values that
	 * are consistent across that mask. Allow userspace to specify NULL and
	 * 0 as a shortcut to all online CPUs.
	 */
	cpumask_clear(&cpus);
	if (!cpu_count && !cpus_user) {
		cpumask_copy(&cpus, cpu_online_mask);
	} else {
		if (cpu_count > cpumask_size())
			cpu_count = cpumask_size();

		ret = copy_from_user(&cpus, cpus_user, cpu_count);
		if (ret)
			return -EFAULT;

		/*
		 * Userspace must provide at least one online CPU, without that
		 * there's no way to define what is supported.
		 */
		cpumask_and(&cpus, &cpus, cpu_online_mask);
		if (cpumask_empty(&cpus))
			return -EINVAL;
	}

	for (out = 0; out < pair_count; out++, pairs++) {
		struct riscv_hwprobe pair;

		if (get_user(pair.key, &pairs->key))
			return -EFAULT;

		pair.value = 0;
		hwprobe_one_pair(&pair, &cpus);
		ret = put_user(pair.key, &pairs->key);
		if (ret == 0)
			ret = put_user(pair.value, &pairs->value);

		if (ret)
			return -EFAULT;
	}

	return 0;
}

#ifdef CONFIG_MMU

static int __init init_hwprobe_vdso_data(void)
{
	struct vdso_data *vd = __arch_get_k_vdso_data();
	struct arch_vdso_data *avd = &vd->arch_data;
	u64 id_bitsmash = 0;
	struct riscv_hwprobe pair;
	int key;

	/*
	 * Initialize vDSO data with the answers for the "all CPUs" case, to
	 * save a syscall in the common case.
	 */
	for (key = 0; key <= RISCV_HWPROBE_MAX_KEY; key++) {
		pair.key = key;
		hwprobe_one_pair(&pair, cpu_online_mask);

		WARN_ON_ONCE(pair.key < 0);

		avd->all_cpu_hwprobe_values[key] = pair.value;
		/*
		 * Smash together the vendor, arch, and impl IDs to see if
		 * they're all 0 or any negative.
		 */
		if (key <= RISCV_HWPROBE_KEY_MIMPID)
			id_bitsmash |= pair.value;
	}

	/*
	 * If the arch, vendor, and implementation ID are all the same across
	 * all harts, then assume all CPUs are the same, and allow the vDSO to
	 * answer queries for arbitrary masks. However if all values are 0 (not
	 * populated) or any value returns -1 (varies across CPUs), then the
	 * vDSO should defer to the kernel for exotic cpu masks.
	 */
	avd->homogeneous_cpus = id_bitsmash != 0 && id_bitsmash != -1;
	return 0;
}

arch_initcall_sync(init_hwprobe_vdso_data);

#endif /* CONFIG_MMU */

SYSCALL_DEFINE5(riscv_hwprobe, struct riscv_hwprobe __user *, pairs,
		size_t, pair_count, size_t, cpu_count, unsigned long __user *,
		cpus, unsigned int, flags)
{
	return do_riscv_hwprobe(pairs, pair_count, cpu_count,
				cpus, flags);
}