/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/cpufeature.h>
#include <asm/fpu/api.h>

#include "i915_memcpy.h"
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
#define CI_BUG_ON(expr) BUG_ON(expr)
#else
#define CI_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
#endif
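/*
 * Runtime switch for the streaming-load fast path: left false by default
 * and flipped on in i915_memcpy_init_early() when SSE4.1 MOVNTDQA is
 * usable on this machine.
 */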
static DEFINE_STATIC_KEY_FALSE(has_movntdqa);
static void __memcpy_ntdqa(void *dst, const void *src, unsigned long len)
{
	kernel_fpu_begin();

	/* Stream 64 bytes (four 16-byte blocks) per iteration, then the tail. */
	while (len >= 4) {
		asm("movntdqa (%0), %%xmm0\n"
		    "movntdqa 16(%0), %%xmm1\n"
		    "movntdqa 32(%0), %%xmm2\n"
		    "movntdqa 48(%0), %%xmm3\n"
		    "movaps %%xmm0, (%1)\n"
		    "movaps %%xmm1, 16(%1)\n"
		    "movaps %%xmm2, 32(%1)\n"
		    "movaps %%xmm3, 48(%1)\n"
		    :: "r" (src), "r" (dst) : "memory");
		src += 64;
		dst += 64;
		len -= 4;
	}
	while (len--) {
		asm("movntdqa (%0), %%xmm0\n"
		    "movaps %%xmm0, (%1)\n"
		    :: "r" (src), "r" (dst) : "memory");
		src += 16;
		dst += 16;
	}

	kernel_fpu_end();
}
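/*
 * __memcpy_ntdqu below is the same streaming-load loop as __memcpy_ntdqa,
 * but it stores with movups instead of movaps so the destination need not
 * be 16-byte aligned. In both helpers @len counts 16-byte blocks, not
 * bytes; callers convert with len >> 4 or DIV_ROUND_UP(len, 16).
 */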
static void __memcpy_ntdqu(void *dst, const void *src, unsigned long len)
{
	kernel_fpu_begin();

	while (len >= 4) {
		asm("movntdqa (%0), %%xmm0\n"
		    "movntdqa 16(%0), %%xmm1\n"
		    "movntdqa 32(%0), %%xmm2\n"
		    "movntdqa 48(%0), %%xmm3\n"
		    "movups %%xmm0, (%1)\n"
		    "movups %%xmm1, 16(%1)\n"
		    "movups %%xmm2, 32(%1)\n"
		    "movups %%xmm3, 48(%1)\n"
		    :: "r" (src), "r" (dst) : "memory");
		src += 64;
		dst += 64;
		len -= 4;
	}
	while (len--) {
		asm("movntdqa (%0), %%xmm0\n"
		    "movups %%xmm0, (%1)\n"
		    :: "r" (src), "r" (dst) : "memory");
		src += 16;
		dst += 16;
	}

	kernel_fpu_end();
}
/**
 * i915_memcpy_from_wc: perform an accelerated *aligned* read from WC
 * @dst: destination pointer
 * @src: source pointer
 * @len: how many bytes to copy
 *
 * i915_memcpy_from_wc copies @len bytes from @src to @dst using
 * non-temporal instructions where available. Note that all arguments
 * (@src, @dst) must be aligned to 16 bytes and @len must be a multiple
 * of 16.
 *
 * To test whether accelerated reads from WC are supported, use
 * i915_memcpy_from_wc(NULL, NULL, 0);
 *
 * Returns true if the copy was successful, false if the preconditions
 * are not met.
 */
bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len)
{
	if (unlikely(((unsigned long)dst | (unsigned long)src | len) & 15))
		return false;

	if (static_branch_likely(&has_movntdqa)) {
		if (likely(len))
			__memcpy_ntdqa(dst, src, len >> 4);
		return true;
	}

	return false;
}
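/*
 * Illustrative usage sketch (an assumption, not a call site in this file):
 * since the function reports failure instead of falling back internally,
 * callers typically pair it with a plain memcpy. The names dst_vaddr,
 * wc_vaddr and size are hypothetical.
 *
 *	if (!i915_memcpy_from_wc(dst_vaddr, wc_vaddr, size))
 *		memcpy(dst_vaddr, wc_vaddr, size);
 */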
/**
 * i915_unaligned_memcpy_from_wc: perform a mostly accelerated read from WC
 * @dst: destination pointer
 * @src: source pointer
 * @len: how many bytes to copy
 *
 * Like i915_memcpy_from_wc(), the unaligned variant copies @len bytes from
 * @src to @dst using non-temporal instructions where available, but accepts
 * that its arguments may not be aligned, provided they are still valid for
 * the potential 16-byte read past the end.
 */
void i915_unaligned_memcpy_from_wc(void *dst, const void *src, unsigned long len)
{
	unsigned long addr;

	CI_BUG_ON(!i915_has_memcpy_from_wc());

	/* Copy the unaligned head with a plain memcpy, then stream the rest. */
	addr = (unsigned long)src;
	if (!IS_ALIGNED(addr, 16)) {
		unsigned long x = min(ALIGN(addr, 16) - addr, len);

		memcpy(dst, src, x);

		len -= x;
		dst += x;
		src += x;
	}

	if (likely(len))
		__memcpy_ntdqu(dst, src, DIV_ROUND_UP(len, 16));
}
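/*
 * Worked example of the trailing overrun (derived from the code above, not
 * from the kernel-doc): once @src has been advanced to a 16-byte boundary,
 * a remaining len of 100 bytes becomes DIV_ROUND_UP(100, 16) = 7 blocks,
 * i.e. 112 bytes are read from @src and stored to @dst. Both buffers must
 * therefore remain valid up to the next 16-byte boundary past @len.
 */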
void i915_memcpy_init_early(struct drm_i915_private *dev_priv)
{
	/*
	 * Some hypervisors (e.g. KVM) don't support VEX-prefixed instruction
	 * emulation, so don't enable movntdqa when running in a hypervisor
	 * guest.
	 */
	if (static_cpu_has(X86_FEATURE_XMM4_1) &&
	    !boot_cpu_has(X86_FEATURE_HYPERVISOR))
		static_branch_enable(&has_movntdqa);
}