/*
 * Floating-point, VMX/Altivec and VSX loads and stores
 * for use in instruction emulation.
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <[email protected]>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/ppc-opcode.h>
#include <asm/reg.h>
#include <asm/asm-offsets.h>
#include <linux/errno.h>

#ifdef CONFIG_PPC_FPU

#define STKFRM	(PPC_MIN_STKFRM + 16)

/* Get the contents of frN into *p; N is in r3 and p is in r4. */
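/*
 * The register number is not known at build time, so each access goes
 * through a small jump table: r3 is scaled to N * 8 (the size of one
 * table entry), added to the table's address (picked up in LR by the
 * "bcl 20,31,1f"), and branched to through CTR.  Each entry performs
 * the access for one register and branches to the common exit, which
 * restores the MSR saved in r6.  The FP routines enable MSR_FP around
 * the access; the Altivec routines below do the same with MSR_VEC,
 * and load_vsrn/store_vsrn with MSR_VSX.
 */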
_GLOBAL(get_fpr)
	mflr	r0
	mfmsr	r6
	ori	r7, r6, MSR_FP
	MTMSRD(r7)
	isync
	rlwinm	r3,r3,3,0xf8
	bcl	20,31,1f
reg = 0
	.rept	32
	stfd	reg, 0(r4)
	b	2f
reg = reg + 1
	.endr
1:	mflr	r5
	add	r5,r3,r5
	mtctr	r5
	mtlr	r0
	bctr
2:	MTMSRD(r6)
	isync
	blr

/* Put the contents of *p into frN; N is in r3 and p is in r4. */
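/* Same jump-table dispatch as get_fpr, with lfd in each entry to load frN. */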
_GLOBAL(put_fpr)
	mflr	r0
	mfmsr	r6
	ori	r7, r6, MSR_FP
	MTMSRD(r7)
	isync
	rlwinm	r3,r3,3,0xf8
	bcl	20,31,1f
reg = 0
	.rept	32
	lfd	reg, 0(r4)
	b	2f
reg = reg + 1
	.endr
1:	mflr	r5
	add	r5,r3,r5
	mtctr	r5
	mtlr	r0
	bctr
2:	MTMSRD(r6)
	isync
	blr

#ifdef CONFIG_ALTIVEC
/* Get the contents of vrN into *p; N is in r3 and p is in r4. */
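/*
 * Same jump-table dispatch as get_fpr/put_fpr, but with MSR_VEC
 * enabled and stvx/lvx as the per-register access.
 */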
_GLOBAL(get_vr)
	mflr	r0
	mfmsr	r6
	oris	r7, r6, MSR_VEC@h
	MTMSRD(r7)
	isync
	rlwinm	r3,r3,3,0xf8
	bcl	20,31,1f
reg = 0
	.rept	32
	stvx	reg, 0, r4
	b	2f
reg = reg + 1
	.endr
1:	mflr	r5
	add	r5,r3,r5
	mtctr	r5
	mtlr	r0
	bctr
2:	MTMSRD(r6)
	isync
	blr

/* Put the contents of *p into vrN; N is in r3 and p is in r4. */
_GLOBAL(put_vr)
	mflr	r0
	mfmsr	r6
	oris	r7, r6, MSR_VEC@h
	MTMSRD(r7)
	isync
	rlwinm	r3,r3,3,0xf8
	bcl	20,31,1f
reg = 0
	.rept	32
	lvx	reg, 0, r4
	b	2f
reg = reg + 1
	.endr
1:	mflr	r5
	add	r5,r3,r5
	mtctr	r5
	mtlr	r0
	bctr
2:	MTMSRD(r6)
	isync
	blr
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_VSX
/* Get the contents of vsN into vs0; N is in r3. */
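/*
 * get_vsr and put_vsr copy between vsN and vs0 rather than memory.
 * The jump table has 64 eight-byte entries: blr/nop for vs0 itself,
 * otherwise an XXLOR register copy followed by blr.  Callers must
 * have MSR_VSX enabled and take care of saving/restoring vs0.
 */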
_GLOBAL(get_vsr)
	mflr	r0
	rlwinm	r3,r3,3,0x1f8
	bcl	20,31,1f
	blr			/* vs0 is already in vs0 */
	nop
reg = 1
	.rept	63
	XXLOR(0,reg,reg)
	blr
reg = reg + 1
	.endr
1:	mflr	r5
	add	r5,r3,r5
	mtctr	r5
	mtlr	r0
	bctr

/* Put the contents of vs0 into vsN; N is in r3. */
_GLOBAL(put_vsr)
	mflr	r0
	rlwinm	r3,r3,3,0x1f8
	bcl	20,31,1f
	blr			/* vs0 is already in vs0 */
	nop
reg = 1
	.rept	63
	XXLOR(reg,0,0)
	blr
reg = reg + 1
	.endr
1:	mflr	r5
	add	r5,r3,r5
	mtctr	r5
	mtlr	r0
	bctr

/* Load VSX reg N from vector doubleword *p.  N is in r3, p in r4. */
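/*
 * A stack frame is set up so vs0 can be preserved while it is used as
 * a staging register (the save/restore and the copy are skipped when
 * N is 0).  LXVD2X fetches the two doublewords; on little-endian they
 * are swapped with XXSWAPD to give the expected register image, then
 * copied into vsN via put_vsr.  MSR_VSX is enabled for the duration
 * and the original MSR is restored on exit.
 */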
_GLOBAL(load_vsrn)
	PPC_STLU r1,-STKFRM(r1)
	mflr	r0
	PPC_STL	r0,STKFRM+PPC_LR_STKOFF(r1)
	mfmsr	r6
	oris	r7,r6,MSR_VSX@h
	cmpwi	cr7,r3,0
	li	r8,STKFRM-16
	MTMSRD(r7)
	isync
	beq	cr7,1f
	STXVD2X(0,R1,R8)
1:	LXVD2X(0,R0,R4)
#ifdef __LITTLE_ENDIAN__
	XXSWAPD(0,0)
#endif
	beq	cr7,4f
	bl	put_vsr
	LXVD2X(0,R1,R8)
4:	PPC_LL	r0,STKFRM+PPC_LR_STKOFF(r1)
	mtlr	r0
	MTMSRD(r6)
	isync
	addi	r1,r1,STKFRM
	blr

/* Store VSX reg N to vector doubleword *p.  N is in r3, p in r4. */
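/*
 * Mirror image of load_vsrn: vs0 is saved to the stack frame, vsN is
 * copied into vs0 via get_vsr, swapped on little-endian, stored with
 * STXVD2X, and vs0 is then restored along with the original MSR.
 */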
_GLOBAL(store_vsrn)
	PPC_STLU r1,-STKFRM(r1)
	mflr	r0
	PPC_STL	r0,STKFRM+PPC_LR_STKOFF(r1)
	mfmsr	r6
	oris	r7,r6,MSR_VSX@h
	li	r8,STKFRM-16
	MTMSRD(r7)
	isync
	STXVD2X(0,R1,R8)
	bl	get_vsr
#ifdef __LITTLE_ENDIAN__
	XXSWAPD(0,0)
#endif
	STXVD2X(0,R0,R4)
	LXVD2X(0,R1,R8)
	PPC_LL	r0,STKFRM+PPC_LR_STKOFF(r1)
	mtlr	r0
	MTMSRD(r6)
	isync
	mr	r3,r9
	addi	r1,r1,STKFRM
	blr
#endif /* CONFIG_VSX */

/* Convert single-precision to double, without disturbing FPRs. */
/* conv_sp_to_dp(float *sp, double *dp) */
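/*
 * fr0 is parked in a scratch slot at -16(r1) and reloaded afterwards,
 * so the caller's FP registers are preserved; MSR_FP is enabled only
 * around the conversion and the original MSR is restored before
 * returning.
 */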
_GLOBAL(conv_sp_to_dp)
	mfmsr	r6
	ori	r7, r6, MSR_FP
	MTMSRD(r7)
	isync
	stfd	fr0, -16(r1)
	lfs	fr0, 0(r3)
	stfd	fr0, 0(r4)
	lfd	fr0, -16(r1)
	MTMSRD(r6)
	isync
	blr

/* Convert double-precision to single, without disturbing FPRs. */
/* conv_dp_to_sp(double *dp, float *sp) */
_GLOBAL(conv_dp_to_sp)
	mfmsr	r6
	ori	r7, r6, MSR_FP
	MTMSRD(r7)
	isync
	stfd	fr0, -16(r1)
	lfd	fr0, 0(r3)
	stfs	fr0, 0(r4)
	lfd	fr0, -16(r1)
	MTMSRD(r6)
	isync
	blr

#endif /* CONFIG_PPC_FPU */