/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2011
 */
#include <asm/ppc_asm.h>

/* 0 == don't use VMX, 1 == use VMX */
#define SELFTEST_CASE 0
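/*
 * SELFTEST_CASE appears to matter only when this file is built for the
 * powerpc copyloops selftests, which seem to rebuild it with the VMX
 * path forced on or off; in the kernel proper the CPU feature check
 * further down still decides at runtime.
 */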
#ifdef __BIG_ENDIAN__
#define LVS(VRT,RA,RB) lvsl VRT,RA,RB
#define VPERM(VRT,VRA,VRB,VRC) vperm VRT,VRA,VRB,VRC
#else
#define LVS(VRT,RA,RB) lvsr VRT,RA,RB
#define VPERM(VRT,VRA,VRB,VRC) vperm VRT,VRB,VRA,VRC
#endif
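/*
 * These macros hide the endian difference in the unaligned VMX copy:
 * lvsl/lvsr build a permute control vector from the low bits of the
 * unaligned source address, and vperm then picks the wanted 16 bytes
 * out of two adjacent aligned quadword loads.  Swapping the vperm
 * source operands on little-endian keeps the call sites identical.
 */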
        EX_TABLE(100b,.Ldo_err1)
        EX_TABLE(200b,.Ldo_err2)
        EX_TABLE(300b,.Ldo_err3)
        EX_TABLE(400b,.Ldo_err4)
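/*
 * The EX_TABLE() lines above live inside err1..err4 assembler macros
 * (the .macro/.endm wrappers and their local labels are not shown
 * here).  Each errN macro tags the user-access instruction that
 * follows it with an exception-table entry, so a fault on that access
 * branches to the matching .Ldo_errN cleanup path below instead of
 * oopsing.
 */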
.Ldo_err4:
        ld r16,STK_REG(R16)(r1)
        ld r15,STK_REG(R15)(r1)
        ld r14,STK_REG(R14)(r1)
.Ldo_err3:
        bl exit_vmx_usercopy
        ld r0,STACKFRAMESIZE+16(r1)
        mtlr r0
        b .Lexit
#endif /* CONFIG_ALTIVEC */
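/*
 * Fault recovery strategy: the .Ldo_errN handlers restore whichever
 * nonvolatile GPRs and stack frame the faulting loop was using, reload
 * the original (dest, src, len) arguments saved at entry, and then
 * redo the whole copy with __copy_tofrom_user_base, which copes with
 * partial copies and returns the number of bytes left uncopied.
 */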
.Ldo_err2:
        ld r22,STK_REG(R22)(r1)
        ld r21,STK_REG(R21)(r1)
        ld r20,STK_REG(R20)(r1)
        ld r19,STK_REG(R19)(r1)
        ld r18,STK_REG(R18)(r1)
        ld r17,STK_REG(R17)(r1)
        ld r16,STK_REG(R16)(r1)
        ld r15,STK_REG(R15)(r1)
        ld r14,STK_REG(R14)(r1)
.Lexit:
        addi r1,r1,STACKFRAMESIZE
.Ldo_err1:
        ld r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
        ld r4,-STACKFRAMESIZE+STK_REG(R30)(r1)
        ld r5,-STACKFRAMESIZE+STK_REG(R29)(r1)
        b __copy_tofrom_user_base
_GLOBAL(__copy_tofrom_user_power7)
        std r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
        std r4,-STACKFRAMESIZE+STK_REG(R30)(r1)
        std r5,-STACKFRAMESIZE+STK_REG(R29)(r1)

test_feature = SELFTEST_CASE
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
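/*
 * The branch to the VMX path sits in a BEGIN_FTR_SECTION /
 * END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) pair (the start marker and
 * the branch itself are not shown here), so at boot it is left active
 * only on CPUs that advertise Altivec and nopped out otherwise.
 * test_feature lets the selftest build choose the path via
 * SELFTEST_CASE instead of the real feature bits.
 */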
        /* Get the source 8B aligned */

        stdu r1,-STACKFRAMESIZE(r1)
        std r14,STK_REG(R14)(r1)
        std r15,STK_REG(R15)(r1)
        std r16,STK_REG(R16)(r1)
        std r17,STK_REG(R17)(r1)
        std r18,STK_REG(R18)(r1)
        std r19,STK_REG(R19)(r1)
        std r20,STK_REG(R20)(r1)
        std r21,STK_REG(R21)(r1)
        std r22,STK_REG(R22)(r1)
        std r0,STACKFRAMESIZE+16(r1)
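        /*
         * r14-r22 are nonvolatile and must be saved before the
         * unrolled loop below clobbers them; r0 is presumably holding
         * the link register at this point (mflr not shown here) and is
         * parked in the caller's LR save slot at STACKFRAMESIZE+16.
         */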
        /* Now do cacheline (128B) sized loads and stores. */
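        /*
         * Each pass of the loop below should move one full 128B line
         * as sixteen 8-byte loads followed by sixteen 8-byte stores
         * (only the tail of the store group appears below), all
         * wrapped in err2; so a fault unwinds through .Ldo_err2.
         */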
err2;   std r19,104(r3)
err2;   std r20,112(r3)
err2;   std r21,120(r3)

        ld r14,STK_REG(R14)(r1)
        ld r15,STK_REG(R15)(r1)
        ld r16,STK_REG(R16)(r1)
        ld r17,STK_REG(R17)(r1)
        ld r18,STK_REG(R18)(r1)
        ld r19,STK_REG(R19)(r1)
        ld r20,STK_REG(R20)(r1)
        ld r21,STK_REG(R21)(r1)
        ld r22,STK_REG(R22)(r1)
        addi r1,r1,STACKFRAMESIZE

        /* Up to 127B to go */

        /* Up to 63B to go */

        /* Up to 31B to go */

9:      clrldi r5,r5,(64-4)

        /* Up to 15B to go */

err1;   lwz r0,0(r4)            /* Less chance of a reject with word ops */

.Lunwind_stack_nonvmx_copy:
        addi r1,r1,STACKFRAMESIZE

#ifdef CONFIG_ALTIVEC
        stdu r1,-STACKFRAMESIZE(r1)
        bl enter_vmx_usercopy
        ld r0,STACKFRAMESIZE+16(r1)
        ld r3,STK_REG(R31)(r1)
        ld r4,STK_REG(R30)(r1)
        ld r5,STK_REG(R29)(r1)
        /*
         * We prefetch both the source and destination using enhanced touch
         * instructions. We use a stream ID of 0 for the load side and
         * 1 for the store side.
         */
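        /*
         * The dcbt/dcbtst sequence below uses the data-stream forms of
         * the touch instructions: TH=0b01000 nominates a stream and
         * supplies its start address, while TH=0b01010 carries the
         * length/depth control and, last of all, the "GO" command that
         * starts every nominated stream.
         */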
        ori r9,r9,1             /* stream=1 */

        srdi r7,r5,7            /* length in cachelines, capped at 0x3FF */

1:      lis r0,0x0E00           /* depth=7 */

        ori r10,r7,1            /* stream=1 */

        lis r8,0x8000           /* GO=1 */

        /* setup read stream 0 */
        dcbt 0,r6,0b01000       /* addr from */
        dcbt 0,r7,0b01010       /* length and depth from */
        /* setup write stream 1 */
        dcbtst 0,r9,0b01000     /* addr to */
        dcbtst 0,r10,0b01010    /* length and depth to */

        dcbt 0,r8,0b01010       /* all streams GO */
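        /*
         * enter_vmx_usercopy returns zero (its result was compared
         * into cr1 above, not shown here) when it is not safe to use
         * VMX in this context, in which case pop the frame and fall
         * back to the scalar copy.
         */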
        beq cr1,.Lunwind_stack_nonvmx_copy

        /*
         * If source and destination are not relatively aligned we use a
         * slower permute loop.
         */
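        /*
         * r6 should hold src XOR dst at this point (computed just
         * above, not shown here); rldicl. keeps only its low 4 bits,
         * so the branch is taken whenever the two pointers sit at
         * different offsets within a 16B quadword and can never be
         * 16B aligned at the same time.
         */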
        rldicl. r6,r6,0,(64-4)
        bne .Lvmx_unaligned_copy

        /* Get the destination 16B aligned */
        /* Get the destination 128B aligned */
        std r14,STK_REG(R14)(r1)
        std r15,STK_REG(R15)(r1)
        std r16,STK_REG(R16)(r1)

        /*
         * Now do cacheline sized loads and stores. By this stage the
         * cacheline stores are also cacheline aligned.
         */
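        /*
         * Each iteration of the loop below copies one 128B line as
         * eight 16B lvx loads and eight stvx stores; r14-r16 are saved
         * above so they can be reused as extra index offset registers
         * alongside r9-r12.
         */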
        ld r14,STK_REG(R14)(r1)
        ld r15,STK_REG(R15)(r1)
        ld r16,STK_REG(R16)(r1)

        /* Up to 127B to go */

        /* Up to 15B to go */
11:     clrldi r5,r5,(64-4)

15:     addi r1,r1,STACKFRAMESIZE
        b exit_vmx_usercopy     /* tail call optimise */
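        /*
         * Tail call: LR was restored after the enter_vmx_usercopy call,
         * so the blr inside exit_vmx_usercopy returns straight to our
         * caller, and its return value (zero, meaning no bytes were
         * left uncopied) becomes this routine's result.
         */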
.Lvmx_unaligned_copy:
        /* Get the destination 16B aligned */

err3;   lwz r0,0(r4)            /* Less chance of a reject with word ops */
        /* Get the destination 128B aligned */
        LVS(v16,0,r4)           /* Setup permute control vector */
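        /*
         * Unaligned-source technique: with the permute control vector
         * in v16, each step loads the two aligned quadwords straddling
         * the unaligned source address and VPERM merges them into the
         * 16 misaligned bytes actually wanted, so every access to user
         * memory stays naturally aligned.
         */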
err3;   stvx v10,r3,r10
err3;   stvx v11,r3,r11

        std r14,STK_REG(R14)(r1)
        std r15,STK_REG(R15)(r1)
        std r16,STK_REG(R16)(r1)

        /*
         * Now do cacheline sized loads and stores. By this stage the
         * cacheline stores are also cacheline aligned.
         */
err4;   stvx v10,r3,r10
err4;   stvx v11,r3,r11
err4;   stvx v12,r3,r12
err4;   stvx v13,r3,r14
err4;   stvx v14,r3,r15
err4;   stvx v15,r3,r16

        ld r14,STK_REG(R14)(r1)
        ld r15,STK_REG(R15)(r1)
        ld r16,STK_REG(R16)(r1)

        /* Up to 127B to go */

err3;   stvx v10,r3,r10
err3;   stvx v11,r3,r11

        /* Up to 15B to go */
11:     clrldi r5,r5,(64-4)
        addi r4,r4,-16          /* Unwind the +16 load offset */

err3;   lwz r0,0(r4)            /* Less chance of a reject with word ops */

15:     addi r1,r1,STACKFRAMESIZE
        b exit_vmx_usercopy     /* tail call optimise */
#endif /* CONFIG_ALTIVEC */