1 /* SPDX-License-Identifier: GPL-2.0 */
3 * Support for Vector Instructions
5 * Assembler macros to generate .byte/.word code for particular
6 * vector instructions that are supported by recent binutils (>= 2.26) only.
8 * Copyright IBM Corp. 2015
12 #ifndef __ASM_S390_VX_INSN_INTERNAL_H
13 #define __ASM_S390_VX_INSN_INTERNAL_H
15 #ifndef __ASM_S390_VX_INSN_H
16 #error only <asm/vx-insn.h> can be included directly
21 /* Macros to generate vector instruction byte code */
23 /* GR_NUM - Retrieve general-purpose register number
25 * @opd: Operand to store register number
26 * @r64: String designating the register in the format "%rN"
83 /* VX_NUM - Retrieve vector register number
85 * @opd: Operand to store register number
86 * @vxr: String designating the register in the format "%vN"
88 * The vector register number is used as an input number to the
89 * instruction, as well as to compute the RXB field of the
195 /* RXB - Compute the most-significant-bit field (RXB) of the used vector registers
197 * @rxb: Operand to store computed RXB value
198 * @v1: First vector register designated operand
199 * @v2: Second vector register designated operand
200 * @v3: Third vector register designated operand
201 * @v4: Fourth vector register designated operand
203 .macro RXB rxb v1 v2=0 v3=0 v4=0
219 /* MRXB - Generate Element Size Control and RXB value
221 * @m: Element size control
222 * @v1: First vector register designated operand (for RXB)
223 * @v2: Second vector register designated operand (for RXB)
224 * @v3: Third vector register designated operand (for RXB)
225 * @v4: Fourth vector register designated operand (for RXB)
227 .macro MRXB m v1 v2=0 v3=0 v4=0
/* Set symbol rxb to the most-significant-bit extensions of the given
 * vector register operands (the RXB macro body is elided from this
 * extract). Unused operands default to 0 and contribute no bits. */
229 RXB rxb, \v1, \v2, \v3, \v4
/* Emit one byte: high nibble = element size control \m,
 * low nibble = the computed RXB value. */
230 .byte (\m << 4) | rxb
233 /* MRXBOPC - Generate Element Size Control, RXB, and final Opcode fields
235 * @m: Element size control
236 * @opc: Opcode
237 * @v1: First vector register designated operand (for RXB)
238 * @v2: Second vector register designated operand (for RXB)
239 * @v3: Third vector register designated operand (for RXB)
240 * @v4: Fourth vector register designated operand (for RXB)
242 .macro MRXBOPC m opc v1 v2=0 v3=0 v4=0
/* Emit the combined element-size-control/RXB byte via MRXB.
 * NOTE(review): the opcode byte (\opc) is presumably emitted by a
 * following .byte that is elided from this extract -- confirm
 * against the full file. */
243 MRXB \m, \v1, \v2, \v3, \v4
247 /* Vector support instructions */
249 /* VECTOR GENERATE BYTE MASK */
252 .word (0xE700 | ((v1&15) << 4))
263 /* VECTOR LOAD VR ELEMENT FROM GR */
264 .macro VLVG v, gr, disp, m
/* First halfword: 0xE7 opcode byte, vector register number v1 and
 * GR number r3. The numeric values v1, r3 and b2 are produced by
 * VX_NUM/GR_NUM invocations elided from this extract -- the base
 * register appears to be fixed (b2); confirm in the full file. */
268 .word 0xE700 | ((v1&15) << 4) | r3
/* Second halfword: base register nibble plus 12-bit displacement. */
269 .word (b2 << 12) | (\disp)
/* VLVGB - VECTOR LOAD VR ELEMENT FROM GR, byte elements.
 * Thin wrapper around VLVG with element size control m = 0 (byte).
 * Fix: dropped the spurious 'base' parameter. VLVG is declared with
 * exactly four parameters (v, gr, disp, m), so the previous
 * five-argument invocation passed one argument too many and would be
 * rejected by the assembler; this now matches the sibling
 * VLVGH/VLVGF/VLVGG wrappers below. */
272 .macro VLVGB v, gr, index
273 VLVG \v, \gr, \index, 0
275 .macro VLVGH v, gr, index
276 VLVG \v, \gr, \index, 1
278 .macro VLVGF v, gr, index
279 VLVG \v, \gr, \index, 2
281 .macro VLVGG v, gr, index
282 VLVG \v, \gr, \index, 3
285 /* VECTOR LOAD REGISTER */
289 .word 0xE700 | ((v1&15) << 4) | (v2&15)
291 MRXBOPC 0, 0x56, v1, v2
295 .macro VL v, disp, index="%r0", base
299 .word 0xE700 | ((v1&15) << 4) | x2
300 .word (b2 << 12) | (\disp)
304 /* VECTOR LOAD ELEMENT */
305 .macro VLEx vr1, disp, index="%r0", base, m3, opc
309 .word 0xE700 | ((v1&15) << 4) | x2
310 .word (b2 << 12) | (\disp)
311 MRXBOPC \m3, \opc, v1
313 .macro VLEB vr1, disp, index="%r0", base, m3
314 VLEx \vr1, \disp, \index, \base, \m3, 0x00
316 .macro VLEH vr1, disp, index="%r0", base, m3
317 VLEx \vr1, \disp, \index, \base, \m3, 0x01
319 .macro VLEF vr1, disp, index="%r0", base, m3
320 VLEx \vr1, \disp, \index, \base, \m3, 0x03
322 .macro VLEG vr1, disp, index="%r0", base, m3
323 VLEx \vr1, \disp, \index, \base, \m3, 0x02
326 /* VECTOR LOAD ELEMENT IMMEDIATE */
327 .macro VLEIx vr1, imm2, m3, opc
329 .word 0xE700 | ((v1&15) << 4)
331 MRXBOPC \m3, \opc, v1
333 .macro VLEIB vr1, imm2, index
334 VLEIx \vr1, \imm2, \index, 0x40
336 .macro VLEIH vr1, imm2, index
337 VLEIx \vr1, \imm2, \index, 0x41
339 .macro VLEIF vr1, imm2, index
340 VLEIx \vr1, \imm2, \index, 0x43
342 .macro VLEIG vr1, imm2, index
343 VLEIx \vr1, \imm2, \index, 0x42
346 /* VECTOR LOAD GR FROM VR ELEMENT */
347 .macro VLGV gr, vr, disp, base="%r0", m
351 .word 0xE700 | (r1 << 4) | (v3&15)
352 .word (b2 << 12) | (\disp)
355 .macro VLGVB gr, vr, disp, base="%r0"
356 VLGV \gr, \vr, \disp, \base, 0
358 .macro VLGVH gr, vr, disp, base="%r0"
359 VLGV \gr, \vr, \disp, \base, 1
361 .macro VLGVF gr, vr, disp, base="%r0"
362 VLGV \gr, \vr, \disp, \base, 2
364 .macro VLGVG gr, vr, disp, base="%r0"
365 VLGV \gr, \vr, \disp, \base, 3
368 /* VECTOR LOAD MULTIPLE */
369 .macro VLM vfrom, vto, disp, base, hint=3
373 .word 0xE700 | ((v1&15) << 4) | (v3&15)
374 .word (b2 << 12) | (\disp)
375 MRXBOPC \hint, 0x36, v1, v3
379 .macro VST vr1, disp, index="%r0", base
383 .word 0xE700 | ((v1&15) << 4) | (x2&15)
384 .word (b2 << 12) | (\disp)
388 /* VECTOR STORE MULTIPLE */
389 .macro VSTM vfrom, vto, disp, base, hint=3
393 .word 0xE700 | ((v1&15) << 4) | (v3&15)
394 .word (b2 << 12) | (\disp)
395 MRXBOPC \hint, 0x3E, v1, v3
399 .macro VPERM vr1, vr2, vr3, vr4
/* VECTOR PERMUTE (opcode bytes 0xE7 ... 0x8C).
 * First halfword carries v1 and v2; second halfword carries v3 in
 * its high nibble. v4 is passed both as the high nibble of the
 * trailing byte (first MRXBOPC operand) and into the RXB
 * computation. v1..v4 are numeric values set by VX_NUM invocations
 * elided from this extract. */
404 .word 0xE700 | ((v1&15) << 4) | (v2&15)
405 .word ((v3&15) << 12)
406 MRXBOPC (v4&15), 0x8C, v1, v2, v3, v4
409 /* VECTOR UNPACK LOGICAL LOW */
410 .macro VUPLL vr1, vr2, m3
413 .word 0xE700 | ((v1&15) << 4) | (v2&15)
415 MRXBOPC \m3, 0xD4, v1, v2
417 .macro VUPLLB vr1, vr2
420 .macro VUPLLH vr1, vr2
423 .macro VUPLLF vr1, vr2
427 /* VECTOR PERMUTE DOUBLEWORD IMMEDIATE */
428 .macro VPDI vr1, vr2, vr3, m4
432 .word 0xE700 | ((v1&15) << 4) | (v2&15)
433 .word ((v3&15) << 12)
434 MRXBOPC \m4, 0x84, v1, v2, v3
437 /* VECTOR REPLICATE */
438 .macro VREP vr1, vr3, imm2, m4
441 .word 0xE700 | ((v1&15) << 4) | (v3&15)
443 MRXBOPC \m4, 0x4D, v1, v3
445 .macro VREPB vr1, vr3, imm2
446 VREP \vr1, \vr3, \imm2, 0
448 .macro VREPH vr1, vr3, imm2
449 VREP \vr1, \vr3, \imm2, 1
451 .macro VREPF vr1, vr3, imm2
452 VREP \vr1, \vr3, \imm2, 2
454 .macro VREPG vr1, vr3, imm2
455 VREP \vr1, \vr3, \imm2, 3
458 /* VECTOR MERGE HIGH */
459 .macro VMRH vr1, vr2, vr3, m4
463 .word 0xE700 | ((v1&15) << 4) | (v2&15)
464 .word ((v3&15) << 12)
465 MRXBOPC \m4, 0x61, v1, v2, v3
467 .macro VMRHB vr1, vr2, vr3
468 VMRH \vr1, \vr2, \vr3, 0
470 .macro VMRHH vr1, vr2, vr3
471 VMRH \vr1, \vr2, \vr3, 1
473 .macro VMRHF vr1, vr2, vr3
474 VMRH \vr1, \vr2, \vr3, 2
476 .macro VMRHG vr1, vr2, vr3
477 VMRH \vr1, \vr2, \vr3, 3
480 /* VECTOR MERGE LOW */
481 .macro VMRL vr1, vr2, vr3, m4
485 .word 0xE700 | ((v1&15) << 4) | (v2&15)
486 .word ((v3&15) << 12)
487 MRXBOPC \m4, 0x60, v1, v2, v3
489 .macro VMRLB vr1, vr2, vr3
490 VMRL \vr1, \vr2, \vr3, 0
492 .macro VMRLH vr1, vr2, vr3
493 VMRL \vr1, \vr2, \vr3, 1
495 .macro VMRLF vr1, vr2, vr3
496 VMRL \vr1, \vr2, \vr3, 2
498 .macro VMRLG vr1, vr2, vr3
499 VMRL \vr1, \vr2, \vr3, 3
503 /* Vector integer instructions */
506 .macro VN vr1, vr2, vr3
510 .word 0xE700 | ((v1&15) << 4) | (v2&15)
511 .word ((v3&15) << 12)
512 MRXBOPC 0, 0x68, v1, v2, v3
515 /* VECTOR EXCLUSIVE OR */
516 .macro VX vr1, vr2, vr3
520 .word 0xE700 | ((v1&15) << 4) | (v2&15)
521 .word ((v3&15) << 12)
522 MRXBOPC 0, 0x6D, v1, v2, v3
525 /* VECTOR GALOIS FIELD MULTIPLY SUM */
526 .macro VGFM vr1, vr2, vr3, m4
530 .word 0xE700 | ((v1&15) << 4) | (v2&15)
531 .word ((v3&15) << 12)
532 MRXBOPC \m4, 0xB4, v1, v2, v3
534 .macro VGFMB vr1, vr2, vr3
535 VGFM \vr1, \vr2, \vr3, 0
537 .macro VGFMH vr1, vr2, vr3
538 VGFM \vr1, \vr2, \vr3, 1
540 .macro VGFMF vr1, vr2, vr3
541 VGFM \vr1, \vr2, \vr3, 2
543 .macro VGFMG vr1, vr2, vr3
544 VGFM \vr1, \vr2, \vr3, 3
547 /* VECTOR GALOIS FIELD MULTIPLY SUM AND ACCUMULATE */
548 .macro VGFMA vr1, vr2, vr3, vr4, m5
/* VECTOR GALOIS FIELD MULTIPLY SUM AND ACCUMULATE
 * (opcode bytes 0xE7 ... 0xBC).
 * \m5 is the element size control, placed in bits 8-11 of the second
 * halfword; v4 supplies both the trailing-byte high nibble and an
 * RXB operand. v1..v4 are numeric values set by VX_NUM invocations
 * elided from this extract. */
553 .word 0xE700 | ((v1&15) << 4) | (v2&15)
554 .word ((v3&15) << 12) | (\m5 << 8)
555 MRXBOPC (v4&15), 0xBC, v1, v2, v3, v4
557 .macro VGFMAB vr1, vr2, vr3, vr4
558 VGFMA \vr1, \vr2, \vr3, \vr4, 0
560 .macro VGFMAH vr1, vr2, vr3, vr4
561 VGFMA \vr1, \vr2, \vr3, \vr4, 1
563 .macro VGFMAF vr1, vr2, vr3, vr4
564 VGFMA \vr1, \vr2, \vr3, \vr4, 2
566 .macro VGFMAG vr1, vr2, vr3, vr4
567 VGFMA \vr1, \vr2, \vr3, \vr4, 3
570 /* VECTOR SHIFT RIGHT LOGICAL BY BYTE */
571 .macro VSRLB vr1, vr2, vr3
575 .word 0xE700 | ((v1&15) << 4) | (v2&15)
576 .word ((v3&15) << 12)
577 MRXBOPC 0, 0x7D, v1, v2, v3
580 /* VECTOR REPLICATE IMMEDIATE */
581 .macro VREPI vr1, imm2, m3
583 .word 0xE700 | ((v1&15) << 4)
585 MRXBOPC \m3, 0x45, v1
587 .macro VREPIB vr1, imm2
590 .macro VREPIH vr1, imm2
593 .macro VREPIF vr1, imm2
596 .macro VREPIG vr1, imm2
601 .macro VA vr1, vr2, vr3, m4
605 .word 0xE700 | ((v1&15) << 4) | (v2&15)
606 .word ((v3&15) << 12)
607 MRXBOPC \m4, 0xF3, v1, v2, v3
609 .macro VAB vr1, vr2, vr3
610 VA \vr1, \vr2, \vr3, 0
612 .macro VAH vr1, vr2, vr3
613 VA \vr1, \vr2, \vr3, 1
615 .macro VAF vr1, vr2, vr3
616 VA \vr1, \vr2, \vr3, 2
618 .macro VAG vr1, vr2, vr3
619 VA \vr1, \vr2, \vr3, 3
621 .macro VAQ vr1, vr2, vr3
622 VA \vr1, \vr2, \vr3, 4
625 /* VECTOR ELEMENT SHIFT RIGHT ARITHMETIC */
626 .macro VESRAV vr1, vr2, vr3, m4
630 .word 0xE700 | ((v1&15) << 4) | (v2&15)
631 .word ((v3&15) << 12)
632 MRXBOPC \m4, 0x7A, v1, v2, v3
635 .macro VESRAVB vr1, vr2, vr3
636 VESRAV \vr1, \vr2, \vr3, 0
638 .macro VESRAVH vr1, vr2, vr3
639 VESRAV \vr1, \vr2, \vr3, 1
641 .macro VESRAVF vr1, vr2, vr3
642 VESRAV \vr1, \vr2, \vr3, 2
644 .macro VESRAVG vr1, vr2, vr3
645 VESRAV \vr1, \vr2, \vr3, 3
648 /* VECTOR ELEMENT ROTATE LEFT LOGICAL */
649 .macro VERLL vr1, vr3, disp, base="%r0", m4
653 .word 0xE700 | ((v1&15) << 4) | (v3&15)
654 .word (b2 << 12) | (\disp)
655 MRXBOPC \m4, 0x33, v1, v3
657 .macro VERLLB vr1, vr3, disp, base="%r0"
658 VERLL \vr1, \vr3, \disp, \base, 0
660 .macro VERLLH vr1, vr3, disp, base="%r0"
661 VERLL \vr1, \vr3, \disp, \base, 1
663 .macro VERLLF vr1, vr3, disp, base="%r0"
664 VERLL \vr1, \vr3, \disp, \base, 2
666 .macro VERLLG vr1, vr3, disp, base="%r0"
667 VERLL \vr1, \vr3, \disp, \base, 3
670 /* VECTOR SHIFT LEFT DOUBLE BY BYTE */
671 .macro VSLDB vr1, vr2, vr3, imm4
675 .word 0xE700 | ((v1&15) << 4) | (v2&15)
676 .word ((v3&15) << 12) | (\imm4)
677 MRXBOPC 0, 0x77, v1, v2, v3
680 #endif /* __ASSEMBLY__ */
681 #endif /* __ASM_S390_VX_INSN_INTERNAL_H */