/*
 * Copyright (C) 2013 Huawei Ltd.
 * Author: Jiang Liu <liuj97@gmail.com>
 *
 * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/stop_machine.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
#include <asm/fixmap.h>
#include <asm/insn.h>

#define AARCH64_INSN_SF_BIT	BIT(31)
#define AARCH64_INSN_N_BIT	BIT(22)
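
/*
 * Op0 (bits 28:25) selects the top-level A64 encoding group; the table
 * below maps each of its sixteen values to the coarse instruction class
 * returned by aarch64_get_insn_class().
 */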
static int aarch64_insn_encoding_class[] = {
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_REG,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_FPSIMD,
	AARCH64_INSN_CLS_DP_IMM,
	AARCH64_INSN_CLS_DP_IMM,
	AARCH64_INSN_CLS_BR_SYS,
	AARCH64_INSN_CLS_BR_SYS,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_REG,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_FPSIMD,
};

enum aarch64_insn_encoding_class __kprobes aarch64_get_insn_class(u32 insn)
{
	return aarch64_insn_encoding_class[(insn >> 25) & 0xf];
}

/* NOP is an alias of HINT */
bool __kprobes aarch64_insn_is_nop(u32 insn)
{
	if (!aarch64_insn_is_hint(insn))
		return false;

	switch (insn & 0xFE0) {
	case AARCH64_INSN_HINT_YIELD:
	case AARCH64_INSN_HINT_WFE:
	case AARCH64_INSN_HINT_WFI:
	case AARCH64_INSN_HINT_SEV:
	case AARCH64_INSN_HINT_SEVL:
		return false;
	default:
		return true;
	}
}

bool aarch64_insn_is_branch_imm(u32 insn)
{
	return (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn) ||
		aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn) ||
		aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
		aarch64_insn_is_bcond(insn));
}
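
/*
 * Kernel and module text may be mapped read-only, so instruction patching
 * goes through a writable alias: patch_map() installs a temporary fixmap
 * mapping (FIX_TEXT_POKE0) of the page containing the target and writes
 * land there instead. patch_lock serialises all users of that single
 * fixmap slot.
 */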
static DEFINE_SPINLOCK(patch_lock);

static void __kprobes *patch_map(void *addr, int fixmap)
{
	unsigned long uintaddr = (uintptr_t) addr;
	bool module = !core_kernel_text(uintaddr);
	struct page *page;

	if (module && IS_ENABLED(CONFIG_DEBUG_SET_MODULE_RONX))
		page = vmalloc_to_page(addr);
	else if (!module && IS_ENABLED(CONFIG_DEBUG_RODATA))
		page = virt_to_page(addr);
	else
		return addr;

	BUG_ON(!page);
	set_fixmap(fixmap, page_to_phys(page));

	return (void *) (__fix_to_virt(fixmap) + (uintaddr & ~PAGE_MASK));
}

static void __kprobes patch_unmap(int fixmap)
{
	clear_fixmap(fixmap);
}

/*
 * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
 * little-endian.
 */
int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
{
	int ret;
	u32 val;

	ret = probe_kernel_read(&val, addr, AARCH64_INSN_SIZE);
	if (!ret)
		*insnp = le32_to_cpu(val);

	return ret;
}

static int __kprobes __aarch64_insn_write(void *addr, u32 insn)
{
	void *waddr = addr;
	unsigned long flags = 0;
	int ret;

	spin_lock_irqsave(&patch_lock, flags);
	waddr = patch_map(addr, FIX_TEXT_POKE0);

	ret = probe_kernel_write(waddr, &insn, AARCH64_INSN_SIZE);

	patch_unmap(FIX_TEXT_POKE0);
	spin_unlock_irqrestore(&patch_lock, flags);

	return ret;
}

int __kprobes aarch64_insn_write(void *addr, u32 insn)
{
	insn = cpu_to_le32(insn);
	return __aarch64_insn_write(addr, insn);
}

static bool __kprobes __aarch64_insn_hotpatch_safe(u32 insn)
{
	if (aarch64_get_insn_class(insn) != AARCH64_INSN_CLS_BR_SYS)
		return false;

	return	aarch64_insn_is_b(insn) ||
		aarch64_insn_is_bl(insn) ||
		aarch64_insn_is_svc(insn) ||
		aarch64_insn_is_hvc(insn) ||
		aarch64_insn_is_smc(insn) ||
		aarch64_insn_is_brk(insn) ||
		aarch64_insn_is_nop(insn);
}

/*
 * ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a
 * Section B2.6.5 "Concurrent modification and execution of instructions":
 * Concurrent modification and execution of instructions can lead to the
 * resulting instruction performing any behavior that can be achieved by
 * executing any sequence of instructions that can be executed from the
 * same Exception level, except where the instruction before modification
 * and the instruction after modification is a B, BL, NOP, BKPT, SVC, HVC,
 * or SMC instruction.
 */
bool __kprobes aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn)
{
	return __aarch64_insn_hotpatch_safe(old_insn) &&
	       __aarch64_insn_hotpatch_safe(new_insn);
}

int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
{
	u32 *tp = addr;
	int ret;

	/* A64 instructions must be word aligned */
	if ((uintptr_t)tp & 0x3)
		return -EINVAL;

	ret = aarch64_insn_write(tp, insn);
	if (ret == 0)
		flush_icache_range((uintptr_t)tp,
				   (uintptr_t)tp + AARCH64_INSN_SIZE);

	return ret;
}

struct aarch64_insn_patch {
	void		**text_addrs;
	u32		*new_insns;
	int		insn_cnt;
	atomic_t	cpu_count;
};
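
/*
 * Patching runs under stop_machine(): every online CPU enters the callback
 * below. One CPU (the first to increment cpu_count) performs the writes
 * while the others spin; the master then signals completion with a second
 * increment, after which the waiters resynchronise their pipelines.
 */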
static int __kprobes aarch64_insn_patch_text_cb(void *arg)
{
	int i, ret = 0;
	struct aarch64_insn_patch *pp = arg;

	/* The first CPU becomes master */
	if (atomic_inc_return(&pp->cpu_count) == 1) {
		for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
			ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
							     pp->new_insns[i]);
		/*
		 * aarch64_insn_patch_text_nosync() calls flush_icache_range(),
		 * which ends with "dsb; isb" pair guaranteeing global
		 * visibility of the new instructions.
		 */
		/* Notify other processors with an additional increment. */
		atomic_inc(&pp->cpu_count);
	} else {
		while (atomic_read(&pp->cpu_count) <= num_online_cpus())
			cpu_relax();
		isb();
	}

	return ret;
}

int __kprobes aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt)
{
	struct aarch64_insn_patch patch = {
		.text_addrs = addrs,
		.new_insns = insns,
		.insn_cnt = cnt,
		.cpu_count = ATOMIC_INIT(0),
	};

	if (cnt <= 0)
		return -EINVAL;

	return stop_machine(aarch64_insn_patch_text_cb, &patch,
			    cpu_online_mask);
}

int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
{
	int ret;
	u32 insn;

	/* Unsafe to patch multiple instructions without synchronization */
	if (cnt == 1) {
		ret = aarch64_insn_read(addrs[0], &insn);
		if (ret)
			return ret;

		if (aarch64_insn_hotpatch_safe(insn, insns[0])) {
			/*
			 * ARMv8 architecture doesn't guarantee all CPUs see
			 * the new instruction after returning from function
			 * aarch64_insn_patch_text_nosync(). So send IPIs to
			 * all other CPUs to achieve instruction
			 * synchronization.
			 */
			ret = aarch64_insn_patch_text_nosync(addrs[0], insns[0]);
			kick_all_cpus_sync();
			return ret;
		}
	}

	return aarch64_insn_patch_text_sync(addrs, insns, cnt);
}
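
/*
 * Usage sketch (hypothetical caller): overwriting one instruction with a
 * NOP. Both old and new instructions are hotpatch-safe here, so the
 * single-instruction path above avoids the stop_machine() round trip:
 *
 *	void *addr = ...;	// word-aligned address of a B/BL/NOP/...
 *	u32 nop = aarch64_insn_gen_nop();
 *	int err = aarch64_insn_patch_text(&addr, &nop, 1);
 */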

static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type,
						u32 *maskp, int *shiftp)
{
	u32 mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_26:
		mask = BIT(26) - 1;
		shift = 0;
		break;
	case AARCH64_INSN_IMM_19:
		mask = BIT(19) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_16:
		mask = BIT(16) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_14:
		mask = BIT(14) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_12:
		mask = BIT(12) - 1;
		shift = 10;
		break;
	case AARCH64_INSN_IMM_9:
		mask = BIT(9) - 1;
		shift = 12;
		break;
	case AARCH64_INSN_IMM_7:
		mask = BIT(7) - 1;
		shift = 15;
		break;
	case AARCH64_INSN_IMM_6:
	case AARCH64_INSN_IMM_S:
		mask = BIT(6) - 1;
		shift = 10;
		break;
	case AARCH64_INSN_IMM_R:
		mask = BIT(6) - 1;
		shift = 16;
		break;
	default:
		return -EINVAL;
	}

	*maskp = mask;
	*shiftp = shift;

	return 0;
}
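
/*
 * ADR/ADRP splits its immediate across the instruction: immlo lives in
 * bits 30:29 and immhi in bits 23:5, giving a 21-bit signed value. That
 * is the +/-1MB ADR range, hence ADR_IMM_SIZE == SZ_2M.
 */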
#define ADR_IMM_HILOSPLIT	2
#define ADR_IMM_SIZE		SZ_2M
#define ADR_IMM_LOMASK		((1 << ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_HIMASK		((ADR_IMM_SIZE >> ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_LOSHIFT		29
#define ADR_IMM_HISHIFT		5

u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn)
{
	u32 immlo, immhi, mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		shift = 0;
		immlo = (insn >> ADR_IMM_LOSHIFT) & ADR_IMM_LOMASK;
		immhi = (insn >> ADR_IMM_HISHIFT) & ADR_IMM_HIMASK;
		insn = (immhi << ADR_IMM_HILOSPLIT) | immlo;
		mask = ADR_IMM_SIZE - 1;
		break;
	default:
		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
			pr_err("aarch64_insn_decode_immediate: unknown immediate encoding %d\n",
			       type);
			return 0;
		}
	}

	return (insn >> shift) & mask;
}

u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
					    u32 insn, u64 imm)
{
	u32 immlo, immhi, mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		shift = 0;
		immlo = (imm & ADR_IMM_LOMASK) << ADR_IMM_LOSHIFT;
		imm >>= ADR_IMM_HILOSPLIT;
		immhi = (imm & ADR_IMM_HIMASK) << ADR_IMM_HISHIFT;
		imm = immlo | immhi;
		mask = ((ADR_IMM_LOMASK << ADR_IMM_LOSHIFT) |
			(ADR_IMM_HIMASK << ADR_IMM_HISHIFT));
		break;
	default:
		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
			pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n",
			       type);
			return 0;
		}
	}

	/* Update the immediate field. */
	insn &= ~(mask << shift);
	insn |= (imm & mask) << shift;

	return insn;
}

static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
					u32 insn,
					enum aarch64_insn_register reg)
{
	int shift;

	if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) {
		pr_err("%s: unknown register encoding %d\n", __func__, reg);
		return 0;
	}

	switch (type) {
	case AARCH64_INSN_REGTYPE_RT:
	case AARCH64_INSN_REGTYPE_RD:
		shift = 0;
		break;
	case AARCH64_INSN_REGTYPE_RN:
		shift = 5;
		break;
	case AARCH64_INSN_REGTYPE_RT2:
	case AARCH64_INSN_REGTYPE_RA:
		shift = 10;
		break;
	case AARCH64_INSN_REGTYPE_RM:
		shift = 16;
		break;
	default:
		pr_err("%s: unknown register type encoding %d\n", __func__,
		       type);
		return 0;
	}

	insn &= ~(GENMASK(4, 0) << shift);
	insn |= reg << shift;

	return insn;
}

static u32 aarch64_insn_encode_ldst_size(enum aarch64_insn_size_type type,
					 u32 insn)
{
	u32 size;

	switch (type) {
	case AARCH64_INSN_SIZE_8:
		size = 0;
		break;
	case AARCH64_INSN_SIZE_16:
		size = 1;
		break;
	case AARCH64_INSN_SIZE_32:
		size = 2;
		break;
	case AARCH64_INSN_SIZE_64:
		size = 3;
		break;
	default:
		pr_err("%s: unknown size encoding %d\n", __func__, type);
		return 0;
	}

	insn &= ~GENMASK(31, 30);
	insn |= size << 30;

	return insn;
}

static inline long branch_imm_common(unsigned long pc, unsigned long addr,
				     long range)
{
	long offset;

	/*
	 * PC: A 64-bit Program Counter holding the address of the current
	 * instruction. A64 instructions must be word-aligned.
	 */
	BUG_ON((pc & 0x3) || (addr & 0x3));

	offset = ((long)addr - (long)pc);
	BUG_ON(offset < -range || offset >= range);

	return offset;
}

u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
					  enum aarch64_insn_branch_type type)
{
	u32 insn;
	long offset;

	/*
	 * B/BL support [-128M, 128M) offset
	 * ARM64 virtual address arrangement guarantees all kernel and module
	 * texts are within +/-128M.
	 */
	offset = branch_imm_common(pc, addr, SZ_128M);

	switch (type) {
	case AARCH64_INSN_BRANCH_LINK:
		insn = aarch64_insn_get_bl_value();
		break;
	case AARCH64_INSN_BRANCH_NOLINK:
		insn = aarch64_insn_get_b_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
					     offset >> 2);
}
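
/*
 * Worked example (illustrative values): a branch sixteen bytes forward has
 * offset +0x10, so IMM_26 stores offset >> 2 == 4. With the B opcode value
 * 0x14000000 the generated instruction is 0x14000004, i.e. "b . + 16".
 */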

u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
				     enum aarch64_insn_register reg,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_branch_type type)
{
	u32 insn;
	long offset;

	offset = branch_imm_common(pc, addr, SZ_1M);

	switch (type) {
	case AARCH64_INSN_BRANCH_COMP_ZERO:
		insn = aarch64_insn_get_cbz_value();
		break;
	case AARCH64_INSN_BRANCH_COMP_NONZERO:
		insn = aarch64_insn_get_cbnz_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
					     offset >> 2);
}

u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
				     enum aarch64_insn_condition cond)
{
	u32 insn;
	long offset;

	offset = branch_imm_common(pc, addr, SZ_1M);

	insn = aarch64_insn_get_bcond_value();

	BUG_ON(cond < AARCH64_INSN_COND_EQ || cond > AARCH64_INSN_COND_AL);
	insn |= cond;

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
					     offset >> 2);
}

u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_op op)
{
	return aarch64_insn_get_hint_value() | op;
}

u32 __kprobes aarch64_insn_gen_nop(void)
{
	return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP);
}

u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
				enum aarch64_insn_branch_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_BRANCH_NOLINK:
		insn = aarch64_insn_get_br_value();
		break;
	case AARCH64_INSN_BRANCH_LINK:
		insn = aarch64_insn_get_blr_value();
		break;
	case AARCH64_INSN_BRANCH_RETURN:
		insn = aarch64_insn_get_ret_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, reg);
}
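
/*
 * Example (illustrative): a plain RET branches through the link register,
 * so it can be generated as:
 *
 *	aarch64_insn_gen_branch_reg(AARCH64_INSN_REG_30,
 *				    AARCH64_INSN_BRANCH_RETURN);
 */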

u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
				    enum aarch64_insn_register base,
				    enum aarch64_insn_register offset,
				    enum aarch64_insn_size_type size,
				    enum aarch64_insn_ldst_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_REG_OFFSET:
		insn = aarch64_insn_get_ldr_reg_value();
		break;
	case AARCH64_INSN_LDST_STORE_REG_OFFSET:
		insn = aarch64_insn_get_str_reg_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_ldst_size(size, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
					    offset);
}

u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
				     enum aarch64_insn_register reg2,
				     enum aarch64_insn_register base,
				     int offset,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_ldst_type type)
{
	u32 insn;
	int shift;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_ldp_pre_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_stp_pre_value();
		break;
	case AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX:
		insn = aarch64_insn_get_ldp_post_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX:
		insn = aarch64_insn_get_stp_post_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		/* offset must be multiples of 4 in the range [-256, 252] */
		BUG_ON(offset & 0x3);
		BUG_ON(offset < -256 || offset > 252);
		shift = 2;
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		/* offset must be multiples of 8 in the range [-512, 504] */
		BUG_ON(offset & 0x7);
		BUG_ON(offset < -512 || offset > 504);
		shift = 3;
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    reg1);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
					    reg2);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_7, insn,
					     offset >> shift);
}
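
/*
 * Example sketch (illustrative, assuming the FP/LR register aliases from
 * asm/insn.h): the familiar prologue "stp x29, x30, [sp, #-16]!" maps to:
 *
 *	aarch64_insn_gen_load_store_pair(AARCH64_INSN_REG_FP,
 *					 AARCH64_INSN_REG_LR,
 *					 AARCH64_INSN_REG_SP, -16,
 *					 AARCH64_INSN_VARIANT_64BIT,
 *					 AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX);
 */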

u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
				 enum aarch64_insn_register src,
				 int imm, enum aarch64_insn_variant variant,
				 enum aarch64_insn_adsb_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_ADSB_ADD:
		insn = aarch64_insn_get_add_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB:
		insn = aarch64_insn_get_sub_imm_value();
		break;
	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
		insn = aarch64_insn_get_adds_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
		insn = aarch64_insn_get_subs_imm_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	BUG_ON(imm & ~(SZ_4K - 1));

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm);
}

u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
			      enum aarch64_insn_register src,
			      int immr, int imms,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_bitfield_type type)
{
	u32 insn;
	u32 mask;

	switch (type) {
	case AARCH64_INSN_BITFIELD_MOVE:
		insn = aarch64_insn_get_bfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_UNSIGNED:
		insn = aarch64_insn_get_ubfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_SIGNED:
		insn = aarch64_insn_get_sbfm_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		mask = GENMASK(4, 0);
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT | AARCH64_INSN_N_BIT;
		mask = GENMASK(5, 0);
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	BUG_ON(immr & ~mask);
	BUG_ON(imms & ~mask);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
}

u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
			      int imm, int shift,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_movewide_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_MOVEWIDE_ZERO:
		insn = aarch64_insn_get_movz_value();
		break;
	case AARCH64_INSN_MOVEWIDE_KEEP:
		insn = aarch64_insn_get_movk_value();
		break;
	case AARCH64_INSN_MOVEWIDE_INVERSE:
		insn = aarch64_insn_get_movn_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	BUG_ON(imm & ~(SZ_64K - 1));

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		BUG_ON(shift != 0 && shift != 16);
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		BUG_ON(shift != 0 && shift != 16 && shift != 32 &&
		       shift != 48);
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	insn |= (shift >> 4) << 21;

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
}
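
/*
 * Example sketch (illustrative): materialising an arbitrary 64-bit
 * constant in x0 takes up to four movewide instructions, one 16-bit
 * chunk per hw shift:
 *
 *	insn[0] = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0,
 *					    val & 0xffff, 0,
 *					    AARCH64_INSN_VARIANT_64BIT,
 *					    AARCH64_INSN_MOVEWIDE_ZERO);
 *	insn[1] = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0,
 *					    (val >> 16) & 0xffff, 16,
 *					    AARCH64_INSN_VARIANT_64BIT,
 *					    AARCH64_INSN_MOVEWIDE_KEEP);
 *	... and likewise MOVK for shifts 32 and 48.
 */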

u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst,
					 enum aarch64_insn_register src,
					 enum aarch64_insn_register reg,
					 int shift,
					 enum aarch64_insn_variant variant,
					 enum aarch64_insn_adsb_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_ADSB_ADD:
		insn = aarch64_insn_get_add_value();
		break;
	case AARCH64_INSN_ADSB_SUB:
		insn = aarch64_insn_get_sub_value();
		break;
	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
		insn = aarch64_insn_get_adds_value();
		break;
	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
		insn = aarch64_insn_get_subs_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		BUG_ON(shift & ~(SZ_32 - 1));
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		BUG_ON(shift & ~(SZ_64 - 1));
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}

u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data1_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA1_REVERSE_16:
		insn = aarch64_insn_get_rev16_value();
		break;
	case AARCH64_INSN_DATA1_REVERSE_32:
		insn = aarch64_insn_get_rev32_value();
		break;
	case AARCH64_INSN_DATA1_REVERSE_64:
		BUG_ON(variant != AARCH64_INSN_VARIANT_64BIT);
		insn = aarch64_insn_get_rev64_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
}

u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_register reg,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data2_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA2_UDIV:
		insn = aarch64_insn_get_udiv_value();
		break;
	case AARCH64_INSN_DATA2_SDIV:
		insn = aarch64_insn_get_sdiv_value();
		break;
	case AARCH64_INSN_DATA2_LSLV:
		insn = aarch64_insn_get_lslv_value();
		break;
	case AARCH64_INSN_DATA2_LSRV:
		insn = aarch64_insn_get_lsrv_value();
		break;
	case AARCH64_INSN_DATA2_ASRV:
		insn = aarch64_insn_get_asrv_value();
		break;
	case AARCH64_INSN_DATA2_RORV:
		insn = aarch64_insn_get_rorv_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
}

u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_register reg1,
			   enum aarch64_insn_register reg2,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data3_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA3_MADD:
		insn = aarch64_insn_get_madd_value();
		break;
	case AARCH64_INSN_DATA3_MSUB:
		insn = aarch64_insn_get_msub_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RA, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    reg1);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
					    reg2);
}

u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
					 enum aarch64_insn_register src,
					 enum aarch64_insn_register reg,
					 int shift,
					 enum aarch64_insn_variant variant,
					 enum aarch64_insn_logic_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LOGIC_AND:
		insn = aarch64_insn_get_and_value();
		break;
	case AARCH64_INSN_LOGIC_BIC:
		insn = aarch64_insn_get_bic_value();
		break;
	case AARCH64_INSN_LOGIC_ORR:
		insn = aarch64_insn_get_orr_value();
		break;
	case AARCH64_INSN_LOGIC_ORN:
		insn = aarch64_insn_get_orn_value();
		break;
	case AARCH64_INSN_LOGIC_EOR:
		insn = aarch64_insn_get_eor_value();
		break;
	case AARCH64_INSN_LOGIC_EON:
		insn = aarch64_insn_get_eon_value();
		break;
	case AARCH64_INSN_LOGIC_AND_SETFLAGS:
		insn = aarch64_insn_get_ands_value();
		break;
	case AARCH64_INSN_LOGIC_BIC_SETFLAGS:
		insn = aarch64_insn_get_bics_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		BUG_ON(shift & ~(SZ_32 - 1));
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		BUG_ON(shift & ~(SZ_64 - 1));
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}
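
/*
 * Example sketch (illustrative): "mov x0, x1" is the alias ORR x0, xzr, x1,
 * so it can be generated as:
 *
 *	aarch64_insn_gen_logical_shifted_reg(AARCH64_INSN_REG_0,
 *					     AARCH64_INSN_REG_ZR,
 *					     AARCH64_INSN_REG_1, 0,
 *					     AARCH64_INSN_VARIANT_64BIT,
 *					     AARCH64_INSN_LOGIC_ORR);
 */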

/*
 * Decode the imm field of a branch, and return the byte offset as a
 * signed value (so it can be used when computing a new branch
 * target).
 */
s32 aarch64_get_branch_offset(u32 insn)
{
	s32 imm;

	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn);
		return (imm << 6) >> 4;
	}

	if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
	    aarch64_insn_is_bcond(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_19, insn);
		return (imm << 13) >> 11;
	}

	if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_14, insn);
		return (imm << 18) >> 16;
	}

	/* Unhandled instruction */
	BUG();
}
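
/*
 * The shift pairs above sign-extend the raw field and convert words to
 * bytes in one step: e.g. shifting a 26-bit imm left by 6 puts its sign
 * bit at bit 31, and the arithmetic shift right by 4 then yields imm * 4
 * sign-extended to 32 bits.
 */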

/*
 * Encode the displacement of a branch in the imm field and return the
 * updated instruction.
 */
u32 aarch64_set_branch_offset(u32 insn, s32 offset)
{
	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
						     offset >> 2);

	if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
	    aarch64_insn_is_bcond(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
						     offset >> 2);

	if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_14, insn,
						     offset >> 2);

	/* Unhandled instruction */
	BUG();
}

bool aarch32_insn_is_wide(u32 insn)
{
	return insn >= 0xe800;
}

/*
 * Macros/defines for extracting register numbers from instruction.
 */
u32 aarch32_insn_extract_reg_num(u32 insn, int offset)
{
	return (insn & (0xf << offset)) >> offset;
}

#define OPC2_MASK	0x7
#define OPC2_OFFSET	5
u32 aarch32_insn_mcr_extract_opc2(u32 insn)
{
	return (insn & (OPC2_MASK << OPC2_OFFSET)) >> OPC2_OFFSET;
}

#define CRM_MASK	0xf
u32 aarch32_insn_mcr_extract_crm(u32 insn)
{
	return insn & CRM_MASK;
}