#include "cpu.h"
#include "qemu/host-utils.h"
#include "exec/helper-proto.h"
-#include "qemu/aes.h"
+#include "crypto/aes.h"
#include "helper_regs.h"
/*****************************************************************************/
int le_rel = float32_compare_quiet(a->f[i], b->f[i], &env->vec_status);
if (le_rel == float_relation_unordered) {
r->u32[i] = 0xc0000000;
- /* ALL_IN does not need to be updated here. */
+ all_in = 1;
} else {
float32 bneg = float32_chs(b->f[i]);
int ge_rel = float32_compare_quiet(a->f[i], bneg, &env->vec_status);
}
}
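The vcmpbfp change above: an unordered compare (a NaN in either operand) means the element is out of bounds, so the accumulated flag must record that rather than being left alone. A minimal sketch of the per-element logic in plain C (not the softfloat types used above), assuming the elided convention that a non-zero all_in later clears the CR6 "all in bounds" bit:

    #include <math.h>
    #include <stdint.h>

    static uint32_t bound_check(float a, float b, int *all_in)
    {
        if (isnan(a) || isnan(b)) {
            *all_in = 1;              /* NaN: element is out of bounds */
            return 0xc0000000;        /* both bound bits set */
        }
        uint32_t le = (a <= b)  ? 0 : 1u << 31;   /* upper bound failed */
        uint32_t ge = (a >= -b) ? 0 : 1u << 30;   /* lower bound failed */
        if (le | ge) {
            *all_in = 1;
        }
        return le | ge;
    }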
-#if defined(HOST_WORDS_BIGENDIAN)
-#define LEFT 0
-#define RIGHT 1
-#else
-#define LEFT 1
-#define RIGHT 0
-#endif
/* The specification says that the results are undefined if all of the
 * shift counts are not identical.  We check that they are, to match
 * what real hardware appears to do. */
} \
} \
}
-VSHIFT(l, LEFT)
-VSHIFT(r, RIGHT)
+VSHIFT(l, 1)
+VSHIFT(r, 0)
#undef VSHIFT
-#undef LEFT
-#undef RIGHT
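With the host-endian LEFT/RIGHT indices gone, VSHIFT's second argument reads as a plain "shift left?" flag that no longer depends on HOST_WORDS_BIGENDIAN. A self-contained sketch of a 128-bit shift over two explicit 64-bit halves, in the spirit the reworked macro is assumed to follow (hi is the architecturally most significant half regardless of host byte order):

    #include <stdint.h>

    static void shift128(uint64_t *hi, uint64_t *lo, unsigned n, int left)
    {
        n &= 127;                     /* 128-bit value: count mod 128 */
        if (n == 0) {
            return;
        }
        if (left) {
            if (n >= 64) {
                *hi = *lo << (n - 64);
                *lo = 0;
            } else {
                *hi = (*hi << n) | (*lo >> (64 - n));
                *lo <<= n;
            }
        } else {
            if (n >= 64) {
                *lo = *hi >> (n - 64);
                *hi = 0;
            } else {
                *lo = (*lo >> n) | (*hi << (64 - n));
                *hi >>= n;
            }
        }
    }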
#define VSL(suffix, element, mask) \
void helper_vsl##suffix(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
void helper_vcipher(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
+ ppc_avr_t result;
int i;
VECTOR_FOR_INORDER_I(i, u32) {
- r->AVRW(i) = b->AVRW(i) ^
+ result.AVRW(i) = b->AVRW(i) ^
(AES_Te0[a->AVRB(AES_shifts[4*i + 0])] ^
AES_Te1[a->AVRB(AES_shifts[4*i + 1])] ^
AES_Te2[a->AVRB(AES_shifts[4*i + 2])] ^
AES_Te3[a->AVRB(AES_shifts[4*i + 3])]);
}
+ *r = result;
}
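The result temporary above is the point of the change: the destination register may alias a source (r == a or r == b), and writing r directly would clobber bytes that later iterations still read through AES_shifts[]. The same buffer-then-commit pattern in isolation, as a self-contained sketch:

    #include <stdint.h>
    #include <string.h>

    /* Permute 16 bytes through map[]; safe even when dst == src. */
    static void permute16(uint8_t *dst, const uint8_t *src,
                          const uint8_t *map)
    {
        uint8_t tmp[16];
        for (int i = 0; i < 16; i++) {
            tmp[i] = src[map[i]];     /* reads touch only the original */
        }
        memcpy(dst, tmp, 16);         /* commit the result in one step */
    }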
void helper_vcipherlast(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
+ ppc_avr_t result;
int i;
VECTOR_FOR_INORDER_I(i, u8) {
- r->AVRB(i) = b->AVRB(i) ^ (AES_Te4[a->AVRB(AES_shifts[i])] & 0xFF);
+ result.AVRB(i) = b->AVRB(i) ^ (AES_sbox[a->AVRB(AES_shifts[i])]);
}
+ *r = result;
}
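The table switch here rides along with the aliasing fix: in the classic Rijndael tables, AES_Te4[x] is assumed to hold the S-box byte replicated into all four lanes, so (AES_Te4[x] & 0xFF) and AES_sbox[x] yield the same byte, and indexing the byte table directly drops the mask. A quick in-tree sanity check of that identity could look like:

    #include <assert.h>
    #include "crypto/aes.h"   /* AES_Te4, AES_sbox (QEMU tree assumed) */

    static void check_sbox_tables(void)
    {
        for (int x = 0; x < 256; x++) {
            /* Assumed identity: Te4 replicates the S-box byte. */
            assert((AES_Te4[x] & 0xFF) == AES_sbox[x]);
        }
    }

The same reasoning covers AES_Td4 versus AES_isbox in the decryption helpers below.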
void helper_vncipher(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
void helper_vncipherlast(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
+ ppc_avr_t result;
int i;
VECTOR_FOR_INORDER_I(i, u8) {
- r->AVRB(i) = b->AVRB(i) ^ (AES_Td4[a->AVRB(AES_ishifts[i])] & 0xFF);
+ result.AVRB(i) = b->AVRB(i) ^ (AES_isbox[a->AVRB(AES_ishifts[i])]);
}
+ *r = result;
}
#define ROTRu32(v, n) (((v) >> (n)) | ((v) << (32 - (n))))
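ROTRu32 is a standard 32-bit rotate right; as written it assumes 0 < n < 32, since a C shift by the full operand width is undefined. A self-contained function form of the same operation that also tolerates a zero count, for comparison:

    #include <stdint.h>

    static inline uint32_t rotr32(uint32_t v, unsigned n)
    {
        n &= 31;                      /* keep the count in range */
        return n ? (v >> n) | (v << (32 - n)) : v;
    }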
void helper_vpermxor(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
+ ppc_avr_t result;
int i;
+
VECTOR_FOR_INORDER_I(i, u8) {
int indexA = c->u8[i] >> 4;
int indexB = c->u8[i] & 0xF;
#if defined(HOST_WORDS_BIGENDIAN)
- r->u8[i] = a->u8[indexA] ^ b->u8[indexB];
+ result.u8[i] = a->u8[indexA] ^ b->u8[indexB];
#else
- r->u8[i] = a->u8[15-indexA] ^ b->u8[15-indexB];
+ result.u8[i] = a->u8[15-indexA] ^ b->u8[15-indexB];
#endif
}
+ *r = result;
}
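Each vpermxor result byte XORs one byte of a with one byte of b, selected by the two nibbles of the control byte; the 15 - index flip compensates for u8[] being host-order on little-endian machines. A worked example with a control byte chosen only for illustration:

    /* Suppose c->u8[i] == 0x3A, so indexA == 0x3 and indexB == 0xA: */
    result.u8[i] = a->u8[3] ^ b->u8[10];            /* big-endian host */
    result.u8[i] = a->u8[15 - 3] ^ b->u8[15 - 10];  /* little-endian host */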
#undef VECTOR_FOR_INORDER_I