* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
+
#include "qemu/osdep.h"
#include "cpu.h"
#include "internal.h"
#include "qemu/host-utils.h"
+#include "qemu/main-loop.h"
#include "exec/helper-proto.h"
#include "crypto/aes.h"
#include "fpu/softfloat.h"
+#include "qapi/error.h"
+#include "qemu/guest-random.h"
#include "helper_regs.h"
/*****************************************************************************/
/* if x = 0xab, returns 0xabababababababab (0xab replicated into every byte) */
#define pattern(x) (((x) & 0xff) * (~(target_ulong)0 / 0xff))
-/* substract 1 from each byte, and with inverse, check if MSB is set at each
+/*
+ * subtract 1 from each byte, AND with the inverse, and check if the MSB is set in each
* byte.
* i.e. ((0x00 - 0x01) & ~(0x00)) & 0x80
* (0xFF & 0xFF) & 0x80 = 0x80 (zero found)
#undef haszero
#undef hasvalue
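/*
 * Worked example of the byte-wise zero test described above, one byte at a
 * time (the real macros apply it to every byte of a target_ulong at once
 * via pattern()):
 *
 *   v = 0x00:  (0x00 - 0x01) & ~0x00 & 0x80  =  0xFF & 0xFF & 0x80  =  0x80  (zero found)
 *   v = 0x41:  (0x41 - 0x01) & ~0x41 & 0x80  =  0x40 & 0xBE & 0x80  =  0x00  (no zero)
 */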
-/* Return invalid random number.
- *
- * FIXME: Add rng backend or other mechanism to get cryptographically suitable
- * random number
+/*
+ * Return a random number, or -1 on failure.
*/
-target_ulong helper_darn32(void)
+uint64_t helper_darn32(void)
{
- return -1;
+ Error *err = NULL;
+ uint32_t ret;
+
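+    /*
+     * qemu_guest_getrandom() fills the buffer from QEMU's guest random
+     * number source and returns 0 on success or a negative value on
+     * failure, in which case *err is set.
+     */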
+ if (qemu_guest_getrandom(&ret, sizeof(ret), &err) < 0) {
+ qemu_log_mask(LOG_UNIMP, "darn: Crypto failure: %s",
+ error_get_pretty(err));
+ error_free(err);
+ return -1;
+ }
+
+ return ret;
}
-target_ulong helper_darn64(void)
+uint64_t helper_darn64(void)
{
- return -1;
-}
+ Error *err = NULL;
+ uint64_t ret;
-#endif
+ if (qemu_guest_getrandom(&ret, sizeof(ret), &err) < 0) {
+ qemu_log_mask(LOG_UNIMP, "darn: Crypto failure: %s",
+ error_get_pretty(err));
+ error_free(err);
+ return -1;
+ }
-#if defined(TARGET_PPC64)
+ return ret;
+}
uint64_t helper_bpermd(uint64_t rs, uint64_t rb)
{
uint64_t ra = 0;
for (i = 0; i < 8; i++) {
- int index = (rs >> (i*8)) & 0xFF;
+ int index = (rs >> (i * 8)) & 0xFF;
if (index < 64) {
if (rb & PPC_BIT(index)) {
ra |= 1 << i;
/* 602 specific instructions */
/* mfrom is the craziest instruction ever seen, imho! */
/* The real implementation uses a ROM table; do the same. */
-/* Extremely decomposed:
+/*
+ * Extremely decomposed:
 * return 256 * log10(10^(-arg / 256) + 1.0) + 0.5
*/
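
/*
 * Illustrative sketch only: computing the decomposed formula above directly
 * with libm instead of the precomputed ROM table the helper really uses.
 * The function name is hypothetical.
 */
#include <math.h>

static inline uint32_t mfrom_compute(uint32_t arg)
{
    /* 256 * log10(10^(-arg / 256) + 1.0) + 0.5, rounded to the nearest
     * integer by the +0.5 and the truncating cast */
    return (uint32_t)(256.0 * log10(pow(10.0, -(double)arg / 256.0) + 1.0) + 0.5);
}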
for (index = 0; index < ARRAY_SIZE(r->element); index++)
#else
#define VECTOR_FOR_INORDER_I(index, element) \
- for (index = ARRAY_SIZE(r->element)-1; index >= 0; index--)
+ for (index = ARRAY_SIZE(r->element) - 1; index >= 0; index--)
#endif
/* Saturating arithmetic helpers. */
{
int i, j = (sh & 0xf);
- VECTOR_FOR_INORDER_I(i, u8) {
- r->u8[i] = j++;
+ for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
+ r->VsrB(i) = j++;
}
}
{
int i, j = 0x10 - (sh & 0xf);
- VECTOR_FOR_INORDER_I(i, u8) {
- r->u8[i] = j++;
+ for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
+ r->VsrB(i) = j++;
}
}
-void helper_mtvscr(CPUPPCState *env, ppc_avr_t *r)
+void helper_mtvscr(CPUPPCState *env, uint32_t vscr)
{
-#if defined(HOST_WORDS_BIGENDIAN)
- env->vscr = r->u32[3];
-#else
- env->vscr = r->u32[0];
-#endif
- set_flush_to_zero(vscr_nj, &env->vec_status);
+ env->vscr = vscr & ~(1u << VSCR_SAT);
+ /* Which bit we set is completely arbitrary, but clear the rest. */
+ env->vscr_sat.u64[0] = vscr & (1u << VSCR_SAT);
+ env->vscr_sat.u64[1] = 0;
+ set_flush_to_zero((vscr >> VSCR_NJ) & 1, &env->vec_status);
+}
+
+uint32_t helper_mfvscr(CPUPPCState *env)
+{
+ uint32_t sat = (env->vscr_sat.u64[0] | env->vscr_sat.u64[1]) != 0;
+ return env->vscr | (sat << VSCR_SAT);
+}
+
+static inline void set_vscr_sat(CPUPPCState *env)
+{
+ /* The choice of non-zero value is arbitrary. */
+ env->vscr_sat.u32[0] = 1;
}
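
/*
 * Note on the scheme above: VSCR.SAT lives in a separate ppc_avr_t so that
 * saturating vector helpers can record saturation either by calling
 * set_vscr_sat() or, as in the VARITHSAT_* helpers below, by writing a
 * vscr_sat operand directly; helper_mfvscr() folds any non-zero value back
 * into the architected SAT bit.
 */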
void helper_vaddcuw(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
r->element[i] = a->element[i] op b->element[i]; \
} \
}
-#define VARITH(suffix, element) \
- VARITH_DO(add##suffix, +, element) \
- VARITH_DO(sub##suffix, -, element)
-VARITH(ubm, u8)
-VARITH(uhm, u16)
-VARITH(uwm, u32)
-VARITH(udm, u64)
VARITH_DO(muluwm, *, u32)
#undef VARITH_DO
#undef VARITH
}
#define VARITHSAT_DO(name, op, optype, cvt, element) \
- void helper_v##name(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, \
- ppc_avr_t *b) \
+ void helper_v##name(ppc_avr_t *r, ppc_avr_t *vscr_sat, \
+ ppc_avr_t *a, ppc_avr_t *b, uint32_t desc) \
{ \
int sat = 0; \
int i; \
\
for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
- switch (sizeof(r->element[0])) { \
- case 1: \
- VARITHSAT_CASE(optype, op, cvt, element); \
- break; \
- case 2: \
- VARITHSAT_CASE(optype, op, cvt, element); \
- break; \
- case 4: \
- VARITHSAT_CASE(optype, op, cvt, element); \
- break; \
- } \
+ VARITHSAT_CASE(optype, op, cvt, element); \
} \
if (sat) { \
- env->vscr |= (1 << VSCR_SAT); \
+ vscr_sat->u32[0] = 1; \
} \
}
#define VARITHSAT_SIGNED(suffix, element, optype, cvt) \
} \
}
-/* VABSDU - Vector absolute difference unsigned
+/*
+ * VABSDU - Vector absolute difference unsigned
* name - instruction mnemonic suffix (b: byte, h: halfword, w: word)
* element - element type to access from vector
*/
} \
}
-/* VCMPNEZ - Vector compare not equal to zero
+/*
+ * VCMPNEZ - Vector compare not equal to zero
* suffix - instruction mnemonic suffix (b: byte, h: halfword, w: word)
* element - element type to access from vector
*/
} \
} \
if (sat) { \
- env->vscr |= (1 << VSCR_SAT); \
+ set_vscr_sat(env); \
} \
}
VCT(uxs, cvtsduw, u32)
{
target_ulong count = 0;
int i;
- VECTOR_FOR_INORDER_I(i, u8) {
- if (r->u8[i] & 0x01) {
+ for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
+ if (r->VsrB(i) & 0x01) {
break;
}
count++;
{
target_ulong count = 0;
int i;
-#if defined(HOST_WORDS_BIGENDIAN)
for (i = ARRAY_SIZE(r->u8) - 1; i >= 0; i--) {
-#else
- for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
-#endif
- if (r->u8[i] & 0x01) {
+ if (r->VsrB(i) & 0x01) {
break;
}
count++;
}
if (sat) {
- env->vscr |= (1 << VSCR_SAT);
+ set_vscr_sat(env);
}
}
}
if (sat) {
- env->vscr |= (1 << VSCR_SAT);
+ set_vscr_sat(env);
}
}
-#define VMINMAX_DO(name, compare, element) \
- void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
- { \
- int i; \
- \
- for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
- if (a->element[i] compare b->element[i]) { \
- r->element[i] = b->element[i]; \
- } else { \
- r->element[i] = a->element[i]; \
- } \
- } \
- }
-#define VMINMAX(suffix, element) \
- VMINMAX_DO(min##suffix, >, element) \
- VMINMAX_DO(max##suffix, <, element)
-VMINMAX(sb, s8)
-VMINMAX(sh, s16)
-VMINMAX(sw, s32)
-VMINMAX(sd, s64)
-VMINMAX(ub, u8)
-VMINMAX(uh, u16)
-VMINMAX(uw, u32)
-VMINMAX(ud, u64)
-#undef VMINMAX_DO
-#undef VMINMAX
-
void helper_vmladduhm(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
int i;
}
if (sat) {
- env->vscr |= (1 << VSCR_SAT);
+ set_vscr_sat(env);
}
}
}
if (sat) {
- env->vscr |= (1 << VSCR_SAT);
+ set_vscr_sat(env);
}
}
ppc_avr_t result;
int i;
- VECTOR_FOR_INORDER_I(i, u8) {
- int s = c->u8[i] & 0x1f;
-#if defined(HOST_WORDS_BIGENDIAN)
+ for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
+ int s = c->VsrB(i) & 0x1f;
int index = s & 0xf;
-#else
- int index = 15 - (s & 0xf);
-#endif
if (s & 0x10) {
- result.u8[i] = b->u8[index];
+ result.VsrB(i) = b->VsrB(index);
} else {
- result.u8[i] = a->u8[index];
+ result.VsrB(i) = a->VsrB(index);
}
}
*r = result;
ppc_avr_t result;
int i;
- VECTOR_FOR_INORDER_I(i, u8) {
- int s = c->u8[i] & 0x1f;
-#if defined(HOST_WORDS_BIGENDIAN)
+ for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
+ int s = c->VsrB(i) & 0x1f;
int index = 15 - (s & 0xf);
-#else
- int index = s & 0xf;
-#endif
if (s & 0x10) {
- result.u8[i] = a->u8[index];
+ result.VsrB(i) = a->VsrB(index);
} else {
- result.u8[i] = b->u8[index];
+ result.VsrB(i) = b->VsrB(index);
}
}
*r = result;
#define VBPERMQ_DW(index) (((index) & 0x40) != 0)
#define EXTRACT_BIT(avr, i, index) (extract64((avr)->u64[i], index, 1))
#else
-#define VBPERMQ_INDEX(avr, i) ((avr)->u8[15-(i)])
+#define VBPERMQ_INDEX(avr, i) ((avr)->u8[15 - (i)])
#define VBPERMD_INDEX(i) (1 - i)
#define VBPERMQ_DW(index) (((index) & 0x40) == 0)
#define EXTRACT_BIT(avr, i, index) \
int index = VBPERMQ_INDEX(b, i);
if (index < 128) {
- uint64_t mask = (1ull << (63-(index & 0x3F)));
+ uint64_t mask = (1ull << (63 - (index & 0x3F)));
if (a->u64[VBPERMQ_DW(index)] & mask) {
perm |= (0x8000 >> i);
}
VECTOR_FOR_INORDER_I(i, u8) {
#if defined(HOST_WORDS_BIGENDIAN)
- t[i>>3] |= VGBBD_MASKS[b->u8[i]] >> (i & 7);
+ t[i >> 3] |= VGBBD_MASKS[b->u8[i]] >> (i & 7);
#else
- t[i>>3] |= VGBBD_MASKS[b->u8[i]] >> (7-(i & 7));
+ t[i >> 3] |= VGBBD_MASKS[b->u8[i]] >> (7 - (i & 7));
#endif
}
void helper_##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
{ \
int i, j; \
- trgtyp prod[sizeof(ppc_avr_t)/sizeof(a->srcfld[0])]; \
+ trgtyp prod[sizeof(ppc_avr_t) / sizeof(a->srcfld[0])]; \
\
VECTOR_FOR_INORDER_I(i, srcfld) { \
prod[i] = 0; \
for (j = 0; j < sizeof(a->srcfld[0]) * 8; j++) { \
- if (a->srcfld[i] & (1ull<<j)) { \
+ if (a->srcfld[i] & (1ull << j)) { \
prod[i] ^= ((trgtyp)b->srcfld[i] << j); \
} \
} \
} \
\
VECTOR_FOR_INORDER_I(i, trgfld) { \
- r->trgfld[i] = prod[2*i] ^ prod[2*i+1]; \
+ r->trgfld[i] = prod[2 * i] ^ prod[2 * i + 1]; \
} \
}
VECTOR_FOR_INORDER_I(i, u64) {
prod[i] = 0;
for (j = 0; j < 64; j++) {
- if (a->u64[i] & (1ull<<j)) {
+ if (a->u64[i] & (1ull << j)) {
prod[i] ^= (((__uint128_t)b->u64[i]) << j);
}
}
VECTOR_FOR_INORDER_I(i, u64) {
prod[i].VsrD(1) = prod[i].VsrD(0) = 0;
for (j = 0; j < 64; j++) {
- if (a->u64[i] & (1ull<<j)) {
+ if (a->u64[i] & (1ull << j)) {
ppc_avr_t bshift;
if (j == 0) {
bshift.VsrD(0) = 0;
VECTOR_FOR_INORDER_I(j, u32) {
uint32_t e = x[i]->u32[j];
- result.u16[4*i+j] = (((e >> 9) & 0xfc00) |
- ((e >> 6) & 0x3e0) |
- ((e >> 3) & 0x1f));
+ result.u16[4 * i + j] = (((e >> 9) & 0xfc00) |
+ ((e >> 6) & 0x3e0) |
+ ((e >> 3) & 0x1f));
}
}
*r = result;
\
VECTOR_FOR_INORDER_I(i, from) { \
result.to[i] = cvt(a0->from[i], &sat); \
- result.to[i+ARRAY_SIZE(r->from)] = cvt(a1->from[i], &sat); \
+ result.to[i + ARRAY_SIZE(r->from)] = cvt(a1->from[i], &sat);\
} \
*r = result; \
if (dosat && sat) { \
- env->vscr |= (1 << VSCR_SAT); \
+ set_vscr_sat(env); \
} \
}
#define I(x, y) (x)
VEXTU_X_DO(vextuwrx, 32, 0)
#undef VEXTU_X_DO
-/* The specification says that the results are undefined if all of the
- * shift counts are not identical. We check to make sure that they are
- * to conform to what real hardware appears to do. */
+/*
+ * The specification says that the results are undefined if all of the
+ * shift counts are not identical. We check that they are identical,
+ * to conform to what real hardware appears to do.
+ */
#define VSHIFT(suffix, leftp) \
void helper_vs##suffix(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
{ \
VSHIFT(r, 0)
#undef VSHIFT
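
/*
 * Illustrative sketch only (the real check lives inside the VSHIFT macro
 * above): verifying that every byte of the shift-count register carries the
 * same low three bits, as the comment before VSHIFT describes. The helper
 * name is hypothetical; VsrB() is the element accessor used elsewhere in
 * this file.
 */
static inline bool vshift_counts_identical(const ppc_avr_t *b)
{
    unsigned int shift = b->VsrB(15) & 0x7;
    int i;

    for (i = 0; i < 16; i++) {
        if ((b->VsrB(i) & 0x7) != shift) {
            return false;
        }
    }
    return true;
}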
-#define VSL(suffix, element, mask) \
- void helper_vsl##suffix(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
- { \
- int i; \
- \
- for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
- unsigned int shift = b->element[i] & mask; \
- \
- r->element[i] = a->element[i] << shift; \
- } \
- }
-VSL(b, u8, 0x7)
-VSL(h, u16, 0x0F)
-VSL(w, u32, 0x1F)
-VSL(d, u64, 0x3F)
-#undef VSL
-
void helper_vslv(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
int i;
size = ARRAY_SIZE(r->u8);
for (i = 0; i < size; i++) {
- shift = b->u8[i] & 0x7; /* extract shift value */
- bytes = (a->u8[i] << 8) + /* extract adjacent bytes */
- (((i + 1) < size) ? a->u8[i + 1] : 0);
- r->u8[i] = (bytes << shift) >> 8; /* shift and store result */
+ shift = b->VsrB(i) & 0x7; /* extract shift value */
+ bytes = (a->VsrB(i) << 8) + /* extract adjacent bytes */
+ (((i + 1) < size) ? a->VsrB(i + 1) : 0);
+ r->VsrB(i) = (bytes << shift) >> 8; /* shift and store result */
}
}
int i;
unsigned int shift, bytes;
- /* Use reverse order, as destination and source register can be same. Its
- * being modified in place saving temporary, reverse order will guarantee
- * that computed result is not fed back.
+ /*
+ * Use reverse order, as the destination and source register can be the
+ * same. The register is modified in place, saving a temporary; reverse
+ * order guarantees that the computed result is not fed back.
*/
for (i = ARRAY_SIZE(r->u8) - 1; i >= 0; i--) {
- shift = b->u8[i] & 0x7; /* extract shift value */
- bytes = ((i ? a->u8[i - 1] : 0) << 8) + a->u8[i];
+ shift = b->VsrB(i) & 0x7; /* extract shift value */
+ bytes = ((i ? a->VsrB(i - 1) : 0) << 8) + a->VsrB(i);
/* extract adjacent bytes */
- r->u8[i] = (bytes >> shift) & 0xFF; /* shift and store result */
+ r->VsrB(i) = (bytes >> shift) & 0xFF; /* shift and store result */
}
}
int i;
ppc_avr_t result;
-#if defined(HOST_WORDS_BIGENDIAN)
for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
int index = sh + i;
if (index > 0xf) {
- result.u8[i] = b->u8[index - 0x10];
- } else {
- result.u8[i] = a->u8[index];
- }
- }
-#else
- for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
- int index = (16 - sh) + i;
- if (index > 0xf) {
- result.u8[i] = a->u8[index - 0x10];
+ result.VsrB(i) = b->VsrB(index - 0x10);
} else {
- result.u8[i] = b->u8[index];
+ result.VsrB(i) = a->VsrB(index);
}
}
-#endif
*r = result;
}
#if defined(HOST_WORDS_BIGENDIAN)
memmove(&r->u8[0], &a->u8[sh], 16 - sh);
- memset(&r->u8[16-sh], 0, sh);
+ memset(&r->u8[16 - sh], 0, sh);
#else
memmove(&r->u8[sh], &a->u8[0], 16 - sh);
memset(&r->u8[0], 0, sh);
#endif
}
-/* Experimental testing shows that hardware masks the immediate. */
-#define _SPLAT_MASKED(element) (splat & (ARRAY_SIZE(r->element) - 1))
-#if defined(HOST_WORDS_BIGENDIAN)
-#define SPLAT_ELEMENT(element) _SPLAT_MASKED(element)
-#else
-#define SPLAT_ELEMENT(element) \
- (ARRAY_SIZE(r->element) - 1 - _SPLAT_MASKED(element))
-#endif
-#define VSPLT(suffix, element) \
- void helper_vsplt##suffix(ppc_avr_t *r, ppc_avr_t *b, uint32_t splat) \
- { \
- uint32_t s = b->element[SPLAT_ELEMENT(element)]; \
- int i; \
- \
- for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
- r->element[i] = s; \
- } \
- }
-VSPLT(b, u8)
-VSPLT(h, u16)
-VSPLT(w, u32)
-#undef VSPLT
-#undef SPLAT_ELEMENT
-#undef _SPLAT_MASKED
#if defined(HOST_WORDS_BIGENDIAN)
#define VINSERT(suffix, element) \
void helper_vinsert##suffix(ppc_avr_t *r, ppc_avr_t *b, uint32_t index) \
VEXTRACT(d, u64)
#undef VEXTRACT
-void helper_xxextractuw(CPUPPCState *env, target_ulong xtn,
- target_ulong xbn, uint32_t index)
+void helper_xxextractuw(CPUPPCState *env, ppc_vsr_t *xt,
+ ppc_vsr_t *xb, uint32_t index)
{
- ppc_vsr_t xt, xb;
+ ppc_vsr_t t = { };
size_t es = sizeof(uint32_t);
uint32_t ext_index;
int i;
- getVSR(xbn, &xb, env);
- memset(&xt, 0, sizeof(xt));
-
-#if defined(HOST_WORDS_BIGENDIAN)
ext_index = index;
for (i = 0; i < es; i++, ext_index++) {
- xt.u8[8 - es + i] = xb.u8[ext_index % 16];
- }
-#else
- ext_index = 15 - index;
- for (i = es - 1; i >= 0; i--, ext_index--) {
- xt.u8[8 + i] = xb.u8[ext_index % 16];
+ t.VsrB(8 - es + i) = xb->VsrB(ext_index % 16);
}
-#endif
- putVSR(xtn, &xt, env);
+ *xt = t;
}
-void helper_xxinsertw(CPUPPCState *env, target_ulong xtn,
- target_ulong xbn, uint32_t index)
+void helper_xxinsertw(CPUPPCState *env, ppc_vsr_t *xt,
+ ppc_vsr_t *xb, uint32_t index)
{
- ppc_vsr_t xt, xb;
+ ppc_vsr_t t = *xt;
size_t es = sizeof(uint32_t);
int ins_index, i = 0;
- getVSR(xbn, &xb, env);
- getVSR(xtn, &xt, env);
-
-#if defined(HOST_WORDS_BIGENDIAN)
ins_index = index;
for (i = 0; i < es && ins_index < 16; i++, ins_index++) {
- xt.u8[ins_index] = xb.u8[8 - es + i];
+ t.VsrB(ins_index) = xb->VsrB(8 - es + i);
}
-#else
- ins_index = 15 - index;
- for (i = es - 1; i >= 0 && ins_index >= 0; i--, ins_index--) {
- xt.u8[ins_index] = xb.u8[8 + i];
- }
-#endif
- putVSR(xtn, &xt, env);
+ *xt = t;
}
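
/*
 * The VsrB()/VsrH()/VsrW()/VsrD() accessors index vector elements in the
 * architected (big-endian) element order regardless of host endianness,
 * which is what lets the HOST_WORDS_BIGENDIAN branches above collapse into
 * a single loop.
 */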
#define VEXT_SIGNED(name, element, cast) \
void helper_##name(ppc_avr_t *r, ppc_avr_t *b) \
{ \
int i; \
- VECTOR_FOR_INORDER_I(i, element) { \
+ for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
r->element[i] = (cast)b->element[i]; \
} \
}
void helper_##name(ppc_avr_t *r, ppc_avr_t *b) \
{ \
int i; \
- VECTOR_FOR_INORDER_I(i, element) { \
+ for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
r->element[i] = -b->element[i]; \
} \
}
VNEG(vnegd, s64)
#undef VNEG
-#define VSPLTI(suffix, element, splat_type) \
- void helper_vspltis##suffix(ppc_avr_t *r, uint32_t splat) \
- { \
- splat_type x = (int8_t)(splat << 3) >> 3; \
- int i; \
- \
- for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
- r->element[i] = x; \
- } \
- }
-VSPLTI(b, s8, int8_t)
-VSPLTI(h, s16, int16_t)
-VSPLTI(w, s32, int32_t)
-#undef VSPLTI
-
-#define VSR(suffix, element, mask) \
- void helper_vsr##suffix(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
- { \
- int i; \
- \
- for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
- unsigned int shift = b->element[i] & mask; \
- r->element[i] = a->element[i] >> shift; \
- } \
- }
-VSR(ab, s8, 0x7)
-VSR(ah, s16, 0xF)
-VSR(aw, s32, 0x1F)
-VSR(ad, s64, 0x3F)
-VSR(b, u8, 0x7)
-VSR(h, u16, 0xF)
-VSR(w, u32, 0x1F)
-VSR(d, u64, 0x3F)
-#undef VSR
-
void helper_vsro(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
int sh = (b->VsrB(0xf) >> 3) & 0xf;
ppc_avr_t result;
int sat = 0;
-#if defined(HOST_WORDS_BIGENDIAN)
- upper = ARRAY_SIZE(r->s32)-1;
-#else
- upper = 0;
-#endif
- t = (int64_t)b->s32[upper];
+ upper = ARRAY_SIZE(r->s32) - 1;
+ t = (int64_t)b->VsrSW(upper);
for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
- t += a->s32[i];
- result.s32[i] = 0;
+ t += a->VsrSW(i);
+ result.VsrSW(i) = 0;
}
- result.s32[upper] = cvtsdsw(t, &sat);
+ result.VsrSW(upper) = cvtsdsw(t, &sat);
*r = result;
if (sat) {
- env->vscr |= (1 << VSCR_SAT);
+ set_vscr_sat(env);
}
}
ppc_avr_t result;
int sat = 0;
-#if defined(HOST_WORDS_BIGENDIAN)
upper = 1;
-#else
- upper = 0;
-#endif
for (i = 0; i < ARRAY_SIZE(r->u64); i++) {
- int64_t t = (int64_t)b->s32[upper + i * 2];
+ int64_t t = (int64_t)b->VsrSW(upper + i * 2);
- result.u64[i] = 0;
+ result.VsrD(i) = 0;
for (j = 0; j < ARRAY_SIZE(r->u64); j++) {
- t += a->s32[2 * i + j];
+ t += a->VsrSW(2 * i + j);
}
- result.s32[upper + i * 2] = cvtsdsw(t, &sat);
+ result.VsrSW(upper + i * 2) = cvtsdsw(t, &sat);
}
*r = result;
if (sat) {
- env->vscr |= (1 << VSCR_SAT);
+ set_vscr_sat(env);
}
}
}
if (sat) {
- env->vscr |= (1 << VSCR_SAT);
+ set_vscr_sat(env);
}
}
}
if (sat) {
- env->vscr |= (1 << VSCR_SAT);
+ set_vscr_sat(env);
}
}
}
if (sat) {
- env->vscr |= (1 << VSCR_SAT);
+ set_vscr_sat(env);
}
}
ppc_avr_t result; \
\
for (i = 0; i < ARRAY_SIZE(r->u32); i++) { \
- uint16_t e = b->u16[hi ? i : i+4]; \
+ uint16_t e = b->u16[hi ? i : i + 4]; \
uint8_t a = (e >> 15) ? 0xff : 0; \
uint8_t r = (e >> 10) & 0x1f; \
uint8_t g = (e >> 5) & 0x1f; \
{ \
int i; \
\
- VECTOR_FOR_INORDER_I(i, element) { \
+ for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
r->element[i] = name(b->element[i]); \
} \
}
{
if (n & 1) {
bcd->u8[BCD_DIG_BYTE(n)] &= 0x0F;
- bcd->u8[BCD_DIG_BYTE(n)] |= (digit<<4);
+ bcd->u8[BCD_DIG_BYTE(n)] |= (digit << 4);
} else {
bcd->u8[BCD_DIG_BYTE(n)] &= 0xF0;
bcd->u8[BCD_DIG_BYTE(n)] |= digit;
static uint16_t get_national_digit(ppc_avr_t *reg, int n)
{
-#if defined(HOST_WORDS_BIGENDIAN)
- return reg->u16[7 - n];
-#else
- return reg->u16[n];
-#endif
+ return reg->VsrH(7 - n);
}
static void set_national_digit(ppc_avr_t *reg, uint8_t val, int n)
{
-#if defined(HOST_WORDS_BIGENDIAN)
- reg->u16[7 - n] = val;
-#else
- reg->u16[n] = val;
-#endif
+ reg->VsrH(7 - n) = val;
}
static int bcd_cmp_mag(ppc_avr_t *a, ppc_avr_t *b)
*r = result;
}
-#define ROTRu32(v, n) (((v) >> (n)) | ((v) << (32 - n)))
-
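/*
 * The helpers below use ror32()/ror64() from "qemu/bitops.h" in place of
 * the removed ROTRu32/ROTRu64 macros. For reference, a rotate-right with
 * the same behaviour could be written as the sketch below (illustrative
 * only, not the bitops.h implementation). The four expressions in
 * helper_vshasigmaw() are the SHA-256 sigma functions (lower-case
 * sigma0/sigma1 when st == 0, upper-case Sigma0/Sigma1 when st == 1);
 * helper_vshasigmad() is the SHA-512 analogue.
 */
static inline uint32_t rotr32_sketch(uint32_t v, unsigned int n)
{
    /* the '& 31' keeps the left shift defined when n == 0 */
    return (v >> n) | (v << ((32 - n) & 31));
}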
void helper_vshasigmaw(ppc_avr_t *r, ppc_avr_t *a, uint32_t st_six)
{
int st = (st_six & 0x10) != 0;
for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
if (st == 0) {
if ((six & (0x8 >> i)) == 0) {
- r->VsrW(i) = ROTRu32(a->VsrW(i), 7) ^
- ROTRu32(a->VsrW(i), 18) ^
+ r->VsrW(i) = ror32(a->VsrW(i), 7) ^
+ ror32(a->VsrW(i), 18) ^
(a->VsrW(i) >> 3);
} else { /* six.bit[i] == 1 */
- r->VsrW(i) = ROTRu32(a->VsrW(i), 17) ^
- ROTRu32(a->VsrW(i), 19) ^
+ r->VsrW(i) = ror32(a->VsrW(i), 17) ^
+ ror32(a->VsrW(i), 19) ^
(a->VsrW(i) >> 10);
}
} else { /* st == 1 */
if ((six & (0x8 >> i)) == 0) {
- r->VsrW(i) = ROTRu32(a->VsrW(i), 2) ^
- ROTRu32(a->VsrW(i), 13) ^
- ROTRu32(a->VsrW(i), 22);
+ r->VsrW(i) = ror32(a->VsrW(i), 2) ^
+ ror32(a->VsrW(i), 13) ^
+ ror32(a->VsrW(i), 22);
} else { /* six.bit[i] == 1 */
- r->VsrW(i) = ROTRu32(a->VsrW(i), 6) ^
- ROTRu32(a->VsrW(i), 11) ^
- ROTRu32(a->VsrW(i), 25);
+ r->VsrW(i) = ror32(a->VsrW(i), 6) ^
+ ror32(a->VsrW(i), 11) ^
+ ror32(a->VsrW(i), 25);
}
}
}
}
-#undef ROTRu32
-
-#define ROTRu64(v, n) (((v) >> (n)) | ((v) << (64-n)))
-
void helper_vshasigmad(ppc_avr_t *r, ppc_avr_t *a, uint32_t st_six)
{
int st = (st_six & 0x10) != 0;
for (i = 0; i < ARRAY_SIZE(r->u64); i++) {
if (st == 0) {
- if ((six & (0x8 >> (2*i))) == 0) {
- r->VsrD(i) = ROTRu64(a->VsrD(i), 1) ^
- ROTRu64(a->VsrD(i), 8) ^
+ if ((six & (0x8 >> (2 * i))) == 0) {
+ r->VsrD(i) = ror64(a->VsrD(i), 1) ^
+ ror64(a->VsrD(i), 8) ^
(a->VsrD(i) >> 7);
} else { /* six.bit[2*i] == 1 */
- r->VsrD(i) = ROTRu64(a->VsrD(i), 19) ^
- ROTRu64(a->VsrD(i), 61) ^
+ r->VsrD(i) = ror64(a->VsrD(i), 19) ^
+ ror64(a->VsrD(i), 61) ^
(a->VsrD(i) >> 6);
}
} else { /* st == 1 */
- if ((six & (0x8 >> (2*i))) == 0) {
- r->VsrD(i) = ROTRu64(a->VsrD(i), 28) ^
- ROTRu64(a->VsrD(i), 34) ^
- ROTRu64(a->VsrD(i), 39);
+ if ((six & (0x8 >> (2 * i))) == 0) {
+ r->VsrD(i) = ror64(a->VsrD(i), 28) ^
+ ror64(a->VsrD(i), 34) ^
+ ror64(a->VsrD(i), 39);
} else { /* six.bit[2*i] == 1 */
- r->VsrD(i) = ROTRu64(a->VsrD(i), 14) ^
- ROTRu64(a->VsrD(i), 18) ^
- ROTRu64(a->VsrD(i), 41);
+ r->VsrD(i) = ror64(a->VsrD(i), 14) ^
+ ror64(a->VsrD(i), 18) ^
+ ror64(a->VsrD(i), 41);
}
}
}
}
-#undef ROTRu64
-
void helper_vpermxor(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
ppc_avr_t result;
int i;
- VECTOR_FOR_INORDER_I(i, u8) {
- int indexA = c->u8[i] >> 4;
- int indexB = c->u8[i] & 0xF;
-#if defined(HOST_WORDS_BIGENDIAN)
- result.u8[i] = a->u8[indexA] ^ b->u8[indexB];
-#else
- result.u8[i] = a->u8[15-indexA] ^ b->u8[15-indexB];
-#endif
+ for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
+ int indexA = c->VsrB(i) >> 4;
+ int indexB = c->VsrB(i) & 0xF;
+
+ result.VsrB(i) = a->VsrB(indexA) ^ b->VsrB(indexB);
}
*r = result;
}