|
From: <sv...@va...> - 2016-08-15 21:54:18
|
Author: carll
Date: Mon Aug 15 22:54:04 2016
New Revision: 15938
Log:
Power PC test suite for ISA 3.0, part 5 of 5
This commit adds test suite support for the Power PC ISA 3.0 instructions
that were added in VEX commit 3244.
bugzilla 364948
Modified:
trunk/NEWS
trunk/memcheck/mc_translate.c
trunk/memcheck/tests/vbit-test/irops.c
trunk/none/tests/ppc64/ppc64_helpers.h
trunk/none/tests/ppc64/test_isa_3_0.c
trunk/none/tests/ppc64/test_isa_3_0_altivec.stdout.exp
trunk/none/tests/ppc64/test_isa_3_0_other.stdout.exp
trunk/tests/min_power_isa.c
Modified: trunk/NEWS
==============================================================================
--- trunk/NEWS (original)
+++ trunk/NEWS Mon Aug 15 22:54:04 2016
@@ -142,6 +142,11 @@
365273 Invalid write to stack location reported after signal handler runs
365912 ppc64BE segfault during jm-insns test (RELRO)
366344 Multiple unhandled instruction for Aarch64
+359767 Valgrind does not support the IBM POWER ISA 3.0 instructions, part 1/5
+361207 Valgrind does not support the IBM POWER ISA 3.0 instructions, part 2/5
+362329 Valgrind does not support the IBM POWER ISA 3.0 instructions, part 3/5
+363858 Valgrind does not support the IBM POWER ISA 3.0 instructions, part 4/5
+364948 Valgrind does not support the IBM POWER ISA 3.0 instructions, part 5/5
n-i-bz Fix incorrect (or infinite loop) unwind on RHEL7 x86 and amd64
n-i-bz massif --pages-as-heap=yes does not report peak caused by mmap+munmap
Modified: trunk/memcheck/mc_translate.c
==============================================================================
--- trunk/memcheck/mc_translate.c (original)
+++ trunk/memcheck/mc_translate.c Mon Aug 15 22:54:04 2016
@@ -1461,7 +1461,6 @@
ty = typeOfIRExpr(mce->sb->tyenv, vatom);
tl_assert(ty != Ity_I1);
- tl_assert(ty != Ity_I128);
if (isAlwaysDefd(mce, offset, sizeofIRType(ty))) {
/* later: no ... */
/* emit code to emit a complaint if any of the vbits are 1. */
@@ -1783,8 +1782,25 @@
operation. Here are some special cases which use PCast only
twice rather than three times. */
- /* I32 x I64 x I64 x I64 -> I64 */
/* Standard FP idiom: rm x FParg1 x FParg2 x FParg3 -> FPresult */
+
+ if (t1 == Ity_I32 && t2 == Ity_I128 && t3 == Ity_I128 && t4 == Ity_I128
+ && finalVty == Ity_I128) {
+ if (0) VG_(printf)("mkLazy4: I32 x I128 x I128 x I128 -> I128\n");
+ /* Widen 1st arg to I128. Since 1st arg is typically a rounding
+ mode indication which is fully defined, this should get
+ folded out later. */
+ at = mkPCastTo(mce, Ity_I128, va1);
+ /* Now fold in 2nd, 3rd, 4th args. */
+ at = mkUifU(mce, Ity_I128, at, va2);
+ at = mkUifU(mce, Ity_I128, at, va3);
+ at = mkUifU(mce, Ity_I128, at, va4);
+ /* and PCast once again. */
+ at = mkPCastTo(mce, Ity_I128, at);
+ return at;
+ }
+
+ /* I32 x I64 x I64 x I64 -> I64 */
if (t1 == Ity_I32 && t2 == Ity_I64 && t3 == Ity_I64 && t4 == Ity_I64
&& finalVty == Ity_I64) {
if (0) VG_(printf)("mkLazy4: I32 x I64 x I64 x I64 -> I64\n");
@@ -2510,6 +2526,7 @@
case Iop_QNarrowUn32Uto16Ux4:
case Iop_QNarrowUn32Sto16Sx4:
case Iop_QNarrowUn32Sto16Ux4:
+ case Iop_F32toF16x4:
return Iop_NarrowUn32to16x4;
case Iop_QNarrowUn16Uto8Ux8:
case Iop_QNarrowUn16Sto8Sx8:
@@ -2581,6 +2598,7 @@
case Iop_NarrowUn16to8x8:
case Iop_NarrowUn32to16x4:
case Iop_NarrowUn64to32x2:
+ case Iop_F32toF16x4:
at1 = assignNew('V', mce, Ity_I64, unop(narrow_op, vatom1));
return at1;
default:
@@ -2619,6 +2637,7 @@
case Iop_Widen16Sto32x4: pcast = mkPCast32x4; break;
case Iop_Widen32Uto64x2: pcast = mkPCast64x2; break;
case Iop_Widen32Sto64x2: pcast = mkPCast64x2; break;
+ case Iop_F16toF32x4: pcast = mkPCast32x4; break;
default: VG_(tool_panic)("vectorWidenI64");
}
tl_assert(isShadowAtom(mce,vatom1));
@@ -2807,6 +2826,13 @@
/* I32(rm) x F32 x F32 x F32 -> F32 */
return mkLazy4(mce, Ity_I32, vatom1, vatom2, vatom3, vatom4);
+ case Iop_MAddF128:
+ case Iop_MSubF128:
+ case Iop_NegMAddF128:
+ case Iop_NegMSubF128:
+ /* I32(rm) x F128 x F128 x F128 -> F128 */
+ return mkLazy4(mce, Ity_I128, vatom1, vatom2, vatom3, vatom4);
+
/* V256-bit data-steering */
case Iop_64x4toV256:
return assignNew('V', mce, Ity_V256,
@@ -2839,12 +2865,12 @@
tl_assert(sameKindedAtoms(atom3,vatom3));
switch (op) {
case Iop_AddF128:
- case Iop_AddD128:
case Iop_SubF128:
- case Iop_SubD128:
case Iop_MulF128:
- case Iop_MulD128:
case Iop_DivF128:
+ case Iop_AddD128:
+ case Iop_SubD128:
+ case Iop_MulD128:
case Iop_DivD128:
case Iop_QuantizeD128:
/* I32(rm) x F128/D128 x F128/D128 -> F128/D128 */
@@ -3419,6 +3445,8 @@
case Iop_CipherLV128:
case Iop_NCipherV128:
case Iop_NCipherLV128:
+ case Iop_MulI128by10E:
+ case Iop_MulI128by10ECarry:
return binary64Ix2(mce, vatom1, vatom2);
case Iop_QNarrowBin64Sto32Sx4:
@@ -3723,6 +3751,7 @@
case Iop_ShrV128:
case Iop_ShlV128:
+ case Iop_I128StoBCD128:
/* Same scheme as with all other shifts. Note: 10 Nov 05:
this is wrong now, scalar shifts are done properly lazily.
Vector shifts should be fixed too. */
@@ -3862,6 +3891,10 @@
case Iop_D128toI32U: /* IRRoundingMode(I32) x D128 -> unsigned I32 */
return mkLazy2(mce, Ity_I32, vatom1, vatom2);
+ case Iop_F128toI128S: /* IRRoundingMode(I32) x F128 -> signed I128 */
+ case Iop_RndF128: /* IRRoundingMode(I32) x F128 -> F128 */
+ return mkLazy2(mce, Ity_I128, vatom1, vatom2);
+
case Iop_F128toI64S: /* IRRoundingMode(I32) x F128 -> signed I64 */
case Iop_F128toI64U: /* IRRoundingMode(I32) x F128 -> unsigned I64 */
case Iop_F128toF64: /* IRRoundingMode(I32) x F128 -> F64 */
@@ -4373,8 +4406,20 @@
case Iop_NegF128:
case Iop_AbsF128:
+ case Iop_RndF128:
+ case Iop_TruncF128toI64S: /* F128 -> I64S */
+ case Iop_TruncF128toI32S: /* F128 -> I32S (result stored in 64-bits) */
+ case Iop_TruncF128toI64U: /* F128 -> I64U */
+ case Iop_TruncF128toI32U: /* F128 -> I32U (result stored in 64-bits) */
return mkPCastTo(mce, Ity_I128, vatom);
+ case Iop_BCD128toI128S:
+ case Iop_MulI128by10:
+ case Iop_MulI128by10Carry:
+ case Iop_F16toF64x2:
+ case Iop_F64toF16x2:
+ return vatom;
+
case Iop_I32StoF128: /* signed I32 -> F128 */
case Iop_I64StoF128: /* signed I64 -> F128 */
case Iop_I32UtoF128: /* unsigned I32 -> F128 */
@@ -4510,6 +4555,7 @@
case Iop_Clz8x16:
case Iop_Cls8x16:
case Iop_Abs8x16:
+ case Iop_Ctz8x16:
return mkPCast8x16(mce, vatom);
case Iop_CmpNEZ16x4:
@@ -4522,6 +4568,7 @@
case Iop_Clz16x8:
case Iop_Cls16x8:
case Iop_Abs16x8:
+ case Iop_Ctz16x8:
return mkPCast16x8(mce, vatom);
case Iop_CmpNEZ32x2:
@@ -4539,6 +4586,7 @@
case Iop_FtoI32Sx4_RZ:
case Iop_Abs32x4:
case Iop_RSqrtEst32Ux4:
+ case Iop_Ctz32x4:
return mkPCast32x4(mce, vatom);
case Iop_CmpwNEZ32:
@@ -4551,6 +4599,7 @@
case Iop_CipherSV128:
case Iop_Clz64x2:
case Iop_Abs64x2:
+ case Iop_Ctz64x2:
return mkPCast64x2(mce, vatom);
case Iop_PwBitMtxXpose64x2:
@@ -4568,6 +4617,7 @@
case Iop_QNarrowUn64Sto32Sx2:
case Iop_QNarrowUn64Sto32Ux2:
case Iop_QNarrowUn64Uto32Ux2:
+ case Iop_F32toF16x4:
return vectorNarrowUnV128(mce, op, vatom);
case Iop_Widen8Sto16x8:
@@ -4576,6 +4626,7 @@
case Iop_Widen16Uto32x4:
case Iop_Widen32Sto64x2:
case Iop_Widen32Uto64x2:
+ case Iop_F16toF32x4:
return vectorWidenI64(mce, op, vatom);
case Iop_PwAddL32Ux2:
Modified: trunk/memcheck/tests/vbit-test/irops.c
==============================================================================
--- trunk/memcheck/tests/vbit-test/irops.c (original)
+++ trunk/memcheck/tests/vbit-test/irops.c Mon Aug 15 22:54:04 2016
@@ -239,6 +239,10 @@
{ DEFOP(Iop_SubF128, UNDEF_ALL), .s390x = 1, .amd64 = 0, .x86 = 0, .arm = 0, .ppc64 = 0, .ppc32 = 0, .mips32 = 0, .mips64 = 0, .tilegx = 0 },
{ DEFOP(Iop_MulF128, UNDEF_ALL), .s390x = 1, .amd64 = 0, .x86 = 0, .arm = 0, .ppc64 = 0, .ppc32 = 0, .mips32 = 0, .mips64 = 0, .tilegx = 0 },
{ DEFOP(Iop_DivF128, UNDEF_ALL), .s390x = 1, .amd64 = 0, .x86 = 0, .arm = 0, .ppc64 = 0, .ppc32 = 0, .mips32 = 0, .mips64 = 0, .tilegx = 0 },
+ { DEFOP(Iop_MAddF128, UNDEF_ALL), .s390x = 0, .amd64 = 0, .x86 = 0, .arm = 0, .ppc64 = 0, .ppc32 = 0, .mips32 = 0, .mips64 = 0, .tilegx = 0 },
+ { DEFOP(Iop_MSubF128, UNDEF_ALL), .s390x = 0, .amd64 = 0, .x86 = 0, .arm = 0, .ppc64 = 0, .ppc32 = 0, .mips32 = 0, .mips64 = 0, .tilegx = 0 },
+ { DEFOP(Iop_NegMAddF128, UNDEF_ALL), .s390x = 0, .amd64 = 0, .x86 = 0, .arm = 0, .ppc64 = 0, .ppc32 = 0, .mips32 = 0, .mips64 = 0, .tilegx = 0 },
+ { DEFOP(Iop_NegMSubF128, UNDEF_ALL), .s390x = 0, .amd64 = 0, .x86 = 0, .arm = 0, .ppc64 = 0, .ppc32 = 0, .mips32 = 0, .mips64 = 0, .tilegx = 0 },
{ DEFOP(Iop_NegF128, UNDEF_ALL), .s390x = 1, .amd64 = 0, .x86 = 0, .arm = 0, .ppc64 = 0, .ppc32 = 0, .mips32 = 0, .mips64 = 0, .tilegx = 0 },
{ DEFOP(Iop_AbsF128, UNDEF_ALL), .s390x = 1, .amd64 = 0, .x86 = 0, .arm = 0, .ppc64 = 0, .ppc32 = 0, .mips32 = 0, .mips64 = 0, .tilegx = 0 },
{ DEFOP(Iop_SqrtF128, UNDEF_ALL), .s390x = 1, .amd64 = 0, .x86 = 0, .arm = 0, .ppc64 = 0, .ppc32 = 0, .mips32 = 0, .mips64 = 0, .tilegx = 0 },
@@ -252,8 +256,14 @@
{ DEFOP(Iop_F128toI64S, UNDEF_ALL), .s390x = 1, .amd64 = 0, .x86 = 0, .arm = 0, .ppc64 = 0, .ppc32 = 0, .mips32 = 0, .mips64 = 0, .tilegx = 0 },
{ DEFOP(Iop_F128toI32U, UNDEF_ALL), .s390x = 1, .amd64 = 0, .x86 = 0, .arm = 0, .ppc64 = 0, .ppc32 = 0, .mips32 = 0, .mips64 = 0, .tilegx = 0 },
{ DEFOP(Iop_F128toI64U, UNDEF_ALL), .s390x = 1, .amd64 = 0, .x86 = 0, .arm = 0, .ppc64 = 0, .ppc32 = 0, .mips32 = 0, .mips64 = 0, .tilegx = 0 },
+ { DEFOP(Iop_F128toI128S, UNDEF_ALL), .s390x = 0, .amd64 = 0, .x86 = 0, .arm = 0, .ppc64 = 0, .ppc32 = 0, .mips32 = 0, .mips64 = 0, .tilegx = 0 },
{ DEFOP(Iop_F128toF64, UNDEF_ALL), .s390x = 1, .amd64 = 0, .x86 = 0, .arm = 0, .ppc64 = 0, .ppc32 = 0, .mips32 = 0, .mips64 = 0, .tilegx = 0 },
{ DEFOP(Iop_F128toF32, UNDEF_ALL), .s390x = 1, .amd64 = 0, .x86 = 0, .arm = 0, .ppc64 = 0, .ppc32 = 0, .mips32 = 0, .mips64 = 0, .tilegx = 0 },
+ { DEFOP(Iop_RndF128, UNDEF_ALL), .s390x = 0, .amd64 = 0, .x86 = 0, .arm = 0, .ppc64 = 0, .ppc32 = 0, .mips32 = 0, .mips64 = 0, .tilegx = 0 },
+ { DEFOP(Iop_TruncF128toI32S,UNDEF_ALL), .s390x = 0, .amd64 = 0, .x86 = 0, .arm = 0, .ppc64 = 0, .ppc32 = 0, .mips32 = 0, .mips64 = 0, .tilegx = 0 },
+ { DEFOP(Iop_TruncF128toI32U,UNDEF_ALL), .s390x = 0, .amd64 = 0, .x86 = 0, .arm = 0, .ppc64 = 0, .ppc32 = 0, .mips32 = 0, .mips64 = 0, .tilegx = 0 },
+ { DEFOP(Iop_TruncF128toI64U,UNDEF_ALL), .s390x = 0, .amd64 = 0, .x86 = 0, .arm = 0, .ppc64 = 0, .ppc32 = 0, .mips32 = 0, .mips64 = 0, .tilegx = 0 },
+ { DEFOP(Iop_TruncF128toI64S,UNDEF_ALL), .s390x = 0, .amd64 = 0, .x86 = 0, .arm = 0, .ppc64 = 0, .ppc32 = 0, .mips32 = 0, .mips64 = 0, .tilegx = 0 },
{ DEFOP(Iop_AtanF64, UNDEF_ALL), .s390x = 0, .amd64 = 1, .x86 = 1, .arm = 0, .ppc64 = 0, .ppc32 = 0, .mips32 = 0, .mips64 = 0, .tilegx = 0 },
{ DEFOP(Iop_Yl2xF64, UNDEF_ALL), .s390x = 0, .amd64 = 1, .x86 = 1, .arm = 0, .ppc64 = 0, .ppc32 = 0, .mips32 = 0, .mips64 = 0, .tilegx = 0 },
{ DEFOP(Iop_Yl2xp1F64, UNDEF_ALL), .s390x = 0, .amd64 = 1, .x86 = 1, .arm = 0, .ppc64 = 0, .ppc32 = 0, .mips32 = 0, .mips64 = 0, .tilegx = 0 },
@@ -625,6 +635,8 @@
{ DEFOP(Iop_Fixed32SToF32x4_RN, UNDEF_UNKNOWN), },
{ DEFOP(Iop_F32toF16x4, UNDEF_UNKNOWN), },
{ DEFOP(Iop_F16toF32x4, UNDEF_UNKNOWN), },
+ { DEFOP(Iop_F64toF16x2, UNDEF_UNKNOWN), },
+ { DEFOP(Iop_F16toF64x2, UNDEF_UNKNOWN), },
{ DEFOP(Iop_Add32F0x4, UNDEF_UNKNOWN), },
{ DEFOP(Iop_Sub32F0x4, UNDEF_UNKNOWN), },
{ DEFOP(Iop_Mul32F0x4, UNDEF_UNKNOWN), },
@@ -804,6 +816,10 @@
{ DEFOP(Iop_Clz16x8, UNDEF_UNKNOWN), },
{ DEFOP(Iop_Clz32x4, UNDEF_UNKNOWN), },
{ DEFOP(Iop_Clz64x2, UNDEF_ALL_64x2), .ppc64 = 1, .ppc32 = 1 },
+ { DEFOP(Iop_Ctz8x16, UNDEF_UNKNOWN), },
+ { DEFOP(Iop_Ctz16x8, UNDEF_UNKNOWN), },
+ { DEFOP(Iop_Ctz32x4, UNDEF_UNKNOWN), },
+ { DEFOP(Iop_Ctz64x2, UNDEF_UNKNOWN), },
{ DEFOP(Iop_Cls8x16, UNDEF_UNKNOWN), },
{ DEFOP(Iop_Cls16x8, UNDEF_UNKNOWN), },
{ DEFOP(Iop_Cls32x4, UNDEF_UNKNOWN), },
@@ -980,6 +996,11 @@
{ DEFOP(Iop_GetMSBs8x16, UNDEF_UNKNOWN), },
{ DEFOP(Iop_RecipEst32Ux4, UNDEF_UNKNOWN), },
{ DEFOP(Iop_RSqrtEst32Ux4, UNDEF_UNKNOWN), },
+ { DEFOP(Iop_MulI128by10, UNDEF_UNKNOWN), },
+ { DEFOP(Iop_MulI128by10Carry, UNDEF_UNKNOWN), },
+ { DEFOP(Iop_MulI128by10E, UNDEF_UNKNOWN), },
+ { DEFOP(Iop_MulI128by10ECarry, UNDEF_UNKNOWN), },
+
/* ------------------ 256-bit SIMD Integer. ------------------ */
{ DEFOP(Iop_V256to64_0, UNDEF_UNKNOWN), },
{ DEFOP(Iop_V256to64_1, UNDEF_UNKNOWN), },
@@ -1068,6 +1089,8 @@
{ DEFOP(Iop_Min64Fx4, UNDEF_UNKNOWN), },
{ DEFOP(Iop_BCDAdd, UNDEF_ALL), .ppc64 = 1, .ppc32 = 1 },
{ DEFOP(Iop_BCDSub, UNDEF_ALL), .ppc64 = 1, .ppc32 = 1 },
+ { DEFOP(Iop_I128StoBCD128, UNDEF_UNKNOWN), },
+ { DEFOP(Iop_BCD128toI128S, UNDEF_UNKNOWN), },
{ DEFOP(Iop_PolynomialMulAdd8x16, UNDEF_ALL_8x16), .ppc64 = 1, .ppc32 = 1 },
{ DEFOP(Iop_PolynomialMulAdd16x8, UNDEF_ALL_16x8), .ppc64 = 1, .ppc32 = 1 },
{ DEFOP(Iop_PolynomialMulAdd32x4, UNDEF_ALL_32x4), .ppc64 = 1, .ppc32 = 1 },
@@ -1241,6 +1264,43 @@
}
break;
+ case Iop_MAddF128:
+ case Iop_MSubF128:
+ case Iop_NegMAddF128:
+ case Iop_NegMSubF128:
+ case Iop_F128toI128S:
+ case Iop_RndF128:
+ case Iop_I64UtoF128:
+ case Iop_I64StoF128:
+ case Iop_F64toF128:
+ case Iop_F128toF64:
+ case Iop_F128toF32:
+ case Iop_TruncF128toI32S:
+ case Iop_TruncF128toI32U:
+ case Iop_TruncF128toI64U:
+ case Iop_TruncF128toI64S:
+ case Iop_F16toF32x4:
+ case Iop_F32toF16x4:
+ case Iop_F64toF16x2:
+ case Iop_F16toF64x2:
+ case Iop_MulI128by10:
+ case Iop_MulI128by10Carry:
+ case Iop_MulI128by10E:
+ case Iop_MulI128by10ECarry: {
+ /* These IROps require a processor that supports ISA 3.0 (POWER9) or newer */
+ rc = system(MIN_POWER_ISA " 3.0 ");
+ rc /= 256;
+ /* MIN_POWER_ISA returns 0 if underlying HW supports the
+ * specified ISA or newer. Returns 1 if the HW does not support
+ * the specified ISA. Returns 2 on error.
+ * NOTE(review): the guard below is `rc > 2`, so an error return of
+ * exactly 2 falls through and still runs the test — confirm whether
+ * rc == 2 should also panic.
+ */
+ if (rc == 1) return NULL;
+ if (rc > 2) {
+ panic(" ERROR, min_power_isa() return code is invalid.\n");
+ }
+ }
+ break;
+
/* Other */
default:
break;
Modified: trunk/none/tests/ppc64/ppc64_helpers.h
==============================================================================
--- trunk/none/tests/ppc64/ppc64_helpers.h (original)
+++ trunk/none/tests/ppc64/ppc64_helpers.h Mon Aug 15 22:54:04 2016
@@ -30,6 +30,14 @@
#define AB_DPRINTF(fmt, args...) do { } while (0)
#endif
+/* Exhaustive tests?
+ * Due to the excessive size of the test results, allow a #ifdef to
+ * enable/disable most of the input values.
+ * Off by default.
+ */
+// #define EXHAUSTIVE_TESTS 1
+
+
#define ALLCR "cr0","cr1","cr2","cr3","cr4","cr5","cr6","cr7"
#define SET_CR(_arg) \
@@ -183,29 +191,29 @@
/* Extract one CR field */
static int extract_cr_rn(unsigned long local_cr,unsigned long rn) {
- unsigned int masked_cr;
- unsigned long shifted_value;
+ unsigned int masked_cr;
+ unsigned long shifted_value;
- shifted_value = local_cr >> ( ( (7 - rn) * 4 ) );
- masked_cr = shifted_value & 0xf;
- return masked_cr;
+ shifted_value = local_cr >> ( ( (7 - rn) * 4 ) );
+ masked_cr = shifted_value & 0xf;
+ return masked_cr;
}
/* Display one CR field */
static void dissect_cr_rn(unsigned long local_cr, unsigned long rn) {
- unsigned int masked_cr;
+ unsigned int masked_cr;
- masked_cr = extract_cr_rn(local_cr, rn);
- __dissect_cr(masked_cr);
+ masked_cr = extract_cr_rn(local_cr, rn);
+ __dissect_cr(masked_cr);
}
/* Display all of the CR fields... */
static void dissect_cr(unsigned long local_cr) {
- unsigned int crn;
+ unsigned int crn;
- for (crn = 0; crn < 8; crn++) {
- dissect_cr_rn(local_cr, crn);
- }
+ for (crn = 0; crn < 8; crn++) {
+ dissect_cr_rn(local_cr, crn);
+ }
}
/* dissect the fpscr bits that are valid under valgrind.
@@ -422,6 +430,7 @@
*/
static unsigned long dfp128_vals[] = {
+#ifdef EXHAUSTIVE_TESTS
// Some finite numbers
0x2208000000000000ULL, 0x0000000000000001ULL, // 1 *10^0
0xa208800000000000ULL, 0x0000000000000001ULL, // -1 *10^1
@@ -457,13 +466,21 @@
// flavors of NAN
0x7c00000000000000ULL, 0x0000000000000000ULL, // quiet
- 0xfc00000000000000ULL, 0xc00100035b007700ULL,
- 0x7e00000000000000ULL, 0xfe000000d0e0a0d0ULL, // signaling
+ 0xfc00000000000000ULL, 0xc00100035b007700ULL, // NAN
+ 0x7e00000000000000ULL, 0xfe000000d0e0a0d0ULL, // signaling NAN
// flavors of Infinity
0x7800000000000000ULL, 0x0000000000000000ULL, // +inf
0xf800000000000000ULL, 0x0000000000000000ULL, // -inf
0xf900000000000000ULL, 0x0000000000000000ULL // -inf
+#else
+ 0x2208000000000000ULL, 0x0000000000000001ULL, // 1 *10^0
+ 0x77ffffffffffffffULL, 0xffffffffffffffffULL, // max possible value *10^6111 (largest exp)
+ 0xa208000000000000ULL, 0x0000000000000000ULL, // -0*10^0
+ 0xfc00000000000000ULL, 0xc00100035b007700ULL, // NAN
+ 0x7e00000000000000ULL, 0xfe000000d0e0a0d0ULL, // signaling NAN
+ 0xf800000000000000ULL, 0x0000000000000000ULL, // -inf
+#endif
};
#define NUM_DFP128_VALS (sizeof(dfp128_vals) / 8)
@@ -472,7 +489,7 @@
/* Todo: update dfp64_vals to match dfp128_vals content. */
static unsigned long dfp64_vals[] = {
- //
+#ifdef EXHAUSTIVE_TESTS
0x77fcffffffffffffULL, // max possible value 9..9 *10^369 (largest exp)
0x0000000000000001ULL, // min possible nonzero value 1 *10^-398. (smallest exp)
0x4248000000000001ULL, // 1*10^260
@@ -498,6 +515,12 @@
0x7800000000000000ULL, //+Inf
0xf800000000000000ULL, //-Inf
0x7a34000000000000ULL, //+Inf
+#else
+ 0x77fcffffffffffffULL, // max possible value 9..9 *10^369 (largest exp)
+ 0x4248000000000000ULL, // 0 * 10 ^260
+ 0xfe000000d0e0a0d0ULL, //signaling NaN
+ 0xf800000000000000ULL, //-Inf
+#endif
};
#define NUM_DFP64_VALS (sizeof(dfp64_vals) / 8)
@@ -1348,21 +1371,33 @@
/* a table of exponent values for use in the float precision tests. */
unsigned long exponent_table[] = {
+#ifdef EXHAUSTIVE_TESTS
0x0000, /* +/-0 or +/-DENormalized, depending on associated mantissa. */
0x1a, /* within NORmalized for 16,32,64,128-bit. */
0x1f, /* +/-INF or +/-NaN for 16bit, NORmalized for 32,64,128 */
0xff, /* +/-INF or +/-NaN for 32bit, NORmalized for 64,128 */
0x7ff, /* +/-INF or +/-NaN for 32 and 64bit, NORmalized for 128 */
0x7fff, /* +/-INF or +/-NaN for 128bit. */
-#define MAX_EXPONENTS 6
+#else
+ 0x0000, /* +/-0 or +/-DENormalized, depending on associated mantissa. */
+ 0xff, /* +/-INF or +/-NaN for 32bit, NORmalized for 64,128 */
+ 0x7ff, /* +/-INF or +/-NaN for 32 and 64bit, NORmalized for 128 */
+ 0x7fff, /* +/-INF or +/-NaN for 128bit. */
+#endif
};
+#define MAX_EXPONENTS (sizeof(exponent_table) / sizeof(unsigned long))
unsigned long mantissa_table[] = {
+#ifdef EXHAUSTIVE_TESTS
0xbeefbeefbeef, /* NOR or DEN or NaN */
0x000000000000, /* ZERO or INF */
0x7fffffffffff, /* NOR or DEN or NaN */
-#define MAX_MANTISSAS 3
+#else
+ 0x000000000000, /* ZERO or INF */
+ 0x7fffffffffff, /* NOR or DEN or NaN */
+#endif
};
+#define MAX_MANTISSAS (sizeof(mantissa_table) / sizeof(unsigned long))
/* build in 64-bit chunks, low doubleword is zero. */
static unsigned long * float_vsxargs;
@@ -1522,9 +1557,15 @@
char_args = memalign(32, MAX_CHAR_ARGS_ARRAY_SIZE * sizeof(char));
+#ifdef EXHAUSTIVE_TESTS
for (ichar = 'a'; ichar <= 'z'; ichar++) { char_args[i++] = ichar; }
for (ichar = '0'; ichar <= '9'; ichar++) { char_args[i++] = ichar; }
for (ichar = 'A'; ichar <= 'Z'; ichar++) { char_args[i++] = ichar; }
+#else
+ for (ichar = 'a'; ichar <= 'z'; ichar+=6) { char_args[i++] = ichar; }
+ for (ichar = '0'; ichar <= '9'; ichar+=6) { char_args[i++] = ichar; }
+ for (ichar = 'A'; ichar <= 'Z'; ichar+=6) { char_args[i++] = ichar; }
+#endif
char_args[i++] = ' ';
char_args[i++] = '+';
@@ -1645,7 +1686,7 @@
// Permutes work against two (non-paired) VSX regs, so these are
// also grouped by twos.
vsxargs = memalign(16, MAX_VSX_ARRAY_SIZE * sizeof(unsigned long));
-
+#ifdef EXHAUSTIVE_TESTS
vsxargs[i++] = 0x0000000000000000UL; vsxargs[i++] = 0x0000000000000000UL;
vsxargs[i++] = 0x0102030405060708UL; vsxargs[i++] = 0x0102010201020102UL;
@@ -1663,6 +1704,13 @@
vsxargs[i++] = 0x0011223344556677UL; vsxargs[i++] = 0x8899aabbccddeeffUL;
vsxargs[i++] = 0xf0e0d0c0b0a09080UL; vsxargs[i++] = 0x7060504030201000UL;
+#else
+ vsxargs[i++] = 0x0000000000000000UL; vsxargs[i++] = 0x0000000000000000UL;
+ vsxargs[i++] = 0x0102030405060708UL; vsxargs[i++] = 0x0102010201020102UL;
+
+ vsxargs[i++] = 0x0011223344556677UL; vsxargs[i++] = 0x8899aabbccddeeffUL;
+ vsxargs[i++] = 0xf0e0d0c0b0a09080UL; vsxargs[i++] = 0x7060504030201000UL;
+#endif
// these next three groups are specific for vector rotate tests.
// bits 11:15,19:23,27:31 of each 32-bit word contain mb,me,sh values.
@@ -1693,12 +1741,16 @@
vpcv = memalign(16, MAX_VPCV_SIZE * sizeof(unsigned long));
+#ifdef EXHAUSTIVE_TESTS
/* These two lines are complementary pairs of each other. */
vpcv[i++]=0x12021a0817141317ULL; vpcv[i++]=0x100d1b05070f0205ULL;
vpcv[i++]=0x0d1d0517080b0c08ULL; vpcv[i++]=0x0f12041a18101d1cULL;
vpcv[i++]=0x100d1b070f020505ULL; vpcv[i++]=0x0e201f1400130105ULL;
vpcv[i++]=0x0705030a0b01ea0cULL; vpcv[i++]=0x0e0c09010602080dULL;
-
+#else
+ vpcv[i++]=0x12021a0817141317ULL; vpcv[i++]=0x100d1b05070f0205ULL;
+ vpcv[i++]=0x0705030a0b01ea0cULL; vpcv[i++]=0x0e0c09010602080dULL;
+#endif
nb_vpcv=i;
AB_DPRINTF("Registered %d permute control vectors \n", nb_vpcv);
@@ -1791,9 +1843,11 @@
{
long sign_index;
long sign_value;
- int scramble;
unsigned long i = 0;
unsigned long value;
+ #ifdef EXHAUSTIVE_TESTS
+ int scramble;
+#endif
if (verbose) printf("%s\n", __FUNCTION__);
@@ -1814,6 +1868,7 @@
i+=2;
}
+#ifdef EXHAUSTIVE_TESTS
for (scramble = 1; scramble <= 4; scramble++) {
packed_decimal_table[i] = 0x3210321032103210 * scramble;
packed_decimal_table[i+1] = sign_value;
@@ -1824,6 +1879,7 @@
if (verbose>3) printf("\n");
i+=2;
}
+#endif
/* Add some entries that will provide interesting output from
* the convert TO tests.
@@ -1839,6 +1895,7 @@
i += 2;
+#ifdef EXHAUSTIVE_TESTS
packed_decimal_table[i] = 0x0000000000000000;
packed_decimal_table[i+1] = sign_value;
packed_decimal_table[i+1] += 0x0000000098765430;
@@ -1860,6 +1917,7 @@
if (verbose>3) printf("\n");
i += 2;
+#endif
packed_decimal_table[i] = 0x0030000000000000;
packed_decimal_table[i+1] = sign_value;
@@ -1910,11 +1968,16 @@
#define NR_NATIONAL_DECIMAL_SIGNS 2
unsigned int national_decimal_values[] = {
+#ifdef EXHAUSTIVE_TESTS
0x0030, 0x0031, 0x0032, 0x0033, 0x0034,
0x0035, 0x0036, 0x0037, 0x0038, 0x0039
+#else
+ 0x0030, 0x0031,
+ 0x0035, 0x0039
+#endif
};
-#define NR_NATIONAL_DECIMAL_VALUES 10
+#define NR_NATIONAL_DECIMAL_VALUES (sizeof(national_decimal_values) / sizeof(unsigned int))
static unsigned long * national_decimal_table;
@@ -2003,6 +2066,7 @@
}
i += 2;
}
+#ifdef EXHAUSTIVE_TESTS
{ /* a few more for fun */
national_decimal_table[i] = 0x0031003200330034;
national_decimal_table[i+1] = 0x0035003600370000;
@@ -2026,6 +2090,7 @@
}
i += 2;
}
+#endif
}
if (verbose > 2) printf("\n");
@@ -2174,22 +2239,36 @@
printf(" ]");
}
-// Randomly chosen coverage for k includes values: 0,2,4,7,9
-#define SELECTIVE_INCREMENT_ZONED(k) \
+#ifdef EXHAUSTIVE_TESTS
+// Randomly chosen exhaustive coverage for k includes values: 0,2,4,7,9
+# define SELECTIVE_INCREMENT_ZONED(k) \
if (k == 7) k = 9; \
else if (k == 4) k = 7; \
else if (k == 2) k = 4; \
else if (k == 0) k = 2; \
else k++;
-
-// Randomly chosen coverage for signs includes values: 0,1,4,a,b,f
-#define SELECTIVE_INCREMENT_SIGNS(signs) \
+// Randomly chosen exhaustive coverage for signs includes values: 0,1,4,a,b,f
+# define SELECTIVE_INCREMENT_SIGNS(signs) \
if (signs == 0x0) signs = 0x1; \
else if (signs == 0x1) signs = 0x4; \
else if (signs == 0x4) signs = 0xa; \
else if (signs == 0xa) signs = 0xb; \
else if (signs == 0xb) signs = 0xf; \
else signs++;
+#else
+// Randomly chosen coverage for k includes values: 0,7,9
+# define SELECTIVE_INCREMENT_ZONED(k) \
+ if (k == 7) k = 9; \
+ else if (k == 0) k = 7; \
+ else k++;
+// Randomly chosen coverage for signs includes values: 0,4,b,f
+# define SELECTIVE_INCREMENT_SIGNS(signs) \
+ if (signs == 0x0) signs = 0x4; \
+ else if (signs == 0x4) signs = 0xb; \
+ else if (signs == 0xb) signs = 0xf; \
+ else signs++;
+#endif
+
static void build_zoned_decimal_table(void)
{
Modified: trunk/none/tests/ppc64/test_isa_3_0.c
==============================================================================
--- trunk/none/tests/ppc64/test_isa_3_0.c (original)
+++ trunk/none/tests/ppc64/test_isa_3_0.c Mon Aug 15 22:54:04 2016
@@ -224,6 +224,34 @@
PPC_XER_CA = 0x02000000,
};
+static void test_cnttzw (void)
+{
+ __asm__ __volatile__ ("cnttzw 17, 14");
+}
+
+static void test_cnttzd (void)
+{
+ __asm__ __volatile__ ("cnttzd 17, 14");
+}
+
+static void test_dotted_cnttzw (void)
+{
+ __asm__ __volatile__ ("cnttzw. 17, 14");
+}
+
+static void test_dotted_cnttzd (void)
+{
+ __asm__ __volatile__ ("cnttzd. 17, 14");
+}
+
+static test_list_t testgroup_logical_one[] = {
+ { &test_cnttzw , "cnttzw" },
+ { &test_cnttzd , "cnttzd" },
+ { &test_dotted_cnttzw, "cnttzw." },
+ { &test_dotted_cnttzd, "cnttzd." },
+ { NULL , NULL },
+};
+
static void test_modsw (void)
{
__asm__ __volatile__ ("modsw 17, 14, 15");
@@ -252,6 +280,60 @@
{ NULL , NULL },
};
+static void test_dotted_extswsli (void)
+{
+ switch(x_shift) {
+ case SH_0:
+ __asm__ __volatile__ ("extswsli. %0, %1, %2" : "=r" (r17) : "r" (r14), "i" (SH_0) );
+ break;
+
+ case SH_1:
+ __asm__ __volatile__ ("extswsli. %0, %1, %2" : "=r" (r17) : "r" (r14), "i" (SH_1) );
+ break;
+
+ case SH_2:
+ __asm__ __volatile__ ("extswsli. %0, %1, %2" : "=r" (r17) : "r" (r14), "i" (SH_2) );
+ break;
+
+ case SH_3:
+ __asm__ __volatile__ ("extswsli. %0, %1, %2" : "=r" (r17) : "r" (r14), "i" (SH_3) );
+ break;
+
+ default:
+ printf("Unhandled shift value for extswsli. %d\n", x_shift);
+ }
+}
+
+static void test_extswsli (void)
+{
+ switch(x_shift) {
+ case SH_0:
+ __asm__ __volatile__ ("extswsli %0, %1, %2" : "=r" (r17) : "r" (r14), "i"(SH_0));
+ break;
+
+ case SH_1:
+ __asm__ __volatile__ ("extswsli %0, %1, %2" : "=r" (r17) : "r" (r14), "i"(SH_1));
+ break;
+
+ case SH_2:
+ __asm__ __volatile__ ("extswsli %0, %1, %2" : "=r" (r17) : "r" (r14), "i"(SH_2));
+ break;
+
+ case SH_3:
+ __asm__ __volatile__ ("extswsli %0, %1, %2" : "=r" (r17) : "r" (r14), "i"(SH_3));
+ break;
+
+ default:
+ printf("Unhandled shift value for extswsli %d\n", x_shift);
+ }
+}
+
+static test_list_t testgroup_shifted_one[] = {
+ { &test_extswsli , "extswsli " },
+ { &test_dotted_extswsli, "extswsli." },
+ { NULL , NULL },
+};
+
static void test_maddhd (void)
{
__asm__ __volatile__ ("maddhd 17, 14, 15, 16");
@@ -290,57 +372,6 @@
{ NULL , NULL },
};
-static void test_dotted_extswsli (void)
-{
-#define EXTSWSLI_dotted_SHIFT(SH_X) \
- __asm__ __volatile__ ("extswsli. %0,%1,%2" : "=r" (r17) : "r" (r14), "i" (SH_X) );
- switch(x_shift) {
- case SH_0:
- EXTSWSLI_dotted_SHIFT(SH_0);
- break;
- case SH_1:
- EXTSWSLI_dotted_SHIFT(SH_1);
- break;
- case SH_2:
- EXTSWSLI_dotted_SHIFT(SH_2);
- break;
- case SH_3:
- EXTSWSLI_dotted_SHIFT(SH_3);
- break;
- default:
- printf("Unhandled shift value for extswsli. %d\n",x_shift);
- }
-}
-
-static void test_extswsli (void)
-{
-#define EXTSWSLI_SHIFT(x) \
- __asm__ __volatile__ ("extswsli %0,%1,%2":"=r" (r17):"r" (r14),"i"(x));
-
- switch(x_shift) {
- case SH_0:
- EXTSWSLI_SHIFT(SH_0);
- break;
- case SH_1:
- EXTSWSLI_SHIFT(SH_1);
- break;
- case SH_2:
- EXTSWSLI_SHIFT(SH_2);
- break;
- case SH_3:
- EXTSWSLI_SHIFT(SH_3);
- break;
- default:
- printf("Unhandled shift value for extswsli %d\n",x_shift);
- }
-}
-
-static test_list_t testgroup_shifted_one[] = {
- { &test_extswsli, "extswsli ",},
- { &test_dotted_extswsli, "extswsli.",},
- { NULL, NULL,},
-};
-
static void test_vabsdub(void) {
__asm__ __volatile__ ("vabsdub %0, %1, %2" : "+v" (vec_xt): "v" (vec_xa), "v" (vec_xb));
}
@@ -1031,6 +1062,22 @@
__asm__ __volatile__ ("vprtybq %0, %1" : "=v"(vec_xt) : "v"(vec_xb));
}
+static void test_vctzb(void) {
+ __asm__ __volatile__ ("vctzb %0, %1" : "=v"(vec_xt) : "v"(vec_xb));
+}
+
+static void test_vctzh(void) {
+ __asm__ __volatile__ ("vctzh %0, %1" : "=v"(vec_xt) : "v"(vec_xb));
+}
+
+static void test_vctzw(void) {
+ __asm__ __volatile__ ("vctzw %0, %1" : "=v"(vec_xt) : "v"(vec_xb));
+}
+
+static void test_vctzd(void) {
+ __asm__ __volatile__ ("vctzd %0, %1" : "=v"(vec_xt) : "v"(vec_xb));
+}
+
static test_list_t testgroup_vector_extend_sign[] = {
{ &test_vextsb2w, "vextsb2w" },
{ &test_vextsb2d, "vextsb2d" },
@@ -1042,6 +1089,10 @@
{ &test_vprtybw , "vprtybw " },
{ &test_vprtybd , "vprtybd " },
{ &test_vprtybq , "vprtybq " },
+ { &test_vctzb , "vctzb " },
+ { &test_vctzh , "vctzh " },
+ { &test_vctzw , "vctzw " },
+ { &test_vctzd , "vctzd " },
{ NULL , NULL },
};
@@ -1224,8 +1275,8 @@
{ &test_xststdcqp, "xststdcqp " },
{ &test_xststdcdp, "xststdcdp " },
{ &test_xststdcsp, "xststdcsp " },
- { &test_xvtstdcdp, "xvtstdcdp " },
{ &test_xvtstdcsp, "xvtstdcsp " },
+ { &test_xvtstdcdp, "xvtstdcdp " },
{ NULL , NULL },
};
@@ -1370,6 +1421,18 @@
{ NULL , NULL },
};
+static void test_bcdtrunc_p0(void) {
+ __asm__ __volatile__ ("bcdtrunc. %0, %1, %2, 0": "=v"(vec_xt) : "v"(vec_xa), "v"(vec_xb));
+}
+
+static void test_bcdtrunc_p1(void) {
+ __asm__ __volatile__ ("bcdtrunc. %0, %1, %2, 1": "=v"(vec_xt) : "v"(vec_xa), "v"(vec_xb));
+}
+
+static void test_bcdutrunc(void) {
+ __asm__ __volatile__ ("bcdutrunc. %0, %1, %2 ": "=v"(vec_xt) : "v"(vec_xa), "v"(vec_xb));
+}
+
static void test_bcdadd_p0(void) {
__asm__ __volatile__ ("bcdadd. %0, %1, %2, 0" : "=v"(vec_xt) : "v"(vec_xa), "v"(vec_xb));
}
@@ -1446,6 +1509,34 @@
__asm__ __volatile__ ("bcdctn. %0, %1 " : "=v"(vec_xt) : "v"(vec_xb));
}
+static void test_vmul10uq(void) {
+ __asm__ __volatile__ ("vmul10uq %0, %1 " : "=v"(vec_xt) : "v"(vec_xa));
+}
+
+static void test_vmul10cuq(void) {
+ __asm__ __volatile__ ("vmul10cuq %0, %1 " : "=v"(vec_xt) : "v"(vec_xa));
+}
+
+static void test_vmul10euq(void) {
+ __asm__ __volatile__ ("vmul10euq %0, %1, %2 " : "=v"(vec_xt) : "v"(vec_xa), "v"(vec_xb));
+}
+
+static void test_vmul10ecuq(void) {
+ __asm__ __volatile__ ("vmul10ecuq %0, %1, %2 " : "=v"(vec_xt) : "v"(vec_xa), "v"(vec_xb));
+}
+
+static void test_bcdctsq(void) {
+ __asm__ __volatile__ ("bcdctsq. %0, %1 " : "=v"(vec_xt) : "v"(vec_xb));
+}
+
+static void test_bcdcfsq_p0(void) {
+ __asm__ __volatile__ ("bcdcfsq. %0, %1, 0 " : "=v"(vec_xt) : "v"(vec_xb));
+}
+
+static void test_bcdcfsq_p1(void) {
+ __asm__ __volatile__ ("bcdcfsq. %0, %1, 1 " : "=v"(vec_xt) : "v"(vec_xb));
+}
+
static test_list_t testgroup_bcd_misc[] = {
{ &test_bcdadd_p0 , "bcdadd. p0" },
{ &test_bcdadd_p1 , "bcdadd. p1" },
@@ -1455,7 +1546,7 @@
{ &test_bcdcfn_p1 , "bcdcfn. p1" },
{ &test_bcdcfz_p0 , "bcdcfz. p0" }, /* The p0, p1 substrings are used later */
{ &test_bcdcfz_p1 , "bcdcfz. p1" }, /* " " */
- { &test_bcdctn , "bcdctn." },
+ { &test_bcdctn , "bcdctn. " },
{ &test_bcdctz_p0 , "bcdctz. p0" }, /* note: p0, p1 substrings are used later */
{ &test_bcdctz_p1 , "bcdctz. p1" }, /* " " */
{ &test_bcdcpsgn , "bcdcpsgn." },
@@ -1466,6 +1557,16 @@
{ &test_bcdus , "bcdus. " },
{ &test_bcdsr_p0 , "bcdsr. p0" },
{ &test_bcdsr_p1 , "bcdsr. p1" },
+ { &test_bcdtrunc_p0 , "bcdtrunc. p0" },
+ { &test_bcdtrunc_p1 , "bcdtrunc. p1" },
+ { &test_bcdutrunc , "bcdutrunc. " },
+ { &test_vmul10uq , "vmul10uq " },
+ { &test_vmul10cuq , "vmul10cuq " },
+ { &test_vmul10euq , "vmul10euq " },
+ { &test_vmul10ecuq , "vmul10ecuq " },
+ { &test_bcdctsq , "bcdctsq." },
+ { &test_bcdcfsq_p0 , "bcdcfsq. p0" },
+ { &test_bcdcfsq_p1 , "bcdcfsq. p1" },
{ NULL , NULL },
};
@@ -1615,15 +1716,72 @@
__asm__ __volatile__ ("xsiexpdp %0, %1, %2 " : "+wa" (vec_xt): "r" (r14), "r" (r15));
}
+static void test_xscvhpdp(void) {
+ __asm__ __volatile__ ("xscvhpdp %x0, %x1 " : "+wa" (vec_xt) : "wa" (vec_xb));
+}
+
+static void test_xscvdphp(void) {
+ __asm__ __volatile__ ("xscvdphp %x0, %x1 " : "+wi" (vec_xt) : "wi" (vec_xb));
+}
+
+static void test_xvcvhpsp(void) {
+ __asm__ __volatile__ ("xvcvhpsp %x0, %x1 " : "+ww" (vec_xt) : "ww" (vec_xb));
+}
+
+static void test_xvcvsphp(void) {
+ __asm__ __volatile__ ("xvcvsphp %x0, %x1 " : "+ww" (vec_xt) : "ww" (vec_xb));
+}
+
static test_list_t testgroup_vector_scalar_two_double[] = {
{ &test_xsiexpdp, "xsiexpdp" },
+ { &test_xscvhpdp, "xscvhpdp" },
+ { &test_xscvdphp, "xscvdphp" },
+ { &test_xvcvhpsp, "xvcvhpsp" },
+ { &test_xvcvsphp, "xvcvsphp" },
{ NULL , NULL },
};
+
static void test_xsabsqp(void) {
__asm__ __volatile__ ("xsabsqp %0, %1" : "+v"(vec_xt) : "v"(vec_xb));
}
+static void test_xscvdpqp(void) {
+ __asm__ __volatile__ ("xscvdpqp %0, %1" : "+v"(vec_xt) : "v"(vec_xb));
+}
+
+static void test_xscvqpdp(void) {
+ __asm__ __volatile__ ("xscvqpdp %0, %1" : "+v"(vec_xt) : "v"(vec_xb));
+}
+
+static void test_xscvqpdpo(void) {
+ __asm__ __volatile__ ("xscvqpdpo %0, %1" : "+v"(vec_xt) : "v"(vec_xb));
+}
+
+static void test_xscvqpsdz(void) {
+ __asm__ __volatile__ ("xscvqpsdz %0, %1" : "+v"(vec_xt) : "v"(vec_xb));
+}
+
+static void test_xscvqpswz(void) {
+ __asm__ __volatile__ ("xscvqpswz %0, %1" : "+v"(vec_xt) : "v"(vec_xb));
+}
+
+static void test_xscvqpudz(void) {
+ __asm__ __volatile__ ("xscvqpudz %0, %1" : "+v"(vec_xt) : "v"(vec_xb));
+}
+
+static void test_xscvqpuwz(void) {
+ __asm__ __volatile__ ("xscvqpuwz %0, %1" : "+v"(vec_xt) : "v"(vec_xb));
+}
+
+static void test_xscvsdqp(void) {
+ __asm__ __volatile__ ("xscvsdqp %0, %1" : "+v"(vec_xt) : "v"(vec_xb));
+}
+
+static void test_xscvudqp(void) {
+ __asm__ __volatile__ ("xscvudqp %0, %1" : "+v"(vec_xt) : "v"(vec_xb));
+}
+
static void test_xsxexpqp(void) {
__asm__ __volatile__ ("xsxexpqp %0, %1" : "+v"(vec_xt) : "v"(vec_xb));
}
@@ -1640,26 +1798,125 @@
__asm__ __volatile__ ("xsnabsqp %0, %1" : "+v"(vec_xt) : "v"(vec_xb));
}
+static void test_xssqrtqp(void) {
+ __asm__ __volatile__ ("xssqrtqp %0, %1" : "+v"(vec_xt) : "v"(vec_xb));
+}
+
+static void test_xssqrtqpo(void) {
+ __asm__ __volatile__ ("xssqrtqpo %0, %1" : "+v"(vec_xt) : "v"(vec_xb));
+}
+
static test_list_t testgroup_vector_scalar_two_quad[] = {
{ &test_xsabsqp , "xsabsqp " },
+ { &test_xscvdpqp , "xscvdpqp " },
+ { &test_xscvqpdp , "xscvqpdp " },
+ { &test_xscvqpdpo, "xscvqpdpo " },
+ { &test_xscvqpsdz, "xscvqpsdz " },
+ { &test_xscvqpswz, "xscvqpswz " },
+ { &test_xscvqpudz, "xscvqpudz " },
+ { &test_xscvqpuwz, "xscvqpuwz " },
+ { &test_xscvsdqp , "xscvsdqp " },
+ { &test_xscvudqp , "xscvudqp " },
{ &test_xsxexpqp , "xsxexpqp " },
{ &test_xsxsigqp , "xsxsigqp " },
{ &test_xsnegqp , "xsnegqp " },
{ &test_xsnabsqp , "xsnabsqp " },
+ { &test_xssqrtqp , "xssqrtqp " },
+ { &test_xssqrtqpo, "xssqrtqpo " },
{ NULL , NULL },
};
+static void test_xsaddqp(void) {
+ __asm__ __volatile__ ("xsaddqp %0, %1, %2" : "+v"(vec_xt) : "v"(vec_xa), "v"(vec_xb));
+}
+
+static void test_xsaddqpo(void) {
+ __asm__ __volatile__ ("xsaddqpo %0, %1, %2" : "+v"(vec_xt) : "v"(vec_xa), "v"(vec_xb));
+}
+
static void test_xscpsgnqp(void) {
__asm__ __volatile__ ("xscpsgnqp %0, %1, %2" : "+v"(vec_xt) : "v"(vec_xa), "v"(vec_xb));
}
+static void test_xsdivqp(void) {
+ __asm__ __volatile__ ("xsdivqp %0, %1, %2" : "+v"(vec_xt) : "v"(vec_xa), "v"(vec_xb));
+}
+
+static void test_xsdivqpo(void) {
+ __asm__ __volatile__ ("xsdivqpo %0, %1, %2" : "+v"(vec_xt) : "v"(vec_xa), "v"(vec_xb));
+}
+
static void test_xsiexpqp(void) {
__asm__ __volatile__ ("xsiexpqp %0, %1, %2" : "+v"(vec_xt) : "v"(vec_xa), "v"(vec_xb));
}
+static void test_xsmaddqp(void) {
+ __asm__ __volatile__ ("xsmaddqp %0, %1, %2" : "+v"(vec_xt) : "v"(vec_xa), "v"(vec_xb));
+}
+
+static void test_xsmaddqpo(void) {
+ __asm__ __volatile__ ("xsmaddqpo %0, %1, %2" : "+v"(vec_xt) : "v"(vec_xa), "v"(vec_xb));
+}
+
+static void test_xsmsubqp(void) {
+ __asm__ __volatile__ ("xsmsubqp %0, %1, %2" : "+v"(vec_xt) : "v"(vec_xa), "v"(vec_xb));
+}
+
+static void test_xsmsubqpo(void) {
+ __asm__ __volatile__ ("xsmsubqpo %0, %1, %2" : "+v"(vec_xt) : "v"(vec_xa), "v"(vec_xb));
+}
+
+static void test_xsmulqp(void) {
+ __asm__ __volatile__ ("xsmulqp %0, %1, %2" : "+v"(vec_xt) : "v"(vec_xa), "v"(vec_xb));
+}
+
+static void test_xsmulqpo(void) {
+ __asm__ __volatile__ ("xsmulqpo %0, %1, %2" : "+v"(vec_xt) : "v"(vec_xa), "v"(vec_xb));
+}
+
+static void test_xsnmaddqp(void) {
+ __asm__ __volatile__ ("xsnmaddqp %0, %1, %2" : "+v"(vec_xt) : "v"(vec_xa), "v"(vec_xb));
+}
+
+static void test_xsnmaddqpo(void) {
+ __asm__ __volatile__ ("xsnmaddqpo %0, %1, %2" : "+v"(vec_xt) : "v"(vec_xa), "v"(vec_xb));
+}
+
+static void test_xsnmsubqp(void) {
+ __asm__ __volatile__ ("xsnmsubqp %0, %1, %2" : "+v"(vec_xt) : "v"(vec_xa), "v"(vec_xb));
+}
+
+static void test_xsnmsubqpo(void) {
+ __asm__ __volatile__ ("xsnmsubqpo %0, %1, %2" : "+v"(vec_xt) : "v"(vec_xa), "v"(vec_xb));
+}
+
+static void test_xssubqp(void) {
+ __asm__ __volatile__ ("xssubqp %0, %1, %2" : "+v"(vec_xt) : "v"(vec_xa), "v"(vec_xb));
+}
+
+static void test_xssubqpo(void) {
+ __asm__ __volatile__ ("xssubqpo %0, %1, %2" : "+v"(vec_xt) : "v"(vec_xa), "v"(vec_xb));
+}
+
static test_list_t testgroup_vector_three_quad[] = {
+ { &test_xsaddqp , "xsaddqp " },
+ { &test_xsaddqpo , "xsaddqpo " },
{ &test_xscpsgnqp , "xscpsgnqp " },
+ { &test_xsdivqp , "xsdivqp " },
+ { &test_xsdivqpo , "xsdivqpo " },
{ &test_xsiexpqp , "xsiexpqp " },
+ { &test_xsmaddqp , "xsmaddqp " },
+ { &test_xsmaddqpo , "xsmaddqpo " },
+ { &test_xsmsubqp , "xsmsubqp " },
+ { &test_xsmsubqpo , "xsmsubqpo " },
+ { &test_xsmulqp , "xsmulqp " },
+ { &test_xsmulqpo , "xsmulqpo " },
+ { &test_xsnmaddqp , "xsnmaddqp " },
+ { &test_xsnmaddqpo, "xsnmaddqpo " },
+ { &test_xsnmsubqp , "xsnmsubqp " },
+ { &test_xsnmsubqpo, "xsnmsubqpo " },
+ { &test_xssubqp , "xssubqp " },
+ { &test_xssubqpo , "xssubqpo " },
{ NULL , NULL },
};
@@ -1739,6 +1996,79 @@
{ NULL , NULL },
};
+#define XSRQPI(R,RMC) \
+ SET_FPSCR_ZERO \
+ SET_CR_ZERO \
+ __asm__ __volatile__ \
+ ("xsrqpi %1, %0, %2, %3" : "=v"(vec_xt) : "i"(R), "v"(vec_xb), "i"(RMC)); \
+ GET_CR(local_cr); \
+ GET_FPSCR(local_fpscr);
+
+#define XSRQPIX(R,RMC) \
+ SET_FPSCR_ZERO \
+ SET_CR_ZERO \
+ __asm__ __volatile__ \
+ ("xsrqpix %1, %0, %2, %3" : "=v"(vec_xt) : "i"(R), "v"(vec_xb), "i"(RMC));\
+ GET_CR(local_cr); \
+ GET_FPSCR(local_fpscr);
+
+#define XSRQPXP(R,RMC) \
+ SET_FPSCR_ZERO \
+ SET_CR_ZERO \
+ __asm__ __volatile__ \
+ ("xsrqpxp %1, %0, %2, %3" : "=v"(vec_xt) : "i"(R), "v"(vec_xb), "i"(RMC));\
+ GET_CR(local_cr); \
+ GET_FPSCR(local_fpscr);
+
+/* For the scalar round to quad instructions, x_index is used to key into
+ * two fields; x_index bit [2] becomes the one-bit 'R' and x_index bits [0, 1]
+ * becomes the two-bit 'RMC'.
+ */
+static void test_xsrqpi(void) {
+ switch(x_index) {
+ case 0: XSRQPI(0, 0); break;
+ case 1: XSRQPI(0, 1); break;
+ case 2: XSRQPI(0, 2); break;
+ case 3: XSRQPI(0, 3); break;
+ case 4: XSRQPI(1, 0); break;
+ case 5: XSRQPI(1, 1); break;
+ case 6: XSRQPI(1, 2); break;
+ case 7: XSRQPI(1, 3); break;
+ }
+}
+static void test_xsrqpix(void) {
+ switch(x_index) {
+ case 0: XSRQPIX(0, 0); break;
+ case 1: XSRQPIX(0, 1); break;
+ case 2: XSRQPIX(0, 2); break;
+ case 3: XSRQPIX(0, 3); break;
+ case 4: XSRQPIX(1, 0); break;
+ case 5: XSRQPIX(1, 1); break;
+ case 6: XSRQPIX(1, 2); break;
+ case 7: XSRQPIX(1, 3); break;
+ }
+}
+
+static void test_xsrqpxp(void) {
+ switch(x_index) {
+ case 0: XSRQPXP(0, 0); break;
+ case 1: XSRQPXP(0, 1); break;
+ case 2: XSRQPXP(0, 2); break;
+ case 3: XSRQPXP(0, 3); break;
+ case 4: XSRQPXP(1, 0); break;
+ case 5: XSRQPXP(1, 1); break;
+ case 6: XSRQPXP(1, 2); break;
+ case 7: XSRQPXP(1, 3); break;
+ }
+}
+
+static test_list_t testgroup_vector_scalar_rounding_quads[] = {
+ { &test_xsrqpi , "xsrqpi " },
+ { &test_xsrqpix, "xsrqpix" },
+ { &test_xsrqpxp, "xsrqpxp" },
+ { NULL , NULL },
+};
+
/* ###### begin all_tests table. */
@@ -1758,11 +2088,21 @@
PPC_INTEGER | PPC_ARITH | PPC_TWO_ARGS,
},
{
+ testgroup_shifted_one,
+ "ppc one argument plus shift",
+ PPC_MISC | PPC_CR | PPC_TWO_ARGS,
+ },
+ {
testgroup_three_args,
"ppc three parameter ops",
PPC_INTEGER | PPC_ARITH | PPC_THREE_ARGS,
},
{
+ testgroup_logical_one,
+ "ppc count zeros",
+ PPC_INTEGER | PPC_LOGICAL | PPC_ONE_ARG,
+ },
+ {
testgroup_set_boolean,
"ppc set boolean",
PPC_INTEGER | PPC_LOGICAL | PPC_ONE_IMM,
@@ -1805,12 +2145,17 @@
{
testgroup_vector_scalar_compare_quads,
"ppc vector scalar compare exponents quads",
- PPC_ALTIVEC_QUAD | PPC_COMPARE,
+ PPC_ALTIVEC_QUAD | PPC_COMPARE | PPC_COMPARE_ARGS,
+ },
+ {
+ testgroup_vector_scalar_rounding_quads,
+ "ppc vector scalar rounding quads",
+ PPC_ALTIVEC_QUAD | PPC_ROUND,
},
{
testgroup_vsx_xxpermute,
"ppc vector permutes",
- PPC_ALTIVEC | PPC_PERMUTE | PPC_THREE_ARGS,
+ PPC_ALTIVEC | PPC_PERMUTE,
},
{
testgroup_vector_four,
@@ -1848,11 +2193,6 @@
PPC_MISC | PPC_TWO_ARGS,
},
{
- testgroup_shifted_one,
- "ppc one argument plus shift",
- PPC_MISC | PPC_THREE_ARGS,
- },
- {
testgroup_vector_scalar_compare_exp_double,
"ppc vector scalar compare exponents doubles",
PPC_ALTIVEC_DOUBLE | PPC_COMPARE | PPC_COMPARE_ARGS,
@@ -1920,6 +2260,47 @@
}
}
+#define instruction_sets_cr0_to_zero(inst_name) \
+ ( (strncmp(inst_name, "cnttzw.", 7) == 0 ) || \
+ (strncmp(inst_name, "cnttzd.", 7) == 0 ) )
+
+static void testfunction_logical_one (const char* instruction_name,
+ test_func_t func,
+ unsigned int test_flags) {
+ int i;
+ volatile HWord_t res;
+ volatile unsigned int cr;
+
+ VERBOSE_FUNCTION_CALLOUT
+
+ for (i = 0; i < nb_iargs; i++) {
+
+ r14 = iargs[i];
+
+ /* The logical instructions will set CR fields to zero, so
+ * lets start with some non zero content in CR0.
+ */
+ SET_CR0_FIELD(0xF);
+
+ (*func)();
+
+ res = r17;
+ GET_CR(cr);
+
+ printf("%s %016lx => %016lx",
+ instruction_name, (long unsigned)iargs[i], (long unsigned)res);
+
+ if (instruction_sets_cr0_to_zero(instruction_name)
+ && ((cr & 0xF0000000) != 0 )) {
+ /* The dotted version sets the CR0 to 0, verify */
+ printf(" Expected cr0 to be zero, it is (%08x)\n", cr & 0xF0000000);
+ }
+ printf("\n");
+
+ if (verbose) printf("\n");
+ }
+}
+
void testfunction_one_arg_with_shift (const char* instruction_name,
test_func_t test_function,
unsigned int ignore_test_flags)
@@ -2775,6 +3156,41 @@
}
}
+static void testfunction_vector_scalar_rounding_quads (const char* instruction_name,
+ test_func_t test_function,
+ unsigned int ignore_test_flags) {
+ /* Uses global variable x_index */
+ /* For this function, x_index is used as a key into R and RMC values.
+ * Also note, the fpscr.rn value may be used to affect the rounding mode.
+ * that variation is not evaluated here. */
+ int j;
+
+ VERBOSE_FUNCTION_CALLOUT
+
+ for (j = 0; j < nb_float_vsxargs - 1; j++) {
+ for (x_index = 0; x_index < 8; x_index++) {
+ vec_xb[0] = float_vsxargs[j];
+ vec_xb[1] = float_vsxargs[j+1];
+
+ printf("%s %016lx%016lx (R=%x) (RMC=%x) => ",
+ instruction_name,
+ vec_xb[1], vec_xb[0],
+ (x_index & 0x4) >> 2, x_index & 0x3);
+
+ SET_CR_ZERO
+ SET_FPSCR_ZERO
+
+ (*test_function)();
+
+ GET_FPSCR(local_fpscr);
+
+ printf("%016lx%016lx", vec_xt[1], vec_xt[0]);
+ dissect_fpscr(local_fpscr);
+ printf("\n");
+ }
+ }
+}
+
static void testfunction_vector_three_special (const char* instruction_name,
test_func_t test_function,
unsigned int ignore_test_flags){
@@ -3191,18 +3607,19 @@
/* ######## begin grand testing loops. */
typedef struct insn_sel_flags_t_struct {
- int one_arg, two_args, three_args, four_args, cmp_args;
- int arith, logical, compare, popcnt, ldst, insert_extract;
- int integer, floats, p405, altivec, altivec_double, altivec_quad;
- int faltivec, vector, misc, dfp, bcd, no_op, pc_immediate;
- int cr;
+ unsigned int one_arg, two_args, three_args, four_args, cmp_args, ld_args, st_args,
+ one_imed_args;
+ unsigned int arith, logical, compare, popcnt, ldst, insert_extract, permute, round;
+ unsigned int integer, altivec, altivec_quad, altivec_double, dfp, bcd, misc,
+ no_op, pc_immediate;
+ unsigned int cr;
} insn_sel_flags_t;
static void do_tests ( insn_sel_flags_t seln_flags)
{
test_group_t group_function;
test_list_t *tests;
- int nb_args, type, family;
+ unsigned int nb_args, type, family;
int i, j, n;
n = 0;
@@ -3231,7 +3648,10 @@
(nb_args == 2 && !seln_flags.two_args) ||
(nb_args == 3 && !seln_flags.three_args) ||
(nb_args == 4 && !seln_flags.four_args) ||
- (nb_args == 5 && !seln_flags.cmp_args))
+ (nb_args == 5 && !seln_flags.cmp_args) ||
+ (nb_args == 6 && !seln_flags.ld_args) ||
+ (nb_args == 7 && !seln_flags.st_args) ||
+ (nb_args == 8 && !seln_flags.one_imed_args))
continue;
/* Check instruction type */
@@ -3246,15 +3666,21 @@
/* Check instruction family */
family = all_tests[i].flags & PPC_FAMILY_MASK;
- if ((family == PPC_INTEGER && !seln_flags.integer) ||
- (family == PPC_ALTIVEC && !seln_flags.altivec) ||
- (family == PPC_ALTIVEC_DOUBLE && !seln_flags.altivec_double) ||
- (family == PPC_ALTIVEC_QUAD && !seln_flags.altivec_quad) ||
- (family == PPC_DFP && !seln_flags.dfp) ||
- (family == PPC_BCD && !seln_flags.bcd) ||
- (family == PPC_NO_OP && !seln_flags.no_op) ||
- (family == PPC_PC_IMMEDIATE && !seln_flags.pc_immediate) ||
- (family == PPC_MISC && !seln_flags.misc))
+
+ /* do each check each case individually to reduce computation */
+ if (family == PPC_INTEGER && seln_flags.integer == 0) continue;
+ if (family == PPC_ALTIVEC && seln_flags.altivec == 0) continue;
+ if (family == PPC_DFP && seln_flags.dfp == 0) continue;
+ if (family == PPC_BCD && seln_flags.bcd == 0) continue;
+ if (family == PPC_NO_OP && seln_flags.no_op == 0) continue;
+ if (family == PPC_MISC && seln_flags.misc == 0) continue;
+ if (family == PPC_ALTIVEC_DOUBLE && seln_flags.altivec_double == 0)
+ continue;
+
+ if (family == PPC_ALTIVEC_QUAD && seln_flags.altivec_quad == 0)
+ continue;
+
+ if (family == PPC_PC_IMMEDIATE && seln_flags.pc_immediate == 0)
continue;
/* Check flags update */
@@ -3291,6 +3717,10 @@
group_function = &testfunction_set_boolean;
break;
+ case PPC_ONE_ARG:
+ group_function = &testfunction_logical_one;
+ break;
+
default:
printf("ERROR: PPC_LOGICAL, unhandled number of arguments. 0x%08x\n",
nb_args);
@@ -3304,7 +3734,7 @@
default:
printf("ERROR: PPC_INTEGER, unhandled type 0x%08x\n", type);
continue;
- } /* switch (nb_args) */
+ } /* switch (type) */
break;
case PPC_ALTIVEC:
@@ -3409,7 +3839,6 @@
printf("ERROR: PPC_MISC, unhandled number of arguments. 0x%08x\n", nb_args);
continue;
} /* switch(PPC_MISC, nb_args) */
-
break;
case PPC_ALTIVEC_QUAD:
@@ -3430,6 +3859,10 @@
group_function = &testfunction_vector_scalar_compare_quads;
break;
+ case PPC_ROUND:
+ group_function = &testfunction_vector_scalar_rounding_quads;
+ break;
+
default:
printf("ERROR: PPC_ALTIVEC_QUAD, unhandled type. %d\n", type);
continue;
@@ -3456,6 +3889,12 @@
printf("ERROR: PPC_ALTIVEC_DOUBLE, PPC_COMPARE, unhandled number of arguments. 0x%08x\n", nb_args);
continue;
} /* switch(PPC_COMPARE, nb_args) */
+ break;
+
+ default:
+ printf("ERROR: PPC_ALTIVEC_DOUBLE, unhandled type. %d\n", type);
+ continue;
+
} /* switch(type) */
break;
@@ -3537,25 +3976,30 @@
flags.three_args = 1;
flags.four_args = 1;
flags.cmp_args = 1;
+ flags.ld_args = 1;
+ flags.st_args = 1;
+ flags.one_imed_args = 1;
// Type
flags.arith = 1;
flags.logical = 1;
- flags.popcnt = 1;
flags.compare = 1;
flags.ldst = 1;
+ flags.popcnt = 1;
flags.insert_extract = 1;
+ flags.permute = 1;
+ flags.round = 1;
// Family
flags.integer = 0;
- flags.misc = 0;
+ flags.altivec = 0;
+ flags.altivec_double = 0;
+ flags.altivec_quad = 0;
flags.dfp = 0;
flags.bcd = 0;
+ flags.misc = 0;
flags.no_op = 0;
flags.pc_immediate = 0;
- flags.altivec = 0;
- flags.altivec_double = 0;
- flags.altivec_quad = 0;
// Flags
flags.cr = 2;
@@ -3644,6 +4088,9 @@
printf(" three_args = %d\n", flags.three_args);
printf(" four_args = %d\n", flags.four_args);
printf(" cmp_args = %d\n", flags.cmp_args);
+ printf(" load_args = %d\n", flags.ld_args);
+ printf(" store_args = %d\n", flags.st_args);
+ printf(" one_im_args = %d\n", flags.one_imed_args);
printf(" type: \n");
printf(" arith = %d\n", flags.arith);
printf(" logical = %d\n", flags.logical);
@@ -3657,6 +4104,7 @@
printf(" altivec quad = %d\n", flags.altivec_quad);
printf(" DFP = %d\n", flags.dfp);
printf(" BCD = %d\n", flags.bcd);
+ printf(" PC immediate shifted = %d\n", flags.pc_immediate);
printf(" misc = %d\n", flags.misc);
printf(" cr update: \n");
printf(" cr = %d\n", flags.cr);
Modified: trunk/none/tests/ppc64/test_isa_3_0_altivec.stdout.exp
==============================================================================
--- trunk/none/tests/ppc64/test_isa_3_0_altivec.stdout.exp (original)
+++ trunk/none/tests/ppc64/test_isa_3_0_altivec.stdout.exp Mon Aug 15 22:54:04 2016
@@ -2,817 +2,245 @@
Test instruction group [ppc vector absolutes]
vabsdub xa:0000000000000000 0000000000000000 xb:0000000000000000 0000000000000000 => xt:0000000000000000 0000000000000000 (00000000)
vabsdub xa:0000000000000000 0000000000000000 xb:0102030405060708 0102030405060708 => xt:0102030405060708 0102030405060708 (00000000)
-vabsdub xa:0000000000000000 0000000000000000 xb:aaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaa => xt:aaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaa (00000000)
-vabsdub xa:0000000000000000 0000000000000000 xb:5555555555555555 5555555555555555 => xt:5555555555555555 5555555555555555 (00000000)
-vabsdub xa:aaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaa xb:0000000000000000 0000000000000000 => xt:aaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaa (00000000)
-vabsdub xa:aaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaa xb:0102030405060708 0102030405060708 => xt:a9a8a7a6a5a4a3a2 a9a8a7a6a5a4a3a2 (00000000)
-vabsdub xa:aaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaa xb:aaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaa => xt:0000000000000000 0000000000000000 (00000000)
-vabsdub xa:aaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaa xb:5555555555555555 5555555555555555 => xt:5555555555555555 5555555555555555 (00000000)
-vabsdub xa:0102010201020102 08090a0b0c0d0e0f xb:0000000000000000 0000000000000000 => xt:08090a0b0c0d0e0f 0102010201020102 (00000000)
-vabsdub xa:0102010201020102 08090a0b0c0d0e0f xb:0102030405060708 0102030405060708 => xt:0707070707070707 0000020204040606 (00000000)
-vabsdub xa:0102010201020102 08090a0b0c0d0e0f xb:aaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaa => xt:a2a1a09f9e9d9c9b a9a8a9a8a9a8a9a8 (00000000)
-vabsdub xa:0102010201020102 08090a0b0c0d0e0f xb:5555555555555555 5555555555555555 => xt:4d4c4b4a49484746 5453545354535453 (00000000)
-vabsdub xa:070d111d1e555e70 7ea1a5a7abadb0ba xb:0000000000000000 0000000000000000 => xt:7ea1a5a7abadb0ba 070d111d1e555e70 (00000000)
-vabsdub xa:070d111d1e555e70 7ea1a5a7abadb0ba xb:0102030405060708 0102030405060708 => xt:7d9fa2a3a6a7a9b2 060b0e19194f5768 (00000000)
-vabsdub xa:070d111d1e555e70 7ea1a5a7abadb0ba xb:aaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaa => xt:2c09050301030610 a39d998d8c554c3a (00000000)
-vabsdub xa:070d111d1e555e70 7ea1a5a7abadb0ba xb:5555555555555555 5555555555555555 => xt:294c505256585b65 4e4844383700091b (00000000)
-vabsdub xa:ced0deede5ecef00 00115e7eadbabec0 xb:0000000000000000 0000000000000000 => xt:00115e7eadbabec0 ced0deede5ecef00 (00000000)
-vabsdub xa:ced0deede5ecef00 00115e7eadbabec0 xb:0102030405060708 0102030405060708 => xt:010f5b7aa8b4b7b8 cdcedbe9e0e6e808 (00000000)
-vabsdub xa:ced0deede5ecef00 00115e7eadbabec0 xb:aaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaa => xt:aa994c2c03101416 242634433b4245aa (00000000)
-vabsdub xa:ced0deede5ecef00 00115e7eadbabec0 xb:5555555555555555 5555555555555555 => xt:554409295865696b 797b899890979a55 (00000000)
vabsdub xa:8899aabbccddeeff 0011223344556677 xb:0000000000000000 0000000000000000 => xt:0011223344556677 8899aabbccddeeff (00000000)
vabsdub xa:8899aabbccddeeff 0011223344556677 xb:0102030405060708 0102030405060708 => xt:010f1f2f3f4f5f6f 8797a7b7c7d7e7f7 (00000000)
-vabsdub xa:8899aabbccddeeff 0011223344556677 xb:aaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaa => xt:aa99887766554433 2211001122334455 (00000000)
-vabsdub xa:8899aabbccddeeff 0011223344556677 xb:5555555555555555 5555555555555555 => xt:5544332211001122 33445566778899aa (00000000)
vabsdub xa:0000100800001010 0000100000001002 xb:0000000000000000 0000000000000000 => xt:0000100000001002 0000100800001010 (00000000)
vabsdub xa:0000100800001010 0000100000001002 xb:0102030405060708 0102030405060708 => xt:01020d0405060906 01020d0405060908 (00000000)
-vabsdub xa:0000100800001010 0000100000001002 xb:aaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaa => xt:aaaa9aaaaaaa9aa8 aaaa9aa2aaaa9a9a (00000000)
-vabsdub xa:0000100800001010 0000100000001002 xb:5555555555555555 5555555555555555 => xt:5555455555554553 5555454d55554545 (00000000)
vabsdub xa:00001c0800001c10 00001c0000001c02 xb:0000000000000000 0000000000000000 => xt:00001c0000001c02 00001c0800001c10 (00000000)
vabsdub xa:00001c0800001c10 00001c0000001c02 xb:0102030405060708 0102030405060708 => xt:0102190405061506 0102190405061508 (00000000)
-vabsdub xa:00001c0800001c10 00001c0000001c02 xb:aaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaa => xt:aaaa8eaaaaaa8ea8 aaaa8ea2aaaa8e9a (00000000)
-vabsdub xa:00001c0800001c10 00001c0000001c02 xb:5555555555555555 5555555555555555 => xt:5555395555553953 5555394d55553945 (00000000)
vabsdub xa:00001f0800001f10 00001f0000001f02 xb:0000000000000000 0000000000000000 => xt:00001f0000001f02 00001f0800001f10 (00000000)
vabsdub xa:00001f0800001f10 00001f0000001f02 xb:0102030405060708 0102030405060708 => xt:01021c0405061806 01021c0405061808 (00000000)
-vabsdub xa:00001f0800001f10 00001f0000001f02 xb:aaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaa => xt:aaaa8baaaaaa8ba8 aaaa8ba2aaaa8b9a (00000000)
-vabsdub xa:00001f0800001f10 00001f0000001f02 xb:5555555555555555 5555555555555555 => xt:5555365555553653 5555364d55553645 (00000000)
vabsduh xa:0000000000000000 0000000000000000 xb:0000000000000000 0000000000000000 => xt:0000000000000000 0000000000000000 (00000000)
vabsduh xa:0000000...
[truncated message content] |