diff --git a/decode-test.c b/decode-test.c
index 3e27ee3..f1f5638 100644
--- a/decode-test.c
+++ b/decode-test.c
@@ -513,6 +513,9 @@ main(int argc, char** argv)
     TEST("\xc4\xe3\x75\x22\xc0\x00", "UD"); // VEX.L != 0
     TEST("\xc4\xe3\xf5\x22\xc0\x00", "UD"); // VEX.L != 0
 
+    TEST("\xc5\xf1\x71\xd7\x02", "vpsrlw xmm1, xmm7, 0x2");
+    TEST("\xc5\xf5\x71\xd7\x02", "vpsrlw ymm1, ymm7, 0x2");
+    TEST("\xc5\xf5\x71\x00\x02", "UD"); // Must have register operand
     TEST("\xc4\xe2\x71\x45\xc2", "vpsrlvd xmm0, xmm1, xmm2");
     TEST("\xc4\xe2\x75\x45\xc2", "vpsrlvd ymm0, ymm1, ymm2");
     TEST("\xc4\xe2\xf1\x45\xc2", "vpsrlvq xmm0, xmm1, xmm2");
diff --git a/encode-test.inc b/encode-test.inc
index 575f6d8..13786a4 100644
--- a/encode-test.inc
+++ b/encode-test.inc
@@ -209,6 +209,8 @@ TEST("\xc4\x62\x7d\x19\xc2", VBROADCASTSD256rr, 0, FE_XMM8, FE_XMM2);
 TEST("\xc4\x62\x7d\x1a\xc2", VBROADCASTF128_256rr, 0, FE_XMM8, FE_XMM2);
 TEST("\xc4\xe2\x71\x9d\xc2", VFNMADD132SSrrr, 0, FE_XMM0, FE_XMM1, FE_XMM2);
 TEST("\xc4\xe2\xf1\x9d\xc2", VFNMADD132SDrrr, 0, FE_XMM0, FE_XMM1, FE_XMM2);
+TEST("\xc5\xf1\x71\xd7\x02", VPSRLW128rri, 0, FE_XMM1, FE_XMM7, 0x2);
+TEST("\xc5\xf5\x71\xd7\x02", VPSRLW256rri, 0, FE_XMM1, FE_XMM7, 0x2);
 
 // Test RVMR encoding
 TEST("\xc4\xe3\x71\x4a\xc2\x30", VBLENDVPS128rrrr, 0, FE_XMM0, FE_XMM1, FE_XMM2, FE_XMM3);
diff --git a/instrs.txt b/instrs.txt
index a76e341..a474044 100644
--- a/instrs.txt
+++ b/instrs.txt
@@ -1010,16 +1010,16 @@ VEX.F3.0f6f RM Vx Wx - - VMOVDQU F=AVX
 VEX.66.0f70 RMI Vx Wx Ib - VPSHUFD F=AVX
 VEX.F3.0f70 RMI Vx Wx Ib - VPSHUFHW F=AVX
 VEX.F2.0f70 RMI Vx Wx Ib - VPSHUFLW F=AVX
-VEX.66.0f71/2r VMI Hx Wx Ib - VPSRLW F=AVX
-VEX.66.0f71/4r VMI Hx Wx Ib - VPSRAW F=AVX
-VEX.66.0f71/6r VMI Hx Wx Ib - VPSLLW F=AVX
-VEX.66.0f72/2r VMI Hx Wx Ib - VPSRLD F=AVX
-VEX.66.0f72/4r VMI Hx Wx Ib - VPSRAD F=AVX
-VEX.66.0f72/6r VMI Hx Wx Ib - VPSLLD F=AVX
-VEX.66.0f73/2r VMI Hx Wx Ib - VPSRLQ F=AVX
-VEX.66.0f73/3r VMI Hx Wx Ib - VPSRLDQ F=AVX
-VEX.66.0f73/6r VMI Hx Wx Ib - VPSLLQ F=AVX
-VEX.66.0f73/7r VMI Hx Wx Ib - VPSLLDQ F=AVX
+VEX.66.0f71/2r VMI Hx Ux Ib - VPSRLW F=AVX
+VEX.66.0f71/4r VMI Hx Ux Ib - VPSRAW F=AVX
+VEX.66.0f71/6r VMI Hx Ux Ib - VPSLLW F=AVX
+VEX.66.0f72/2r VMI Hx Ux Ib - VPSRLD F=AVX
+VEX.66.0f72/4r VMI Hx Ux Ib - VPSRAD F=AVX
+VEX.66.0f72/6r VMI Hx Ux Ib - VPSLLD F=AVX
+VEX.66.0f73/2r VMI Hx Ux Ib - VPSRLQ F=AVX
+VEX.66.0f73/3r VMI Hx Ux Ib - VPSRLDQ F=AVX
+VEX.66.0f73/6r VMI Hx Ux Ib - VPSLLQ F=AVX
+VEX.66.0f73/7r VMI Hx Ux Ib - VPSLLDQ F=AVX
 VEX.66.0f74 RVM Vx Hx Wx - VPCMPEQB F=AVX
 VEX.66.0f75 RVM Vx Hx Wx - VPCMPEQW F=AVX
 VEX.66.0f76 RVM Vx Hx Wx - VPCMPEQD F=AVX