diff --git a/llvm/lib/Target/ARM/ARMInstrNEON.td b/llvm/lib/Target/ARM/ARMInstrNEON.td
index 35630c5a7e661df548c36b8d884ec833c21db167..18c9de22b7e8a94b5a7e29fc40a54c2ea077f5d0 100644
--- a/llvm/lib/Target/ARM/ARMInstrNEON.td
+++ b/llvm/lib/Target/ARM/ARMInstrNEON.td
@@ -5491,3 +5491,20 @@ def : NEONInstAlias<"vclt${p}.u32 $Qd, $Qn, $Qm",
                     (VCGTuv4i32 QPR:$Qd, QPR:$Qm, QPR:$Qn, pred:$p)>;
 def : NEONInstAlias<"vclt${p}.f32 $Qd, $Qn, $Qm",
                     (VCGTfq QPR:$Qd, QPR:$Qm, QPR:$Qn, pred:$p)>;
+
+// Two-operand variants for VEXT
+def : NEONInstAlias<"vext${p}.8 $Vdn, $Vm, $imm",
+                    (VEXTd8 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, imm0_7:$imm, pred:$p)>;
+def : NEONInstAlias<"vext${p}.16 $Vdn, $Vm, $imm",
+                    (VEXTd16 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, imm0_3:$imm, pred:$p)>;
+def : NEONInstAlias<"vext${p}.32 $Vdn, $Vm, $imm",
+                    (VEXTd32 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, imm0_1:$imm, pred:$p)>;
+
+def : NEONInstAlias<"vext${p}.8 $Vdn, $Vm, $imm",
+                    (VEXTq8 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, imm0_15:$imm, pred:$p)>;
+def : NEONInstAlias<"vext${p}.16 $Vdn, $Vm, $imm",
+                    (VEXTq16 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, imm0_7:$imm, pred:$p)>;
+def : NEONInstAlias<"vext${p}.32 $Vdn, $Vm, $imm",
+                    (VEXTq32 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, imm0_3:$imm, pred:$p)>;
+def : NEONInstAlias<"vext${p}.64 $Vdn, $Vm, $imm",
+                    (VEXTq64 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, imm0_1:$imm, pred:$p)>;
diff --git a/llvm/test/MC/ARM/neon-shuffle-encoding.s b/llvm/test/MC/ARM/neon-shuffle-encoding.s
index b40904c04072b40ad67882ed3b54d1efff47c1f5..d62f79a7016348c6c5cffc6c524285c6272ceae8 100644
--- a/llvm/test/MC/ARM/neon-shuffle-encoding.s
+++ b/llvm/test/MC/ARM/neon-shuffle-encoding.s
@@ -7,6 +7,14 @@
 	vext.16	d16, d17, d16, #3
 	vext.32	q8, q9, q8, #3
 
+	vext.8	d17, d16, #3
+	vext.8	d7, d11, #5
+	vext.8	q3, q8, #3
+	vext.8	q9, q4, #7
+	vext.16	d1, d26, #3
+	vext.32	q5, q8, #3
+
+
 @ CHECK: vext.8	d16, d17, d16, #3       @ encoding: [0xa0,0x03,0xf1,0xf2]
 @ CHECK: vext.8	d16, d17, d16, #5       @ encoding: [0xa0,0x05,0xf1,0xf2]
 @ CHECK: vext.8	q8, q9, q8, #3          @ encoding: [0xe0,0x03,0xf2,0xf2]
@@ -14,6 +22,13 @@
 @ CHECK: vext.16	d16, d17, d16, #3      @ encoding: [0xa0,0x06,0xf1,0xf2]
 @ CHECK: vext.32	q8, q9, q8, #3         @ encoding: [0xe0,0x0c,0xf2,0xf2]
 
+@ CHECK: vext.8	d17, d17, d16, #3       @ encoding: [0xa0,0x13,0xf1,0xf2]
+@ CHECK: vext.8	d7, d7, d11, #5         @ encoding: [0x0b,0x75,0xb7,0xf2]
+@ CHECK: vext.8	q3, q3, q8, #3          @ encoding: [0x60,0x63,0xb6,0xf2]
+@ CHECK: vext.8	q9, q9, q4, #7          @ encoding: [0xc8,0x27,0xf2,0xf2]
+@ CHECK: vext.16	d1, d1, d26, #3        @ encoding: [0x2a,0x16,0xb1,0xf2]
+@ CHECK: vext.32	q5, q5, q8, #3         @ encoding: [0x60,0xac,0xba,0xf2]
+
 	vtrn.8	d17, d16
 	vtrn.16	d17, d16
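
Note: each alias just binds $Vdn to both the destination and the first source operand of the existing VEXT instruction, so no new encodings are introduced; the assembler canonicalizes the two-operand spelling to the three-operand form. As a quick local sanity check (a sketch, assuming an llvm-mc built with the ARM target; the test's actual RUN line may use different options), the new spelling should print back in canonical form with the encoding the test expects:

    $ echo "vext.8 d17, d16, #3" | llvm-mc -triple armv7 -mattr=+neon -show-encoding
            vext.8  d17, d17, d16, #3       @ encoding: [0xa0,0x13,0xf1,0xf2]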