//===-- X86InstrSSE.td - Describe the X86 Instruction Set ------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by Evan Cheng and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the X86 SSE instruction set, defining the instructions
// and the properties of the instructions that are needed for code generation,
// machine code emission, and analysis.
//
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
// SSE specific DAG Nodes.
//===----------------------------------------------------------------------===//
def X86loadp : SDNode<"X86ISD::LOAD_PACK", SDTLoad,
[SDNPHasChain]>;
def X86fand : SDNode<"X86ISD::FAND", SDTFPBinOp,
                     [SDNPCommutative, SDNPAssociative]>;
def X86fxor : SDNode<"X86ISD::FXOR", SDTFPBinOp,
                     [SDNPCommutative, SDNPAssociative]>;
def X86comi : SDNode<"X86ISD::COMI", SDTX86CmpTest,
[SDNPOutFlag]>;
def X86ucomi : SDNode<"X86ISD::UCOMI", SDTX86CmpTest,
[SDNPOutFlag]>;
def X86s2vec : SDNode<"X86ISD::S2VEC",
SDTypeProfile<1, 1, []>, []>;
def X86zexts2vec : SDNode<"X86ISD::ZEXT_S2VEC",
SDTypeProfile<1, 1, []>, []>;
def X86pextrw : SDNode<"X86ISD::PEXTRW",
SDTypeProfile<1, 2, []>, []>;
def X86pinsrw : SDNode<"X86ISD::PINSRW",
SDTypeProfile<1, 3, []>, []>;
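// For illustration: X86pextrw/X86pinsrw mirror the PEXTRW/PINSRW operations,
// e.g. (X86pextrw (v8i16 VR128:$src), (i32 1)) reads 16-bit element 1 into an
// i32, and X86pinsrw writes a 16-bit value back at a given element index.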
//===----------------------------------------------------------------------===//
// SSE pattern fragments
//===----------------------------------------------------------------------===//
def X86loadpf32 : PatFrag<(ops node:$ptr), (f32 (X86loadp node:$ptr))>;
def X86loadpf64 : PatFrag<(ops node:$ptr), (f64 (X86loadp node:$ptr))>;
def loadv4f32 : PatFrag<(ops node:$ptr), (v4f32 (load node:$ptr))>;
def loadv2f64 : PatFrag<(ops node:$ptr), (v2f64 (load node:$ptr))>;
def loadv16i8 : PatFrag<(ops node:$ptr), (v16i8 (load node:$ptr))>;
def loadv8i16 : PatFrag<(ops node:$ptr), (v8i16 (load node:$ptr))>;
def loadv4i32 : PatFrag<(ops node:$ptr), (v4i32 (load node:$ptr))>;
def loadv2i64 : PatFrag<(ops node:$ptr), (v2i64 (load node:$ptr))>;
def bc_v4f32 : PatFrag<(ops node:$in), (v4f32 (bitconvert node:$in))>;
def bc_v2f64 : PatFrag<(ops node:$in), (v2f64 (bitconvert node:$in))>;
def bc_v16i8 : PatFrag<(ops node:$in), (v16i8 (bitconvert node:$in))>;
def bc_v8i16 : PatFrag<(ops node:$in), (v8i16 (bitconvert node:$in))>;
def bc_v4i32 : PatFrag<(ops node:$in), (v4i32 (bitconvert node:$in))>;
def bc_v2i64 : PatFrag<(ops node:$in), (v2i64 (bitconvert node:$in))>;
def fp32imm0 : PatLeaf<(f32 fpimm), [{
  return N->isExactlyValue(+0.0);
}]>;
def fp64imm0 : PatLeaf<(f64 fpimm), [{
  return N->isExactlyValue(+0.0);
}]>;
def PSxLDQ_imm : SDNodeXForm<imm, [{
// Transformation function: imm >> 3
return getI32Imm(N->getValue() >> 3);
}]>;
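// e.g. the DAG presents the shift amount in bits; imm >> 3 converts it to the
// byte count that PSLLDQ/PSRLDQ actually encode (32 bits -> 4 bytes).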
// SHUFFLE_get_shuf_imm xform function: convert vector_shuffle mask to PSHUF*,
// SHUFP* etc. imm.
def SHUFFLE_get_shuf_imm : SDNodeXForm<build_vector, [{
  return getI8Imm(X86::getShuffleSHUFImmediate(N));
}]>;
// SHUFFLE_get_pshufhw_imm xform function: convert vector_shuffle mask to
// PSHUFHW imm.
def SHUFFLE_get_pshufhw_imm : SDNodeXForm<build_vector, [{
return getI8Imm(X86::getShufflePSHUFHWImmediate(N));
}]>;
// SHUFFLE_get_pshuflw_imm xform function: convert vector_shuffle mask to
// PSHUFLW imm.
def SHUFFLE_get_pshuflw_imm : SDNodeXForm<build_vector, [{
return getI8Imm(X86::getShufflePSHUFLWImmediate(N));
}]>;
def SSE_splat_mask : PatLeaf<(build_vector), [{
return X86::isSplatMask(N);
}], SHUFFLE_get_shuf_imm>;
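// Illustration: a splat of element 0 across a v4f32 becomes the shuffle
// immediate 0x00, since each of the four 2-bit fields selects element 0.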
def MOVLHPS_shuffle_mask : PatLeaf<(build_vector), [{
return X86::isMOVLHPSMask(N);
}]>;
def MOVHLPS_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isMOVHLPSMask(N);
}]>;
def MOVHP_shuffle_mask : PatLeaf<(build_vector), [{
return X86::isMOVHPMask(N);
}]>;
def MOVLP_shuffle_mask : PatLeaf<(build_vector), [{
return X86::isMOVLPMask(N);
}]>;
def MOVS_shuffle_mask : PatLeaf<(build_vector), [{
return X86::isMOVSMask(N);
}]>;
def UNPCKL_shuffle_mask : PatLeaf<(build_vector), [{
return X86::isUNPCKLMask(N);
}]>;
def UNPCKH_shuffle_mask : PatLeaf<(build_vector), [{
return X86::isUNPCKHMask(N);
}]>;
def UNPCKL_v_undef_shuffle_mask : PatLeaf<(build_vector), [{
return X86::isUNPCKL_v_undef_Mask(N);
}]>;
def PSHUFD_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isPSHUFDMask(N);
}], SHUFFLE_get_shuf_imm>;
def PSHUFHW_shuffle_mask : PatLeaf<(build_vector), [{
return X86::isPSHUFHWMask(N);
}], SHUFFLE_get_pshufhw_imm>;
def PSHUFLW_shuffle_mask : PatLeaf<(build_vector), [{
return X86::isPSHUFLWMask(N);
}], SHUFFLE_get_pshuflw_imm>;
def SHUFP_unary_shuffle_mask : PatLeaf<(build_vector), [{
return X86::isPSHUFDMask(N);
}], SHUFFLE_get_shuf_imm>;
def SHUFP_shuffle_mask : PatLeaf<(build_vector), [{
return X86::isSHUFPMask(N);
}], SHUFFLE_get_shuf_imm>;
def PSHUFD_binary_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isSHUFPMask(N);
}], SHUFFLE_get_shuf_imm>;
//===----------------------------------------------------------------------===//
// SSE scalar FP Instructions
//===----------------------------------------------------------------------===//
// Instruction templates
// SSI - SSE1 instructions with XS prefix.
// SDI - SSE2 instructions with XD prefix.
// PSI - SSE1 instructions with TB prefix.
// PDI - SSE2 instructions with TB and OpSize prefixes.
// PSIi8 - SSE1 instructions with ImmT == Imm8 and TB prefix.
// PDIi8 - SSE2 instructions with ImmT == Imm8 and TB and OpSize prefixes.
// S3SI - SSE3 instructions with XD prefix.
// S3DI - SSE3 instructions with TB and OpSize prefixes.
class SSI<bits<8> o, Format F, dag ops, string asm, list<dag> pattern>
: I<o, F, ops, asm, pattern>, XS, Requires<[HasSSE1]>;
class SDI<bits<8> o, Format F, dag ops, string asm, list<dag> pattern>
: I<o, F, ops, asm, pattern>, XD, Requires<[HasSSE2]>;
class PSI<bits<8> o, Format F, dag ops, string asm, list<dag> pattern>
: I<o, F, ops, asm, pattern>, TB, Requires<[HasSSE1]>;
class PDI<bits<8> o, Format F, dag ops, string asm, list<dag> pattern>
: I<o, F, ops, asm, pattern>, TB, OpSize, Requires<[HasSSE2]>;
class PSIi8<bits<8> o, Format F, dag ops, string asm, list<dag> pattern>
: X86Inst<o, F, Imm8, ops, asm>, TB, Requires<[HasSSE1]> {
let Pattern = pattern;
}
class PDIi8<bits<8> o, Format F, dag ops, string asm, list<dag> pattern>
: X86Inst<o, F, Imm8, ops, asm>, TB, OpSize, Requires<[HasSSE2]> {
let Pattern = pattern;
}
class S3SI<bits<8> o, Format F, dag ops, string asm, list<dag> pattern>
: I<o, F, ops, asm, pattern>, XD, Requires<[HasSSE3]>;
class S3DI<bits<8> o, Format F, dag ops, string asm, list<dag> pattern>
: I<o, F, ops, asm, pattern>, TB, OpSize, Requires<[HasSSE3]>;
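// Usage sketch (FOOSSrr is a hypothetical name, not defined elsewhere):
//   def FOOSSrr : SSI<0x58, MRMSrcReg, (ops FR32:$dst, FR32:$src),
//                     "foo {$src, $dst|$dst, $src}", []>;
// picks up the XS (0xF3) prefix from SSI and is only selected when the
// HasSSE1 predicate holds.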
//===----------------------------------------------------------------------===//
// Helpers for defining instructions that directly correspond to intrinsics.
class SS_Intr<bits<8> o, string asm, Intrinsic IntId>
: SSI<o, MRMSrcReg, (ops VR128:$dst, VR128:$src), asm,
[(set VR128:$dst, (v4f32 (IntId VR128:$src)))]>;
class SS_Intm<bits<8> o, string asm, Intrinsic IntId>
: SSI<o, MRMSrcMem, (ops VR128:$dst, f32mem:$src), asm,
[(set VR128:$dst, (v4f32 (IntId (load addr:$src))))]>;
class SD_Intr<bits<8> o, string asm, Intrinsic IntId>
: SDI<o, MRMSrcReg, (ops VR128:$dst, VR128:$src), asm,
[(set VR128:$dst, (v2f64 (IntId VR128:$src)))]>;
class SD_Intm<bits<8> o, string asm, Intrinsic IntId>
: SDI<o, MRMSrcMem, (ops VR128:$dst, f64mem:$src), asm,
[(set VR128:$dst, (v2f64 (IntId (load addr:$src))))]>;
class SS_Intrr<bits<8> o, string asm, Intrinsic IntId>
: SSI<o, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2), asm,
[(set VR128:$dst, (v4f32 (IntId VR128:$src1, VR128:$src2)))]>;
class SS_Intrm<bits<8> o, string asm, Intrinsic IntId>
: SSI<o, MRMSrcMem, (ops VR128:$dst, VR128:$src1, f32mem:$src2), asm,
[(set VR128:$dst, (v4f32 (IntId VR128:$src1, (load addr:$src2))))]>;
class SD_Intrr<bits<8> o, string asm, Intrinsic IntId>
: SDI<o, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2), asm,
[(set VR128:$dst, (v2f64 (IntId VR128:$src1, VR128:$src2)))]>;
class SD_Intrm<bits<8> o, string asm, Intrinsic IntId>
: SDI<o, MRMSrcMem, (ops VR128:$dst, VR128:$src1, f64mem:$src2), asm,
[(set VR128:$dst, (v2f64 (IntId VR128:$src1, (load addr:$src2))))]>;
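// e.g. SS_Intrr<0x58, "addss ...", int_x86_sse_add_ss> yields a two-operand
// register-register SSI whose pattern maps the intrinsic call straight onto
// the instruction; the *_Intrm variants fold the load of $src2 instead.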
class PS_Intr<bits<8> o, string asm, Intrinsic IntId>
: PSI<o, MRMSrcReg, (ops VR128:$dst, VR128:$src), asm,
[(set VR128:$dst, (IntId VR128:$src))]>;
class PS_Intm<bits<8> o, string asm, Intrinsic IntId>
: PSI<o, MRMSrcMem, (ops VR128:$dst, f32mem:$src), asm,
[(set VR128:$dst, (IntId (loadv4f32 addr:$src)))]>;
class PD_Intr<bits<8> o, string asm, Intrinsic IntId>
: PDI<o, MRMSrcReg, (ops VR128:$dst, VR128:$src), asm,
[(set VR128:$dst, (IntId VR128:$src))]>;
class PD_Intm<bits<8> o, string asm, Intrinsic IntId>
: PDI<o, MRMSrcMem, (ops VR128:$dst, f64mem:$src), asm,
[(set VR128:$dst, (IntId (loadv2f64 addr:$src)))]>;
class PS_Intrr<bits<8> o, string asm, Intrinsic IntId>
: PSI<o, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2), asm,
[(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]>;
class PS_Intrm<bits<8> o, string asm, Intrinsic IntId>
: PSI<o, MRMSrcMem, (ops VR128:$dst, VR128:$src1, f32mem:$src2), asm,
[(set VR128:$dst, (IntId VR128:$src1, (loadv4f32 addr:$src2)))]>;
class PD_Intrr<bits<8> o, string asm, Intrinsic IntId>
: PDI<o, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2), asm,
[(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]>;
class PD_Intrm<bits<8> o, string asm, Intrinsic IntId>
: PDI<o, MRMSrcMem, (ops VR128:$dst, VR128:$src1, f64mem:$src2), asm,
[(set VR128:$dst, (IntId VR128:$src1, (loadv2f64 addr:$src2)))]>;
class S3S_Intrr<bits<8> o, string asm, Intrinsic IntId>
: S3SI<o, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2), asm,
[(set VR128:$dst, (v4f32 (IntId VR128:$src1, VR128:$src2)))]>;
class S3S_Intrm<bits<8> o, string asm, Intrinsic IntId>
: S3SI<o, MRMSrcMem, (ops VR128:$dst, VR128:$src1, f128mem:$src2), asm,
[(set VR128:$dst, (v4f32 (IntId VR128:$src1,
(loadv4f32 addr:$src2))))]>;
class S3D_Intrr<bits<8> o, string asm, Intrinsic IntId>
: S3DI<o, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2), asm,
[(set VR128:$dst, (v2f64 (IntId VR128:$src1, VR128:$src2)))]>;
class S3D_Intrm<bits<8> o, string asm, Intrinsic IntId>
: S3DI<o, MRMSrcMem, (ops VR128:$dst, VR128:$src1, f128mem:$src2), asm,
[(set VR128:$dst, (v2f64 (IntId VR128:$src1,
(loadv2f64 addr:$src2))))]>;
// Some 'special' instructions
def IMPLICIT_DEF_FR32 : I<0, Pseudo, (ops FR32:$dst),
"#IMPLICIT_DEF $dst",
[(set FR32:$dst, (undef))]>, Requires<[HasSSE2]>;
def IMPLICIT_DEF_FR64 : I<0, Pseudo, (ops FR64:$dst),
"#IMPLICIT_DEF $dst",
[(set FR64:$dst, (undef))]>, Requires<[HasSSE2]>;
// CMOV* - Used to implement the SSE SELECT DAG operation. Expanded by the
// scheduler into a branch sequence.
let usesCustomDAGSchedInserter = 1 in { // Expanded by the scheduler.
def CMOV_FR32 : I<0, Pseudo,
(ops FR32:$dst, FR32:$t, FR32:$f, i8imm:$cond),
"#CMOV_FR32 PSEUDO!",
[(set FR32:$dst, (X86cmov FR32:$t, FR32:$f, imm:$cond))]>;
def CMOV_FR64 : I<0, Pseudo,
(ops FR64:$dst, FR64:$t, FR64:$f, i8imm:$cond),
"#CMOV_FR64 PSEUDO!",
[(set FR64:$dst, (X86cmov FR64:$t, FR64:$f, imm:$cond))]>;
def CMOV_V4F32 : I<0, Pseudo,
(ops VR128:$dst, VR128:$t, VR128:$f, i8imm:$cond),
"#CMOV_V4F32 PSEUDO!",
[(set VR128:$dst,
(v4f32 (X86cmov VR128:$t, VR128:$f, imm:$cond)))]>;
def CMOV_V2F64 : I<0, Pseudo,
(ops VR128:$dst, VR128:$t, VR128:$f, i8imm:$cond),
"#CMOV_V2F64 PSEUDO!",
[(set VR128:$dst,
(v2f64 (X86cmov VR128:$t, VR128:$f, imm:$cond)))]>;
def CMOV_V2I64 : I<0, Pseudo,
(ops VR128:$dst, VR128:$t, VR128:$f, i8imm:$cond),
"#CMOV_V2I64 PSEUDO!",
[(set VR128:$dst,
(v2i64 (X86cmov VR128:$t, VR128:$f, imm:$cond)))]>;
}
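// Sketch of that expansion: each pseudo becomes a conditional branch over two
// blocks, with a PHI in the join block selecting $t or $f into $dst according
// to $cond.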
// Move Instructions
def MOVSSrr : SSI<0x10, MRMSrcReg, (ops FR32:$dst, FR32:$src),
"movss {$src, $dst|$dst, $src}", []>;
def MOVSSrm : SSI<0x10, MRMSrcMem, (ops FR32:$dst, f32mem:$src),
                  "movss {$src, $dst|$dst, $src}",
                  [(set FR32:$dst, (loadf32 addr:$src))]>;
def MOVSDrr : SDI<0x10, MRMSrcReg, (ops FR64:$dst, FR64:$src),
                  "movsd {$src, $dst|$dst, $src}", []>;
def MOVSDrm : SDI<0x10, MRMSrcMem, (ops FR64:$dst, f64mem:$src),
                  "movsd {$src, $dst|$dst, $src}",
                  [(set FR64:$dst, (loadf64 addr:$src))]>;
def MOVSSmr : SSI<0x11, MRMDestMem, (ops f32mem:$dst, FR32:$src),
                  "movss {$src, $dst|$dst, $src}",
                  [(store FR32:$src, addr:$dst)]>;
def MOVSDmr : SDI<0x11, MRMDestMem, (ops f64mem:$dst, FR64:$src),
                  "movsd {$src, $dst|$dst, $src}",
                  [(store FR64:$src, addr:$dst)]>;
// Arithmetic instructions
let isTwoAddress = 1 in {
let isCommutable = 1 in {
def ADDSSrr : SSI<0x58, MRMSrcReg, (ops FR32:$dst, FR32:$src1, FR32:$src2),
                  "addss {$src2, $dst|$dst, $src2}",
                  [(set FR32:$dst, (fadd FR32:$src1, FR32:$src2))]>;
def ADDSDrr : SDI<0x58, MRMSrcReg, (ops FR64:$dst, FR64:$src1, FR64:$src2),
                  "addsd {$src2, $dst|$dst, $src2}",
                  [(set FR64:$dst, (fadd FR64:$src1, FR64:$src2))]>;
def MULSSrr : SSI<0x59, MRMSrcReg, (ops FR32:$dst, FR32:$src1, FR32:$src2),
                  "mulss {$src2, $dst|$dst, $src2}",
                  [(set FR32:$dst, (fmul FR32:$src1, FR32:$src2))]>;
def MULSDrr : SDI<0x59, MRMSrcReg, (ops FR64:$dst, FR64:$src1, FR64:$src2),
                  "mulsd {$src2, $dst|$dst, $src2}",
                  [(set FR64:$dst, (fmul FR64:$src1, FR64:$src2))]>;
}
def ADDSSrm : SSI<0x58, MRMSrcMem, (ops FR32:$dst, FR32:$src1, f32mem:$src2),
                  "addss {$src2, $dst|$dst, $src2}",
                  [(set FR32:$dst, (fadd FR32:$src1, (loadf32 addr:$src2)))]>;
def ADDSDrm : SDI<0x58, MRMSrcMem, (ops FR64:$dst, FR64:$src1, f64mem:$src2),
                  "addsd {$src2, $dst|$dst, $src2}",
                  [(set FR64:$dst, (fadd FR64:$src1, (loadf64 addr:$src2)))]>;
def MULSSrm : SSI<0x59, MRMSrcMem, (ops FR32:$dst, FR32:$src1, f32mem:$src2),
                  "mulss {$src2, $dst|$dst, $src2}",
                  [(set FR32:$dst, (fmul FR32:$src1, (loadf32 addr:$src2)))]>;
def MULSDrm : SDI<0x59, MRMSrcMem, (ops FR64:$dst, FR64:$src1, f64mem:$src2),
                  "mulsd {$src2, $dst|$dst, $src2}",
                  [(set FR64:$dst, (fmul FR64:$src1, (loadf64 addr:$src2)))]>;
def DIVSSrr : SSI<0x5E, MRMSrcReg, (ops FR32:$dst, FR32:$src1, FR32:$src2),
                  "divss {$src2, $dst|$dst, $src2}",
                  [(set FR32:$dst, (fdiv FR32:$src1, FR32:$src2))]>;
def DIVSSrm : SSI<0x5E, MRMSrcMem, (ops FR32:$dst, FR32:$src1, f32mem:$src2),
                  "divss {$src2, $dst|$dst, $src2}",
                  [(set FR32:$dst, (fdiv FR32:$src1, (loadf32 addr:$src2)))]>;
def DIVSDrr : SDI<0x5E, MRMSrcReg, (ops FR64:$dst, FR64:$src1, FR64:$src2),
                  "divsd {$src2, $dst|$dst, $src2}",
                  [(set FR64:$dst, (fdiv FR64:$src1, FR64:$src2))]>;
def DIVSDrm : SDI<0x5E, MRMSrcMem, (ops FR64:$dst, FR64:$src1, f64mem:$src2),
                  "divsd {$src2, $dst|$dst, $src2}",
                  [(set FR64:$dst, (fdiv FR64:$src1, (loadf64 addr:$src2)))]>;
def SUBSSrr : SSI<0x5C, MRMSrcReg, (ops FR32:$dst, FR32:$src1, FR32:$src2),
                  "subss {$src2, $dst|$dst, $src2}",
                  [(set FR32:$dst, (fsub FR32:$src1, FR32:$src2))]>;
def SUBSSrm : SSI<0x5C, MRMSrcMem, (ops FR32:$dst, FR32:$src1, f32mem:$src2),
                  "subss {$src2, $dst|$dst, $src2}",
                  [(set FR32:$dst, (fsub FR32:$src1, (loadf32 addr:$src2)))]>;
def SUBSDrr : SDI<0x5C, MRMSrcReg, (ops FR64:$dst, FR64:$src1, FR64:$src2),
                  "subsd {$src2, $dst|$dst, $src2}",
                  [(set FR64:$dst, (fsub FR64:$src1, FR64:$src2))]>;
def SUBSDrm : SDI<0x5C, MRMSrcMem, (ops FR64:$dst, FR64:$src1, f64mem:$src2),
                  "subsd {$src2, $dst|$dst, $src2}",
                  [(set FR64:$dst, (fsub FR64:$src1, (loadf64 addr:$src2)))]>;
}
def SQRTSSr : SSI<0x51, MRMSrcReg, (ops FR32:$dst, FR32:$src),
"sqrtss {$src, $dst|$dst, $src}",
[(set FR32:$dst, (fsqrt FR32:$src))]>;
def SQRTSSm : SSI<0x51, MRMSrcMem, (ops FR32:$dst, f32mem:$src),
"sqrtss {$src, $dst|$dst, $src}",
[(set FR32:$dst, (fsqrt (loadf32 addr:$src)))]>;
def SQRTSDr : SDI<0x51, MRMSrcReg, (ops FR64:$dst, FR64:$src),
"sqrtsd {$src, $dst|$dst, $src}",
[(set FR64:$dst, (fsqrt FR64:$src))]>;
def SQRTSDm : SDI<0x51, MRMSrcMem, (ops FR64:$dst, f64mem:$src),
"sqrtsd {$src, $dst|$dst, $src}",
[(set FR64:$dst, (fsqrt (loadf64 addr:$src)))]>;
def RSQRTSSr : SSI<0x52, MRMSrcReg, (ops FR32:$dst, FR32:$src),
"rsqrtss {$src, $dst|$dst, $src}", []>;
def RSQRTSSm : SSI<0x52, MRMSrcMem, (ops FR32:$dst, f32mem:$src),
"rsqrtss {$src, $dst|$dst, $src}", []>;
def RCPSSr : SSI<0x53, MRMSrcReg, (ops FR32:$dst, FR32:$src),
"rcpss {$src, $dst|$dst, $src}", []>;
def RCPSSm : SSI<0x53, MRMSrcMem, (ops FR32:$dst, f32mem:$src),
"rcpss {$src, $dst|$dst, $src}", []>;
let isTwoAddress = 1 in {
def MAXSSrr : SSI<0x5F, MRMSrcReg, (ops FR32:$dst, FR32:$src1, FR32:$src2),
"maxss {$src2, $dst|$dst, $src2}", []>;
def MAXSSrm : SSI<0x5F, MRMSrcMem, (ops FR32:$dst, FR32:$src1, f32mem:$src2),
"maxss {$src2, $dst|$dst, $src2}", []>;
def MAXSDrr : SDI<0x5F, MRMSrcReg, (ops FR64:$dst, FR64:$src1, FR64:$src2),
"maxsd {$src2, $dst|$dst, $src2}", []>;
def MAXSDrm : SDI<0x5F, MRMSrcMem, (ops FR64:$dst, FR64:$src1, f64mem:$src2),
"maxsd {$src2, $dst|$dst, $src2}", []>;
def MINSSrr : SSI<0x5D, MRMSrcReg, (ops FR32:$dst, FR32:$src1, FR32:$src2),
"minss {$src2, $dst|$dst, $src2}", []>;
def MINSSrm : SSI<0x5D, MRMSrcMem, (ops FR32:$dst, FR32:$src1, f32mem:$src2),
"minss {$src2, $dst|$dst, $src2}", []>;
def MINSDrr : SDI<0x5D, MRMSrcReg, (ops FR64:$dst, FR64:$src1, FR64:$src2),
"minsd {$src2, $dst|$dst, $src2}", []>;
def MINSDrm : SDI<0x5D, MRMSrcMem, (ops FR64:$dst, FR64:$src1, f64mem:$src2),
"minsd {$src2, $dst|$dst, $src2}", []>;
}
// Aliases to match intrinsics which expect XMM operand(s).
let isTwoAddress = 1 in {
let isCommutable = 1 in {
def Int_ADDSSrr : SS_Intrr<0x58, "addss {$src2, $dst|$dst, $src2}",
int_x86_sse_add_ss>;
def Int_ADDSDrr : SD_Intrr<0x58, "addsd {$src2, $dst|$dst, $src2}",
int_x86_sse2_add_sd>;
def Int_MULSSrr : SS_Intrr<0x59, "mulss {$src2, $dst|$dst, $src2}",
int_x86_sse_mul_ss>;
def Int_MULSDrr : SD_Intrr<0x59, "mulsd {$src2, $dst|$dst, $src2}",
int_x86_sse2_mul_sd>;
}
def Int_ADDSSrm : SS_Intrm<0x58, "addss {$src2, $dst|$dst, $src2}",
int_x86_sse_add_ss>;
def Int_ADDSDrm : SD_Intrm<0x58, "addsd {$src2, $dst|$dst, $src2}",
int_x86_sse2_add_sd>;
def Int_MULSSrm : SS_Intrm<0x59, "mulss {$src2, $dst|$dst, $src2}",
int_x86_sse_mul_ss>;
def Int_MULSDrm : SD_Intrm<0x59, "mulsd {$src2, $dst|$dst, $src2}",
int_x86_sse2_mul_sd>;
def Int_DIVSSrr : SS_Intrr<0x5E, "divss {$src2, $dst|$dst, $src2}",
int_x86_sse_div_ss>;
def Int_DIVSSrm : SS_Intrm<0x5E, "divss {$src2, $dst|$dst, $src2}",
int_x86_sse_div_ss>;
def Int_DIVSDrr : SD_Intrr<0x5E, "divsd {$src2, $dst|$dst, $src2}",
int_x86_sse2_div_sd>;
def Int_DIVSDrm : SD_Intrm<0x5E, "divsd {$src2, $dst|$dst, $src2}",
int_x86_sse2_div_sd>;
def Int_SUBSSrr : SS_Intrr<0x5C, "subss {$src2, $dst|$dst, $src2}",
int_x86_sse_sub_ss>;
def Int_SUBSSrm : SS_Intrm<0x5C, "subss {$src2, $dst|$dst, $src2}",
int_x86_sse_sub_ss>;
def Int_SUBSDrr : SD_Intrr<0x5C, "subsd {$src2, $dst|$dst, $src2}",
int_x86_sse2_sub_sd>;
def Int_SUBSDrm : SD_Intrm<0x5C, "subsd {$src2, $dst|$dst, $src2}",
int_x86_sse2_sub_sd>;
}
def Int_SQRTSSr : SS_Intr<0x51, "sqrtss {$src, $dst|$dst, $src}",
int_x86_sse_sqrt_ss>;
def Int_SQRTSSm : SS_Intm<0x51, "sqrtss {$src, $dst|$dst, $src}",
int_x86_sse_sqrt_ss>;
def Int_SQRTSDr : SD_Intr<0x51, "sqrtsd {$src, $dst|$dst, $src}",
int_x86_sse2_sqrt_sd>;
def Int_SQRTSDm : SD_Intm<0x51, "sqrtsd {$src, $dst|$dst, $src}",
int_x86_sse2_sqrt_sd>;
def Int_RSQRTSSr : SS_Intr<0x52, "rsqrtss {$src, $dst|$dst, $src}",
int_x86_sse_rsqrt_ss>;
def Int_RSQRTSSm : SS_Intm<0x52, "rsqrtss {$src, $dst|$dst, $src}",
int_x86_sse_rsqrt_ss>;
def Int_RCPSSr : SS_Intr<0x53, "rcpss {$src, $dst|$dst, $src}",
int_x86_sse_rcp_ss>;
def Int_RCPSSm : SS_Intm<0x53, "rcpss {$src, $dst|$dst, $src}",
int_x86_sse_rcp_ss>;
let isTwoAddress = 1 in {
def Int_MAXSSrr : SS_Intrr<0x5F, "maxss {$src2, $dst|$dst, $src2}",
                           int_x86_sse_max_ss>;
def Int_MAXSSrm : SS_Intrm<0x5F, "maxss {$src2, $dst|$dst, $src2}",
                           int_x86_sse_max_ss>;
def Int_MAXSDrr : SD_Intrr<0x5F, "maxsd {$src2, $dst|$dst, $src2}",
                           int_x86_sse2_max_sd>;
def Int_MAXSDrm : SD_Intrm<0x5F, "maxsd {$src2, $dst|$dst, $src2}",
                           int_x86_sse2_max_sd>;
def Int_MINSSrr : SS_Intrr<0x5D, "minss {$src2, $dst|$dst, $src2}",
                           int_x86_sse_min_ss>;
def Int_MINSSrm : SS_Intrm<0x5D, "minss {$src2, $dst|$dst, $src2}",
                           int_x86_sse_min_ss>;
def Int_MINSDrr : SD_Intrr<0x5D, "minsd {$src2, $dst|$dst, $src2}",
                           int_x86_sse2_min_sd>;
def Int_MINSDrm : SD_Intrm<0x5D, "minsd {$src2, $dst|$dst, $src2}",
                           int_x86_sse2_min_sd>;
}
// Conversion instructions
def CVTTSS2SIrr: SSI<0x2C, MRMSrcReg, (ops R32:$dst, FR32:$src),
"cvttss2si {$src, $dst|$dst, $src}",
[(set R32:$dst, (fp_to_sint FR32:$src))]>;
def CVTTSS2SIrm: SSI<0x2C, MRMSrcMem, (ops R32:$dst, f32mem:$src),
"cvttss2si {$src, $dst|$dst, $src}",
[(set R32:$dst, (fp_to_sint (loadf32 addr:$src)))]>;
def CVTTSD2SIrr: SDI<0x2C, MRMSrcReg, (ops R32:$dst, FR64:$src),
"cvttsd2si {$src, $dst|$dst, $src}",
[(set R32:$dst, (fp_to_sint FR64:$src))]>;
def CVTTSD2SIrm: SDI<0x2C, MRMSrcMem, (ops R32:$dst, f64mem:$src),
"cvttsd2si {$src, $dst|$dst, $src}",
[(set R32:$dst, (fp_to_sint (loadf64 addr:$src)))]>;
def CVTSD2SSrr: SDI<0x5A, MRMSrcReg, (ops FR32:$dst, FR64:$src),
"cvtsd2ss {$src, $dst|$dst, $src}",
[(set FR32:$dst, (fround FR64:$src))]>;
def CVTSD2SSrm: SDI<0x5A, MRMSrcMem, (ops FR32:$dst, f64mem:$src),
"cvtsd2ss {$src, $dst|$dst, $src}",
[(set FR32:$dst, (fround (loadf64 addr:$src)))]>;
def CVTSI2SSrr: SSI<0x2A, MRMSrcReg, (ops FR32:$dst, R32:$src),
"cvtsi2ss {$src, $dst|$dst, $src}",
[(set FR32:$dst, (sint_to_fp R32:$src))]>;
def CVTSI2SSrm: SSI<0x2A, MRMSrcMem, (ops FR32:$dst, i32mem:$src),
"cvtsi2ss {$src, $dst|$dst, $src}",
[(set FR32:$dst, (sint_to_fp (loadi32 addr:$src)))]>;
def CVTSI2SDrr: SDI<0x2A, MRMSrcReg, (ops FR64:$dst, R32:$src),
"cvtsi2sd {$src, $dst|$dst, $src}",
[(set FR64:$dst, (sint_to_fp R32:$src))]>;
def CVTSI2SDrm: SDI<0x2A, MRMSrcMem, (ops FR64:$dst, i32mem:$src),
"cvtsi2sd {$src, $dst|$dst, $src}",
[(set FR64:$dst, (sint_to_fp (loadi32 addr:$src)))]>;
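// Note: the cvtt* forms above truncate toward zero like a C cast, so
// cvttss2si turns 3.7f into 3 and -3.7f into -3.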
// SSE2 instructions with XS prefix
def CVTSS2SDrr: I<0x5A, MRMSrcReg, (ops FR64:$dst, FR32:$src),
"cvtss2sd {$src, $dst|$dst, $src}",
[(set FR64:$dst, (fextend FR32:$src))]>, XS,
Requires<[HasSSE2]>;
def CVTSS2SDrm: I<0x5A, MRMSrcMem, (ops FR64:$dst, f32mem:$src),
"cvtss2sd {$src, $dst|$dst, $src}",
[(set FR64:$dst, (fextend (loadf32 addr:$src)))]>, XS,
Requires<[HasSSE2]>;
// Match intrinsics which expect XMM operand(s).
def CVTSS2SIrr: SSI<0x2D, MRMSrcReg, (ops R32:$dst, VR128:$src),
"cvtss2si {$src, $dst|$dst, $src}",
[(set R32:$dst, (int_x86_sse_cvtss2si VR128:$src))]>;
def CVTSS2SIrm: SSI<0x2D, MRMSrcMem, (ops R32:$dst, f32mem:$src),
"cvtss2si {$src, $dst|$dst, $src}",
[(set R32:$dst, (int_x86_sse_cvtss2si
(loadv4f32 addr:$src)))]>;
// Aliases for intrinsics
def Int_CVTTSS2SIrr: SSI<0x2C, MRMSrcReg, (ops R32:$dst, VR128:$src),
"cvttss2si {$src, $dst|$dst, $src}",
[(set R32:$dst, (int_x86_sse_cvttss2si VR128:$src))]>;
def Int_CVTTSS2SIrm: SSI<0x2C, MRMSrcMem, (ops R32:$dst, f32mem:$src),
"cvttss2si {$src, $dst|$dst, $src}",
[(set R32:$dst, (int_x86_sse_cvttss2si
(loadv4f32 addr:$src)))]>;
def Int_CVTTSD2SIrr: SDI<0x2C, MRMSrcReg, (ops R32:$dst, VR128:$src),
"cvttsd2si {$src, $dst|$dst, $src}",
[(set R32:$dst, (int_x86_sse2_cvttsd2si VR128:$src))]>;
def Int_CVTTSD2SIrm: SDI<0x2C, MRMSrcMem, (ops R32:$dst, f128mem:$src),
                     "cvttsd2si {$src, $dst|$dst, $src}",
                     [(set R32:$dst, (int_x86_sse2_cvttsd2si
                                      (loadv2f64 addr:$src)))]>;
let isTwoAddress = 1 in {
def Int_CVTSI2SSrr: SSI<0x2A, MRMSrcReg,
(ops VR128:$dst, VR128:$src1, R32:$src2),
"cvtsi2ss {$src2, $dst|$dst, $src2}",
[(set VR128:$dst, (int_x86_sse_cvtsi2ss VR128:$src1,
R32:$src2))]>;
def Int_CVTSI2SSrm: SSI<0x2A, MRMSrcMem,
(ops VR128:$dst, VR128:$src1, i32mem:$src2),
"cvtsi2ss {$src2, $dst|$dst, $src2}",
[(set VR128:$dst, (int_x86_sse_cvtsi2ss VR128:$src1,
(loadi32 addr:$src2)))]>;
}
// Comparison instructions
let isTwoAddress = 1 in {
def CMPSSrr : SSI<0xC2, MRMSrcReg,
(ops FR32:$dst, FR32:$src1, FR32:$src, SSECC:$cc),
"cmp${cc}ss {$src, $dst|$dst, $src}",
[]>;
def CMPSSrm : SSI<0xC2, MRMSrcMem,
(ops FR32:$dst, FR32:$src1, f32mem:$src, SSECC:$cc),
"cmp${cc}ss {$src, $dst|$dst, $src}", []>;
def CMPSDrr : SDI<0xC2, MRMSrcReg,
(ops FR64:$dst, FR64:$src1, FR64:$src, SSECC:$cc),
"cmp${cc}sd {$src, $dst|$dst, $src}", []>;
def CMPSDrm : SDI<0xC2, MRMSrcMem,
(ops FR64:$dst, FR64:$src1, f64mem:$src, SSECC:$cc),
"cmp${cc}sd {$src, $dst|$dst, $src}", []>;
def UCOMISSrr: PSI<0x2E, MRMSrcReg, (ops FR32:$src1, FR32:$src2),
"ucomiss {$src2, $src1|$src1, $src2}",
[(X86cmp FR32:$src1, FR32:$src2)]>;
def UCOMISSrm: PSI<0x2E, MRMSrcMem, (ops FR32:$src1, f32mem:$src2),
"ucomiss {$src2, $src1|$src1, $src2}",
[(X86cmp FR32:$src1, (loadf32 addr:$src2))]>;
def UCOMISDrr: PDI<0x2E, MRMSrcReg, (ops FR64:$src1, FR64:$src2),
"ucomisd {$src2, $src1|$src1, $src2}",
[(X86cmp FR64:$src1, FR64:$src2)]>;
def UCOMISDrm: PDI<0x2E, MRMSrcMem, (ops FR64:$src1, f64mem:$src2),
"ucomisd {$src2, $src1|$src1, $src2}",
[(X86cmp FR64:$src1, (loadf64 addr:$src2))]>;
// Aliases to match intrinsics which expect XMM operand(s).
let isTwoAddress = 1 in {
def Int_CMPSSrr : SSI<0xC2, MRMSrcReg,
(ops VR128:$dst, VR128:$src1, VR128:$src, SSECC:$cc),
"cmp${cc}ss {$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse_cmp_ss VR128:$src1,
VR128:$src, imm:$cc))]>;
def Int_CMPSSrm : SSI<0xC2, MRMSrcMem,
(ops VR128:$dst, VR128:$src1, f32mem:$src, SSECC:$cc),
"cmp${cc}ss {$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse_cmp_ss VR128:$src1,
(load addr:$src), imm:$cc))]>;
def Int_CMPSDrr : SDI<0xC2, MRMSrcReg,
                  (ops VR128:$dst, VR128:$src1, VR128:$src, SSECC:$cc),
                  "cmp${cc}sd {$src, $dst|$dst, $src}",
                  [(set VR128:$dst, (int_x86_sse2_cmp_sd VR128:$src1,
                                     VR128:$src, imm:$cc))]>;
def Int_CMPSDrm : SDI<0xC2, MRMSrcMem,
                  (ops VR128:$dst, VR128:$src1, f64mem:$src, SSECC:$cc),
                  "cmp${cc}sd {$src, $dst|$dst, $src}",
                  [(set VR128:$dst, (int_x86_sse2_cmp_sd VR128:$src1,
                                     (load addr:$src), imm:$cc))]>;
}
def Int_UCOMISSrr: PSI<0x2E, MRMSrcReg, (ops VR128:$src1, VR128:$src2),
"ucomiss {$src2, $src1|$src1, $src2}",
[(X86ucomi (v4f32 VR128:$src1), VR128:$src2)]>;
def Int_UCOMISSrm: PSI<0x2E, MRMSrcMem, (ops VR128:$src1, f128mem:$src2),
"ucomiss {$src2, $src1|$src1, $src2}",
[(X86ucomi (v4f32 VR128:$src1), (loadv4f32 addr:$src2))]>;
def Int_UCOMISDrr: PDI<0x2E, MRMSrcReg, (ops VR128:$src1, VR128:$src2),
"ucomisd {$src2, $src1|$src1, $src2}",
[(X86ucomi (v2f64 VR128:$src1), (v2f64 VR128:$src2))]>;
def Int_UCOMISDrm: PDI<0x2E, MRMSrcMem, (ops VR128:$src1, f128mem:$src2),
"ucomisd {$src2, $src1|$src1, $src2}",
[(X86ucomi (v2f64 VR128:$src1), (loadv2f64 addr:$src2))]>;
def Int_COMISSrr: PSI<0x2F, MRMSrcReg, (ops VR128:$src1, VR128:$src2),
"comiss {$src2, $src1|$src1, $src2}",
[(X86comi (v4f32 VR128:$src1), VR128:$src2)]>;
def Int_COMISSrm: PSI<0x2F, MRMSrcMem, (ops VR128:$src1, f128mem:$src2),
"comiss {$src2, $src1|$src1, $src2}",
[(X86comi (v4f32 VR128:$src1), (loadv4f32 addr:$src2))]>;
def Int_COMISDrr: PDI<0x2F, MRMSrcReg, (ops VR128:$src1, VR128:$src2),
"comisd {$src2, $src1|$src1, $src2}",
[(X86comi (v2f64 VR128:$src1), (v2f64 VR128:$src2))]>;
def Int_COMISDrm: PDI<0x2F, MRMSrcMem, (ops VR128:$src1, f128mem:$src2),
"comisd {$src2, $src1|$src1, $src2}",
[(X86comi (v2f64 VR128:$src1), (loadv2f64 addr:$src2))]>;
// Aliases of packed instructions for scalar use. These all have names that
// start with 'Fs'.
// Alias instructions that map fld0 to pxor for sse.
// FIXME: remove when we can teach regalloc that xor reg, reg is ok.
def FsFLD0SS : I<0xEF, MRMInitReg, (ops FR32:$dst),
"pxor $dst, $dst", [(set FR32:$dst, fp32imm0)]>,
Requires<[HasSSE1]>, TB, OpSize;
def FsFLD0SD : I<0xEF, MRMInitReg, (ops FR64:$dst),
"pxor $dst, $dst", [(set FR64:$dst, fp64imm0)]>,
Requires<[HasSSE2]>, TB, OpSize;
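// This works because "pxor $dst, $dst" zeroes the register, and the all-zero
// bit pattern is exactly +0.0 in both the f32 and f64 encodings.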
// Alias instructions to do FR32 / FR64 reg-to-reg copy using movaps / movapd.
// Upper bits are disregarded.
def FsMOVAPSrr : PSI<0x28, MRMSrcReg, (ops FR32:$dst, FR32:$src),
"movaps {$src, $dst|$dst, $src}", []>;
def FsMOVAPDrr : PDI<0x28, MRMSrcReg, (ops FR64:$dst, FR64:$src),
"movapd {$src, $dst|$dst, $src}", []>;
// Alias instructions to load FR32 / FR64 from f128mem using movaps / movapd.
// Upper bits are disregarded.
def FsMOVAPSrm : PSI<0x28, MRMSrcMem, (ops FR32:$dst, f128mem:$src),
                     "movaps {$src, $dst|$dst, $src}",
                     [(set FR32:$dst, (X86loadpf32 addr:$src))]>;
def FsMOVAPDrm : PDI<0x28, MRMSrcMem, (ops FR64:$dst, f128mem:$src),
                     "movapd {$src, $dst|$dst, $src}",
                     [(set FR64:$dst, (X86loadpf64 addr:$src))]>;
// Alias bitwise logical operations using SSE logical ops on packed FP values.
let isTwoAddress = 1 in {
let isCommutable = 1 in {
def FsANDPSrr : PSI<0x54, MRMSrcReg, (ops FR32:$dst, FR32:$src1, FR32:$src2),
                    "andps {$src2, $dst|$dst, $src2}",
                    [(set FR32:$dst, (X86fand FR32:$src1, FR32:$src2))]>;
def FsANDPDrr : PDI<0x54, MRMSrcReg, (ops FR64:$dst, FR64:$src1, FR64:$src2),
                    "andpd {$src2, $dst|$dst, $src2}",
                    [(set FR64:$dst, (X86fand FR64:$src1, FR64:$src2))]>;
def FsORPSrr  : PSI<0x56, MRMSrcReg, (ops FR32:$dst, FR32:$src1, FR32:$src2),
                    "orps {$src2, $dst|$dst, $src2}", []>;
def FsORPDrr  : PDI<0x56, MRMSrcReg, (ops FR64:$dst, FR64:$src1, FR64:$src2),
                    "orpd {$src2, $dst|$dst, $src2}", []>;
def FsXORPSrr : PSI<0x57, MRMSrcReg, (ops FR32:$dst, FR32:$src1, FR32:$src2),
                    "xorps {$src2, $dst|$dst, $src2}",
                    [(set FR32:$dst, (X86fxor FR32:$src1, FR32:$src2))]>;
def FsXORPDrr : PDI<0x57, MRMSrcReg, (ops FR64:$dst, FR64:$src1, FR64:$src2),
                    "xorpd {$src2, $dst|$dst, $src2}",
                    [(set FR64:$dst, (X86fxor FR64:$src1, FR64:$src2))]>;
}
def FsANDPSrm : PSI<0x54, MRMSrcMem, (ops FR32:$dst, FR32:$src1, f128mem:$src2),
"andps {$src2, $dst|$dst, $src2}",
[(set FR32:$dst, (X86fand FR32:$src1,
(X86loadpf32 addr:$src2)))]>;
def FsANDPDrm : PDI<0x54, MRMSrcMem, (ops FR64:$dst, FR64:$src1, f128mem:$src2),
"andpd {$src2, $dst|$dst, $src2}",
[(set FR64:$dst, (X86fand FR64:$src1,
(X86loadpf64 addr:$src2)))]>;
def FsORPSrm : PSI<0x56, MRMSrcMem, (ops FR32:$dst, FR32:$src1, f128mem:$src2),
"orps {$src2, $dst|$dst, $src2}", []>;
def FsORPDrm : PDI<0x56, MRMSrcMem, (ops FR64:$dst, FR64:$src1, f128mem:$src2),
"orpd {$src2, $dst|$dst, $src2}", []>;
def FsXORPSrm : PSI<0x57, MRMSrcMem, (ops FR32:$dst, FR32:$src1, f128mem:$src2),
"xorps {$src2, $dst|$dst, $src2}",
[(set FR32:$dst, (X86fxor FR32:$src1,
(X86loadpf32 addr:$src2)))]>;
def FsXORPDrm : PDI<0x57, MRMSrcMem, (ops FR64:$dst, FR64:$src1, f128mem:$src2),
"xorpd {$src2, $dst|$dst, $src2}",
[(set FR64:$dst, (X86fxor FR64:$src1,
(X86loadpf64 addr:$src2)))]>;
def FsANDNPSrr : PSI<0x55, MRMSrcReg, (ops FR32:$dst, FR32:$src1, FR32:$src2),
"andnps {$src2, $dst|$dst, $src2}", []>;
def FsANDNPSrm : PSI<0x55, MRMSrcMem, (ops FR32:$dst, FR32:$src1, f128mem:$src2),
"andnps {$src2, $dst|$dst, $src2}", []>;
def FsANDNPDrr : PDI<0x55, MRMSrcReg, (ops FR64:$dst, FR64:$src1, FR64:$src2),
"andnpd {$src2, $dst|$dst, $src2}", []>;
def FsANDNPDrm : PDI<0x55, MRMSrcMem, (ops FR64:$dst, FR64:$src1, f128mem:$src2),
                     "andnpd {$src2, $dst|$dst, $src2}", []>;
}
//===----------------------------------------------------------------------===//
// SSE packed FP Instructions
//===----------------------------------------------------------------------===//
// Some 'special' instructions
def IMPLICIT_DEF_VR128 : I<0, Pseudo, (ops VR128:$dst),
"#IMPLICIT_DEF $dst",
[(set VR128:$dst, (v4f32 (undef)))]>,
Requires<[HasSSE1]>;
// Move Instructions
def MOVAPSrr : PSI<0x28, MRMSrcReg, (ops VR128:$dst, VR128:$src),
"movaps {$src, $dst|$dst, $src}", []>;
def MOVAPSrm : PSI<0x28, MRMSrcMem, (ops VR128:$dst, f128mem:$src),
"movaps {$src, $dst|$dst, $src}",
[(set VR128:$dst, (loadv4f32 addr:$src))]>;
def MOVAPDrr : PDI<0x28, MRMSrcReg, (ops VR128:$dst, VR128:$src),
"movapd {$src, $dst|$dst, $src}", []>;
def MOVAPDrm : PDI<0x28, MRMSrcMem, (ops VR128:$dst, f128mem:$src),
"movapd {$src, $dst|$dst, $src}",
[(set VR128:$dst, (loadv2f64 addr:$src))]>;
def MOVAPSmr : PSI<0x29, MRMDestMem, (ops f128mem:$dst, VR128:$src),
"movaps {$src, $dst|$dst, $src}",
[(store (v4f32 VR128:$src), addr:$dst)]>;
def MOVAPDmr : PDI<0x29, MRMDestMem, (ops f128mem:$dst, VR128:$src),
"movapd {$src, $dst|$dst, $src}",
[(store (v2f64 VR128:$src), addr:$dst)]>;
def MOVUPSrr : PSI<0x10, MRMSrcReg, (ops VR128:$dst, VR128:$src),
"movups {$src, $dst|$dst, $src}", []>;
def MOVUPSrm : PSI<0x10, MRMSrcMem, (ops VR128:$dst, f128mem:$src),
"movups {$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse_loadu_ps addr:$src))]>;
def MOVUPSmr : PSI<0x11, MRMDestMem, (ops f128mem:$dst, VR128:$src),
"movups {$src, $dst|$dst, $src}",
[(int_x86_sse_storeu_ps addr:$dst, VR128:$src)]>;
def MOVUPDrr : PDI<0x10, MRMSrcReg, (ops VR128:$dst, VR128:$src),
"movupd {$src, $dst|$dst, $src}", []>;
def MOVUPDrm : PDI<0x10, MRMSrcMem, (ops VR128:$dst, f128mem:$src),
"movupd {$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse2_loadu_pd addr:$src))]>;
def MOVUPDmr : PDI<0x11, MRMDestMem, (ops f128mem:$dst, VR128:$src),
"movupd {$src, $dst|$dst, $src}",
[(int_x86_sse2_storeu_pd addr:$dst, VR128:$src)]>;
def MOVDQUrm : I<0x6F, MRMSrcMem, (ops VR128:$dst, i128mem:$src),
"movdqu {$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse2_loadu_dq addr:$src))]>,
XS, Requires<[HasSSE2]>;
def MOVDQUmr : I<0x7F, MRMDestMem, (ops i128mem:$dst, VR128:$src),
"movdqu {$src, $dst|$dst, $src}",
[(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)]>,
XS, Requires<[HasSSE2]>;
def MOVLPSrm : PSI<0x12, MRMSrcMem, (ops VR128:$dst, VR128:$src1, f64mem:$src2),
"movlps {$src2, $dst|$dst, $src2}",
[(set VR128:$dst,
(v4f32 (vector_shuffle VR128:$src1,
(bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2)))),
MOVLP_shuffle_mask)))]>;
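// i.e. movlps overwrites only the low two f32 elements of $src1 with the 64
// bits loaded from memory; the high two elements pass through unchanged.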
def MOVLPDrm : PDI<0x12, MRMSrcMem, (ops VR128:$dst, VR128:$src1, f64mem:$src2),
"movlpd {$src2, $dst|$dst, $src2}",
[(set VR128:$dst,
(v2f64 (vector_shuffle VR128:$src1,
(scalar_to_vector (loadf64 addr:$src2)),
MOVLP_shuffle_mask)))]>;
def MOVHPSrm : PSI<0x16, MRMSrcMem, (ops VR128:$dst, VR128:$src1, f64mem:$src2),
"movhps {$src2, $dst|$dst, $src2}",
[(set VR128:$dst,
(v4f32 (vector_shuffle VR128:$src1,
(bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2)))),
MOVHP_shuffle_mask)))]>;
def MOVHPDrm : PDI<0x16, MRMSrcMem, (ops VR128:$dst, VR128:$src1, f64mem:$src2),
"movhpd {$src2, $dst|$dst, $src2}",
[(set VR128:$dst,
(v2f64 (vector_shuffle VR128:$src1,
(scalar_to_vector (loadf64 addr:$src2)),
MOVHP_shuffle_mask)))]>;
def MOVLPSmr : PSI<0x13, MRMDestMem, (ops f64mem:$dst, VR128:$src),
"movlps {$src, $dst|$dst, $src}",
[(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
(i32 0))), addr:$dst)]>;
def MOVLPDmr : PDI<0x13, MRMDestMem, (ops f64mem:$dst, VR128:$src),
"movlpd {$src, $dst|$dst, $src}",
[(store (f64 (vector_extract (v2f64 VR128:$src),
(i32 0))), addr:$dst)]>;
// v2f64 extract element 1 is always custom lowered to unpack high to low
// and extract element 0 so the non-store version isn't too horrible.
def MOVHPSmr : PSI<0x17, MRMDestMem, (ops f64mem:$dst, VR128:$src),
"movhps {$src, $dst|$dst, $src}",
[(store (f64 (vector_extract
(v2f64 (vector_shuffle
(bc_v2f64 (v4f32 VR128:$src)), (undef),
UNPCKH_shuffle_mask)), (i32 0))),
addr:$dst)]>;
def MOVHPDmr : PDI<0x17, MRMDestMem, (ops f64mem:$dst, VR128:$src),
"movhpd {$src, $dst|$dst, $src}",
[(store (f64 (vector_extract
(v2f64 (vector_shuffle VR128:$src, (undef),
UNPCKH_shuffle_mask)), (i32 0))),
addr:$dst)]>;
let isTwoAddress = 1 in {
def MOVLHPSrr : PSI<0x16, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
"movlhps {$src2, $dst|$dst, $src2}",
[(set VR128:$dst,
(v4f32 (vector_shuffle VR128:$src1, VR128:$src2,
MOVLHPS_shuffle_mask)))]>;
def MOVHLPSrr : PSI<0x12, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
                    "movhlps {$src2, $dst|$dst, $src2}",
                    [(set VR128:$dst,
                      (v4f32 (vector_shuffle VR128:$src1, VR128:$src2,
                              MOVHLPS_shuffle_mask)))]>;
}
// SSE2 instructions without OpSize prefix
def CVTDQ2PSrr : I<0x5B, MRMSrcReg, (ops VR128:$dst, VR128:$src),
"cvtdq2ps {$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse2_cvtdq2ps VR128:$src))]>,
TB, Requires<[HasSSE2]>;
def CVTDQ2PSrm : I<0x5B, MRMSrcMem, (ops VR128:$dst, i128mem:$src),
"cvtdq2ps {$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse2_cvtdq2ps
(bc_v4i32 (loadv2i64 addr:$src))))]>,
TB, Requires<[HasSSE2]>;
// SSE2 instructions with XS prefix
def CVTDQ2PDrr : I<0xE6, MRMSrcReg, (ops VR128:$dst, VR128:$src),
"cvtdq2pd {$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))]>,
XS, Requires<[HasSSE2]>;
def CVTDQ2PDrm : I<0xE6, MRMSrcMem, (ops VR128:$dst, i64mem:$src),
"cvtdq2pd {$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse2_cvtdq2pd
(bc_v4i32 (loadv2i64 addr:$src))))]>,
XS, Requires<[HasSSE2]>;
def CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (ops VR128:$dst, VR128:$src),
"cvtps2dq {$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))]>;
def CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (ops VR128:$dst, f128mem:$src),
"cvtps2dq {$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse2_cvtps2dq
                   (loadv4f32 addr:$src)))]>;
// SSE2 packed instructions with XS prefix
def CVTTPS2DQrr : I<0x5B, MRMSrcReg, (ops VR128:$dst, VR128:$src),
"cvttps2dq {$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse2_cvttps2dq VR128:$src))]>,
XS, Requires<[HasSSE2]>;
def CVTTPS2DQrm : I<0x5B, MRMSrcMem, (ops VR128:$dst, f128mem:$src),
"cvttps2dq {$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse2_cvttps2dq
                   (loadv4f32 addr:$src)))]>,
XS, Requires<[HasSSE2]>;
// SSE2 packed instructions with XD prefix
def CVTPD2DQrr : I<0xE6, MRMSrcReg, (ops VR128:$dst, VR128:$src),
"cvtpd2dq {$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
XD, Requires<[HasSSE2]>;
def CVTPD2DQrm : I<0xE6, MRMSrcMem, (ops VR128:$dst, f128mem:$src),
"cvtpd2dq {$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse2_cvtpd2dq
                   (loadv2f64 addr:$src)))]>,
XD, Requires<[HasSSE2]>;
def CVTTPD2DQrr : PDI<0xE6, MRMSrcReg, (ops VR128:$dst, VR128:$src),
"cvttpd2dq {$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))]>;
def CVTTPD2DQrm : PDI<0xE6, MRMSrcMem, (ops VR128:$dst, f128mem:$src),
"cvttpd2dq {$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse2_cvttpd2dq
                   (loadv2f64 addr:$src)))]>;
// SSE2 instructions without OpSize prefix
def CVTPS2PDrr : I<0x5A, MRMSrcReg, (ops VR128:$dst, VR128:$src),
"cvtps2pd {$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))]>,
TB, Requires<[HasSSE2]>;
def CVTPS2PDrm : I<0x5A, MRMSrcMem, (ops VR128:$dst, f64mem:$src),
                   "cvtps2pd {$src, $dst|$dst, $src}",
                   [(set VR128:$dst, (int_x86_sse2_cvtps2pd
                                      (load addr:$src)))]>,
                 TB, Requires<[HasSSE2]>;
def CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (ops VR128:$dst, VR128:$src),
"cvtpd2ps {$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))]>;
def CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (ops VR128:$dst, f128mem:$src),
                     "cvtpd2ps {$src, $dst|$dst, $src}",
                     [(set VR128:$dst, (int_x86_sse2_cvtpd2ps
                                        (loadv2f64 addr:$src)))]>;
def CVTSD2SIrr: SDI<0x2D, MRMSrcReg, (ops R32:$dst, VR128:$src),
"cvtsd2si {$src, $dst|$dst, $src}",
[(set R32:$dst, (int_x86_sse2_cvtsd2si VR128:$src))]>;
def CVTSD2SIrm: SDI<0x2D, MRMSrcMem, (ops R32:$dst, f128mem:$src),
"cvtsd2si {$src, $dst|$dst, $src}",
[(set R32:$dst, (int_x86_sse2_cvtsd2si
(loadv2f64 addr:$src)))]>;
// Aliases to match intrinsics which expect XMM operand(s).
let isTwoAddress = 1 in {
def Int_CVTSI2SDrr: SDI<0x2A, MRMSrcReg,
(ops VR128:$dst, VR128:$src1, R32:$src2),
"cvtsi2sd {$src2, $dst|$dst, $src2}",
[(set VR128:$dst, (int_x86_sse2_cvtsi2sd VR128:$src1,
R32:$src2))]>;
def Int_CVTSI2SDrm: SDI<0x2A, MRMSrcMem,
(ops VR128:$dst, VR128:$src1, i32mem:$src2),
"cvtsi2sd {$src2, $dst|$dst, $src2}",
[(set VR128:$dst, (int_x86_sse2_cvtsi2sd VR128:$src1,
(loadi32 addr:$src2)))]>;
def Int_CVTSD2SSrr: SDI<0x5A, MRMSrcReg,
(ops VR128:$dst, VR128:$src1, VR128:$src2),
"cvtsd2ss {$src2, $dst|$dst, $src2}",
[(set VR128:$dst, (int_x86_sse2_cvtsd2ss VR128:$src1,
VR128:$src2))]>;
def Int_CVTSD2SSrm: SDI<0x5A, MRMSrcMem,
(ops VR128:$dst, VR128:$src1, f64mem:$src2),
"cvtsd2ss {$src2, $dst|$dst, $src2}",
[(set VR128:$dst, (int_x86_sse2_cvtsd2ss VR128:$src1,
(loadv2f64 addr:$src2)))]>;
def Int_CVTSS2SDrr: I<0x5A, MRMSrcReg,
(ops VR128:$dst, VR128:$src1, VR128:$src2),
"cvtss2sd {$src2, $dst|$dst, $src2}",
[(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
VR128:$src2))]>, XS,
Requires<[HasSSE2]>;
def Int_CVTSS2SDrm: I<0x5A, MRMSrcMem,
(ops VR128:$dst, VR128:$src1, f32mem:$src2),
"cvtss2sd {$src2, $dst|$dst, $src2}",
[(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
(loadv4f32 addr:$src2)))]>, XS,
Requires<[HasSSE2]>;
}
// Arithmetic
let isTwoAddress = 1 in {
let isCommutable = 1 in {
def ADDPSrr : PSI<0x58, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
"addps {$src2, $dst|$dst, $src2}",
[(set VR128:$dst, (v4f32 (fadd VR128:$src1, VR128:$src2)))]>;
def ADDPDrr : PDI<0x58, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
"addpd {$src2, $dst|$dst, $src2}",
[(set VR128:$dst, (v2f64 (fadd VR128:$src1, VR128:$src2)))]>;
def MULPSrr : PSI<0x59, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
"mulps {$src2, $dst|$dst, $src2}",
[(set VR128:$dst, (v4f32 (fmul VR128:$src1, VR128:$src2)))]>;
def MULPDrr : PDI<0x59, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
"mulpd {$src2, $dst|$dst, $src2}",
[(set VR128:$dst, (v2f64 (fmul VR128:$src1, VR128:$src2)))]>;
}
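// Only the register-register forms are marked commutable: the memory forms
// below fold the load into $src2, so the operands cannot be swapped.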
def ADDPSrm : PSI<0x58, MRMSrcMem, (ops VR128:$dst, VR128:$src1, f128mem:$src2),
"addps {$src2, $dst|$dst, $src2}",
[(set VR128:$dst, (v4f32 (fadd VR128:$src1,
(load addr:$src2))))]>;
def ADDPDrm : PDI<0x58, MRMSrcMem, (ops VR128:$dst, VR128:$src1, f128mem:$src2),
"addpd {$src2, $dst|$dst, $src2}",
[(set VR128:$dst, (v2f64 (fadd VR128:$src1,
(load addr:$src2))))]>;
def MULPSrm : PSI<0x59, MRMSrcMem, (ops VR128:$dst, VR128:$src1, f128mem:$src2),
"mulps {$src2, $dst|$dst, $src2}",
[(set VR128:$dst, (v4f32 (fmul VR128:$src1,
(load addr:$src2))))]>;
def MULPDrm : PDI<0x59, MRMSrcMem, (ops VR128:$dst, VR128:$src1, f128mem:$src2),
"mulpd {$src2, $dst|$dst, $src2}",
[(set VR128:$dst, (v2f64 (fmul VR128:$src1,
(load addr:$src2))))]>;
def DIVPSrr : PSI<0x5E, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
"divps {$src2, $dst|$dst, $src2}",
[(set VR128:$dst, (v4f32 (fdiv VR128:$src1, VR128:$src2)))]>;
def DIVPSrm : PSI<0x5E, MRMSrcMem, (ops VR128:$dst, VR128:$src1, f128mem:$src2),
"divps {$src2, $dst|$dst, $src2}",
[(set VR128:$dst, (v4f32 (fdiv VR128:$src1,
(load addr:$src2))))]>;
def DIVPDrr : PDI<0x5E, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),