//====- X86InstrSSE.td - Describe the X86 Instruction Set --*- tablegen -*-===//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//===----------------------------------------------------------------------===//
//
// This file describes the X86 SSE instruction set, defining the instructions,
// and properties of the instructions which are needed for code generation,
// machine code emission, and analysis.
//
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
// SSE specific DAG Nodes.
//===----------------------------------------------------------------------===//
def SDTX86FPShiftOp : SDTypeProfile<1, 2, [ SDTCisSameAs<0, 1>,
SDTCisFP<0>, SDTCisInt<2> ]>;
def SDTX86VFCMP : SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisSameAs<1, 2>,
SDTCisFP<1>, SDTCisVT<3, i8>]>;
def X86fmin : SDNode<"X86ISD::FMIN", SDTFPBinOp>;
def X86fmax : SDNode<"X86ISD::FMAX", SDTFPBinOp>;
def X86fand  : SDNode<"X86ISD::FAND",      SDTFPBinOp,
                      [SDNPCommutative, SDNPAssociative]>;
def X86for   : SDNode<"X86ISD::FOR",       SDTFPBinOp,
                      [SDNPCommutative, SDNPAssociative]>;
def X86fxor  : SDNode<"X86ISD::FXOR",      SDTFPBinOp,
                      [SDNPCommutative, SDNPAssociative]>;
def X86frsqrt : SDNode<"X86ISD::FRSQRT", SDTFPUnaryOp>;
def X86frcp : SDNode<"X86ISD::FRCP", SDTFPUnaryOp>;
def X86fsrl : SDNode<"X86ISD::FSRL", SDTX86FPShiftOp>;
def X86comi : SDNode<"X86ISD::COMI", SDTX86CmpTest>;
def X86ucomi : SDNode<"X86ISD::UCOMI", SDTX86CmpTest>;
def X86pshufb : SDNode<"X86ISD::PSHUFB",
SDTypeProfile<1, 2, [SDTCisVT<0, v16i8>, SDTCisSameAs<0,1>,
SDTCisSameAs<0,2>]>>;
def X86pextrb : SDNode<"X86ISD::PEXTRB",
SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<2>]>>;
def X86pextrw : SDNode<"X86ISD::PEXTRW",
SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<2>]>>;
def X86pinsrb : SDNode<"X86ISD::PINSRB",
SDTypeProfile<1, 3, [SDTCisVT<0, v16i8>, SDTCisSameAs<0,1>,
SDTCisVT<2, i32>, SDTCisPtrTy<3>]>>;
def X86pinsrw : SDNode<"X86ISD::PINSRW",
SDTypeProfile<1, 3, [SDTCisVT<0, v8i16>, SDTCisSameAs<0,1>,
SDTCisVT<2, i32>, SDTCisPtrTy<3>]>>;
def X86insrtps : SDNode<"X86ISD::INSERTPS",
SDTypeProfile<1, 3, [SDTCisVT<0, v4f32>, SDTCisSameAs<0,1>,
SDTCisVT<2, v4f32>, SDTCisPtrTy<3>]>>;
def X86vzmovl : SDNode<"X86ISD::VZEXT_MOVL",
SDTypeProfile<1, 1, [SDTCisSameAs<0,1>]>>;
def X86vzload : SDNode<"X86ISD::VZEXT_LOAD", SDTLoad,
[SDNPHasChain, SDNPMayLoad]>;
def X86vshl : SDNode<"X86ISD::VSHL", SDTIntShiftOp>;
def X86vshr : SDNode<"X86ISD::VSRL", SDTIntShiftOp>;
def X86cmpps : SDNode<"X86ISD::CMPPS", SDTX86VFCMP>;
def X86cmppd : SDNode<"X86ISD::CMPPD", SDTX86VFCMP>;
def X86pcmpeqb : SDNode<"X86ISD::PCMPEQB", SDTIntBinOp, [SDNPCommutative]>;
def X86pcmpeqw : SDNode<"X86ISD::PCMPEQW", SDTIntBinOp, [SDNPCommutative]>;
def X86pcmpeqd : SDNode<"X86ISD::PCMPEQD", SDTIntBinOp, [SDNPCommutative]>;
def X86pcmpeqq : SDNode<"X86ISD::PCMPEQQ", SDTIntBinOp, [SDNPCommutative]>;
def X86pcmpgtb : SDNode<"X86ISD::PCMPGTB", SDTIntBinOp>;
def X86pcmpgtw : SDNode<"X86ISD::PCMPGTW", SDTIntBinOp>;
def X86pcmpgtd : SDNode<"X86ISD::PCMPGTD", SDTIntBinOp>;
def X86pcmpgtq : SDNode<"X86ISD::PCMPGTQ", SDTIntBinOp>;
def SDTX86CmpPTest : SDTypeProfile<1, 2, [SDTCisVT<0, i32>,
SDTCisVT<1, v4f32>,
SDTCisVT<2, v4f32>]>;
def X86ptest : SDNode<"X86ISD::PTEST", SDTX86CmpPTest>;
//===----------------------------------------------------------------------===//
// SSE Complex Patterns
//===----------------------------------------------------------------------===//
// These are 'extloads' from a scalar to the low element of a vector, zeroing
// the top elements. These are used for the SSE 'ss' and 'sd' instruction
// forms.
def sse_load_f32 : ComplexPattern<v4f32, 5, "SelectScalarSSELoad", [],
[SDNPHasChain, SDNPMayLoad]>;
def sse_load_f64 : ComplexPattern<v2f64, 5, "SelectScalarSSELoad", [],
[SDNPHasChain, SDNPMayLoad]>;
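// For example, these complex patterns appear later in this file as the memory
// operand of the scalar intrinsic forms, e.g.
//   [(set VR128:$dst, (int_x86_sse_add_ss VR128:$src1, sse_load_f32:$src2))]
// where SelectScalarSSELoad folds the load into the low vector element.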
def ssmem : Operand<v4f32> {
let PrintMethod = "printf32mem";
let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, i8imm);
let ParserMatchClass = X86MemAsmOperand;
}
def sdmem : Operand<v2f64> {
let PrintMethod = "printf64mem";
let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, i8imm);
let ParserMatchClass = X86MemAsmOperand;
}
//===----------------------------------------------------------------------===//
// SSE pattern fragments
//===----------------------------------------------------------------------===//
def loadv4f32 : PatFrag<(ops node:$ptr), (v4f32 (load node:$ptr))>;
def loadv2f64 : PatFrag<(ops node:$ptr), (v2f64 (load node:$ptr))>;
def loadv4i32 : PatFrag<(ops node:$ptr), (v4i32 (load node:$ptr))>;
def loadv2i64 : PatFrag<(ops node:$ptr), (v2i64 (load node:$ptr))>;
// Like 'store', but always requires vector alignment.
def alignedstore : PatFrag<(ops node:$val, node:$ptr),
(store node:$val, node:$ptr), [{
return cast<StoreSDNode>(N)->getAlignment() >= 16;
}]>;
// Like 'load', but always requires vector alignment.
def alignedload : PatFrag<(ops node:$ptr), (load node:$ptr), [{
return cast<LoadSDNode>(N)->getAlignment() >= 16;
}]>;
def alignedloadfsf32 : PatFrag<(ops node:$ptr),
(f32 (alignedload node:$ptr))>;
def alignedloadfsf64 : PatFrag<(ops node:$ptr),
(f64 (alignedload node:$ptr))>;
def alignedloadv4f32 : PatFrag<(ops node:$ptr),
(v4f32 (alignedload node:$ptr))>;
def alignedloadv2f64 : PatFrag<(ops node:$ptr),
(v2f64 (alignedload node:$ptr))>;
def alignedloadv4i32 : PatFrag<(ops node:$ptr),
(v4i32 (alignedload node:$ptr))>;
def alignedloadv2i64 : PatFrag<(ops node:$ptr),
(v2i64 (alignedload node:$ptr))>;
// Like 'load', but uses special alignment checks suitable for use in
// memory operands in most SSE instructions, which are required to
// be naturally aligned on some targets but not on others. If the subtarget
// allows unaligned accesses, match any load, though this may require
// setting a feature bit in the processor (on startup, for example).
// Opteron 10h and later implement such a feature.
def memop : PatFrag<(ops node:$ptr), (load node:$ptr), [{
return Subtarget->hasVectorUAMem()
|| cast<LoadSDNode>(N)->getAlignment() >= 16;
}]>;
def memopfsf32 : PatFrag<(ops node:$ptr), (f32 (memop node:$ptr))>;
def memopfsf64 : PatFrag<(ops node:$ptr), (f64 (memop node:$ptr))>;
def memopv4f32 : PatFrag<(ops node:$ptr), (v4f32 (memop node:$ptr))>;
def memopv2f64 : PatFrag<(ops node:$ptr), (v2f64 (memop node:$ptr))>;
def memopv4i32 : PatFrag<(ops node:$ptr), (v4i32 (memop node:$ptr))>;
def memopv2i64 : PatFrag<(ops node:$ptr), (v2i64 (memop node:$ptr))>;
def memopv16i8 : PatFrag<(ops node:$ptr), (v16i8 (memop node:$ptr))>;
// SSSE3 uses MMX registers for some instructions. They aren't aligned on a
// 16-byte boundary.
// FIXME: 8 byte alignment for mmx reads is not required
def memop64 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
return cast<LoadSDNode>(N)->getAlignment() >= 8;
}]>;
def memopv8i8 : PatFrag<(ops node:$ptr), (v8i8 (memop64 node:$ptr))>;
def memopv4i16 : PatFrag<(ops node:$ptr), (v4i16 (memop64 node:$ptr))>;
def memopv8i16 : PatFrag<(ops node:$ptr), (v8i16 (memop64 node:$ptr))>;
def memopv2i32 : PatFrag<(ops node:$ptr), (v2i32 (memop64 node:$ptr))>;
// MOVNT Support
// Like 'store', but requires the non-temporal bit to be set
def nontemporalstore : PatFrag<(ops node:$val, node:$ptr),
(st node:$val, node:$ptr), [{
if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
return ST->isNonTemporal();
return false;
}]>;
def alignednontemporalstore : PatFrag<(ops node:$val, node:$ptr),
(st node:$val, node:$ptr), [{
if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
return ST->isNonTemporal() && !ST->isTruncatingStore() &&
ST->getAddressingMode() == ISD::UNINDEXED &&
ST->getAlignment() >= 16;
return false;
}]>;
def unalignednontemporalstore : PatFrag<(ops node:$val, node:$ptr),
(st node:$val, node:$ptr), [{
if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
return ST->isNonTemporal() &&
ST->getAlignment() < 16;
return false;
}]>;
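// Note: these fragments are consumed by the MOVNT* store patterns defined
// later in the file; keeping the aligned and unaligned non-temporal cases
// separate lets each be selected to a different instruction.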
def bc_v4f32 : PatFrag<(ops node:$in), (v4f32 (bitconvert node:$in))>;
def bc_v2f64 : PatFrag<(ops node:$in), (v2f64 (bitconvert node:$in))>;
def bc_v16i8 : PatFrag<(ops node:$in), (v16i8 (bitconvert node:$in))>;
def bc_v8i16 : PatFrag<(ops node:$in), (v8i16 (bitconvert node:$in))>;
def bc_v4i32 : PatFrag<(ops node:$in), (v4i32 (bitconvert node:$in))>;
def bc_v2i64 : PatFrag<(ops node:$in), (v2i64 (bitconvert node:$in))>;
def vzmovl_v2i64 : PatFrag<(ops node:$src),
(bitconvert (v2i64 (X86vzmovl
(v2i64 (scalar_to_vector (loadi64 node:$src))))))>;
def vzmovl_v4i32 : PatFrag<(ops node:$src),
(bitconvert (v4i32 (X86vzmovl
(v4i32 (scalar_to_vector (loadi32 node:$src))))))>;
def vzload_v2i64 : PatFrag<(ops node:$src),
(bitconvert (v2i64 (X86vzload node:$src)))>;
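// Note: vzmovl_v2i64 matches the canonical DAG for a zero-extending 64-bit
// load -- an i64 loaded into the low element via scalar_to_vector, the upper
// element zeroed by X86vzmovl, then bitcast to the surrounding pattern's type.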
def fp32imm0 : PatLeaf<(f32 fpimm), [{
return N->isExactlyValue(+0.0);
}]>;
// BYTE_imm - Transform bit immediates into byte immediates.
def BYTE_imm  : SDNodeXForm<imm, [{
  // Transformation function: imm >> 3
  return getI32Imm(N->getZExtValue() >> 3);
}]>;
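// For example, a bit count of 16 becomes the byte count 2 (16 >> 3); the
// byte-granular shift patterns (e.g. PSRLDQ) use this transform.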
// SHUFFLE_get_shuf_imm xform function: convert vector_shuffle mask to PSHUF*,
// SHUFP* etc. imm.
def SHUFFLE_get_shuf_imm : SDNodeXForm<vector_shuffle, [{
  return getI8Imm(X86::getShuffleSHUFImmediate(N));
}]>;

// SHUFFLE_get_pshufhw_imm xform function: convert vector_shuffle mask to
// a PSHUFHW imm.
def SHUFFLE_get_pshufhw_imm : SDNodeXForm<vector_shuffle, [{
return getI8Imm(X86::getShufflePSHUFHWImmediate(N));
}]>;
// SHUFFLE_get_pshuflw_imm xform function: convert vector_shuffle mask to
// a PSHUFLW imm.
def SHUFFLE_get_pshuflw_imm : SDNodeXForm<vector_shuffle, [{
return getI8Imm(X86::getShufflePSHUFLWImmediate(N));
}]>;
// SHUFFLE_get_palign_imm xform function: convert vector_shuffle mask to
// a PALIGNR imm.
def SHUFFLE_get_palign_imm : SDNodeXForm<vector_shuffle, [{
return getI8Imm(X86::getShufflePALIGNRImmediate(N));
}]>;
def splat_lo : PatFrag<(ops node:$lhs, node:$rhs),
(vector_shuffle node:$lhs, node:$rhs), [{
ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
return SVOp->isSplat() && SVOp->getSplatIndex() == 0;
}]>;
def movddup : PatFrag<(ops node:$lhs, node:$rhs),
                      (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVDDUPMask(cast<ShuffleVectorSDNode>(N));
}]>;

def movhlps : PatFrag<(ops node:$lhs, node:$rhs),
                      (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVHLPSMask(cast<ShuffleVectorSDNode>(N));
}]>;

def movhlps_undef : PatFrag<(ops node:$lhs, node:$rhs),
                            (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVHLPS_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
}]>;

def movlhps : PatFrag<(ops node:$lhs, node:$rhs),
                      (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVLHPSMask(cast<ShuffleVectorSDNode>(N));
}]>;

def movlp : PatFrag<(ops node:$lhs, node:$rhs),
                    (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVLPMask(cast<ShuffleVectorSDNode>(N));
}]>;

def movl : PatFrag<(ops node:$lhs, node:$rhs),
                   (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVLMask(cast<ShuffleVectorSDNode>(N));
}]>;

def movshdup : PatFrag<(ops node:$lhs, node:$rhs),
                       (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVSHDUPMask(cast<ShuffleVectorSDNode>(N));
}]>;
def movsldup : PatFrag<(ops node:$lhs, node:$rhs),
(vector_shuffle node:$lhs, node:$rhs), [{
return X86::isMOVSLDUPMask(cast<ShuffleVectorSDNode>(N));
}]>;
def unpckl : PatFrag<(ops node:$lhs, node:$rhs),
                     (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isUNPCKLMask(cast<ShuffleVectorSDNode>(N));
}]>;

def unpckh : PatFrag<(ops node:$lhs, node:$rhs),
                     (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isUNPCKHMask(cast<ShuffleVectorSDNode>(N));
}]>;

def unpckl_undef : PatFrag<(ops node:$lhs, node:$rhs),
                           (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isUNPCKL_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
}]>;

def unpckh_undef : PatFrag<(ops node:$lhs, node:$rhs),
                           (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isUNPCKH_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
}]>;
def pshufd : PatFrag<(ops node:$lhs, node:$rhs),
(vector_shuffle node:$lhs, node:$rhs), [{
return X86::isPSHUFDMask(cast<ShuffleVectorSDNode>(N));
}], SHUFFLE_get_shuf_imm>;
def shufp : PatFrag<(ops node:$lhs, node:$rhs),
(vector_shuffle node:$lhs, node:$rhs), [{
return X86::isSHUFPMask(cast<ShuffleVectorSDNode>(N));
}], SHUFFLE_get_shuf_imm>;
def pshufhw : PatFrag<(ops node:$lhs, node:$rhs),
(vector_shuffle node:$lhs, node:$rhs), [{
return X86::isPSHUFHWMask(cast<ShuffleVectorSDNode>(N));
}], SHUFFLE_get_pshufhw_imm>;
def pshuflw : PatFrag<(ops node:$lhs, node:$rhs),
(vector_shuffle node:$lhs, node:$rhs), [{
return X86::isPSHUFLWMask(cast<ShuffleVectorSDNode>(N));
}], SHUFFLE_get_pshuflw_imm>;
def palign : PatFrag<(ops node:$lhs, node:$rhs),
(vector_shuffle node:$lhs, node:$rhs), [{
return X86::isPALIGNRMask(cast<ShuffleVectorSDNode>(N));
}], SHUFFLE_get_palign_imm>;
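// Note: where a fragment above names an SDNodeXForm (e.g. SHUFFLE_get_shuf_imm)
// as its third PatFrag argument, a pattern matching the fragment can emit the
// shuffle immediate computed from the matched node directly.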
//===----------------------------------------------------------------------===//
// SSE scalar FP Instructions
//===----------------------------------------------------------------------===//
// CMOV* - Used to implement the SSE SELECT DAG operation. Expanded after
// instruction selection into a branch sequence.
let Uses = [EFLAGS], usesCustomInserter = 1 in {
def CMOV_FR32 : I<0, Pseudo,
(outs FR32:$dst), (ins FR32:$t, FR32:$f, i8imm:$cond),
"#CMOV_FR32 PSEUDO!",
[(set FR32:$dst, (X86cmov FR32:$t, FR32:$f, imm:$cond,
EFLAGS))]>;
  def CMOV_FR64 : I<0, Pseudo,
(outs FR64:$dst), (ins FR64:$t, FR64:$f, i8imm:$cond),
"#CMOV_FR64 PSEUDO!",
[(set FR64:$dst, (X86cmov FR64:$t, FR64:$f, imm:$cond,
EFLAGS))]>;
  def CMOV_V4F32 : I<0, Pseudo,
(outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
"#CMOV_V4F32 PSEUDO!",
[(set VR128:$dst,
(v4f32 (X86cmov VR128:$t, VR128:$f, imm:$cond,
EFLAGS)))]>;
  def CMOV_V2F64 : I<0, Pseudo,
(outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
"#CMOV_V2F64 PSEUDO!",
[(set VR128:$dst,
(v2f64 (X86cmov VR128:$t, VR128:$f, imm:$cond,
EFLAGS)))]>;
  def CMOV_V2I64 : I<0, Pseudo,
(outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
"#CMOV_V2I64 PSEUDO!",
[(set VR128:$dst,
(v2i64 (X86cmov VR128:$t, VR128:$f, imm:$cond,
EFLAGS)))]>;
}
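// Rough sketch of the expansion (performed by the C++ custom inserter, not
// in TableGen): each CMOV_* pseudo becomes a diamond of basic blocks -- a
// conditional branch on EFLAGS around a copy of one operand, with a PHI in
// the join block merging $t and $f into $dst.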
//===----------------------------------------------------------------------===//
// SSE1 Instructions
//===----------------------------------------------------------------------===//
// Move Instructions. Register-to-register movss is not used for FR32
// register copies because it's a partial register update; FsMOVAPSrr is
// used instead. Register-to-register movss is not modeled as an INSERT_SUBREG
// because INSERT_SUBREG requires that the insert be implementable in terms of
// a copy, and as just mentioned, we don't use movss for copies.
let Constraints = "$src1 = $dst" in
def MOVSSrr : SSI<0x10, MRMSrcReg,
(outs VR128:$dst), (ins VR128:$src1, FR32:$src2),
"movss\t{$src2, $dst|$dst, $src2}",
                  [(set (v4f32 VR128:$dst),
                        (movl VR128:$src1, (scalar_to_vector FR32:$src2)))]>;
// Extract the low 32-bit value from one vector and insert it into another.
let AddedComplexity = 15 in
def : Pat<(v4f32 (movl VR128:$src1, VR128:$src2)),
(MOVSSrr (v4f32 VR128:$src1),
(EXTRACT_SUBREG (v4f32 VR128:$src2), sub_ss))>;
// Implicitly promote a 32-bit scalar to a vector.
def : Pat<(v4f32 (scalar_to_vector FR32:$src)),
(INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FR32:$src, sub_ss)>;
// Loads from memory automatically zero the upper bits.
let canFoldAsLoad = 1, isReMaterializable = 1 in
def MOVSSrm : SSI<0x10, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src),
"movss\t{$src, $dst|$dst, $src}",
[(set FR32:$dst, (loadf32 addr:$src))]>;
// MOVSSrm zeros the high parts of the register; represent this
// with SUBREG_TO_REG.
let AddedComplexity = 20 in {
def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
(SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
(SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
            (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
}
def MOVSSmr : SSI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src),
"movss\t{$src, $dst|$dst, $src}",
[(store FR32:$src, addr:$dst)]>;
// Extract and store.
def : Pat<(store (f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
addr:$dst),
(MOVSSmr addr:$dst,
(EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
// Conversion instructions
def CVTTSS2SIrr : SSI<0x2C, MRMSrcReg, (outs GR32:$dst), (ins FR32:$src),
"cvttss2si\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, (fp_to_sint FR32:$src))]>;
def CVTTSS2SIrm : SSI<0x2C, MRMSrcMem, (outs GR32:$dst), (ins f32mem:$src),
"cvttss2si\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, (fp_to_sint (loadf32 addr:$src)))]>;
def CVTSI2SSrr : SSI<0x2A, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
"cvtsi2ss\t{$src, $dst|$dst, $src}",
[(set FR32:$dst, (sint_to_fp GR32:$src))]>;
def CVTSI2SSrm : SSI<0x2A, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
"cvtsi2ss\t{$src, $dst|$dst, $src}",
[(set FR32:$dst, (sint_to_fp (loadi32 addr:$src)))]>;
// Match intrinsics which expect XMM operand(s).
def CVTSS2SIrr: SSI<0x2D, MRMSrcReg, (outs GR32:$dst), (ins FR32:$src),
"cvtss2si{l}\t{$src, $dst|$dst, $src}", []>;
def CVTSS2SIrm: SSI<0x2D, MRMSrcMem, (outs GR32:$dst), (ins f32mem:$src),
"cvtss2si{l}\t{$src, $dst|$dst, $src}", []>;
def Int_CVTSS2SIrr : SSI<0x2D, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
"cvtss2si\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, (int_x86_sse_cvtss2si VR128:$src))]>;
def Int_CVTSS2SIrm : SSI<0x2D, MRMSrcMem, (outs GR32:$dst), (ins f32mem:$src),
"cvtss2si\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, (int_x86_sse_cvtss2si
(load addr:$src)))]>;
// Match intrinsics which expect MM and XMM operand(s).
def Int_CVTPS2PIrr : PSI<0x2D, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src),
"cvtps2pi\t{$src, $dst|$dst, $src}",
[(set VR64:$dst, (int_x86_sse_cvtps2pi VR128:$src))]>;
def Int_CVTPS2PIrm : PSI<0x2D, MRMSrcMem, (outs VR64:$dst), (ins f64mem:$src),
"cvtps2pi\t{$src, $dst|$dst, $src}",
[(set VR64:$dst, (int_x86_sse_cvtps2pi
(load addr:$src)))]>;
def Int_CVTTPS2PIrr: PSI<0x2C, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src),
"cvttps2pi\t{$src, $dst|$dst, $src}",
[(set VR64:$dst, (int_x86_sse_cvttps2pi VR128:$src))]>;
def Int_CVTTPS2PIrm: PSI<0x2C, MRMSrcMem, (outs VR64:$dst), (ins f64mem:$src),
"cvttps2pi\t{$src, $dst|$dst, $src}",
                         [(set VR64:$dst, (int_x86_sse_cvttps2pi
                                           (load addr:$src)))]>;

let Constraints = "$src1 = $dst" in {
  def Int_CVTPI2PSrr : PSI<0x2A, MRMSrcReg,
(outs VR128:$dst), (ins VR128:$src1, VR64:$src2),
"cvtpi2ps\t{$src2, $dst|$dst, $src2}",
[(set VR128:$dst, (int_x86_sse_cvtpi2ps VR128:$src1,
VR64:$src2))]>;
def Int_CVTPI2PSrm : PSI<0x2A, MRMSrcMem,
(outs VR128:$dst), (ins VR128:$src1, i64mem:$src2),
"cvtpi2ps\t{$src2, $dst|$dst, $src2}",
[(set VR128:$dst, (int_x86_sse_cvtpi2ps VR128:$src1,
(load addr:$src2)))]>;
}
// Aliases for intrinsics
def Int_CVTTSS2SIrr : SSI<0x2C, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
"cvttss2si\t{$src, $dst|$dst, $src}",
[(set GR32:$dst,
(int_x86_sse_cvttss2si VR128:$src))]>;
def Int_CVTTSS2SIrm : SSI<0x2C, MRMSrcMem, (outs GR32:$dst), (ins f32mem:$src),
"cvttss2si\t{$src, $dst|$dst, $src}",
[(set GR32:$dst,
(int_x86_sse_cvttss2si(load addr:$src)))]>;
let Constraints = "$src1 = $dst" in {
  def Int_CVTSI2SSrr : SSI<0x2A, MRMSrcReg,
(outs VR128:$dst), (ins VR128:$src1, GR32:$src2),
"cvtsi2ss\t{$src2, $dst|$dst, $src2}",
[(set VR128:$dst, (int_x86_sse_cvtsi2ss VR128:$src1,
GR32:$src2))]>;
def Int_CVTSI2SSrm : SSI<0x2A, MRMSrcMem,
(outs VR128:$dst), (ins VR128:$src1, i32mem:$src2),
"cvtsi2ss\t{$src2, $dst|$dst, $src2}",
[(set VR128:$dst, (int_x86_sse_cvtsi2ss VR128:$src1,
(loadi32 addr:$src2)))]>;
}
// Comparison instructions
let Constraints = "$src1 = $dst", neverHasSideEffects = 1 in {
  def CMPSSrr : SSIi8<0xC2, MRMSrcReg,
                      (outs FR32:$dst), (ins FR32:$src1, FR32:$src, SSECC:$cc),
                      "cmp${cc}ss\t{$src, $dst|$dst, $src}", []>;
let mayLoad = 1 in
  def CMPSSrm : SSIi8<0xC2, MRMSrcMem,
                      (outs FR32:$dst), (ins FR32:$src1, f32mem:$src, SSECC:$cc),
                      "cmp${cc}ss\t{$src, $dst|$dst, $src}", []>;
// Accept explicit immediate argument form instead of comparison code.
let isAsmParserOnly = 1 in {
def CMPSSrr_alt : SSIi8<0xC2, MRMSrcReg,
(outs FR32:$dst), (ins FR32:$src1, FR32:$src, i8imm:$src2),
"cmpss\t{$src2, $src, $dst|$dst, $src, $src2}", []>;
let mayLoad = 1 in
def CMPSSrm_alt : SSIi8<0xC2, MRMSrcMem,
(outs FR32:$dst), (ins FR32:$src1, f32mem:$src, i8imm:$src2),
"cmpss\t{$src2, $src, $dst|$dst, $src, $src2}", []>;
}
}
let Defs = [EFLAGS] in {
def UCOMISSrr: PSI<0x2E, MRMSrcReg, (outs), (ins FR32:$src1, FR32:$src2),
"ucomiss\t{$src2, $src1|$src1, $src2}",
[(set EFLAGS, (X86cmp FR32:$src1, FR32:$src2))]>;
def UCOMISSrm: PSI<0x2E, MRMSrcMem, (outs), (ins FR32:$src1, f32mem:$src2),
"ucomiss\t{$src2, $src1|$src1, $src2}",
[(set EFLAGS, (X86cmp FR32:$src1, (loadf32 addr:$src2)))]>;
def COMISSrr: PSI<0x2F, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
"comiss\t{$src2, $src1|$src1, $src2}", []>;
def COMISSrm: PSI<0x2F, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
"comiss\t{$src2, $src1|$src1, $src2}", []>;
} // Defs = [EFLAGS]
// Aliases to match intrinsics which expect XMM operand(s).
let Constraints = "$src1 = $dst" in {
  def Int_CMPSSrr : SSIi8<0xC2, MRMSrcReg,
(outs VR128:$dst),
(ins VR128:$src1, VR128:$src, SSECC:$cc),
"cmp${cc}ss\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse_cmp_ss
VR128:$src1,
VR128:$src, imm:$cc))]>;
def Int_CMPSSrm : SSIi8<0xC2, MRMSrcMem,
(outs VR128:$dst),
(ins VR128:$src1, f32mem:$src, SSECC:$cc),
"cmp${cc}ss\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse_cmp_ss VR128:$src1,
(load addr:$src), imm:$cc))]>;
}
let Defs = [EFLAGS] in {
def Int_UCOMISSrr: PSI<0x2E, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
"ucomiss\t{$src2, $src1|$src1, $src2}",
[(set EFLAGS, (X86ucomi (v4f32 VR128:$src1),
VR128:$src2))]>;
def Int_UCOMISSrm: PSI<0x2E, MRMSrcMem, (outs),(ins VR128:$src1, f128mem:$src2),
"ucomiss\t{$src2, $src1|$src1, $src2}",
[(set EFLAGS, (X86ucomi (v4f32 VR128:$src1),
(load addr:$src2)))]>;
def Int_COMISSrr: PSI<0x2F, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
"comiss\t{$src2, $src1|$src1, $src2}",
[(set EFLAGS, (X86comi (v4f32 VR128:$src1),
VR128:$src2))]>;
def Int_COMISSrm: PSI<0x2F, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
"comiss\t{$src2, $src1|$src1, $src2}",
[(set EFLAGS, (X86comi (v4f32 VR128:$src1),
(load addr:$src2)))]>;
} // Defs = [EFLAGS]
// Aliases of packed SSE1 instructions for scalar use. These all have names
// that start with 'Fs'.
// Alias instructions that map fld0 to pxor for sse.
let isReMaterializable = 1, isAsCheapAsAMove = 1, isCodeGenOnly = 1,
canFoldAsLoad = 1 in
// FIXME: Set encoding to pseudo!
def FsFLD0SS : I<0xEF, MRMInitReg, (outs FR32:$dst), (ins), "",
[(set FR32:$dst, fp32imm0)]>,
Requires<[HasSSE1]>, TB, OpSize;
// Alias instruction to do FR32 reg-to-reg copy using movaps. Upper bits are
// disregarded.
def FsMOVAPSrr : PSI<0x28, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
"movaps\t{$src, $dst|$dst, $src}", []>;
// Alias instruction to load FR32 from f128mem using movaps. Upper bits are
// disregarded.
let canFoldAsLoad = 1, isReMaterializable = 1 in
def FsMOVAPSrm : PSI<0x28, MRMSrcMem, (outs FR32:$dst), (ins f128mem:$src),
"movaps\t{$src, $dst|$dst, $src}",
[(set FR32:$dst, (alignedloadfsf32 addr:$src))]>;
// Alias bitwise logical operations using SSE logical ops on packed FP values.
let Constraints = "$src1 = $dst" in {
let isCommutable = 1 in {
def FsANDPSrr : PSI<0x54, MRMSrcReg, (outs FR32:$dst),
(ins FR32:$src1, FR32:$src2),
"andps\t{$src2, $dst|$dst, $src2}",
[(set FR32:$dst, (X86fand FR32:$src1, FR32:$src2))]>;
def FsORPSrr : PSI<0x56, MRMSrcReg, (outs FR32:$dst),
(ins FR32:$src1, FR32:$src2),
"orps\t{$src2, $dst|$dst, $src2}",
[(set FR32:$dst, (X86for FR32:$src1, FR32:$src2))]>;
def FsXORPSrr : PSI<0x57, MRMSrcReg, (outs FR32:$dst),
(ins FR32:$src1, FR32:$src2),
"xorps\t{$src2, $dst|$dst, $src2}",
[(set FR32:$dst, (X86fxor FR32:$src1, FR32:$src2))]>;
}
def FsANDPSrm : PSI<0x54, MRMSrcMem, (outs FR32:$dst),
(ins FR32:$src1, f128mem:$src2),
"andps\t{$src2, $dst|$dst, $src2}",
[(set FR32:$dst, (X86fand FR32:$src1,
(memopfsf32 addr:$src2)))]>;
def FsORPSrm : PSI<0x56, MRMSrcMem, (outs FR32:$dst),
(ins FR32:$src1, f128mem:$src2),
"orps\t{$src2, $dst|$dst, $src2}",
[(set FR32:$dst, (X86for FR32:$src1,
(memopfsf32 addr:$src2)))]>;
def FsXORPSrm : PSI<0x57, MRMSrcMem, (outs FR32:$dst),
(ins FR32:$src1, f128mem:$src2),
"xorps\t{$src2, $dst|$dst, $src2}",
[(set FR32:$dst, (X86fxor FR32:$src1,
(memopfsf32 addr:$src2)))]>;
let neverHasSideEffects = 1 in {
def FsANDNPSrr : PSI<0x55, MRMSrcReg,
(outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
"andnps\t{$src2, $dst|$dst, $src2}", []>;
let mayLoad = 1 in
def FsANDNPSrm : PSI<0x55, MRMSrcMem,
(outs FR32:$dst), (ins FR32:$src1, f128mem:$src2),
"andnps\t{$src2, $dst|$dst, $src2}", []>;
}
}
/// basic_sse12_fp_binop_rm - SSE 1 & 2 binops come in both scalar and
/// vector forms.
///
/// In addition, we also have a special variant of the scalar form here to
/// represent the associated intrinsic operation. This form is unlike the
/// plain scalar form, in that it takes an entire vector (instead of a scalar)
/// and leaves the top elements unmodified (therefore these cannot be commuted).
///
/// These three forms can each be reg+reg or reg+mem, so there are a total of
/// six "instructions".
///
multiclass basic_sse12_fp_binop_rm<bits<8> opc, string OpcodeStr,
SDNode OpNode, bit Commutable = 0> {
// Scalar operation, reg+reg.
def SSrr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
!strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
[(set FR32:$dst, (OpNode FR32:$src1, FR32:$src2))]> {
let isCommutable = Commutable;
}
def SDrr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
!strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
[(set FR64:$dst, (OpNode FR64:$src1, FR64:$src2))]> {
let isCommutable = Commutable;
}
// Scalar operation, reg+mem.
def SSrm : SSI<opc, MRMSrcMem, (outs FR32:$dst),
(ins FR32:$src1, f32mem:$src2),
!strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
[(set FR32:$dst, (OpNode FR32:$src1, (load addr:$src2)))]>;
def SDrm : SDI<opc, MRMSrcMem, (outs FR64:$dst),
(ins FR64:$src1, f64mem:$src2),
!strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
[(set FR64:$dst, (OpNode FR64:$src1, (load addr:$src2)))]>;
// Vector operation, reg+reg.
def PSrr : PSI<opc, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2),
!strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
[(set VR128:$dst, (v4f32 (OpNode VR128:$src1, VR128:$src2)))]> {
let isCommutable = Commutable;
}
def PDrr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2),
!strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
[(set VR128:$dst, (v2f64 (OpNode VR128:$src1, VR128:$src2)))]> {
let isCommutable = Commutable;
}
// Vector operation, reg+mem.
def PSrm : PSI<opc, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, f128mem:$src2),
!strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
[(set VR128:$dst, (OpNode VR128:$src1, (memopv4f32 addr:$src2)))]>;
def PDrm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, f128mem:$src2),
!strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
[(set VR128:$dst, (OpNode VR128:$src1, (memopv2f64 addr:$src2)))]>;
// Intrinsic operation, reg+reg.
def SSrr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2),
!strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
[(set VR128:$dst, (!nameconcat<Intrinsic>("int_x86_sse_",
!strconcat(OpcodeStr, "_ss")) VR128:$src1,
VR128:$src2))]>;
// int_x86_sse_xxx_ss
def SDrr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2),
!strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
[(set VR128:$dst, (!nameconcat<Intrinsic>("int_x86_sse2_",
!strconcat(OpcodeStr, "_sd")) VR128:$src1,
VR128:$src2))]>;
// int_x86_sse2_xxx_sd
// Intrinsic operation, reg+mem.
def SSrm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, ssmem:$src2),
!strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
[(set VR128:$dst, (!nameconcat<Intrinsic>("int_x86_sse_",
!strconcat(OpcodeStr, "_ss")) VR128:$src1,
sse_load_f32:$src2))]>;
// int_x86_sse_xxx_ss
def SDrm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, sdmem:$src2),
!strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
[(set VR128:$dst, (!nameconcat<Intrinsic>("int_x86_sse2_",
!strconcat(OpcodeStr, "_sd")) VR128:$src1,
sse_load_f64:$src2))]>;
// int_x86_sse2_xxx_sd
}
// Arithmetic instructions
defm ADD : basic_sse12_fp_binop_rm<0x58, "add", fadd, 1>;
defm MUL : basic_sse12_fp_binop_rm<0x59, "mul", fmul, 1>;
defm SUB : basic_sse12_fp_binop_rm<0x5C, "sub", fsub>;
defm DIV : basic_sse12_fp_binop_rm<0x5E, "div", fdiv>;
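// As a guide to the expansion: each defm prefixes the multiclass defs with
// its name, so 'defm ADD' yields ADDSSrr/ADDSSrm and ADDSDrr/ADDSDrm
// (scalar), ADDPSrr/ADDPSrm and ADDPDrr/ADDPDrm (packed), plus the
// corresponding *_Int intrinsic forms.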
/// sse12_fp_binop_rm - Other SSE 1 & 2 binops
///
/// This multiclass is like basic_sse12_fp_binop_rm, with the addition of
/// instructions for a full-vector intrinsic form. Operations that map
/// onto C operators don't use this form since they just use the plain
/// vector form instead of having a separate vector intrinsic form.
///
/// This provides a total of eight "instructions".
///
multiclass sse12_fp_binop_rm<bits<8> opc, string OpcodeStr,
SDNode OpNode, bit Commutable = 0> {
// Scalar operation, reg+reg.
def SSrr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
!strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
[(set FR32:$dst, (OpNode FR32:$src1, FR32:$src2))]> {
let isCommutable = Commutable;
}
def SDrr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
!strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
[(set FR64:$dst, (OpNode FR64:$src1, FR64:$src2))]> {
let isCommutable = Commutable;
}
// Scalar operation, reg+mem.
def SSrm : SSI<opc, MRMSrcMem, (outs FR32:$dst),
(ins FR32:$src1, f32mem:$src2),
!strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
[(set FR32:$dst, (OpNode FR32:$src1, (load addr:$src2)))]>;
def SDrm : SDI<opc, MRMSrcMem, (outs FR64:$dst),
(ins FR64:$src1, f64mem:$src2),
!strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
[(set FR64:$dst, (OpNode FR64:$src1, (load addr:$src2)))]>;
// Vector operation, reg+reg.
def PSrr : PSI<opc, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2),
!strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
[(set VR128:$dst, (v4f32 (OpNode VR128:$src1, VR128:$src2)))]> {
let isCommutable = Commutable;
}
def PDrr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2),
!strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
[(set VR128:$dst, (v2f64 (OpNode VR128:$src1, VR128:$src2)))]> {
let isCommutable = Commutable;
}
// Vector operation, reg+mem.
def PSrm : PSI<opc, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, f128mem:$src2),
!strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
[(set VR128:$dst, (OpNode VR128:$src1, (memopv4f32 addr:$src2)))]>;
def PDrm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, f128mem:$src2),
!strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
[(set VR128:$dst, (OpNode VR128:$src1, (memopv2f64 addr:$src2)))]>;
// Intrinsic operation, reg+reg.
def SSrr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2),
!strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
[(set VR128:$dst, (!nameconcat<Intrinsic>("int_x86_sse_",
!strconcat(OpcodeStr, "_ss")) VR128:$src1,
VR128:$src2))]> {
// int_x86_sse_xxx_ss
let isCommutable = Commutable;
}
def SDrr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2),
!strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
[(set VR128:$dst, (!nameconcat<Intrinsic>("int_x86_sse2_",
!strconcat(OpcodeStr, "_sd")) VR128:$src1,
VR128:$src2))]> {
// int_x86_sse2_xxx_sd
let isCommutable = Commutable;
}
// Intrinsic operation, reg+mem.
def SSrm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, ssmem:$src2),
!strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
[(set VR128:$dst, (!nameconcat<Intrinsic>("int_x86_sse_",
!strconcat(OpcodeStr, "_ss")) VR128:$src1,
sse_load_f32:$src2))]>;
// int_x86_sse_xxx_ss
def SDrm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, sdmem:$src2),
!strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
[(set VR128:$dst, (!nameconcat<Intrinsic>("int_x86_sse2_",
!strconcat(OpcodeStr, "_sd")) VR128:$src1,
sse_load_f64:$src2))]>;
// int_x86_sse2_xxx_sd
// Vector intrinsic operation, reg+reg.
def PSrr_Int : PSI<opc, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2),
!strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
[(set VR128:$dst, (!nameconcat<Intrinsic>("int_x86_sse_",
!strconcat(OpcodeStr, "_ps")) VR128:$src1,
VR128:$src2))]> {
// int_x86_sse_xxx_ps
let isCommutable = Commutable;
}
def PDrr_Int : PDI<opc, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2),
!strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
[(set VR128:$dst, (!nameconcat<Intrinsic>("int_x86_sse2_",
!strconcat(OpcodeStr, "_pd")) VR128:$src1,
VR128:$src2))]> {
// int_x86_sse2_xxx_pd
let isCommutable = Commutable;
}
// Vector intrinsic operation, reg+mem.
def PSrm_Int : PSI<opc, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, f128mem:$src2),
!strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
[(set VR128:$dst, (!nameconcat<Intrinsic>("int_x86_sse_",
!strconcat(OpcodeStr, "_ps")) VR128:$src1,
(memopv4f32 addr:$src2)))]>;
// int_x86_sse_xxx_ps
def PDrm_Int : PDI<opc, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, f128mem:$src2),
!strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
[(set VR128:$dst, (!nameconcat<Intrinsic>("int_x86_sse2_",
!strconcat(OpcodeStr, "_pd")) VR128:$src1,
(memopv2f64 addr:$src2)))]>;
// int_x86_sse2_xxx_pd
}
defm MAX : sse12_fp_binop_rm<0x5F, "max", X86fmax>;
defm MIN : sse12_fp_binop_rm<0x5D, "min", X86fmin>;
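// Unlike the basic binops above, these also instantiate the packed intrinsic
// forms (e.g. MAXPSrr_Int, MINPDrm_Int), since min/max have no plain C
// operator to map onto.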
//===----------------------------------------------------------------------===//
// SSE packed FP Instructions
//===----------------------------------------------------------------------===//

// Move Instructions
def MOVAPSrr : PSI<0x28, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"movaps\t{$src, $dst|$dst, $src}", []>;
let canFoldAsLoad = 1, isReMaterializable = 1 in
def MOVAPSrm : PSI<0x28, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
"movaps\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (alignedloadv4f32 addr:$src))]>;
def MOVAPSmr : PSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
"movaps\t{$src, $dst|$dst, $src}",
[(alignedstore (v4f32 VR128:$src), addr:$dst)]>;
def MOVUPSrr : PSI<0x10, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"movups\t{$src, $dst|$dst, $src}", []>;
let canFoldAsLoad = 1, isReMaterializable = 1 in
def MOVUPSrm : PSI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
"movups\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (loadv4f32 addr:$src))]>;
def MOVUPSmr : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
"movups\t{$src, $dst|$dst, $src}",
[(store (v4f32 VR128:$src), addr:$dst)]>;
// Intrinsic forms of MOVUPS load and store
let canFoldAsLoad = 1, isReMaterializable = 1 in
def MOVUPSrm_Int : PSI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
"movups\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse_loadu_ps addr:$src))]>;
def MOVUPSmr_Int : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
"movups\t{$src, $dst|$dst, $src}",
[(int_x86_sse_storeu_ps addr:$dst, VR128:$src)]>;
let AddedComplexity = 20 in {
def MOVLPSrm : PSI<0x12, MRMSrcMem,
(outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
"movlps\t{$src2, $dst|$dst, $src2}",
                   [(set VR128:$dst,
                     (movlp VR128:$src1,
                      (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))))]>;
def MOVHPSrm : PSI<0x16, MRMSrcMem,
(outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
"movhps\t{$src2, $dst|$dst, $src2}",
                   [(set VR128:$dst,
                     (movlhps VR128:$src1,
                      (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))))]>;
} // AddedComplexity
def : Pat<(movlhps VR128:$src1, (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
(MOVHPSrm (v4i32 VR128:$src1), addr:$src2)>;
def MOVLPSmr : PSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
"movlps\t{$src, $dst|$dst, $src}",
[(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
(iPTR 0))), addr:$dst)]>;
// v2f64 extract element 1 is always custom lowered to unpack high to low
// and extract element 0 so the non-store version isn't too horrible.
def MOVHPSmr : PSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
"movhps\t{$src, $dst|$dst, $src}",
[(store (f64 (vector_extract
(unpckh (bc_v2f64 (v4f32 VR128:$src)),
(undef)), (iPTR 0))), addr:$dst)]>;
let AddedComplexity = 20 in {
def MOVLHPSrr : PSI<0x16, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2),
"movlhps\t{$src2, $dst|$dst, $src2}",
[(set VR128:$dst,
(v4f32 (movlhps VR128:$src1, VR128:$src2)))]>;
def MOVHLPSrr : PSI<0x12, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2),
"movhlps\t{$src2, $dst|$dst, $src2}",
[(set VR128:$dst,
(v4f32 (movhlps VR128:$src1, VR128:$src2)))]>;
} // AddedComplexity
let AddedComplexity = 20 in {
def : Pat<(v4f32 (movddup VR128:$src, (undef))),
(MOVLHPSrr (v4f32 VR128:$src), (v4f32 VR128:$src))>;
def : Pat<(v2i64 (movddup VR128:$src, (undef))),
(MOVLHPSrr (v2i64 VR128:$src), (v2i64 VR128:$src))>;
}
// Arithmetic
/// sse1_fp_unop_rm - SSE1 unops come in both scalar and vector forms.
///
/// In addition, we also have a special variant of the scalar form here to
/// represent the associated intrinsic operation. This form is unlike the
/// plain scalar form, in that it takes an entire vector (instead of a
/// scalar) and leaves the top elements undefined.
///
/// And, we have a special variant form for a full-vector intrinsic form.
///