//====- X86InstrSSE.td - Describe the X86 Instruction Set --*- tablegen -*-===//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//===----------------------------------------------------------------------===//
//
// This file describes the X86 SSE instruction set, defining the instructions,
// and properties of the instructions which are needed for code generation,
// machine code emission, and analysis.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// SSE specific DAG Nodes.
//===----------------------------------------------------------------------===//

def SDTX86FPShiftOp : SDTypeProfile<1, 2, [ SDTCisSameAs<0, 1>,
                                            SDTCisFP<0>, SDTCisInt<2> ]>;
def SDTX86VFCMP : SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisSameAs<1, 2>,
                                       SDTCisFP<1>, SDTCisVT<3, i8>]>;
def X86fmin    : SDNode<"X86ISD::FMIN",      SDTFPBinOp>;
def X86fmax    : SDNode<"X86ISD::FMAX",      SDTFPBinOp>;
def X86fand    : SDNode<"X86ISD::FAND",      SDTFPBinOp,
                        [SDNPCommutative, SDNPAssociative]>;
def X86for     : SDNode<"X86ISD::FOR",       SDTFPBinOp,
                        [SDNPCommutative, SDNPAssociative]>;
def X86fxor    : SDNode<"X86ISD::FXOR",      SDTFPBinOp,
                        [SDNPCommutative, SDNPAssociative]>;
def X86frsqrt  : SDNode<"X86ISD::FRSQRT",    SDTFPUnaryOp>;
def X86frcp    : SDNode<"X86ISD::FRCP",      SDTFPUnaryOp>;
def X86fsrl    : SDNode<"X86ISD::FSRL",      SDTX86FPShiftOp>;
def X86comi    : SDNode<"X86ISD::COMI",      SDTX86CmpTest>;
def X86ucomi   : SDNode<"X86ISD::UCOMI",     SDTX86CmpTest>;
def X86pshufb  : SDNode<"X86ISD::PSHUFB",
                 SDTypeProfile<1, 2, [SDTCisVT<0, v16i8>, SDTCisSameAs<0,1>,
                                      SDTCisSameAs<0,2>]>>;
def X86pextrb  : SDNode<"X86ISD::PEXTRB",
                 SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<2>]>>;
def X86pextrw  : SDNode<"X86ISD::PEXTRW",
                 SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<2>]>>;
def X86pinsrb  : SDNode<"X86ISD::PINSRB",
                 SDTypeProfile<1, 3, [SDTCisVT<0, v16i8>, SDTCisSameAs<0,1>,
                                      SDTCisVT<2, i32>, SDTCisPtrTy<3>]>>;
def X86pinsrw  : SDNode<"X86ISD::PINSRW",
                 SDTypeProfile<1, 3, [SDTCisVT<0, v8i16>, SDTCisSameAs<0,1>,
                                      SDTCisVT<2, i32>, SDTCisPtrTy<3>]>>;
def X86insrtps : SDNode<"X86ISD::INSERTPS",
                 SDTypeProfile<1, 3, [SDTCisVT<0, v4f32>, SDTCisSameAs<0,1>,
                                      SDTCisVT<2, v4f32>, SDTCisPtrTy<3>]>>;
def X86vzmovl  : SDNode<"X86ISD::VZEXT_MOVL",
                 SDTypeProfile<1, 1, [SDTCisSameAs<0,1>]>>;
def X86vzload  : SDNode<"X86ISD::VZEXT_LOAD", SDTLoad,
                        [SDNPHasChain, SDNPMayLoad]>;
def X86vshl    : SDNode<"X86ISD::VSHL",      SDTIntShiftOp>;
def X86vshr    : SDNode<"X86ISD::VSRL",      SDTIntShiftOp>;
def X86cmpps   : SDNode<"X86ISD::CMPPS",     SDTX86VFCMP>;
def X86cmppd   : SDNode<"X86ISD::CMPPD",     SDTX86VFCMP>;
def X86pcmpeqb : SDNode<"X86ISD::PCMPEQB", SDTIntBinOp, [SDNPCommutative]>;
def X86pcmpeqw : SDNode<"X86ISD::PCMPEQW", SDTIntBinOp, [SDNPCommutative]>;
def X86pcmpeqd : SDNode<"X86ISD::PCMPEQD", SDTIntBinOp, [SDNPCommutative]>;
def X86pcmpeqq : SDNode<"X86ISD::PCMPEQQ", SDTIntBinOp, [SDNPCommutative]>;
def X86pcmpgtb : SDNode<"X86ISD::PCMPGTB", SDTIntBinOp>;
def X86pcmpgtw : SDNode<"X86ISD::PCMPGTW", SDTIntBinOp>;
def X86pcmpgtd : SDNode<"X86ISD::PCMPGTD", SDTIntBinOp>;
def X86pcmpgtq : SDNode<"X86ISD::PCMPGTQ", SDTIntBinOp>;
def SDTX86CmpPTest : SDTypeProfile<1, 2, [SDTCisVT<0, i32>,
                                          SDTCisVT<1, v4f32>,
                                          SDTCisVT<2, v4f32>]>;
def X86ptest   : SDNode<"X86ISD::PTEST", SDTX86CmpPTest>;

//===----------------------------------------------------------------------===//
// SSE Complex Patterns
//===----------------------------------------------------------------------===//

// These are 'extloads' from a scalar to the low element of a vector, zeroing
// the top elements.  These are used for the SSE 'ss' and 'sd' instruction
// forms.
def sse_load_f32 : ComplexPattern<v4f32, 5, "SelectScalarSSELoad", [],
                                  [SDNPHasChain, SDNPMayLoad]>;
def sse_load_f64 : ComplexPattern<v2f64, 5, "SelectScalarSSELoad", [],
                                  [SDNPHasChain, SDNPMayLoad]>;
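
// The ssmem/sdmem operands below and these complex patterns are passed to
// sse12_fp_scalar_int (as memopr/mem_cpat), letting the scalar intrinsic
// forms fold such a scalar load into the low vector element.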

def ssmem : Operand<v4f32> {
  let PrintMethod = "printf32mem";
  let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, i8imm);
  let ParserMatchClass = X86MemAsmOperand;
}
def sdmem : Operand<v2f64> {
  let PrintMethod = "printf64mem";
  let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, i8imm);
  let ParserMatchClass = X86MemAsmOperand;
}

//===----------------------------------------------------------------------===//
// SSE pattern fragments
//===----------------------------------------------------------------------===//

def loadv4f32    : PatFrag<(ops node:$ptr), (v4f32 (load node:$ptr))>;
def loadv2f64    : PatFrag<(ops node:$ptr), (v2f64 (load node:$ptr))>;
def loadv4i32    : PatFrag<(ops node:$ptr), (v4i32 (load node:$ptr))>;
def loadv2i64    : PatFrag<(ops node:$ptr), (v2i64 (load node:$ptr))>;
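
// Note: the loadv* fragments above place no alignment requirement on the
// access; the aligned* and memop fragments below add the 16-byte alignment
// checks that most SSE memory forms expect.
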
// Like 'store', but always requires vector alignment.
def alignedstore : PatFrag<(ops node:$val, node:$ptr),
                           (store node:$val, node:$ptr), [{
  return cast<StoreSDNode>(N)->getAlignment() >= 16;
}]>;

// Like 'load', but always requires vector alignment.
def alignedload : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return cast<LoadSDNode>(N)->getAlignment() >= 16;
}]>;

def alignedloadfsf32 : PatFrag<(ops node:$ptr),
                               (f32 (alignedload node:$ptr))>;
def alignedloadfsf64 : PatFrag<(ops node:$ptr),
                               (f64 (alignedload node:$ptr))>;
def alignedloadv4f32 : PatFrag<(ops node:$ptr),
                               (v4f32 (alignedload node:$ptr))>;
def alignedloadv2f64 : PatFrag<(ops node:$ptr),
                               (v2f64 (alignedload node:$ptr))>;
def alignedloadv4i32 : PatFrag<(ops node:$ptr),
                               (v4i32 (alignedload node:$ptr))>;
def alignedloadv2i64 : PatFrag<(ops node:$ptr),
                               (v2i64 (alignedload node:$ptr))>;

// Like 'load', but uses special alignment checks suitable for use in
// memory operands in most SSE instructions, which are required to
// be naturally aligned on some targets but not on others.  If the subtarget
// allows unaligned accesses, match any load, though this may require
// setting a feature bit in the processor (on startup, for example).
// Opteron 10h and later implement such a feature.
def memop : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return    Subtarget->hasVectorUAMem()
         || cast<LoadSDNode>(N)->getAlignment() >= 16;
}]>;

def memopfsf32 : PatFrag<(ops node:$ptr), (f32   (memop node:$ptr))>;
def memopfsf64 : PatFrag<(ops node:$ptr), (f64   (memop node:$ptr))>;
def memopv4f32 : PatFrag<(ops node:$ptr), (v4f32 (memop node:$ptr))>;
def memopv2f64 : PatFrag<(ops node:$ptr), (v2f64 (memop node:$ptr))>;
def memopv4i32 : PatFrag<(ops node:$ptr), (v4i32 (memop node:$ptr))>;
def memopv2i64 : PatFrag<(ops node:$ptr), (v2i64 (memop node:$ptr))>;
def memopv16i8 : PatFrag<(ops node:$ptr), (v16i8 (memop node:$ptr))>;
// SSSE3 uses MMX registers for some instructions. They aren't aligned on a
// 16-byte boundary.
// FIXME: 8 byte alignment for mmx reads is not required
def memop64 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return cast<LoadSDNode>(N)->getAlignment() >= 8;
}]>;

def memopv8i8  : PatFrag<(ops node:$ptr), (v8i8  (memop64 node:$ptr))>;
def memopv4i16 : PatFrag<(ops node:$ptr), (v4i16 (memop64 node:$ptr))>;
def memopv8i16 : PatFrag<(ops node:$ptr), (v8i16 (memop64 node:$ptr))>;
def memopv2i32 : PatFrag<(ops node:$ptr), (v2i32 (memop64 node:$ptr))>;

// MOVNT Support
// Like 'store', but requires the non-temporal bit to be set
def nontemporalstore : PatFrag<(ops node:$val, node:$ptr),
                           (st node:$val, node:$ptr), [{
  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
    return ST->isNonTemporal();
  return false;
}]>;

def alignednontemporalstore : PatFrag<(ops node:$val, node:$ptr),
			           (st node:$val, node:$ptr), [{
  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
    return ST->isNonTemporal() && !ST->isTruncatingStore() &&
           ST->getAddressingMode() == ISD::UNINDEXED &&
           ST->getAlignment() >= 16;
  return false;
}]>;

def unalignednontemporalstore : PatFrag<(ops node:$val, node:$ptr),
			           (st node:$val, node:$ptr), [{
  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
    return ST->isNonTemporal() &&
           ST->getAlignment() < 16;
  return false;
}]>;

def bc_v4f32 : PatFrag<(ops node:$in), (v4f32 (bitconvert node:$in))>;
def bc_v2f64 : PatFrag<(ops node:$in), (v2f64 (bitconvert node:$in))>;
def bc_v16i8 : PatFrag<(ops node:$in), (v16i8 (bitconvert node:$in))>;
def bc_v8i16 : PatFrag<(ops node:$in), (v8i16 (bitconvert node:$in))>;
def bc_v4i32 : PatFrag<(ops node:$in), (v4i32 (bitconvert node:$in))>;
def bc_v2i64 : PatFrag<(ops node:$in), (v2i64 (bitconvert node:$in))>;

def vzmovl_v2i64 : PatFrag<(ops node:$src),
                           (bitconvert (v2i64 (X86vzmovl
                             (v2i64 (scalar_to_vector (loadi64 node:$src))))))>;
def vzmovl_v4i32 : PatFrag<(ops node:$src),
                           (bitconvert (v4i32 (X86vzmovl
                             (v4i32 (scalar_to_vector (loadi32 node:$src))))))>;

def vzload_v2i64 : PatFrag<(ops node:$src),
                           (bitconvert (v2i64 (X86vzload node:$src)))>;


def fp32imm0 : PatLeaf<(f32 fpimm), [{
  return N->isExactlyValue(+0.0);
}]>;
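// fp32imm0 matches only an f32 immediate that is exactly +0.0; it is used by
// the FsFLD0SS alias below, which materializes the zero via a pxor idiom.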

// BYTE_imm - Transform bit immediates into byte immediates.
def BYTE_imm  : SDNodeXForm<imm, [{
  // Transformation function: imm >> 3
  return getI32Imm(N->getZExtValue() >> 3);
}]>;

// SHUFFLE_get_shuf_imm xform function: convert vector_shuffle mask to PSHUF*,
// SHUFP* etc. imm.
def SHUFFLE_get_shuf_imm : SDNodeXForm<vector_shuffle, [{
  return getI8Imm(X86::getShuffleSHUFImmediate(N));
}]>;

// SHUFFLE_get_pshufhw_imm xform function: convert vector_shuffle mask to
// PSHUFHW imm.
def SHUFFLE_get_pshufhw_imm : SDNodeXForm<vector_shuffle, [{
  return getI8Imm(X86::getShufflePSHUFHWImmediate(N));
}]>;

// SHUFFLE_get_pshuflw_imm xform function: convert vector_shuffle mask to
// PSHUFLW imm.
def SHUFFLE_get_pshuflw_imm : SDNodeXForm<vector_shuffle, [{
  return getI8Imm(X86::getShufflePSHUFLWImmediate(N));
}]>;

// SHUFFLE_get_palign_imm xform function: convert vector_shuffle mask to
// a PALIGNR imm.
def SHUFFLE_get_palign_imm : SDNodeXForm<vector_shuffle, [{
  return getI8Imm(X86::getShufflePALIGNRImmediate(N));
}]>;
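// Each SHUFFLE_get_* transform above is attached as the extra argument of one
// of the shuffle fragments below (e.g. pshufd uses SHUFFLE_get_shuf_imm), so
// the matched shuffle mask is converted into the instruction's 8-bit
// immediate during selection.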

def splat_lo : PatFrag<(ops node:$lhs, node:$rhs),
                       (vector_shuffle node:$lhs, node:$rhs), [{
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
  return SVOp->isSplat() && SVOp->getSplatIndex() == 0;
}]>;

def movddup : PatFrag<(ops node:$lhs, node:$rhs),
                      (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVDDUPMask(cast<ShuffleVectorSDNode>(N));
}]>;

def movhlps : PatFrag<(ops node:$lhs, node:$rhs),
                      (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVHLPSMask(cast<ShuffleVectorSDNode>(N));
}]>;

def movhlps_undef : PatFrag<(ops node:$lhs, node:$rhs),
                            (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVHLPS_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
}]>;

def movlhps : PatFrag<(ops node:$lhs, node:$rhs),
                      (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVLHPSMask(cast<ShuffleVectorSDNode>(N));
}]>;

def movlp : PatFrag<(ops node:$lhs, node:$rhs),
                    (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVLPMask(cast<ShuffleVectorSDNode>(N));
}]>;

def movl : PatFrag<(ops node:$lhs, node:$rhs),
                   (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVLMask(cast<ShuffleVectorSDNode>(N));
}]>;

def movshdup : PatFrag<(ops node:$lhs, node:$rhs),
                       (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVSHDUPMask(cast<ShuffleVectorSDNode>(N));
}]>;

def movsldup : PatFrag<(ops node:$lhs, node:$rhs),
                       (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVSLDUPMask(cast<ShuffleVectorSDNode>(N));
}]>;

def unpckl : PatFrag<(ops node:$lhs, node:$rhs),
                     (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isUNPCKLMask(cast<ShuffleVectorSDNode>(N));
}]>;

def unpckh : PatFrag<(ops node:$lhs, node:$rhs),
                     (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isUNPCKHMask(cast<ShuffleVectorSDNode>(N));
}]>;

def unpckl_undef : PatFrag<(ops node:$lhs, node:$rhs),
                           (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isUNPCKL_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
}]>;

def unpckh_undef : PatFrag<(ops node:$lhs, node:$rhs),
                           (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isUNPCKH_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
}]>;

def pshufd : PatFrag<(ops node:$lhs, node:$rhs),
                     (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isPSHUFDMask(cast<ShuffleVectorSDNode>(N));
}], SHUFFLE_get_shuf_imm>;

def shufp : PatFrag<(ops node:$lhs, node:$rhs),
                    (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isSHUFPMask(cast<ShuffleVectorSDNode>(N));
}], SHUFFLE_get_shuf_imm>;

def pshufhw : PatFrag<(ops node:$lhs, node:$rhs),
                      (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isPSHUFHWMask(cast<ShuffleVectorSDNode>(N));
}], SHUFFLE_get_pshufhw_imm>;
def pshuflw : PatFrag<(ops node:$lhs, node:$rhs),
                      (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isPSHUFLWMask(cast<ShuffleVectorSDNode>(N));
}], SHUFFLE_get_pshuflw_imm>;
def palign : PatFrag<(ops node:$lhs, node:$rhs),
                     (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isPALIGNRMask(cast<ShuffleVectorSDNode>(N));
}], SHUFFLE_get_palign_imm>;

//===----------------------------------------------------------------------===//
// SSE scalar FP Instructions
//===----------------------------------------------------------------------===//
// CMOV* - Used to implement the SSE SELECT DAG operation.  Expanded after
// instruction selection into a branch sequence.
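// usesCustomInserter routes these pseudos through EmitInstrWithCustomInserter,
// which builds the conditional-branch/PHI sequence that implements the select.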
let Uses = [EFLAGS], usesCustomInserter = 1 in {
  def CMOV_FR32 : I<0, Pseudo,
                    (outs FR32:$dst), (ins FR32:$t, FR32:$f, i8imm:$cond),
                    "#CMOV_FR32 PSEUDO!",
                    [(set FR32:$dst, (X86cmov FR32:$t, FR32:$f, imm:$cond,
                                              EFLAGS))]>;
  def CMOV_FR64 : I<0, Pseudo,
                    (outs FR64:$dst), (ins FR64:$t, FR64:$f, i8imm:$cond),
                    "#CMOV_FR64 PSEUDO!",
                    [(set FR64:$dst, (X86cmov FR64:$t, FR64:$f, imm:$cond,
                                              EFLAGS))]>;
  def CMOV_V4F32 : I<0, Pseudo,
                    (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
                    "#CMOV_V4F32 PSEUDO!",
                    [(set VR128:$dst,
                      (v4f32 (X86cmov VR128:$t, VR128:$f, imm:$cond,
                                      EFLAGS)))]>;
  def CMOV_V2F64 : I<0, Pseudo,
                    (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
                    "#CMOV_V2F64 PSEUDO!",
                    [(set VR128:$dst,
                      (v2f64 (X86cmov VR128:$t, VR128:$f, imm:$cond,
                                      EFLAGS)))]>;
  def CMOV_V2I64 : I<0, Pseudo,
                    (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
                    "#CMOV_V2I64 PSEUDO!",
                    [(set VR128:$dst,
                      (v2i64 (X86cmov VR128:$t, VR128:$f, imm:$cond,
                                      EFLAGS)))]>;
}

//===----------------------------------------------------------------------===//
// SSE 1 & 2 Instructions Classes
//===----------------------------------------------------------------------===//

/// sse12_fp_scalar - SSE 1 & 2 scalar instructions class
multiclass sse12_fp_scalar<bits<8> opc, string OpcodeStr, SDNode OpNode,
                           RegisterClass RC, X86MemOperand x86memop> {
  let isCommutable = 1 in {
    def rr : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
                OpcodeStr, [(set RC:$dst, (OpNode RC:$src1, RC:$src2))]>;
  }
  def rm : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
              OpcodeStr, [(set RC:$dst, (OpNode RC:$src1, (load addr:$src2)))]>;
}
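
// For example, basic_sse12_fp_binop_rm below instantiates this multiclass as
//   defm SS : sse12_fp_scalar<opc, ..., OpNode, FR32, f32mem>, XS;
// so 'defm ADD : basic_sse12_fp_binop_rm<0x58, "add", fadd>' produces ADDSSrr
// and ADDSSrm from these two definitions.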

/// sse12_fp_scalar_int - SSE 1 & 2 scalar instructions intrinsics class
multiclass sse12_fp_scalar_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
                               string asm, string SSEVer, string FPSizeStr,
                               Operand memopr, ComplexPattern mem_cpat> {
  def rr_Int : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
                  asm, [(set RC:$dst, (
                                !nameconcat<Intrinsic>("int_x86_sse",
                                !strconcat(SSEVer, !strconcat("_",
                                !strconcat(OpcodeStr, FPSizeStr))))
                         RC:$src1, RC:$src2))]>;
  def rm_Int : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, memopr:$src2),
                  asm, [(set RC:$dst, (
                                !nameconcat<Intrinsic>("int_x86_sse",
                                !strconcat(SSEVer, !strconcat("_",
                                !strconcat(OpcodeStr, FPSizeStr))))
                         RC:$src1, mem_cpat:$src2))]>;
}

/// sse12_fp_packed - SSE 1 & 2 packed instructions class
multiclass sse12_fp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
                           RegisterClass RC, ValueType vt,
                           X86MemOperand x86memop, PatFrag mem_frag,
                           Domain d, bit MayLoad = 0> {
  let isCommutable = 1 in
    def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
                OpcodeStr, [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))],d>;
  let mayLoad = MayLoad in
    def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
                OpcodeStr, [(set RC:$dst, (OpNode RC:$src1,
                                                  (mem_frag addr:$src2)))],d>;
}

/// sse12_fp_packed_logical_rm - SSE 1 & 2 packed instructions class
multiclass sse12_fp_packed_logical_rm<bits<8> opc, RegisterClass RC, Domain d,
                                      string OpcodeStr, X86MemOperand x86memop,
                                      list<dag> pat_rr, list<dag> pat_rm> {
  let isCommutable = 1 in
    def rr : PI<opc, MRMSrcReg, (outs RC:$dst),
                (ins RC:$src1, RC:$src2), OpcodeStr, pat_rr, d>;
  def rm : PI<opc, MRMSrcMem, (outs RC:$dst),
                (ins RC:$src1, x86memop:$src2), OpcodeStr, pat_rm, d>;
}

/// sse12_fp_packed_int - SSE 1 & 2 packed instructions intrinsics class
multiclass sse12_fp_packed_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
                               string asm, string SSEVer, string FPSizeStr,
                               X86MemOperand x86memop, PatFrag mem_frag,
                               Domain d> {
  def rr_Int : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
                  asm, [(set RC:$dst, (
                                !nameconcat<Intrinsic>("int_x86_sse",
                                !strconcat(SSEVer, !strconcat("_",
                                !strconcat(OpcodeStr, FPSizeStr))))
                         RC:$src1, RC:$src2))], d>;
  def rm_Int : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
                  asm, [(set RC:$dst, (
                                !nameconcat<Intrinsic>("int_x86_sse",
                                !strconcat(SSEVer, !strconcat("_",
                                !strconcat(OpcodeStr, FPSizeStr))))
                         RC:$src1, (mem_frag addr:$src2)))], d>;
}
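
// sse12_fp_binop_rm below uses this multiclass for the packed intrinsic
// forms, passing "_ps"/"_pd" as FPSizeStr so that !nameconcat resolves to an
// intrinsic such as int_x86_sse_min_ps or int_x86_sse2_min_pd (taking "min"
// as an example OpcodeStr).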

//===----------------------------------------------------------------------===//
// SSE1 Instructions
//===----------------------------------------------------------------------===//

// Conversion Instructions

// Match intrinsics which expect XMM operand(s).
def CVTSS2SIrr: SSI<0x2D, MRMSrcReg, (outs GR32:$dst), (ins FR32:$src),
                    "cvtss2si{l}\t{$src, $dst|$dst, $src}", []>;
def CVTSS2SIrm: SSI<0x2D, MRMSrcMem, (outs GR32:$dst), (ins f32mem:$src),
                    "cvtss2si{l}\t{$src, $dst|$dst, $src}", []>;

def CVTDQ2PSrr : PSI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                     "cvtdq2ps\t{$src, $dst|$dst, $src}", []>;
def CVTDQ2PSrm : PSI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                     "cvtdq2ps\t{$src, $dst|$dst, $src}", []>;

// Aliases for intrinsics
def Int_CVTTSS2SIrr : SSI<0x2C, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
                          "cvttss2si\t{$src, $dst|$dst, $src}",
                          [(set GR32:$dst,
                            (int_x86_sse_cvttss2si VR128:$src))]>;
def Int_CVTTSS2SIrm : SSI<0x2C, MRMSrcMem, (outs GR32:$dst), (ins f32mem:$src),
                          "cvttss2si\t{$src, $dst|$dst, $src}",
                          [(set GR32:$dst,
                            (int_x86_sse_cvttss2si(load addr:$src)))]>;

let Constraints = "$src1 = $dst" in {
  def Int_CVTSI2SSrr : SSI<0x2A, MRMSrcReg,
                           (outs VR128:$dst), (ins VR128:$src1, GR32:$src2),
                           "cvtsi2ss\t{$src2, $dst|$dst, $src2}",
                           [(set VR128:$dst, (int_x86_sse_cvtsi2ss VR128:$src1,
                                              GR32:$src2))]>;
  def Int_CVTSI2SSrm : SSI<0x2A, MRMSrcMem,
                           (outs VR128:$dst), (ins VR128:$src1, i32mem:$src2),
                           "cvtsi2ss\t{$src2, $dst|$dst, $src2}",
                           [(set VR128:$dst, (int_x86_sse_cvtsi2ss VR128:$src1,
                                              (loadi32 addr:$src2)))]>;
}

// Compare Instructions
let Defs = [EFLAGS] in {
def COMISSrr: PSI<0x2F, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
                  "comiss\t{$src2, $src1|$src1, $src2}", []>;
def COMISSrm: PSI<0x2F, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
                  "comiss\t{$src2, $src1|$src1, $src2}", []>;
} // Defs = [EFLAGS]

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Move Instructions
//===----------------------------------------------------------------------===//

// Move Instructions. Register-to-register movss/movsd is not used for FR32/64
// register copies because it's a partial register update; FsMOVAPSrr/FsMOVAPDrr
// is used instead. Register-to-register movss/movsd is not modeled as an
// INSERT_SUBREG because INSERT_SUBREG requires that the insert be implementable
// in terms of a copy, and just mentioned, we don't use movss/movsd for copies.
let Constraints = "$src1 = $dst" in {
def MOVSSrr : SSI<0x10, MRMSrcReg,
                  (outs VR128:$dst), (ins VR128:$src1, FR32:$src2),
                  "movss\t{$src2, $dst|$dst, $src2}",
                  [(set (v4f32 VR128:$dst),
                        (movl VR128:$src1, (scalar_to_vector FR32:$src2)))]>;
def MOVSDrr : SDI<0x10, MRMSrcReg,
                  (outs VR128:$dst), (ins VR128:$src1, FR64:$src2),
                  "movsd\t{$src2, $dst|$dst, $src2}",
                  [(set (v2f64 VR128:$dst),
                        (movl VR128:$src1, (scalar_to_vector FR64:$src2)))]>;
}

// Loading from memory automatically zeroing upper bits.
let canFoldAsLoad = 1, isReMaterializable = 1 in {
def MOVSSrm : SSI<0x10, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src),
                  "movss\t{$src, $dst|$dst, $src}",
                  [(set FR32:$dst, (loadf32 addr:$src))]>;
let AddedComplexity = 20 in
def MOVSDrm : SDI<0x10, MRMSrcMem, (outs FR64:$dst), (ins f64mem:$src),
                  "movsd\t{$src, $dst|$dst, $src}",
                  [(set FR64:$dst, (loadf64 addr:$src))]>;
}

let AddedComplexity = 15 in {
// Extract the low 32-bit value from one vector and insert it into another.
def : Pat<(v4f32 (movl VR128:$src1, VR128:$src2)),
          (MOVSSrr (v4f32 VR128:$src1),
                   (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_ss))>;
// Extract the low 64-bit value from one vector and insert it into another.
def : Pat<(v2f64 (movl VR128:$src1, VR128:$src2)),
          (MOVSDrr (v2f64 VR128:$src1),
                   (EXTRACT_SUBREG (v2f64 VR128:$src2), sub_sd))>;
}

// Implicitly promote a 32-bit scalar to a vector.
def : Pat<(v4f32 (scalar_to_vector FR32:$src)),
          (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FR32:$src, sub_ss)>;
// Implicitly promote a 64-bit scalar to a vector.
def : Pat<(v2f64 (scalar_to_vector FR64:$src)),
          (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FR64:$src, sub_sd)>;

let AddedComplexity = 20 in {
// MOVSSrm zeros the high parts of the register; represent this
// with SUBREG_TO_REG.
def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
          (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
          (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
          (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
// MOVSDrm zeros the high parts of the register; represent this
// with SUBREG_TO_REG.
def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
          (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
          (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
          (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
          (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
def : Pat<(v2f64 (X86vzload addr:$src)),
          (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
}

// Store scalar value to memory.
def MOVSSmr : SSI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src),
                  "movss\t{$src, $dst|$dst, $src}",
def MOVSDmr : SDI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
                  "movsd\t{$src, $dst|$dst, $src}",
                  [(store FR64:$src, addr:$dst)]>;
// Extract and store.
def : Pat<(store (f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
                 addr:$dst),
          (MOVSSmr addr:$dst,
                   (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
def : Pat<(store (f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
                 addr:$dst),
          (MOVSDmr addr:$dst,
                   (EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd))>;

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Conversion Instructions
//===----------------------------------------------------------------------===//

def CVTTSS2SIrr : SSI<0x2C, MRMSrcReg, (outs GR32:$dst), (ins FR32:$src),
                      "cvttss2si\t{$src, $dst|$dst, $src}",
                      [(set GR32:$dst, (fp_to_sint FR32:$src))]>;
def CVTTSS2SIrm : SSI<0x2C, MRMSrcMem, (outs GR32:$dst), (ins f32mem:$src),
                      "cvttss2si\t{$src, $dst|$dst, $src}",
                      [(set GR32:$dst, (fp_to_sint (loadf32 addr:$src)))]>;
def CVTTSD2SIrr : SDI<0x2C, MRMSrcReg, (outs GR32:$dst), (ins FR64:$src),
                      "cvttsd2si\t{$src, $dst|$dst, $src}",
                      [(set GR32:$dst, (fp_to_sint FR64:$src))]>;
def CVTTSD2SIrm : SDI<0x2C, MRMSrcMem, (outs GR32:$dst), (ins f64mem:$src),
                      "cvttsd2si\t{$src, $dst|$dst, $src}",
                      [(set GR32:$dst, (fp_to_sint (loadf64 addr:$src)))]>;

def CVTSI2SSrr  : SSI<0x2A, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
                      "cvtsi2ss\t{$src, $dst|$dst, $src}",
                      [(set FR32:$dst, (sint_to_fp GR32:$src))]>;
def CVTSI2SSrm  : SSI<0x2A, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
                      "cvtsi2ss\t{$src, $dst|$dst, $src}",
                      [(set FR32:$dst, (sint_to_fp (loadi32 addr:$src)))]>;
def CVTSI2SDrr  : SDI<0x2A, MRMSrcReg, (outs FR64:$dst), (ins GR32:$src),
                      "cvtsi2sd\t{$src, $dst|$dst, $src}",
                      [(set FR64:$dst, (sint_to_fp GR32:$src))]>;
def CVTSI2SDrm  : SDI<0x2A, MRMSrcMem, (outs FR64:$dst), (ins i32mem:$src),
                      "cvtsi2sd\t{$src, $dst|$dst, $src}",
                      [(set FR64:$dst, (sint_to_fp (loadi32 addr:$src)))]>;

// Match intrinsics which expect XMM operand(s).
def Int_CVTSS2SIrr : SSI<0x2D, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
                         "cvtss2si\t{$src, $dst|$dst, $src}",
                         [(set GR32:$dst, (int_x86_sse_cvtss2si VR128:$src))]>;
def Int_CVTSS2SIrm : SSI<0x2D, MRMSrcMem, (outs GR32:$dst), (ins f32mem:$src),
                         "cvtss2si\t{$src, $dst|$dst, $src}",
                         [(set GR32:$dst, (int_x86_sse_cvtss2si
                                           (load addr:$src)))]>;
def Int_CVTSD2SIrr : SDI<0x2D, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
                         "cvtsd2si\t{$src, $dst|$dst, $src}",
                         [(set GR32:$dst, (int_x86_sse2_cvtsd2si VR128:$src))]>;
def Int_CVTSD2SIrm : SDI<0x2D, MRMSrcMem, (outs GR32:$dst), (ins f128mem:$src),
                         "cvtsd2si\t{$src, $dst|$dst, $src}",
                         [(set GR32:$dst, (int_x86_sse2_cvtsd2si
                                           (load addr:$src)))]>;
// Match intrinsics which expect MM and XMM operand(s).
def Int_CVTPS2PIrr : PSI<0x2D, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src),
                         "cvtps2pi\t{$src, $dst|$dst, $src}",
                         [(set VR64:$dst, (int_x86_sse_cvtps2pi VR128:$src))]>;
def Int_CVTPS2PIrm : PSI<0x2D, MRMSrcMem, (outs VR64:$dst), (ins f64mem:$src),
                         "cvtps2pi\t{$src, $dst|$dst, $src}",
                         [(set VR64:$dst, (int_x86_sse_cvtps2pi
                                           (load addr:$src)))]>;
def Int_CVTPD2PIrr : PDI<0x2D, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src),
                         "cvtpd2pi\t{$src, $dst|$dst, $src}",
                         [(set VR64:$dst, (int_x86_sse_cvtpd2pi VR128:$src))]>;
def Int_CVTPD2PIrm : PDI<0x2D, MRMSrcMem, (outs VR64:$dst), (ins f128mem:$src),
                         "cvtpd2pi\t{$src, $dst|$dst, $src}",
                         [(set VR64:$dst, (int_x86_sse_cvtpd2pi
                                           (memop addr:$src)))]>;

// Match intrinsics which expect MM and XMM operand(s).
def Int_CVTTPS2PIrr: PSI<0x2C, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src),
                         "cvttps2pi\t{$src, $dst|$dst, $src}",
                         [(set VR64:$dst, (int_x86_sse_cvttps2pi VR128:$src))]>;
def Int_CVTTPS2PIrm: PSI<0x2C, MRMSrcMem, (outs VR64:$dst), (ins f64mem:$src),
                         "cvttps2pi\t{$src, $dst|$dst, $src}",
                         [(set VR64:$dst, (int_x86_sse_cvttps2pi
                                           (load addr:$src)))]>;
def Int_CVTTPD2PIrr: PDI<0x2C, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src),
                         "cvttpd2pi\t{$src, $dst|$dst, $src}",
                         [(set VR64:$dst, (int_x86_sse_cvttpd2pi VR128:$src))]>;
def Int_CVTTPD2PIrm: PDI<0x2C, MRMSrcMem, (outs VR64:$dst), (ins f128mem:$src),
                         "cvttpd2pi\t{$src, $dst|$dst, $src}",
                         [(set VR64:$dst, (int_x86_sse_cvttpd2pi
                                           (memop addr:$src)))]>;

let Constraints = "$src1 = $dst" in {
  def Int_CVTPI2PSrr : PSI<0x2A, MRMSrcReg,
                           (outs VR128:$dst), (ins VR128:$src1, VR64:$src2),
                        "cvtpi2ps\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst, (int_x86_sse_cvtpi2ps VR128:$src1,
                                           VR64:$src2))]>;
  def Int_CVTPI2PSrm : PSI<0x2A, MRMSrcMem,
                           (outs VR128:$dst), (ins VR128:$src1, i64mem:$src2),
                        "cvtpi2ps\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst, (int_x86_sse_cvtpi2ps VR128:$src1,
                                           (load addr:$src2)))]>;
}

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Compare Instructions
//===----------------------------------------------------------------------===//
let Constraints = "$src1 = $dst", neverHasSideEffects = 1 in {
  def CMPSSrr : SSIi8<0xC2, MRMSrcReg,
                    (outs FR32:$dst), (ins FR32:$src1, FR32:$src, SSECC:$cc),
                    "cmp${cc}ss\t{$src, $dst|$dst, $src}", []>;
  def CMPSSrm : SSIi8<0xC2, MRMSrcMem,
                    (outs FR32:$dst), (ins FR32:$src1, f32mem:$src, SSECC:$cc),
                    "cmp${cc}ss\t{$src, $dst|$dst, $src}", []>;
  def CMPSDrr : SDIi8<0xC2, MRMSrcReg,
                    (outs FR64:$dst), (ins FR64:$src1, FR64:$src, SSECC:$cc),
                    "cmp${cc}sd\t{$src, $dst|$dst, $src}", []>;
  let mayLoad = 1 in
  def CMPSDrm : SDIi8<0xC2, MRMSrcMem,
                    (outs FR64:$dst), (ins FR64:$src1, f64mem:$src, SSECC:$cc),
                    "cmp${cc}sd\t{$src, $dst|$dst, $src}", []>;

// Accept explicit immediate argument form instead of comparison code.
let isAsmParserOnly = 1 in {
  def CMPSSrr_alt : SSIi8<0xC2, MRMSrcReg,
                    (outs FR32:$dst), (ins FR32:$src1, FR32:$src, i8imm:$src2),
                    "cmpss\t{$src2, $src, $dst|$dst, $src, $src2}", []>;
  def CMPSSrm_alt : SSIi8<0xC2, MRMSrcMem,
                    (outs FR32:$dst), (ins FR32:$src1, f32mem:$src, i8imm:$src2),
                    "cmpss\t{$src2, $src, $dst|$dst, $src, $src2}", []>;

  def CMPSDrr_alt : SDIi8<0xC2, MRMSrcReg,
                    (outs FR64:$dst), (ins FR64:$src1, FR64:$src, i8imm:$src2),
                    "cmpsd\t{$src2, $src, $dst|$dst, $src, $src2}", []>;
  let mayLoad = 1 in
  def CMPSDrm_alt : SDIi8<0xC2, MRMSrcMem,
                    (outs FR64:$dst), (ins FR64:$src1, f64mem:$src, i8imm:$src2),
                    "cmpsd\t{$src2, $src, $dst|$dst, $src, $src2}", []>;
def UCOMISSrr: PSI<0x2E, MRMSrcReg, (outs), (ins FR32:$src1, FR32:$src2),
                   "ucomiss\t{$src2, $src1|$src1, $src2}",
                   [(set EFLAGS, (X86cmp FR32:$src1, FR32:$src2))]>;
def UCOMISSrm: PSI<0x2E, MRMSrcMem, (outs), (ins FR32:$src1, f32mem:$src2),
                   "ucomiss\t{$src2, $src1|$src1, $src2}",
                   [(set EFLAGS, (X86cmp FR32:$src1, (loadf32 addr:$src2)))]>;
def UCOMISDrr: PDI<0x2E, MRMSrcReg, (outs), (ins FR64:$src1, FR64:$src2),
                   "ucomisd\t{$src2, $src1|$src1, $src2}",
                   [(set EFLAGS, (X86cmp FR64:$src1, FR64:$src2))]>;
def UCOMISDrm: PDI<0x2E, MRMSrcMem, (outs), (ins FR64:$src1, f64mem:$src2),
                   "ucomisd\t{$src2, $src1|$src1, $src2}",
                   [(set EFLAGS, (X86cmp FR64:$src1, (loadf64 addr:$src2)))]>;

// Aliases to match intrinsics which expect XMM operand(s).
let Constraints = "$src1 = $dst" in {
  def Int_CMPSSrr : SSIi8<0xC2, MRMSrcReg,
                        (outs VR128:$dst),
                        (ins VR128:$src1, VR128:$src, SSECC:$cc),
                        "cmp${cc}ss\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse_cmp_ss
                                             VR128:$src1,
                                             VR128:$src, imm:$cc))]>;
  def Int_CMPSSrm : SSIi8<0xC2, MRMSrcMem,
                        (outs VR128:$dst),
                        (ins VR128:$src1, f32mem:$src, SSECC:$cc),
                        "cmp${cc}ss\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse_cmp_ss VR128:$src1,
                                           (load addr:$src), imm:$cc))]>;

  def Int_CMPSDrr : SDIi8<0xC2, MRMSrcReg,
                        (outs VR128:$dst),
                        (ins VR128:$src1, VR128:$src, SSECC:$cc),
                        "cmp${cc}sd\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse2_cmp_sd VR128:$src1,
                                           VR128:$src, imm:$cc))]>;
  def Int_CMPSDrm : SDIi8<0xC2, MRMSrcMem,
                        (outs VR128:$dst),
                        (ins VR128:$src1, f64mem:$src, SSECC:$cc),
                        "cmp${cc}sd\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse2_cmp_sd VR128:$src1,
                                           (load addr:$src), imm:$cc))]>;
}

def Int_UCOMISSrr: PSI<0x2E, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
                       "ucomiss\t{$src2, $src1|$src1, $src2}",
                       [(set EFLAGS, (X86ucomi (v4f32 VR128:$src1),
                                               VR128:$src2))]>;
def Int_UCOMISSrm: PSI<0x2E, MRMSrcMem, (outs),(ins VR128:$src1, f128mem:$src2),
                       "ucomiss\t{$src2, $src1|$src1, $src2}",
                       [(set EFLAGS, (X86ucomi (v4f32 VR128:$src1),
                                               (load addr:$src2)))]>;
def Int_UCOMISDrr: PDI<0x2E, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
                       "ucomisd\t{$src2, $src1|$src1, $src2}",
                       [(set EFLAGS, (X86ucomi (v2f64 VR128:$src1),
                                               VR128:$src2))]>;
def Int_UCOMISDrm: PDI<0x2E, MRMSrcMem, (outs),(ins VR128:$src1, f128mem:$src2),
                       "ucomisd\t{$src2, $src1|$src1, $src2}",
                       [(set EFLAGS, (X86ucomi (v2f64 VR128:$src1),
                                               (load addr:$src2)))]>;
def Int_COMISSrr: PSI<0x2F, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
                      "comiss\t{$src2, $src1|$src1, $src2}",
                      [(set EFLAGS, (X86comi (v4f32 VR128:$src1),
                                             VR128:$src2))]>;
def Int_COMISSrm: PSI<0x2F, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
                      "comiss\t{$src2, $src1|$src1, $src2}",
                      [(set EFLAGS, (X86comi (v4f32 VR128:$src1),
                                             (load addr:$src2)))]>;
def Int_COMISDrr: PDI<0x2F, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
                      "comisd\t{$src2, $src1|$src1, $src2}",
                      [(set EFLAGS, (X86comi (v2f64 VR128:$src1),
                                             VR128:$src2))]>;
def Int_COMISDrm: PDI<0x2F, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
                      "comisd\t{$src2, $src1|$src1, $src2}",
                      [(set EFLAGS, (X86comi (v2f64 VR128:$src1),
                                             (load addr:$src2)))]>;
// Aliases of packed SSE1 & SSE2 instructions for scalar use. These all have
// names that start with 'Fs'.

// Alias instructions that map fld0 to pxor for sse.
let isReMaterializable = 1, isAsCheapAsAMove = 1, isCodeGenOnly = 1,
    canFoldAsLoad = 1 in {
  // FIXME: Set encoding to pseudo!
def FsFLD0SS : I<0xEF, MRMInitReg, (outs FR32:$dst), (ins), "",
                 [(set FR32:$dst, fp32imm0)]>,
                 Requires<[HasSSE1]>, TB, OpSize;
def FsFLD0SD : I<0xEF, MRMInitReg, (outs FR64:$dst), (ins), "",
                 [(set FR64:$dst, fpimm0)]>,
               Requires<[HasSSE2]>, TB, OpSize;
}
// Alias instruction to do FR32 or FR64 reg-to-reg copy using movaps. Upper
// bits are disregarded.
let neverHasSideEffects = 1 in {
def FsMOVAPSrr : PSI<0x28, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
                     "movaps\t{$src, $dst|$dst, $src}", []>;
def FsMOVAPDrr : PDI<0x28, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
                     "movapd\t{$src, $dst|$dst, $src}", []>;
}
// Alias instruction to load FR32 or FR64 from f128mem using movaps. Upper
// bits are disregarded.
let canFoldAsLoad = 1, isReMaterializable = 1 in {
def FsMOVAPSrm : PSI<0x28, MRMSrcMem, (outs FR32:$dst), (ins f128mem:$src),
                     "movaps\t{$src, $dst|$dst, $src}",
                     [(set FR32:$dst, (alignedloadfsf32 addr:$src))]>;
def FsMOVAPDrm : PDI<0x28, MRMSrcMem, (outs FR64:$dst), (ins f128mem:$src),
                     "movapd\t{$src, $dst|$dst, $src}",
                     [(set FR64:$dst, (alignedloadfsf64 addr:$src))]>;
}
/// sse12_fp_alias_pack_logical - SSE 1 & 2 aliased packed FP logical ops
///
multiclass sse12_fp_alias_pack_logical<bits<8> opc, string OpcodeStr,
                                       SDNode OpNode, bit MayLoad = 0> {
  let isAsmParserOnly = 1 in {
    defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr,
                "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"), OpNode, FR32,
                f32, f128mem, memopfsf32, SSEPackedSingle, MayLoad>, VEX_4V;

    defm V#NAME#PD : sse12_fp_packed<opc, !strconcat(OpcodeStr,
                "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), OpNode, FR64,
                f64, f128mem, memopfsf64, SSEPackedDouble, MayLoad>, OpSize,
                VEX_4V;
  }

  let Constraints = "$src1 = $dst" in {
    defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr,
                "ps\t{$src2, $dst|$dst, $src2}"), OpNode, FR32, f32,
                f128mem, memopfsf32, SSEPackedSingle, MayLoad>, TB;
    defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr,
                "pd\t{$src2, $dst|$dst, $src2}"), OpNode, FR64, f64,
                f128mem, memopfsf64, SSEPackedDouble, MayLoad>, TB, OpSize;
  }
}

// Alias bitwise logical operations using SSE logical ops on packed FP values.
defm FsAND  : sse12_fp_alias_pack_logical<0x54, "and", X86fand>;
defm FsOR   : sse12_fp_alias_pack_logical<0x56, "or", X86for>;
defm FsXOR  : sse12_fp_alias_pack_logical<0x57, "xor", X86fxor>;
let neverHasSideEffects = 1, Pattern = []<dag>, isCommutable = 0 in
  defm FsANDN : sse12_fp_alias_pack_logical<0x55, "andn", undef, 1>;
/// basic_sse12_fp_binop_rm - SSE 1 & 2 binops come in both scalar and
/// vector forms.
///
/// In addition, we also have a special variant of the scalar form here to
/// represent the associated intrinsic operation.  This form is unlike the
/// plain scalar form, in that it takes an entire vector (instead of a scalar)
/// and leaves the top elements unmodified (therefore these cannot be commuted).
/// These three forms can each be reg+reg or reg+mem, so there are a total of
/// six "instructions".
multiclass basic_sse12_fp_binop_rm<bits<8> opc, string OpcodeStr,
                                   SDNode OpNode> {
  let isAsmParserOnly = 1 in {
    defm V#NAME#SS : sse12_fp_scalar<opc,
        !strconcat(OpcodeStr, "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                   OpNode, FR32, f32mem>, XS, VEX_4V;
    defm V#NAME#SD : sse12_fp_scalar<opc,
        !strconcat(OpcodeStr, "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                   OpNode, FR64, f64mem>, XD, VEX_4V;

    defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr,
                      "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"), OpNode,
                      VR128, v4f32, f128mem, memopv4f32, SSEPackedSingle>,
                      VEX_4V;

    defm V#NAME#PD : sse12_fp_packed<opc, !strconcat(OpcodeStr,
                      "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), OpNode,
                      VR128, v2f64, f128mem, memopv2f64, SSEPackedDouble>,
                      OpSize, VEX_4V;

    defm V#NAME#SS : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
       !strconcat(OpcodeStr, "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                  "", "_ss", ssmem, sse_load_f32>, XS, VEX_4V;

    defm V#NAME#SD : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
       !strconcat(OpcodeStr, "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                  "2", "_sd", sdmem, sse_load_f64>, XD, VEX_4V;
  let Constraints = "$src1 = $dst" in {
    defm SS : sse12_fp_scalar<opc,
                    !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
                    OpNode, FR32, f32mem>, XS;
    defm SD : sse12_fp_scalar<opc,
                    !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
                    OpNode, FR64, f64mem>, XD;
    defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr,
                "ps\t{$src2, $dst|$dst, $src2}"), OpNode, VR128, v4f32,
                f128mem, memopv4f32, SSEPackedSingle>, TB;
    defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr,
                "pd\t{$src2, $dst|$dst, $src2}"), OpNode, VR128, v2f64,
                f128mem, memopv2f64, SSEPackedDouble>, TB, OpSize;
    defm SS : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
       !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
                  "", "_ss", ssmem, sse_load_f32>, XS;
    defm SD : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
       !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
                  "2", "_sd", sdmem, sse_load_f64>, XD;
// Arithmetic instructions
defm ADD : basic_sse12_fp_binop_rm<0x58, "add", fadd>;
defm MUL : basic_sse12_fp_binop_rm<0x59, "mul", fmul>;

let isCommutable = 0 in {
  defm SUB : basic_sse12_fp_binop_rm<0x5C, "sub", fsub>;
  defm DIV : basic_sse12_fp_binop_rm<0x5E, "div", fdiv>;
}
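
// Each defm above expands to the scalar, packed and intrinsic variants; e.g.
// 'defm ADD' yields ADDSSrr/ADDSSrm, ADDSDrr/ADDSDrm, ADDPSrr/ADDPSrm,
// ADDPDrr/ADDPDrm, the corresponding *_Int scalar intrinsic forms, and (from
// the isAsmParserOnly block) the V-prefixed AVX spellings.
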
/// This multiclass is like basic_sse12_fp_binop_rm, with the addition of
/// instructions for a full-vector intrinsic form.  Operations that map
/// onto C operators don't use this form since they just use the plain
/// vector form instead of having a separate vector intrinsic form.
///
/// This provides a total of eight "instructions".
///
multiclass sse12_fp_binop_rm<bits<8> opc, string OpcodeStr,
                             SDNode OpNode> {
  let isAsmParserOnly = 1 in {
    // Scalar operation, reg+reg.
    defm V#NAME#SS : sse12_fp_scalar<opc,
      !strconcat(OpcodeStr, "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 OpNode, FR32, f32mem>, XS, VEX_4V;
    defm V#NAME#SD : sse12_fp_scalar<opc,
      !strconcat(OpcodeStr, "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 OpNode, FR64, f64mem>, XD, VEX_4V;

    defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr,
                      "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"), OpNode,
                      VR128, v4f32, f128mem, memopv4f32, SSEPackedSingle>,
                      VEX_4V;

    defm V#NAME#PD : sse12_fp_packed<opc, !strconcat(OpcodeStr,
                      "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), OpNode,
                      VR128, v2f64, f128mem, memopv2f64, SSEPackedDouble>,
                      OpSize, VEX_4V;

    defm V#NAME#SS : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
       !strconcat(OpcodeStr, "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                  "", "_ss", ssmem, sse_load_f32>, XS, VEX_4V;

    defm V#NAME#SD : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
       !strconcat(OpcodeStr, "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                  "2", "_sd", sdmem, sse_load_f64>, XD, VEX_4V;

    defm V#NAME#PS : sse12_fp_packed_int<opc, OpcodeStr, VR128,
       !strconcat(OpcodeStr, "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                  "", "_ps", f128mem, memopv4f32, SSEPackedSingle>, VEX_4V;

    defm V#NAME#PD : sse12_fp_packed_int<opc, OpcodeStr, VR128,
       !strconcat(OpcodeStr, "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                  "2", "_pd", f128mem, memopv2f64, SSEPackedDouble>, OpSize,
                  VEX_4V;
  }

  let Constraints = "$src1 = $dst" in {
    // Scalar operation, reg+reg.