def MOVSX16rm8 : I<0xBE, MRMSrcMem, (ops R16:$dst, i8mem :$src),
                   "movs{bw|x} {$src, $dst|$dst, $src}",
                   [(set R16:$dst, (sextloadi16i8 addr:$src))]>, TB, OpSize;
def MOVSX32rr8 : I<0xBE, MRMSrcReg, (ops R32:$dst, R8 :$src),
                   "movs{bl|x} {$src, $dst|$dst, $src}",
                   [(set R32:$dst, (sext R8:$src))]>, TB;
def MOVSX32rm8 : I<0xBE, MRMSrcMem, (ops R32:$dst, i8mem :$src),
                   "movs{bl|x} {$src, $dst|$dst, $src}",
                   [(set R32:$dst, (sextloadi32i8 addr:$src))]>, TB;
def MOVSX32rr16: I<0xBF, MRMSrcReg, (ops R32:$dst, R16:$src),
                   "movs{wl|x} {$src, $dst|$dst, $src}",
                   [(set R32:$dst, (sext R16:$src))]>, TB;
def MOVSX32rm16: I<0xBF, MRMSrcMem, (ops R32:$dst, i16mem:$src),
                   "movs{wl|x} {$src, $dst|$dst, $src}",
                   [(set R32:$dst, (sextloadi32i16 addr:$src))]>, TB;

def MOVZX16rr8 : I<0xB6, MRMSrcReg, (ops R16:$dst, R8 :$src),
                   "movz{bw|x} {$src, $dst|$dst, $src}",
                   [(set R16:$dst, (zext R8:$src))]>, TB, OpSize;
def MOVZX16rm8 : I<0xB6, MRMSrcMem, (ops R16:$dst, i8mem :$src),
                   "movz{bw|x} {$src, $dst|$dst, $src}",
                   [(set R16:$dst, (zextloadi16i8 addr:$src))]>, TB, OpSize;
def MOVZX32rr8 : I<0xB6, MRMSrcReg, (ops R32:$dst, R8 :$src),
                   "movz{bl|x} {$src, $dst|$dst, $src}",
                   [(set R32:$dst, (zext R8:$src))]>, TB;
def MOVZX32rm8 : I<0xB6, MRMSrcMem, (ops R32:$dst, i8mem :$src),
                   "movz{bl|x} {$src, $dst|$dst, $src}",
                   [(set R32:$dst, (zextloadi32i8 addr:$src))]>, TB;
def MOVZX32rr16: I<0xB7, MRMSrcReg, (ops R32:$dst, R16:$src),
                   "movz{wl|x} {$src, $dst|$dst, $src}",
                   [(set R32:$dst, (zext R16:$src))]>, TB;
def MOVZX32rm16: I<0xB7, MRMSrcMem, (ops R32:$dst, i16mem:$src),
                   "movz{wl|x} {$src, $dst|$dst, $src}",
                   [(set R32:$dst, (zextloadi32i16 addr:$src))]>, TB;

// Handling 1-bit zextload and sextload
def : Pat<(sextloadi16i1 addr:$src), (MOVSX16rm8  addr:$src)>;
def : Pat<(sextloadi32i1 addr:$src), (MOVSX32rm8  addr:$src)>;
def : Pat<(zextloadi16i1 addr:$src), (MOVZX16rm8  addr:$src)>;
def : Pat<(zextloadi32i1 addr:$src), (MOVZX32rm8  addr:$src)>;
// Handling 1-bit extload
def : Pat<(extloadi8i1 addr:$src), (MOV8rm  addr:$src)>;

// Modeling anyext as zext
def : Pat<(i16 (anyext R8 :$src)), (MOVZX16rr8  R8 :$src)>;
def : Pat<(i32 (anyext R8 :$src)), (MOVZX32rr8  R8 :$src)>;
def : Pat<(i32 (anyext R16:$src)), (MOVZX32rr16 R16:$src)>;

//===----------------------------------------------------------------------===//
// XMM Floating point support (requires SSE / SSE2)
//===----------------------------------------------------------------------===//

def MOVSSrr : I<0x10, MRMSrcReg, (ops FR32:$dst, FR32:$src),
                "movss {$src, $dst|$dst, $src}", []>,
              Requires<[HasSSE1]>, XS;
def MOVSDrr : I<0x10, MRMSrcReg, (ops FR64:$dst, FR64:$src),
                "movsd {$src, $dst|$dst, $src}", []>,
              Requires<[HasSSE2]>, XD;
def MOVSSrm : I<0x10, MRMSrcMem, (ops FR32:$dst, f32mem:$src),
                "movss {$src, $dst|$dst, $src}",
                [(set FR32:$dst, (loadf32 addr:$src))]>,
              Requires<[HasSSE1]>, XS;
def MOVSSmr : I<0x11, MRMDestMem, (ops f32mem:$dst, FR32:$src),
                "movss {$src, $dst|$dst, $src}",
                [(store FR32:$src, addr:$dst)]>,
              Requires<[HasSSE1]>, XS;
def MOVSDrm : I<0x10, MRMSrcMem, (ops FR64:$dst, f64mem:$src),
                "movsd {$src, $dst|$dst, $src}",
                [(set FR64:$dst, (loadf64 addr:$src))]>,
              Requires<[HasSSE2]>, XD;
def MOVSDmr : I<0x11, MRMDestMem, (ops f64mem:$dst, FR64:$src),
                "movsd {$src, $dst|$dst, $src}",
                [(store FR64:$src, addr:$dst)]>,
              Requires<[HasSSE2]>, XD;

def CVTTSD2SIrr: I<0x2C, MRMSrcReg, (ops R32:$dst, FR64:$src),
                   "cvttsd2si {$src, $dst|$dst, $src}",
                   [(set R32:$dst, (fp_to_sint FR64:$src))]>,
                 Requires<[HasSSE2]>, XD;
def CVTTSD2SIrm: I<0x2C, MRMSrcMem, (ops R32:$dst, f64mem:$src),
                   "cvttsd2si {$src, $dst|$dst, $src}",
                   [(set R32:$dst, (fp_to_sint (loadf64 addr:$src)))]>,
                 Requires<[HasSSE2]>, XD;
def CVTTSS2SIrr: I<0x2C, MRMSrcReg, (ops R32:$dst, FR32:$src),
                   "cvttss2si {$src, $dst|$dst, $src}",
                   [(set R32:$dst, (fp_to_sint FR32:$src))]>,
                 Requires<[HasSSE1]>, XS;
def CVTTSS2SIrm: I<0x2C, MRMSrcMem, (ops R32:$dst, f32mem:$src),
                   "cvttss2si {$src, $dst|$dst, $src}",
                   [(set R32:$dst, (fp_to_sint (loadf32 addr:$src)))]>,
                 Requires<[HasSSE1]>, XS;
def CVTSD2SSrr: I<0x5A, MRMSrcReg, (ops FR32:$dst, FR64:$src),
                  "cvtsd2ss {$src, $dst|$dst, $src}",
                  [(set FR32:$dst, (fround FR64:$src))]>,
                Requires<[HasSSE2]>, XS;
def CVTSD2SSrm: I<0x5A, MRMSrcMem, (ops FR32:$dst, f64mem:$src), 
                  "cvtsd2ss {$src, $dst|$dst, $src}",
                  [(set FR32:$dst, (fround (loadf64 addr:$src)))]>,
                Requires<[HasSSE2]>, XS;
def CVTSS2SDrr: I<0x5A, MRMSrcReg, (ops FR64:$dst, FR32:$src),
                  "cvtss2sd {$src, $dst|$dst, $src}",
                  [(set FR64:$dst, (fextend FR32:$src))]>,
                Requires<[HasSSE2]>, XD;
def CVTSS2SDrm: I<0x5A, MRMSrcMem, (ops FR64:$dst, f32mem:$src),
                  "cvtss2sd {$src, $dst|$dst, $src}",
                  [(set FR64:$dst, (fextend (loadf32 addr:$src)))]>,
                Requires<[HasSSE2]>, XD;
def CVTSI2SSrr: I<0x2A, MRMSrcReg, (ops FR32:$dst, R32:$src),
                  "cvtsi2ss {$src, $dst|$dst, $src}",
                  [(set FR32:$dst, (sint_to_fp R32:$src))]>,
                Requires<[HasSSE2]>, XS;
def CVTSI2SSrm: I<0x2A, MRMSrcMem, (ops FR32:$dst, i32mem:$src),
                  "cvtsi2ss {$src, $dst|$dst, $src}",
                  [(set FR32:$dst, (sint_to_fp (loadi32 addr:$src)))]>,
                Requires<[HasSSE2]>, XS;
def CVTSI2SDrr: I<0x2A, MRMSrcReg, (ops FR64:$dst, R32:$src),
                  "cvtsi2sd {$src, $dst|$dst, $src}",
                  [(set FR64:$dst, (sint_to_fp R32:$src))]>,
                Requires<[HasSSE2]>, XD;
def CVTSI2SDrm: I<0x2A, MRMSrcMem, (ops FR64:$dst, i32mem:$src),
                  "cvtsi2sd {$src, $dst|$dst, $src}",
                  [(set FR64:$dst, (sint_to_fp (loadi32 addr:$src)))]>,
                Requires<[HasSSE2]>, XD;

def SQRTSSrm : I<0x51, MRMSrcMem, (ops FR32:$dst, f32mem:$src),
                 "sqrtss {$src, $dst|$dst, $src}",
                 [(set FR32:$dst, (fsqrt (loadf32 addr:$src)))]>,
               Requires<[HasSSE1]>, XS;
def SQRTSSrr : I<0x51, MRMSrcReg, (ops FR32:$dst, FR32:$src),
                 "sqrtss {$src, $dst|$dst, $src}",
                 [(set FR32:$dst, (fsqrt FR32:$src))]>,
               Requires<[HasSSE1]>, XS;
def SQRTSDrm : I<0x51, MRMSrcMem, (ops FR64:$dst, f64mem:$src),
                 "sqrtsd {$src, $dst|$dst, $src}",
                 [(set FR64:$dst, (fsqrt (loadf64 addr:$src)))]>,
               Requires<[HasSSE2]>, XD;
def SQRTSDrr : I<0x51, MRMSrcReg, (ops FR64:$dst, FR64:$src),
                 "sqrtsd {$src, $dst|$dst, $src}",
                 [(set FR64:$dst, (fsqrt FR64:$src))]>,
               Requires<[HasSSE2]>, XD;
def UCOMISDrr: I<0x2E, MRMSrcReg, (ops FR64:$dst, FR64:$src),
                 "ucomisd {$src, $dst|$dst, $src}", []>,
               Requires<[HasSSE2]>, TB, OpSize;
def UCOMISDrm: I<0x2E, MRMSrcMem, (ops FR64:$dst, f64mem:$src),
                "ucomisd {$src, $dst|$dst, $src}", []>,
               Requires<[HasSSE2]>, TB, OpSize;
def UCOMISSrr: I<0x2E, MRMSrcReg, (ops FR32:$dst, FR32:$src),
                "ucomiss {$src, $dst|$dst, $src}", []>,
               Requires<[HasSSE1]>, TB;
def UCOMISSrm: I<0x2E, MRMSrcMem, (ops FR32:$dst, f32mem:$src),
                "ucomiss {$src, $dst|$dst, $src}", []>,
               Requires<[HasSSE1]>, TB;
// Pseudo-instructions that map fld0 to xorps/xorpd for sse.
// FIXME: remove when we can teach regalloc that xor reg, reg is ok.
def FLD0SS : I<0x57, MRMSrcReg, (ops FR32:$dst),
               "xorps $dst, $dst", []>, Requires<[HasSSE1]>, TB;
def FLD0SD : I<0x57, MRMSrcReg, (ops FR64:$dst),
               "xorpd $dst, $dst", []>, Requires<[HasSSE2]>, TB, OpSize;
// SSE Scalar Arithmetic
def ADDSSrr : I<0x58, MRMSrcReg, (ops FR32:$dst, FR32:$src1, FR32:$src2),
                "addss {$src2, $dst|$dst, $src2}",
                [(set FR32:$dst, (fadd FR32:$src1, FR32:$src2))]>,
              Requires<[HasSSE1]>, XS;
def ADDSDrr : I<0x58, MRMSrcReg, (ops FR64:$dst, FR64:$src1, FR64:$src2),
                "addsd {$src2, $dst|$dst, $src2}",
                [(set FR64:$dst, (fadd FR64:$src1, FR64:$src2))]>,
              Requires<[HasSSE2]>, XD;
def MULSSrr : I<0x59, MRMSrcReg, (ops FR32:$dst, FR32:$src1, FR32:$src2),
                "mulss {$src2, $dst|$dst, $src2}",
                [(set FR32:$dst, (fmul FR32:$src1, FR32:$src2))]>,
              Requires<[HasSSE1]>, XS;
def MULSDrr : I<0x59, MRMSrcReg, (ops FR64:$dst, FR64:$src1, FR64:$src2),
                "mulsd {$src2, $dst|$dst, $src2}",
                [(set FR64:$dst, (fmul FR64:$src1, FR64:$src2))]>,
              Requires<[HasSSE2]>, XD;
def ADDSSrm : I<0x58, MRMSrcMem, (ops FR32:$dst, FR32:$src1, f32mem:$src2),
                "addss {$src2, $dst|$dst, $src2}",
                [(set FR32:$dst, (fadd FR32:$src1, (loadf32 addr:$src2)))]>,
              Requires<[HasSSE1]>, XS;
def ADDSDrm : I<0x58, MRMSrcMem, (ops FR64:$dst, FR64:$src1, f64mem:$src2),
                "addsd {$src2, $dst|$dst, $src2}",
                [(set FR64:$dst, (fadd FR64:$src1, (loadf64 addr:$src2)))]>,
              Requires<[HasSSE2]>, XD;
def MULSSrm : I<0x59, MRMSrcMem, (ops FR32:$dst, FR32:$src1, f32mem:$src2),
                "mulss {$src2, $dst|$dst, $src2}",
                [(set FR32:$dst, (fmul FR32:$src1, (loadf32 addr:$src2)))]>,
              Requires<[HasSSE1]>, XS;
def MULSDrm : I<0x59, MRMSrcMem, (ops FR64:$dst, FR64:$src1, f64mem:$src2),
                "mulsd {$src2, $dst|$dst, $src2}",
                [(set FR64:$dst, (fmul FR64:$src1, (loadf64 addr:$src2)))]>,
              Requires<[HasSSE2]>, XD;
def DIVSSrr : I<0x5E, MRMSrcReg, (ops FR32:$dst, FR32:$src1, FR32:$src2),
                "divss {$src2, $dst|$dst, $src2}",
                [(set FR32:$dst, (fdiv FR32:$src1, FR32:$src2))]>,
              Requires<[HasSSE1]>, XS;
def DIVSSrm : I<0x5E, MRMSrcMem, (ops FR32:$dst, FR32:$src1, f32mem:$src2),
                "divss {$src2, $dst|$dst, $src2}",
                [(set FR32:$dst, (fdiv FR32:$src1, (loadf32 addr:$src2)))]>,
              Requires<[HasSSE1]>, XS;
def DIVSDrr : I<0x5E, MRMSrcReg, (ops FR64:$dst, FR64:$src1, FR64:$src2),
                "divsd {$src2, $dst|$dst, $src2}",
                [(set FR64:$dst, (fdiv FR64:$src1, FR64:$src2))]>,
              Requires<[HasSSE2]>, XD;
def DIVSDrm : I<0x5E, MRMSrcMem, (ops FR64:$dst, FR64:$src1, f64mem:$src2),
                "divsd {$src2, $dst|$dst, $src2}",
                [(set FR64:$dst, (fdiv FR64:$src1, (loadf64 addr:$src2)))]>,
              Requires<[HasSSE2]>, XD;
def SUBSSrr : I<0x5C, MRMSrcReg, (ops FR32:$dst, FR32:$src1, FR32:$src2),
                "subss {$src2, $dst|$dst, $src2}",
                [(set FR32:$dst, (fsub FR32:$src1, FR32:$src2))]>,
              Requires<[HasSSE1]>, XS;
def SUBSSrm : I<0x5C, MRMSrcMem, (ops FR32:$dst, FR32:$src1, f32mem:$src2),
                "subss {$src2, $dst|$dst, $src2}",
                [(set FR32:$dst, (fsub FR32:$src1, (loadf32 addr:$src2)))]>,
              Requires<[HasSSE1]>, XS;
def SUBSDrr : I<0x5C, MRMSrcReg, (ops FR64:$dst, FR64:$src1, FR64:$src2),
                "subsd {$src2, $dst|$dst, $src2}",
                [(set FR64:$dst, (fsub FR64:$src1, FR64:$src2))]>,
              Requires<[HasSSE2]>, XD;
def SUBSDrm : I<0x5C, MRMSrcMem, (ops FR64:$dst, FR64:$src1, f64mem:$src2),
                "subsd {$src2, $dst|$dst, $src2}",
                [(set FR64:$dst, (fsub FR64:$src1, (loadf64 addr:$src2)))]>,
              Requires<[HasSSE2]>, XD;

// SSE Logical
let isCommutable = 1 in {
def ANDPSrr : I<0x54, MRMSrcReg, (ops FR32:$dst, FR32:$src1, FR32:$src2),
                "andps {$src2, $dst|$dst, $src2}", []>,
              Requires<[HasSSE1]>, TB;
def ANDPDrr : I<0x54, MRMSrcReg, (ops FR64:$dst, FR64:$src1, FR64:$src2),
                "andpd {$src2, $dst|$dst, $src2}", []>,
              Requires<[HasSSE2]>, TB, OpSize;
def ORPSrr : I<0x56, MRMSrcReg, (ops FR32:$dst, FR32:$src1, FR32:$src2),
                "orps {$src2, $dst|$dst, $src2}", []>,
             Requires<[HasSSE1]>, TB;
def ORPDrr : I<0x56, MRMSrcReg, (ops FR64:$dst, FR64:$src1, FR64:$src2),
                "orpd {$src2, $dst|$dst, $src2}", []>,
             Requires<[HasSSE2]>, TB, OpSize;
def XORPSrr : I<0x57, MRMSrcReg, (ops FR32:$dst, FR32:$src1, FR32:$src2),
                "xorps {$src2, $dst|$dst, $src2}", []>,
              Requires<[HasSSE1]>, TB;
def XORPDrr : I<0x57, MRMSrcReg, (ops FR64:$dst, FR64:$src1, FR64:$src2),
                "xorpd {$src2, $dst|$dst, $src2}", []>,
              Requires<[HasSSE2]>, TB, OpSize;
}
def ANDNPSrr : I<0x55, MRMSrcReg, (ops FR32:$dst, FR32:$src1, FR32:$src2),
                "andnps {$src2, $dst|$dst, $src2}", []>,
               Requires<[HasSSE1]>, TB;
def ANDNPDrr : I<0x55, MRMSrcReg, (ops FR64:$dst, FR64:$src1, FR64:$src2),
                "andnpd {$src2, $dst|$dst, $src2}", []>,
               Requires<[HasSSE2]>, TB, OpSize;
def CMPSSrr : I<0xC2, MRMSrcReg,
                (ops FR32:$dst, FR32:$src1, FR32:$src, SSECC:$cc),
                "cmp${cc}ss {$src, $dst|$dst, $src}", []>,
              Requires<[HasSSE1]>, XS;
def CMPSSrm : I<0xC2, MRMSrcMem,
                (ops FR32:$dst, FR32:$src1, f32mem:$src, SSECC:$cc),
                "cmp${cc}ss {$src, $dst|$dst, $src}", []>,
              Requires<[HasSSE1]>, XS;
def CMPSDrr : I<0xC2, MRMSrcReg,
                (ops FR64:$dst, FR64:$src1, FR64:$src, SSECC:$cc),
                "cmp${cc}sd {$src, $dst|$dst, $src}", []>,
              Requires<[HasSSE2]>, XD;
def CMPSDrm : I<0xC2, MRMSrcMem,
                (ops FR64:$dst, FR64:$src1, f64mem:$src, SSECC:$cc),
                "cmp${cc}sd {$src, $dst|$dst, $src}", []>,
              Requires<[HasSSE2]>, XD;

//===----------------------------------------------------------------------===//
// Floating Point Stack Support
//===----------------------------------------------------------------------===//

// Floating point support.  All FP Stack operations are represented with two 
// instructions here.  The first instruction, generated by the instruction
// selector, uses "RFP" registers: a traditional register file to reference
// floating point values.  These instructions are all pseudo instructions and
// use the "Fp" prefix.  The second instruction is defined with FPI, which is
// the actual instruction emitted by the assembler.  The FP stackifier pass
// converts one to the other after register allocation occurs.
//
// Note that the FpI instruction should have instruction selection info (e.g.
// a pattern) and the FPI instruction should have emission info (e.g. opcode
// encoding and asm printing info).
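//
// Illustrative sketch of the pairing (not a definition from this file): the
// selector might emit the pseudo
//   %sum = FpADD %a, %b        // TwoArgFP pseudo on RFP virtual registers
// and the FP stackifier later rewrites it into one of the concrete FPI forms
// defined below (FADDST0r, FADDrST0, or FADDPrST0), chosen by where the
// operands sit on the x87 stack and whether they die at this use.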

// FPI - Floating Point Instruction template.
class FPI<bits<8> o, Format F, dag ops, string asm> : I<o, F, ops, asm, []> {}
// FpI - Floating Point Pseudo Instruction template.
class FpI<dag ops, FPFormat fp, list<dag> pattern>
  : X86Inst<0, Pseudo, NoImm, ops, "">, Requires<[FPStack]> {
  let FPForm = fp; let FPFormBits = FPForm.Value;
  let Pattern = pattern;
}

// FpPseudoI - Floating Point Pseudo Instruction template.
// TEMPORARY: for FpGETRESULT and FpSETRESULT only, since they must match
// regardless of X86Vector.
class FpPseudoI<dag ops, FPFormat fp, list<dag> pattern>
  : X86Inst<0, Pseudo, NoImm, ops, ""> {
  let FPForm = fp; let FPFormBits = FPForm.Value;
  let Pattern = pattern;
}

def FpGETRESULT : FpPseudoI<(ops RFP:$dst), SpecialFP, []>;     // FPR = ST(0)
let hasOutFlag = 1 in 
  def FpSETRESULT : FpPseudoI<(ops RFP:$src), SpecialFP,
                    [(X86fpset RFP:$src)]>, Imp<[], [ST0]>;     // ST(0) = FPR
def FpMOV       : FpI<(ops RFP:$dst, RFP:$src), SpecialFP, []>; // f1 = fmov f2
// Arithmetic
// Add, Sub, Mul, Div.
def FpADD : FpI<(ops RFP:$dst, RFP:$src1, RFP:$src2), TwoArgFP,
                [(set RFP:$dst, (fadd RFP:$src1, RFP:$src2))]>;
def FpSUB : FpI<(ops RFP:$dst, RFP:$src1, RFP:$src2), TwoArgFP,
                [(set RFP:$dst, (fsub RFP:$src1, RFP:$src2))]>;
def FpMUL : FpI<(ops RFP:$dst, RFP:$src1, RFP:$src2), TwoArgFP,
                [(set RFP:$dst, (fmul RFP:$src1, RFP:$src2))]>;
def FpDIV : FpI<(ops RFP:$dst, RFP:$src1, RFP:$src2), TwoArgFP,
                [(set RFP:$dst, (fdiv RFP:$src1, RFP:$src2))]>;

class FPST0rInst<bits<8> o, string asm>
  : FPI<o, AddRegFrm, (ops RST:$op), asm>, D8;
class FPrST0Inst<bits<8> o, string asm>
  : FPI<o, AddRegFrm, (ops RST:$op), asm>, DC;
class FPrST0PInst<bits<8> o, string asm>
  : FPI<o, AddRegFrm, (ops RST:$op), asm>, DE;

// Binary Ops with a memory source.
def FpADD32m  : FpI<(ops RFP:$dst, RFP:$src1, f32mem:$src2), OneArgFPRW,
                    [(set RFP:$dst, (fadd RFP:$src1,
                                     (extloadf64f32 addr:$src2)))]>;
                // ST(0) = ST(0) + [mem32]
def FpADD64m  : FpI<(ops RFP:$dst, RFP:$src1, f64mem:$src2), OneArgFPRW,
                    [(set RFP:$dst, (fadd RFP:$src1, (loadf64 addr:$src2)))]>;
                // ST(0) = ST(0) + [mem64]
def FpMUL32m  : FpI<(ops RFP:$dst, RFP:$src1, f32mem:$src2), OneArgFPRW,
                    [(set RFP:$dst, (fmul RFP:$src1,
                                     (extloadf64f32 addr:$src2)))]>;
                // ST(0) = ST(0) * [mem32]
def FpMUL64m  : FpI<(ops RFP:$dst, RFP:$src1, f64mem:$src2), OneArgFPRW,
                    [(set RFP:$dst, (fmul RFP:$src1, (loadf64 addr:$src2)))]>;
                // ST(0) = ST(0) * [mem64]
def FpSUB32m  : FpI<(ops RFP:$dst, RFP:$src1, f32mem:$src2), OneArgFPRW,
                    [(set RFP:$dst, (fsub RFP:$src1,
                                    (extloadf64f32 addr:$src2)))]>;
                // ST(0) = ST(0) - [mem32]
def FpSUB64m  : FpI<(ops RFP:$dst, RFP:$src1, f64mem:$src2), OneArgFPRW,
                    [(set RFP:$dst, (fsub RFP:$src1, (loadf64 addr:$src2)))]>;
                // ST(0) = ST(0) - [mem64]
def FpSUBR32m : FpI<(ops RFP:$dst, RFP:$src1, f32mem:$src2), OneArgFPRW,
                    [(set RFP:$dst, (fsub (extloadf64f32 addr:$src2),
                                     RFP:$src1))]>;
                // ST(0) = [mem32] - ST(0)
def FpSUBR64m : FpI<(ops RFP:$dst, RFP:$src1, f64mem:$src2), OneArgFPRW,
                    [(set RFP:$dst, (fsub (loadf64 addr:$src2), RFP:$src1))]>;
                // ST(0) = [mem64] - ST(0)
def FpDIV32m  : FpI<(ops RFP:$dst, RFP:$src1, f32mem:$src2), OneArgFPRW,
                    [(set RFP:$dst, (fdiv RFP:$src1,
                                    (extloadf64f32 addr:$src2)))]>;
                // ST(0) = ST(0) / [mem32]
def FpDIV64m  : FpI<(ops RFP:$dst, RFP:$src1, f64mem:$src2), OneArgFPRW,
                    [(set RFP:$dst, (fdiv RFP:$src1, (loadf64 addr:$src2)))]>;
                // ST(0) = ST(0) / [mem64]
def FpDIVR32m : FpI<(ops RFP:$dst, RFP:$src1, f32mem:$src2), OneArgFPRW,
                    [(set RFP:$dst, (fdiv (extloadf64f32 addr:$src2),
                                     RFP:$src1))]>;
                // ST(0) = [mem32] / ST(0)
def FpDIVR64m : FpI<(ops RFP:$dst, RFP:$src1, f64mem:$src2), OneArgFPRW,
                    [(set RFP:$dst, (fdiv (loadf64 addr:$src2), RFP:$src1))]>;
                // ST(0) = [mem64] / ST(0)


def FADD32m  : FPI<0xD8, MRM0m, (ops f32mem:$src), "fadd{s} $src">;
def FADD64m  : FPI<0xDC, MRM0m, (ops f64mem:$src), "fadd{l} $src">;
def FMUL32m  : FPI<0xD8, MRM1m, (ops f32mem:$src), "fmul{s} $src">;
def FMUL64m  : FPI<0xDC, MRM1m, (ops f64mem:$src), "fmul{l} $src">;
def FSUB32m  : FPI<0xD8, MRM4m, (ops f32mem:$src), "fsub{s} $src">;
def FSUB64m  : FPI<0xDC, MRM4m, (ops f64mem:$src), "fsub{l} $src">;
def FSUBR32m : FPI<0xD8, MRM5m, (ops f32mem:$src), "fsubr{s} $src">;
def FSUBR64m : FPI<0xDC, MRM5m, (ops f64mem:$src), "fsubr{l} $src">;
def FDIV32m  : FPI<0xD8, MRM6m, (ops f32mem:$src), "fdiv{s} $src">;
def FDIV64m  : FPI<0xDC, MRM6m, (ops f64mem:$src), "fdiv{l} $src">;
def FDIVR32m : FPI<0xD8, MRM7m, (ops f32mem:$src), "fdivr{s} $src">;
def FDIVR64m : FPI<0xDC, MRM7m, (ops f64mem:$src), "fdivr{l} $src">;

// FIXME: Implement these when we have a dag-dag isel!
//def FIADD16m  : FPI<0xDE, MRM0m>;    // ST(0) = ST(0) + [mem16int]
//def FIADD32m  : FPI<0xDA, MRM0m>;    // ST(0) = ST(0) + [mem32int]
//def FIMUL16m  : FPI<0xDE, MRM1m>;    // ST(0) = ST(0) * [mem16]
//def FIMUL32m  : FPI<0xDA, MRM1m>;    // ST(0) = ST(0) * [mem32]
//def FISUB16m  : FPI<0xDE, MRM4m>;    // ST(0) = ST(0) - [mem16int]
//def FISUB32m  : FPI<0xDA, MRM4m>;    // ST(0) = ST(0) - [mem32int]
//def FISUBR16m : FPI<0xDE, MRM5m>;    // ST(0) = [mem16int] - ST(0)
//def FISUBR32m : FPI<0xDA, MRM5m>;    // ST(0) = [mem32int] - ST(0)
//def FIDIV16m  : FPI<0xDE, MRM6m>;    // ST(0) = ST(0) / [mem16int]
//def FIDIV32m  : FPI<0xDA, MRM6m>;    // ST(0) = ST(0) / [mem32int]
//def FIDIVR16m : FPI<0xDE, MRM7m>;    // ST(0) = [mem16int] / ST(0)
//def FIDIVR32m : FPI<0xDA, MRM7m>;    // ST(0) = [mem32int] / ST(0)


// NOTE: GAS and apparently all other AT&T style assemblers have a broken notion
// of some of the 'reverse' forms of the fsub and fdiv instructions.  As such,
// we have to put some 'r's in and take them out of weird places.
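// A hedged reading of the "{att|intel}" asm-string convention used below
// (illustrative, inferred from the rest of this file rather than stated here):
//   "fsub{|r} {%ST(0), $op|$op, %ST(0)}"
// prints as "fsub %ST(0), $op" for AT&T-style assemblers and as
// "fsubr $op, %ST(0)" for Intel-style ones, which is how the trailing 'r'
// ends up attached to opposite mnemonics in the two syntaxes.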
def FADDST0r   : FPST0rInst <0xC0, "fadd $op">;
def FADDrST0   : FPrST0Inst <0xC0, "fadd {%ST(0), $op|$op, %ST(0)}">;
def FADDPrST0  : FPrST0PInst<0xC0, "faddp $op">;
def FSUBRST0r  : FPST0rInst <0xE8, "fsubr $op">;
def FSUBrST0   : FPrST0Inst <0xE8, "fsub{r} {%ST(0), $op|$op, %ST(0)}">;
def FSUBPrST0  : FPrST0PInst<0xE8, "fsub{r}p $op">;
def FSUBST0r   : FPST0rInst <0xE0, "fsub $op">;
def FSUBRrST0  : FPrST0Inst <0xE0, "fsub{|r} {%ST(0), $op|$op, %ST(0)}">;
def FSUBRPrST0 : FPrST0PInst<0xE0, "fsub{|r}p $op">;
def FMULST0r   : FPST0rInst <0xC8, "fmul $op">;
def FMULrST0   : FPrST0Inst <0xC8, "fmul {%ST(0), $op|$op, %ST(0)}">;
def FMULPrST0  : FPrST0PInst<0xC8, "fmulp $op">;
def FDIVRST0r  : FPST0rInst <0xF8, "fdivr $op">;
def FDIVrST0   : FPrST0Inst <0xF8, "fdiv{r} {%ST(0), $op|$op, %ST(0)}">;
def FDIVPrST0  : FPrST0PInst<0xF8, "fdiv{r}p $op">;
def FDIVST0r   : FPST0rInst <0xF0, "fdiv $op">;
def FDIVRrST0  : FPrST0Inst <0xF0, "fdiv{|r} {%ST(0), $op|$op, %ST(0)}">;
def FDIVRPrST0 : FPrST0PInst<0xF0, "fdiv{|r}p $op">;


// Unary operations.
def FpCHS  : FpI<(ops RFP:$dst, RFP:$src), OneArgFPRW,
                 [(set RFP:$dst, (fneg RFP:$src))]>;
def FpABS  : FpI<(ops RFP:$dst, RFP:$src), OneArgFPRW,
                 [(set RFP:$dst, (fabs RFP:$src))]>;
def FpSQRT : FpI<(ops RFP:$dst, RFP:$src), OneArgFPRW,
                 [(set RFP:$dst, (fsqrt RFP:$src))]>;
def FpSIN  : FpI<(ops RFP:$dst, RFP:$src), OneArgFPRW,
                 [(set RFP:$dst, (fsin RFP:$src))]>;
def FpCOS  : FpI<(ops RFP:$dst, RFP:$src), OneArgFPRW,
                 [(set RFP:$dst, (fcos RFP:$src))]>;
def FpTST  : FpI<(ops RFP:$src), OneArgFP,
                 []>;

def FCHS  : FPI<0xE0, RawFrm, (ops), "fchs">, D9;
def FABS  : FPI<0xE1, RawFrm, (ops), "fabs">, D9;
def FSQRT : FPI<0xFA, RawFrm, (ops), "fsqrt">, D9;
def FSIN  : FPI<0xFE, RawFrm, (ops), "fsin">, D9;
def FCOS  : FPI<0xFF, RawFrm, (ops), "fcos">, D9;
def FTST  : FPI<0xE4, RawFrm, (ops), "ftst">, D9;


// Floating point cmovs.
let isTwoAddress = 1 in {
  def FpCMOVB  : FpI<(ops RST:$dst, RFP:$src1, RFP:$src2), CondMovFP, []>;
  def FpCMOVBE : FpI<(ops RST:$dst, RFP:$src1, RFP:$src2), CondMovFP, []>;
  def FpCMOVE  : FpI<(ops RST:$dst, RFP:$src1, RFP:$src2), CondMovFP, []>;
  def FpCMOVP  : FpI<(ops RST:$dst, RFP:$src1, RFP:$src2), CondMovFP, []>;
  def FpCMOVAE : FpI<(ops RST:$dst, RFP:$src1, RFP:$src2), CondMovFP, []>;
  def FpCMOVA  : FpI<(ops RST:$dst, RFP:$src1, RFP:$src2), CondMovFP, []>;
  def FpCMOVNE : FpI<(ops RST:$dst, RFP:$src1, RFP:$src2), CondMovFP, []>;
  def FpCMOVNP : FpI<(ops RST:$dst, RFP:$src1, RFP:$src2), CondMovFP, []>;
}

def FCMOVB  : FPI<0xC0, AddRegFrm, (ops RST:$op),
                  "fcmovb {$op, %ST(0)|%ST(0), $op}">, DA;
def FCMOVBE : FPI<0xD0, AddRegFrm, (ops RST:$op),
                  "fcmovbe {$op, %ST(0)|%ST(0), $op}">, DA;
def FCMOVE  : FPI<0xC8, AddRegFrm, (ops RST:$op),
                  "fcmove {$op, %ST(0)|%ST(0), $op}">, DA;
def FCMOVP  : FPI<0xD8, AddRegFrm, (ops RST:$op),
                  "fcmovu  {$op, %ST(0)|%ST(0), $op}">, DA;
def FCMOVAE : FPI<0xC0, AddRegFrm, (ops RST:$op),
                  "fcmovae {$op, %ST(0)|%ST(0), $op}">, DB;
def FCMOVA  : FPI<0xD0, AddRegFrm, (ops RST:$op),
                  "fcmova {$op, %ST(0)|%ST(0), $op}">, DB;
def FCMOVNE : FPI<0xC8, AddRegFrm, (ops RST:$op),
                  "fcmovne {$op, %ST(0)|%ST(0), $op}">, DB;
def FCMOVNP : FPI<0xD8, AddRegFrm, (ops RST:$op),
                  "fcmovnu {$op, %ST(0)|%ST(0), $op}">, DB;

// Floating point loads & stores.
def FpLD32m  : FpI<(ops RFP:$dst, f32mem:$src), ZeroArgFP,
                   [(set RFP:$dst, (extloadf64f32 addr:$src))]>;
def FpLD64m  : FpI<(ops RFP:$dst, f64mem:$src), ZeroArgFP,
                   [(set RFP:$dst, (loadf64 addr:$src))]>;
def FpILD16m : FpI<(ops RFP:$dst, i16mem:$src), ZeroArgFP,
                   []>;
def FpILD32m : FpI<(ops RFP:$dst, i32mem:$src), ZeroArgFP,
                   []>;
def FpILD64m : FpI<(ops RFP:$dst, i64mem:$src), ZeroArgFP,
                   []>;

// Required for RET of f32 / f64 values.
def : Pat<(X86fld addr:$src, f32), (FpLD32m addr:$src)>;
def : Pat<(X86fld addr:$src, f64), (FpLD64m addr:$src)>;

def FpST32m   : FpI<(ops f32mem:$op, RFP:$src), OneArgFP,
                [(truncstore RFP:$src, addr:$op, f32)]>;
def FpST64m   : FpI<(ops f64mem:$op, RFP:$src), OneArgFP,
                [(store RFP:$src, addr:$op)]>;
def FpSTP32m  : FpI<(ops f32mem:$op, RFP:$src), OneArgFP, []>;
def FpSTP64m  : FpI<(ops f64mem:$op, RFP:$src), OneArgFP, []>;
def FpIST16m  : FpI<(ops i16mem:$op, RFP:$src), OneArgFP, []>;
def FpIST32m  : FpI<(ops i32mem:$op, RFP:$src), OneArgFP, []>;
def FpIST64m  : FpI<(ops i64mem:$op, RFP:$src), OneArgFP, []>;

def FLD32m   : FPI<0xD9, MRM0m, (ops f32mem:$src), "fld{s} $src">;
def FLD64m   : FPI<0xDD, MRM0m, (ops f64mem:$src), "fld{l} $src">;
def FILD16m  : FPI<0xDF, MRM0m, (ops i16mem:$src), "fild{s} $src">;
def FILD32m  : FPI<0xDB, MRM0m, (ops i32mem:$src), "fild{l} $src">;
def FILD64m  : FPI<0xDF, MRM5m, (ops i64mem:$src), "fild{ll} $src">;
def FST32m   : FPI<0xD9, MRM2m, (ops f32mem:$dst), "fst{s} $dst">;
def FST64m   : FPI<0xDD, MRM2m, (ops f64mem:$dst), "fst{l} $dst">;
def FSTP32m  : FPI<0xD9, MRM3m, (ops f32mem:$dst), "fstp{s} $dst">;
def FSTP64m  : FPI<0xDD, MRM3m, (ops f64mem:$dst), "fstp{l} $dst">;
def FIST16m  : FPI<0xDF, MRM2m, (ops i16mem:$dst), "fist{s} $dst">;
def FIST32m  : FPI<0xDB, MRM2m, (ops i32mem:$dst), "fist{l} $dst">;
def FISTP16m : FPI<0xDF, MRM3m, (ops i16mem:$dst), "fistp{s} $dst">;
def FISTP32m : FPI<0xDB, MRM3m, (ops i32mem:$dst), "fistp{l} $dst">;
def FISTP64m : FPI<0xDF, MRM7m, (ops i64mem:$dst), "fistp{ll} $dst">;

// FP Stack manipulation instructions.
def FLDrr   : FPI<0xC0, AddRegFrm, (ops RST:$op), "fld $op">, D9;
def FSTrr   : FPI<0xD0, AddRegFrm, (ops RST:$op), "fst $op">, DD;
def FSTPrr  : FPI<0xD8, AddRegFrm, (ops RST:$op), "fstp $op">, DD;
def FXCH    : FPI<0xC8, AddRegFrm, (ops RST:$op), "fxch $op">, D9;

// Floating point constant loads.
def FpLD0 : FpI<(ops RFP:$dst), ZeroArgFP, []>;
def FpLD1 : FpI<(ops RFP:$dst), ZeroArgFP, []>;

def FLD0 : FPI<0xEE, RawFrm, (ops), "fldz">, D9;
def FLD1 : FPI<0xE8, RawFrm, (ops), "fld1">, D9;


// Floating point compares.
def FpUCOMr   : FpI<(ops RST:$lhs, RST:$rhs), CompareFP,
                    []>;  // FPSW = cmp ST(0) with ST(i)
def FpUCOMIr  : FpI<(ops RST:$lhs, RST:$rhs), CompareFP,
                    []>;  // CC = cmp ST(0) with ST(i)

def FUCOMr    : FPI<0xE0, AddRegFrm,    // FPSW = cmp ST(0) with ST(i)
                    (ops RST:$reg),
                    "fucom $reg">, DD, Imp<[ST0],[]>;
def FUCOMPr   : FPI<0xE8, AddRegFrm,    // FPSW = cmp ST(0) with ST(i), pop
                  (ops RST:$reg),
                  "fucomp $reg">, DD, Imp<[ST0],[]>;
def FUCOMPPr  : FPI<0xE9, RawFrm,       // cmp ST(0) with ST(1), pop, pop
                  (ops),
                  "fucompp">, DA, Imp<[ST0],[]>;

def FUCOMIr  : FPI<0xE8, AddRegFrm,     // CC = cmp ST(0) with ST(i)
                   (ops RST:$reg),
                   "fucomi {$reg, %ST(0)|%ST(0), $reg}">, DB, Imp<[ST0],[]>;
def FUCOMIPr : FPI<0xE8, AddRegFrm,     // CC = cmp ST(0) with ST(i), pop
                 (ops RST:$reg),
                 "fucomip {$reg, %ST(0)|%ST(0), $reg}">, DF, Imp<[ST0],[]>;


// Floating point flag ops.
def FNSTSW8r  : I<0xE0, RawFrm,                  // AX = fp flags
                  (ops), "fnstsw", []>, DF, Imp<[],[AX]>;
def FNSTCW16m : I<0xD9, MRM7m,                   // [mem16] = X87 control word
                  (ops i16mem:$dst), "fnstcw $dst", []>;
def FLDCW16m  : I<0xD9, MRM5m,                   // X87 control word = [mem16]
                  (ops i16mem:$dst), "fldcw $dst", []>;


//===----------------------------------------------------------------------===//
// Miscellaneous Instructions
//===----------------------------------------------------------------------===//

def RDTSC : I<0x31, RawFrm, (ops), "rdtsc", []>, TB, Imp<[],[EAX,EDX]>;