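// SetCC instructions: each SETcc reads the condition encoded in EFLAGS and
// stores 1 or 0 to a byte; the MRM0r forms target an 8-bit register and the
// MRM0m forms a byte in memory. A comparison therefore selects to a
// flag-producing CMP followed by the matching SETcc, e.g. (seteq a, b) ->
// CMP32rr + SETEr.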
def SETEr    : I<0x94, MRM0r,
                 (ops R8   :$dst),
                 "sete $dst",
                 [(set R8:$dst, (X86setcc X86_COND_E))]>,
               TB;                        // R8 = ==
def SETEm    : I<0x94, MRM0m,
                 (ops i8mem:$dst),
                 "sete $dst",
                 [(store (X86setcc X86_COND_E), addr:$dst)]>,
               TB;                        // [mem8] = ==
def SETNEr   : I<0x95, MRM0r,
                 (ops R8   :$dst),
                 "setne $dst",
                 [(set R8:$dst, (X86setcc X86_COND_NE))]>,
               TB;                        // R8 = !=
def SETNEm   : I<0x95, MRM0m,
                 (ops i8mem:$dst),
                 "setne $dst",
                 [(store (X86setcc X86_COND_NE), addr:$dst)]>,
               TB;                        // [mem8] = !=
def SETLr    : I<0x9C, MRM0r,
                 (ops R8   :$dst),
                 "setl $dst",
                 [(set R8:$dst, (X86setcc X86_COND_L))]>,
               TB;                        // R8 = < signed
def SETLm    : I<0x9C, MRM0m,
                 (ops i8mem:$dst),
                 "setl $dst",
                 [(store (X86setcc X86_COND_L), addr:$dst)]>,
               TB;                        // [mem8] = < signed
def SETGEr   : I<0x9D, MRM0r,
                 (ops R8   :$dst),
                 "setge $dst",
                 [(set R8:$dst, (X86setcc X86_COND_GE))]>,
               TB;                        // R8 = >= signed
def SETGEm   : I<0x9D, MRM0m,
                 (ops i8mem:$dst),
                 "setge $dst",
                 [(store (X86setcc X86_COND_GE), addr:$dst)]>,
               TB;                        // [mem8] = >= signed
def SETLEr   : I<0x9E, MRM0r,
                 (ops R8   :$dst),
                 "setle $dst",
                 [(set R8:$dst, (X86setcc X86_COND_LE))]>,
               TB;                        // R8 = <= signed
def SETLEm   : I<0x9E, MRM0m,
                 (ops i8mem:$dst),
                 "setle $dst",
                 [(store (X86setcc X86_COND_LE), addr:$dst)]>,
               TB;                        // [mem8] = <= signed
def SETGr    : I<0x9F, MRM0r,
                 (ops R8   :$dst),
                 "setg $dst",
                 [(set R8:$dst, (X86setcc X86_COND_G))]>,
               TB;                        // R8 = > signed
def SETGm    : I<0x9F, MRM0m,
                 (ops i8mem:$dst),
                 "setg $dst",
                 [(store (X86setcc X86_COND_G), addr:$dst)]>,
               TB;                        // [mem8] = > signed
def SETBr    : I<0x92, MRM0r,
                 (ops R8   :$dst),
                 "setb $dst",
                 [(set R8:$dst, (X86setcc X86_COND_B))]>,
               TB;                        // R8 = < unsign
def SETBm    : I<0x92, MRM0m,
                 (ops i8mem:$dst),
                 "setb $dst",
                 [(store (X86setcc X86_COND_B), addr:$dst)]>,
               TB;                        // [mem8] = < unsign
def SETAEr   : I<0x93, MRM0r,
                 (ops R8   :$dst),
                 "setae $dst",
                 [(set R8:$dst, (X86setcc X86_COND_AE))]>,
               TB;                        // R8 = >= unsign
def SETAEm   : I<0x93, MRM0m,
                 (ops i8mem:$dst),
                 "setae $dst",
                 [(store (X86setcc X86_COND_AE), addr:$dst)]>,
               TB;                        // [mem8] = >= unsign
def SETBEr   : I<0x96, MRM0r,
                 (ops R8   :$dst),
                 "setbe $dst",
                 [(set R8:$dst, (X86setcc X86_COND_BE))]>,
               TB;                        // R8 = <= unsign
def SETBEm   : I<0x96, MRM0m,
                 (ops i8mem:$dst),
                 "setbe $dst",
                 [(store (X86setcc X86_COND_BE), addr:$dst)]>,
               TB;                        // [mem8] = <= unsign
def SETAr    : I<0x97, MRM0r,
                 (ops R8   :$dst),
                 "seta $dst",
                 [(set R8:$dst, (X86setcc X86_COND_A))]>,
               TB;                        // R8 = > unsign
def SETAm    : I<0x97, MRM0m,
                 (ops i8mem:$dst),
                 "seta $dst",
                 [(store (X86setcc X86_COND_A), addr:$dst)]>,
               TB;                        // [mem8] = > unsign
def SETSr    : I<0x98, MRM0r,
                 (ops R8   :$dst),
                 "sets $dst",
                 [(set R8:$dst, (X86setcc X86_COND_S))]>,
               TB;                        // R8 = <sign bit>
def SETSm    : I<0x98, MRM0m,
                 (ops i8mem:$dst),
                 "sets $dst",
                 [(store (X86setcc X86_COND_S), addr:$dst)]>,
               TB;                        // [mem8] = <sign bit>
def SETNSr   : I<0x99, MRM0r,
                 (ops R8   :$dst),
                 "setns $dst",
                 [(set R8:$dst, (X86setcc X86_COND_NS))]>,
               TB;                        // R8 = !<sign bit>
def SETNSm   : I<0x99, MRM0m,
                 (ops i8mem:$dst),
                 "setns $dst",
                 [(store (X86setcc X86_COND_NS), addr:$dst)]>,
               TB;                        // [mem8] = !<sign bit>
def SETPr    : I<0x9A, MRM0r,
                 (ops R8   :$dst),
                 "setp $dst",
                 [(set R8:$dst, (X86setcc X86_COND_P))]>,
               TB;                        // R8 = parity
def SETPm    : I<0x9A, MRM0m,
                 (ops i8mem:$dst),
                 "setp $dst",
                 [(store (X86setcc X86_COND_P), addr:$dst)]>,
               TB;                        // [mem8] = parity
def SETNPr   : I<0x9B, MRM0r,
                 (ops R8   :$dst),
                 "setnp $dst",
                 [(set R8:$dst, (X86setcc X86_COND_NP))]>,
               TB;                        // R8 = not parity
def SETNPm   : I<0x9B, MRM0m,
                 (ops i8mem:$dst),
                 "setnp $dst",
                 [(store (X86setcc X86_COND_NP), addr:$dst)]>,
               TB;                        // [mem8] = not parity
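// Integer comparisons. X86cmp computes src1 - src2 and sets EFLAGS without
// producing a value, which is why these patterns contain no (set ...); the
// flags are consumed by the SETcc definitions above and by the conditional
// branches and moves selected elsewhere in this file.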
def CMP8rr  : I<0x38, MRMDestReg,
                (ops R8 :$src1, R8 :$src2),
                "cmp{b} {$src2, $src1|$src1, $src2}",
                [(X86cmp R8:$src1, R8:$src2)]>;
def CMP16rr : I<0x39, MRMDestReg,
                (ops R16:$src1, R16:$src2),
                "cmp{w} {$src2, $src1|$src1, $src2}",
                [(X86cmp R16:$src1, R16:$src2)]>, OpSize;
def CMP32rr : I<0x39, MRMDestReg,
                (ops R32:$src1, R32:$src2),
                "cmp{l} {$src2, $src1|$src1, $src2}",
                [(X86cmp R32:$src1, R32:$src2)]>;
def CMP8mr  : I<0x38, MRMDestMem,
                (ops i8mem :$src1, R8 :$src2),
                "cmp{b} {$src2, $src1|$src1, $src2}",
                [(X86cmp (loadi8 addr:$src1), R8:$src2)]>;
def CMP16mr : I<0x39, MRMDestMem,
                (ops i16mem:$src1, R16:$src2),
                "cmp{w} {$src2, $src1|$src1, $src2}",
                [(X86cmp (loadi16 addr:$src1), R16:$src2)]>, OpSize;
def CMP32mr : I<0x39, MRMDestMem,
                (ops i32mem:$src1, R32:$src2),
                "cmp{l} {$src2, $src1|$src1, $src2}",
                [(X86cmp (loadi32 addr:$src1), R32:$src2)]>;
def CMP8rm  : I<0x3A, MRMSrcMem,
                (ops R8 :$src1, i8mem :$src2),
                "cmp{b} {$src2, $src1|$src1, $src2}",
                [(X86cmp R8:$src1, (loadi8 addr:$src2))]>;
def CMP16rm : I<0x3B, MRMSrcMem,
                (ops R16:$src1, i16mem:$src2),
                "cmp{w} {$src2, $src1|$src1, $src2}",
                [(X86cmp R16:$src1, (loadi16 addr:$src2))]>, OpSize;
def CMP32rm : I<0x3B, MRMSrcMem,
                (ops R32:$src1, i32mem:$src2),
                "cmp{l} {$src2, $src1|$src1, $src2}",
                [(X86cmp R32:$src1, (loadi32 addr:$src2))]>;
def CMP8ri  : Ii8<0x80, MRM7r,
                  (ops R8:$src1, i8imm:$src2),
                  "cmp{b} {$src2, $src1|$src1, $src2}",
                  [(X86cmp R8:$src1, imm:$src2)]>;
def CMP16ri : Ii16<0x81, MRM7r,
                   (ops R16:$src1, i16imm:$src2),
                   "cmp{w} {$src2, $src1|$src1, $src2}",
                   [(X86cmp R16:$src1, imm:$src2)]>, OpSize;
def CMP32ri : Ii32<0x81, MRM7r,
                   (ops R32:$src1, i32imm:$src2),
                   "cmp{l} {$src2, $src1|$src1, $src2}",
                   [(X86cmp R32:$src1, imm:$src2)]>;
def CMP8mi  : Ii8 <0x80, MRM7m,
                   (ops i8mem :$src1, i8imm :$src2),
                   "cmp{b} {$src2, $src1|$src1, $src2}",
                   [(X86cmp (loadi8 addr:$src1), imm:$src2)]>;
def CMP16mi : Ii16<0x81, MRM7m,
                   (ops i16mem:$src1, i16imm:$src2),
                   "cmp{w} {$src2, $src1|$src1, $src2}",
                   [(X86cmp (loadi16 addr:$src1), imm:$src2)]>, OpSize;
def CMP32mi : Ii32<0x81, MRM7m,
                   (ops i32mem:$src1, i32imm:$src2),
                   "cmp{l} {$src2, $src1|$src1, $src2}",
                   [(X86cmp (loadi32 addr:$src1), imm:$src2)]>;
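// Sign and zero extension. The {bw|x}-style braces in the asm strings pick
// between the AT&T and Intel mnemonic suffixes, just as
// {$src, $dst|$dst, $src} picks the operand order for the two dialects.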
def MOVSX16rr8 : I<0xBE, MRMSrcReg, (ops R16:$dst, R8 :$src),
                   "movs{bw|x} {$src, $dst|$dst, $src}",
                   [(set R16:$dst, (sext R8:$src))]>, TB, OpSize;
def MOVSX16rm8 : I<0xBE, MRMSrcMem, (ops R16:$dst, i8mem :$src),
                   "movs{bw|x} {$src, $dst|$dst, $src}",
                   [(set R16:$dst, (sextloadi16i8 addr:$src))]>, TB, OpSize;
def MOVSX32rr8 : I<0xBE, MRMSrcReg, (ops R32:$dst, R8 :$src),
                   "movs{bl|x} {$src, $dst|$dst, $src}",
                   [(set R32:$dst, (sext R8:$src))]>, TB;
def MOVSX32rm8 : I<0xBE, MRMSrcMem, (ops R32:$dst, i8mem :$src),
                   "movs{bl|x} {$src, $dst|$dst, $src}",
                   [(set R32:$dst, (sextloadi32i8 addr:$src))]>, TB;
def MOVSX32rr16: I<0xBF, MRMSrcReg, (ops R32:$dst, R16:$src),
                   "movs{wl|x} {$src, $dst|$dst, $src}",
                   [(set R32:$dst, (sext R16:$src))]>, TB;
def MOVSX32rm16: I<0xBF, MRMSrcMem, (ops R32:$dst, i16mem:$src),
                   "movs{wl|x} {$src, $dst|$dst, $src}",
                   [(set R32:$dst, (sextloadi32i16 addr:$src))]>, TB;

def MOVZX16rr8 : I<0xB6, MRMSrcReg, (ops R16:$dst, R8 :$src),
                   "movz{bw|x} {$src, $dst|$dst, $src}",
                   [(set R16:$dst, (zext R8:$src))]>, TB, OpSize;
def MOVZX16rm8 : I<0xB6, MRMSrcMem, (ops R16:$dst, i8mem :$src),
                   "movz{bw|x} {$src, $dst|$dst, $src}",
                   [(set R16:$dst, (zextloadi16i8 addr:$src))]>, TB, OpSize;
def MOVZX32rr8 : I<0xB6, MRMSrcReg, (ops R32:$dst, R8 :$src),
                   "movz{bl|x} {$src, $dst|$dst, $src}",
                   [(set R32:$dst, (zext R8:$src))]>, TB;
def MOVZX32rm8 : I<0xB6, MRMSrcMem, (ops R32:$dst, i8mem :$src),
                   "movz{bl|x} {$src, $dst|$dst, $src}",
                   [(set R32:$dst, (zextloadi32i8 addr:$src))]>, TB;
def MOVZX32rr16: I<0xB7, MRMSrcReg, (ops R32:$dst, R16:$src),
                   "movz{wl|x} {$src, $dst|$dst, $src}",
                   [(set R32:$dst, (zext R16:$src))]>, TB;
def MOVZX32rm16: I<0xB7, MRMSrcMem, (ops R32:$dst, i16mem:$src),
                   "movz{wl|x} {$src, $dst|$dst, $src}",
                   [(set R32:$dst, (zextloadi32i16 addr:$src))]>, TB;
//===----------------------------------------------------------------------===//
// Miscellaneous Instructions
//===----------------------------------------------------------------------===//
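// rdtsc returns the 64-bit time-stamp counter in EDX:EAX; Imp<[],[EAX,EDX]>
// records both registers as implicit defs, since neither appears as an
// explicit operand.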
def RDTSC : I<0x31, RawFrm, (ops), "rdtsc", [(X86rdtsc)]>,
            TB, Imp<[],[EAX,EDX]>;
//===----------------------------------------------------------------------===//
// Alias Instructions
//===----------------------------------------------------------------------===//
// Alias instructions that map movr0 to xor.
// FIXME: remove when we can teach regalloc that xor reg, reg is ok.
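// xor reg, reg is a shorter encoding than mov reg, 0 (2 bytes vs. 5 in the
// 32-bit case) and a recognized zero idiom, but it clobbers EFLAGS, which is
// why the register allocator cannot substitute it on its own. The MRMInitReg
// form encodes $dst as both the source and the destination register.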
def MOV8r0  : I<0x30, MRMInitReg, (ops R8 :$dst),
                "xor{b} $dst, $dst",
                [(set R8:$dst, 0)]>;
def MOV16r0 : I<0x31, MRMInitReg, (ops R16:$dst),
                "xor{w} $dst, $dst",
                [(set R16:$dst, 0)]>, OpSize;
def MOV32r0 : I<0x31, MRMInitReg, (ops R32:$dst),
                "xor{l} $dst, $dst",
                [(set R32:$dst, 0)]>;
//===----------------------------------------------------------------------===//
// DWARF Pseudo Instructions
//
def DWARF_LOC   : I<0, Pseudo, (ops i32imm:$line, i32imm:$col, i32imm:$file),
                    "; .loc $file, $line, $col",
                    [(dwarf_loc (i32 imm:$line), (i32 imm:$col),
                                (i32 imm:$file))]>;
def DWARF_LABEL : I<0, Pseudo, (ops i32imm:$id),
                    "\nLdebug_loc${id:debug}:",
                    [(dwarf_label (i32 imm:$id))]>;
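// Both DWARF defs use opcode 0 with the Pseudo form: they exist only to emit
// debug-info directives and labels, and are never encoded as machine
// instructions.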
//===----------------------------------------------------------------------===//
// Non-Instruction Patterns
//===----------------------------------------------------------------------===//
// ConstantPool, GlobalAddress, ExternalSymbol
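// X86Wrapper wraps a target constant (constant-pool entry, global address,
// or external symbol) whose value resolves to an address at link time, so it
// can be selected straight into the immediate field of a mov, add, or store.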
def : Pat<(i32 (X86Wrapper tconstpool :$dst)), (MOV32ri tconstpool :$dst)>;
def : Pat<(i32 (X86Wrapper tglobaladdr :$dst)), (MOV32ri tglobaladdr :$dst)>;
def : Pat<(i32 (X86Wrapper texternalsym:$dst)), (MOV32ri texternalsym:$dst)>;
def : Pat<(add R32:$src1, (X86Wrapper tconstpool:$src2)),
          (ADD32ri R32:$src1, tconstpool:$src2)>;
def : Pat<(add R32:$src1, (X86Wrapper tglobaladdr :$src2)),
          (ADD32ri R32:$src1, tglobaladdr:$src2)>;
def : Pat<(add R32:$src1, (X86Wrapper texternalsym:$src2)),
          (ADD32ri R32:$src1, texternalsym:$src2)>;

def : Pat<(store (X86Wrapper tconstpool:$src), addr:$dst),
          (MOV32mi addr:$dst, tconstpool:$src)>;
def : Pat<(store (X86Wrapper tglobaladdr:$src), addr:$dst),
          (MOV32mi addr:$dst, tglobaladdr:$src)>;
def : Pat<(store (X86Wrapper texternalsym:$src), addr:$dst),
          (MOV32mi addr:$dst, texternalsym:$src)>;
// Calls
def : Pat<(X86call tglobaladdr:$dst),
          (CALLpcrel32 tglobaladdr:$dst)>;
def : Pat<(X86call texternalsym:$dst),
          (CALLpcrel32 texternalsym:$dst)>;
// addc and subc produce a carry in EFLAGS, so they map onto the ordinary
// flag-setting add and sub forms.
def : Pat<(addc R32:$src1, R32:$src2),
          (ADD32rr R32:$src1, R32:$src2)>;
def : Pat<(addc R32:$src1, (load addr:$src2)),
          (ADD32rm R32:$src1, addr:$src2)>;
def : Pat<(addc R32:$src1, imm:$src2),
          (ADD32ri R32:$src1, imm:$src2)>;
def : Pat<(addc R32:$src1, i32immSExt8:$src2),
          (ADD32ri8 R32:$src1, i32immSExt8:$src2)>;
def : Pat<(subc R32:$src1, R32:$src2),
          (SUB32rr R32:$src1, R32:$src2)>;
def : Pat<(subc R32:$src1, (load addr:$src2)),
          (SUB32rm R32:$src1, addr:$src2)>;
def : Pat<(subc R32:$src1, imm:$src2),
          (SUB32ri R32:$src1, imm:$src2)>;
def : Pat<(subc R32:$src1, i32immSExt8:$src2),
          (SUB32ri8 R32:$src1, i32immSExt8:$src2)>;
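// bool is materialized as 0 or 1 in a byte, so a truncating store to i1 is
// just a plain byte store: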
def : Pat<(truncstore (i8 imm:$src), addr:$dst, i1),
          (MOV8mi addr:$dst, imm:$src)>;
def : Pat<(truncstore R8:$src, addr:$dst, i1),
          (MOV8mr addr:$dst, R8:$src)>;
// {s|z}extload bool -> {s|z}extload byte
def : Pat<(sextloadi16i1 addr:$src), (MOVSX16rm8 addr:$src)>;
def : Pat<(sextloadi32i1 addr:$src), (MOVSX32rm8 addr:$src)>;
def : Pat<(zextloadi8i1 addr:$src), (MOV8rm addr:$src)>;
def : Pat<(zextloadi16i1 addr:$src), (MOVZX16rm8 addr:$src)>;
def : Pat<(zextloadi32i1 addr:$src), (MOVZX32rm8 addr:$src)>;
// extload bool -> extload byte
def : Pat<(extloadi8i1 addr:$src), (MOV8rm addr:$src)>;
// anyext -> zext
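// Any extension would be legal here, since the high bits of an anyext are
// undefined; movzx is used because writing the full destination register
// avoids a partial-register stall.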
def : Pat<(i16 (anyext R8 :$src)), (MOVZX16rr8 R8 :$src)>;
def : Pat<(i32 (anyext R8 :$src)), (MOVZX32rr8 R8 :$src)>;
def : Pat<(i32 (anyext R16:$src)), (MOVZX32rr16 R16:$src)>;
//===----------------------------------------------------------------------===//
// Some peepholes
//===----------------------------------------------------------------------===//
// (shl x, 1) ==> (add x, x)
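// Both forms compute 2*x; add is used because a register-register add issues
// at least as cheaply as a one-bit shift on common x86 implementations.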
def : Pat<(shl R8 :$src1, (i8 1)), (ADD8rr R8 :$src1, R8 :$src1)>;
def : Pat<(shl R16:$src1, (i8 1)), (ADD16rr R16:$src1, R16:$src1)>;
def : Pat<(shl R32:$src1, (i8 1)), (ADD32rr R32:$src1, R32:$src1)>;
// (or (x >> c) | (y << (32 - c))) ==> (shrd32 x, y, c)
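// shrd shifts $src1 right by CL and fills the vacated high bits from the low
// bits of $src2, so the whole or-of-opposite-shifts tree collapses into one
// instruction; shld below is the mirror image for left shifts.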
def : Pat<(or (srl R32:$src1, CL:$amt),
              (shl R32:$src2, (sub 32, CL:$amt))),
          (SHRD32rrCL R32:$src1, R32:$src2)>;
def : Pat<(store (or (srl (loadi32 addr:$dst), CL:$amt),
                     (shl R32:$src2, (sub 32, CL:$amt))), addr:$dst),
          (SHRD32mrCL addr:$dst, R32:$src2)>;

// (or (x << c) | (y >> (32 - c))) ==> (shld32 x, y, c)
def : Pat<(or (shl R32:$src1, CL:$amt),
              (srl R32:$src2, (sub 32, CL:$amt))),
          (SHLD32rrCL R32:$src1, R32:$src2)>;
def : Pat<(store (or (shl (loadi32 addr:$dst), CL:$amt),
                     (srl R32:$src2, (sub 32, CL:$amt))), addr:$dst),
          (SHLD32mrCL addr:$dst, R32:$src2)>;

// (or (x >> c) | (y << (16 - c))) ==> (shrd16 x, y, c)
def : Pat<(or (srl R16:$src1, CL:$amt),
              (shl R16:$src2, (sub 16, CL:$amt))),
          (SHRD16rrCL R16:$src1, R16:$src2)>;
def : Pat<(store (or (srl (loadi16 addr:$dst), CL:$amt),
                     (shl R16:$src2, (sub 16, CL:$amt))), addr:$dst),
          (SHRD16mrCL addr:$dst, R16:$src2)>;

// (or (x << c) | (y >> (16 - c))) ==> (shld16 x, y, c)
def : Pat<(or (shl R16:$src1, CL:$amt),
              (srl R16:$src2, (sub 16, CL:$amt))),
          (SHLD16rrCL R16:$src1, R16:$src2)>;
def : Pat<(store (or (shl (loadi16 addr:$dst), CL:$amt),
                     (srl R16:$src2, (sub 16, CL:$amt))), addr:$dst),
          (SHLD16mrCL addr:$dst, R16:$src2)>;
//===----------------------------------------------------------------------===//
// Floating Point Stack Support
//===----------------------------------------------------------------------===//
include "X86InstrFPStack.td"
//===----------------------------------------------------------------------===//
// MMX and XMM Packed Integer support (requires MMX, SSE, and SSE2)
//===----------------------------------------------------------------------===//
include "X86InstrMMX.td"
//===----------------------------------------------------------------------===//
// XMM Floating point support (requires SSE / SSE2)
//===----------------------------------------------------------------------===//
include "X86InstrSSE.td"