def TEST8i8 : Ii8<0xA8, RawFrm, (outs), (ins i8imm:$src),
"test{b}\t{$src, %al|%al, $src}", []>;
def TEST16i16 : Ii16<0xA9, RawFrm, (outs), (ins i16imm:$src),
"test{w}\t{$src, %ax|%ax, $src}", []>, OpSize;
def TEST32i32 : Ii32<0xA9, RawFrm, (outs), (ins i32imm:$src),
"test{l}\t{$src, %eax|%eax, $src}", []>;
def TEST8rm : I<0x84, MRMSrcMem, (outs), (ins GR8 :$src1, i8mem :$src2),
"test{b}\t{$src2, $src1|$src1, $src2}",
[(X86cmp (and GR8:$src1, (loadi8 addr:$src2)), 0),
(implicit EFLAGS)]>;
def TEST16rm : I<0x85, MRMSrcMem, (outs), (ins GR16:$src1, i16mem:$src2),
"test{w}\t{$src2, $src1|$src1, $src2}",
[(X86cmp (and GR16:$src1, (loadi16 addr:$src2)), 0),
(implicit EFLAGS)]>, OpSize;
def TEST32rm : I<0x85, MRMSrcMem, (outs), (ins GR32:$src1, i32mem:$src2),
"test{l}\t{$src2, $src1|$src1, $src2}",
[(X86cmp (and GR32:$src1, (loadi32 addr:$src2)), 0),
(implicit EFLAGS)]>;
def TEST8ri : Ii8 <0xF6, MRM0r, // flags = GR8 & imm8
(outs), (ins GR8:$src1, i8imm:$src2),
"test{b}\t{$src2, $src1|$src1, $src2}",
[(X86cmp (and_su GR8:$src1, imm:$src2), 0),
(implicit EFLAGS)]>;
def TEST16ri : Ii16<0xF7, MRM0r, // flags = GR16 & imm16
(outs), (ins GR16:$src1, i16imm:$src2),
"test{w}\t{$src2, $src1|$src1, $src2}",
[(X86cmp (and_su GR16:$src1, imm:$src2), 0),
(implicit EFLAGS)]>, OpSize;
def TEST32ri : Ii32<0xF7, MRM0r, // flags = GR32 & imm32
(outs), (ins GR32:$src1, i32imm:$src2),
"test{l}\t{$src2, $src1|$src1, $src2}",
[(X86cmp (and_su GR32:$src1, imm:$src2), 0),
(implicit EFLAGS)]>;
def TEST8mi : Ii8 <0xF6, MRM0m, // flags = [mem8] & imm8
(outs), (ins i8mem:$src1, i8imm:$src2),
"test{b}\t{$src2, $src1|$src1, $src2}",
[(X86cmp (and (loadi8 addr:$src1), imm:$src2), 0),
(implicit EFLAGS)]>;
def TEST16mi : Ii16<0xF7, MRM0m, // flags = [mem16] & imm16
(outs), (ins i16mem:$src1, i16imm:$src2),
"test{w}\t{$src2, $src1|$src1, $src2}",
[(X86cmp (and (loadi16 addr:$src1), imm:$src2), 0),
(implicit EFLAGS)]>, OpSize;
def TEST32mi : Ii32<0xF7, MRM0m, // flags = [mem32] & imm32
(outs), (ins i32mem:$src1, i32imm:$src2),
"test{l}\t{$src2, $src1|$src1, $src2}",
[(X86cmp (and (loadi32 addr:$src1), imm:$src2), 0),
(implicit EFLAGS)]>;
} // Defs = [EFLAGS]
// Condition code ops, incl. set if equal/not equal/...
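// For example, after a cmp that leaves ZF set, "sete %al" writes 1 to AL and
// "setne %al" writes 0; each SETcc below is just the register or memory form
// of one such EFLAGS-to-i8 materialization.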
let Defs = [EFLAGS], Uses = [AH], neverHasSideEffects = 1 in
def SAHF : I<0x9E, RawFrm, (outs), (ins), "sahf", []>; // flags = AH
let Defs = [AH], Uses = [EFLAGS], neverHasSideEffects = 1 in
def LAHF : I<0x9F, RawFrm, (outs), (ins), "lahf", []>; // AH = flags
let Uses = [EFLAGS] in {
def SETEr : I<0x94, MRM0r,
(outs GR8 :$dst), (ins),
"sete\t$dst",
[(set GR8:$dst, (X86setcc X86_COND_E, EFLAGS))]>,
TB; // GR8 = ==
def SETEm : I<0x94, MRM0m,
(outs), (ins i8mem:$dst),
"sete\t$dst",
[(store (X86setcc X86_COND_E, EFLAGS), addr:$dst)]>,
TB; // [mem8] = ==
def SETNEr : I<0x95, MRM0r,
(outs GR8 :$dst), (ins),
"setne\t$dst",
[(set GR8:$dst, (X86setcc X86_COND_NE, EFLAGS))]>,
TB; // GR8 = !=
def SETNEm : I<0x95, MRM0m,
(outs), (ins i8mem:$dst),
"setne\t$dst",
[(store (X86setcc X86_COND_NE, EFLAGS), addr:$dst)]>,
TB; // [mem8] = !=
def SETLr : I<0x9C, MRM0r,
(outs GR8 :$dst), (ins),
"setl\t$dst",
[(set GR8:$dst, (X86setcc X86_COND_L, EFLAGS))]>,
TB; // GR8 = < signed
def SETLm : I<0x9C, MRM0m,
(outs), (ins i8mem:$dst),
"setl\t$dst",
[(store (X86setcc X86_COND_L, EFLAGS), addr:$dst)]>,
TB; // [mem8] = < signed
def SETGEr : I<0x9D, MRM0r,
(outs GR8 :$dst), (ins),
"setge\t$dst",
[(set GR8:$dst, (X86setcc X86_COND_GE, EFLAGS))]>,
TB; // GR8 = >= signed
def SETGEm : I<0x9D, MRM0m,
(outs), (ins i8mem:$dst),
"setge\t$dst",
[(store (X86setcc X86_COND_GE, EFLAGS), addr:$dst)]>,
TB; // [mem8] = >= signed
def SETLEr : I<0x9E, MRM0r,
(outs GR8 :$dst), (ins),
"setle\t$dst",
[(set GR8:$dst, (X86setcc X86_COND_LE, EFLAGS))]>,
TB; // GR8 = <= signed
def SETLEm : I<0x9E, MRM0m,
(outs), (ins i8mem:$dst),
"setle\t$dst",
[(store (X86setcc X86_COND_LE, EFLAGS), addr:$dst)]>,
TB; // [mem8] = <= signed
def SETGr : I<0x9F, MRM0r,
(outs GR8 :$dst), (ins),
"setg\t$dst",
[(set GR8:$dst, (X86setcc X86_COND_G, EFLAGS))]>,
TB; // GR8 = > signed
def SETGm : I<0x9F, MRM0m,
(outs), (ins i8mem:$dst),
"setg\t$dst",
[(store (X86setcc X86_COND_G, EFLAGS), addr:$dst)]>,
TB; // [mem8] = > signed
def SETBr : I<0x92, MRM0r,
(outs GR8 :$dst), (ins),
"setb\t$dst",
[(set GR8:$dst, (X86setcc X86_COND_B, EFLAGS))]>,
TB; // GR8 = < unsign
def SETBm : I<0x92, MRM0m,
(outs), (ins i8mem:$dst),
"setb\t$dst",
[(store (X86setcc X86_COND_B, EFLAGS), addr:$dst)]>,
TB; // [mem8] = < unsign
def SETAEr : I<0x93, MRM0r,
(outs GR8 :$dst), (ins),
"setae\t$dst",
[(set GR8:$dst, (X86setcc X86_COND_AE, EFLAGS))]>,
TB; // GR8 = >= unsign
def SETAEm : I<0x93, MRM0m,
(outs), (ins i8mem:$dst),
"setae\t$dst",
[(store (X86setcc X86_COND_AE, EFLAGS), addr:$dst)]>,
TB; // [mem8] = >= unsign
def SETBEr : I<0x96, MRM0r,
(outs GR8 :$dst), (ins),
"setbe\t$dst",
[(set GR8:$dst, (X86setcc X86_COND_BE, EFLAGS))]>,
TB; // GR8 = <= unsign
def SETBEm : I<0x96, MRM0m,
(outs), (ins i8mem:$dst),
"setbe\t$dst",
[(store (X86setcc X86_COND_BE, EFLAGS), addr:$dst)]>,
TB; // [mem8] = <= unsign
def SETAr : I<0x97, MRM0r,
(outs GR8 :$dst), (ins),
"seta\t$dst",
[(set GR8:$dst, (X86setcc X86_COND_A, EFLAGS))]>,
TB; // GR8 = > unsign
def SETAm : I<0x97, MRM0m,
(outs), (ins i8mem:$dst),
"seta\t$dst",
[(store (X86setcc X86_COND_A, EFLAGS), addr:$dst)]>,
TB; // [mem8] = > unsign
def SETSr : I<0x98, MRM0r,
(outs GR8 :$dst), (ins),
"sets\t$dst",
[(set GR8:$dst, (X86setcc X86_COND_S, EFLAGS))]>,
TB; // GR8 = <sign bit>
def SETSm : I<0x98, MRM0m,
(outs), (ins i8mem:$dst),
"sets\t$dst",
[(store (X86setcc X86_COND_S, EFLAGS), addr:$dst)]>,
TB; // [mem8] = <sign bit>
def SETNSr : I<0x99, MRM0r,
(outs GR8 :$dst), (ins),
"setns\t$dst",
[(set GR8:$dst, (X86setcc X86_COND_NS, EFLAGS))]>,
TB; // GR8 = !<sign bit>
def SETNSm : I<0x99, MRM0m,
(outs), (ins i8mem:$dst),
"setns\t$dst",
[(store (X86setcc X86_COND_NS, EFLAGS), addr:$dst)]>,
TB; // [mem8] = !<sign bit>
def SETPr : I<0x9A, MRM0r,
(outs GR8 :$dst), (ins),
"setp\t$dst",
[(set GR8:$dst, (X86setcc X86_COND_P, EFLAGS))]>,
TB; // GR8 = parity
def SETPm : I<0x9A, MRM0m,
(outs), (ins i8mem:$dst),
"setp\t$dst",
[(store (X86setcc X86_COND_P, EFLAGS), addr:$dst)]>,
TB; // [mem8] = parity
def SETNPr : I<0x9B, MRM0r,
(outs GR8 :$dst), (ins),
"setnp\t$dst",
[(set GR8:$dst, (X86setcc X86_COND_NP, EFLAGS))]>,
TB; // GR8 = not parity
def SETNPm : I<0x9B, MRM0m,
(outs), (ins i8mem:$dst),
"setnp\t$dst",
[(store (X86setcc X86_COND_NP, EFLAGS), addr:$dst)]>,
TB; // [mem8] = not parity
def SETOr : I<0x90, MRM0r,
(outs GR8 :$dst), (ins),
"seto\t$dst",
[(set GR8:$dst, (X86setcc X86_COND_O, EFLAGS))]>,
TB; // GR8 = overflow
def SETOm : I<0x90, MRM0m,
(outs), (ins i8mem:$dst),
"seto\t$dst",
[(store (X86setcc X86_COND_O, EFLAGS), addr:$dst)]>,
TB; // [mem8] = overflow
def SETNOr : I<0x91, MRM0r,
(outs GR8 :$dst), (ins),
"setno\t$dst",
[(set GR8:$dst, (X86setcc X86_COND_NO, EFLAGS))]>,
TB; // GR8 = not overflow
def SETNOm : I<0x91, MRM0m,
(outs), (ins i8mem:$dst),
"setno\t$dst",
[(store (X86setcc X86_COND_NO, EFLAGS), addr:$dst)]>,
TB; // [mem8] = not overflow
} // Uses = [EFLAGS]
let Defs = [EFLAGS] in {
def CMP8i8 : Ii8<0x3C, RawFrm, (outs), (ins i8imm:$src),
"cmp{b}\t{$src, %al|%al, $src}", []>;
def CMP16i16 : Ii16<0x3D, RawFrm, (outs), (ins i16imm:$src),
"cmp{w}\t{$src, %ax|%ax, $src}", []>, OpSize;
def CMP32i32 : Ii32<0x3D, RawFrm, (outs), (ins i32imm:$src),
"cmp{l}\t{$src, %eax|%eax, $src}", []>;
def CMP8rr : I<0x38, MRMDestReg,
(outs), (ins GR8 :$src1, GR8 :$src2),
"cmp{b}\t{$src2, $src1|$src1, $src2}",
[(X86cmp GR8:$src1, GR8:$src2), (implicit EFLAGS)]>;
def CMP16rr : I<0x39, MRMDestReg,
(outs), (ins GR16:$src1, GR16:$src2),
"cmp{w}\t{$src2, $src1|$src1, $src2}",
[(X86cmp GR16:$src1, GR16:$src2), (implicit EFLAGS)]>, OpSize;
def CMP32rr : I<0x39, MRMDestReg,
(outs), (ins GR32:$src1, GR32:$src2),
"cmp{l}\t{$src2, $src1|$src1, $src2}",
[(X86cmp GR32:$src1, GR32:$src2), (implicit EFLAGS)]>;
def CMP8mr : I<0x38, MRMDestMem,
(outs), (ins i8mem :$src1, GR8 :$src2),
"cmp{b}\t{$src2, $src1|$src1, $src2}",
[(X86cmp (loadi8 addr:$src1), GR8:$src2),
(implicit EFLAGS)]>;
def CMP16mr : I<0x39, MRMDestMem,
(outs), (ins i16mem:$src1, GR16:$src2),
"cmp{w}\t{$src2, $src1|$src1, $src2}",
[(X86cmp (loadi16 addr:$src1), GR16:$src2),
(implicit EFLAGS)]>, OpSize;
def CMP32mr : I<0x39, MRMDestMem,
(outs), (ins i32mem:$src1, GR32:$src2),
"cmp{l}\t{$src2, $src1|$src1, $src2}",
[(X86cmp (loadi32 addr:$src1), GR32:$src2),
(implicit EFLAGS)]>;
def CMP8rm : I<0x3A, MRMSrcMem,
(outs), (ins GR8 :$src1, i8mem :$src2),
"cmp{b}\t{$src2, $src1|$src1, $src2}",
[(X86cmp GR8:$src1, (loadi8 addr:$src2)),
(implicit EFLAGS)]>;
def CMP16rm : I<0x3B, MRMSrcMem,
(outs), (ins GR16:$src1, i16mem:$src2),
"cmp{w}\t{$src2, $src1|$src1, $src2}",
[(X86cmp GR16:$src1, (loadi16 addr:$src2)),
(implicit EFLAGS)]>, OpSize;
def CMP32rm : I<0x3B, MRMSrcMem,
(outs), (ins GR32:$src1, i32mem:$src2),
"cmp{l}\t{$src2, $src1|$src1, $src2}",
[(X86cmp GR32:$src1, (loadi32 addr:$src2)),
(implicit EFLAGS)]>;
def CMP8mrmrr : I<0x3A, MRMSrcReg, (outs), (ins GR8:$src1, GR8:$src2),
"cmp{b}\t{$src2, $src1|$src1, $src2}", []>;
def CMP16mrmrr : I<0x3B, MRMSrcReg, (outs), (ins GR16:$src1, GR16:$src2),
"cmp{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize;
def CMP32mrmrr : I<0x3B, MRMSrcReg, (outs), (ins GR32:$src1, GR32:$src2),
"cmp{l}\t{$src2, $src1|$src1, $src2}", []>;
def CMP8ri : Ii8<0x80, MRM7r,
(outs), (ins GR8:$src1, i8imm:$src2),
"cmp{b}\t{$src2, $src1|$src1, $src2}",
[(X86cmp GR8:$src1, imm:$src2), (implicit EFLAGS)]>;
def CMP16ri : Ii16<0x81, MRM7r,
(outs), (ins GR16:$src1, i16imm:$src2),
"cmp{w}\t{$src2, $src1|$src1, $src2}",
[(X86cmp GR16:$src1, imm:$src2),
(implicit EFLAGS)]>, OpSize;
def CMP32ri : Ii32<0x81, MRM7r,
(outs), (ins GR32:$src1, i32imm:$src2),
"cmp{l}\t{$src2, $src1|$src1, $src2}",
[(X86cmp GR32:$src1, imm:$src2), (implicit EFLAGS)]>;
def CMP8mi : Ii8 <0x80, MRM7m,
(outs), (ins i8mem :$src1, i8imm :$src2),
"cmp{b}\t{$src2, $src1|$src1, $src2}",
[(X86cmp (loadi8 addr:$src1), imm:$src2),
(implicit EFLAGS)]>;
def CMP16mi : Ii16<0x81, MRM7m,
(outs), (ins i16mem:$src1, i16imm:$src2),
"cmp{w}\t{$src2, $src1|$src1, $src2}",
[(X86cmp (loadi16 addr:$src1), imm:$src2),
(implicit EFLAGS)]>, OpSize;
def CMP32mi : Ii32<0x81, MRM7m,
(outs), (ins i32mem:$src1, i32imm:$src2),
"cmp{l}\t{$src2, $src1|$src1, $src2}",
[(X86cmp (loadi32 addr:$src1), imm:$src2),
(implicit EFLAGS)]>;
def CMP16ri8 : Ii8<0x83, MRM7r,
(outs), (ins GR16:$src1, i16i8imm:$src2),
"cmp{w}\t{$src2, $src1|$src1, $src2}",
[(X86cmp GR16:$src1, i16immSExt8:$src2),
(implicit EFLAGS)]>, OpSize;
def CMP16mi8 : Ii8<0x83, MRM7m,
(outs), (ins i16mem:$src1, i16i8imm:$src2),
"cmp{w}\t{$src2, $src1|$src1, $src2}",
[(X86cmp (loadi16 addr:$src1), i16immSExt8:$src2),
(implicit EFLAGS)]>, OpSize;
def CMP32mi8 : Ii8<0x83, MRM7m,
(outs), (ins i32mem:$src1, i32i8imm:$src2),
"cmp{l}\t{$src2, $src1|$src1, $src2}",
[(X86cmp (loadi32 addr:$src1), i32immSExt8:$src2),
(implicit EFLAGS)]>;
def CMP32ri8 : Ii8<0x83, MRM7r,
(outs), (ins GR32:$src1, i32i8imm:$src2),
"cmp{l}\t{$src2, $src1|$src1, $src2}",
[(X86cmp GR32:$src1, i32immSExt8:$src2),
(implicit EFLAGS)]>;
} // Defs = [EFLAGS]
// Bit tests.
// TODO: BTC, BTR, and BTS
let Defs = [EFLAGS] in {
def BT16rr : I<0xA3, MRMDestReg, (outs), (ins GR16:$src1, GR16:$src2),
"bt{w}\t{$src2, $src1|$src1, $src2}",
[(X86bt GR16:$src1, GR16:$src2),
(implicit EFLAGS)]>, OpSize, TB;
def BT32rr : I<0xA3, MRMDestReg, (outs), (ins GR32:$src1, GR32:$src2),
"bt{l}\t{$src2, $src1|$src1, $src2}",
[(X86bt GR32:$src1, GR32:$src2),
(implicit EFLAGS)]>, TB;
// Unlike with the register+register form, the memory+register form of the
// bt instruction does not ignore the high bits of the index. From ISel's
// perspective, this is pretty bizarre. Disable these instructions for now.
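// Concretely: "btl %eax, %edx" masks the bit index to %eax & 31, but
// "btl %eax, (%ecx)" treats it as an arbitrary bit offset, e.g. index 40
// tests bit 8 of the dword at (%ecx)+4.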
//def BT16mr : I<0xA3, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2),
// "bt{w}\t{$src2, $src1|$src1, $src2}",
// [(X86bt (loadi16 addr:$src1), GR16:$src2),
// (implicit EFLAGS)]>, OpSize, TB, Requires<[FastBTMem]>;
//def BT32mr : I<0xA3, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2),
// "bt{l}\t{$src2, $src1|$src1, $src2}",
// [(X86bt (loadi32 addr:$src1), GR32:$src2),
// (implicit EFLAGS)]>, TB, Requires<[FastBTMem]>;
def BT16ri8 : Ii8<0xBA, MRM4r, (outs), (ins GR16:$src1, i16i8imm:$src2),
"bt{w}\t{$src2, $src1|$src1, $src2}",
[(X86bt GR16:$src1, i16immSExt8:$src2),
(implicit EFLAGS)]>, OpSize, TB;
def BT32ri8 : Ii8<0xBA, MRM4r, (outs), (ins GR32:$src1, i32i8imm:$src2),
"bt{l}\t{$src2, $src1|$src1, $src2}",
[(X86bt GR32:$src1, i32immSExt8:$src2),
(implicit EFLAGS)]>, TB;
// Note that these instructions don't need FastBTMem because that
// only applies when the other operand is in a register. When it's
// an immediate, bt is still fast.
def BT16mi8 : Ii8<0xBA, MRM4m, (outs), (ins i16mem:$src1, i16i8imm:$src2),
"bt{w}\t{$src2, $src1|$src1, $src2}",
[(X86bt (loadi16 addr:$src1), i16immSExt8:$src2),
(implicit EFLAGS)]>, OpSize, TB;
def BT32mi8 : Ii8<0xBA, MRM4m, (outs), (ins i32mem:$src1, i32i8imm:$src2),
"bt{l}\t{$src2, $src1|$src1, $src2}",
[(X86bt (loadi32 addr:$src1), i32immSExt8:$src2),
(implicit EFLAGS)]>, TB;
} // Defs = [EFLAGS]
// Use movsbl instead of movsbw; we don't care about the high 16 bits
// of the register here. This has a smaller encoding and avoids a
// partial-register update.
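// For instance, in 32-bit mode "movsbl %cl, %eax" encodes as 0f be c1
// (3 bytes) while "movsbw %cl, %ax" needs an operand-size prefix,
// 66 0f be c1 (4 bytes), and writing only AX would leave EAX's upper half
// live, forcing a partial-register merge on a later 32-bit read.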
def MOVSX16rr8 : I<0xBE, MRMSrcReg, (outs GR16:$dst), (ins GR8 :$src),
"", [(set GR16:$dst, (sext GR8:$src))]>, TB;
def MOVSX16rm8 : I<0xBE, MRMSrcMem, (outs GR16:$dst), (ins i8mem :$src),
"", [(set GR16:$dst, (sextloadi16i8 addr:$src))]>, TB;
def MOVSX32rr8 : I<0xBE, MRMSrcReg, (outs GR32:$dst), (ins GR8 :$src),
"movs{bl|x}\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, (sext GR8:$src))]>, TB;
def MOVSX32rm8 : I<0xBE, MRMSrcMem, (outs GR32:$dst), (ins i8mem :$src),
"movs{bl|x}\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, (sextloadi32i8 addr:$src))]>, TB;
def MOVSX32rr16: I<0xBF, MRMSrcReg, (outs GR32:$dst), (ins GR16:$src),
"movs{wl|x}\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, (sext GR16:$src))]>, TB;
def MOVSX32rm16: I<0xBF, MRMSrcMem, (outs GR32:$dst), (ins i16mem:$src),
"movs{wl|x}\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, (sextloadi32i16 addr:$src))]>, TB;
// Use movzbl instead of movzbw; we don't care about the high 16 bits
// of the register here. This has a smaller encoding and avoids a
// partial-register update.
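// Same trade-off as the movsx case above: "movzbl %cl, %eax" (0f b6 c1) is
// one byte shorter than "movzbw %cl, %ax" (66 0f b6 c1) and redefines the
// whole register, so no partial-register merge is needed afterwards.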
def MOVZX16rr8 : I<0xB6, MRMSrcReg, (outs GR16:$dst), (ins GR8 :$src),
"", [(set GR16:$dst, (zext GR8:$src))]>, TB;
def MOVZX16rm8 : I<0xB6, MRMSrcMem, (outs GR16:$dst), (ins i8mem :$src),
"", [(set GR16:$dst, (zextloadi16i8 addr:$src))]>, TB;
def MOVZX32rr8 : I<0xB6, MRMSrcReg, (outs GR32:$dst), (ins GR8 :$src),
"movz{bl|x}\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, (zext GR8:$src))]>, TB;
def MOVZX32rm8 : I<0xB6, MRMSrcMem, (outs GR32:$dst), (ins i8mem :$src),
"movz{bl|x}\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, (zextloadi32i8 addr:$src))]>, TB;
def MOVZX32rr16: I<0xB7, MRMSrcReg, (outs GR32:$dst), (ins GR16:$src),
"movz{wl|x}\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, (zext GR16:$src))]>, TB;
def MOVZX32rm16: I<0xB7, MRMSrcMem, (outs GR32:$dst), (ins i16mem:$src),
"movz{wl|x}\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, (zextloadi32i16 addr:$src))]>, TB;
// These are the same as the regular regular MOVZX32rr8 and MOVZX32rm8
// except that they use GR32_NOREX for the output operand register class
// instead of GR32. This allows them to operate on h registers on x86-64.
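// Background: the h registers (AH/BH/CH/DH) cannot be encoded in an
// instruction carrying a REX prefix, and R8D-R15D require one, so the
// destination must stay in the REX-free subset that GR32_NOREX describes.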
def MOVZX32_NOREXrr8 : I<0xB6, MRMSrcReg,
(outs GR32_NOREX:$dst), (ins GR8:$src),
"movz{bl|x}\t{$src, $dst|$dst, $src} # NOREX",
[]>, TB;
def MOVZX32_NOREXrm8 : I<0xB6, MRMSrcMem,
(outs GR32_NOREX:$dst), (ins i8mem:$src),
"movz{bl|x}\t{$src, $dst|$dst, $src} # NOREX",
[]>, TB;
let neverHasSideEffects = 1 in {
let Defs = [AX], Uses = [AL] in
def CBW : I<0x98, RawFrm, (outs), (ins),
"{cbtw|cbw}", []>, OpSize; // AX = signext(AL)
let Defs = [EAX], Uses = [AX] in
def CWDE : I<0x98, RawFrm, (outs), (ins),
"{cwtl|cwde}", []>; // EAX = signext(AX)
let Defs = [AX,DX], Uses = [AX] in
def CWD : I<0x99, RawFrm, (outs), (ins),
"{cwtd|cwd}", []>, OpSize; // DX:AX = signext(AX)
let Defs = [EAX,EDX], Uses = [EAX] in
def CDQ : I<0x99, RawFrm, (outs), (ins),
"{cltd|cdq}", []>; // EDX:EAX = signext(EAX)
}
//===----------------------------------------------------------------------===//
// Alias Instructions
//===----------------------------------------------------------------------===//
// Alias instructions that map movr0 to xor.
// FIXME: remove when we can teach regalloc that xor reg, reg is ok.
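// Size note: "xorl %eax, %eax" is 2 bytes (31 c0) versus 5 bytes for
// "movl $0, %eax" (b8 00 00 00 00); the price is that xor clobbers EFLAGS,
// hence Defs = [EFLAGS] below.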
let Defs = [EFLAGS], isReMaterializable = 1, isAsCheapAsAMove = 1,
isCodeGenOnly = 1 in {
def MOV8r0 : I<0x30, MRMInitReg, (outs GR8 :$dst), (ins),
"xor{b}\t$dst, $dst",
[(set GR8:$dst, 0)]>;
// Use xorl instead of xorw since we don't care about the high 16 bits,
// it's smaller, and it avoids a partial-register update.
def MOV16r0 : I<0x31, MRMInitReg, (outs GR16:$dst), (ins),
"", [(set GR16:$dst, 0)]>;
def MOV32r0 : I<0x31, MRMInitReg, (outs GR32:$dst), (ins),
"xor{l}\t$dst, $dst",
[(set GR32:$dst, 0)]>;
}
//===----------------------------------------------------------------------===//
// Thread Local Storage Instructions
//
// All calls clobber the non-callee saved registers. ESP is marked as
// a use to prevent stack-pointer assignments that appear immediately
// before calls from potentially appearing dead.
let Defs = [EAX, ECX, EDX, FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0,
MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
Uses = [ESP] in
def TLS_addr32 : I<0, Pseudo, (outs), (ins lea32mem:$sym),
"leal\t$sym, %eax; "
"call\t___tls_get_addr@PLT",
[(X86tlsaddr tls32addr:$sym)]>,
Requires<[In32BitMode]>;
let AddedComplexity = 5, isCodeGenOnly = 1 in
def GS_MOV32rm : I<0x8B, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
"movl\t%gs:$src, $dst",
[(set GR32:$dst, (gsload addr:$src))]>, SegGS;
let AddedComplexity = 5, isCodeGenOnly = 1 in
def FS_MOV32rm : I<0x8B, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
"movl\t%fs:$src, $dst",
[(set GR32:$dst, (fsload addr:$src))]>, SegFS;
//===----------------------------------------------------------------------===//
// DWARF Pseudo Instructions
//
def DWARF_LOC : I<0, Pseudo, (outs),
(ins i32imm:$line, i32imm:$col, i32imm:$file),
".loc\t$file $line $col",
[(dwarf_loc (i32 imm:$line), (i32 imm:$col),
(i32 imm:$file))]>;
//===----------------------------------------------------------------------===//
// EH Pseudo Instructions
//
let isTerminator = 1, isReturn = 1, isBarrier = 1,
hasCtrlDep = 1, isCodeGenOnly = 1 in {
def EH_RETURN : I<0xC3, RawFrm, (outs), (ins GR32:$addr),
"ret\t#eh_return, addr: $addr",
[(X86ehret GR32:$addr)]>;
}
//===----------------------------------------------------------------------===//
// Atomic support
//
// Atomic swap. These are just normal xchg instructions. But since a memory
// operand is referenced, the atomicity is ensured.
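// (The processor asserts LOCK# implicitly whenever xchg has a memory
// operand, so no explicit lock prefix is needed for these.)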
let Constraints = "$val = $dst" in {
def XCHG32rm : I<0x87, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$ptr, GR32:$val),
"xchg{l}\t{$val, $ptr|$ptr, $val}",
[(set GR32:$dst, (atomic_swap_32 addr:$ptr, GR32:$val))]>;
def XCHG16rm : I<0x87, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$ptr, GR16:$val),
"xchg{w}\t{$val, $ptr|$ptr, $val}",
[(set GR16:$dst, (atomic_swap_16 addr:$ptr, GR16:$val))]>,
OpSize;
def XCHG8rm : I<0x86, MRMSrcMem, (outs GR8:$dst), (ins i8mem:$ptr, GR8:$val),
"xchg{b}\t{$val, $ptr|$ptr, $val}",
[(set GR8:$dst, (atomic_swap_8 addr:$ptr, GR8:$val))]>;
}
let Defs = [EAX, EFLAGS], Uses = [EAX] in {
def LCMPXCHG32 : I<0xB1, MRMDestMem, (outs), (ins i32mem:$ptr, GR32:$swap),
"lock\n\t"
"cmpxchg{l}\t{$swap, $ptr|$ptr, $swap}",
[(X86cas addr:$ptr, GR32:$swap, 4)]>, TB, LOCK;
}
let Defs = [EAX, EDX, EFLAGS], Uses = [EAX, EBX, ECX, EDX] in {
def LCMPXCHG8B : I<0xC7, MRM1m, (outs), (ins i64mem:$ptr),
"lock\n\t"
"cmpxchg8b\t$ptr",
let Defs = [AX, EFLAGS], Uses = [AX] in {
def LCMPXCHG16 : I<0xB1, MRMDestMem, (outs), (ins i16mem:$ptr, GR16:$swap),
"lock\n\t"
"cmpxchg{w}\t{$swap, $ptr|$ptr, $swap}",
[(X86cas addr:$ptr, GR16:$swap, 2)]>, TB, OpSize, LOCK;
}
let Defs = [AL, EFLAGS], Uses = [AL] in {
def LCMPXCHG8 : I<0xB0, MRMDestMem, (outs), (ins i8mem:$ptr, GR8:$swap),
"lock\n\t"
"cmpxchg{b}\t{$swap, $ptr|$ptr, $swap}",
[(X86cas addr:$ptr, GR8:$swap, 1)]>, TB, LOCK;
}
let Constraints = "$val = $dst", Defs = [EFLAGS] in {
def LXADD32 : I<0xC1, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$ptr, GR32:$val),
"lock\n\t"
"xadd{l}\t{$val, $ptr|$ptr, $val}",
[(set GR32:$dst, (atomic_load_add_32 addr:$ptr, GR32:$val))]>,
TB, LOCK;
def LXADD16 : I<0xC1, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$ptr, GR16:$val),
"lock\n\t"
"xadd{w}\t{$val, $ptr|$ptr, $val}",
[(set GR16:$dst, (atomic_load_add_16 addr:$ptr, GR16:$val))]>,
TB, OpSize, LOCK;
def LXADD8 : I<0xC0, MRMSrcMem, (outs GR8:$dst), (ins i8mem:$ptr, GR8:$val),
"lock\n\t"
"xadd{b}\t{$val, $ptr|$ptr, $val}",
[(set GR8:$dst, (atomic_load_add_8 addr:$ptr, GR8:$val))]>,
TB, LOCK;
}
// Optimized codegen when the non-memory output is not used.
// FIXME: Use normal add / sub instructions and add lock prefix dynamically.
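// E.g. an atomic increment whose old value is never read can be emitted as
// "lock addl $1, (mem)" or "lock incl (mem)" instead of the heavier
// "lock xaddl" form, which must also produce a register result.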
def LOCK_ADD8mr : I<0x00, MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src2),
"lock\n\t"
"add{b}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
def LOCK_ADD16mr : I<0x01, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
"lock\n\t"
"add{w}\t{$src2, $dst|$dst, $src2}", []>, OpSize, LOCK;
def LOCK_ADD32mr : I<0x01, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
"lock\n\t"
"add{l}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
def LOCK_ADD8mi : Ii8<0x80, MRM0m, (outs), (ins i8mem :$dst, i8imm :$src2),
"lock\n\t"
"add{b}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
def LOCK_ADD16mi : Ii16<0x81, MRM0m, (outs), (ins i16mem:$dst, i16imm:$src2),
"lock\n\t"
"add{w}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
def LOCK_ADD32mi : Ii32<0x81, MRM0m, (outs), (ins i32mem:$dst, i32imm:$src2),
"lock\n\t"
"add{l}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
def LOCK_ADD16mi8 : Ii8<0x83, MRM0m, (outs), (ins i16mem:$dst, i16i8imm :$src2),
"lock\n\t"
"add{w}\t{$src2, $dst|$dst, $src2}", []>, OpSize, LOCK;
def LOCK_ADD32mi8 : Ii8<0x83, MRM0m, (outs), (ins i32mem:$dst, i32i8imm :$src2),
"lock\n\t"
"add{l}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
def LOCK_INC8m : I<0xFE, MRM0m, (outs), (ins i8mem :$dst),
"lock\n\t"
"inc{b}\t$dst", []>, LOCK;
def LOCK_INC16m : I<0xFF, MRM0m, (outs), (ins i16mem:$dst),
"lock\n\t"
"inc{w}\t$dst", []>, OpSize, LOCK;
def LOCK_INC32m : I<0xFF, MRM0m, (outs), (ins i32mem:$dst),
"lock\n\t"
"inc{l}\t$dst", []>, LOCK;
def LOCK_SUB8mr : I<0x28, MRMDestMem, (outs), (ins i8mem :$dst, GR8 :$src2),
"lock\n\t"
"sub{b}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
def LOCK_SUB16mr : I<0x29, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
"lock\n\t"
"sub{w}\t{$src2, $dst|$dst, $src2}", []>, OpSize, LOCK;
def LOCK_SUB32mr : I<0x29, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
"lock\n\t"
"sub{l}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
def LOCK_SUB8mi : Ii8<0x80, MRM5m, (outs), (ins i8mem :$dst, i8imm:$src2),
"lock\n\t"
"sub{b}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
def LOCK_SUB16mi : Ii16<0x81, MRM5m, (outs), (ins i16mem:$dst, i16imm:$src2),
"lock\n\t"
"sub{w}\t{$src2, $dst|$dst, $src2}", []>, OpSize, LOCK;
def LOCK_SUB32mi : Ii32<0x81, MRM5m, (outs), (ins i32mem:$dst, i32imm:$src2),
"lock\n\t"
"sub{l}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
def LOCK_SUB16mi8 : Ii8<0x83, MRM5m, (outs), (ins i16mem:$dst, i16i8imm :$src2),
"lock\n\t"
"sub{w}\t{$src2, $dst|$dst, $src2}", []>, OpSize, LOCK;
def LOCK_SUB32mi8 : Ii8<0x83, MRM5m, (outs), (ins i32mem:$dst, i32i8imm :$src2),
"lock\n\t"
"sub{l}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
def LOCK_DEC8m : I<0xFE, MRM1m, (outs), (ins i8mem :$dst),
"lock\n\t"
"dec{b}\t$dst", []>, LOCK;
def LOCK_DEC16m : I<0xFF, MRM1m, (outs), (ins i16mem:$dst),
"lock\n\t"
"dec{w}\t$dst", []>, OpSize, LOCK;
def LOCK_DEC32m : I<0xFF, MRM1m, (outs), (ins i32mem:$dst),
"lock\n\t"
"dec{l}\t$dst", []>, LOCK;
// Atomic exchange, and, or, xor
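// None of these has a single-instruction x86 form; each pseudo is expanded
// after selection (via the custom DAG sched inserter) into a
// load / operate / lock cmpxchg retry loop on the target machine block.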
let Constraints = "$val = $dst", Defs = [EFLAGS],
usesCustomDAGSchedInserter = 1 in {
def ATOMAND32 : I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
"#ATOMAND32 PSEUDO!",
[(set GR32:$dst, (atomic_load_and_32 addr:$ptr, GR32:$val))]>;
def ATOMOR32 : I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
"#ATOMOR32 PSEUDO!",
[(set GR32:$dst, (atomic_load_or_32 addr:$ptr, GR32:$val))]>;
def ATOMXOR32 : I<0, Pseudo,(outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
"#ATOMXOR32 PSEUDO!",
[(set GR32:$dst, (atomic_load_xor_32 addr:$ptr, GR32:$val))]>;
def ATOMNAND32 : I<0, Pseudo,(outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
"#ATOMNAND32 PSEUDO!",
[(set GR32:$dst, (atomic_load_nand_32 addr:$ptr, GR32:$val))]>;
def ATOMMIN32: I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$ptr, GR32:$val),
"#ATOMMIN32 PSEUDO!",
[(set GR32:$dst, (atomic_load_min_32 addr:$ptr, GR32:$val))]>;
def ATOMMAX32: I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
"#ATOMMAX32 PSEUDO!",
[(set GR32:$dst, (atomic_load_max_32 addr:$ptr, GR32:$val))]>;
def ATOMUMIN32: I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
"#ATOMUMIN32 PSEUDO!",
[(set GR32:$dst, (atomic_load_umin_32 addr:$ptr, GR32:$val))]>;
def ATOMUMAX32: I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
"#ATOMUMAX32 PSEUDO!",
[(set GR32:$dst, (atomic_load_umax_32 addr:$ptr, GR32:$val))]>;
def ATOMAND16 : I<0, Pseudo, (outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
"#ATOMAND16 PSEUDO!",
[(set GR16:$dst, (atomic_load_and_16 addr:$ptr, GR16:$val))]>;
def ATOMOR16 : I<0, Pseudo, (outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
"#ATOMOR16 PSEUDO!",
[(set GR16:$dst, (atomic_load_or_16 addr:$ptr, GR16:$val))]>;
def ATOMXOR16 : I<0, Pseudo,(outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
"#ATOMXOR16 PSEUDO!",
[(set GR16:$dst, (atomic_load_xor_16 addr:$ptr, GR16:$val))]>;
def ATOMNAND16 : I<0, Pseudo,(outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
"#ATOMNAND16 PSEUDO!",
[(set GR16:$dst, (atomic_load_nand_16 addr:$ptr, GR16:$val))]>;
def ATOMMIN16: I<0, Pseudo, (outs GR16:$dst), (ins i16mem:$ptr, GR16:$val),
"#ATOMMIN16 PSEUDO!",
[(set GR16:$dst, (atomic_load_min_16 addr:$ptr, GR16:$val))]>;
def ATOMMAX16: I<0, Pseudo, (outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
"#ATOMMAX16 PSEUDO!",
[(set GR16:$dst, (atomic_load_max_16 addr:$ptr, GR16:$val))]>;
def ATOMUMIN16: I<0, Pseudo, (outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
"#ATOMUMIN16 PSEUDO!",
[(set GR16:$dst, (atomic_load_umin_16 addr:$ptr, GR16:$val))]>;
def ATOMUMAX16: I<0, Pseudo, (outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
"#ATOMUMAX16 PSEUDO!",
[(set GR16:$dst, (atomic_load_umax_16 addr:$ptr, GR16:$val))]>;
def ATOMAND8 : I<0, Pseudo, (outs GR8:$dst),(ins i8mem:$ptr, GR8:$val),
"#ATOMAND8 PSEUDO!",
[(set GR8:$dst, (atomic_load_and_8 addr:$ptr, GR8:$val))]>;
def ATOMOR8 : I<0, Pseudo, (outs GR8:$dst),(ins i8mem:$ptr, GR8:$val),
"#ATOMOR8 PSEUDO!",
[(set GR8:$dst, (atomic_load_or_8 addr:$ptr, GR8:$val))]>;
def ATOMXOR8 : I<0, Pseudo,(outs GR8:$dst),(ins i8mem:$ptr, GR8:$val),
"#ATOMXOR8 PSEUDO!",
[(set GR8:$dst, (atomic_load_xor_8 addr:$ptr, GR8:$val))]>;
def ATOMNAND8 : I<0, Pseudo,(outs GR8:$dst),(ins i8mem:$ptr, GR8:$val),
"#ATOMNAND8 PSEUDO!",
[(set GR8:$dst, (atomic_load_nand_8 addr:$ptr, GR8:$val))]>;
}
let Constraints = "$val1 = $dst1, $val2 = $dst2",
Defs = [EFLAGS, EAX, EBX, ECX, EDX],
Uses = [EAX, EBX, ECX, EDX],
mayLoad = 1, mayStore = 1,
usesCustomDAGSchedInserter = 1 in {
def ATOMAND6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
(ins i64mem:$ptr, GR32:$val1, GR32:$val2),
"#ATOMAND6432 PSEUDO!", []>;
def ATOMOR6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
(ins i64mem:$ptr, GR32:$val1, GR32:$val2),
"#ATOMOR6432 PSEUDO!", []>;
def ATOMXOR6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
(ins i64mem:$ptr, GR32:$val1, GR32:$val2),
"#ATOMXOR6432 PSEUDO!", []>;
def ATOMNAND6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
(ins i64mem:$ptr, GR32:$val1, GR32:$val2),
"#ATOMNAND6432 PSEUDO!", []>;
def ATOMADD6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
(ins i64mem:$ptr, GR32:$val1, GR32:$val2),
"#ATOMADD6432 PSEUDO!", []>;
def ATOMSUB6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
(ins i64mem:$ptr, GR32:$val1, GR32:$val2),
"#ATOMSUB6432 PSEUDO!", []>;
def ATOMSWAP6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
(ins i64mem:$ptr, GR32:$val1, GR32:$val2),
"#ATOMSWAP6432 PSEUDO!", []>;
}
// Segmentation support instructions.
def LAR16rm : I<0x02, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
"lar{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize;
def LAR16rr : I<0x02, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
"lar{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize;
// i16mem operand in LAR32rm and GR32 operand in LAR32rr is not a typo.
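// lar reads only a 16-bit segment selector, so even the 32-bit form loads
// just 16 bits from memory, while the register form encodes a full GR32
// whose upper half the hardware ignores.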
def LAR32rm : I<0x02, MRMSrcMem, (outs GR32:$dst), (ins i16mem:$src),
"lar{l}\t{$src, $dst|$dst, $src}", []>, TB;
def LAR32rr : I<0x02, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
"lar{l}\t{$src, $dst|$dst, $src}", []>, TB;
// String manipulation instructions
def LODSB : I<0xAC, RawFrm, (outs), (ins), "lodsb", []>;
def LODSW : I<0xAD, RawFrm, (outs), (ins), "lodsw", []>, OpSize;
def LODSD : I<0xAD, RawFrm, (outs), (ins), "lodsd", []>;
//===----------------------------------------------------------------------===//
// Non-Instruction Patterns
//===----------------------------------------------------------------------===//
// ConstantPool GlobalAddress, ExternalSymbol, and JumpTable
def : Pat<(i32 (X86Wrapper tconstpool :$dst)), (MOV32ri tconstpool :$dst)>;
def : Pat<(i32 (X86Wrapper tjumptable :$dst)), (MOV32ri tjumptable :$dst)>;
def : Pat<(i32 (X86Wrapper tglobaltlsaddr:$dst)),(MOV32ri tglobaltlsaddr:$dst)>;
def : Pat<(i32 (X86Wrapper tglobaladdr :$dst)), (MOV32ri tglobaladdr :$dst)>;
def : Pat<(i32 (X86Wrapper texternalsym:$dst)), (MOV32ri texternalsym:$dst)>;
def : Pat<(add GR32:$src1, (X86Wrapper tconstpool:$src2)),
(ADD32ri GR32:$src1, tconstpool:$src2)>;
def : Pat<(add GR32:$src1, (X86Wrapper tjumptable:$src2)),
(ADD32ri GR32:$src1, tjumptable:$src2)>;
def : Pat<(add GR32:$src1, (X86Wrapper tglobaladdr :$src2)),
(ADD32ri GR32:$src1, tglobaladdr:$src2)>;
def : Pat<(add GR32:$src1, (X86Wrapper texternalsym:$src2)),
(ADD32ri GR32:$src1, texternalsym:$src2)>;
def : Pat<(store (i32 (X86Wrapper tglobaladdr:$src)), addr:$dst),
(MOV32mi addr:$dst, tglobaladdr:$src)>;
def : Pat<(store (i32 (X86Wrapper texternalsym:$src)), addr:$dst),
(MOV32mi addr:$dst, texternalsym:$src)>;
// tailcall stuff
def : Pat<(X86tcret GR32:$dst, imm:$off),
(TCRETURNri GR32:$dst, imm:$off)>;
def : Pat<(X86tcret (i32 tglobaladdr:$dst), imm:$off),
(TCRETURNdi tglobaladdr:$dst, imm:$off)>;
def : Pat<(X86tcret (i32 texternalsym:$dst), imm:$off),
(TCRETURNdi texternalsym:$dst, imm:$off)>;
def : Pat<(X86call (i32 texternalsym:$dst)),
(CALLpcrel32 texternalsym:$dst)>;
def : Pat<(X86call (i32 imm:$dst)),
(CALLpcrel32 imm:$dst)>, Requires<[CallImmAddr]>;
def : Pat<(addc GR32:$src1, GR32:$src2),
(ADD32rr GR32:$src1, GR32:$src2)>;
def : Pat<(addc GR32:$src1, (load addr:$src2)),
(ADD32rm GR32:$src1, addr:$src2)>;
def : Pat<(addc GR32:$src1, imm:$src2),
(ADD32ri GR32:$src1, imm:$src2)>;
def : Pat<(addc GR32:$src1, i32immSExt8:$src2),
(ADD32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(subc GR32:$src1, GR32:$src2),
(SUB32rr GR32:$src1, GR32:$src2)>;
def : Pat<(subc GR32:$src1, (load addr:$src2)),
(SUB32rm GR32:$src1, addr:$src2)>;
def : Pat<(subc GR32:$src1, imm:$src2),
(SUB32ri GR32:$src1, imm:$src2)>;
def : Pat<(subc GR32:$src1, i32immSExt8:$src2),
(SUB32ri8 GR32:$src1, i32immSExt8:$src2)>;
// Comparisons.
// TEST R,R is smaller than CMP R,0
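// E.g. "testl %eax, %eax" is 2 bytes (85 c0) while "cmpl $0, %eax" takes at
// least 3 (83 f8 00), and both leave identical ZF/SF for a compare with 0.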
def : Pat<(parallel (X86cmp GR8:$src1, 0), (implicit EFLAGS)),
(TEST8rr GR8:$src1, GR8:$src1)>;
def : Pat<(parallel (X86cmp GR16:$src1, 0), (implicit EFLAGS)),
(TEST16rr GR16:$src1, GR16:$src1)>;
def : Pat<(parallel (X86cmp GR32:$src1, 0), (implicit EFLAGS)),
(TEST32rr GR32:$src1, GR32:$src1)>;
// Conditional moves with folded loads with operands swapped and conditions
// inverted.
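// Here (X86cmov a, b, cc) selects b when cc holds, and the rm forms can only
// fold a load into that slot; e.g. (X86cmov (load m), r, COND_B) wants m
// when B is *false*, so it becomes CMOVAE r, m, loading m exactly when
// AE (= !B) holds.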
def : Pat<(X86cmov (loadi16 addr:$src1), GR16:$src2, X86_COND_B, EFLAGS),
(CMOVAE16rm GR16:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi32 addr:$src1), GR32:$src2, X86_COND_B, EFLAGS),
(CMOVAE32rm GR32:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi16 addr:$src1), GR16:$src2, X86_COND_AE, EFLAGS),
(CMOVB16rm GR16:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi32 addr:$src1), GR32:$src2, X86_COND_AE, EFLAGS),
(CMOVB32rm GR32:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi16 addr:$src1), GR16:$src2, X86_COND_E, EFLAGS),
(CMOVNE16rm GR16:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi32 addr:$src1), GR32:$src2, X86_COND_E, EFLAGS),
(CMOVNE32rm GR32:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi16 addr:$src1), GR16:$src2, X86_COND_NE, EFLAGS),
(CMOVE16rm GR16:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi32 addr:$src1), GR32:$src2, X86_COND_NE, EFLAGS),
(CMOVE32rm GR32:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi16 addr:$src1), GR16:$src2, X86_COND_BE, EFLAGS),
(CMOVA16rm GR16:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi32 addr:$src1), GR32:$src2, X86_COND_BE, EFLAGS),
(CMOVA32rm GR32:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi16 addr:$src1), GR16:$src2, X86_COND_A, EFLAGS),
(CMOVBE16rm GR16:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi32 addr:$src1), GR32:$src2, X86_COND_A, EFLAGS),
(CMOVBE32rm GR32:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi16 addr:$src1), GR16:$src2, X86_COND_L, EFLAGS),
(CMOVGE16rm GR16:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi32 addr:$src1), GR32:$src2, X86_COND_L, EFLAGS),
(CMOVGE32rm GR32:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi16 addr:$src1), GR16:$src2, X86_COND_GE, EFLAGS),
(CMOVL16rm GR16:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi32 addr:$src1), GR32:$src2, X86_COND_GE, EFLAGS),
(CMOVL32rm GR32:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi16 addr:$src1), GR16:$src2, X86_COND_LE, EFLAGS),
(CMOVG16rm GR16:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi32 addr:$src1), GR32:$src2, X86_COND_LE, EFLAGS),
(CMOVG32rm GR32:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi16 addr:$src1), GR16:$src2, X86_COND_G, EFLAGS),
(CMOVLE16rm GR16:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi32 addr:$src1), GR32:$src2, X86_COND_G, EFLAGS),
(CMOVLE32rm GR32:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi16 addr:$src1), GR16:$src2, X86_COND_P, EFLAGS),
(CMOVNP16rm GR16:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi32 addr:$src1), GR32:$src2, X86_COND_P, EFLAGS),
(CMOVNP32rm GR32:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi16 addr:$src1), GR16:$src2, X86_COND_NP, EFLAGS),
(CMOVP16rm GR16:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi32 addr:$src1), GR32:$src2, X86_COND_NP, EFLAGS),
(CMOVP32rm GR32:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi16 addr:$src1), GR16:$src2, X86_COND_S, EFLAGS),
(CMOVNS16rm GR16:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi32 addr:$src1), GR32:$src2, X86_COND_S, EFLAGS),
(CMOVNS32rm GR32:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi16 addr:$src1), GR16:$src2, X86_COND_NS, EFLAGS),
(CMOVS16rm GR16:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi32 addr:$src1), GR32:$src2, X86_COND_NS, EFLAGS),
(CMOVS32rm GR32:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi16 addr:$src1), GR16:$src2, X86_COND_O, EFLAGS),
(CMOVNO16rm GR16:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi32 addr:$src1), GR32:$src2, X86_COND_O, EFLAGS),
(CMOVNO32rm GR32:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi16 addr:$src1), GR16:$src2, X86_COND_NO, EFLAGS),
(CMOVO16rm GR16:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi32 addr:$src1), GR32:$src2, X86_COND_NO, EFLAGS),
(CMOVO32rm GR32:$src2, addr:$src1)>;
// zextload bool -> zextload byte
def : Pat<(zextloadi8i1 addr:$src), (MOV8rm addr:$src)>;
def : Pat<(zextloadi16i1 addr:$src), (MOVZX16rm8 addr:$src)>;
def : Pat<(zextloadi32i1 addr:$src), (MOVZX32rm8 addr:$src)>;
// extload bool -> extload byte
def : Pat<(extloadi8i1 addr:$src), (MOV8rm addr:$src)>;
def : Pat<(extloadi16i1 addr:$src), (MOVZX16rm8 addr:$src)>;
def : Pat<(extloadi32i1 addr:$src), (MOVZX32rm8 addr:$src)>;
def : Pat<(extloadi16i8 addr:$src), (MOVZX16rm8 addr:$src)>;
def : Pat<(extloadi32i8 addr:$src), (MOVZX32rm8 addr:$src)>;
def : Pat<(extloadi32i16 addr:$src), (MOVZX32rm16 addr:$src)>;
// anyext. Define these to do an explicit zero-extend to
// avoid partial-register updates.
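// anyext only guarantees the low bits, so zero-extension is a legal way to
// implement it, and movzx rewrites the whole destination register instead
// of merging into stale upper bits.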
def : Pat<(i16 (anyext GR8 :$src)), (MOVZX16rr8 GR8 :$src)>;
def : Pat<(i32 (anyext GR8 :$src)), (MOVZX32rr8 GR8 :$src)>;
def : Pat<(i32 (anyext GR16:$src)), (MOVZX32rr16 GR16:$src)>;
// (and (i32 load), 255) -> (zextload i8)
def : Pat<(i32 (and (nvloadi32 addr:$src), (i32 255))),
(MOVZX32rm8 addr:$src)>;
def : Pat<(i32 (and (nvloadi32 addr:$src), (i32 65535))),
(MOVZX32rm16 addr:$src)>;
//===----------------------------------------------------------------------===//
// Some peepholes
//===----------------------------------------------------------------------===//
// Odd encoding trick: -128 fits into an 8-bit immediate field while
// +128 doesn't, so in this special case use a sub instead of an add.
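// Worked example: "addl $128, %eax" needs a full 32-bit immediate (5 bytes),
// but x + 128 == x - (-128) and -128 fits the sign-extended imm8 form, so
// "subl $-128, %eax" (83 e8 80) does the same job in 3 bytes.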
def : Pat<(add GR16:$src1, 128),
(SUB16ri8 GR16:$src1, -128)>;
def : Pat<(store (add (loadi16 addr:$dst), 128), addr:$dst),
(SUB16mi8 addr:$dst, -128)>;
def : Pat<(add GR32:$src1, 128),
(SUB32ri8 GR32:$src1, -128)>;
def : Pat<(store (add (loadi32 addr:$dst), 128), addr:$dst),
(SUB32mi8 addr:$dst, -128)>;
// r & (2^16-1) ==> movz
def : Pat<(and GR32:$src1, 0xffff),
(MOVZX32rr16 (EXTRACT_SUBREG GR32:$src1, x86_subreg_16bit))>;
// r & (2^8-1) ==> movz
def : Pat<(and GR32:$src1, 0xff),
(MOVZX32rr8 (EXTRACT_SUBREG (COPY_TO_REGCLASS GR32:$src1, GR32_ABCD),
x86_subreg_8bit))>,
Requires<[In32BitMode]>;
// r & (2^8-1) ==> movz
def : Pat<(and GR16:$src1, 0xff),
(MOVZX16rr8 (EXTRACT_SUBREG (COPY_TO_REGCLASS GR16:$src1, GR16_ABCD),
x86_subreg_8bit))>,
Requires<[In32BitMode]>;
// sext_inreg patterns
def : Pat<(sext_inreg GR32:$src, i16),
(MOVSX32rr16 (EXTRACT_SUBREG GR32:$src, x86_subreg_16bit))>;
def : Pat<(sext_inreg GR32:$src, i8),
(MOVSX32rr8 (EXTRACT_SUBREG (COPY_TO_REGCLASS GR32:$src, GR32_ABCD),
x86_subreg_8bit))>,
Requires<[In32BitMode]>;
def : Pat<(sext_inreg GR16:$src, i8),
(MOVSX16rr8 (EXTRACT_SUBREG (COPY_TO_REGCLASS GR16:$src, GR16_ABCD),
x86_subreg_8bit))>,
Requires<[In32BitMode]>;
// trunc patterns
def : Pat<(i16 (trunc GR32:$src)),
(EXTRACT_SUBREG GR32:$src, x86_subreg_16bit)>;
def : Pat<(i8 (trunc GR32:$src)),
(EXTRACT_SUBREG (COPY_TO_REGCLASS GR32:$src, GR32_ABCD),
x86_subreg_8bit)>,
Requires<[In32BitMode]>;
def : Pat<(i8 (trunc GR16:$src)),
(EXTRACT_SUBREG (COPY_TO_REGCLASS GR16:$src, GR16_ABCD),
x86_subreg_8bit)>,
Requires<[In32BitMode]>;
// h-register tricks
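// (trunc (srl r, 8)) of a 16-bit value is just its high byte, which x86
// exposes directly as an h register (AH/BH/CH/DH); hence the copy into the
// ABCD class and the 8-bit-hi subregister extraction below.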
def : Pat<(i8 (trunc (srl_su GR16:$src, (i8 8)))),
(EXTRACT_SUBREG (COPY_TO_REGCLASS GR16:$src, GR16_ABCD),
x86_subreg_8bit_hi)>,
Requires<[In32BitMode]>;