TB, OpSize;
def SHRD16rri8 : Ii8<0xAC, MRMDestReg,
(ops R16:$dst, R16:$src1, R16:$src2, i8imm:$src3),
"shrd{w} {$src3, $src2, $dst|$dst, $src2, $src3}">,
TB, OpSize;
}
let isTwoAddress = 0 in {
def SHLD32mrCL : I<0xA5, MRMDestMem, (ops i32mem:$dst, R32:$src2),
"shld{l} {%cl, $src2, $dst|$dst, $src2, %CL}">,
Imp<[CL],[]>, TB;
def SHRD32mrCL : I<0xAD, MRMDestMem, (ops i32mem:$dst, R32:$src2),
"shrd{l} {%cl, $src2, $dst|$dst, $src2, %CL}">,
Imp<[CL],[]>, TB;
def SHLD32mri8 : Ii8<0xA4, MRMDestMem,
(ops i32mem:$dst, R32:$src2, i8imm:$src3),
"shld{l} {$src3, $src2, $dst|$dst, $src2, $src3}">, TB;
def SHRD32mri8 : Ii8<0xAC, MRMDestMem,
(ops i32mem:$dst, R32:$src2, i8imm:$src3),
"shrd{l} {$src3, $src2, $dst|$dst, $src2, $src3}">, TB;
def SHLD16mrCL : I<0xA5, MRMDestMem, (ops i16mem:$dst, R16:$src2),
"shld{w} {%cl, $src2, $dst|$dst, $src2, %CL}">,
Imp<[CL],[]>, TB, OpSize;
def SHRD16mrCL : I<0xAD, MRMDestMem, (ops i16mem:$dst, R16:$src2),
"shrd{w} {%cl, $src2, $dst|$dst, $src2, %CL}">,
Imp<[CL],[]>, TB, OpSize;
def SHLD16mri8 : Ii8<0xA4, MRMDestMem,
(ops i16mem:$dst, R16:$src2, i8imm:$src3),
"shld{w} {$src3, $src2, $dst|$dst, $src2, $src3}">,
TB, OpSize;
def SHRD16mri8 : Ii8<0xAC, MRMDestMem,
(ops i16mem:$dst, R16:$src2, i8imm:$src3),
"shrd{w} {$src3, $src2, $dst|$dst, $src2, $src3}">,
TB, OpSize;
}
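// A note on the asm strings above: "{x|y}" selects between the two assembler
// dialects the printer supports, AT&T (left of '|') and Intel (right). For
// example, SHRD16mri8's string prints as "shrdw $src3, $src2, $dst" in AT&T
// mode and as "shrd $dst, $src2, $src3" in Intel mode.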
// Arithmetic.
let isCommutable = 1 in { // X = ADD Y, Z --> X = ADD Z, Y
def ADD8rr : I<0x00, MRMDestReg, (ops R8 :$dst, R8 :$src1, R8 :$src2),
"add{b} {$src2, $dst|$dst, $src2}">;
let isConvertibleToThreeAddress = 1 in { // Can transform into LEA.
def ADD16rr : I<0x01, MRMDestReg, (ops R16:$dst, R16:$src1, R16:$src2),
"add{w} {$src2, $dst|$dst, $src2}">, OpSize;
def ADD32rr : I<0x01, MRMDestReg, (ops R32:$dst, R32:$src1, R32:$src2),
"add{l} {$src2, $dst|$dst, $src2}">;
} // end isConvertibleToThreeAddress
} // end isCommutable
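// A two-address ADD ties $src1 to $dst; marking the 16/32-bit forms
// convertible lets the two-address pass rewrite them as a three-address LEA
// when the tie would force an extra copy. Sketch (not a def in this file):
//   addl %ebx, %eax         ; requires the destination to equal a source
//   leal (%ebx,%ecx), %eax  ; same sum, no operand tied to the destination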
def ADD8rm : I<0x02, MRMSrcMem, (ops R8 :$dst, R8 :$src1, i8mem :$src2),
"add{b} {$src2, $dst|$dst, $src2}">;
def ADD16rm : I<0x03, MRMSrcMem, (ops R16:$dst, R16:$src1, i16mem:$src2),
"add{w} {$src2, $dst|$dst, $src2}">, OpSize;
def ADD32rm : I<0x03, MRMSrcMem, (ops R32:$dst, R32:$src1, i32mem:$src2),
"add{l} {$src2, $dst|$dst, $src2}">;
def ADD8ri : Ii8<0x80, MRM0r, (ops R8:$dst, R8:$src1, i8imm:$src2),
"add{b} {$src2, $dst|$dst, $src2}">;
let isConvertibleToThreeAddress = 1 in { // Can transform into LEA.
def ADD16ri : Ii16<0x81, MRM0r, (ops R16:$dst, R16:$src1, i16imm:$src2),
"add{w} {$src2, $dst|$dst, $src2}">, OpSize;
def ADD32ri : Ii32<0x81, MRM0r, (ops R32:$dst, R32:$src1, i32imm:$src2),
"add{l} {$src2, $dst|$dst, $src2}">;
def ADD16ri8 : Ii8<0x83, MRM0r, (ops R16:$dst, R16:$src1, i8imm:$src2),
"add{w} {$src2, $dst|$dst, $src2}">, OpSize;
def ADD32ri8 : Ii8<0x83, MRM0r, (ops R32:$dst, R32:$src1, i8imm:$src2),
"add{l} {$src2, $dst|$dst, $src2}">;
}
let isTwoAddress = 0 in {
def ADD8mr : I<0x00, MRMDestMem, (ops i8mem :$dst, R8 :$src2),
"add{b} {$src2, $dst|$dst, $src2}">;
def ADD16mr : I<0x01, MRMDestMem, (ops i16mem:$dst, R16:$src2),
"add{w} {$src2, $dst|$dst, $src2}">, OpSize;
def ADD32mr : I<0x01, MRMDestMem, (ops i32mem:$dst, R32:$src2),
"add{l} {$src2, $dst|$dst, $src2}">;
def ADD8mi : Ii8<0x80, MRM0m, (ops i8mem :$dst, i8imm :$src2),
"add{b} {$src2, $dst|$dst, $src2}">;
def ADD16mi : Ii16<0x81, MRM0m, (ops i16mem:$dst, i16imm:$src2),
"add{w} {$src2, $dst|$dst, $src2}">, OpSize;
def ADD32mi : Ii32<0x81, MRM0m, (ops i32mem:$dst, i32imm:$src2),
"add{l} {$src2, $dst|$dst, $src2}">;
def ADD16mi8 : Ii8<0x83, MRM0m, (ops i16mem:$dst, i8imm :$src2),
"add{w} {$src2, $dst|$dst, $src2}">, OpSize;
def ADD32mi8 : Ii8<0x83, MRM0m, (ops i32mem:$dst, i8imm :$src2),
"add{l} {$src2, $dst|$dst, $src2}">;
}
let isCommutable = 1 in { // X = ADC Y, Z --> X = ADC Z, Y
def ADC32rr : I<0x11, MRMDestReg, (ops R32:$dst, R32:$src1, R32:$src2),
"adc{l} {$src2, $dst|$dst, $src2}">;
def ADC32rm : I<0x13, MRMSrcMem , (ops R32:$dst, R32:$src1, i32mem:$src2),
"adc{l} {$src2, $dst|$dst, $src2}">;
def ADC32ri : Ii32<0x81, MRM2r, (ops R32:$dst, R32:$src1, i32imm:$src2),
"adc{l} {$src2, $dst|$dst, $src2}">;
def ADC32ri8 : Ii8<0x83, MRM2r, (ops R32:$dst, R32:$src1, i8imm:$src2),
"adc{l} {$src2, $dst|$dst, $src2}">;
}
let isTwoAddress = 0 in {
def ADC32mr : I<0x11, MRMDestMem, (ops i32mem:$dst, R32:$src2),
"adc{l} {$src2, $dst|$dst, $src2}">;
def ADC32mi : Ii32<0x81, MRM2m, (ops i32mem:$dst, i32imm:$src2),
"adc{l} {$src2, $dst|$dst, $src2}">;
def ADC32mi8 : Ii8<0x83, MRM2m, (ops i32mem:$dst, i8imm :$src2),
"adc{l} {$src2, $dst|$dst, $src2}">;
}
def SUB8rr : I<0x28, MRMDestReg, (ops R8 :$dst, R8 :$src1, R8 :$src2),
"sub{b} {$src2, $dst|$dst, $src2}">;
def SUB16rr : I<0x29, MRMDestReg, (ops R16:$dst, R16:$src1, R16:$src2),
"sub{w} {$src2, $dst|$dst, $src2}">, OpSize;
def SUB32rr : I<0x29, MRMDestReg, (ops R32:$dst, R32:$src1, R32:$src2),
"sub{l} {$src2, $dst|$dst, $src2}">;
def SUB8rm : I<0x2A, MRMSrcMem, (ops R8 :$dst, R8 :$src1, i8mem :$src2),
"sub{b} {$src2, $dst|$dst, $src2}">;
def SUB16rm : I<0x2B, MRMSrcMem, (ops R16:$dst, R16:$src1, i16mem:$src2),
"sub{w} {$src2, $dst|$dst, $src2}">, OpSize;
def SUB32rm : I<0x2B, MRMSrcMem, (ops R32:$dst, R32:$src1, i32mem:$src2),
"sub{l} {$src2, $dst|$dst, $src2}">;
def SUB8ri : Ii8 <0x80, MRM5r, (ops R8:$dst, R8:$src1, i8imm:$src2),
"sub{b} {$src2, $dst|$dst, $src2}">;
def SUB16ri : Ii16<0x81, MRM5r, (ops R16:$dst, R16:$src1, i16imm:$src2),
"sub{w} {$src2, $dst|$dst, $src2}">, OpSize;
def SUB32ri : Ii32<0x81, MRM5r, (ops R32:$dst, R32:$src1, i32imm:$src2),
"sub{l} {$src2, $dst|$dst, $src2}">;
def SUB16ri8 : Ii8<0x83, MRM5r, (ops R16:$dst, R16:$src1, i8imm:$src2),
"sub{w} {$src2, $dst|$dst, $src2}">, OpSize;
def SUB32ri8 : Ii8<0x83, MRM5r, (ops R32:$dst, R32:$src1, i8imm:$src2),
"sub{l} {$src2, $dst|$dst, $src2}">;
let isTwoAddress = 0 in {
def SUB8mr : I<0x28, MRMDestMem, (ops i8mem :$dst, R8 :$src2),
"sub{b} {$src2, $dst|$dst, $src2}">;
def SUB16mr : I<0x29, MRMDestMem, (ops i16mem:$dst, R16:$src2),
"sub{w} {$src2, $dst|$dst, $src2}">, OpSize;
def SUB32mr : I<0x29, MRMDestMem, (ops i32mem:$dst, R32:$src2),
"sub{l} {$src2, $dst|$dst, $src2}">;
def SUB8mi : Ii8<0x80, MRM5m, (ops i8mem :$dst, i8imm:$src2),
"sub{b} {$src2, $dst|$dst, $src2}">;
def SUB16mi : Ii16<0x81, MRM5m, (ops i16mem:$dst, i16imm:$src2),
"sub{w} {$src2, $dst|$dst, $src2}">, OpSize;
def SUB32mi : Ii32<0x81, MRM5m, (ops i32mem:$dst, i32imm:$src2),
"sub{l} {$src2, $dst|$dst, $src2}">;
def SUB16mi8 : Ii8<0x83, MRM5m, (ops i16mem:$dst, i8imm :$src2),
"sub{w} {$src2, $dst|$dst, $src2}">, OpSize;
def SUB32mi8 : Ii8<0x83, MRM5m, (ops i32mem:$dst, i8imm :$src2),
"sub{l} {$src2, $dst|$dst, $src2}">;
}
def SBB32rr : I<0x19, MRMDestReg, (ops R32:$dst, R32:$src1, R32:$src2),
"sbb{l} {$src2, $dst|$dst, $src2}">;
let isTwoAddress = 0 in {
def SBB32mr : I<0x19, MRMDestMem, (ops i32mem:$dst, R32:$src2),
"sbb{l} {$src2, $dst|$dst, $src2}">;
def SBB8mi : Ii8<0x80, MRM3m, (ops i8mem:$dst, i8imm:$src2),
"sbb{b} {$src2, $dst|$dst, $src2}">;
def SBB16mi : Ii16<0x81, MRM3m, (ops i16mem:$dst, i16imm:$src2),
"sbb{w} {$src2, $dst|$dst, $src2}">, OpSize;
def SBB32mi : Ii32<0x81, MRM3m, (ops i32mem:$dst, i32imm:$src2),
"sbb{l} {$src2, $dst|$dst, $src2}">;
def SBB16mi8 : Ii8<0x83, MRM3m, (ops i16mem:$dst, i8imm :$src2),
"sbb{w} {$src2, $dst|$dst, $src2}">, OpSize;
def SBB32mi8 : Ii8<0x83, MRM3m, (ops i32mem:$dst, i8imm :$src2),
"sbb{l} {$src2, $dst|$dst, $src2}">;
}
def SBB8ri : Ii8<0x80, MRM3r, (ops R8:$dst, R8:$src1, i8imm:$src2),
"sbb{b} {$src2, $dst|$dst, $src2}">;
def SBB16ri : Ii16<0x81, MRM3r, (ops R16:$dst, R16:$src1, i16imm:$src2),
"sbb{w} {$src2, $dst|$dst, $src2}">, OpSize;
def SBB32rm : I<0x1B, MRMSrcMem, (ops R32:$dst, R32:$src1, i32mem:$src2),
"sbb{l} {$src2, $dst|$dst, $src2}">;
def SBB32ri : Ii32<0x81, MRM3r, (ops R32:$dst, R32:$src1, i32imm:$src2),
"sbb{l} {$src2, $dst|$dst, $src2}">;
def SBB16ri8 : Ii8<0x83, MRM3r, (ops R16:$dst, R16:$src1, i8imm:$src2),
"sbb{w} {$src2, $dst|$dst, $src2}">, OpSize;
def SBB32ri8 : Ii8<0x83, MRM3r, (ops R32:$dst, R32:$src1, i8imm:$src2),
"sbb{l} {$src2, $dst|$dst, $src2}">;
let isCommutable = 1 in { // X = IMUL Y, Z --> X = IMUL Z, Y
def IMUL16rr : I<0xAF, MRMSrcReg, (ops R16:$dst, R16:$src1, R16:$src2),
"imul{w} {$src2, $dst|$dst, $src2}">, TB, OpSize;
def IMUL32rr : I<0xAF, MRMSrcReg, (ops R32:$dst, R32:$src1, R32:$src2),
"imul{l} {$src2, $dst|$dst, $src2}">, TB;
def IMUL16rm : I<0xAF, MRMSrcMem, (ops R16:$dst, R16:$src1, i16mem:$src2),
"imul{w} {$src2, $dst|$dst, $src2}">, TB, OpSize;
def IMUL32rm : I<0xAF, MRMSrcMem, (ops R32:$dst, R32:$src1, i32mem:$src2),
"imul{l} {$src2, $dst|$dst, $src2}">, TB;
} // end Two Address instructions
// Surprisingly enough, these are not two address instructions!
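// The immediate forms encode an explicit destination register, e.g.
// "imull $5, %ecx, %eax" computes EAX = ECX * 5 without tying EAX to ECX.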
def IMUL16rri : Ii16<0x69, MRMSrcReg, // R16 = R16*I16
(ops R16:$dst, R16:$src1, i16imm:$src2),
"imul{w} {$src2, $src1, $dst|$dst, $src1, $src2}">,
OpSize;
def IMUL32rri : Ii32<0x69, MRMSrcReg, // R32 = R32*I32
(ops R32:$dst, R32:$src1, i32imm:$src2),
"imul{l} {$src2, $src1, $dst|$dst, $src1, $src2}">;
def IMUL16rri8 : Ii8<0x6B, MRMSrcReg, // R16 = R16*I8
(ops R16:$dst, R16:$src1, i8imm:$src2),
"imul{w} {$src2, $src1, $dst|$dst, $src1, $src2}">, OpSize;
def IMUL32rri8 : Ii8<0x6B, MRMSrcReg, // R32 = R32*I8
(ops R32:$dst, R32:$src1, i8imm:$src2),
"imul{l} {$src2, $src1, $dst|$dst, $src1, $src2}">;
def IMUL16rmi : Ii16<0x69, MRMSrcMem, // R16 = [mem16]*I16
(ops R16:$dst, i16mem:$src1, i16imm:$src2),
"imul{w} {$src2, $src1, $dst|$dst, $src1, $src2}">, OpSize;
def IMUL32rmi : Ii32<0x69, MRMSrcMem, // R32 = [mem32]*I32
(ops R32:$dst, i32mem:$src1, i32imm:$src2),
"imul{l} {$src2, $src1, $dst|$dst, $src1, $src2}">;
def IMUL16rmi8 : Ii8<0x6B, MRMSrcMem, // R16 = [mem16]*I8
(ops R16:$dst, i16mem:$src1, i8imm :$src2),
"imul{w} {$src2, $src1, $dst|$dst, $src1, $src2}">, OpSize;
def IMUL32rmi8 : Ii8<0x6B, MRMSrcMem, // R32 = [mem32]*I8
(ops R32:$dst, i32mem:$src1, i8imm:$src2),
"imul{l} {$src2, $src1, $dst|$dst, $src1, $src2}">;
//===----------------------------------------------------------------------===//
// Test instructions are just like AND, except they don't generate a result.
//
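// The canonical use is testing a register against itself, e.g.
// "testl %eax, %eax" sets ZF iff EAX is zero, with no immediate byte needed.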
def TEST8rr : I<0x84, MRMDestReg, (ops R8:$src1, R8:$src2),
"test{b} {$src2, $src1|$src1, $src2}">;
def TEST16rr : I<0x85, MRMDestReg, (ops R16:$src1, R16:$src2),
"test{w} {$src2, $src1|$src1, $src2}">, OpSize;
def TEST32rr : I<0x85, MRMDestReg, (ops R32:$src1, R32:$src2),
"test{l} {$src2, $src1|$src1, $src2}">;
def TEST8mr : I<0x84, MRMDestMem, (ops i8mem :$src1, R8 :$src2),
"test{b} {$src2, $src1|$src1, $src2}">;
def TEST16mr : I<0x85, MRMDestMem, (ops i16mem:$src1, R16:$src2),
"test{w} {$src2, $src1|$src1, $src2}">, OpSize;
def TEST32mr : I<0x85, MRMDestMem, (ops i32mem:$src1, R32:$src2),
"test{l} {$src2, $src1|$src1, $src2}">;
def TEST8rm : I<0x84, MRMSrcMem, (ops R8 :$src1, i8mem :$src2),
"test{b} {$src2, $src1|$src1, $src2}">;
def TEST16rm : I<0x85, MRMSrcMem, (ops R16:$src1, i16mem:$src2),
"test{w} {$src2, $src1|$src1, $src2}">, OpSize;
def TEST32rm : I<0x85, MRMSrcMem, (ops R32:$src1, i32mem:$src2),
"test{l} {$src2, $src1|$src1, $src2}">;
def TEST8ri : Ii8 <0xF6, MRM0r, // flags = R8 & imm8
(ops R8:$src1, i8imm:$src2),
"test{b} {$src2, $src1|$src1, $src2}">;
def TEST16ri : Ii16<0xF7, MRM0r, // flags = R16 & imm16
(ops R16:$src1, i16imm:$src2),
"test{w} {$src2, $src1|$src1, $src2}">, OpSize;
def TEST32ri : Ii32<0xF7, MRM0r, // flags = R32 & imm32
(ops R32:$src1, i32imm:$src2),
"test{l} {$src2, $src1|$src1, $src2}">;
def TEST8mi : Ii8 <0xF6, MRM0m, // flags = [mem8] & imm8
(ops i8mem:$src1, i8imm:$src2),
"test{b} {$src2, $src1|$src1, $src2}">;
def TEST16mi : Ii16<0xF7, MRM0m, // flags = [mem16] & imm16
(ops i16mem:$src1, i16imm:$src2),
"test{w} {$src2, $src1|$src1, $src2}">, OpSize;
def TEST32mi : Ii32<0xF7, MRM0m, // flags = [mem32] & imm32
(ops i32mem:$src1, i32imm:$src2),
"test{l} {$src2, $src1|$src1, $src2}">;
// Condition code ops, incl. set if equal/not equal/...
def SAHF : I<0x9E, RawFrm, (ops), "sahf">, Imp<[AH],[]>; // flags = AH
def LAHF : I<0x9F, RawFrm, (ops), "lahf">, Imp<[],[AH]>; // AH = flags
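// SETcc stores a single byte (0 or 1) reflecting the current EFLAGS
// condition, typically right after a CMP or TEST; e.g. "cmpl %ebx, %eax"
// followed by "setl %cl" leaves CL = 1 iff EAX < EBX (signed).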
def SETBr : I<0x92, MRM0r,
(ops R8 :$dst), "setb $dst">, TB; // R8 = < unsign
def SETBm : I<0x92, MRM0m,
(ops i8mem:$dst), "setb $dst">, TB; // [mem8] = < unsign
def SETAEr : I<0x93, MRM0r,
(ops R8 :$dst), "setae $dst">, TB; // R8 = >= unsign
def SETAEm : I<0x93, MRM0m,
(ops i8mem:$dst), "setae $dst">, TB; // [mem8] = >= unsign
def SETEr : I<0x94, MRM0r,
(ops R8 :$dst), "sete $dst">, TB; // R8 = ==
def SETEm : I<0x94, MRM0m,
(ops i8mem:$dst), "sete $dst">, TB; // [mem8] = ==
def SETNEr : I<0x95, MRM0r,
(ops R8 :$dst), "setne $dst">, TB; // R8 = !=
def SETNEm : I<0x95, MRM0m,
(ops i8mem:$dst), "setne $dst">, TB; // [mem8] = !=
def SETBEr : I<0x96, MRM0r,
(ops R8 :$dst), "setbe $dst">, TB; // R8 = <= unsign
def SETBEm : I<0x96, MRM0m,
(ops i8mem:$dst), "setbe $dst">, TB; // [mem8] = <= unsign
def SETAr : I<0x97, MRM0r,
(ops R8 :$dst), "seta $dst">, TB; // R8 = > signed
def SETAm : I<0x97, MRM0m,
(ops i8mem:$dst), "seta $dst">, TB; // [mem8] = > signed
def SETSr : I<0x98, MRM0r,
(ops R8 :$dst), "sets $dst">, TB; // R8 = <sign bit>
def SETSm : I<0x98, MRM0m,
(ops i8mem:$dst), "sets $dst">, TB; // [mem8] = <sign bit>
def SETNSr : I<0x99, MRM0r,
(ops R8 :$dst), "setns $dst">, TB; // R8 = !<sign bit>
def SETNSm : I<0x99, MRM0m,
(ops i8mem:$dst), "setns $dst">, TB; // [mem8] = !<sign bit>
def SETPr : I<0x9A, MRM0r,
(ops R8 :$dst), "setp $dst">, TB; // R8 = parity
def SETPm : I<0x9A, MRM0m,
(ops i8mem:$dst), "setp $dst">, TB; // [mem8] = parity
def SETNPr : I<0x9B, MRM0r,
(ops R8 :$dst), "setnp $dst">, TB; // R8 = not parity
def SETNPm : I<0x9B, MRM0m,
(ops i8mem:$dst), "setnp $dst">, TB; // [mem8] = not parity
def SETLr : I<0x9C, MRM0r,
(ops R8 :$dst), "setl $dst">, TB; // R8 = < signed
def SETLm : I<0x9C, MRM0m,
(ops i8mem:$dst), "setl $dst">, TB; // [mem8] = < signed
def SETGEr : I<0x9D, MRM0r,
(ops R8 :$dst), "setge $dst">, TB; // R8 = >= signed
def SETGEm : I<0x9D, MRM0m,
(ops i8mem:$dst), "setge $dst">, TB; // [mem8] = >= signed
def SETLEr : I<0x9E, MRM0r,
(ops R8 :$dst), "setle $dst">, TB; // R8 = <= signed
def SETLEm : I<0x9E, MRM0m,
(ops i8mem:$dst), "setle $dst">, TB; // [mem8] = <= signed
def SETGr : I<0x9F, MRM0r,
(ops R8 :$dst), "setg $dst">, TB; // R8 = < signed
def SETGm : I<0x9F, MRM0m,
(ops i8mem:$dst), "setg $dst">, TB; // [mem8] = < signed
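// Integer comparisons: CMP performs a subtraction, sets EFLAGS, and discards
// the result. The mr forms (MRMDestMem) compare memory against a register;
// the rm forms (MRMSrcMem) compare a register against memory.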
def CMP8rr : I<0x38, MRMDestReg,
(ops R8 :$src1, R8 :$src2),
"cmp{b} {$src2, $src1|$src1, $src2}">;
def CMP16rr : I<0x39, MRMDestReg,
(ops R16:$src1, R16:$src2),
"cmp{w} {$src2, $src1|$src1, $src2}">, OpSize;
def CMP32rr : I<0x39, MRMDestReg,
(ops R32:$src1, R32:$src2),
"cmp{l} {$src2, $src1|$src1, $src2}">;
def CMP8mr : I<0x38, MRMDestMem,
(ops i8mem :$src1, R8 :$src2),
"cmp{b} {$src2, $src1|$src1, $src2}">;
def CMP16mr : I<0x39, MRMDestMem,
(ops i16mem:$src1, R16:$src2),
"cmp{w} {$src2, $src1|$src1, $src2}">, OpSize;
def CMP32mr : I<0x39, MRMDestMem,
(ops i32mem:$src1, R32:$src2),
"cmp{l} {$src2, $src1|$src1, $src2}">;
def CMP8rm : I<0x3A, MRMSrcMem,
(ops R8 :$src1, i8mem :$src2),
"cmp{b} {$src2, $src1|$src1, $src2}">;
def CMP16rm : I<0x3B, MRMSrcMem,
(ops R16:$src1, i16mem:$src2),
"cmp{w} {$src2, $src1|$src1, $src2}">, OpSize;
def CMP32rm : I<0x3B, MRMSrcMem,
(ops R32:$src1, i32mem:$src2),
"cmp{l} {$src2, $src1|$src1, $src2}">;
def CMP8ri : Ii8<0x80, MRM7r,
(ops R8:$src1, i8imm:$src2),
"cmp{b} {$src2, $src1|$src1, $src2}">;
def CMP16ri : Ii16<0x81, MRM7r,
(ops R16:$src1, i16imm:$src2),
"cmp{w} {$src2, $src1|$src1, $src2}">, OpSize;
def CMP32ri : Ii32<0x81, MRM7r,
(ops R32:$src1, i32imm:$src2),
"cmp{l} {$src2, $src1|$src1, $src2}">;
def CMP8mi : Ii8 <0x80, MRM7m,
(ops i8mem :$src1, i8imm :$src2),
"cmp{b} {$src2, $src1|$src1, $src2}">;
def CMP16mi : Ii16<0x81, MRM7m,
(ops i16mem:$src1, i16imm:$src2),
"cmp{w} {$src2, $src1|$src1, $src2}">, OpSize;
def CMP32mi : Ii32<0x81, MRM7m,
(ops i32mem:$src1, i32imm:$src2),
"cmp{l} {$src2, $src1|$src1, $src2}">;
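// Sign and zero extension. The {bw|x}-style braces pick the dialect-specific
// mnemonic: AT&T spells these movsbw, movzbl, etc., while Intel uses the
// single movsx/movzx mnemonics for every source/destination width.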
def MOVSX16rr8 : I<0xBE, MRMSrcReg, (ops R16:$dst, R8 :$src),
"movs{bw|x} {$src, $dst|$dst, $src}">, TB, OpSize;
def MOVSX16rm8 : I<0xBE, MRMSrcMem, (ops R16:$dst, i8mem :$src),
"movs{bw|x} {$src, $dst|$dst, $src}">, TB, OpSize;
def MOVSX32rr8 : I<0xBE, MRMSrcReg, (ops R32:$dst, R8 :$src),
"movs{bl|x} {$src, $dst|$dst, $src}">, TB;
def MOVSX32rm8 : I<0xBE, MRMSrcMem, (ops R32:$dst, i8mem :$src),
"movs{bl|x} {$src, $dst|$dst, $src}">, TB;
def MOVSX32rr16: I<0xBF, MRMSrcReg, (ops R32:$dst, R16:$src),
"movs{wl|x} {$src, $dst|$dst, $src}">, TB;
def MOVSX32rm16: I<0xBF, MRMSrcMem, (ops R32:$dst, i16mem:$src),
"movs{wl|x} {$src, $dst|$dst, $src}">, TB;
def MOVZX16rr8 : I<0xB6, MRMSrcReg, (ops R16:$dst, R8 :$src),
"movz{bw|x} {$src, $dst|$dst, $src}">, TB, OpSize;
def MOVZX16rm8 : I<0xB6, MRMSrcMem, (ops R16:$dst, i8mem :$src),
"movz{bw|x} {$src, $dst|$dst, $src}">, TB, OpSize;
def MOVZX32rr8 : I<0xB6, MRMSrcReg, (ops R32:$dst, R8 :$src),
"movz{bl|x} {$src, $dst|$dst, $src}">, TB;
def MOVZX32rm8 : I<0xB6, MRMSrcMem, (ops R32:$dst, i8mem :$src),
"movz{bl|x} {$src, $dst|$dst, $src}">, TB;
def MOVZX32rr16: I<0xB7, MRMSrcReg, (ops R32:$dst, R16:$src),
"movz{wl|x} {$src, $dst|$dst, $src}">, TB;
def MOVZX32rm16: I<0xB7, MRMSrcMem, (ops R32:$dst, i16mem:$src),
"movz{wl|x} {$src, $dst|$dst, $src}">, TB;
//===----------------------------------------------------------------------===//
// XMM Floating point support (requires SSE2)
//===----------------------------------------------------------------------===//
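// Note on prefixes: the XS and XD classes emit the F3 and F2 prefix bytes
// that select the scalar-single and scalar-double forms of a shared
// 0F-escaped opcode, while TB alone (optionally with OpSize for the 66
// prefix) gives the packed forms.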
def MOVSSrm : I<0x10, MRMSrcMem, (ops RXMM:$dst, f32mem:$src),
"movss {$src, $dst|$dst, $src}">, XS;
def MOVSSmr : I<0x11, MRMDestMem, (ops f32mem:$dst, RXMM:$src),
"movss {$src, $dst|$dst, $src}">, XS;
def MOVSDrm : I<0x10, MRMSrcMem, (ops RXMM:$dst, f64mem:$src),
"movsd {$src, $dst|$dst, $src}">, XD;
def MOVSDmr : I<0x11, MRMDestMem, (ops f64mem:$dst, RXMM:$src),
"movsd {$src, $dst|$dst, $src}">, XD;
def MOVAPSrr: I<0x28, MRMSrcReg, (ops RXMM:$dst, RXMM:$src),
"movaps {$src, $dst|$dst, $src}">, TB;
def MOVAPSrm: I<0x28, MRMSrcMem, (ops RXMM:$dst, f32mem:$src),
"movaps {$src, $dst|$dst, $src}">, TB;
def MOVAPSmr: I<0x29, MRMDestMem, (ops f32mem:$dst, RXMM:$src),
"movaps {$src, $dst|$dst, $src}">, TB;
def MOVAPDrr: I<0x28, MRMSrcReg, (ops RXMM:$dst, RXMM:$src),
"movapd {$src, $dst|$dst, $src}">, TB, OpSize;
def MOVAPDrm: I<0x28, MRMSrcMem, (ops RXMM:$dst, f64mem:$src),
"movapd {$src, $dst|$dst, $src}">, TB, OpSize;
def MOVAPDmr: I<0x29, MRMDestMem, (ops f64mem:$dst, RXMM:$src),
"movapd {$src, $dst|$dst, $src}">, TB, OpSize;
def CVTTSD2SIrr: I<0x2C, MRMSrcReg, (ops R32:$dst, RXMM:$src),
"cvttsd2si {$src, $dst|$dst, $src}">, XD;
def CVTTSD2SIrm: I<0x2C, MRMSrcMem, (ops R32:$dst, f64mem:$src),
"cvttsd2si {$src, $dst|$dst, $src}">, XD;
def CVTTSS2SIrr: I<0x2C, MRMSrcReg, (ops R32:$dst, RXMM:$src),
"cvttss2si {$src, $dst|$dst, $src}">, XS;
def CVTTSS2SIrm: I<0x2C, MRMSrcMem, (ops R32:$dst, f32mem:$src),
"cvttss2si {$src, $dst|$dst, $src}">, XS;
def CVTSD2SSrr: I<0x5A, MRMSrcReg, (ops RXMM:$dst, RXMM:$src),
"cvtsd2ss {$src, $dst|$dst, $src}">, XD;
def CVTSD2SSrm: I<0x5A, MRMSrcMem, (ops RXMM:$dst, f64mem:$src),
"cvtsd2ss {$src, $dst|$dst, $src}">, XD;
def CVTSS2SDrr: I<0x5A, MRMSrcReg, (ops RXMM:$dst, RXMM:$src),
"cvtss2sd {$src, $dst|$dst, $src}">, XS;
def CVTSS2SDrm: I<0x5A, MRMSrcMem, (ops RXMM:$dst, f32mem:$src),
"cvtss2sd {$src, $dst|$dst, $src}">, XS;
def CVTSI2SSrr: I<0x2A, MRMSrcReg, (ops RXMM:$dst, R32:$src),
"cvtsi2ss {$src, $dst|$dst, $src}">, XS;
def CVTSI2SSrm: I<0x2A, MRMSrcMem, (ops RXMM:$dst, i32mem:$src),
"cvtsi2ss {$src, $dst|$dst, $src}">, XS;
def CVTSI2SDrr: I<0x2A, MRMSrcReg, (ops RXMM:$dst, R32:$src),
"cvtsi2sd {$src, $dst|$dst, $src}">, XD;
def CVTSI2SDrm: I<0x2A, MRMSrcMem, (ops RXMM:$dst, i32mem:$src),
"cvtsi2sd {$src, $dst|$dst, $src}">, XD;
def SQRTSSrm : I<0x51, MRMSrcMem, (ops RXMM:$dst, f32mem:$src),
"sqrtss {$src, $dst|$dst, $src}">, XS;
def SQRTSSrr : I<0x51, MRMSrcReg, (ops RXMM:$dst, RXMM:$src),
"sqrtss {$src, $dst|$dst, $src}">, XS;
def SQRTSDrm : I<0x51, MRMSrcMem, (ops RXMM:$dst, f64mem:$src),
"sqrtsd {$src, $dst|$dst, $src}">, XD;
def SQRTSDrr : I<0x51, MRMSrcReg, (ops RXMM:$dst, RXMM:$src),
"sqrtsd {$src, $dst|$dst, $src}">, XD;
def UCOMISDrr: I<0x2E, MRMSrcReg, (ops RXMM:$dst, RXMM:$src),
"ucomisd {$src, $dst|$dst, $src}">, TB, OpSize;
def UCOMISDrm: I<0x2E, MRMSrcMem, (ops RXMM:$dst, f64mem:$src),
"ucomisd {$src, $dst|$dst, $src}">, TB, OpSize;
def UCOMISSrr: I<0x2E, MRMSrcReg, (ops RXMM:$dst, RXMM:$src),
"ucomiss {$src, $dst|$dst, $src}">, TB;
def UCOMISSrm: I<0x2E, MRMSrcMem, (ops RXMM:$dst, f32mem:$src),
"ucomiss {$src, $dst|$dst, $src}">, TB;
// Pseudo-instructions that map fld0 to xorps/xorpd for SSE.
// FIXME: remove when we can teach regalloc that xor reg, reg is ok.
def FLD0SS : I<0x57, MRMSrcReg, (ops RXMM:$dst),
"xorps $dst, $dst">, TB;
def FLD0SD : I<0x57, MRMSrcReg, (ops RXMM:$dst),
"xorpd $dst, $dst">, TB, OpSize;
let isTwoAddress = 1 in {
let isCommutable = 1 in {
def ADDSSrr : I<0x58, MRMSrcReg, (ops RXMM:$dst, RXMM:$src1, RXMM:$src),
"addss {$src, $dst|$dst, $src}">, XS;
def ADDSDrr : I<0x58, MRMSrcReg, (ops RXMM:$dst, RXMM:$src1, RXMM:$src),
"addsd {$src, $dst|$dst, $src}">, XD;
def ANDPSrr : I<0x54, MRMSrcReg, (ops RXMM:$dst, RXMM:$src1, RXMM:$src),
"andps {$src, $dst|$dst, $src}">, TB;
def ANDPDrr : I<0x54, MRMSrcReg, (ops RXMM:$dst, RXMM:$src1, RXMM:$src),
"andpd {$src, $dst|$dst, $src}">, TB, OpSize;
def MULSSrr : I<0x59, MRMSrcReg, (ops RXMM:$dst, RXMM:$src1, RXMM:$src),
"mulss {$src, $dst|$dst, $src}">, XS;
def MULSDrr : I<0x59, MRMSrcReg, (ops RXMM:$dst, RXMM:$src1, RXMM:$src),
"mulsd {$src, $dst|$dst, $src}">, XD;
def ORPSrr : I<0x56, MRMSrcReg, (ops RXMM:$dst, RXMM:$src1, RXMM:$src),
"orps {$src, $dst|$dst, $src}">, TB;
def ORPDrr : I<0x56, MRMSrcReg, (ops RXMM:$dst, RXMM:$src1, RXMM:$src),
"orpd {$src, $dst|$dst, $src}">, TB, OpSize;
def XORPSrr : I<0x57, MRMSrcReg, (ops RXMM:$dst, RXMM:$src1, RXMM:$src),
"xorps {$src, $dst|$dst, $src}">, TB;
def XORPDrr : I<0x57, MRMSrcReg, (ops RXMM:$dst, RXMM:$src1, RXMM:$src),
"xorpd {$src, $dst|$dst, $src}">, TB, OpSize;
}
def ANDNPSrr : I<0x55, MRMSrcReg, (ops RXMM:$dst, RXMM:$src1, RXMM:$src),
"andnps {$src, $dst|$dst, $src}">, TB;
def ANDNPDrr : I<0x55, MRMSrcReg, (ops RXMM:$dst, RXMM:$src1, RXMM:$src),
"andnpd {$src, $dst|$dst, $src}">, TB, OpSize;
def ADDSSrm : I<0x58, MRMSrcMem, (ops RXMM:$dst, RXMM:$src1, f32mem:$src),
"addss {$src, $dst|$dst, $src}">, XS;
def ADDSDrm : I<0x58, MRMSrcMem, (ops RXMM:$dst, RXMM:$src1, f64mem:$src),
"addsd {$src, $dst|$dst, $src}">, XD;
def MULSSrm : I<0x59, MRMSrcMem, (ops RXMM:$dst, RXMM:$src1, f32mem:$src),
"mulss {$src, $dst|$dst, $src}">, XS;
def MULSDrm : I<0x59, MRMSrcMem, (ops RXMM:$dst, RXMM:$src1, f64mem:$src),
"mulsd {$src, $dst|$dst, $src}">, XD;
def DIVSSrm : I<0x5E, MRMSrcMem, (ops RXMM:$dst, RXMM:$src1, f32mem:$src),
"divss {$src, $dst|$dst, $src}">, XS;
def DIVSSrr : I<0x5E, MRMSrcReg, (ops RXMM:$dst, RXMM:$src1, RXMM:$src),
"divss {$src, $dst|$dst, $src}">, XS;
def DIVSDrm : I<0x5E, MRMSrcMem, (ops RXMM:$dst, RXMM:$src1, f64mem:$src),
"divsd {$src, $dst|$dst, $src}">, XD;
def DIVSDrr : I<0x5E, MRMSrcReg, (ops RXMM:$dst, RXMM:$src1, RXMM:$src),
"divsd {$src, $dst|$dst, $src}">, XD;
def SUBSSrm : I<0x5C, MRMSrcMem, (ops RXMM:$dst, RXMM:$src1, f32mem:$src),
"subss {$src, $dst|$dst, $src}">, XS;
def SUBSSrr : I<0x5C, MRMSrcReg, (ops RXMM:$dst, RXMM:$src1, RXMM:$src),
"subss {$src, $dst|$dst, $src}">, XS;
def SUBSDrm : I<0x5C, MRMSrcMem, (ops RXMM:$dst, RXMM:$src1, f64mem:$src),
"subsd {$src, $dst|$dst, $src}">, XD;
def SUBSDrr : I<0x5C, MRMSrcReg, (ops RXMM:$dst, RXMM:$src1, RXMM:$src),
"subsd {$src, $dst|$dst, $src}">, XD;
def CMPSSrr : I<0xC2, MRMSrcReg,
(ops RXMM:$dst, RXMM:$src1, RXMM:$src, SSECC:$cc),
"cmp${cc}ss {$src, $dst|$dst, $src}">, XS;
def CMPSSrm : I<0xC2, MRMSrcMem,
(ops RXMM:$dst, RXMM:$src1, f32mem:$src, SSECC:$cc),
"cmp${cc}ss {$src, $dst|$dst, $src}">, XS;
def CMPSDrr : I<0xC2, MRMSrcReg,
(ops RXMM:$dst, RXMM:$src1, RXMM:$src, SSECC:$cc),
"cmp${cc}sd {$src, $dst|$dst, $src}">, XD;
def CMPSDrm : I<0xC2, MRMSrcMem,
(ops RXMM:$dst, RXMM:$src1, f64mem:$src, SSECC:$cc),
"cmp${cc}sd {$src, $dst|$dst, $src}">, XD;
}
//===----------------------------------------------------------------------===//
// Stack-based Floating point support
//===----------------------------------------------------------------------===//
// FIXME: These need to indicate mod/ref sets for FP regs... & FP 'TOP'
// Floating point instruction template
class FPI<bits<8> o, Format F, FPFormat fp, dag ops, string asm>
: X86Inst<o, F, NoImm, ops, asm> {
let FPForm = fp; let FPFormBits = FPForm.Value;
}
// Pseudo instructions for floating point. We use these pseudo instructions
// because they can be expanded by the FP stackifier into one of many different
// forms of instructions for doing these operations. Until the stackifier runs,
// we prefer to be abstract.
def FpMOV : FPI<0, Pseudo, SpecialFP,
(ops RFP, RFP), "">; // f1 = fmov f2
def FpADD : FPI<0, Pseudo, TwoArgFP ,
(ops RFP, RFP, RFP), "">; // f1 = fadd f2, f3
def FpSUB : FPI<0, Pseudo, TwoArgFP ,
(ops RFP, RFP, RFP), "">; // f1 = fsub f2, f3
def FpMUL : FPI<0, Pseudo, TwoArgFP ,
(ops RFP, RFP, RFP), "">; // f1 = fmul f2, f3
def FpDIV : FPI<0, Pseudo, TwoArgFP ,
(ops RFP, RFP, RFP), "">; // f1 = fdiv f2, f3
def FpGETRESULT : FPI<0, Pseudo, SpecialFP, (ops RFP), "">,
Imp<[ST0], []>; // FPR = ST(0)
def FpSETRESULT : FPI<0, Pseudo, SpecialFP, (ops RFP), "">,
Imp<[], [ST0]>; // ST(0) = FPR
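// A sketch of the expansion (the stackifier picks the concrete form): an
// FpADD whose operands end up as ST(0) and ST(i) can become FADDST0r
// (ST(0) = ST(0) + ST(i)), or FADDPrST0 when popping the operand is
// profitable.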
// FADD reg, mem: Before stackification, these are represented by:
// R1 = FADD* R2, [mem]
def FADD32m : FPI<0xD8, MRM0m, OneArgFPRW, // ST(0) = ST(0) + [mem32real]
(ops f32mem:$src), "fadd{s} $src">;
def FADD64m : FPI<0xDC, MRM0m, OneArgFPRW, // ST(0) = ST(0) + [mem64real]
(ops f64mem:$src), "fadd{l} $src">;
//def FIADD16m : FPI<0xDE, MRM0m, OneArgFPRW>; // ST(0) = ST(0) + [mem16int]
//def FIADD32m : FPI<0xDA, MRM0m, OneArgFPRW>; // ST(0) = ST(0) + [mem32int]
// FMUL reg, mem: Before stackification, these are represented by:
// R1 = FMUL* R2, [mem]
def FMUL32m : FPI<0xD8, MRM1m, OneArgFPRW, // ST(0) = ST(0) * [mem32real]
(ops f32mem:$src), "fmul{s} $src">;
def FMUL64m : FPI<0xDC, MRM1m, OneArgFPRW, // ST(0) = ST(0) * [mem64real]
(ops f64mem:$src), "fmul{l} $src">;
// ST(0) = ST(0) * [mem16int]
//def FIMUL16m : FPI16m<"fimul", 0xDE, MRM1m, OneArgFPRW>;
// ST(0) = ST(0) * [mem32int]
//def FIMUL32m : FPI32m<"fimul", 0xDA, MRM1m, OneArgFPRW>;
// FSUB reg, mem: Before stackification, these are represented by:
// R1 = FSUB* R2, [mem]
def FSUB32m : FPI<0xD8, MRM4m, OneArgFPRW, // ST(0) = ST(0) - [mem32real]
(ops f32mem:$src), "fsub{s} $src">;
def FSUB64m : FPI<0xDC, MRM4m, OneArgFPRW, // ST(0) = ST(0) - [mem64real]
(ops f64mem:$src), "fsub{l} $src">;
// ST(0) = ST(0) - [mem16int]
//def FISUB16m : FPI16m<"fisub", 0xDE, MRM4m, OneArgFPRW>;
// ST(0) = ST(0) - [mem32int]
//def FISUB32m : FPI32m<"fisub", 0xDA, MRM4m, OneArgFPRW>;
// FSUBR reg, mem: Before stackification, these are represented by:
// R1 = FSUBR* R2, [mem]
// Note that the order of operands does not reflect the operation being
// performed.
def FSUBR32m : FPI<0xD8, MRM5m, OneArgFPRW, // ST(0) = [mem32real] - ST(0)
(ops f32mem:$src), "fsubr{s} $src">;
def FSUBR64m : FPI<0xDC, MRM5m, OneArgFPRW, // ST(0) = [mem64real] - ST(0)
(ops f64mem:$src), "fsubr{l} $src">;
// ST(0) = [mem16int] - ST(0)
//def FISUBR16m : FPI16m<"fisubr", 0xDE, MRM5m, OneArgFPRW>;
// ST(0) = [mem32int] - ST(0)
//def FISUBR32m : FPI32m<"fisubr", 0xDA, MRM5m, OneArgFPRW>;
// FDIV reg, mem: Before stackification, these are represented by:
// R1 = FDIV* R2, [mem]
def FDIV32m : FPI<0xD8, MRM6m, OneArgFPRW, // ST(0) = ST(0) / [mem32real]
(ops f32mem:$src), "fdiv{s} $src">;
def FDIV64m : FPI<0xDC, MRM6m, OneArgFPRW, // ST(0) = ST(0) / [mem64real]
(ops f64mem:$src), "fdiv{l} $src">;
// ST(0) = ST(0) / [mem16int]
//def FIDIV16m : FPI16m<"fidiv", 0xDE, MRM6m, OneArgFPRW>;
// ST(0) = ST(0) / [mem32int]
//def FIDIV32m : FPI32m<"fidiv", 0xDA, MRM6m, OneArgFPRW>;
// FDIVR reg, mem: Before stackification, these are represented by:
// R1 = FDIVR* R2, [mem]
// Note that the order of operands does not reflect the operation being
// performed.
def FDIVR32m : FPI<0xD8, MRM7m, OneArgFPRW, // ST(0) = [mem32real] / ST(0)
(ops f32mem:$src), "fdivr{s} $src">;
def FDIVR64m : FPI<0xDC, MRM7m, OneArgFPRW, // ST(0) = [mem64real] / ST(0)
(ops f64mem:$src), "fdivr{l} $src">;
// ST(0) = [mem16int] / ST(0)
//def FIDIVR16m : FPI16m<"fidivr", 0xDE, MRM7m, OneArgFPRW>;
// ST(0) = [mem32int] / ST(0)
//def FIDIVR32m : FPI32m<"fidivr", 0xDA, MRM7m, OneArgFPRW>;
// Floating point cmovs...
let isTwoAddress = 1, Uses = [ST0], Defs = [ST0] in {
def FCMOVB : FPI<0xC0, AddRegFrm, CondMovFP,
(ops RST:$op), "fcmovb {$op, %ST(0)|%ST(0), $op}">, DA;
def FCMOVBE : FPI<0xD0, AddRegFrm, CondMovFP,
(ops RST:$op), "fcmovbe {$op, %ST(0)|%ST(0), $op}">, DA;
def FCMOVE : FPI<0xC8, AddRegFrm, CondMovFP,
(ops RST:$op), "fcmove {$op, %ST(0)|%ST(0), $op}">, DA;
def FCMOVP : FPI<0xD8, AddRegFrm, CondMovFP,
(ops RST:$op), "fcmovu {$op, %ST(0)|%ST(0), $op}">, DA;
def FCMOVAE : FPI<0xC0, AddRegFrm, CondMovFP,
(ops RST:$op), "fcmovae {$op, %ST(0)|%ST(0), $op}">, DB;
def FCMOVA : FPI<0xD0, AddRegFrm, CondMovFP,
(ops RST:$op), "fcmova {$op, %ST(0)|%ST(0), $op}">, DB;
def FCMOVNE : FPI<0xC8, AddRegFrm, CondMovFP,
(ops RST:$op), "fcmovne {$op, %ST(0)|%ST(0), $op}">, DB;
def FCMOVNP : FPI<0xD8, AddRegFrm, CondMovFP,
(ops RST:$op), "fcmovnu {$op, %ST(0)|%ST(0), $op}">, DB;
}
// Floating point loads & stores...
def FLDrr : FPI<0xC0, AddRegFrm, NotFP, (ops RST:$src), "fld $src">, D9;
def FLD32m : FPI<0xD9, MRM0m, ZeroArgFP, (ops f32mem:$src), "fld{s} $src">;
def FLD64m : FPI<0xDD, MRM0m, ZeroArgFP, (ops f64mem:$src), "fld{l} $src">;
def FLD80m : FPI<0xDB, MRM5m, ZeroArgFP, (ops f80mem:$src), "fld{t} $src">;
def FILD16m : FPI<0xDF, MRM0m, ZeroArgFP, (ops i16mem:$src), "fild{s} $src">;
def FILD32m : FPI<0xDB, MRM0m, ZeroArgFP, (ops i32mem:$src), "fild{l} $src">;
def FILD64m : FPI<0xDF, MRM5m, ZeroArgFP, (ops i64mem:$src), "fild{ll} $src">;
def FSTrr : FPI<0xD0, AddRegFrm, NotFP, (ops RST:$op), "fst $op">, DD;
def FSTPrr : FPI<0xD8, AddRegFrm, NotFP, (ops RST:$op), "fstp $op">, DD;
def FST32m : FPI<0xD9, MRM2m, OneArgFP, (ops f32mem:$op), "fst{s} $op">;
def FST64m : FPI<0xDD, MRM2m, OneArgFP, (ops f64mem:$op), "fst{l} $op">;
def FSTP32m : FPI<0xD9, MRM3m, OneArgFP, (ops f32mem:$op), "fstp{s} $op">;
def FSTP64m : FPI<0xDD, MRM3m, OneArgFP, (ops f64mem:$op), "fstp{l} $op">;
def FSTP80m : FPI<0xDB, MRM7m, OneArgFP, (ops f80mem:$op), "fstp{t} $op">;
def FIST16m : FPI<0xDF, MRM2m , OneArgFP, (ops i16mem:$op), "fist{s} $op">;
def FIST32m : FPI<0xDB, MRM2m , OneArgFP, (ops i32mem:$op), "fist{l} $op">;
def FISTP16m : FPI<0xDF, MRM3m , NotFP , (ops i16mem:$op), "fistp{s} $op">;
def FISTP32m : FPI<0xDB, MRM3m , NotFP , (ops i32mem:$op), "fistp{l} $op">;
def FISTP64m : FPI<0xDF, MRM7m , OneArgFP, (ops i64mem:$op), "fistp{ll} $op">;
def FXCH : FPI<0xC8, AddRegFrm, NotFP,
(ops RST:$op), "fxch $op">, D9; // fxch ST(i), ST(0)
// Floating point constant loads...
def FLD0 : FPI<0xEE, RawFrm, ZeroArgFP, (ops), "fldz">, D9;
def FLD1 : FPI<0xE8, RawFrm, ZeroArgFP, (ops), "fld1">, D9;
def FCHS : FPI<0xE0, RawFrm, OneArgFPRW, (ops), "fchs" >, D9; // f1 = fchs f2
def FABS : FPI<0xE1, RawFrm, OneArgFPRW, (ops), "fabs" >, D9; // f1 = fabs f2
def FSQRT : FPI<0xFA, RawFrm, OneArgFPRW, (ops), "fsqrt">, D9; // fsqrt ST(0)
def FSIN : FPI<0xFE, RawFrm, OneArgFPRW, (ops), "fsin" >, D9; // fsin ST(0)
def FCOS : FPI<0xFF, RawFrm, OneArgFPRW, (ops), "fcos" >, D9; // fcos ST(0)
def FTST : FPI<0xE4, RawFrm, OneArgFP , (ops), "ftst" >, D9; // ftst ST(0)
// Binary arithmetic operations...
class FPST0rInst<bits<8> o, dag ops, string asm>
: I<o, AddRegFrm, ops, asm>, D8 {
list<Register> Uses = [ST0];
list<Register> Defs = [ST0];
}
class FPrST0Inst<bits<8> o, dag ops, string asm>
: I<o, AddRegFrm, ops, asm>, DC {
list<Register> Uses = [ST0];
}
class FPrST0PInst<bits<8> o, dag ops, string asm>
: I<o, AddRegFrm, ops, asm>, DE {
list<Register> Uses = [ST0];
}
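// The three bases differ only in opcode map and destination: D8 forms write
// ST(0) (ST(0) = ST(0) op ST(i)), DC forms write ST(i)
// (ST(i) = ST(i) op ST(0)), and DE forms do the DC operation and then pop.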
def FADDST0r : FPST0rInst <0xC0, (ops RST:$op),
"fadd $op">;
def FADDrST0 : FPrST0Inst <0xC0, (ops RST:$op),
"fadd {%ST(0), $op|$op, %ST(0)}">;
def FADDPrST0 : FPrST0PInst<0xC0, (ops RST:$op),
"faddp $op">;
// NOTE: GAS and apparently all other AT&T style assemblers have a broken notion
// of some of the 'reverse' forms of the fsub and fdiv instructions. As such,
// we have to put some 'r's in and take them out of weird places.
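// Concretely: for the ST(i)-destination register forms, AT&T assemblers use
// the mnemonic the Intel manuals call the reverse, so "fsub{r}" prints as
// fsubr in AT&T mode and fsub in Intel mode for the same 0xE8 encoding.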
def FSUBRST0r : FPST0rInst <0xE8, (ops RST:$op),
"fsubr $op">;
def FSUBrST0 : FPrST0Inst <0xE8, (ops RST:$op),
"fsub{r} {%ST(0), $op|$op, %ST(0)}">;
def FSUBPrST0 : FPrST0PInst<0xE8, (ops RST:$op),
"fsub{r}p $op">;
def FSUBST0r : FPST0rInst <0xE0, (ops RST:$op),
"fsub $op">;
def FSUBRrST0 : FPrST0Inst <0xE0, (ops RST:$op),
"fsub{|r} {%ST(0), $op|$op, %ST(0)}">;
def FSUBRPrST0 : FPrST0PInst<0xE0, (ops RST:$op),
"fsub{|r}p $op">;
def FMULST0r : FPST0rInst <0xC8, (ops RST:$op),
"fmul $op">;
def FMULrST0 : FPrST0Inst <0xC8, (ops RST:$op),
"fmul {%ST(0), $op|$op, %ST(0)}">;
def FMULPrST0 : FPrST0PInst<0xC8, (ops RST:$op),
"fmulp $op">;
def FDIVRST0r : FPST0rInst <0xF8, (ops RST:$op),
"fdivr $op">;
def FDIVrST0 : FPrST0Inst <0xF8, (ops RST:$op),
"fdiv{r} {%ST(0), $op|$op, %ST(0)}">;
def FDIVPrST0 : FPrST0PInst<0xF8, (ops RST:$op),
"fdiv{r}p $op">;
def FDIVST0r : FPST0rInst <0xF0, (ops RST:$op), // ST(0) = ST(0) / ST(i)
"fdiv $op">;
def FDIVRrST0 : FPrST0Inst <0xF0, (ops RST:$op), // ST(i) = ST(0) / ST(i)
"fdiv{|r} {%ST(0), $op|$op, %ST(0)}">;
def FDIVRPrST0 : FPrST0PInst<0xF0, (ops RST:$op), // ST(i) = ST(0) / ST(i), pop
"fdiv{|r}p $op">;
// Floating point compares
def FUCOMr : FPI<0xE0, AddRegFrm, CompareFP, // FPSW = cmp ST(0) with ST(i)
(ops RST:$reg),
"fucom $reg">, DD, Imp<[ST0],[]>;
def FUCOMPr : I<0xE8, AddRegFrm,
(ops RST:$reg), // FPSW = cmp ST(0) with ST(i), pop
"fucomp $reg">, DD, Imp<[ST0],[]>;
def FUCOMPPr : I<0xE9, RawFrm,
(ops), // cmp ST(0) with ST(1), pop, pop
"fucompp">, DA, Imp<[ST0],[]>;
def FUCOMIr : FPI<0xE8, AddRegFrm, CompareFP, // CC = cmp ST(0) with ST(i)
(ops RST:$reg),
"fucomi {$reg, %ST(0)|%ST(0), $reg}">, DB, Imp<[ST0],[]>;
def FUCOMIPr : I<0xE8, AddRegFrm, // CC = cmp ST(0) with ST(i), pop
(ops RST:$reg),
"fucomip {$reg, %ST(0)|%ST(0), $reg}">, DF, Imp<[ST0],[]>;
def FNSTSW8r : I<0xE0, RawFrm, // AX = fp flags
(ops), "fnstsw">, DF, Imp<[],[AX]>;
def FNSTCW16m : I<0xD9, MRM7m, // [mem16] = X87 control word
(ops i16mem:$dst), "fnstcw $dst">;
def FLDCW16m : I<0xD9, MRM5m, // X87 control word = [mem16]
(ops i16mem:$dst), "fldcw $dst">;