From 38a1b00a0fd36d948c3aef2a5c6d42bbb74de305 Mon Sep 17 00:00:00 2001
From: Chris Lattner <sabre@nondot.org>
Date: Wed, 26 Oct 2005 17:18:16 +0000
Subject: [PATCH] fold nested and's early to avoid inefficiencies in
 MaskedValueIsZero. This fixes a very slow compile in PR639.

llvm-svn: 24011
---
 llvm/lib/Transforms/Scalar/InstructionCombining.cpp | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/llvm/lib/Transforms/Scalar/InstructionCombining.cpp b/llvm/lib/Transforms/Scalar/InstructionCombining.cpp
index 9ba0512a78b2..f3325096895a 100644
--- a/llvm/lib/Transforms/Scalar/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/Scalar/InstructionCombining.cpp
@@ -1725,6 +1725,15 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
     // and X, -1 == X
     if (AndRHS->isAllOnesValue())
      return ReplaceInstUsesWith(I, Op0);
+
+    // and (and X, c1), c2 -> and (x, c1&c2).  Handle this case here, before
+    // calling MaskedValueIsZero, to avoid inefficient cases where we traipse
+    // through many levels of ands.
+    {
+      Value *X; ConstantInt *C1;
+      if (match(Op0, m_And(m_Value(X), m_ConstantInt(C1))))
+        return BinaryOperator::createAnd(X, ConstantExpr::getAnd(C1, AndRHS));
+    }
 
    if (MaskedValueIsZero(Op0, AndRHS))        // LHS & RHS == 0
      return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
-- 
GitLab
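
Context for the fold: the rewrite rests on the associativity of bitwise
AND, (x & c1) & c2 == x & (c1 & c2), so a chain of constant masks can be
collapsed into a single 'and' with the merged constant rather than letting
MaskedValueIsZero recurse through every level of the chain (the slow
compile in PR639). Below is a minimal standalone C++ sketch of that
identity, not LLVM code; foldNestedAnd is a hypothetical name used only
for illustration:

    #include <cassert>
    #include <cstdint>

    // Hypothetical helper mirroring the InstCombine fold: a nested mask
    // (x & c1) & c2 is equivalent to a single mask x & (c1 & c2).
    static uint32_t foldNestedAnd(uint32_t x, uint32_t c1, uint32_t c2) {
      return x & (c1 & c2);  // one 'and' with the merged constant
    }

    int main() {
      // The folded form agrees with the nested form for arbitrary inputs,
      // which is what lets the patch collapse nested ands up front.
      for (uint32_t x : {0u, 0xFFFFFFFFu, 0xDEADBEEFu})
        assert(((x & 0x00FFu) & 0x0F0Fu) == foldNestedAnd(x, 0x00FFu, 0x0F0Fu));
      return 0;
    }

Performing this collapse before the MaskedValueIsZero call means the
constant masks are merged directly instead of MaskedValueIsZero traipsing
through many levels of ands on every query, which is the inefficiency the
commit message describes.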