Date: Sat, 8 Dec 2018 14:31:49 +0000 (UTC)
From: Dimitry Andric <dim@FreeBSD.org>
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-vendor@freebsd.org
Subject: svn commit: r341728 - in vendor/llvm/dist-release_70: include/llvm/DebugInfo/PDB/Native include/llvm/ExecutionEngine/Orc include/llvm/ProfileData/Coverage lib/Analysis lib/ExecutionEngine/RuntimeDy...
Message-ID: <201812081431.wB8EVn8x060167@repo.freebsd.org>
Author: dim Date: Sat Dec 8 14:31:49 2018 New Revision: 341728 URL: https://svnweb.freebsd.org/changeset/base/341728 Log: Vendor import of llvm release_70 branch r348686: https://llvm.org/svn/llvm-project/llvm/branches/release_70@348686 Modified: vendor/llvm/dist-release_70/include/llvm/DebugInfo/PDB/Native/GlobalsStream.h vendor/llvm/dist-release_70/include/llvm/DebugInfo/PDB/Native/ModuleDebugStream.h vendor/llvm/dist-release_70/include/llvm/ExecutionEngine/Orc/Core.h vendor/llvm/dist-release_70/include/llvm/ExecutionEngine/Orc/OrcRemoteTargetClient.h vendor/llvm/dist-release_70/include/llvm/ProfileData/Coverage/CoverageMapping.h vendor/llvm/dist-release_70/lib/Analysis/MemorySSA.cpp vendor/llvm/dist-release_70/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp vendor/llvm/dist-release_70/lib/Target/AArch64/AArch64ISelLowering.cpp vendor/llvm/dist-release_70/lib/Target/AMDGPU/AMDGPULibFunc.cpp vendor/llvm/dist-release_70/lib/Transforms/InstCombine/InstCombineCompares.cpp vendor/llvm/dist-release_70/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp vendor/llvm/dist-release_70/test/CodeGen/AArch64/arm64-ccmp.ll vendor/llvm/dist-release_70/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-sge-to-icmp-sle.ll vendor/llvm/dist-release_70/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-slt-to-icmp-sgt.ll Modified: vendor/llvm/dist-release_70/include/llvm/DebugInfo/PDB/Native/GlobalsStream.h ============================================================================== --- vendor/llvm/dist-release_70/include/llvm/DebugInfo/PDB/Native/GlobalsStream.h Sat Dec 8 14:04:57 2018 (r341727) +++ vendor/llvm/dist-release_70/include/llvm/DebugInfo/PDB/Native/GlobalsStream.h Sat Dec 8 14:31:49 2018 (r341728) @@ -30,8 +30,6 @@ class GSIHashIterator GSIHashIterator, FixedStreamArrayIterator<PSHashRecord>, std::random_access_iterator_tag, const uint32_t> { public: - GSIHashIterator() = default; - template <typename T> GSIHashIterator(T &&v) : GSIHashIterator::iterator_adaptor_base(std::forward<T &&>(v)) {} Modified: vendor/llvm/dist-release_70/include/llvm/DebugInfo/PDB/Native/ModuleDebugStream.h ============================================================================== --- vendor/llvm/dist-release_70/include/llvm/DebugInfo/PDB/Native/ModuleDebugStream.h Sat Dec 8 14:04:57 2018 (r341727) +++ vendor/llvm/dist-release_70/include/llvm/DebugInfo/PDB/Native/ModuleDebugStream.h Sat Dec 8 14:31:49 2018 (r341728) @@ -49,7 +49,7 @@ class ModuleDebugStreamRef { (public) BinarySubstreamRef getC13LinesSubstream() const; BinarySubstreamRef getGlobalRefsSubstream() const; - ModuleDebugStreamRef &operator=(ModuleDebugStreamRef &&Other) = default; + ModuleDebugStreamRef &operator=(ModuleDebugStreamRef &&Other) = delete; iterator_range<DebugSubsectionIterator> subsections() const; codeview::DebugSubsectionArray getSubsectionsArray() const { Modified: vendor/llvm/dist-release_70/include/llvm/ExecutionEngine/Orc/Core.h ============================================================================== --- vendor/llvm/dist-release_70/include/llvm/ExecutionEngine/Orc/Core.h Sat Dec 8 14:04:57 2018 (r341727) +++ vendor/llvm/dist-release_70/include/llvm/ExecutionEngine/Orc/Core.h Sat Dec 8 14:31:49 2018 (r341728) @@ -126,7 +126,7 @@ class MaterializationResponsibility { public: MaterializationResponsibility(MaterializationResponsibility &&) = default; MaterializationResponsibility & - operator=(MaterializationResponsibility &&) = default; + operator=(MaterializationResponsibility &&) = 
delete; /// Destruct a MaterializationResponsibility instance. In debug mode /// this asserts that all symbols being tracked have been either Modified: vendor/llvm/dist-release_70/include/llvm/ExecutionEngine/Orc/OrcRemoteTargetClient.h ============================================================================== --- vendor/llvm/dist-release_70/include/llvm/ExecutionEngine/Orc/OrcRemoteTargetClient.h Sat Dec 8 14:04:57 2018 (r341727) +++ vendor/llvm/dist-release_70/include/llvm/ExecutionEngine/Orc/OrcRemoteTargetClient.h Sat Dec 8 14:31:49 2018 (r341728) @@ -70,8 +70,7 @@ class OrcRemoteTargetClient (public) RemoteRTDyldMemoryManager & operator=(const RemoteRTDyldMemoryManager &) = delete; RemoteRTDyldMemoryManager(RemoteRTDyldMemoryManager &&) = default; - RemoteRTDyldMemoryManager & - operator=(RemoteRTDyldMemoryManager &&) = default; + RemoteRTDyldMemoryManager &operator=(RemoteRTDyldMemoryManager &&) = delete; uint8_t *allocateCodeSection(uintptr_t Size, unsigned Alignment, unsigned SectionID, Modified: vendor/llvm/dist-release_70/include/llvm/ProfileData/Coverage/CoverageMapping.h ============================================================================== --- vendor/llvm/dist-release_70/include/llvm/ProfileData/Coverage/CoverageMapping.h Sat Dec 8 14:04:57 2018 (r341727) +++ vendor/llvm/dist-release_70/include/llvm/ProfileData/Coverage/CoverageMapping.h Sat Dec 8 14:31:49 2018 (r341728) @@ -641,8 +641,6 @@ class LineCoverageIterator (public) this->operator++(); } - LineCoverageIterator &operator=(const LineCoverageIterator &R) = default; - bool operator==(const LineCoverageIterator &R) const { return &CD == &R.CD && Next == R.Next && Ended == R.Ended; } Modified: vendor/llvm/dist-release_70/lib/Analysis/MemorySSA.cpp ============================================================================== --- vendor/llvm/dist-release_70/lib/Analysis/MemorySSA.cpp Sat Dec 8 14:04:57 2018 (r341727) +++ vendor/llvm/dist-release_70/lib/Analysis/MemorySSA.cpp Sat Dec 8 14:31:49 2018 (r341728) @@ -119,7 +119,6 @@ class MemoryLocOrCall { public: bool IsCall = false; - MemoryLocOrCall() = default; MemoryLocOrCall(MemoryUseOrDef *MUD) : MemoryLocOrCall(MUD->getMemoryInst()) {} MemoryLocOrCall(const MemoryUseOrDef *MUD) Modified: vendor/llvm/dist-release_70/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp ============================================================================== --- vendor/llvm/dist-release_70/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp Sat Dec 8 14:04:57 2018 (r341727) +++ vendor/llvm/dist-release_70/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp Sat Dec 8 14:31:49 2018 (r341728) @@ -275,7 +275,7 @@ RuntimeDyldImpl::loadObjectImpl(const object::ObjectFi uint64_t Size = I->getCommonSize(); if (!CommonAlign) CommonAlign = Align; - CommonSize += alignTo(CommonSize, Align) + Size; + CommonSize = alignTo(CommonSize, Align) + Size; CommonSymbolsToAllocate.push_back(*I); } } else Modified: vendor/llvm/dist-release_70/lib/Target/AArch64/AArch64ISelLowering.cpp ============================================================================== --- vendor/llvm/dist-release_70/lib/Target/AArch64/AArch64ISelLowering.cpp Sat Dec 8 14:04:57 2018 (r341727) +++ vendor/llvm/dist-release_70/lib/Target/AArch64/AArch64ISelLowering.cpp Sat Dec 8 14:31:49 2018 (r341728) @@ -1515,39 +1515,50 @@ static SDValue emitComparison(SDValue LHS, SDValue RHS /// The CCMP/CCMN/FCCMP/FCCMPE instructions allow the conditional execution of /// a comparison. 
They set the NZCV flags to a predefined value if their /// predicate is false. This allows to express arbitrary conjunctions, for -/// example "cmp 0 (and (setCA (cmp A)) (setCB (cmp B))))" +/// example "cmp 0 (and (setCA (cmp A)) (setCB (cmp B)))" /// expressed as: /// cmp A /// ccmp B, inv(CB), CA /// check for CB flags /// -/// In general we can create code for arbitrary "... (and (and A B) C)" -/// sequences. We can also implement some "or" expressions, because "(or A B)" -/// is equivalent to "not (and (not A) (not B))" and we can implement some -/// negation operations: -/// We can negate the results of a single comparison by inverting the flags -/// used when the predicate fails and inverting the flags tested in the next -/// instruction; We can also negate the results of the whole previous -/// conditional compare sequence by inverting the flags tested in the next -/// instruction. However there is no way to negate the result of a partial -/// sequence. +/// This naturally lets us implement chains of AND operations with SETCC +/// operands. And we can even implement some other situations by transforming +/// them: +/// - We can implement (NEG SETCC) i.e. negating a single comparison by +/// negating the flags used in a CCMP/FCCMP operations. +/// - We can negate the result of a whole chain of CMP/CCMP/FCCMP operations +/// by negating the flags we test for afterwards. i.e. +/// NEG (CMP CCMP CCCMP ...) can be implemented. +/// - Note that we can only ever negate all previously processed results. +/// What we can not implement by flipping the flags to test is a negation +/// of two sub-trees (because the negation affects all sub-trees emitted so +/// far, so the 2nd sub-tree we emit would also affect the first). +/// With those tools we can implement some OR operations: +/// - (OR (SETCC A) (SETCC B)) can be implemented via: +/// NEG (AND (NEG (SETCC A)) (NEG (SETCC B))) +/// - After transforming OR to NEG/AND combinations we may be able to use NEG +/// elimination rules from earlier to implement the whole thing as a +/// CCMP/FCCMP chain. /// -/// Therefore on encountering an "or" expression we can negate the subtree on -/// one side and have to be able to push the negate to the leafs of the subtree -/// on the other side (see also the comments in code). As complete example: -/// "or (or (setCA (cmp A)) (setCB (cmp B))) -/// (and (setCC (cmp C)) (setCD (cmp D)))" -/// is transformed to -/// "not (and (not (and (setCC (cmp C)) (setCC (cmp D)))) -/// (and (not (setCA (cmp A)) (not (setCB (cmp B))))))" -/// and implemented as: +/// As complete example: +/// or (or (setCA (cmp A)) (setCB (cmp B))) +/// (and (setCC (cmp C)) (setCD (cmp D)))" +/// can be reassociated to: +/// or (and (setCC (cmp C)) setCD (cmp D)) +// (or (setCA (cmp A)) (setCB (cmp B))) +/// can be transformed to: +/// not (and (not (and (setCC (cmp C)) (setCD (cmp D)))) +/// (and (not (setCA (cmp A)) (not (setCB (cmp B))))))" +/// which can be implemented as: /// cmp C /// ccmp D, inv(CD), CC /// ccmp A, CA, inv(CD) /// ccmp B, CB, inv(CA) /// check for CB flags -/// A counterexample is "or (and A B) (and C D)" which cannot be implemented -/// by conditional compare sequences. +/// +/// A counterexample is "or (and A B) (and C D)" which translates to +/// not (and (not (and (not A) (not B))) (not (and (not C) (not D)))), we +/// can only implement 1 of the inner (not) operations, but not both! /// @{ /// Create a conditional comparison; Use CCMP, CCMN or FCCMP as appropriate. 
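[The rewritten comment above describes which AND/OR/SETCC trees can be turned into a CMP/CCMP chain; those rules are implemented by the new canEmitConjunction() helper further down in this diff. The following is a rough standalone sketch of that decision rule only, using a toy Expr type and illustrative names (canEmit, Expr), not the actual SelectionDAG code:]

#include <cstdio>

// Toy stand-in for an AND/OR/SETCC SelectionDAG subtree.
struct Expr {
  enum Kind { SetCC, And, Or } kind;
  const Expr *lhs = nullptr, *rhs = nullptr;
};

// Returns true if the tree can be emitted as a conjunction (CMP/CCMP chain).
// canNegate:   the whole subtree can be negated just by flipping leaf conditions.
// mustBeFirst: the subtree contains an OR that cannot be negated naturally,
//              so it has to be emitted before its sibling.
// willNegate:  the caller already plans to negate this subtree (outer OR).
static bool canEmit(const Expr &e, bool &canNegate, bool &mustBeFirst,
                    bool willNegate, unsigned depth = 0) {
  if (e.kind == Expr::SetCC) {
    canNegate = true;   // a single compare is negated by inverting its condition
    mustBeFirst = false;
    return true;
  }
  if (depth > 6)        // mirror the recursion limit in the real code
    return false;
  bool isOr = e.kind == Expr::Or;
  bool canNegL, mustFirstL, canNegR, mustFirstR;
  if (!canEmit(*e.lhs, canNegL, mustFirstL, isOr, depth + 1) ||
      !canEmit(*e.rhs, canNegR, mustFirstR, isOr, depth + 1))
    return false;
  if (mustFirstL && mustFirstR)
    return false;       // only one "emit me first" subtree can be honoured
  if (isOr) {
    if (!canNegL && !canNegR)
      return false;     // need to negate at least one side naturally
    canNegate = willNegate && canNegL && canNegR; // double negation is free
    mustBeFirst = !canNegate;
  } else {              // And
    canNegate = false;  // negating an AND would turn it into an OR
    mustBeFirst = mustFirstL || mustFirstR;
  }
  return true;
}

int main() {
  // An OR nested two ANDs deep: the situation exercised by the new deep_or tests.
  Expr a{Expr::SetCC}, b{Expr::SetCC}, c{Expr::SetCC}, d{Expr::SetCC};
  Expr orCD{Expr::Or, &c, &d};
  Expr andBOr{Expr::And, &b, &orCD};
  Expr top{Expr::And, &a, &andBOr};
  bool cn, mf;
  std::printf("emittable: %d\n", canEmit(top, cn, mf, /*willNegate=*/false));
}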
@@ -1585,14 +1596,23 @@ static SDValue emitConditionalComparison(SDValue LHS, return DAG.getNode(Opcode, DL, MVT_CC, LHS, RHS, NZCVOp, Condition, CCOp); } -/// Returns true if @p Val is a tree of AND/OR/SETCC operations. -/// CanPushNegate is set to true if we can push a negate operation through -/// the tree in a was that we are left with AND operations and negate operations -/// at the leafs only. i.e. "not (or (or x y) z)" can be changed to -/// "and (and (not x) (not y)) (not z)"; "not (or (and x y) z)" cannot be -/// brought into such a form. -static bool isConjunctionDisjunctionTree(const SDValue Val, bool &CanNegate, - unsigned Depth = 0) { +/// Returns true if @p Val is a tree of AND/OR/SETCC operations that can be +/// expressed as a conjunction. See \ref AArch64CCMP. +/// \param CanNegate Set to true if we can negate the whole sub-tree just by +/// changing the conditions on the SETCC tests. +/// (this means we can call emitConjunctionRec() with +/// Negate==true on this sub-tree) +/// \param MustBeFirst Set to true if this subtree needs to be negated and we +/// cannot do the negation naturally. We are required to +/// emit the subtree first in this case. +/// \param WillNegate Is true if are called when the result of this +/// subexpression must be negated. This happens when the +/// outer expression is an OR. We can use this fact to know +/// that we have a double negation (or (or ...) ...) that +/// can be implemented for free. +static bool canEmitConjunction(const SDValue Val, bool &CanNegate, + bool &MustBeFirst, bool WillNegate, + unsigned Depth = 0) { if (!Val.hasOneUse()) return false; unsigned Opcode = Val->getOpcode(); @@ -1600,39 +1620,44 @@ static bool isConjunctionDisjunctionTree(const SDValue if (Val->getOperand(0).getValueType() == MVT::f128) return false; CanNegate = true; + MustBeFirst = false; return true; } // Protect against exponential runtime and stack overflow. if (Depth > 6) return false; if (Opcode == ISD::AND || Opcode == ISD::OR) { + bool IsOR = Opcode == ISD::OR; SDValue O0 = Val->getOperand(0); SDValue O1 = Val->getOperand(1); bool CanNegateL; - if (!isConjunctionDisjunctionTree(O0, CanNegateL, Depth+1)) + bool MustBeFirstL; + if (!canEmitConjunction(O0, CanNegateL, MustBeFirstL, IsOR, Depth+1)) return false; bool CanNegateR; - if (!isConjunctionDisjunctionTree(O1, CanNegateR, Depth+1)) + bool MustBeFirstR; + if (!canEmitConjunction(O1, CanNegateR, MustBeFirstR, IsOR, Depth+1)) return false; - if (Opcode == ISD::OR) { - // For an OR expression we need to be able to negate at least one side or - // we cannot do the transformation at all. + if (MustBeFirstL && MustBeFirstR) + return false; + + if (IsOR) { + // For an OR expression we need to be able to naturally negate at least + // one side or we cannot do the transformation at all. if (!CanNegateL && !CanNegateR) return false; - // We can however change a (not (or x y)) to (and (not x) (not y)) if we - // can negate the x and y subtrees. - CanNegate = CanNegateL && CanNegateR; + // If we the result of the OR will be negated and we can naturally negate + // the leafs, then this sub-tree as a whole negates naturally. + CanNegate = WillNegate && CanNegateL && CanNegateR; + // If we cannot naturally negate the whole sub-tree, then this must be + // emitted first. + MustBeFirst = !CanNegate; } else { - // If the operands are OR expressions then we finally need to negate their - // outputs, we can only do that for the operand with emitted last by - // negating OutCC, not for both operands. 
- bool NeedsNegOutL = O0->getOpcode() == ISD::OR; - bool NeedsNegOutR = O1->getOpcode() == ISD::OR; - if (NeedsNegOutL && NeedsNegOutR) - return false; - // We cannot negate an AND operation (it would become an OR), + assert(Opcode == ISD::AND && "Must be OR or AND"); + // We cannot naturally negate an AND operation. CanNegate = false; + MustBeFirst = MustBeFirstL || MustBeFirstR; } return true; } @@ -1645,11 +1670,9 @@ static bool isConjunctionDisjunctionTree(const SDValue /// and conditional compare operations. @returns an NZCV flags producing node /// and sets @p OutCC to the flags that should be tested or returns SDValue() if /// transformation was not possible. -/// On recursive invocations @p PushNegate may be set to true to have negation -/// effects pushed to the tree leafs; @p Predicate is an NZCV flag predicate -/// for the comparisons in the current subtree; @p Depth limits the search -/// depth to avoid stack overflow. -static SDValue emitConjunctionDisjunctionTreeRec(SelectionDAG &DAG, SDValue Val, +/// \p Negate is true if we want this sub-tree being negated just by changing +/// SETCC conditions. +static SDValue emitConjunctionRec(SelectionDAG &DAG, SDValue Val, AArch64CC::CondCode &OutCC, bool Negate, SDValue CCOp, AArch64CC::CondCode Predicate) { // We're at a tree leaf, produce a conditional comparison operation. @@ -1690,76 +1713,85 @@ static SDValue emitConjunctionDisjunctionTreeRec(Selec return emitConditionalComparison(LHS, RHS, CC, CCOp, Predicate, OutCC, DL, DAG); } - assert((Opcode == ISD::AND || (Opcode == ISD::OR && Val->hasOneUse())) && - "Valid conjunction/disjunction tree"); + assert(Val->hasOneUse() && "Valid conjunction/disjunction tree"); - // Check if both sides can be transformed. + bool IsOR = Opcode == ISD::OR; + SDValue LHS = Val->getOperand(0); + bool CanNegateL; + bool MustBeFirstL; + bool ValidL = canEmitConjunction(LHS, CanNegateL, MustBeFirstL, IsOR); + assert(ValidL && "Valid conjunction/disjunction tree"); + (void)ValidL; + SDValue RHS = Val->getOperand(1); + bool CanNegateR; + bool MustBeFirstR; + bool ValidR = canEmitConjunction(RHS, CanNegateR, MustBeFirstR, IsOR); + assert(ValidR && "Valid conjunction/disjunction tree"); + (void)ValidR; - // In case of an OR we need to negate our operands and the result. - // (A v B) <=> not(not(A) ^ not(B)) - bool NegateOpsAndResult = Opcode == ISD::OR; - // We can negate the results of all previous operations by inverting the - // predicate flags giving us a free negation for one side. The other side - // must be negatable by itself. - if (NegateOpsAndResult) { - // See which side we can negate. - bool CanNegateL; - bool isValidL = isConjunctionDisjunctionTree(LHS, CanNegateL); - assert(isValidL && "Valid conjunction/disjunction tree"); - (void)isValidL; + // Swap sub-tree that must come first to the right side. + if (MustBeFirstL) { + assert(!MustBeFirstR && "Valid conjunction/disjunction tree"); + std::swap(LHS, RHS); + std::swap(CanNegateL, CanNegateR); + std::swap(MustBeFirstL, MustBeFirstR); + } -#ifndef NDEBUG - bool CanNegateR; - bool isValidR = isConjunctionDisjunctionTree(RHS, CanNegateR); - assert(isValidR && "Valid conjunction/disjunction tree"); - assert((CanNegateL || CanNegateR) && "Valid conjunction/disjunction tree"); -#endif - - // Order the side which we cannot negate to RHS so we can emit it first. - if (!CanNegateL) + bool NegateR; + bool NegateAfterR; + bool NegateL; + bool NegateAfterAll; + if (Opcode == ISD::OR) { + // Swap the sub-tree that we can negate naturally to the left. 
+ if (!CanNegateL) { + assert(CanNegateR && "at least one side must be negatable"); + assert(!MustBeFirstR && "invalid conjunction/disjunction tree"); + assert(!Negate); std::swap(LHS, RHS); + NegateR = false; + NegateAfterR = true; + } else { + // Negate the left sub-tree if possible, otherwise negate the result. + NegateR = CanNegateR; + NegateAfterR = !CanNegateR; + } + NegateL = true; + NegateAfterAll = !Negate; } else { - bool NeedsNegOutL = LHS->getOpcode() == ISD::OR; - assert((!NeedsNegOutL || RHS->getOpcode() != ISD::OR) && - "Valid conjunction/disjunction tree"); - // Order the side where we need to negate the output flags to RHS so it - // gets emitted first. - if (NeedsNegOutL) - std::swap(LHS, RHS); + assert(Opcode == ISD::AND && "Valid conjunction/disjunction tree"); + assert(!Negate && "Valid conjunction/disjunction tree"); + + NegateL = false; + NegateR = false; + NegateAfterR = false; + NegateAfterAll = false; } - // Emit RHS. If we want to negate the tree we only need to push a negate - // through if we are already in a PushNegate case, otherwise we can negate - // the "flags to test" afterwards. + // Emit sub-trees. AArch64CC::CondCode RHSCC; - SDValue CmpR = emitConjunctionDisjunctionTreeRec(DAG, RHS, RHSCC, Negate, - CCOp, Predicate); - if (NegateOpsAndResult && !Negate) + SDValue CmpR = emitConjunctionRec(DAG, RHS, RHSCC, NegateR, CCOp, Predicate); + if (NegateAfterR) RHSCC = AArch64CC::getInvertedCondCode(RHSCC); - // Emit LHS. We may need to negate it. - SDValue CmpL = emitConjunctionDisjunctionTreeRec(DAG, LHS, OutCC, - NegateOpsAndResult, CmpR, - RHSCC); - // If we transformed an OR to and AND then we have to negate the result - // (or absorb the Negate parameter). - if (NegateOpsAndResult && !Negate) + SDValue CmpL = emitConjunctionRec(DAG, LHS, OutCC, NegateL, CmpR, RHSCC); + if (NegateAfterAll) OutCC = AArch64CC::getInvertedCondCode(OutCC); return CmpL; } -/// Emit conjunction or disjunction tree with the CMP/FCMP followed by a chain -/// of CCMP/CFCMP ops. See @ref AArch64CCMP. -/// \see emitConjunctionDisjunctionTreeRec(). -static SDValue emitConjunctionDisjunctionTree(SelectionDAG &DAG, SDValue Val, - AArch64CC::CondCode &OutCC) { - bool CanNegate; - if (!isConjunctionDisjunctionTree(Val, CanNegate)) +/// Emit expression as a conjunction (a series of CCMP/CFCMP ops). +/// In some cases this is even possible with OR operations in the expression. +/// See \ref AArch64CCMP. +/// \see emitConjunctionRec(). 
+static SDValue emitConjunction(SelectionDAG &DAG, SDValue Val, + AArch64CC::CondCode &OutCC) { + bool DummyCanNegate; + bool DummyMustBeFirst; + if (!canEmitConjunction(Val, DummyCanNegate, DummyMustBeFirst, false)) return SDValue(); - return emitConjunctionDisjunctionTreeRec(DAG, Val, OutCC, false, SDValue(), - AArch64CC::AL); + return emitConjunctionRec(DAG, Val, OutCC, false, SDValue(), AArch64CC::AL); } /// @} @@ -1859,7 +1891,7 @@ static SDValue getAArch64Cmp(SDValue LHS, SDValue RHS, } if (!Cmp && (RHSC->isNullValue() || RHSC->isOne())) { - if ((Cmp = emitConjunctionDisjunctionTree(DAG, LHS, AArch64CC))) { + if ((Cmp = emitConjunction(DAG, LHS, AArch64CC))) { if ((CC == ISD::SETNE) ^ RHSC->isNullValue()) AArch64CC = AArch64CC::getInvertedCondCode(AArch64CC); } Modified: vendor/llvm/dist-release_70/lib/Target/AMDGPU/AMDGPULibFunc.cpp ============================================================================== --- vendor/llvm/dist-release_70/lib/Target/AMDGPU/AMDGPULibFunc.cpp Sat Dec 8 14:04:57 2018 (r341727) +++ vendor/llvm/dist-release_70/lib/Target/AMDGPU/AMDGPULibFunc.cpp Sat Dec 8 14:31:49 2018 (r341728) @@ -90,7 +90,6 @@ class UnmangledFuncInfo { public: using ID = AMDGPULibFunc::EFuncId; - UnmangledFuncInfo() = default; UnmangledFuncInfo(StringRef _Name, unsigned _NumArgs) : Name(_Name), NumArgs(_NumArgs) {} // Get index to Table by function name. Modified: vendor/llvm/dist-release_70/lib/Transforms/InstCombine/InstCombineCompares.cpp ============================================================================== --- vendor/llvm/dist-release_70/lib/Transforms/InstCombine/InstCombineCompares.cpp Sat Dec 8 14:04:57 2018 (r341727) +++ vendor/llvm/dist-release_70/lib/Transforms/InstCombine/InstCombineCompares.cpp Sat Dec 8 14:31:49 2018 (r341728) @@ -2924,12 +2924,20 @@ static Value *foldICmpWithLowBitMaskedVal(ICmpInst &I, // x & (-1 >> y) s>= x -> x s<= (-1 >> y) if (X != I.getOperand(1)) // X must be on RHS of comparison! return nullptr; // Ignore the other case. + if (!match(M, m_Constant())) // Can not do this fold with non-constant. + return nullptr; + if (!match(M, m_NonNegative())) // Must not have any -1 vector elements. + return nullptr; DstPred = ICmpInst::Predicate::ICMP_SLE; break; case ICmpInst::Predicate::ICMP_SLT: // x & (-1 >> y) s< x -> x s> (-1 >> y) if (X != I.getOperand(1)) // X must be on RHS of comparison! return nullptr; // Ignore the other case. + if (!match(M, m_Constant())) // Can not do this fold with non-constant. + return nullptr; + if (!match(M, m_NonNegative())) // Must not have any -1 vector elements. + return nullptr; DstPred = ICmpInst::Predicate::ICMP_SGT; break; case ICmpInst::Predicate::ICMP_SLE: Modified: vendor/llvm/dist-release_70/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp ============================================================================== --- vendor/llvm/dist-release_70/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp Sat Dec 8 14:04:57 2018 (r341727) +++ vendor/llvm/dist-release_70/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp Sat Dec 8 14:31:49 2018 (r341728) @@ -231,17 +231,17 @@ struct TransformedFunction { TransformedFunction& operator=(TransformedFunction&&) = default; /// Type of the function before the transformation. - FunctionType* const OriginalType; + FunctionType *OriginalType; /// Type of the function after the transformation. - FunctionType* const TransformedType; + FunctionType *TransformedType; /// Transforming a function may change the position of arguments. 
This /// member records the mapping from each argument's old position to its new /// position. Argument positions are zero-indexed. If the transformation /// from F to F' made the first argument of F into the third argument of F', /// then ArgumentIndexMapping[0] will equal 2. - const std::vector<unsigned> ArgumentIndexMapping; + std::vector<unsigned> ArgumentIndexMapping; }; /// Given function attributes from a call site for the original function, Modified: vendor/llvm/dist-release_70/test/CodeGen/AArch64/arm64-ccmp.ll ============================================================================== --- vendor/llvm/dist-release_70/test/CodeGen/AArch64/arm64-ccmp.ll Sat Dec 8 14:04:57 2018 (r341727) +++ vendor/llvm/dist-release_70/test/CodeGen/AArch64/arm64-ccmp.ll Sat Dec 8 14:31:49 2018 (r341728) @@ -526,8 +526,8 @@ define i32 @select_or_olt_one(double %v0, double %v1, ; CHECK-LABEL: select_or_one_olt: ; CHECK-LABEL: ; %bb.0: ; CHECK-NEXT: fcmp d0, d1 -; CHECK-NEXT: fccmp d0, d1, #1, ne -; CHECK-NEXT: fccmp d2, d3, #8, vs +; CHECK-NEXT: fccmp d0, d1, #8, le +; CHECK-NEXT: fccmp d2, d3, #8, pl ; CHECK-NEXT: csel w0, w0, w1, mi ; CHECK-NEXT: ret define i32 @select_or_one_olt(double %v0, double %v1, double %v2, double %v3, i32 %a, i32 %b) #0 { @@ -556,8 +556,8 @@ define i32 @select_or_olt_ueq(double %v0, double %v1, ; CHECK-LABEL: select_or_ueq_olt: ; CHECK-LABEL: ; %bb.0: ; CHECK-NEXT: fcmp d0, d1 -; CHECK-NEXT: fccmp d0, d1, #8, le -; CHECK-NEXT: fccmp d2, d3, #8, mi +; CHECK-NEXT: fccmp d0, d1, #1, ne +; CHECK-NEXT: fccmp d2, d3, #8, vc ; CHECK-NEXT: csel w0, w0, w1, mi ; CHECK-NEXT: ret define i32 @select_or_ueq_olt(double %v0, double %v1, double %v2, double %v3, i32 %a, i32 %b) #0 { @@ -653,6 +653,70 @@ define i32 @f128_select_and_olt_oge(fp128 %v0, fp128 % %c1 = fcmp oge fp128 %v2, %v3 %cr = and i1 %c1, %c0 %sel = select i1 %cr, i32 %a, i32 %b + ret i32 %sel +} + +; This testcase resembles the core problem of http://llvm.org/PR39550 +; (an OR operation is 2 levels deep but needs to be implemented first) +; CHECK-LABEL: deep_or +; CHECK: cmp w2, #20 +; CHECK-NEXT: ccmp w2, #15, #4, ne +; CHECK-NEXT: ccmp w1, #0, #4, eq +; CHECK-NEXT: ccmp w0, #0, #4, ne +; CHECK-NEXT: csel w0, w4, w5, ne +; CHECK-NEXT: ret +define i32 @deep_or(i32 %a0, i32 %a1, i32 %a2, i32 %a3, i32 %x, i32 %y) { + %c0 = icmp ne i32 %a0, 0 + %c1 = icmp ne i32 %a1, 0 + %c2 = icmp eq i32 %a2, 15 + %c3 = icmp eq i32 %a2, 20 + + %or = or i1 %c2, %c3 + %and0 = and i1 %or, %c1 + %and1 = and i1 %and0, %c0 + %sel = select i1 %and1, i32 %x, i32 %y + ret i32 %sel +} + +; Variation of deep_or, we still need to implement the OR first though. +; CHECK-LABEL: deep_or1 +; CHECK: cmp w2, #20 +; CHECK-NEXT: ccmp w2, #15, #4, ne +; CHECK-NEXT: ccmp w0, #0, #4, eq +; CHECK-NEXT: ccmp w1, #0, #4, ne +; CHECK-NEXT: csel w0, w4, w5, ne +; CHECK-NEXT: ret +define i32 @deep_or1(i32 %a0, i32 %a1, i32 %a2, i32 %a3, i32 %x, i32 %y) { + %c0 = icmp ne i32 %a0, 0 + %c1 = icmp ne i32 %a1, 0 + %c2 = icmp eq i32 %a2, 15 + %c3 = icmp eq i32 %a2, 20 + + %or = or i1 %c2, %c3 + %and0 = and i1 %c0, %or + %and1 = and i1 %and0, %c1 + %sel = select i1 %and1, i32 %x, i32 %y + ret i32 %sel +} + +; Variation of deep_or, we still need to implement the OR first though. 
+; CHECK-LABEL: deep_or2 +; CHECK: cmp w2, #20 +; CHECK-NEXT: ccmp w2, #15, #4, ne +; CHECK-NEXT: ccmp w1, #0, #4, eq +; CHECK-NEXT: ccmp w0, #0, #4, ne +; CHECK-NEXT: csel w0, w4, w5, ne +; CHECK-NEXT: ret +define i32 @deep_or2(i32 %a0, i32 %a1, i32 %a2, i32 %a3, i32 %x, i32 %y) { + %c0 = icmp ne i32 %a0, 0 + %c1 = icmp ne i32 %a1, 0 + %c2 = icmp eq i32 %a2, 15 + %c3 = icmp eq i32 %a2, 20 + + %or = or i1 %c2, %c3 + %and0 = and i1 %c0, %c1 + %and1 = and i1 %and0, %or + %sel = select i1 %and1, i32 %x, i32 %y ret i32 %sel } Modified: vendor/llvm/dist-release_70/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-sge-to-icmp-sle.ll ============================================================================== --- vendor/llvm/dist-release_70/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-sge-to-icmp-sle.ll Sat Dec 8 14:04:57 2018 (r341727) +++ vendor/llvm/dist-release_70/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-sge-to-icmp-sle.ll Sat Dec 8 14:31:49 2018 (r341728) @@ -23,18 +23,6 @@ define i1 @p0(i8 %x) { ret i1 %ret } -define i1 @pv(i8 %x, i8 %y) { -; CHECK-LABEL: @pv( -; CHECK-NEXT: [[TMP0:%.*]] = lshr i8 -1, [[Y:%.*]] -; CHECK-NEXT: [[TMP1:%.*]] = icmp sge i8 [[TMP0]], [[X:%.*]] -; CHECK-NEXT: ret i1 [[TMP1]] -; - %tmp0 = lshr i8 -1, %y - %tmp1 = and i8 %tmp0, %x - %ret = icmp sge i8 %tmp1, %x - ret i1 %ret -} - ; ============================================================================ ; ; Vector tests ; ============================================================================ ; @@ -120,8 +108,9 @@ define i1 @cv0(i8 %y) { ; CHECK-LABEL: @cv0( ; CHECK-NEXT: [[X:%.*]] = call i8 @gen8() ; CHECK-NEXT: [[TMP0:%.*]] = lshr i8 -1, [[Y:%.*]] -; CHECK-NEXT: [[TMP1:%.*]] = icmp sle i8 [[X]], [[TMP0]] -; CHECK-NEXT: ret i1 [[TMP1]] +; CHECK-NEXT: [[TMP1:%.*]] = and i8 [[X]], [[TMP0]] +; CHECK-NEXT: [[RET:%.*]] = icmp sge i8 [[TMP1]], [[X]] +; CHECK-NEXT: ret i1 [[RET]] ; %x = call i8 @gen8() %tmp0 = lshr i8 -1, %y @@ -195,4 +184,43 @@ define <2 x i1> @n2(<2 x i8> %x) { %tmp0 = and <2 x i8> %x, <i8 3, i8 16> ; only the first one is valid. %ret = icmp sge <2 x i8> %tmp0, %x ret <2 x i1> %ret +} + +; ============================================================================ ; +; Potential miscompiles. 
+; ============================================================================ ; + +define i1 @nv(i8 %x, i8 %y) { +; CHECK-LABEL: @nv( +; CHECK-NEXT: [[TMP0:%.*]] = lshr i8 -1, [[Y:%.*]] +; CHECK-NEXT: [[TMP1:%.*]] = and i8 [[TMP0]], [[X:%.*]] +; CHECK-NEXT: [[RET:%.*]] = icmp sge i8 [[TMP1]], [[X]] +; CHECK-NEXT: ret i1 [[RET]] +; + %tmp0 = lshr i8 -1, %y + %tmp1 = and i8 %tmp0, %x + %ret = icmp sge i8 %tmp1, %x + ret i1 %ret +} + +define <2 x i1> @n3_vec(<2 x i8> %x) { +; CHECK-LABEL: @n3_vec( +; CHECK-NEXT: [[TMP0:%.*]] = and <2 x i8> [[X:%.*]], <i8 3, i8 -1> +; CHECK-NEXT: [[RET:%.*]] = icmp sge <2 x i8> [[TMP0]], [[X]] +; CHECK-NEXT: ret <2 x i1> [[RET]] +; + %tmp0 = and <2 x i8> %x, <i8 3, i8 -1> + %ret = icmp sge <2 x i8> %tmp0, %x + ret <2 x i1> %ret +} + +define <3 x i1> @n4_vec(<3 x i8> %x) { +; CHECK-LABEL: @n4_vec( +; CHECK-NEXT: [[TMP0:%.*]] = and <3 x i8> [[X:%.*]], <i8 3, i8 undef, i8 -1> +; CHECK-NEXT: [[RET:%.*]] = icmp sge <3 x i8> [[TMP0]], [[X]] +; CHECK-NEXT: ret <3 x i1> [[RET]] +; + %tmp0 = and <3 x i8> %x, <i8 3, i8 undef, i8 -1> + %ret = icmp sge <3 x i8> %tmp0, %x + ret <3 x i1> %ret } Modified: vendor/llvm/dist-release_70/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-slt-to-icmp-sgt.ll ============================================================================== --- vendor/llvm/dist-release_70/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-slt-to-icmp-sgt.ll Sat Dec 8 14:04:57 2018 (r341727) +++ vendor/llvm/dist-release_70/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-slt-to-icmp-sgt.ll Sat Dec 8 14:31:49 2018 (r341728) @@ -23,18 +23,6 @@ define i1 @p0(i8 %x) { ret i1 %ret } -define i1 @pv(i8 %x, i8 %y) { -; CHECK-LABEL: @pv( -; CHECK-NEXT: [[TMP0:%.*]] = lshr i8 -1, [[Y:%.*]] -; CHECK-NEXT: [[TMP1:%.*]] = icmp slt i8 [[TMP0]], [[X:%.*]] -; CHECK-NEXT: ret i1 [[TMP1]] -; - %tmp0 = lshr i8 -1, %y - %tmp1 = and i8 %tmp0, %x - %ret = icmp slt i8 %tmp1, %x - ret i1 %ret -} - ; ============================================================================ ; ; Vector tests ; ============================================================================ ; @@ -120,8 +108,9 @@ define i1 @cv0(i8 %y) { ; CHECK-LABEL: @cv0( ; CHECK-NEXT: [[X:%.*]] = call i8 @gen8() ; CHECK-NEXT: [[TMP0:%.*]] = lshr i8 -1, [[Y:%.*]] -; CHECK-NEXT: [[TMP1:%.*]] = icmp sgt i8 [[X]], [[TMP0]] -; CHECK-NEXT: ret i1 [[TMP1]] +; CHECK-NEXT: [[TMP1:%.*]] = and i8 [[X]], [[TMP0]] +; CHECK-NEXT: [[RET:%.*]] = icmp slt i8 [[TMP1]], [[X]] +; CHECK-NEXT: ret i1 [[RET]] ; %x = call i8 @gen8() %tmp0 = lshr i8 -1, %y @@ -195,4 +184,43 @@ define <2 x i1> @n2(<2 x i8> %x) { %tmp0 = and <2 x i8> %x, <i8 3, i8 16> ; only the first one is valid. %ret = icmp slt <2 x i8> %tmp0, %x ret <2 x i1> %ret +} + +; ============================================================================ ; +; Potential miscompiles. 
+; ============================================================================ ; + +define i1 @nv(i8 %x, i8 %y) { +; CHECK-LABEL: @nv( +; CHECK-NEXT: [[TMP0:%.*]] = lshr i8 -1, [[Y:%.*]] +; CHECK-NEXT: [[TMP1:%.*]] = and i8 [[TMP0]], [[X:%.*]] +; CHECK-NEXT: [[RET:%.*]] = icmp slt i8 [[TMP1]], [[X]] +; CHECK-NEXT: ret i1 [[RET]] +; + %tmp0 = lshr i8 -1, %y + %tmp1 = and i8 %tmp0, %x + %ret = icmp slt i8 %tmp1, %x + ret i1 %ret +} + +define <2 x i1> @n3(<2 x i8> %x) { +; CHECK-LABEL: @n3( +; CHECK-NEXT: [[TMP0:%.*]] = and <2 x i8> [[X:%.*]], <i8 3, i8 -1> +; CHECK-NEXT: [[RET:%.*]] = icmp slt <2 x i8> [[TMP0]], [[X]] +; CHECK-NEXT: ret <2 x i1> [[RET]] +; + %tmp0 = and <2 x i8> %x, <i8 3, i8 -1> + %ret = icmp slt <2 x i8> %tmp0, %x + ret <2 x i1> %ret +} + +define <3 x i1> @n4(<3 x i8> %x) { +; CHECK-LABEL: @n4( +; CHECK-NEXT: [[TMP0:%.*]] = and <3 x i8> [[X:%.*]], <i8 3, i8 undef, i8 -1> +; CHECK-NEXT: [[RET:%.*]] = icmp slt <3 x i8> [[TMP0]], [[X]] +; CHECK-NEXT: ret <3 x i1> [[RET]] +; + %tmp0 = and <3 x i8> %x, <i8 3, i8 undef, i8 -1> + %ret = icmp slt <3 x i8> %tmp0, %x + ret <3 x i1> %ret }
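[The two InstCombine test files above move the variable-mask case and the -1/undef vector cases under "Potential miscompiles": the fold "x & (-1 >> y) s>= x -> x s<= (-1 >> y)" (and its s< sibling) is now only applied to constant masks with no negative elements, because for a mask of -1 the left-hand side is just x. A standalone brute-force check over i8, not part of the commit and with illustrative names, makes the difference visible:]

#include <cstdint>
#include <cstdio>

// Does "x & M s>= x" agree with the folded "x s<= M" for every i8 value of x?
static bool foldAgrees(int8_t m) {
  for (int x = -128; x <= 127; ++x) {
    bool orig = (int8_t)(x & m) >= (int8_t)x; // x & M s>= x
    bool fold = (int8_t)x <= m;               // x s<= M
    if (orig != fold)
      return false;
  }
  return true;
}

int main() {
  std::printf("M = 0x0f: %s\n", foldAgrees(0x0f) ? "fold is safe" : "MISCOMPILE");
  std::printf("M = -1  : %s\n", foldAgrees(-1)   ? "fold is safe" : "MISCOMPILE");
  // Expected: the low-bit mask agrees, the all-ones mask does not,
  // since x & -1 == x makes the original compare always true while
  // x s<= -1 holds only for negative x.
}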
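[The one-character change in RuntimeDyld.cpp earlier in this diff, turning "CommonSize += alignTo(...)" into "CommonSize = alignTo(...)", stops the common-symbol size accumulator from re-adding its own running total for every symbol. A tiny standalone illustration with made-up sizes and alignments, not taken from the commit:]

#include <cstdint>
#include <cstdio>

// Round value up to the next multiple of align (same meaning as llvm::alignTo).
static uint64_t alignTo(uint64_t value, uint64_t align) {
  return (value + align - 1) / align * align;
}

int main() {
  const uint64_t sizes[]  = {12, 40, 8};   // hypothetical common-symbol sizes
  const uint64_t aligns[] = {8, 16, 4};    // hypothetical alignments
  uint64_t buggy = 0, fixed = 0;
  for (int i = 0; i < 3; ++i) {
    buggy += alignTo(buggy, aligns[i]) + sizes[i]; // old: previous total counted twice
    fixed  = alignTo(fixed, aligns[i]) + sizes[i]; // new: aligned offset plus this size
  }
  // Prints buggy=144 fixed=64: the old formula over-allocates the common section.
  std::printf("buggy=%llu fixed=%llu\n",
              (unsigned long long)buggy, (unsigned long long)fixed);
}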