Date:      Sat, 29 Jul 2017 21:25:18 +0000 (UTC)
From:      Dimitry Andric <dim@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-vendor@freebsd.org
Subject:   svn commit: r321691 - in vendor/llvm/dist: . docs examples/ParallelJIT include/llvm/CodeGen/GlobalISel include/llvm/Support include/llvm/Transforms/Utils lib/CodeGen lib/CodeGen/SelectionDAG lib/Op...
Message-ID:  <201707292125.v6TLPIBP068166@repo.freebsd.org>

Author: dim
Date: Sat Jul 29 21:25:18 2017
New Revision: 321691
URL: https://svnweb.freebsd.org/changeset/base/321691

Log:
  Vendor import of llvm release_50 branch r309439:
  https://llvm.org/svn/llvm-project/llvm/branches/release_50@309439

Added:
  vendor/llvm/dist/test/CodeGen/AMDGPU/spill-empty-live-interval.mir
  vendor/llvm/dist/test/CodeGen/X86/pr33844.ll
  vendor/llvm/dist/test/CodeGen/X86/pr33960.ll
  vendor/llvm/dist/test/MC/Sparc/sparc-tls-relocations.s   (contents, props changed)
  vendor/llvm/dist/test/Transforms/JumpThreading/pr33605.ll
  vendor/llvm/dist/test/Transforms/JumpThreading/pr33917.ll
  vendor/llvm/dist/test/Transforms/SimplifyCFG/pr33605.ll
Modified:
  vendor/llvm/dist/CMakeLists.txt
  vendor/llvm/dist/docs/ReleaseNotes.rst
  vendor/llvm/dist/examples/ParallelJIT/ParallelJIT.cpp
  vendor/llvm/dist/include/llvm/CodeGen/GlobalISel/InstructionSelector.h
  vendor/llvm/dist/include/llvm/Support/CommandLine.h
  vendor/llvm/dist/include/llvm/Support/TargetRegistry.h
  vendor/llvm/dist/include/llvm/Transforms/Utils/LoopUtils.h
  vendor/llvm/dist/lib/CodeGen/CodeGenPrepare.cpp
  vendor/llvm/dist/lib/CodeGen/InlineSpiller.cpp
  vendor/llvm/dist/lib/CodeGen/RegAllocBase.cpp
  vendor/llvm/dist/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
  vendor/llvm/dist/lib/Option/OptTable.cpp
  vendor/llvm/dist/lib/Support/CommandLine.cpp
  vendor/llvm/dist/lib/Support/ErrorHandling.cpp
  vendor/llvm/dist/lib/Support/TargetRegistry.cpp
  vendor/llvm/dist/lib/Target/AArch64/AArch64ISelLowering.cpp
  vendor/llvm/dist/lib/Target/AMDGPU/InstPrinter/AMDGPUInstPrinter.cpp
  vendor/llvm/dist/lib/Target/AMDGPU/SIRegisterInfo.td
  vendor/llvm/dist/lib/Target/Sparc/MCTargetDesc/SparcAsmBackend.cpp
  vendor/llvm/dist/lib/Target/SystemZ/SystemZScheduleZ14.td
  vendor/llvm/dist/lib/Target/X86/X86ISelDAGToDAG.cpp
  vendor/llvm/dist/lib/Target/X86/X86ISelLowering.cpp
  vendor/llvm/dist/lib/Transforms/Scalar/JumpThreading.cpp
  vendor/llvm/dist/lib/Transforms/Utils/LoopUtils.cpp
  vendor/llvm/dist/lib/Transforms/Utils/SimplifyCFG.cpp
  vendor/llvm/dist/test/Analysis/LazyValueAnalysis/lvi-after-jumpthreading.ll
  vendor/llvm/dist/test/CodeGen/AArch64/aarch64-loop-gep-opt.ll
  vendor/llvm/dist/test/CodeGen/AArch64/aarch64_win64cc_vararg.ll
  vendor/llvm/dist/test/CodeGen/AArch64/win64_vararg.ll
  vendor/llvm/dist/test/CodeGen/X86/memcmp-minsize.ll
  vendor/llvm/dist/test/CodeGen/X86/memcmp-optsize.ll
  vendor/llvm/dist/test/CodeGen/X86/memcmp.ll
  vendor/llvm/dist/test/CodeGen/X86/vector-shift-ashr-256.ll
  vendor/llvm/dist/test/Transforms/CodeGenPrepare/X86/memcmp.ll
  vendor/llvm/dist/test/Transforms/JumpThreading/static-profile.ll
  vendor/llvm/dist/test/Transforms/LoopUnroll/peel-loop.ll
  vendor/llvm/dist/test/Transforms/LoopUnswitch/2015-06-17-Metadata.ll
  vendor/llvm/dist/test/Transforms/LoopUnswitch/infinite-loop.ll
  vendor/llvm/dist/test/Transforms/LoopVectorize/X86/float-induction-x86.ll
  vendor/llvm/dist/test/Transforms/LoopVectorize/float-induction.ll
  vendor/llvm/dist/test/Transforms/SimplifyCFG/X86/switch_to_lookup_table.ll
  vendor/llvm/dist/test/Transforms/SimplifyCFG/multiple-phis.ll
  vendor/llvm/dist/test/Transforms/SimplifyCFG/preserve-llvm-loop-metadata.ll
  vendor/llvm/dist/utils/release/test-release.sh

Modified: vendor/llvm/dist/CMakeLists.txt
==============================================================================
--- vendor/llvm/dist/CMakeLists.txt	Sat Jul 29 20:19:37 2017	(r321690)
+++ vendor/llvm/dist/CMakeLists.txt	Sat Jul 29 21:25:18 2017	(r321691)
@@ -29,7 +29,7 @@ if(NOT DEFINED LLVM_VERSION_PATCH)
   set(LLVM_VERSION_PATCH 0)
 endif()
 if(NOT DEFINED LLVM_VERSION_SUFFIX)
-  set(LLVM_VERSION_SUFFIX svn)
+  set(LLVM_VERSION_SUFFIX "")
 endif()
 
 if (POLICY CMP0048)

Modified: vendor/llvm/dist/docs/ReleaseNotes.rst
==============================================================================
--- vendor/llvm/dist/docs/ReleaseNotes.rst	Sat Jul 29 20:19:37 2017	(r321690)
+++ vendor/llvm/dist/docs/ReleaseNotes.rst	Sat Jul 29 21:25:18 2017	(r321691)
@@ -71,6 +71,12 @@ Non-comprehensive list of changes in this release
 Changes to the LLVM IR
 ----------------------
 
+* The datalayout string may now indicate an address space to use for
+  the pointer type of alloca rather than the default of 0.
+
+* Added the speculatable attribute, indicating a function that has no
+  side-effects which could inhibit hoisting of calls.
+
 Changes to the ARM Backend
 --------------------------
 
@@ -91,12 +97,30 @@ Changes to the PowerPC Target
 Changes to the X86 Target
 -------------------------
 
- During this release ...
+* Added initial AMD Ryzen (znver1) scheduler support.
 
+* Added support for Intel Goldmont CPUs.
+
+* Added support for avx512vpopcntdq instructions.
+
+* Added heuristics to convert CMOV into branches when it may be profitable.
+
+* More aggressive inlining of memcmp calls.
+
+* Improved vXi64 shuffles on 32-bit targets.
+
+* Improved use of PMOVMSKB for any_of/all_of comparison reductions.
+
+* Improved Silvermont, Sandybridge, and Jaguar (btver2) schedulers.
+
+* Improved support for AVX512 vector rotations.
+
+* Added support for AMD Lightweight Profiling (LWP) instructions.
+
 Changes to the AMDGPU Target
 -----------------------------
 
- During this release ...
+* Initial gfx9 support.
 
 Changes to the AVR Target
 -----------------------------

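For illustration only (not part of this import), a minimal C++ sketch of the
two IR changes called out in the release notes above, assuming the LLVM 5.0
API; the datalayout string and function are just examples:

    #include "llvm/IR/Function.h"
    #include "llvm/IR/Module.h"
    using namespace llvm;

    void applyNewIRFeatures(Module &M, Function &F) {
      // "A5" in the datalayout string selects address space 5 for the
      // pointer type produced by alloca (the default remains 0).
      M.setDataLayout("e-A5");
      // speculatable marks the function as free of side effects that
      // would otherwise block hoisting of calls to it.
      F.addFnAttr(Attribute::Speculatable);
    }
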
Modified: vendor/llvm/dist/examples/ParallelJIT/ParallelJIT.cpp
==============================================================================
--- vendor/llvm/dist/examples/ParallelJIT/ParallelJIT.cpp	Sat Jul 29 20:19:37 2017	(r321690)
+++ vendor/llvm/dist/examples/ParallelJIT/ParallelJIT.cpp	Sat Jul 29 21:25:18 2017	(r321691)
@@ -145,6 +145,7 @@ class WaitForThreads (public)
     waitFor = 0;
 
     int result = pthread_cond_init( &condition, nullptr );
+    (void)result;
     assert( result == 0 );
 
     result = pthread_mutex_init( &mutex, nullptr );

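The added (void)result; silences -Wunused-variable in NDEBUG builds, where
assert() expands to nothing; a standalone sketch of the pattern:

    #include <cassert>
    #include <pthread.h>

    void initCondition(pthread_cond_t &condition) {
      int result = pthread_cond_init(&condition, nullptr);
      (void)result;  // keeps `result` used even when assert is compiled out
      assert(result == 0);
    }
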
Modified: vendor/llvm/dist/include/llvm/CodeGen/GlobalISel/InstructionSelector.h
==============================================================================
--- vendor/llvm/dist/include/llvm/CodeGen/GlobalISel/InstructionSelector.h	Sat Jul 29 20:19:37 2017	(r321690)
+++ vendor/llvm/dist/include/llvm/CodeGen/GlobalISel/InstructionSelector.h	Sat Jul 29 21:25:18 2017	(r321691)
@@ -40,7 +40,8 @@ class TargetRegisterInfo;
 /// This is convenient because std::bitset does not have a constructor
 /// with an initializer list of set bits.
 ///
-/// Each InstructionSelector subclass should define a PredicateBitset class with:
+/// Each InstructionSelector subclass should define a PredicateBitset class
+/// with:
 ///   const unsigned MAX_SUBTARGET_PREDICATES = 192;
 ///   using PredicateBitset = PredicateBitsetImpl<MAX_SUBTARGET_PREDICATES>;
 /// and updating the constant to suit the target. Tablegen provides a suitable
@@ -102,7 +103,8 @@ enum {
   /// - OpIdx - Operand index
   /// - Expected integer
   GIM_CheckConstantInt,
-  /// Check the operand is a specific literal integer (i.e. MO.isImm() or MO.isCImm() is true).
+  /// Check the operand is a specific literal integer (i.e. MO.isImm() or
+  /// MO.isCImm() is true).
   /// - InsnID - Instruction ID
   /// - OpIdx - Operand index
   /// - Expected integer

Modified: vendor/llvm/dist/include/llvm/Support/CommandLine.h
==============================================================================
--- vendor/llvm/dist/include/llvm/Support/CommandLine.h	Sat Jul 29 20:19:37 2017	(r321690)
+++ vendor/llvm/dist/include/llvm/Support/CommandLine.h	Sat Jul 29 21:25:18 2017	(r321691)
@@ -66,15 +66,12 @@ bool ParseCommandLineOptions(int argc, const char *con
 void ParseEnvironmentOptions(const char *progName, const char *envvar,
                              const char *Overview = "");
 
-// Function pointer type for printing version information.
-using VersionPrinterTy = std::function<void(raw_ostream &)>;
-
 ///===---------------------------------------------------------------------===//
 /// SetVersionPrinter - Override the default (LLVM specific) version printer
 ///                     used to print out the version when --version is given
 ///                     on the command line. This allows other systems using the
 ///                     CommandLine utilities to print their own version string.
-void SetVersionPrinter(VersionPrinterTy func);
+void SetVersionPrinter(void (*func)());
 
 ///===---------------------------------------------------------------------===//
 /// AddExtraVersionPrinter - Add an extra printer to use in addition to the
@@ -83,7 +80,7 @@ void SetVersionPrinter(VersionPrinterTy func);
 ///                          which will be called after the basic LLVM version
 ///                          printing is complete. Each can then add additional
 ///                          information specific to the tool.
-void AddExtraVersionPrinter(VersionPrinterTy func);
+void AddExtraVersionPrinter(void (*func)());
 
 // PrintOptionValues - Print option values.
 // With -print-options print the difference between option values and defaults.

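These hunks revert SetVersionPrinter/AddExtraVersionPrinter from
std::function<void(raw_ostream &)> back to plain void (*)() pointers. A
hedged sketch of how a tool registers a printer under the reverted
signature (the tool name is hypothetical):

    #include "llvm/Support/CommandLine.h"
    #include "llvm/Support/raw_ostream.h"

    // With the function-pointer interface the printer writes to outs()
    // itself instead of receiving a stream argument.
    static void printMyToolVersion() {
      llvm::outs() << "mytool (example) version 1.0\n";
    }

    int main(int argc, char **argv) {
      llvm::cl::SetVersionPrinter(printMyToolVersion);
      llvm::cl::ParseCommandLineOptions(argc, argv, "example tool\n");
      return 0;
    }
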
Modified: vendor/llvm/dist/include/llvm/Support/TargetRegistry.h
==============================================================================
--- vendor/llvm/dist/include/llvm/Support/TargetRegistry.h	Sat Jul 29 20:19:37 2017	(r321690)
+++ vendor/llvm/dist/include/llvm/Support/TargetRegistry.h	Sat Jul 29 21:25:18 2017	(r321691)
@@ -599,7 +599,7 @@ struct TargetRegistry {
 
   /// printRegisteredTargetsForVersion - Print the registered targets
   /// appropriately for inclusion in a tool's version output.
-  static void printRegisteredTargetsForVersion(raw_ostream &OS);
+  static void printRegisteredTargetsForVersion();
 
   /// @name Registry Access
   /// @{

Modified: vendor/llvm/dist/include/llvm/Transforms/Utils/LoopUtils.h
==============================================================================
--- vendor/llvm/dist/include/llvm/Transforms/Utils/LoopUtils.h	Sat Jul 29 20:19:37 2017	(r321690)
+++ vendor/llvm/dist/include/llvm/Transforms/Utils/LoopUtils.h	Sat Jul 29 21:25:18 2017	(r321691)
@@ -531,8 +531,10 @@ Value *createTargetReduction(IRBuilder<> &B, const Tar
 
 /// Get the intersection (logical and) of all of the potential IR flags
 /// of each scalar operation (VL) that will be converted into a vector (I).
+/// If OpValue is non-null, we only consider operations similar to OpValue
+/// when intersecting.
 /// Flag set: NSW, NUW, exact, and all of fast-math.
-void propagateIRFlags(Value *I, ArrayRef<Value *> VL);
+void propagateIRFlags(Value *I, ArrayRef<Value *> VL, Value *OpValue = nullptr);
 
 } // end namespace llvm
 

Modified: vendor/llvm/dist/lib/CodeGen/CodeGenPrepare.cpp
==============================================================================
--- vendor/llvm/dist/lib/CodeGen/CodeGenPrepare.cpp	Sat Jul 29 20:19:37 2017	(r321690)
+++ vendor/llvm/dist/lib/CodeGen/CodeGenPrepare.cpp	Sat Jul 29 21:25:18 2017	(r321691)
@@ -4016,14 +4016,18 @@ static bool IsOperandAMemoryOperand(CallInst *CI, Inli
   return true;
 }
 
+// Max number of memory uses to look at before aborting the search to conserve
+// compile time.
+static constexpr int MaxMemoryUsesToScan = 20;
+
 /// Recursively walk all the uses of I until we find a memory use.
 /// If we find an obviously non-foldable instruction, return true.
 /// Add the ultimately found memory instructions to MemoryUses.
 static bool FindAllMemoryUses(
     Instruction *I,
     SmallVectorImpl<std::pair<Instruction *, unsigned>> &MemoryUses,
-    SmallPtrSetImpl<Instruction *> &ConsideredInsts,
-    const TargetLowering &TLI, const TargetRegisterInfo &TRI) {
+    SmallPtrSetImpl<Instruction *> &ConsideredInsts, const TargetLowering &TLI,
+    const TargetRegisterInfo &TRI, int SeenInsts = 0) {
   // If we already considered this instruction, we're done.
   if (!ConsideredInsts.insert(I).second)
     return false;
@@ -4036,8 +4040,12 @@ static bool FindAllMemoryUses(
 
   // Loop over all the uses, recursively processing them.
   for (Use &U : I->uses()) {
-    Instruction *UserI = cast<Instruction>(U.getUser());
+    // Conservatively return true if we're seeing a large number or a deep chain
+    // of users. This avoids excessive compilation times in pathological cases.
+    if (SeenInsts++ >= MaxMemoryUsesToScan)
+      return true;
 
+    Instruction *UserI = cast<Instruction>(U.getUser());
     if (LoadInst *LI = dyn_cast<LoadInst>(UserI)) {
       MemoryUses.push_back(std::make_pair(LI, U.getOperandNo()));
       continue;
@@ -4082,7 +4090,8 @@ static bool FindAllMemoryUses(
       continue;
     }
 
-    if (FindAllMemoryUses(UserI, MemoryUses, ConsideredInsts, TLI, TRI))
+    if (FindAllMemoryUses(UserI, MemoryUses, ConsideredInsts, TLI, TRI,
+                          SeenInsts))
       return true;
   }
 

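The guard above caps how many users the recursive walk visits. A
self-contained sketch of the same budgeting pattern, with illustrative
names (like the patch, the counter is threaded by value):

    #include <vector>

    struct Node { std::vector<Node *> users; };

    constexpr int MaxToScan = 20;

    // Returns true (conservatively "don't fold") once the budget runs out.
    bool walkUses(const Node *N, int seen = 0) {
      for (const Node *U : N->users) {
        if (seen++ >= MaxToScan)
          return true;                // bail out to bound compile time
        if (walkUses(U, seen))
          return true;
      }
      return false;
    }
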
Modified: vendor/llvm/dist/lib/CodeGen/InlineSpiller.cpp
==============================================================================
--- vendor/llvm/dist/lib/CodeGen/InlineSpiller.cpp	Sat Jul 29 20:19:37 2017	(r321690)
+++ vendor/llvm/dist/lib/CodeGen/InlineSpiller.cpp	Sat Jul 29 21:25:18 2017	(r321691)
@@ -643,8 +643,11 @@ void InlineSpiller::reMaterializeAll() {
       Edit->eraseVirtReg(Reg);
       continue;
     }
-    assert((LIS.hasInterval(Reg) && !LIS.getInterval(Reg).empty()) &&
-           "Reg with empty interval has reference");
+
+    assert(LIS.hasInterval(Reg) &&
+           (!LIS.getInterval(Reg).empty() || !MRI.reg_nodbg_empty(Reg)) &&
+           "Empty and not used live-range?!");
+
     RegsToSpill[ResultPos++] = Reg;
   }
   RegsToSpill.erase(RegsToSpill.begin() + ResultPos, RegsToSpill.end());

Modified: vendor/llvm/dist/lib/CodeGen/RegAllocBase.cpp
==============================================================================
--- vendor/llvm/dist/lib/CodeGen/RegAllocBase.cpp	Sat Jul 29 20:19:37 2017	(r321690)
+++ vendor/llvm/dist/lib/CodeGen/RegAllocBase.cpp	Sat Jul 29 21:25:18 2017	(r321691)
@@ -133,18 +133,19 @@ void RegAllocBase::allocatePhysRegs() {
     if (AvailablePhysReg)
       Matrix->assign(*VirtReg, AvailablePhysReg);
 
-    for (VirtRegVec::iterator I = SplitVRegs.begin(), E = SplitVRegs.end();
-         I != E; ++I) {
-      LiveInterval *SplitVirtReg = &LIS->getInterval(*I);
+    for (unsigned Reg : SplitVRegs) {
+      assert(LIS->hasInterval(Reg));
+
+      LiveInterval *SplitVirtReg = &LIS->getInterval(Reg);
       assert(!VRM->hasPhys(SplitVirtReg->reg) && "Register already assigned");
       if (MRI->reg_nodbg_empty(SplitVirtReg->reg)) {
+        assert(SplitVirtReg->empty() && "Non-empty but used interval");
         DEBUG(dbgs() << "not queueing unused  " << *SplitVirtReg << '\n');
         aboutToRemoveInterval(*SplitVirtReg);
         LIS->removeInterval(SplitVirtReg->reg);
         continue;
       }
       DEBUG(dbgs() << "queuing new interval: " << *SplitVirtReg << "\n");
-      assert(!SplitVirtReg->empty() && "expecting non-empty interval");
       assert(TargetRegisterInfo::isVirtualRegister(SplitVirtReg->reg) &&
              "expect split value in virtual register");
       enqueue(SplitVirtReg);

Modified: vendor/llvm/dist/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
==============================================================================
--- vendor/llvm/dist/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp	Sat Jul 29 20:19:37 2017	(r321690)
+++ vendor/llvm/dist/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp	Sat Jul 29 21:25:18 2017	(r321691)
@@ -2965,7 +2965,12 @@ static inline bool isSETCCorConvertedSETCC(SDValue N) 
   else if (N.getOpcode() == ISD::SIGN_EXTEND)
     N = N.getOperand(0);
 
-  return (N.getOpcode() == ISD::SETCC);
+  if (isLogicalMaskOp(N.getOpcode()))
+    return isSETCCorConvertedSETCC(N.getOperand(0)) &&
+           isSETCCorConvertedSETCC(N.getOperand(1));
+
+  return (N.getOpcode() == ISD::SETCC ||
+          ISD::isBuildVectorOfConstantSDNodes(N.getNode()));
 }
 #endif
 
@@ -2973,28 +2978,20 @@ static inline bool isSETCCorConvertedSETCC(SDValue N) 
 // to ToMaskVT if needed with vector extension or truncation.
 SDValue DAGTypeLegalizer::convertMask(SDValue InMask, EVT MaskVT,
                                       EVT ToMaskVT) {
-  LLVMContext &Ctx = *DAG.getContext();
-
   // Currently a SETCC or a AND/OR/XOR with two SETCCs are handled.
-  unsigned InMaskOpc = InMask->getOpcode();
-
   // FIXME: This code seems to be too restrictive, we might consider
   // generalizing it or dropping it.
-  assert((InMaskOpc == ISD::SETCC ||
-          ISD::isBuildVectorOfConstantSDNodes(InMask.getNode()) ||
-          (isLogicalMaskOp(InMaskOpc) &&
-           isSETCCorConvertedSETCC(InMask->getOperand(0)) &&
-           isSETCCorConvertedSETCC(InMask->getOperand(1)))) &&
-         "Unexpected mask argument.");
+  assert(isSETCCorConvertedSETCC(InMask) && "Unexpected mask argument.");
 
   // Make a new Mask node, with a legal result VT.
   SmallVector<SDValue, 4> Ops;
   for (unsigned i = 0; i < InMask->getNumOperands(); ++i)
     Ops.push_back(InMask->getOperand(i));
-  SDValue Mask = DAG.getNode(InMaskOpc, SDLoc(InMask), MaskVT, Ops);
+  SDValue Mask = DAG.getNode(InMask->getOpcode(), SDLoc(InMask), MaskVT, Ops);
 
   // If MaskVT has smaller or bigger elements than ToMaskVT, a vector sign
   // extend or truncate is needed.
+  LLVMContext &Ctx = *DAG.getContext();
   unsigned MaskScalarBits = MaskVT.getScalarSizeInBits();
   unsigned ToMaskScalBits = ToMaskVT.getScalarSizeInBits();
   if (MaskScalarBits < ToMaskScalBits) {

Modified: vendor/llvm/dist/lib/Option/OptTable.cpp
==============================================================================
--- vendor/llvm/dist/lib/Option/OptTable.cpp	Sat Jul 29 20:19:37 2017	(r321690)
+++ vendor/llvm/dist/lib/Option/OptTable.cpp	Sat Jul 29 21:25:18 2017	(r321691)
@@ -235,7 +235,9 @@ OptTable::findByPrefix(StringRef Cur, unsigned short D
       continue;
 
     for (int I = 0; In.Prefixes[I]; I++) {
-      std::string S = std::string(In.Prefixes[I]) + std::string(In.Name);
+      std::string S = std::string(In.Prefixes[I]) + std::string(In.Name) + "\t";
+      if (In.HelpText)
+        S += In.HelpText;
       if (StringRef(S).startswith(Cur))
         Ret.push_back(S);
     }

Modified: vendor/llvm/dist/lib/Support/CommandLine.cpp
==============================================================================
--- vendor/llvm/dist/lib/Support/CommandLine.cpp	Sat Jul 29 20:19:37 2017	(r321690)
+++ vendor/llvm/dist/lib/Support/CommandLine.cpp	Sat Jul 29 21:25:18 2017	(r321691)
@@ -2039,9 +2039,9 @@ void CommandLineParser::printOptionValues() {
     Opts[i].second->printOptionValue(MaxArgLen, PrintAllOptions);
 }
 
-static VersionPrinterTy OverrideVersionPrinter = nullptr;
+static void (*OverrideVersionPrinter)() = nullptr;
 
-static std::vector<VersionPrinterTy> *ExtraVersionPrinters = nullptr;
+static std::vector<void (*)()> *ExtraVersionPrinters = nullptr;
 
 namespace {
 class VersionPrinter {
@@ -2081,7 +2081,7 @@ class VersionPrinter { (public)
       return;
 
     if (OverrideVersionPrinter != nullptr) {
-      OverrideVersionPrinter(outs());
+      (*OverrideVersionPrinter)();
       exit(0);
     }
     print();
@@ -2090,8 +2090,10 @@ class VersionPrinter { (public)
     // information.
     if (ExtraVersionPrinters != nullptr) {
       outs() << '\n';
-      for (auto I : *ExtraVersionPrinters)
-        I(outs());
+      for (std::vector<void (*)()>::iterator I = ExtraVersionPrinters->begin(),
+                                             E = ExtraVersionPrinters->end();
+           I != E; ++I)
+        (*I)();
     }
 
     exit(0);
@@ -2129,11 +2131,11 @@ void cl::PrintHelpMessage(bool Hidden, bool Categorize
 /// Utility function for printing version number.
 void cl::PrintVersionMessage() { VersionPrinterInstance.print(); }
 
-void cl::SetVersionPrinter(VersionPrinterTy func) { OverrideVersionPrinter = func; }
+void cl::SetVersionPrinter(void (*func)()) { OverrideVersionPrinter = func; }
 
-void cl::AddExtraVersionPrinter(VersionPrinterTy func) {
+void cl::AddExtraVersionPrinter(void (*func)()) {
   if (!ExtraVersionPrinters)
-    ExtraVersionPrinters = new std::vector<VersionPrinterTy>;
+    ExtraVersionPrinters = new std::vector<void (*)()>;
 
   ExtraVersionPrinters->push_back(func);
 }

Modified: vendor/llvm/dist/lib/Support/ErrorHandling.cpp
==============================================================================
--- vendor/llvm/dist/lib/Support/ErrorHandling.cpp	Sat Jul 29 20:19:37 2017	(r321690)
+++ vendor/llvm/dist/lib/Support/ErrorHandling.cpp	Sat Jul 29 21:25:18 2017	(r321691)
@@ -169,7 +169,8 @@ void llvm::report_bad_alloc_error(const char *Reason, 
   // Don't call the normal error handler. It may allocate memory. Directly write
   // an OOM to stderr and abort.
   char OOMMessage[] = "LLVM ERROR: out of memory\n";
-  (void)::write(2, OOMMessage, strlen(OOMMessage));
+  ssize_t written = ::write(2, OOMMessage, strlen(OOMMessage));
+  (void)written;
   abort();
 #endif
 }

Modified: vendor/llvm/dist/lib/Support/TargetRegistry.cpp
==============================================================================
--- vendor/llvm/dist/lib/Support/TargetRegistry.cpp	Sat Jul 29 20:19:37 2017	(r321690)
+++ vendor/llvm/dist/lib/Support/TargetRegistry.cpp	Sat Jul 29 21:25:18 2017	(r321691)
@@ -114,7 +114,7 @@ static int TargetArraySortFn(const std::pair<StringRef
   return LHS->first.compare(RHS->first);
 }
 
-void TargetRegistry::printRegisteredTargetsForVersion(raw_ostream &OS) {
+void TargetRegistry::printRegisteredTargetsForVersion() {
   std::vector<std::pair<StringRef, const Target*> > Targets;
   size_t Width = 0;
   for (const auto &T : TargetRegistry::targets()) {
@@ -123,6 +123,7 @@ void TargetRegistry::printRegisteredTargetsForVersion(
   }
   array_pod_sort(Targets.begin(), Targets.end(), TargetArraySortFn);
 
+  raw_ostream &OS = outs();
   OS << "  Registered Targets:\n";
   for (unsigned i = 0, e = Targets.size(); i != e; ++i) {
     OS << "    " << Targets[i].first;

Modified: vendor/llvm/dist/lib/Target/AArch64/AArch64ISelLowering.cpp
==============================================================================
--- vendor/llvm/dist/lib/Target/AArch64/AArch64ISelLowering.cpp	Sat Jul 29 20:19:37 2017	(r321690)
+++ vendor/llvm/dist/lib/Target/AArch64/AArch64ISelLowering.cpp	Sat Jul 29 21:25:18 2017	(r321691)
@@ -2889,9 +2889,12 @@ void AArch64TargetLowering::saveVarArgRegisters(CCStat
   unsigned GPRSaveSize = 8 * (NumGPRArgRegs - FirstVariadicGPR);
   int GPRIdx = 0;
   if (GPRSaveSize != 0) {
-    if (IsWin64)
+    if (IsWin64) {
       GPRIdx = MFI.CreateFixedObject(GPRSaveSize, -(int)GPRSaveSize, false);
-    else
+      if (GPRSaveSize & 15)
+        // The extra size here, if triggered, will always be 8.
+        MFI.CreateFixedObject(16 - (GPRSaveSize & 15), -(int)alignTo(GPRSaveSize, 16), false);
+    } else
       GPRIdx = MFI.CreateStackObject(GPRSaveSize, 8, false);
 
     SDValue FIN = DAG.getFrameIndex(GPRIdx, PtrVT);

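The new Win64 branch pads the GPR save area to a 16-byte multiple; since
GPRSaveSize is a multiple of 8, the extra fixed object, when created, is
always 8 bytes. A small arithmetic sketch of that rounding, separate from
the patch:

    #include <cassert>

    // Round the varargs GPR save area up to the 16-byte stack alignment
    // required by the Win64 ABI. For GPRSaveSize = 8 * k, the pad is
    // either 0 or exactly 8 bytes.
    unsigned paddedGPRSaveSize(unsigned GPRSaveSize) {
      assert(GPRSaveSize % 8 == 0 && "save area is whole 8-byte registers");
      unsigned Rem = GPRSaveSize & 15;  // 0 or 8
      return Rem ? GPRSaveSize + (16 - Rem) : GPRSaveSize;
    }
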
Modified: vendor/llvm/dist/lib/Target/AMDGPU/InstPrinter/AMDGPUInstPrinter.cpp
==============================================================================
--- vendor/llvm/dist/lib/Target/AMDGPU/InstPrinter/AMDGPUInstPrinter.cpp	Sat Jul 29 20:19:37 2017	(r321690)
+++ vendor/llvm/dist/lib/Target/AMDGPU/InstPrinter/AMDGPUInstPrinter.cpp	Sat Jul 29 21:25:18 2017	(r321691)
@@ -297,6 +297,11 @@ void AMDGPUInstPrinter::printRegOperand(unsigned RegNo
   case AMDGPU::FLAT_SCR_HI:
     O << "flat_scratch_hi";
     return;
+  case AMDGPU::FP_REG:
+  case AMDGPU::SP_REG:
+  case AMDGPU::SCRATCH_WAVE_OFFSET_REG:
+  case AMDGPU::PRIVATE_RSRC_REG:
+    llvm_unreachable("pseudo-register should not ever be emitted");
   default:
     break;
   }

Modified: vendor/llvm/dist/lib/Target/AMDGPU/SIRegisterInfo.td
==============================================================================
--- vendor/llvm/dist/lib/Target/AMDGPU/SIRegisterInfo.td	Sat Jul 29 20:19:37 2017	(r321690)
+++ vendor/llvm/dist/lib/Target/AMDGPU/SIRegisterInfo.td	Sat Jul 29 21:25:18 2017	(r321691)
@@ -274,8 +274,7 @@ def VGPR_512 : RegisterTuples<[sub0, sub1, sub2, sub3,
 def SReg_32_XM0_XEXEC : RegisterClass<"AMDGPU", [i32, f32, i16, f16, v2i16, v2f16], 32,
   (add SGPR_32, VCC_LO, VCC_HI, FLAT_SCR_LO, FLAT_SCR_HI,
    TTMP_32, TMA_LO, TMA_HI, TBA_LO, TBA_HI, SRC_SHARED_BASE, SRC_SHARED_LIMIT,
-   SRC_PRIVATE_BASE, SRC_PRIVATE_LIMIT,
-   FP_REG, SP_REG, SCRATCH_WAVE_OFFSET_REG)> {
+   SRC_PRIVATE_BASE, SRC_PRIVATE_LIMIT)> {
   let AllocationPriority = 7;
 }
 

Modified: vendor/llvm/dist/lib/Target/Sparc/MCTargetDesc/SparcAsmBackend.cpp
==============================================================================
--- vendor/llvm/dist/lib/Target/Sparc/MCTargetDesc/SparcAsmBackend.cpp	Sat Jul 29 20:19:37 2017	(r321690)
+++ vendor/llvm/dist/lib/Target/Sparc/MCTargetDesc/SparcAsmBackend.cpp	Sat Jul 29 21:25:18 2017	(r321691)
@@ -61,14 +61,6 @@ static unsigned adjustFixupValue(unsigned Kind, uint64
   case Sparc::fixup_sparc_lo10:
     return Value & 0x3ff;
 
-  case Sparc::fixup_sparc_tls_ldo_hix22:
-  case Sparc::fixup_sparc_tls_le_hix22:
-    return (~Value >> 10) & 0x3fffff;
-
-  case Sparc::fixup_sparc_tls_ldo_lox10:
-  case Sparc::fixup_sparc_tls_le_lox10:
-    return (~(~Value & 0x3ff)) & 0x1fff;
-
   case Sparc::fixup_sparc_h44:
     return (Value >> 22) & 0x3fffff;
 
@@ -83,6 +75,13 @@ static unsigned adjustFixupValue(unsigned Kind, uint64
 
   case Sparc::fixup_sparc_hm:
     return (Value >> 32) & 0x3ff;
+
+  case Sparc::fixup_sparc_tls_ldo_hix22:
+  case Sparc::fixup_sparc_tls_le_hix22:
+  case Sparc::fixup_sparc_tls_ldo_lox10:
+  case Sparc::fixup_sparc_tls_le_lox10:
+    assert(Value == 0 && "Sparc TLS relocs expect zero Value");
+    return 0;
 
   case Sparc::fixup_sparc_tls_gd_add:
   case Sparc::fixup_sparc_tls_gd_call:

Modified: vendor/llvm/dist/lib/Target/SystemZ/SystemZScheduleZ14.td
==============================================================================
--- vendor/llvm/dist/lib/Target/SystemZ/SystemZScheduleZ14.td	Sat Jul 29 20:19:37 2017	(r321690)
+++ vendor/llvm/dist/lib/Target/SystemZ/SystemZScheduleZ14.td	Sat Jul 29 21:25:18 2017	(r321691)
@@ -455,10 +455,10 @@ def : InstRW<[FXa, LSU, Lat8], (instregex "MH(Y)?$")>;
 def : InstRW<[FXa2, Lat6, GroupAlone], (instregex "M(L)?R$")>;
 def : InstRW<[FXa2, LSU, Lat10, GroupAlone], (instregex "M(FY|L)?$")>;
 def : InstRW<[FXa, LSU, Lat8], (instregex "MGH$")>;
-def : InstRW<[FXa, LSU, Lat12, GroupAlone], (instregex "MG$")>;
-def : InstRW<[FXa, Lat8, GroupAlone], (instregex "MGRK$")>;
-def : InstRW<[FXa, LSU, Lat9, GroupAlone], (instregex "MSC$")>;
-def : InstRW<[FXa, LSU, Lat11, GroupAlone], (instregex "MSGC$")>;
+def : InstRW<[FXa, FXa, LSU, Lat12, GroupAlone], (instregex "MG$")>;
+def : InstRW<[FXa, FXa, Lat8, GroupAlone], (instregex "MGRK$")>;
+def : InstRW<[FXa, LSU, Lat9], (instregex "MSC$")>;
+def : InstRW<[FXa, LSU, Lat11], (instregex "MSGC$")>;
 def : InstRW<[FXa, Lat5], (instregex "MSRKC$")>;
 def : InstRW<[FXa, Lat7], (instregex "MSGRKC$")>;
 
@@ -620,7 +620,7 @@ def : InstRW<[FXa, Lat30], (instregex "(PCC|PPNO|PRNO)
 
 def : InstRW<[LSU], (instregex "LGG$")>;
 def : InstRW<[LSU, Lat5], (instregex "LLGFSG$")>;
-def : InstRW<[LSU, Lat30, GroupAlone], (instregex "(L|ST)GSC$")>;
+def : InstRW<[LSU, Lat30], (instregex "(L|ST)GSC$")>;
 
 //===----------------------------------------------------------------------===//
 // Decimal arithmetic
@@ -708,7 +708,7 @@ def : InstRW<[FXb, LSU, Lat5], (instregex "NTSTG$")>;
 // Processor assist
 //===----------------------------------------------------------------------===//
 
-def : InstRW<[FXb], (instregex "PPA$")>;
+def : InstRW<[FXb, GroupAlone], (instregex "PPA$")>;
 
 //===----------------------------------------------------------------------===//
 // Miscellaneous Instructions.
@@ -1276,9 +1276,9 @@ def : InstRW<[VecXsPm], (instregex "VESRL(B|F|G|H)?$")
 def : InstRW<[VecXsPm], (instregex "VESRLV(B|F|G|H)?$")>;
 
 def : InstRW<[VecXsPm], (instregex "VSL(DB)?$")>;
-def : InstRW<[VecXsPm, VecXsPm, Lat8], (instregex "VSLB$")>;
+def : InstRW<[VecXsPm], (instregex "VSLB$")>;
 def : InstRW<[VecXsPm], (instregex "VSR(A|L)$")>;
-def : InstRW<[VecXsPm, VecXsPm, Lat8], (instregex "VSR(A|L)B$")>;
+def : InstRW<[VecXsPm], (instregex "VSR(A|L)B$")>;
 
 def : InstRW<[VecXsPm], (instregex "VSB(I|IQ|CBI|CBIQ)?$")>;
 def : InstRW<[VecXsPm], (instregex "VSCBI(B|F|G|H|Q)?$")>;
@@ -1435,9 +1435,9 @@ def : InstRW<[VecStr, Lat5], (instregex "VSTRCZ(B|F|H)
 // Vector: Packed-decimal instructions
 //===----------------------------------------------------------------------===//
 
-def : InstRW<[VecDF, VecDF, Lat10, GroupAlone], (instregex "VLIP$")>;
-def : InstRW<[VecDFX, LSU, Lat12, GroupAlone], (instregex "VPKZ$")>;
-def : InstRW<[VecDFX, FXb, LSU, Lat12, GroupAlone], (instregex "VUPKZ$")>;
+def : InstRW<[VecDF, VecDF, Lat10], (instregex "VLIP$")>;
+def : InstRW<[VecDFX, LSU, GroupAlone], (instregex "VPKZ$")>;
+def : InstRW<[VecDFX, FXb, LSU, Lat12, BeginGroup], (instregex "VUPKZ$")>;
 def : InstRW<[VecDF, VecDF, FXb, Lat20, GroupAlone], (instregex "VCVB(G)?$")>;
 def : InstRW<[VecDF, VecDF, FXb, Lat20, GroupAlone], (instregex "VCVD(G)?$")>;
 def : InstRW<[VecDFX], (instregex "V(A|S)P$")>;

Modified: vendor/llvm/dist/lib/Target/X86/X86ISelDAGToDAG.cpp
==============================================================================
--- vendor/llvm/dist/lib/Target/X86/X86ISelDAGToDAG.cpp	Sat Jul 29 20:19:37 2017	(r321690)
+++ vendor/llvm/dist/lib/Target/X86/X86ISelDAGToDAG.cpp	Sat Jul 29 21:25:18 2017	(r321691)
@@ -1055,7 +1055,10 @@ static bool foldMaskAndShiftToScale(SelectionDAG &DAG,
 
   // Scale the leading zero count down based on the actual size of the value.
   // Also scale it down based on the size of the shift.
-  MaskLZ -= (64 - X.getSimpleValueType().getSizeInBits()) + ShiftAmt;
+  unsigned ScaleDown = (64 - X.getSimpleValueType().getSizeInBits()) + ShiftAmt;
+  if (MaskLZ < ScaleDown)
+    return true;
+  MaskLZ -= ScaleDown;
 
   // The final check is to ensure that any masked out high bits of X are
   // already known to be zero. Otherwise, the mask has a semantic impact

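The fix guards an unsigned subtraction that could wrap when the mask has
fewer leading zeros than the scale factor; a minimal sketch of the guard
in isolation (names are illustrative):

    // Returns true to reject the fold when MaskLZ - ScaleDown would
    // underflow; otherwise performs the adjustment in place.
    bool scaleDownLeadingZeros(unsigned &MaskLZ, unsigned ValueBits,
                               unsigned ShiftAmt) {
      unsigned ScaleDown = (64 - ValueBits) + ShiftAmt;
      if (MaskLZ < ScaleDown)
        return true;      // would wrap around: give up on the transform
      MaskLZ -= ScaleDown;
      return false;
    }
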
Modified: vendor/llvm/dist/lib/Target/X86/X86ISelLowering.cpp
==============================================================================
--- vendor/llvm/dist/lib/Target/X86/X86ISelLowering.cpp	Sat Jul 29 20:19:37 2017	(r321690)
+++ vendor/llvm/dist/lib/Target/X86/X86ISelLowering.cpp	Sat Jul 29 21:25:18 2017	(r321691)
@@ -1672,8 +1672,8 @@ X86TargetLowering::X86TargetLowering(const X86TargetMa
 
   // TODO: These control memcmp expansion in CGP and could be raised higher, but
   // that needs to benchmarked and balanced with the potential use of vector
-  // load/store types (PR33329).
-  MaxLoadsPerMemcmp = 4;
+  // load/store types (PR33329, PR33914).
+  MaxLoadsPerMemcmp = 2;
   MaxLoadsPerMemcmpOptSize = 2;
 
   // Set loop alignment to 2^ExperimentalPrefLoopAlignment bytes (default: 2^4).
@@ -22022,8 +22022,9 @@ static SDValue LowerScalarImmediateShift(SDValue Op, S
         return getTargetVShiftByConstNode(X86Opc, dl, VT, R, ShiftAmt, DAG);
 
       // i64 SRA needs to be performed as partial shifts.
-      if ((VT == MVT::v2i64 || (Subtarget.hasInt256() && VT == MVT::v4i64)) &&
-          Op.getOpcode() == ISD::SRA && !Subtarget.hasXOP())
+      if (((!Subtarget.hasXOP() && VT == MVT::v2i64) ||
+           (Subtarget.hasInt256() && VT == MVT::v4i64)) &&
+          Op.getOpcode() == ISD::SRA)
         return ArithmeticShiftRight64(ShiftAmt);
 
       if (VT == MVT::v16i8 ||

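MaxLoadsPerMemcmp bounds how many loads may be emitted when a memcmp call
is expanded inline instead of calling the library. An illustrative
(non-LLVM) sketch of what a two-load-per-operand expansion of a 16-byte
equality compare looks like:

    #include <cstdint>
    #include <cstring>

    // Equivalent of memcmp(a, b, 16) == 0 expanded into two 8-byte loads
    // per operand; memcpy is the portable way to express the raw loads.
    bool equal16(const void *A, const void *B) {
      std::uint64_t A0, A1, B0, B1;
      std::memcpy(&A0, A, 8);
      std::memcpy(&A1, static_cast<const char *>(A) + 8, 8);
      std::memcpy(&B0, B, 8);
      std::memcpy(&B1, static_cast<const char *>(B) + 8, 8);
      return ((A0 ^ B0) | (A1 ^ B1)) == 0;
    }
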
Modified: vendor/llvm/dist/lib/Transforms/Scalar/JumpThreading.cpp
==============================================================================
--- vendor/llvm/dist/lib/Transforms/Scalar/JumpThreading.cpp	Sat Jul 29 20:19:37 2017	(r321690)
+++ vendor/llvm/dist/lib/Transforms/Scalar/JumpThreading.cpp	Sat Jul 29 21:25:18 2017	(r321691)
@@ -64,6 +64,11 @@ ImplicationSearchThreshold(
            "condition to use to thread over a weaker condition"),
   cl::init(3), cl::Hidden);
 
+static cl::opt<bool> PrintLVIAfterJumpThreading(
+    "print-lvi-after-jump-threading",
+    cl::desc("Print the LazyValueInfo cache after JumpThreading"), cl::init(false),
+    cl::Hidden);
+
 namespace {
   /// This pass performs 'jump threading', which looks at blocks that have
   /// multiple predecessors and multiple successors.  If one or more of the
@@ -93,9 +98,10 @@ namespace {
     bool runOnFunction(Function &F) override;
 
     void getAnalysisUsage(AnalysisUsage &AU) const override {
+      if (PrintLVIAfterJumpThreading)
+        AU.addRequired<DominatorTreeWrapperPass>();
       AU.addRequired<AAResultsWrapperPass>();
       AU.addRequired<LazyValueInfoWrapperPass>();
-      AU.addPreserved<LazyValueInfoWrapperPass>();
       AU.addPreserved<GlobalsAAWrapperPass>();
       AU.addRequired<TargetLibraryInfoWrapperPass>();
     }
@@ -137,8 +143,14 @@ bool JumpThreading::runOnFunction(Function &F) {
     BFI.reset(new BlockFrequencyInfo(F, *BPI, LI));
   }
 
-  return Impl.runImpl(F, TLI, LVI, AA, HasProfileData, std::move(BFI),
-                      std::move(BPI));
+  bool Changed = Impl.runImpl(F, TLI, LVI, AA, HasProfileData, std::move(BFI),
+                              std::move(BPI));
+  if (PrintLVIAfterJumpThreading) {
+    dbgs() << "LVI for function '" << F.getName() << "':\n";
+    LVI->printLVI(F, getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
+                  dbgs());
+  }
+  return Changed;
 }
 
 PreservedAnalyses JumpThreadingPass::run(Function &F,
@@ -231,13 +243,15 @@ bool JumpThreadingPass::runImpl(Function &F, TargetLib
       // Can't thread an unconditional jump, but if the block is "almost
       // empty", we can replace uses of it with uses of the successor and make
       // this dead.
-      // We should not eliminate the loop header either, because eliminating
-      // a loop header might later prevent LoopSimplify from transforming nested
-      // loops into simplified form.
+      // We should not eliminate the loop header or latch either, because
+      // eliminating a loop header or latch might later prevent LoopSimplify
+      // from transforming nested loops into simplified form. We will rely on
+      // later passes in backend to clean up empty blocks.
       if (BI && BI->isUnconditional() &&
           BB != &BB->getParent()->getEntryBlock() &&
           // If the terminator is the only non-phi instruction, try to nuke it.
-          BB->getFirstNonPHIOrDbg()->isTerminator() && !LoopHeaders.count(BB)) {
+          BB->getFirstNonPHIOrDbg()->isTerminator() && !LoopHeaders.count(BB) &&
+          !LoopHeaders.count(BI->getSuccessor(0))) {
         // FIXME: It is always conservatively correct to drop the info
         // for a block even if it doesn't get erased.  This isn't totally
         // awesome, but it allows us to use AssertingVH to prevent nasty

Modified: vendor/llvm/dist/lib/Transforms/Utils/LoopUtils.cpp
==============================================================================
--- vendor/llvm/dist/lib/Transforms/Utils/LoopUtils.cpp	Sat Jul 29 20:19:37 2017	(r321690)
+++ vendor/llvm/dist/lib/Transforms/Utils/LoopUtils.cpp	Sat Jul 29 21:25:18 2017	(r321691)
@@ -1376,16 +1376,21 @@ Value *llvm::createTargetReduction(IRBuilder<> &Builde
   }
 }
 
-void llvm::propagateIRFlags(Value *I, ArrayRef<Value *> VL) {
-  if (auto *VecOp = dyn_cast<Instruction>(I)) {
-    if (auto *I0 = dyn_cast<Instruction>(VL[0])) {
-      // VecOVp is initialized to the 0th scalar, so start counting from index
-      // '1'.
-      VecOp->copyIRFlags(I0);
-      for (int i = 1, e = VL.size(); i < e; ++i) {
-        if (auto *Scalar = dyn_cast<Instruction>(VL[i]))
-          VecOp->andIRFlags(Scalar);
-      }
-    }
+void llvm::propagateIRFlags(Value *I, ArrayRef<Value *> VL, Value *OpValue) {
+  auto *VecOp = dyn_cast<Instruction>(I);
+  if (!VecOp)
+    return;
+  auto *Intersection = (OpValue == nullptr) ? dyn_cast<Instruction>(VL[0])
+                                            : dyn_cast<Instruction>(OpValue);
+  if (!Intersection)
+    return;
+  const unsigned Opcode = Intersection->getOpcode();
+  VecOp->copyIRFlags(Intersection);
+  for (auto *V : VL) {
+    auto *Instr = dyn_cast<Instruction>(V);
+    if (!Instr)
+      continue;
+    if (OpValue == nullptr || Opcode == Instr->getOpcode())
+      VecOp->andIRFlags(V);
   }
 }

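The rewritten propagateIRFlags intersects flags only over scalars whose
opcode matches OpValue when one is supplied. A generic sketch of that
filtered intersection with the flags modeled as a bitmask (the types here
are illustrative, not LLVM's):

    #include <cstdint>
    #include <vector>

    struct Op {
      unsigned Opcode;
      std::uint32_t Flags;  // stand-in for NSW/NUW/exact/fast-math bits
    };

    // Intersect the flags of every op in VL that matches Filter's opcode,
    // or of all ops when Filter is null, seeding from Filter (or VL[0]).
    std::uint32_t intersectFlags(const std::vector<Op *> &VL,
                                 const Op *Filter) {
      const Op *Seed = Filter ? Filter : VL.front();
      std::uint32_t Result = Seed->Flags;
      for (const Op *O : VL)
        if (!Filter || O->Opcode == Filter->Opcode)
          Result &= O->Flags;  // logical-and keeps only the common flags
      return Result;
    }
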
Modified: vendor/llvm/dist/lib/Transforms/Utils/SimplifyCFG.cpp
==============================================================================
--- vendor/llvm/dist/lib/Transforms/Utils/SimplifyCFG.cpp	Sat Jul 29 20:19:37 2017	(r321690)
+++ vendor/llvm/dist/lib/Transforms/Utils/SimplifyCFG.cpp	Sat Jul 29 21:25:18 2017	(r321691)
@@ -5656,20 +5656,22 @@ static bool TryToMergeLandingPad(LandingPadInst *LPad,
 bool SimplifyCFGOpt::SimplifyUncondBranch(BranchInst *BI,
                                           IRBuilder<> &Builder) {
   BasicBlock *BB = BI->getParent();
+  BasicBlock *Succ = BI->getSuccessor(0);
 
   if (SinkCommon && SinkThenElseCodeToEnd(BI))
     return true;
 
   // If the Terminator is the only non-phi instruction, simplify the block.
-  // if LoopHeader is provided, check if the block is a loop header
-  // (This is for early invocations before loop simplify and vectorization
-  // to keep canonical loop forms for nested loops.
-  // These blocks can be eliminated when the pass is invoked later
-  // in the back-end.)
+  // if LoopHeader is provided, check if the block or its successor is a loop
+  // header (This is for early invocations before loop simplify and
+  // vectorization to keep canonical loop forms for nested loops. These blocks
+  // can be eliminated when the pass is invoked later in the back-end.)
+  bool NeedCanonicalLoop =
+      !LateSimplifyCFG &&
+      (LoopHeaders && (LoopHeaders->count(BB) || LoopHeaders->count(Succ)));
   BasicBlock::iterator I = BB->getFirstNonPHIOrDbg()->getIterator();
   if (I->isTerminator() && BB != &BB->getParent()->getEntryBlock() &&
-      (!LoopHeaders || !LoopHeaders->count(BB)) &&
-      TryToSimplifyUncondBranchFromEmptyBlock(BB))
+      !NeedCanonicalLoop && TryToSimplifyUncondBranchFromEmptyBlock(BB))
     return true;
 
   // If the only instruction in the block is a seteq/setne comparison

Modified: vendor/llvm/dist/test/Analysis/LazyValueAnalysis/lvi-after-jumpthreading.ll
==============================================================================
--- vendor/llvm/dist/test/Analysis/LazyValueAnalysis/lvi-after-jumpthreading.ll	Sat Jul 29 20:19:37 2017	(r321690)
+++ vendor/llvm/dist/test/Analysis/LazyValueAnalysis/lvi-after-jumpthreading.ll	Sat Jul 29 21:25:18 2017	(r321691)
@@ -1,4 +1,4 @@
-; RUN: opt < %s -jump-threading -print-lazy-value-info -disable-output 2>&1 | FileCheck %s
+; RUN: opt < %s -jump-threading -print-lvi-after-jump-threading -disable-output 2>&1 | FileCheck %s
 
 ; Testing LVI cache after jump-threading
 
@@ -19,13 +19,10 @@ entry:
 ; CHECK-NEXT:     ; LatticeVal for: 'i32 %a' is: overdefined
 ; CHECK-NEXT:     ; LatticeVal for: 'i32 %length' is: overdefined
 ; CHECK-NEXT:     ; LatticeVal for: '  %iv = phi i32 [ 0, %entry ], [ %iv.next, %backedge ]' in BB: '%backedge' is: constantrange<0, 400>
-; CHECK-NEXT:     ; LatticeVal for: '  %iv = phi i32 [ 0, %entry ], [ %iv.next, %backedge ]' in BB: '%exit' is: constantrange<399, 400>
 ; CHECK-NEXT:  %iv = phi i32 [ 0, %entry ], [ %iv.next, %backedge ]
 ; CHECK-NEXT:     ; LatticeVal for: '  %iv.next = add nsw i32 %iv, 1' in BB: '%backedge' is: constantrange<1, 401>
-; CHECK-NEXT:     ; LatticeVal for: '  %iv.next = add nsw i32 %iv, 1' in BB: '%exit' is: constantrange<400, 401>
 ; CHECK-NEXT:  %iv.next = add nsw i32 %iv, 1
 ; CHECK-NEXT:     ; LatticeVal for: '  %cont = icmp slt i32 %iv.next, 400' in BB: '%backedge' is: overdefined
-; CHECK-NEXT:     ; LatticeVal for: '  %cont = icmp slt i32 %iv.next, 400' in BB: '%exit' is: constantrange<0, -1>
 ; CHECK-NEXT:  %cont = icmp slt i32 %iv.next, 400
 ; CHECK-NOT: loop
 loop:

Modified: vendor/llvm/dist/test/CodeGen/AArch64/aarch64-loop-gep-opt.ll
==============================================================================
--- vendor/llvm/dist/test/CodeGen/AArch64/aarch64-loop-gep-opt.ll	Sat Jul 29 20:19:37 2017	(r321690)
+++ vendor/llvm/dist/test/CodeGen/AArch64/aarch64-loop-gep-opt.ll	Sat Jul 29 21:25:18 2017	(r321691)
@@ -19,9 +19,9 @@ entry:
 
 do.body.i:
 ; CHECK-LABEL: do.body.i:
-; CHECK:          %uglygep2 = getelementptr i8, i8* %uglygep, i64 %3
-; CHECK-NEXT:     %4 = bitcast i8* %uglygep2 to i32*
-; CHECK-NOT:      %uglygep2 = getelementptr i8, i8* %uglygep, i64 1032
+; CHECK:          %uglygep1 = getelementptr i8, i8* %uglygep, i64 %3
+; CHECK-NEXT:     %4 = bitcast i8* %uglygep1 to i32*
+; CHECK-NOT:      %uglygep1 = getelementptr i8, i8* %uglygep, i64 1032
 
 
   %0 = phi i32 [ 256, %entry ], [ %.be, %do.body.i.backedge ]

Modified: vendor/llvm/dist/test/CodeGen/AArch64/aarch64_win64cc_vararg.ll
==============================================================================
--- vendor/llvm/dist/test/CodeGen/AArch64/aarch64_win64cc_vararg.ll	Sat Jul 29 20:19:37 2017	(r321690)
+++ vendor/llvm/dist/test/CodeGen/AArch64/aarch64_win64cc_vararg.ll	Sat Jul 29 21:25:18 2017	(r321691)
@@ -59,10 +59,12 @@ entry:
 }
 
 ; CHECK-LABEL: f7:
-; CHECK: sub     sp, sp, #16
-; CHECK: add     x8, sp, #8
-; CHECK: add     x0, sp, #8
-; CHECK: stp     x8, x7, [sp], #16
+; CHECK: sub     sp, sp, #32
+; CHECK: add     x8, sp, #24
+; CHECK: str     x7, [sp, #24]
+; CHECK: add     x0, sp, #24
+; CHECK: str     x8, [sp, #8]
+; CHECK: add     sp, sp, #32
 ; CHECK: ret
 define win64cc i8* @f7(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, i64 %a5, i64 %a6, ...) nounwind {
 entry:

Modified: vendor/llvm/dist/test/CodeGen/AArch64/win64_vararg.ll
==============================================================================
--- vendor/llvm/dist/test/CodeGen/AArch64/win64_vararg.ll	Sat Jul 29 20:19:37 2017	(r321690)
+++ vendor/llvm/dist/test/CodeGen/AArch64/win64_vararg.ll	Sat Jul 29 21:25:18 2017	(r321691)
@@ -59,10 +59,12 @@ entry:
 }
 
 ; CHECK-LABEL: f7:
-; CHECK: sub     sp, sp, #16
-; CHECK: add     x8, sp, #8
-; CHECK: add     x0, sp, #8
-; CHECK: stp     x8, x7, [sp], #16
+; CHECK: sub     sp, sp, #32
+; CHECK: add     x8, sp, #24
+; CHECK: str     x7, [sp, #24]
+; CHECK: add     x0, sp, #24
+; CHECK: str     x8, [sp, #8]
+; CHECK: add     sp, sp, #32
 ; CHECK: ret
 define i8* @f7(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, i64 %a5, i64 %a6, ...) nounwind {
 entry:
@@ -79,9 +81,8 @@ entry:
 ; CHECK: stp     x6, x7, [sp, #64]
 ; CHECK: stp     x4, x5, [sp, #48]
 ; CHECK: stp     x2, x3, [sp, #32]
-; CHECK: stp     x8, x1, [sp, #16]
-; CHECK: str     x8, [sp, #8]
-; CHECK: add     sp, sp, #80
+; CHECK: str     x1, [sp, #24]
+; CHECK: stp     x8, x8, [sp], #80
 ; CHECK: ret
 define void @copy1(i64 %a0, ...) nounwind {
 entry:
@@ -92,4 +93,55 @@ entry:
   call void @llvm.va_start(i8* %ap1)
   call void @llvm.va_copy(i8* %cp1, i8* %ap1)
   ret void
+}
+
+declare void @llvm.va_end(i8*)
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
+
+declare i32 @__stdio_common_vsprintf(i64, i8*, i64, i8*, i8*, i8*) local_unnamed_addr #3
+declare i64* @__local_stdio_printf_options() local_unnamed_addr #4
+
+; CHECK-LABEL: snprintf
+; CHECK: sub     sp,  sp, #96
+; CHECK: stp     x21, x20, [sp, #16]
+; CHECK: stp     x19, x30, [sp, #32]
+; CHECK: add     x8, sp, #56
+; CHECK: mov     x19, x2
+; CHECK: mov     x20, x1
+; CHECK: mov     x21, x0
+; CHECK: stp     x6, x7, [sp, #80]
+; CHECK: stp     x4, x5, [sp, #64]
+; CHECK: str     x3, [sp, #56]
+; CHECK: str     x8, [sp, #8]
+; CHECK: bl      __local_stdio_printf_options
+; CHECK: ldr     x8, [x0]
+; CHECK: add     x5, sp, #56
+; CHECK: mov     x1, x21
+; CHECK: mov     x2, x20
+; CHECK: orr     x0, x8, #0x2
+; CHECK: mov     x3, x19
+; CHECK: mov     x4, xzr
+; CHECK: bl      __stdio_common_vsprintf
+; CHECK: ldp     x19, x30, [sp, #32]
+; CHECK: ldp     x21, x20, [sp, #16]
+; CHECK: cmp     w0, #0
+; CHECK: csinv   w0, w0, wzr, ge
+; CHECK: add     sp, sp, #96
+; CHECK: ret
+define i32 @snprintf(i8*, i64, i8*, ...) local_unnamed_addr #5 {
+  %4 = alloca i8*, align 8
+  %5 = bitcast i8** %4 to i8*
+  call void @llvm.lifetime.start.p0i8(i64 8, i8* nonnull %5) #2
+  call void @llvm.va_start(i8* nonnull %5)
+  %6 = load i8*, i8** %4, align 8
+  %7 = call i64* @__local_stdio_printf_options() #2
+  %8 = load i64, i64* %7, align 8
+  %9 = or i64 %8, 2
+  %10 = call i32 @__stdio_common_vsprintf(i64 %9, i8* %0, i64 %1, i8* %2, i8* null, i8* %6) #2
+  %11 = icmp sgt i32 %10, -1
+  %12 = select i1 %11, i32 %10, i32 -1
+  call void @llvm.va_end(i8* nonnull %5)
+  call void @llvm.lifetime.end.p0i8(i64 8, i8* nonnull %5) #2
+  ret i32 %12
 }

Added: vendor/llvm/dist/test/CodeGen/AMDGPU/spill-empty-live-interval.mir
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ vendor/llvm/dist/test/CodeGen/AMDGPU/spill-empty-live-interval.mir	Sat Jul 29 21:25:18 2017	(r321691)
@@ -0,0 +1,74 @@
+# RUN: llc -mtriple=amdgcn-amd-amdhsa-opencl -verify-machineinstrs -stress-regalloc=1 -start-before=simple-register-coalescing -stop-after=greedy -o - %s | FileCheck %s
+# https://bugs.llvm.org/show_bug.cgi?id=33620
+
+---
+# This would assert due to the empty live interval created for %vreg9
+# on the last S_NOP with an undef subreg use.
+
+# CHECK-LABEL: name: expecting_non_empty_interval
+
+# CHECK: undef %7.sub1 = V_MAC_F32_e32 0, undef %1, undef %7.sub1, implicit %exec
+# CHECK-NEXT: SI_SPILL_V64_SAVE %7, %stack.0, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr5, 0, implicit %exec :: (store 8 into %stack.0, align 4)
+# CHECK-NEXT: undef %5.sub1 = V_MOV_B32_e32 1786773504, implicit %exec
+# CHECK-NEXT: dead %2 = V_MUL_F32_e32 0, %5.sub1, implicit %exec
+
+# CHECK: S_NOP 0, implicit %6.sub1
+# CHECK-NEXT: %8 = SI_SPILL_V64_RESTORE %stack.0, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr5, 0, implicit %exec :: (load 8 from %stack.0, align 4)
+# CHECK-NEXT: S_NOP 0, implicit %8.sub1
+# CHECK-NEXT: S_NOP 0, implicit undef %9.sub0
+
+name: expecting_non_empty_interval
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: vreg_64, preferred-register: '' }
+  - { id: 1, class: vgpr_32, preferred-register: '' }
+  - { id: 2, class: vgpr_32, preferred-register: '' }
+  - { id: 3, class: vreg_64, preferred-register: '' }
+body:             |
+  bb.0:
+    successors: %bb.1
+    undef %0.sub1 = V_MAC_F32_e32 0, undef %1, undef %0.sub1, implicit %exec
+    undef %3.sub1 = V_MOV_B32_e32 1786773504, implicit %exec
+    dead %2 = V_MUL_F32_e32 0, %3.sub1, implicit %exec
+
+  bb.1:
+    S_NOP 0, implicit %3.sub1
+    S_NOP 0, implicit %0.sub1
+    S_NOP 0, implicit undef %0.sub0
+    S_ENDPGM
+
+...
+
+# Similar assert which happens when trying to rematerialize.
+# https://bugs.llvm.org/show_bug.cgi?id=33884
+---
+# CHECK-LABEL: name: rematerialize_empty_interval_has_reference
+
+# CHECK-NOT: MOV
+# CHECK: undef %3.sub2 = V_MOV_B32_e32 1786773504, implicit %exec
+
+# CHECK: bb.1:
+# CHECK-NEXT: S_NOP 0, implicit %3.sub2
+# CHECK-NEXT: S_NOP 0, implicit undef %6.sub0
+# CHECK-NEXT: undef %4.sub2 = V_MOV_B32_e32 0, implicit %exec
+# CHECK-NEXT: S_NOP 0, implicit %4.sub2
+name: rematerialize_empty_interval_has_reference
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: vreg_128, preferred-register: '' }
+  - { id: 1, class: vgpr_32, preferred-register: '' }
+  - { id: 2, class: vgpr_32, preferred-register: '' }
+  - { id: 3, class: vreg_128, preferred-register: '' }
+body:             |
+  bb.0:
+    successors: %bb.1
+
+    undef %0.sub2 = V_MOV_B32_e32 0, implicit %exec
+    undef %3.sub2 = V_MOV_B32_e32 1786773504, implicit %exec
+
+  bb.1:
+    S_NOP 0, implicit %3.sub2
+    S_NOP 0, implicit undef %0.sub0
+    S_NOP 0, implicit %0.sub2
+
+...

Modified: vendor/llvm/dist/test/CodeGen/X86/memcmp-minsize.ll
==============================================================================
--- vendor/llvm/dist/test/CodeGen/X86/memcmp-minsize.ll	Sat Jul 29 20:19:37 2017	(r321690)
+++ vendor/llvm/dist/test/CodeGen/X86/memcmp-minsize.ll	Sat Jul 29 21:25:18 2017	(r321691)
@@ -527,6 +527,93 @@ define i1 @length16_eq_const(i8* %X) nounwind minsize 
   ret i1 %c
 }
 
+; PR33914 - https://bugs.llvm.org/show_bug.cgi?id=33914
+
+define i32 @length24(i8* %X, i8* %Y) nounwind minsize {
+; X86-LABEL: length24:
+; X86:       # BB#0:
+; X86-NEXT:    subl $16, %esp
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl %ecx, {{[0-9]+}}(%esp)
+; X86-NEXT:    movl %eax, (%esp)
+; X86-NEXT:    andl $0, {{[0-9]+}}(%esp)
+; X86-NEXT:    movl $24, {{[0-9]+}}(%esp)
+; X86-NEXT:    calll memcmp
+; X86-NEXT:    addl $16, %esp
+; X86-NEXT:    retl
+;
+; X64-LABEL: length24:
+; X64:       # BB#0:
+; X64-NEXT:    pushq $24
+; X64-NEXT:    popq %rdx
+; X64-NEXT:    jmp memcmp # TAILCALL
+  %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 24) nounwind
+  ret i32 %m
+}
+
+define i1 @length24_eq(i8* %x, i8* %y) nounwind minsize {
+; X86-LABEL: length24_eq:
+; X86:       # BB#0:
+; X86-NEXT:    subl $16, %esp
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl %ecx, {{[0-9]+}}(%esp)
+; X86-NEXT:    movl %eax, (%esp)
+; X86-NEXT:    andl $0, {{[0-9]+}}(%esp)
+; X86-NEXT:    movl $24, {{[0-9]+}}(%esp)
+; X86-NEXT:    calll memcmp
+; X86-NEXT:    testl %eax, %eax
+; X86-NEXT:    sete %al
+; X86-NEXT:    addl $16, %esp
+; X86-NEXT:    retl
+;
+; X64-LABEL: length24_eq:
+; X64:       # BB#0:
+; X64-NEXT:    pushq %rax
+; X64-NEXT:    pushq $24
+; X64-NEXT:    popq %rdx
+; X64-NEXT:    callq memcmp
+; X64-NEXT:    testl %eax, %eax
+; X64-NEXT:    sete %al
+; X64-NEXT:    popq %rcx
+; X64-NEXT:    retq
+  %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 24) nounwind
+  %cmp = icmp eq i32 %call, 0
+  ret i1 %cmp
+}
+
+define i1 @length24_eq_const(i8* %X) nounwind minsize {
+; X86-LABEL: length24_eq_const:
+; X86:       # BB#0:
+; X86-NEXT:    subl $16, %esp
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl %eax, (%esp)
+; X86-NEXT:    andl $0, {{[0-9]+}}(%esp)
+; X86-NEXT:    movl $24, {{[0-9]+}}(%esp)
+; X86-NEXT:    movl $.L.str, {{[0-9]+}}(%esp)
+; X86-NEXT:    calll memcmp
+; X86-NEXT:    testl %eax, %eax
+; X86-NEXT:    setne %al
+; X86-NEXT:    addl $16, %esp
+; X86-NEXT:    retl
+;
+; X64-LABEL: length24_eq_const:
+; X64:       # BB#0:
+; X64-NEXT:    pushq %rax
+; X64-NEXT:    pushq $24
+; X64-NEXT:    popq %rdx
+; X64-NEXT:    movl $.L.str, %esi
+; X64-NEXT:    callq memcmp
+; X64-NEXT:    testl %eax, %eax
+; X64-NEXT:    setne %al
+; X64-NEXT:    popq %rcx
+; X64-NEXT:    retq
+  %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i32 0, i32 0), i64 24) nounwind
+  %c = icmp ne i32 %m, 0
+  ret i1 %c
+}
+
 define i32 @length32(i8* %X, i8* %Y) nounwind minsize {
 ; X86-LABEL: length32:
 ; X86:       # BB#0:

Modified: vendor/llvm/dist/test/CodeGen/X86/memcmp-optsize.ll
==============================================================================
--- vendor/llvm/dist/test/CodeGen/X86/memcmp-optsize.ll	Sat Jul 29 20:19:37 2017	(r321690)
+++ vendor/llvm/dist/test/CodeGen/X86/memcmp-optsize.ll	Sat Jul 29 21:25:18 2017	(r321691)
@@ -699,6 +699,82 @@ define i1 @length16_eq_const(i8* %X) nounwind optsize 
   ret i1 %c
 }

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***


