Date:      Fri, 25 Nov 2016 19:06:00 +0000 (UTC)
From:      Dimitry Andric <dim@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-vendor@freebsd.org
Subject:   svn commit: r309152 - in vendor/llvm/dist: . include/llvm/Analysis include/llvm/ExecutionEngine include/llvm/IR lib/Analysis lib/CodeGen lib/Linker lib/Support/Unix lib/Target/ARM lib/Target/ARM/As...
Message-ID:  <201611251906.uAPJ60lh071692@repo.freebsd.org>

Author: dim
Date: Fri Nov 25 19:05:59 2016
New Revision: 309152
URL: https://svnweb.freebsd.org/changeset/base/309152

Log:
  Vendor import of llvm release_39 branch r287912:
  https://llvm.org/svn/llvm-project/llvm/branches/release_39@287912

Added:
  vendor/llvm/dist/test/Analysis/LoopAccessAnalysis/memcheck-off-by-one-error.ll
  vendor/llvm/dist/test/CodeGen/PowerPC/atomic-minmax.ll
  vendor/llvm/dist/test/CodeGen/PowerPC/pr30451.ll
  vendor/llvm/dist/test/CodeGen/X86/branchfolding-undef.mir
  vendor/llvm/dist/test/CodeGen/X86/no-and8ri8.ll
  vendor/llvm/dist/test/CodeGen/X86/pr30298.ll
  vendor/llvm/dist/test/LTO/X86/Inputs/type-mapping-src.ll
  vendor/llvm/dist/test/LTO/X86/type-mapping-bug.ll
  vendor/llvm/dist/test/MC/ARM/ldr-pseudo-wide.s   (contents, props changed)
  vendor/llvm/dist/test/ThinLTO/X86/Inputs/crash_debuginfo.ll
  vendor/llvm/dist/test/ThinLTO/X86/Inputs/import_opaque_type.ll
  vendor/llvm/dist/test/ThinLTO/X86/crash_debuginfo.ll
  vendor/llvm/dist/test/ThinLTO/X86/import_opaque_type.ll
  vendor/llvm/dist/test/Transforms/GVN/2016-08-30-MaskedScatterGather.ll
  vendor/llvm/dist/test/Transforms/JumpThreading/pr27840.ll
Modified:
  vendor/llvm/dist/CMakeLists.txt
  vendor/llvm/dist/include/llvm/Analysis/LoopAccessAnalysis.h
  vendor/llvm/dist/include/llvm/ExecutionEngine/RTDyldMemoryManager.h
  vendor/llvm/dist/include/llvm/IR/Intrinsics.td
  vendor/llvm/dist/include/llvm/IR/TypeFinder.h
  vendor/llvm/dist/lib/Analysis/LoopAccessAnalysis.cpp
  vendor/llvm/dist/lib/CodeGen/BranchFolding.cpp
  vendor/llvm/dist/lib/Linker/IRMover.cpp
  vendor/llvm/dist/lib/Support/Unix/Signals.inc
  vendor/llvm/dist/lib/Target/ARM/ARMInstrThumb2.td
  vendor/llvm/dist/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
  vendor/llvm/dist/lib/Target/PowerPC/PPCISelLowering.cpp
  vendor/llvm/dist/lib/Target/PowerPC/PPCISelLowering.h
  vendor/llvm/dist/lib/Target/PowerPC/PPCInstr64Bit.td
  vendor/llvm/dist/lib/Target/PowerPC/PPCInstrInfo.td
  vendor/llvm/dist/lib/Target/X86/X86ISelLowering.cpp
  vendor/llvm/dist/lib/Target/X86/X86InstrAVX512.td
  vendor/llvm/dist/lib/Transforms/Scalar/JumpThreading.cpp
  vendor/llvm/dist/test/Analysis/LoopAccessAnalysis/number-of-memchecks.ll
  vendor/llvm/dist/test/Analysis/LoopAccessAnalysis/reverse-memcheck-bounds.ll
  vendor/llvm/dist/test/CodeGen/PowerPC/p9-xxinsertw-xxextractuw.ll
  vendor/llvm/dist/test/CodeGen/X86/avx-vbroadcast.ll
  vendor/llvm/dist/test/Transforms/LoopVectorize/runtime-check-readonly.ll
  vendor/llvm/dist/test/Transforms/LoopVectorize/tbaa-nodep.ll
  vendor/llvm/dist/test/Transforms/LoopVersioningLICM/loopversioningLICM1.ll

Modified: vendor/llvm/dist/CMakeLists.txt
==============================================================================
--- vendor/llvm/dist/CMakeLists.txt	Fri Nov 25 18:57:14 2016	(r309151)
+++ vendor/llvm/dist/CMakeLists.txt	Fri Nov 25 19:05:59 2016	(r309152)
@@ -27,7 +27,7 @@ if(NOT DEFINED LLVM_VERSION_MINOR)
   set(LLVM_VERSION_MINOR 9)
 endif()
 if(NOT DEFINED LLVM_VERSION_PATCH)
-  set(LLVM_VERSION_PATCH 0)
+  set(LLVM_VERSION_PATCH 1)
 endif()
 if(NOT DEFINED LLVM_VERSION_SUFFIX)
   set(LLVM_VERSION_SUFFIX "")

Modified: vendor/llvm/dist/include/llvm/Analysis/LoopAccessAnalysis.h
==============================================================================
--- vendor/llvm/dist/include/llvm/Analysis/LoopAccessAnalysis.h	Fri Nov 25 18:57:14 2016	(r309151)
+++ vendor/llvm/dist/include/llvm/Analysis/LoopAccessAnalysis.h	Fri Nov 25 19:05:59 2016	(r309152)
@@ -334,9 +334,11 @@ public:
   struct PointerInfo {
     /// Holds the pointer value that we need to check.
     TrackingVH<Value> PointerValue;
-    /// Holds the pointer value at the beginning of the loop.
+    /// Holds the smallest byte address accessed by the pointer throughout all
+    /// iterations of the loop.
     const SCEV *Start;
-    /// Holds the pointer value at the end of the loop.
+    /// Holds the largest byte address accessed by the pointer throughout all
+    /// iterations of the loop, plus 1.
     const SCEV *End;
     /// Holds the information if this pointer is used for writing to memory.
     bool IsWritePtr;
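
For context: Start and End now describe a half-open byte interval covering every access the pointer makes during the loop. A minimal sketch of the convention, assuming a constant step and a known back-edge taken count (all names here are illustrative, not from the patch):

    #include <algorithm>
    #include <cstdint>

    // First access at byte offset A, last access at A + Step*N, each access
    // S bytes wide. [Start, End) then covers every byte the pointer touches.
    struct AccessInterval {
      uint64_t Start;
      uint64_t End; // largest accessed byte address, plus one
    };

    AccessInterval computeInterval(uint64_t A, int64_t Step, uint64_t N,
                                   uint64_t S) {
      uint64_t B = A + Step * N; // for negative Step this is A - |Step|*N
      return {std::min(A, B), std::max(A, B) + S};
    }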

Modified: vendor/llvm/dist/include/llvm/ExecutionEngine/RTDyldMemoryManager.h
==============================================================================
--- vendor/llvm/dist/include/llvm/ExecutionEngine/RTDyldMemoryManager.h	Fri Nov 25 18:57:14 2016	(r309151)
+++ vendor/llvm/dist/include/llvm/ExecutionEngine/RTDyldMemoryManager.h	Fri Nov 25 19:05:59 2016	(r309152)
@@ -72,7 +72,7 @@ public:
   }
 
   void deregisterEHFrames(uint8_t *Addr, uint64_t LoadAddr, size_t Size) override {
-    registerEHFramesInProcess(Addr, Size);
+    deregisterEHFramesInProcess(Addr, Size);
   }
 
   /// This method returns the address of the specified function or variable in
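
For context: the old override was a copy-paste slip, so tearing a module down re-registered its EH frames instead of removing them. A hedged sketch of one way a client can make the pairing structurally hard to get wrong; EHFrameGuard and its parameters are hypothetical, not part of this API:

    #include <cstddef>
    #include <cstdint>

    // RAII guard: whatever the constructor registers, the destructor removes
    // with the matching helper, so the two calls cannot drift apart.
    template <void (*Reg)(uint8_t *, std::size_t),
              void (*Dereg)(uint8_t *, std::size_t)>
    class EHFrameGuard {
      uint8_t *Addr;
      std::size_t Size;

    public:
      EHFrameGuard(uint8_t *A, std::size_t S) : Addr(A), Size(S) { Reg(A, S); }
      ~EHFrameGuard() { Dereg(Addr, Size); }
      EHFrameGuard(const EHFrameGuard &) = delete;
      EHFrameGuard &operator=(const EHFrameGuard &) = delete;
    };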

Modified: vendor/llvm/dist/include/llvm/IR/Intrinsics.td
==============================================================================
--- vendor/llvm/dist/include/llvm/IR/Intrinsics.td	Fri Nov 25 18:57:14 2016	(r309151)
+++ vendor/llvm/dist/include/llvm/IR/Intrinsics.td	Fri Nov 25 19:05:59 2016	(r309152)
@@ -668,13 +668,12 @@ def int_masked_gather: Intrinsic<[llvm_a
                                  [LLVMVectorOfPointersToElt<0>, llvm_i32_ty,
                                   LLVMVectorSameWidth<0, llvm_i1_ty>,
                                   LLVMMatchType<0>],
-                                 [IntrReadMem, IntrArgMemOnly]>;
+                                 [IntrReadMem]>;
 
 def int_masked_scatter: Intrinsic<[],
                                   [llvm_anyvector_ty,
                                    LLVMVectorOfPointersToElt<0>, llvm_i32_ty,
-                                   LLVMVectorSameWidth<0, llvm_i1_ty>],
-                                  [IntrArgMemOnly]>;
+                                   LLVMVectorSameWidth<0, llvm_i1_ty>]>;
 
 // Test whether a pointer is associated with a type metadata identifier.
 def int_type_test : Intrinsic<[llvm_i1_ty], [llvm_ptr_ty, llvm_metadata_ty],
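
For context: IntrArgMemOnly asserts that the call only touches memory reachable through its pointer arguments, but gather/scatter address a whole vector of unrelated locations, so the attribute let alias analysis (and thus GVN, per the new test below) draw wrong conclusions. A scalar sketch of the gather semantics, written as illustrative C++ rather than IR:

    #include <cstdint>

    // Element-by-element meaning of llvm.masked.gather for a VF-wide vector:
    // every active lane reads through its own, arbitrary pointer.
    void maskedGather(int32_t *Result, int32_t *const *Ptrs, const bool *Mask,
                      const int32_t *PassThru, unsigned VF) {
      for (unsigned I = 0; I != VF; ++I)
        Result[I] = Mask[I] ? *Ptrs[I] : PassThru[I];
    }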

Modified: vendor/llvm/dist/include/llvm/IR/TypeFinder.h
==============================================================================
--- vendor/llvm/dist/include/llvm/IR/TypeFinder.h	Fri Nov 25 18:57:14 2016	(r309151)
+++ vendor/llvm/dist/include/llvm/IR/TypeFinder.h	Fri Nov 25 19:05:59 2016	(r309152)
@@ -59,6 +59,8 @@ public:
 
   StructType *&operator[](unsigned Idx) { return StructTypes[Idx]; }
 
+  DenseSet<const MDNode *> &getVisitedMetadata() { return VisitedMetadata; }
+
 private:
   /// incorporateType - This method adds the type to the list of used
   /// structures if it's not in there already.

Modified: vendor/llvm/dist/lib/Analysis/LoopAccessAnalysis.cpp
==============================================================================
--- vendor/llvm/dist/lib/Analysis/LoopAccessAnalysis.cpp	Fri Nov 25 18:57:14 2016	(r309151)
+++ vendor/llvm/dist/lib/Analysis/LoopAccessAnalysis.cpp	Fri Nov 25 19:05:59 2016	(r309152)
@@ -148,6 +148,19 @@ const SCEV *llvm::replaceSymbolicStrideS
   return OrigSCEV;
 }
 
+/// Calculate the Start and End points of a memory access.
+/// Let's assume A is the first access and B is the memory access on the N-th
+/// loop iteration. Then B is calculated as:
+///   B = A + Step*N
+/// The Step value may be positive or negative.
+/// N is the calculated back-edge taken count:
+///   N = (TripCount > 0) ? RoundDown(TripCount - 1, VF) : 0
+/// The Start and End points are then calculated as:
+///   Start = UMIN(A, B); End = UMAX(A, B) + SizeOfElt,
+/// where SizeOfElt is the size of a single memory access in bytes.
+///
+/// There is no conflict when the intervals are disjoint:
+///   NoConflict = (P2.Start >= P1.End) || (P1.Start >= P2.End)
 void RuntimePointerChecking::insert(Loop *Lp, Value *Ptr, bool WritePtr,
                                     unsigned DepSetId, unsigned ASId,
                                     const ValueToValueMap &Strides,
@@ -176,12 +189,17 @@ void RuntimePointerChecking::insert(Loop
       if (CStep->getValue()->isNegative())
         std::swap(ScStart, ScEnd);
     } else {
-      // Fallback case: the step is not constant, but the we can still
+      // Fallback case: the step is not constant, but we can still
       // get the upper and lower bounds of the interval by using min/max
       // expressions.
       ScStart = SE->getUMinExpr(ScStart, ScEnd);
       ScEnd = SE->getUMaxExpr(AR->getStart(), ScEnd);
     }
+    // Add the size of the pointed element to ScEnd.
+    unsigned EltSize =
+      Ptr->getType()->getPointerElementType()->getScalarSizeInBits() / 8;
+    const SCEV *EltSizeSCEV = SE->getConstant(ScEnd->getType(), EltSize);
+    ScEnd = SE->getAddExpr(ScEnd, EltSizeSCEV);
   }
 
   Pointers.emplace_back(Ptr, ScStart, ScEnd, WritePtr, DepSetId, ASId, Sc);
@@ -1863,9 +1881,17 @@ std::pair<Instruction *, Instruction *> 
     Value *End0 =   ChkBuilder.CreateBitCast(A.End,   PtrArithTy1, "bc");
     Value *End1 =   ChkBuilder.CreateBitCast(B.End,   PtrArithTy0, "bc");
 
-    Value *Cmp0 = ChkBuilder.CreateICmpULE(Start0, End1, "bound0");
+    // [A|B].Start points to the first accessed byte under base [A|B].
+    // [A|B].End points to the last accessed byte, plus one.
+    // There is no conflict when the intervals are disjoint:
+    // NoConflict = (B.Start >= A.End) || (A.Start >= B.End)
+    //
+    // bound0 = (B.Start < A.End)
+    // bound1 = (A.Start < B.End)
+    //  IsConflict = bound0 & bound1
+    Value *Cmp0 = ChkBuilder.CreateICmpULT(Start0, End1, "bound0");
     FirstInst = getFirstInst(FirstInst, Cmp0, Loc);
-    Value *Cmp1 = ChkBuilder.CreateICmpULE(Start1, End0, "bound1");
+    Value *Cmp1 = ChkBuilder.CreateICmpULT(Start1, End0, "bound1");
     FirstInst = getFirstInst(FirstInst, Cmp1, Loc);
     Value *IsConflict = ChkBuilder.CreateAnd(Cmp0, Cmp1, "found.conflict");
     FirstInst = getFirstInst(FirstInst, IsConflict, Loc);
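
For context: with End now one past the last accessed byte, the ranges are half-open and disjointness becomes a strict comparison. A minimal sketch of the predicate the emitted IR computes (plain C++, illustrative only):

    #include <cstdint>

    struct Range { uint64_t Start, End; }; // half-open: [Start, End)

    // Negation of NoConflict = (B.Start >= A.End) || (A.Start >= B.End).
    bool isConflict(const Range &A, const Range &B) {
      bool Bound0 = A.Start < B.End;
      bool Bound1 = B.Start < A.End;
      return Bound0 && Bound1;
    }

Pairing the added SizeOfElt on End with ULT keeps back-to-back ranges such as [0, 32) and [32, 64) conflict-free, while catching the partial overlap that the previous ULE-on-inclusive-bounds form missed.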

Modified: vendor/llvm/dist/lib/CodeGen/BranchFolding.cpp
==============================================================================
--- vendor/llvm/dist/lib/CodeGen/BranchFolding.cpp	Fri Nov 25 18:57:14 2016	(r309151)
+++ vendor/llvm/dist/lib/CodeGen/BranchFolding.cpp	Fri Nov 25 19:05:59 2016	(r309152)
@@ -776,9 +776,8 @@ bool BranchFolder::CreateCommonTailOnlyB
 }
 
 static void
-mergeMMOsFromMemoryOperations(MachineBasicBlock::iterator MBBIStartPos,
-                              MachineBasicBlock &MBBCommon) {
-  // Merge MMOs from memory operations in the common block.
+mergeOperations(MachineBasicBlock::iterator MBBIStartPos,
+                MachineBasicBlock &MBBCommon) {
   MachineBasicBlock *MBB = MBBIStartPos->getParent();
   // Note CommonTailLen does not necessarily match the size of
   // the common BB nor all its instructions because of debug
@@ -808,8 +807,18 @@ mergeMMOsFromMemoryOperations(MachineBas
            "Reached BB end within common tail length!");
     assert(MBBICommon->isIdenticalTo(*MBBI) && "Expected matching MIIs!");
 
+    // Merge MMOs from memory operations in the common block.
     if (MBBICommon->mayLoad() || MBBICommon->mayStore())
       MBBICommon->setMemRefs(MBBICommon->mergeMemRefsWith(*MBBI));
+    // Drop undef flags if they aren't present in all merged instructions.
+    for (unsigned I = 0, E = MBBICommon->getNumOperands(); I != E; ++I) {
+      MachineOperand &MO = MBBICommon->getOperand(I);
+      if (MO.isReg() && MO.isUndef()) {
+        const MachineOperand &OtherMO = MBBI->getOperand(I);
+        if (!OtherMO.isUndef())
+          MO.setIsUndef(false);
+      }
+    }
 
     ++MBBI;
     ++MBBICommon;
@@ -928,8 +937,8 @@ bool BranchFolder::TryTailMergeBlocks(Ma
         continue;
       DEBUG(dbgs() << "BB#" << SameTails[i].getBlock()->getNumber()
                    << (i == e-1 ? "" : ", "));
-      // Merge MMOs from memory operations as needed.
-      mergeMMOsFromMemoryOperations(SameTails[i].getTailStartPos(), *MBB);
+      // Merge operations (MMOs, undef flags)
+      mergeOperations(SameTails[i].getTailStartPos(), *MBB);
       // Hack the end off BB i, making it jump to BB commonTailIndex instead.
       ReplaceTailWithBranchTo(SameTails[i].getTailStartPos(), MBB);
       // BB i is no longer a predecessor of SuccBB; remove it from the worklist.
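
For context: when several identical tails are merged into one block, an operand may keep its undef flag only if every merged copy had it; otherwise later passes could treat a register that some path does define as dead. The rule reduces to an AND across the merged blocks, sketched here with a hypothetical helper:

    #include <algorithm>
    #include <vector>

    // Merged undef flag = AND of the per-tail flags for the same operand.
    bool mergedUndefFlag(const std::vector<bool> &PerTailUndef) {
      return std::all_of(PerTailUndef.begin(), PerTailUndef.end(),
                         [](bool U) { return U; });
    }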

Modified: vendor/llvm/dist/lib/Linker/IRMover.cpp
==============================================================================
--- vendor/llvm/dist/lib/Linker/IRMover.cpp	Fri Nov 25 18:57:14 2016	(r309151)
+++ vendor/llvm/dist/lib/Linker/IRMover.cpp	Fri Nov 25 19:05:59 2016	(r309152)
@@ -694,6 +694,14 @@ void IRLinker::computeTypeMapping() {
     if (!ST->hasName())
       continue;
 
+    if (TypeMap.DstStructTypesSet.hasType(ST)) {
+      // This is actually a type from the destination module.
+      // getIdentifiedStructTypes() can have found it by walking debug info
+      // metadata nodes, some of which get linked by name when ODR Type Uniquing
+      // is enabled on the Context, from the source to the destination module.
+      continue;
+    }
+
     // Check to see if there is a dot in the name followed by a digit.
     size_t DotPos = ST->getName().rfind('.');
     if (DotPos == 0 || DotPos == StringRef::npos ||
@@ -1336,13 +1344,19 @@ bool IRMover::IdentifiedStructTypeSet::h
 
 IRMover::IRMover(Module &M) : Composite(M) {
   TypeFinder StructTypes;
-  StructTypes.run(M, true);
+  StructTypes.run(M, /* OnlyNamed */ false);
   for (StructType *Ty : StructTypes) {
     if (Ty->isOpaque())
       IdentifiedStructTypes.addOpaque(Ty);
     else
       IdentifiedStructTypes.addNonOpaque(Ty);
   }
+  // Self-map metadata nodes in the destination module. This is needed when
+  // DebugTypeODRUniquing is enabled on the LLVMContext, since metadata in the
+  // destination module may be reached from the source module.
+  for (auto *MD : StructTypes.getVisitedMetadata()) {
+    SharedMDs[MD].reset(const_cast<MDNode *>(MD));
+  }
 }
 
 Error IRMover::move(
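
For context: when DebugTypeODRUniquing is enabled, debug-info type nodes carrying an ODR identifier are uniqued per LLVMContext, so a walk of the source module's metadata can land on nodes (and named struct types) that already belong to the destination. Pre-seeding SharedMDs with identity mappings keeps the mover from cloning them. A minimal sketch of enabling the mode, assuming the LLVMContext API on this branch:

    #include "llvm/IR/LLVMContext.h"

    int main() {
      llvm::LLVMContext Ctx;
      // Debug-info type nodes with an ODR identifier are now shared across
      // every module created in this context.
      Ctx.enableDebugTypeODRUniquing();
      return 0;
    }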

Modified: vendor/llvm/dist/lib/Support/Unix/Signals.inc
==============================================================================
--- vendor/llvm/dist/lib/Support/Unix/Signals.inc	Fri Nov 25 18:57:14 2016	(r309151)
+++ vendor/llvm/dist/lib/Support/Unix/Signals.inc	Fri Nov 25 19:05:59 2016	(r309152)
@@ -412,7 +412,7 @@ void llvm::sys::PrintStackTrace(raw_ostr
 
   if (printSymbolizedStackTrace(Argv0, StackTrace, depth, OS))
     return;
-#if HAVE_DLFCN_H && __GNUG__
+#if HAVE_DLFCN_H && __GNUG__ && !defined(__CYGWIN__)
   int width = 0;
   for (int i = 0; i < depth; ++i) {
     Dl_info dlinfo;

Modified: vendor/llvm/dist/lib/Target/ARM/ARMInstrThumb2.td
==============================================================================
--- vendor/llvm/dist/lib/Target/ARM/ARMInstrThumb2.td	Fri Nov 25 18:57:14 2016	(r309151)
+++ vendor/llvm/dist/lib/Target/ARM/ARMInstrThumb2.td	Fri Nov 25 19:05:59 2016	(r309152)
@@ -4819,6 +4819,10 @@ def : t2InstAlias<"add${p} $Rd, pc, $imm
 def t2LDRConstPool
   : t2AsmPseudo<"ldr${p} $Rt, $immediate",
                 (ins GPRnopc:$Rt, const_pool_asm_imm:$immediate, pred:$p)>;
+// Version w/ the .w suffix.
+def : t2InstAlias<"ldr${p}.w $Rt, $immediate",
+                  (t2LDRConstPool GPRnopc:$Rt,
+                  const_pool_asm_imm:$immediate, pred:$p)>;
 
 // PLD/PLDW/PLI with alternate literal form.
 def : t2InstAlias<"pld${p} $addr",

Modified: vendor/llvm/dist/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
==============================================================================
--- vendor/llvm/dist/lib/Target/ARM/AsmParser/ARMAsmParser.cpp	Fri Nov 25 18:57:14 2016	(r309151)
+++ vendor/llvm/dist/lib/Target/ARM/AsmParser/ARMAsmParser.cpp	Fri Nov 25 19:05:59 2016	(r309152)
@@ -6933,6 +6933,9 @@ bool ARMAsmParser::processInstruction(MC
     else if (Inst.getOpcode() == ARM::t2LDRConstPool)
       TmpInst.setOpcode(ARM::t2LDRpci);
     const ARMOperand &PoolOperand =
+      (static_cast<ARMOperand &>(*Operands[2]).isToken() &&
+       static_cast<ARMOperand &>(*Operands[2]).getToken() == ".w") ?
+      static_cast<ARMOperand &>(*Operands[4]) :
       static_cast<ARMOperand &>(*Operands[3]);
     const MCExpr *SubExprVal = PoolOperand.getConstantPoolImm();
     // If SubExprVal is a constant we may be able to use a MOV
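
For context: the alias lets the integrated assembler accept the .w-suffixed constant-pool form, e.g. "ldr.w r1, =0x89abcdef" (an illustrative line; the committed coverage is in ldr-pseudo-wide.s). When the ".w" token is present it occupies one more operand slot, which is why the parser change above selects the constant-pool operand from Operands[4] instead of Operands[3].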

Modified: vendor/llvm/dist/lib/Target/PowerPC/PPCISelLowering.cpp
==============================================================================
--- vendor/llvm/dist/lib/Target/PowerPC/PPCISelLowering.cpp	Fri Nov 25 18:57:14 2016	(r309151)
+++ vendor/llvm/dist/lib/Target/PowerPC/PPCISelLowering.cpp	Fri Nov 25 19:05:59 2016	(r309152)
@@ -665,9 +665,10 @@ PPCTargetLowering::PPCTargetLowering(con
       addRegisterClass(MVT::v2i64, &PPC::VRRCRegClass);
       addRegisterClass(MVT::v1i128, &PPC::VRRCRegClass);
     }
+
     if (Subtarget.hasP9Vector()) {
-      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Legal);
-      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Legal);
+      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
+      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);
     }
   }
 
@@ -7846,6 +7847,17 @@ SDValue PPCTargetLowering::LowerSCALAR_T
   return DAG.getLoad(Op.getValueType(), dl, Store, FIdx, MachinePointerInfo());
 }
 
+SDValue PPCTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
+                                                  SelectionDAG &DAG) const {
+  assert(Op.getOpcode() == ISD::INSERT_VECTOR_ELT &&
+         "Should only be called for ISD::INSERT_VECTOR_ELT");
+  ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(2));
+  // We have legal lowering for constant indices but not for variable ones.
+  if (C)
+    return Op;
+  return SDValue();
+}
+
 SDValue PPCTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
                                                    SelectionDAG &DAG) const {
   SDLoc dl(Op);
@@ -8248,6 +8260,7 @@ SDValue PPCTargetLowering::LowerOperatio
   case ISD::SCALAR_TO_VECTOR:   return LowerSCALAR_TO_VECTOR(Op, DAG);
   case ISD::SIGN_EXTEND_INREG:  return LowerSIGN_EXTEND_INREG(Op, DAG);
   case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
+  case ISD::INSERT_VECTOR_ELT:  return LowerINSERT_VECTOR_ELT(Op, DAG);
   case ISD::MUL:                return LowerMUL(Op, DAG);
 
   // For counter-based loop handling.
@@ -8372,7 +8385,9 @@ Instruction* PPCTargetLowering::emitTrai
 MachineBasicBlock *
 PPCTargetLowering::EmitAtomicBinary(MachineInstr &MI, MachineBasicBlock *BB,
                                     unsigned AtomicSize,
-                                    unsigned BinOpcode) const {
+                                    unsigned BinOpcode,
+                                    unsigned CmpOpcode,
+                                    unsigned CmpPred) const {
   // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
 
@@ -8412,8 +8427,12 @@ PPCTargetLowering::EmitAtomicBinary(Mach
   DebugLoc dl = MI.getDebugLoc();
 
   MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
+  MachineBasicBlock *loop2MBB =
+    CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr;
   MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
   F->insert(It, loopMBB);
+  if (CmpOpcode)
+    F->insert(It, loop2MBB);
   F->insert(It, exitMBB);
   exitMBB->splice(exitMBB->begin(), BB,
                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
@@ -8435,11 +8454,40 @@ PPCTargetLowering::EmitAtomicBinary(Mach
   //   st[wd]cx. r0, ptr
   //   bne- loopMBB
   //   fallthrough --> exitMBB
+
+  // For max/min...
+  //  loopMBB:
+  //   l[wd]arx dest, ptr
+  //   cmpl?[wd] incr, dest
+  //   bgt exitMBB
+  //  loop2MBB:
+  //   st[wd]cx. dest, ptr
+  //   bne- loopMBB
+  //   fallthrough --> exitMBB
+
   BB = loopMBB;
   BuildMI(BB, dl, TII->get(LoadMnemonic), dest)
     .addReg(ptrA).addReg(ptrB);
   if (BinOpcode)
     BuildMI(BB, dl, TII->get(BinOpcode), TmpReg).addReg(incr).addReg(dest);
+  if (CmpOpcode) {
+    // Signed comparisons of byte or halfword values must be sign-extended.
+    if (CmpOpcode == PPC::CMPW && AtomicSize < 4) {
+      unsigned ExtReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
+      BuildMI(BB, dl, TII->get(AtomicSize == 1 ? PPC::EXTSB : PPC::EXTSH),
+              ExtReg).addReg(dest);
+      BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
+        .addReg(incr).addReg(ExtReg);
+    } else
+      BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
+        .addReg(incr).addReg(dest);
+
+    BuildMI(BB, dl, TII->get(PPC::BCC))
+      .addImm(CmpPred).addReg(PPC::CR0).addMBB(exitMBB);
+    BB->addSuccessor(loop2MBB);
+    BB->addSuccessor(exitMBB);
+    BB = loop2MBB;
+  }
   BuildMI(BB, dl, TII->get(StoreMnemonic))
     .addReg(TmpReg).addReg(ptrA).addReg(ptrB);
   BuildMI(BB, dl, TII->get(PPC::BCC))
@@ -8457,10 +8505,13 @@ MachineBasicBlock *
 PPCTargetLowering::EmitPartwordAtomicBinary(MachineInstr &MI,
                                             MachineBasicBlock *BB,
                                             bool is8bit, // operation
-                                            unsigned BinOpcode) const {
+                                            unsigned BinOpcode,
+                                            unsigned CmpOpcode,
+                                            unsigned CmpPred) const {
   // If we support part-word atomic mnemonics, just use them
   if (Subtarget.hasPartwordAtomics())
-    return EmitAtomicBinary(MI, BB, is8bit ? 1 : 2, BinOpcode);
+    return EmitAtomicBinary(MI, BB, is8bit ? 1 : 2, BinOpcode,
+                            CmpOpcode, CmpPred);
 
   // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
@@ -8482,8 +8533,12 @@ PPCTargetLowering::EmitPartwordAtomicBin
   DebugLoc dl = MI.getDebugLoc();
 
   MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
+  MachineBasicBlock *loop2MBB =
+    CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr;
   MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
   F->insert(It, loopMBB);
+  if (CmpOpcode)
+    F->insert(It, loop2MBB);
   F->insert(It, exitMBB);
   exitMBB->splice(exitMBB->begin(), BB,
                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
@@ -8568,6 +8623,32 @@ PPCTargetLowering::EmitPartwordAtomicBin
     .addReg(TmpDestReg).addReg(MaskReg);
   BuildMI(BB, dl, TII->get(is64bit ? PPC::AND8 : PPC::AND), Tmp3Reg)
     .addReg(TmpReg).addReg(MaskReg);
+  if (CmpOpcode) {
+    // For unsigned comparisons, we can directly compare the shifted values.
+    // For signed comparisons we shift and sign extend.
+    unsigned SReg = RegInfo.createVirtualRegister(RC);
+    BuildMI(BB, dl, TII->get(is64bit ? PPC::AND8 : PPC::AND), SReg)
+      .addReg(TmpDestReg).addReg(MaskReg);
+    unsigned ValueReg = SReg;
+    unsigned CmpReg = Incr2Reg;
+    if (CmpOpcode == PPC::CMPW) {
+      ValueReg = RegInfo.createVirtualRegister(RC);
+      BuildMI(BB, dl, TII->get(PPC::SRW), ValueReg)
+        .addReg(SReg).addReg(ShiftReg);
+      unsigned ValueSReg = RegInfo.createVirtualRegister(RC);
+      BuildMI(BB, dl, TII->get(is8bit ? PPC::EXTSB : PPC::EXTSH), ValueSReg)
+        .addReg(ValueReg);
+      ValueReg = ValueSReg;
+      CmpReg = incr;
+    }
+    BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
+      .addReg(CmpReg).addReg(ValueReg);
+    BuildMI(BB, dl, TII->get(PPC::BCC))
+      .addImm(CmpPred).addReg(PPC::CR0).addMBB(exitMBB);
+    BB->addSuccessor(loop2MBB);
+    BB->addSuccessor(exitMBB);
+    BB = loop2MBB;
+  }
   BuildMI(BB, dl, TII->get(is64bit ? PPC::OR8 : PPC::OR), Tmp4Reg)
     .addReg(Tmp3Reg).addReg(Tmp2Reg);
   BuildMI(BB, dl, TII->get(PPC::STWCX))
@@ -9074,6 +9155,42 @@ PPCTargetLowering::EmitInstrWithCustomIn
   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I64)
     BB = EmitAtomicBinary(MI, BB, 8, PPC::SUBF8);
 
+  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I8)
+    BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_GE);
+  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I16)
+    BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_GE);
+  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I32)
+    BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_GE);
+  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I64)
+    BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_GE);
+
+  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I8)
+    BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_LE);
+  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I16)
+    BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_LE);
+  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I32)
+    BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_LE);
+  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I64)
+    BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_LE);
+
+  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I8)
+    BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_GE);
+  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I16)
+    BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_GE);
+  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I32)
+    BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_GE);
+  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I64)
+    BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_GE);
+
+  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I8)
+    BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_LE);
+  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I16)
+    BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_LE);
+  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I32)
+    BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_LE);
+  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I64)
+    BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_LE);
+
   else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I8)
     BB = EmitPartwordAtomicBinary(MI, BB, true, 0);
   else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I16)
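
For context: each of the new min/max pseudos expands to a load-reserve / compare / conditional-branch / store-conditional loop, exiting early when the value in memory already satisfies the ordering so no store is attempted. A scalar sketch of what the emitted loop computes for atomicrmw min (illustrative C++, not the backend code):

    #include <atomic>
    #include <cstdint>

    int32_t atomicMin(std::atomic<int32_t> &Mem, int32_t Val) {
      int32_t Old = Mem.load(std::memory_order_relaxed);
      // Retry until the stored value is already <= Val (the "bge exitMBB"
      // path) or the conditional store succeeds (the st[wd]cx. path).
      while (Val < Old &&
             !Mem.compare_exchange_weak(Old, Val, std::memory_order_relaxed))
        ;
      return Old; // atomicrmw returns the previous value
    }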

Modified: vendor/llvm/dist/lib/Target/PowerPC/PPCISelLowering.h
==============================================================================
--- vendor/llvm/dist/lib/Target/PowerPC/PPCISelLowering.h	Fri Nov 25 18:57:14 2016	(r309151)
+++ vendor/llvm/dist/lib/Target/PowerPC/PPCISelLowering.h	Fri Nov 25 19:05:59 2016	(r309152)
@@ -585,11 +585,15 @@ namespace llvm {
     MachineBasicBlock *EmitAtomicBinary(MachineInstr &MI,
                                         MachineBasicBlock *MBB,
                                         unsigned AtomicSize,
-                                        unsigned BinOpcode) const;
+                                        unsigned BinOpcode,
+                                        unsigned CmpOpcode = 0,
+                                        unsigned CmpPred = 0) const;
     MachineBasicBlock *EmitPartwordAtomicBinary(MachineInstr &MI,
                                                 MachineBasicBlock *MBB,
                                                 bool is8bit,
-                                                unsigned Opcode) const;
+                                                unsigned Opcode,
+                                                unsigned CmpOpcode = 0,
+                                                unsigned CmpPred = 0) const;
 
     MachineBasicBlock *emitEHSjLjSetJmp(MachineInstr &MI,
                                         MachineBasicBlock *MBB) const;
@@ -824,6 +828,7 @@ namespace llvm {
     SDValue LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const;
     SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
     SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
+    SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
     SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
     SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
     SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;

Modified: vendor/llvm/dist/lib/Target/PowerPC/PPCInstr64Bit.td
==============================================================================
--- vendor/llvm/dist/lib/Target/PowerPC/PPCInstr64Bit.td	Fri Nov 25 18:57:14 2016	(r309151)
+++ vendor/llvm/dist/lib/Target/PowerPC/PPCInstr64Bit.td	Fri Nov 25 19:05:59 2016	(r309152)
@@ -224,6 +224,18 @@ let usesCustomInserter = 1 in {
     def ATOMIC_LOAD_NAND_I64 : Pseudo<
       (outs g8rc:$dst), (ins memrr:$ptr, g8rc:$incr), "#ATOMIC_LOAD_NAND_I64",
       [(set i64:$dst, (atomic_load_nand_64 xoaddr:$ptr, i64:$incr))]>;
+    def ATOMIC_LOAD_MIN_I64 : Pseudo<
+      (outs g8rc:$dst), (ins memrr:$ptr, g8rc:$incr), "#ATOMIC_LOAD_MIN_I64",
+      [(set i64:$dst, (atomic_load_min_64 xoaddr:$ptr, i64:$incr))]>;
+    def ATOMIC_LOAD_MAX_I64 : Pseudo<
+      (outs g8rc:$dst), (ins memrr:$ptr, g8rc:$incr), "#ATOMIC_LOAD_MAX_I64",
+      [(set i64:$dst, (atomic_load_max_64 xoaddr:$ptr, i64:$incr))]>;
+    def ATOMIC_LOAD_UMIN_I64 : Pseudo<
+      (outs g8rc:$dst), (ins memrr:$ptr, g8rc:$incr), "#ATOMIC_LOAD_UMIN_I64",
+      [(set i64:$dst, (atomic_load_umin_64 xoaddr:$ptr, i64:$incr))]>;
+    def ATOMIC_LOAD_UMAX_I64 : Pseudo<
+      (outs g8rc:$dst), (ins memrr:$ptr, g8rc:$incr), "#ATOMIC_LOAD_UMAX_I64",
+      [(set i64:$dst, (atomic_load_umax_64 xoaddr:$ptr, i64:$incr))]>;
 
     def ATOMIC_CMP_SWAP_I64 : Pseudo<
       (outs g8rc:$dst), (ins memrr:$ptr, g8rc:$old, g8rc:$new), "#ATOMIC_CMP_SWAP_I64",

Modified: vendor/llvm/dist/lib/Target/PowerPC/PPCInstrInfo.td
==============================================================================
--- vendor/llvm/dist/lib/Target/PowerPC/PPCInstrInfo.td	Fri Nov 25 18:57:14 2016	(r309151)
+++ vendor/llvm/dist/lib/Target/PowerPC/PPCInstrInfo.td	Fri Nov 25 19:05:59 2016	(r309152)
@@ -1509,6 +1509,18 @@ let usesCustomInserter = 1 in {
     def ATOMIC_LOAD_NAND_I8 : Pseudo<
       (outs gprc:$dst), (ins memrr:$ptr, gprc:$incr), "#ATOMIC_LOAD_NAND_I8",
       [(set i32:$dst, (atomic_load_nand_8 xoaddr:$ptr, i32:$incr))]>;
+    def ATOMIC_LOAD_MIN_I8 : Pseudo<
+      (outs gprc:$dst), (ins memrr:$ptr, gprc:$incr), "#ATOMIC_LOAD_MIN_I8",
+      [(set i32:$dst, (atomic_load_min_8 xoaddr:$ptr, i32:$incr))]>;
+    def ATOMIC_LOAD_MAX_I8 : Pseudo<
+      (outs gprc:$dst), (ins memrr:$ptr, gprc:$incr), "#ATOMIC_LOAD_MAX_I8",
+      [(set i32:$dst, (atomic_load_max_8 xoaddr:$ptr, i32:$incr))]>;
+    def ATOMIC_LOAD_UMIN_I8 : Pseudo<
+      (outs gprc:$dst), (ins memrr:$ptr, gprc:$incr), "#ATOMIC_LOAD_UMIN_I8",
+      [(set i32:$dst, (atomic_load_umin_8 xoaddr:$ptr, i32:$incr))]>;
+    def ATOMIC_LOAD_UMAX_I8 : Pseudo<
+      (outs gprc:$dst), (ins memrr:$ptr, gprc:$incr), "#ATOMIC_LOAD_UMAX_I8",
+      [(set i32:$dst, (atomic_load_umax_8 xoaddr:$ptr, i32:$incr))]>;
     def ATOMIC_LOAD_ADD_I16 : Pseudo<
       (outs gprc:$dst), (ins memrr:$ptr, gprc:$incr), "#ATOMIC_LOAD_ADD_I16",
       [(set i32:$dst, (atomic_load_add_16 xoaddr:$ptr, i32:$incr))]>;
@@ -1527,6 +1539,18 @@ let usesCustomInserter = 1 in {
     def ATOMIC_LOAD_NAND_I16 : Pseudo<
       (outs gprc:$dst), (ins memrr:$ptr, gprc:$incr), "#ATOMIC_LOAD_NAND_I16",
       [(set i32:$dst, (atomic_load_nand_16 xoaddr:$ptr, i32:$incr))]>;
+    def ATOMIC_LOAD_MIN_I16 : Pseudo<
+      (outs gprc:$dst), (ins memrr:$ptr, gprc:$incr), "#ATOMIC_LOAD_MIN_I16",
+      [(set i32:$dst, (atomic_load_min_16 xoaddr:$ptr, i32:$incr))]>;
+    def ATOMIC_LOAD_MAX_I16 : Pseudo<
+      (outs gprc:$dst), (ins memrr:$ptr, gprc:$incr), "#ATOMIC_LOAD_MAX_I16",
+      [(set i32:$dst, (atomic_load_max_16 xoaddr:$ptr, i32:$incr))]>;
+    def ATOMIC_LOAD_UMIN_I16 : Pseudo<
+      (outs gprc:$dst), (ins memrr:$ptr, gprc:$incr), "#ATOMIC_LOAD_UMIN_I16",
+      [(set i32:$dst, (atomic_load_umin_16 xoaddr:$ptr, i32:$incr))]>;
+    def ATOMIC_LOAD_UMAX_I16 : Pseudo<
+      (outs gprc:$dst), (ins memrr:$ptr, gprc:$incr), "#ATOMIC_LOAD_UMAX_I16",
+      [(set i32:$dst, (atomic_load_umax_16 xoaddr:$ptr, i32:$incr))]>;
     def ATOMIC_LOAD_ADD_I32 : Pseudo<
       (outs gprc:$dst), (ins memrr:$ptr, gprc:$incr), "#ATOMIC_LOAD_ADD_I32",
       [(set i32:$dst, (atomic_load_add_32 xoaddr:$ptr, i32:$incr))]>;
@@ -1545,6 +1569,18 @@ let usesCustomInserter = 1 in {
     def ATOMIC_LOAD_NAND_I32 : Pseudo<
       (outs gprc:$dst), (ins memrr:$ptr, gprc:$incr), "#ATOMIC_LOAD_NAND_I32",
       [(set i32:$dst, (atomic_load_nand_32 xoaddr:$ptr, i32:$incr))]>;
+    def ATOMIC_LOAD_MIN_I32 : Pseudo<
+      (outs gprc:$dst), (ins memrr:$ptr, gprc:$incr), "#ATOMIC_LOAD_MIN_I32",
+      [(set i32:$dst, (atomic_load_min_32 xoaddr:$ptr, i32:$incr))]>;
+    def ATOMIC_LOAD_MAX_I32 : Pseudo<
+      (outs gprc:$dst), (ins memrr:$ptr, gprc:$incr), "#ATOMIC_LOAD_MAX_I32",
+      [(set i32:$dst, (atomic_load_max_32 xoaddr:$ptr, i32:$incr))]>;
+    def ATOMIC_LOAD_UMIN_I32 : Pseudo<
+      (outs gprc:$dst), (ins memrr:$ptr, gprc:$incr), "#ATOMIC_LOAD_UMIN_I32",
+      [(set i32:$dst, (atomic_load_umin_32 xoaddr:$ptr, i32:$incr))]>;
+    def ATOMIC_LOAD_UMAX_I32 : Pseudo<
+      (outs gprc:$dst), (ins memrr:$ptr, gprc:$incr), "#ATOMIC_LOAD_UMAX_I32",
+      [(set i32:$dst, (atomic_load_umax_32 xoaddr:$ptr, i32:$incr))]>;
 
     def ATOMIC_CMP_SWAP_I8 : Pseudo<
       (outs gprc:$dst), (ins memrr:$ptr, gprc:$old, gprc:$new), "#ATOMIC_CMP_SWAP_I8",

Modified: vendor/llvm/dist/lib/Target/X86/X86ISelLowering.cpp
==============================================================================
--- vendor/llvm/dist/lib/Target/X86/X86ISelLowering.cpp	Fri Nov 25 18:57:14 2016	(r309151)
+++ vendor/llvm/dist/lib/Target/X86/X86ISelLowering.cpp	Fri Nov 25 19:05:59 2016	(r309152)
@@ -8656,6 +8656,17 @@ static SDValue lowerVectorShuffleAsBroad
     V = DAG.getLoad(SVT, DL, Ld->getChain(), NewAddr,
                     DAG.getMachineFunction().getMachineMemOperand(
                         Ld->getMemOperand(), Offset, SVT.getStoreSize()));
+
+    // Make sure the newly-created LOAD is in the same position as Ld in
+    // terms of dependency. We create a TokenFactor for Ld and V,
+    // and update uses of Ld's output chain to use the TokenFactor.
+    if (Ld->hasAnyUseOfValue(1)) {
+      SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
+                                     SDValue(Ld, 1), SDValue(V.getNode(), 1));
+      DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), NewChain);
+      DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(Ld, 1),
+                             SDValue(V.getNode(), 1));
+    }
   } else if (!BroadcastFromReg) {
     // We can't broadcast from a vector register.
     return SDValue();
@@ -27516,7 +27527,8 @@ static SDValue reduceVMULWidth(SDNode *N
                                const X86Subtarget &Subtarget) {
   // pmulld is supported since SSE41. It is better to use pmulld
   // instead of pmullw+pmulhw.
-  if (Subtarget.hasSSE41())
+  // pmullw/pmulhw are not supported by SSE.
+  if (Subtarget.hasSSE41() || !Subtarget.hasSSE2())
     return SDValue();
 
   ShrinkMode Mode;

Modified: vendor/llvm/dist/lib/Target/X86/X86InstrAVX512.td
==============================================================================
--- vendor/llvm/dist/lib/Target/X86/X86InstrAVX512.td	Fri Nov 25 18:57:14 2016	(r309151)
+++ vendor/llvm/dist/lib/Target/X86/X86InstrAVX512.td	Fri Nov 25 19:05:59 2016	(r309152)
@@ -2124,7 +2124,7 @@ let Predicates = [HasAVX512] in {
             (COPY_TO_REGCLASS (i16 (EXTRACT_SUBREG $src, sub_16bit)), VK1)>;
 
   def : Pat<(i1 (trunc (i8 GR8:$src))),
-            (COPY_TO_REGCLASS (i16 (SUBREG_TO_REG (i64 0), (AND8ri8 $src, (i8 1)),
+            (COPY_TO_REGCLASS (i16 (SUBREG_TO_REG (i64 0), (AND8ri $src, (i8 1)),
                                     sub_8bit)), VK1)>;
 
   def : Pat<(i1 (trunc (i8 (assertzext_i1 GR8:$src)))),

Modified: vendor/llvm/dist/lib/Transforms/Scalar/JumpThreading.cpp
==============================================================================
--- vendor/llvm/dist/lib/Transforms/Scalar/JumpThreading.cpp	Fri Nov 25 18:57:14 2016	(r309151)
+++ vendor/llvm/dist/lib/Transforms/Scalar/JumpThreading.cpp	Fri Nov 25 19:05:59 2016	(r309152)
@@ -1322,6 +1322,10 @@ bool JumpThreadingPass::ProcessBranchOnX
   if (!isa<PHINode>(BB->front()))
     return false;
 
+  // If this BB is a landing pad, we won't be able to split the edge into it.
+  if (BB->isEHPad())
+    return false;
+
   // If we have a xor as the branch input to this block, and we know that the
   // LHS or RHS of the xor in any predecessor is true/false, then we can clone
   // the condition into the predecessor and fix that value to true, saving some

Added: vendor/llvm/dist/test/Analysis/LoopAccessAnalysis/memcheck-off-by-one-error.ll
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ vendor/llvm/dist/test/Analysis/LoopAccessAnalysis/memcheck-off-by-one-error.ll	Fri Nov 25 19:05:59 2016	(r309152)
@@ -0,0 +1,51 @@
+; RUN: opt -analyze --loop-accesses %s | FileCheck %s
+
+; This test verifies run-time boundary check of memory accesses.
+; The original loop:
+;   void fastCopy(const char* src, char* op) {
+;     int len = 32;
+;     while (len > 0) {
+;       *(reinterpret_cast<long long*>(op)) = *(reinterpret_cast<const long long*>(src));
+;       src += 8;
+;       op += 8;
+;       len -= 8;
+;     }
+;   }
+; The boundary calculation before this patch produced:
+; (Low: %src High: (24 + %src))
+; while the actual distance between the two pointers was 31 (%op - %src = 31).
+; IsConflict = (24 > 31) = false -> execution was directed to the vectorized
+; loop. The loop was vectorized with VF = 4 (32-byte <4 x i64> accesses), so
+; a store through *%op touched memory under *%src.
+
+;CHECK: Printing analysis 'Loop Access Analysis' for function 'fastCopy'
+;CHECK: (Low: %op High: (32 + %op))
+;CHECK: (Low: %src High: (32 + %src))
+
+define void @fastCopy(i8* nocapture readonly %src, i8* nocapture %op) {
+entry:
+  br label %while.body.preheader
+
+while.body.preheader:                             ; preds = %entry
+  br label %while.body
+
+while.body:                                       ; preds = %while.body.preheader, %while.body
+  %len.addr.07 = phi i32 [ %sub, %while.body ], [ 32, %while.body.preheader ]
+  %op.addr.06 = phi i8* [ %add.ptr1, %while.body ], [ %op, %while.body.preheader ]
+  %src.addr.05 = phi i8* [ %add.ptr, %while.body ], [ %src, %while.body.preheader ]
+  %0 = bitcast i8* %src.addr.05 to i64*
+  %1 = load i64, i64* %0, align 8
+  %2 = bitcast i8* %op.addr.06 to i64*
+  store i64 %1, i64* %2, align 8
+  %add.ptr = getelementptr inbounds i8, i8* %src.addr.05, i64 8
+  %add.ptr1 = getelementptr inbounds i8, i8* %op.addr.06, i64 8
+  %sub = add nsw i32 %len.addr.07, -8
+  %cmp = icmp sgt i32 %len.addr.07, 8
+  br i1 %cmp, label %while.body, label %while.end.loopexit
+
+while.end.loopexit:                               ; preds = %while.body
+  br label %while.end
+
+while.end:                                        ; preds = %while.end.loopexit, %entry
+  ret void
+}
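
Worked numbers for this test, for context: with A = %src, Step = 8 and a back-edge taken count of N = 3, the final access is B = A + 8*3 = A + 24 and covers bytes [24, 32), so End = UMAX(A, B) + SizeOfElt = (32 + %src). The previous bound of (24 + %src) dropped the final element's width, which is how a pointer distance of 31 bytes (%op - %src = 31) slipped past the old ULE check.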

Modified: vendor/llvm/dist/test/Analysis/LoopAccessAnalysis/number-of-memchecks.ll
==============================================================================
--- vendor/llvm/dist/test/Analysis/LoopAccessAnalysis/number-of-memchecks.ll	Fri Nov 25 18:57:14 2016	(r309151)
+++ vendor/llvm/dist/test/Analysis/LoopAccessAnalysis/number-of-memchecks.ll	Fri Nov 25 19:05:59 2016	(r309152)
@@ -96,15 +96,15 @@ for.end:                                
 ; CHECK-NEXT:       %arrayidxB = getelementptr inbounds i16, i16* %b, i64 %ind
 ; CHECK-NEXT:   Grouped accesses:
 ; CHECK-NEXT:    Group {{.*}}[[ZERO]]:
-; CHECK-NEXT:       (Low: %c High: (78 + %c))
+; CHECK-NEXT:       (Low: %c High: (80 + %c))
 ; CHECK-NEXT:         Member: {(2 + %c)<nsw>,+,4}
 ; CHECK-NEXT:         Member: {%c,+,4}
 ; CHECK-NEXT:     Group {{.*}}[[ONE]]:
-; CHECK-NEXT:       (Low: %a High: (40 + %a))
+; CHECK-NEXT:       (Low: %a High: (42 + %a))
 ; CHECK-NEXT:         Member: {(2 + %a)<nsw>,+,2}
 ; CHECK-NEXT:         Member: {%a,+,2}
 ; CHECK-NEXT:     Group {{.*}}[[TWO]]:
-; CHECK-NEXT:       (Low: %b High: (38 + %b))
+; CHECK-NEXT:       (Low: %b High: (40 + %b))
 ; CHECK-NEXT:         Member: {%b,+,2}
 
 define void @testg(i16* %a,
@@ -168,15 +168,15 @@ for.end:                                
 ; CHECK-NEXT:         %arrayidxB = getelementptr i16, i16* %b, i64 %ind
 ; CHECK-NEXT:   Grouped accesses:
 ; CHECK-NEXT:     Group {{.*}}[[ZERO]]:
-; CHECK-NEXT:       (Low: %c High: (78 + %c))
+; CHECK-NEXT:       (Low: %c High: (80 + %c))
 ; CHECK-NEXT:         Member: {(2 + %c)<nsw>,+,4}
 ; CHECK-NEXT:         Member: {%c,+,4}
 ; CHECK-NEXT:     Group {{.*}}[[ONE]]:
-; CHECK-NEXT:       (Low: %a High: (40 + %a))
+; CHECK-NEXT:       (Low: %a High: (42 + %a))
 ; CHECK-NEXT:         Member: {(2 + %a),+,2}
 ; CHECK-NEXT:         Member: {%a,+,2}
 ; CHECK-NEXT:     Group {{.*}}[[TWO]]:
-; CHECK-NEXT:       (Low: %b High: (38 + %b))
+; CHECK-NEXT:       (Low: %b High: (40 + %b))
 ; CHECK-NEXT:         Member: {%b,+,2}
 
 define void @testh(i16* %a,
@@ -247,13 +247,13 @@ for.end:                                
 ; CHECK-NEXT:       %arrayidxA2 = getelementptr i16, i16* %a, i64 %ind2
 ; CHECK-NEXT:   Grouped accesses:
 ; CHECK-NEXT:     Group {{.*}}[[ZERO]]:
-; CHECK-NEXT:       (Low: ((2 * %offset) + %a)<nsw> High: (9998 + (2 * %offset) + %a))
+; CHECK-NEXT:       (Low: ((2 * %offset) + %a)<nsw> High: (10000 + (2 * %offset) + %a))
 ; CHECK-NEXT:         Member: {((2 * %offset) + %a)<nsw>,+,2}<nsw><%for.body>
 ; CHECK-NEXT:     Group {{.*}}[[ONE]]:
-; CHECK-NEXT:       (Low: %a High: (9998 + %a))
+; CHECK-NEXT:       (Low: %a High: (10000 + %a))
 ; CHECK-NEXT:         Member: {%a,+,2}<%for.body>
 ; CHECK-NEXT:     Group {{.*}}[[TWO]]:
-; CHECK-NEXT:       (Low: (20000 + %a) High: (29998 + %a))
+; CHECK-NEXT:       (Low: (20000 + %a) High: (30000 + %a))
 ; CHECK-NEXT:         Member: {(20000 + %a),+,2}<%for.body>
 
 define void @testi(i16* %a,

Modified: vendor/llvm/dist/test/Analysis/LoopAccessAnalysis/reverse-memcheck-bounds.ll
==============================================================================
--- vendor/llvm/dist/test/Analysis/LoopAccessAnalysis/reverse-memcheck-bounds.ll	Fri Nov 25 18:57:14 2016	(r309151)
+++ vendor/llvm/dist/test/Analysis/LoopAccessAnalysis/reverse-memcheck-bounds.ll	Fri Nov 25 19:05:59 2016	(r309152)
@@ -16,7 +16,7 @@ target datalayout = "e-m:e-i64:64-i128:1
 target triple = "aarch64--linux-gnueabi"
 
 ; CHECK: function 'f':
-; CHECK: (Low: (20000 + %a) High: (60000 + %a)<nsw>)
+; CHECK: (Low: (20000 + %a) High: (60004 + %a))
 
 @B = common global i32* null, align 8
 @A = common global i32* null, align 8
@@ -59,7 +59,7 @@ for.end:                                
 ; Here it is not obvious what the limits are, since 'step' could be negative.
 
 ; CHECK: Low: (-1 + (-1 * ((-60001 + (-1 * %a)) umax (-60001 + (40000 * %step) + (-1 * %a)))))
-; CHECK: High: ((60000 + %a)<nsw> umax (60000 + (-40000 * %step) + %a))
+; CHECK: High: (4 + ((60000 + %a)<nsw> umax (60000 + (-40000 * %step) + %a)))
 
 define void @g(i64 %step) {
 entry:

Added: vendor/llvm/dist/test/CodeGen/PowerPC/atomic-minmax.ll
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ vendor/llvm/dist/test/CodeGen/PowerPC/atomic-minmax.ll	Fri Nov 25 19:05:59 2016	(r309152)
@@ -0,0 +1,435 @@
+; RUN: llc < %s | FileCheck %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+define void @a32min(i32* nocapture dereferenceable(4) %minimum, i32 %val) #0 {
+entry:
+  %0 = atomicrmw min i32* %minimum, i32 %val monotonic
+  ret void
+
+; CHECK-LABEL: @a32min
+; CHECK: lwarx [[OLDV:[0-9]+]], 0, 3
+; CHECK: cmpw 4, [[OLDV]]
+; CHECK: bgelr 0
+; CHECK: stwcx. 4, 0, 3
+; CHECK: bne 0,
+; CHECK: blr
+}
+
+define void @a32max(i32* nocapture dereferenceable(4) %minimum, i32 %val) #0 {
+entry:
+  %0 = atomicrmw max i32* %minimum, i32 %val monotonic
+  ret void
+
+; CHECK-LABEL: @a32max
+; CHECK: lwarx [[OLDV:[0-9]+]], 0, 3
+; CHECK: cmpw 4, [[OLDV]]
+; CHECK: blelr 0
+; CHECK: stwcx. 4, 0, 3
+; CHECK: bne 0,
+; CHECK: blr
+}
+
+define void @a32umin(i32* nocapture dereferenceable(4) %minimum, i32 %val) #0 {
+entry:
+  %0 = atomicrmw umin i32* %minimum, i32 %val monotonic
+  ret void
+
+; CHECK-LABEL: @a32umin
+; CHECK: lwarx [[OLDV:[0-9]+]], 0, 3
+; CHECK: cmplw 4, [[OLDV]]
+; CHECK: bgelr 0
+; CHECK: stwcx. 4, 0, 3
+; CHECK: bne 0,
+; CHECK: blr
+}
+
+define void @a32umax(i32* nocapture dereferenceable(4) %minimum, i32 %val) #0 {
+entry:
+  %0 = atomicrmw umax i32* %minimum, i32 %val monotonic
+  ret void
+
+; CHECK-LABEL: @a32umax
+; CHECK: lwarx [[OLDV:[0-9]+]], 0, 3
+; CHECK: cmplw 4, [[OLDV]]
+; CHECK: blelr 0
+; CHECK: stwcx. 4, 0, 3
+; CHECK: bne 0,
+; CHECK: blr
+}
+
+define void @a16min(i16* nocapture dereferenceable(4) %minimum, i16 %val) #1 {
+entry:
+  %0 = atomicrmw min i16* %minimum, i16 %val monotonic
+  ret void
+
+; CHECK-LABEL: @a16min
+; CHECK: lharx [[OLDV:[0-9]+]], 0, 3
+; CHECK: cmpw 4, [[OLDV]]
+; CHECK: bgelr 0
+; CHECK: sthcx. 4, 0, 3
+; CHECK: bne 0,
+; CHECK: blr
+}
+
+define void @a16max(i16* nocapture dereferenceable(4) %minimum, i16 %val) #1 {
+entry:
+  %0 = atomicrmw max i16* %minimum, i16 %val monotonic
+  ret void
+
+; CHECK-LABEL: @a16max
+; CHECK: lharx [[OLDV:[0-9]+]], 0, 3
+; CHECK: cmpw 4, [[OLDV]]
+; CHECK: blelr 0
+; CHECK: sthcx. 4, 0, 3
+; CHECK: bne 0,
+; CHECK: blr
+}
+
+define void @a16umin(i16* nocapture dereferenceable(4) %minimum, i16 %val) #1 {
+entry:
+  %0 = atomicrmw umin i16* %minimum, i16 %val monotonic
+  ret void
+
+; CHECK-LABEL: @a16umin
+; CHECK: lharx [[OLDV:[0-9]+]], 0, 3
+; CHECK: cmplw 4, [[OLDV]]
+; CHECK: bgelr 0
+; CHECK: sthcx. 4, 0, 3
+; CHECK: bne 0,
+; CHECK: blr
+}
+
+define void @a16umax(i16* nocapture dereferenceable(4) %minimum, i16 %val) #1 {
+entry:
+  %0 = atomicrmw umax i16* %minimum, i16 %val monotonic
+  ret void
+
+; CHECK-LABEL: @a16umax
+; CHECK: lharx [[OLDV:[0-9]+]], 0, 3
+; CHECK: cmplw 4, [[OLDV]]
+; CHECK: blelr 0
+; CHECK: sthcx. 4, 0, 3
+; CHECK: bne 0,
+; CHECK: blr
+}
+
+define void @a8min(i8* nocapture dereferenceable(4) %minimum, i8 %val) #1 {
+entry:
+  %0 = atomicrmw min i8* %minimum, i8 %val monotonic
+  ret void
+
+; CHECK-LABEL: @a8min
+; CHECK: lbarx [[OLDV:[0-9]+]], 0, 3
+; CHECK: cmpw 4, [[OLDV]]
+; CHECK: bgelr 0
+; CHECK: stbcx. 4, 0, 3
+; CHECK: bne 0,
+; CHECK: blr
+}
+
+define void @a8max(i8* nocapture dereferenceable(4) %minimum, i8 %val) #1 {
+entry:
+  %0 = atomicrmw max i8* %minimum, i8 %val monotonic
+  ret void
+
+; CHECK-LABEL: @a8max
+; CHECK: lbarx [[OLDV:[0-9]+]], 0, 3
+; CHECK: cmpw 4, [[OLDV]]
+; CHECK: blelr 0
+; CHECK: stbcx. 4, 0, 3
+; CHECK: bne 0,
+; CHECK: blr
+}
+
+define void @a8umin(i8* nocapture dereferenceable(4) %minimum, i8 %val) #1 {
+entry:
+  %0 = atomicrmw umin i8* %minimum, i8 %val monotonic
+  ret void
+
+; CHECK-LABEL: @a8umin
+; CHECK: lbarx [[OLDV:[0-9]+]], 0, 3
+; CHECK: cmplw 4, [[OLDV]]
+; CHECK: bgelr 0
+; CHECK: stbcx. 4, 0, 3
+; CHECK: bne 0,
+; CHECK: blr
+}
+
+define void @a8umax(i8* nocapture dereferenceable(4) %minimum, i8 %val) #1 {
+entry:
+  %0 = atomicrmw umax i8* %minimum, i8 %val monotonic
+  ret void
+
+; CHECK-LABEL: @a8umax
+; CHECK: lbarx [[OLDV:[0-9]+]], 0, 3
+; CHECK: cmplw 4, [[OLDV]]
+; CHECK: blelr 0
+; CHECK: stbcx. 4, 0, 3
+; CHECK: bne 0,
+; CHECK: blr
+}
+
+define void @a64min(i64* nocapture dereferenceable(4) %minimum, i64 %val) #0 {
+entry:
+  %0 = atomicrmw min i64* %minimum, i64 %val monotonic
+  ret void
+
+; CHECK-LABEL: @a64min
+; CHECK: ldarx [[OLDV:[0-9]+]], 0, 3
+; CHECK: cmpd 4, [[OLDV]]
+; CHECK: bgelr 0
+; CHECK: stdcx. 4, 0, 3
+; CHECK: bne 0,
+; CHECK: blr
+}
+
+define void @a64max(i64* nocapture dereferenceable(4) %minimum, i64 %val) #0 {
+entry:
+  %0 = atomicrmw max i64* %minimum, i64 %val monotonic
+  ret void
+
+; CHECK-LABEL: @a64max
+; CHECK: ldarx [[OLDV:[0-9]+]], 0, 3
+; CHECK: cmpd 4, [[OLDV]]
+; CHECK: blelr 0
+; CHECK: stdcx. 4, 0, 3
+; CHECK: bne 0,
+; CHECK: blr
+}
+
+define void @a64umin(i64* nocapture dereferenceable(4) %minimum, i64 %val) #0 {
+entry:
+  %0 = atomicrmw umin i64* %minimum, i64 %val monotonic
+  ret void
+
+; CHECK-LABEL: @a64umin
+; CHECK: ldarx [[OLDV:[0-9]+]], 0, 3
+; CHECK: cmpld 4, [[OLDV]]
+; CHECK: bgelr 0
+; CHECK: stdcx. 4, 0, 3
+; CHECK: bne 0,
+; CHECK: blr
+}
+
+define void @a64umax(i64* nocapture dereferenceable(4) %minimum, i64 %val) #0 {
+entry:
+  %0 = atomicrmw umax i64* %minimum, i64 %val monotonic
+  ret void
+
+; CHECK-LABEL: @a64umax
+; CHECK: ldarx [[OLDV:[0-9]+]], 0, 3
+; CHECK: cmpld 4, [[OLDV]]
+; CHECK: blelr 0
+; CHECK: stdcx. 4, 0, 3
+; CHECK: bne 0,
+; CHECK: blr
+}
+
+define void @ae16min(i16* nocapture dereferenceable(4) %minimum, i16 %val) #0 {
+entry:
+  %0 = atomicrmw min i16* %minimum, i16 %val monotonic
+  ret void
+
+; CHECK-LABEL: @ae16min
+; CHECK-DAG: rlwinm [[SA1:[0-9]+]], 3, 3, 27, 27
+; CHECK-DAG: li [[M1:[0-9]+]], 0
+; CHECK-DAG: rldicr 3, 3, 0, 61
+; CHECK-DAG: xori [[SA:[0-9]+]], [[SA1]], 16
+; CHECK-DAG: ori [[M2:[0-9]+]], [[M1]], 65535
+; CHECK-DAG: slw [[SV:[0-9]+]], 4, [[SA]]
+; CHECK-DAG: slw [[M:[0-9]+]], [[M2]], [[SA]]
+; CHECK-DAG: and [[SMV:[0-9]+]], [[SV]], [[M]]
+; CHECK: lwarx [[OLDV:[0-9]+]], 0, 3
+; CHECK: and [[MOLDV:[0-9]+]], [[OLDV]], [[M]]
+; CHECK: srw [[SMOLDV:[0-9]+]], [[MOLDV]], [[SA]]
+; CHECK: extsh [[SESMOLDV:[0-9]+]], [[SMOLDV]]
+; CHECK: cmpw 0, 4, [[SESMOLDV]]

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***


