Skip site navigation (1)Skip section navigation (2)
Date:      Mon, 2 Sep 2019 17:48:59 +0000 (UTC)
From:      Dimitry Andric <dim@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-vendor@freebsd.org
Subject:   svn commit: r351709 - in vendor/llvm/dist-release_90: include/llvm/Analysis include/llvm/IR lib/Analysis lib/IR lib/Target lib/Target/AArch64 lib/Target/Mips/AsmParser lib/Target/RISCV lib/Target/X...
Message-ID:  <201909021748.x82Hmxm3019534@repo.freebsd.org>

next in thread | raw e-mail | index | archive | help
Author: dim
Date: Mon Sep  2 17:48:59 2019
New Revision: 351709
URL: https://svnweb.freebsd.org/changeset/base/351709

Log:
  Vendor import of llvm release_90 branch r370514:
  https://llvm.org/svn/llvm-project/llvm/branches/release_90@370514

Modified:
  vendor/llvm/dist-release_90/include/llvm/Analysis/InstructionSimplify.h
  vendor/llvm/dist-release_90/include/llvm/IR/InlineAsm.h
  vendor/llvm/dist-release_90/lib/Analysis/InstructionSimplify.cpp
  vendor/llvm/dist-release_90/lib/IR/Core.cpp
  vendor/llvm/dist-release_90/lib/Target/AArch64/AArch64ISelLowering.cpp
  vendor/llvm/dist-release_90/lib/Target/AArch64/AArch64ISelLowering.h
  vendor/llvm/dist-release_90/lib/Target/AArch64/AArch64InstrInfo.cpp
  vendor/llvm/dist-release_90/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
  vendor/llvm/dist-release_90/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
  vendor/llvm/dist-release_90/lib/Target/RISCV/RISCVISelLowering.cpp
  vendor/llvm/dist-release_90/lib/Target/RISCV/RISCVISelLowering.h
  vendor/llvm/dist-release_90/lib/Target/TargetMachine.cpp
  vendor/llvm/dist-release_90/lib/Target/X86/X86ISelLowering.cpp
  vendor/llvm/dist-release_90/lib/Target/X86/X86ISelLowering.h
  vendor/llvm/dist-release_90/lib/Target/X86/X86Subtarget.cpp
  vendor/llvm/dist-release_90/lib/Transforms/Utils/LoopUnroll.cpp

Modified: vendor/llvm/dist-release_90/include/llvm/Analysis/InstructionSimplify.h
==============================================================================
--- vendor/llvm/dist-release_90/include/llvm/Analysis/InstructionSimplify.h	Mon Sep  2 17:32:57 2019	(r351708)
+++ vendor/llvm/dist-release_90/include/llvm/Analysis/InstructionSimplify.h	Mon Sep  2 17:48:59 2019	(r351709)
@@ -31,6 +31,7 @@
 #ifndef LLVM_ANALYSIS_INSTRUCTIONSIMPLIFY_H
 #define LLVM_ANALYSIS_INSTRUCTIONSIMPLIFY_H
 
+#include "llvm/ADT/SetVector.h"
 #include "llvm/IR/Instruction.h"
 #include "llvm/IR/Operator.h"
 #include "llvm/IR/User.h"
@@ -263,12 +264,14 @@ Value *SimplifyInstruction(Instruction *I, const Simpl
 /// This first performs a normal RAUW of I with SimpleV. It then recursively
 /// attempts to simplify those users updated by the operation. The 'I'
 /// instruction must not be equal to the simplified value 'SimpleV'.
+/// If UnsimplifiedUsers is provided, instructions that could not be simplified
+/// are added to it.
 ///
 /// The function returns true if any simplifications were performed.
-bool replaceAndRecursivelySimplify(Instruction *I, Value *SimpleV,
-                                   const TargetLibraryInfo *TLI = nullptr,
-                                   const DominatorTree *DT = nullptr,
-                                   AssumptionCache *AC = nullptr);
+bool replaceAndRecursivelySimplify(
+    Instruction *I, Value *SimpleV, const TargetLibraryInfo *TLI = nullptr,
+    const DominatorTree *DT = nullptr, AssumptionCache *AC = nullptr,
+    SmallSetVector<Instruction *, 8> *UnsimplifiedUsers = nullptr);
 
 /// Recursively attempt to simplify an instruction.
 ///

Modified: vendor/llvm/dist-release_90/include/llvm/IR/InlineAsm.h
==============================================================================
--- vendor/llvm/dist-release_90/include/llvm/IR/InlineAsm.h	Mon Sep  2 17:32:57 2019	(r351708)
+++ vendor/llvm/dist-release_90/include/llvm/IR/InlineAsm.h	Mon Sep  2 17:48:59 2019	(r351709)
@@ -244,6 +244,7 @@ class InlineAsm final : public Value { (public)
     Constraint_m,
     Constraint_o,
     Constraint_v,
+    Constraint_A,
     Constraint_Q,
     Constraint_R,
     Constraint_S,

Modified: vendor/llvm/dist-release_90/lib/Analysis/InstructionSimplify.cpp
==============================================================================
--- vendor/llvm/dist-release_90/lib/Analysis/InstructionSimplify.cpp	Mon Sep  2 17:32:57 2019	(r351708)
+++ vendor/llvm/dist-release_90/lib/Analysis/InstructionSimplify.cpp	Mon Sep  2 17:48:59 2019	(r351709)
@@ -5221,14 +5221,16 @@ Value *llvm::SimplifyInstruction(Instruction *I, const
 /// If we have a pre-simplified value in 'SimpleV', that is forcibly used to
 /// replace the instruction 'I'. Otherwise, we simply add 'I' to the list of
 /// instructions to process and attempt to simplify it using
-/// InstructionSimplify.
+/// InstructionSimplify. Recursively visited users which could not be
+/// simplified themselves are added to the optional UnsimplifiedUsers set for
+/// further processing by the caller.
 ///
 /// This routine returns 'true' only when *it* simplifies something. The passed
 /// in simplified value does not count toward this.
-static bool replaceAndRecursivelySimplifyImpl(Instruction *I, Value *SimpleV,
-                                              const TargetLibraryInfo *TLI,
-                                              const DominatorTree *DT,
-                                              AssumptionCache *AC) {
+static bool replaceAndRecursivelySimplifyImpl(
+    Instruction *I, Value *SimpleV, const TargetLibraryInfo *TLI,
+    const DominatorTree *DT, AssumptionCache *AC,
+    SmallSetVector<Instruction *, 8> *UnsimplifiedUsers = nullptr) {
   bool Simplified = false;
   SmallSetVector<Instruction *, 8> Worklist;
   const DataLayout &DL = I->getModule()->getDataLayout();
@@ -5258,8 +5260,11 @@ static bool replaceAndRecursivelySimplifyImpl(Instruct
 
     // See if this instruction simplifies.
     SimpleV = SimplifyInstruction(I, {DL, TLI, DT, AC});
-    if (!SimpleV)
+    if (!SimpleV) {
+      if (UnsimplifiedUsers)
+        UnsimplifiedUsers->insert(I);
       continue;
+    }
 
     Simplified = true;
 
@@ -5285,16 +5290,17 @@ bool llvm::recursivelySimplifyInstruction(Instruction 
                                           const TargetLibraryInfo *TLI,
                                           const DominatorTree *DT,
                                           AssumptionCache *AC) {
-  return replaceAndRecursivelySimplifyImpl(I, nullptr, TLI, DT, AC);
+  return replaceAndRecursivelySimplifyImpl(I, nullptr, TLI, DT, AC, nullptr);
 }
 
-bool llvm::replaceAndRecursivelySimplify(Instruction *I, Value *SimpleV,
-                                         const TargetLibraryInfo *TLI,
-                                         const DominatorTree *DT,
-                                         AssumptionCache *AC) {
+bool llvm::replaceAndRecursivelySimplify(
+    Instruction *I, Value *SimpleV, const TargetLibraryInfo *TLI,
+    const DominatorTree *DT, AssumptionCache *AC,
+    SmallSetVector<Instruction *, 8> *UnsimplifiedUsers) {
   assert(I != SimpleV && "replaceAndRecursivelySimplify(X,X) is not valid!");
   assert(SimpleV && "Must provide a simplified value.");
-  return replaceAndRecursivelySimplifyImpl(I, SimpleV, TLI, DT, AC);
+  return replaceAndRecursivelySimplifyImpl(I, SimpleV, TLI, DT, AC,
+                                           UnsimplifiedUsers);
 }
 
 namespace llvm {

Modified: vendor/llvm/dist-release_90/lib/IR/Core.cpp
==============================================================================
--- vendor/llvm/dist-release_90/lib/IR/Core.cpp	Mon Sep  2 17:32:57 2019	(r351708)
+++ vendor/llvm/dist-release_90/lib/IR/Core.cpp	Mon Sep  2 17:48:59 2019	(r351709)
@@ -140,7 +140,16 @@ unsigned LLVMGetLastEnumAttributeKind(void) {
 
 LLVMAttributeRef LLVMCreateEnumAttribute(LLVMContextRef C, unsigned KindID,
                                          uint64_t Val) {
-  return wrap(Attribute::get(*unwrap(C), (Attribute::AttrKind)KindID, Val));
+  auto &Ctx = *unwrap(C);
+  auto AttrKind = (Attribute::AttrKind)KindID;
+
+  if (AttrKind == Attribute::AttrKind::ByVal) {
+    // After r362128, byval attributes need to have a type attribute. Provide a
+    // NULL one until a proper API is added for this.
+    return wrap(Attribute::getWithByValType(Ctx, NULL));
+  } else {
+    return wrap(Attribute::get(Ctx, AttrKind, Val));
+  }
 }
 
 unsigned LLVMGetEnumAttributeKind(LLVMAttributeRef A) {

Modified: vendor/llvm/dist-release_90/lib/Target/AArch64/AArch64ISelLowering.cpp
==============================================================================
--- vendor/llvm/dist-release_90/lib/Target/AArch64/AArch64ISelLowering.cpp	Mon Sep  2 17:32:57 2019	(r351708)
+++ vendor/llvm/dist-release_90/lib/Target/AArch64/AArch64ISelLowering.cpp	Mon Sep  2 17:48:59 2019	(r351709)
@@ -10579,7 +10579,7 @@ static SDValue performPostLD1Combine(SDNode *N,
     // are predecessors to each other or the Vector.
     SmallPtrSet<const SDNode *, 32> Visited;
     SmallVector<const SDNode *, 16> Worklist;
-    Visited.insert(N);
+    Visited.insert(Addr.getNode());
     Worklist.push_back(User);
     Worklist.push_back(LD);
     Worklist.push_back(Vector.getNode());
@@ -11993,6 +11993,14 @@ bool AArch64TargetLowering::isMaskAndCmp0FoldingBenefi
   if (!Mask)
     return false;
   return Mask->getValue().isPowerOf2();
+}
+
+bool AArch64TargetLowering::shouldExpandShift(SelectionDAG &DAG,
+                                              SDNode *N) const {
+  if (DAG.getMachineFunction().getFunction().hasMinSize() &&
+      !Subtarget->isTargetWindows())
+    return false;
+  return true;
 }
 
 void AArch64TargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {

Modified: vendor/llvm/dist-release_90/lib/Target/AArch64/AArch64ISelLowering.h
==============================================================================
--- vendor/llvm/dist-release_90/lib/Target/AArch64/AArch64ISelLowering.h	Mon Sep  2 17:32:57 2019	(r351708)
+++ vendor/llvm/dist-release_90/lib/Target/AArch64/AArch64ISelLowering.h	Mon Sep  2 17:48:59 2019	(r351709)
@@ -480,11 +480,7 @@ class AArch64TargetLowering : public TargetLowering { 
     return VT.getSizeInBits() >= 64; // vector 'bic'
   }
 
-  bool shouldExpandShift(SelectionDAG &DAG, SDNode *N) const override {
-    if (DAG.getMachineFunction().getFunction().hasMinSize())
-      return false;
-    return true;
-  }
+  bool shouldExpandShift(SelectionDAG &DAG, SDNode *N) const override;
 
   bool shouldTransformSignedTruncationCheck(EVT XVT,
                                             unsigned KeptBits) const override {

Modified: vendor/llvm/dist-release_90/lib/Target/AArch64/AArch64InstrInfo.cpp
==============================================================================
--- vendor/llvm/dist-release_90/lib/Target/AArch64/AArch64InstrInfo.cpp	Mon Sep  2 17:32:57 2019	(r351708)
+++ vendor/llvm/dist-release_90/lib/Target/AArch64/AArch64InstrInfo.cpp	Mon Sep  2 17:48:59 2019	(r351709)
@@ -32,6 +32,7 @@
 #include "llvm/CodeGen/TargetSubtargetInfo.h"
 #include "llvm/IR/DebugLoc.h"
 #include "llvm/IR/GlobalValue.h"
+#include "llvm/MC/MCAsmInfo.h"
 #include "llvm/MC/MCInst.h"
 #include "llvm/MC/MCInstrDesc.h"
 #include "llvm/Support/Casting.h"
@@ -1926,6 +1927,17 @@ bool AArch64InstrInfo::isCandidateToMergeOrPair(const 
   // Check if this load/store has a hint to avoid pair formation.
   // MachineMemOperands hints are set by the AArch64StorePairSuppress pass.
   if (isLdStPairSuppressed(MI))
+    return false;
+
+  // Do not pair any callee-save store/reload instructions in the
+  // prologue/epilogue if the CFI information encoded the operations as separate
+  // instructions, as that will cause the size of the actual prologue to mismatch
+  // with the prologue size recorded in the Windows CFI.
+  const MCAsmInfo *MAI = MI.getMF()->getTarget().getMCAsmInfo();
+  bool NeedsWinCFI = MAI->usesWindowsCFI() &&
+                     MI.getMF()->getFunction().needsUnwindTableEntry();
+  if (NeedsWinCFI && (MI.getFlag(MachineInstr::FrameSetup) ||
+                      MI.getFlag(MachineInstr::FrameDestroy)))
     return false;
 
   // On some CPUs quad load/store pairs are slower than two single load/stores.

Modified: vendor/llvm/dist-release_90/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
==============================================================================
--- vendor/llvm/dist-release_90/lib/Target/Mips/AsmParser/MipsAsmParser.cpp	Mon Sep  2 17:32:57 2019	(r351708)
+++ vendor/llvm/dist-release_90/lib/Target/Mips/AsmParser/MipsAsmParser.cpp	Mon Sep  2 17:48:59 2019	(r351709)
@@ -3625,8 +3625,25 @@ void MipsAsmParser::expandMemInst(MCInst &Inst, SMLoc 
       TOut.emitRRR(isGP64bit() ? Mips::DADDu : Mips::ADDu, TmpReg, TmpReg,
                    BaseReg, IDLoc, STI);
     TOut.emitRRI(Inst.getOpcode(), DstReg, TmpReg, LoOffset, IDLoc, STI);
+    return;
+  }
+
+  assert(OffsetOp.isExpr() && "expected expression operand kind");
+  if (inPicMode()) {
+    // FIXME:
+    // a) Fix lw/sw $reg, symbol($reg) instruction expanding.
+    // b) If expression includes offset (sym + number), do not
+    //    encode the offset into a relocation. Take it in account
+    //    in the last load/store instruction.
+    // c) Check that immediates of R_MIPS_GOT16/R_MIPS_LO16 relocations
+    //    do not exceed 16-bit.
+    // d) Use R_MIPS_GOT_PAGE/R_MIPS_GOT_OFST relocations instead
+    //    of R_MIPS_GOT_DISP in appropriate cases to reduce number
+    //    of GOT entries.
+    expandLoadAddress(TmpReg, Mips::NoRegister, OffsetOp, !ABI.ArePtrs64bit(),
+                      IDLoc, Out, STI);
+    TOut.emitRRI(Inst.getOpcode(), DstReg, TmpReg, 0, IDLoc, STI);
   } else {
-    assert(OffsetOp.isExpr() && "expected expression operand kind");
     const MCExpr *ExprOffset = OffsetOp.getExpr();
     MCOperand LoOperand = MCOperand::createExpr(
         MipsMCExpr::create(MipsMCExpr::MEK_LO, ExprOffset, getContext()));

Modified: vendor/llvm/dist-release_90/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
==============================================================================
--- vendor/llvm/dist-release_90/lib/Target/RISCV/RISCVISelDAGToDAG.cpp	Mon Sep  2 17:32:57 2019	(r351708)
+++ vendor/llvm/dist-release_90/lib/Target/RISCV/RISCVISelDAGToDAG.cpp	Mon Sep  2 17:48:59 2019	(r351709)
@@ -179,6 +179,9 @@ bool RISCVDAGToDAGISel::SelectInlineAsmMemoryOperand(
     // operand and need no special handling.
     OutOps.push_back(Op);
     return false;
+  case InlineAsm::Constraint_A:
+    OutOps.push_back(Op);
+    return false;
   default:
     break;
   }

Modified: vendor/llvm/dist-release_90/lib/Target/RISCV/RISCVISelLowering.cpp
==============================================================================
--- vendor/llvm/dist-release_90/lib/Target/RISCV/RISCVISelLowering.cpp	Mon Sep  2 17:32:57 2019	(r351708)
+++ vendor/llvm/dist-release_90/lib/Target/RISCV/RISCVISelLowering.cpp	Mon Sep  2 17:48:59 2019	(r351709)
@@ -2413,6 +2413,8 @@ RISCVTargetLowering::getConstraintType(StringRef Const
     case 'J':
     case 'K':
       return C_Immediate;
+    case 'A':
+      return C_Memory;
     }
   }
   return TargetLowering::getConstraintType(Constraint);
@@ -2440,6 +2442,21 @@ RISCVTargetLowering::getRegForInlineAsmConstraint(cons
   }
 
   return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
+}
+
+unsigned
+RISCVTargetLowering::getInlineAsmMemConstraint(StringRef ConstraintCode) const {
+  // Currently only support length 1 constraints.
+  if (ConstraintCode.size() == 1) {
+    switch (ConstraintCode[0]) {
+    case 'A':
+      return InlineAsm::Constraint_A;
+    default:
+      break;
+    }
+  }
+
+  return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
 }
 
 void RISCVTargetLowering::LowerAsmOperandForConstraint(

Modified: vendor/llvm/dist-release_90/lib/Target/RISCV/RISCVISelLowering.h
==============================================================================
--- vendor/llvm/dist-release_90/lib/Target/RISCV/RISCVISelLowering.h	Mon Sep  2 17:32:57 2019	(r351708)
+++ vendor/llvm/dist-release_90/lib/Target/RISCV/RISCVISelLowering.h	Mon Sep  2 17:48:59 2019	(r351709)
@@ -93,6 +93,9 @@ class RISCVTargetLowering : public TargetLowering { (p
   const char *getTargetNodeName(unsigned Opcode) const override;
 
   ConstraintType getConstraintType(StringRef Constraint) const override;
+
+  unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const override;
+
   std::pair<unsigned, const TargetRegisterClass *>
   getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                StringRef Constraint, MVT VT) const override;

Modified: vendor/llvm/dist-release_90/lib/Target/TargetMachine.cpp
==============================================================================
--- vendor/llvm/dist-release_90/lib/Target/TargetMachine.cpp	Mon Sep  2 17:32:57 2019	(r351708)
+++ vendor/llvm/dist-release_90/lib/Target/TargetMachine.cpp	Mon Sep  2 17:48:59 2019	(r351709)
@@ -140,8 +140,8 @@ bool TargetMachine::shouldAssumeDSOLocal(const Module 
   // don't assume the variables to be DSO local unless we actually know
   // that for sure. This only has to be done for variables; for functions
   // the linker can insert thunks for calling functions from another DLL.
-  if (TT.isWindowsGNUEnvironment() && GV && GV->isDeclarationForLinker() &&
-      isa<GlobalVariable>(GV))
+  if (TT.isWindowsGNUEnvironment() && TT.isOSBinFormatCOFF() && GV &&
+      GV->isDeclarationForLinker() && isa<GlobalVariable>(GV))
     return false;
 
   // On COFF, don't mark 'extern_weak' symbols as DSO local. If these symbols
@@ -154,7 +154,9 @@ bool TargetMachine::shouldAssumeDSOLocal(const Module 
   // Make an exception for windows OS in the triple: Some firmware builds use
   // *-win32-macho triples. This (accidentally?) produced windows relocations
   // without GOT tables in older clang versions; Keep this behaviour.
-  if (TT.isOSBinFormatCOFF() || (TT.isOSWindows() && TT.isOSBinFormatMachO()))
+  // Some JIT users use *-win32-elf triples; these shouldn't use GOT tables
+  // either.
+  if (TT.isOSBinFormatCOFF() || TT.isOSWindows())
     return true;
 
   // Most PIC code sequences that assume that a symbol is local cannot

Modified: vendor/llvm/dist-release_90/lib/Target/X86/X86ISelLowering.cpp
==============================================================================
--- vendor/llvm/dist-release_90/lib/Target/X86/X86ISelLowering.cpp	Mon Sep  2 17:32:57 2019	(r351708)
+++ vendor/llvm/dist-release_90/lib/Target/X86/X86ISelLowering.cpp	Mon Sep  2 17:48:59 2019	(r351709)
@@ -5059,6 +5059,14 @@ bool X86TargetLowering::shouldFoldMaskToVariableShiftP
   return true;
 }
 
+bool X86TargetLowering::shouldExpandShift(SelectionDAG &DAG,
+                                          SDNode *N) const {
+  if (DAG.getMachineFunction().getFunction().hasMinSize() &&
+      !Subtarget.isOSWindows())
+    return false;
+  return true;
+}
+
 bool X86TargetLowering::shouldSplatInsEltVarIndex(EVT VT) const {
   // Any legal vector type can be splatted more efficiently than
   // loading/spilling from memory.
@@ -44096,7 +44104,8 @@ static SDValue combineScalarToVector(SDNode *N, Select
 
 // Simplify PMULDQ and PMULUDQ operations.
 static SDValue combinePMULDQ(SDNode *N, SelectionDAG &DAG,
-                             TargetLowering::DAGCombinerInfo &DCI) {
+                             TargetLowering::DAGCombinerInfo &DCI,
+                             const X86Subtarget &Subtarget) {
   SDValue LHS = N->getOperand(0);
   SDValue RHS = N->getOperand(1);
 
@@ -44106,8 +44115,9 @@ static SDValue combinePMULDQ(SDNode *N, SelectionDAG &
     return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0), RHS, LHS);
 
   // Multiply by zero.
+  // Don't return RHS as it may contain UNDEFs.
   if (ISD::isBuildVectorAllZeros(RHS.getNode()))
-    return RHS;
+    return getZeroVector(N->getSimpleValueType(0), Subtarget, DAG, SDLoc(N));
 
   // Aggressively peek through ops to get at the demanded low bits.
   APInt DemandedMask = APInt::getLowBitsSet(64, 32);
@@ -44315,7 +44325,7 @@ SDValue X86TargetLowering::PerformDAGCombine(SDNode *N
   case X86ISD::PCMPEQ:
   case X86ISD::PCMPGT:      return combineVectorCompare(N, DAG, Subtarget);
   case X86ISD::PMULDQ:
-  case X86ISD::PMULUDQ:     return combinePMULDQ(N, DAG, DCI);
+  case X86ISD::PMULUDQ:     return combinePMULDQ(N, DAG, DCI, Subtarget);
   }
 
   return SDValue();

Modified: vendor/llvm/dist-release_90/lib/Target/X86/X86ISelLowering.h
==============================================================================
--- vendor/llvm/dist-release_90/lib/Target/X86/X86ISelLowering.h	Mon Sep  2 17:32:57 2019	(r351708)
+++ vendor/llvm/dist-release_90/lib/Target/X86/X86ISelLowering.h	Mon Sep  2 17:48:59 2019	(r351709)
@@ -863,11 +863,7 @@ namespace llvm {
       return VTIsOk(XVT) && VTIsOk(KeptBitsVT);
     }
 
-    bool shouldExpandShift(SelectionDAG &DAG, SDNode *N) const override {
-      if (DAG.getMachineFunction().getFunction().hasMinSize())
-        return false;
-      return true;
-    }
+    bool shouldExpandShift(SelectionDAG &DAG, SDNode *N) const override;
 
     bool shouldSplatInsEltVarIndex(EVT VT) const override;
 

Modified: vendor/llvm/dist-release_90/lib/Target/X86/X86Subtarget.cpp
==============================================================================
--- vendor/llvm/dist-release_90/lib/Target/X86/X86Subtarget.cpp	Mon Sep  2 17:32:57 2019	(r351708)
+++ vendor/llvm/dist-release_90/lib/Target/X86/X86Subtarget.cpp	Mon Sep  2 17:48:59 2019	(r351709)
@@ -146,6 +146,9 @@ unsigned char X86Subtarget::classifyGlobalReference(co
       return X86II::MO_DLLIMPORT;
     return X86II::MO_COFFSTUB;
   }
+  // Some JIT users use *-win32-elf triples; these shouldn't use GOT tables.
+  if (isOSWindows())
+    return X86II::MO_NO_FLAG;
 
   if (is64Bit()) {
     // ELF supports a large, truly PIC code model with non-PC relative GOT

Modified: vendor/llvm/dist-release_90/lib/Transforms/Utils/LoopUnroll.cpp
==============================================================================
--- vendor/llvm/dist-release_90/lib/Transforms/Utils/LoopUnroll.cpp	Mon Sep  2 17:32:57 2019	(r351708)
+++ vendor/llvm/dist-release_90/lib/Transforms/Utils/LoopUnroll.cpp	Mon Sep  2 17:48:59 2019	(r351709)
@@ -711,7 +711,7 @@ LoopUnrollResult llvm::UnrollLoop(Loop *L, UnrollLoopO
 
   auto setDest = [LoopExit, ContinueOnTrue](BasicBlock *Src, BasicBlock *Dest,
                                             ArrayRef<BasicBlock *> NextBlocks,
-                                            BasicBlock *CurrentHeader,
+                                            BasicBlock *BlockInLoop,
                                             bool NeedConditional) {
     auto *Term = cast<BranchInst>(Src->getTerminator());
     if (NeedConditional) {
@@ -723,7 +723,9 @@ LoopUnrollResult llvm::UnrollLoop(Loop *L, UnrollLoopO
       if (Dest != LoopExit) {
         BasicBlock *BB = Src;
         for (BasicBlock *Succ : successors(BB)) {
-          if (Succ == CurrentHeader)
+          // Preserve the incoming value from BB if we are jumping to the block
+          // in the current loop.
+          if (Succ == BlockInLoop)
             continue;
           for (PHINode &Phi : Succ->phis())
             Phi.removeIncomingValue(BB, false);
@@ -794,7 +796,7 @@ LoopUnrollResult llvm::UnrollLoop(Loop *L, UnrollLoopO
         // unconditional branch for some iterations.
         NeedConditional = false;
 
-      setDest(Headers[i], Dest, Headers, Headers[i], NeedConditional);
+      setDest(Headers[i], Dest, Headers, HeaderSucc[i], NeedConditional);
     }
 
     // Set up latches to branch to the new header in the unrolled iterations or



Want to link to this message? Use this URL: <https://mail-archive.FreeBSD.org/cgi/mid.cgi?201909021748.x82Hmxm3019534>