diff --git a/clang-tools-extra/clang-doc/Representation.cpp b/clang-tools-extra/clang-doc/Representation.cpp index 4da93b24c131f..22f12037fe1c8 100644 --- a/clang-tools-extra/clang-doc/Representation.cpp +++ b/clang-tools-extra/clang-doc/Representation.cpp @@ -179,7 +179,7 @@ bool Reference::mergeable(const Reference &Other) { } void Reference::merge(Reference &&Other) { - assert(mergeable(Other)); + assert_DISABLED(mergeable(Other)); if (Name.empty()) Name = Other.Name; if (Path.empty()) @@ -187,7 +187,7 @@ void Reference::merge(Reference &&Other) { } void Info::mergeBase(Info &&Other) { - assert(mergeable(Other)); + assert_DISABLED(mergeable(Other)); if (USR == EmptySID) USR = Other.USR; if (Name == "") @@ -209,7 +209,7 @@ bool Info::mergeable(const Info &Other) { } void SymbolInfo::merge(SymbolInfo &&Other) { - assert(mergeable(Other)); + assert_DISABLED(mergeable(Other)); if (!DefLoc) DefLoc = std::move(Other.DefLoc); // Unconditionally extend the list of locations, since we want all of them. @@ -224,7 +224,7 @@ NamespaceInfo::NamespaceInfo(SymbolID USR, StringRef Name, StringRef Path) : Info(InfoType::IT_namespace, USR, Name, Path) {} void NamespaceInfo::merge(NamespaceInfo &&Other) { - assert(mergeable(Other)); + assert_DISABLED(mergeable(Other)); // Reduce children if necessary. 
reduceChildren(Children.Namespaces, std::move(Other.Children.Namespaces)); reduceChildren(Children.Records, std::move(Other.Children.Records)); @@ -238,7 +238,7 @@ RecordInfo::RecordInfo(SymbolID USR, StringRef Name, StringRef Path) : SymbolInfo(InfoType::IT_record, USR, Name, Path) {} void RecordInfo::merge(RecordInfo &&Other) { - assert(mergeable(Other)); + assert_DISABLED(mergeable(Other)); if (!llvm::to_underlying(TagType)) TagType = Other.TagType; IsTypeDef = IsTypeDef || Other.IsTypeDef; @@ -261,7 +261,7 @@ void RecordInfo::merge(RecordInfo &&Other) { } void EnumInfo::merge(EnumInfo &&Other) { - assert(mergeable(Other)); + assert_DISABLED(mergeable(Other)); if (!Scoped) Scoped = Other.Scoped; if (Members.empty()) @@ -270,7 +270,7 @@ void EnumInfo::merge(EnumInfo &&Other) { } void FunctionInfo::merge(FunctionInfo &&Other) { - assert(mergeable(Other)); + assert_DISABLED(mergeable(Other)); if (!IsMethod) IsMethod = Other.IsMethod; if (!Access) @@ -287,7 +287,7 @@ void FunctionInfo::merge(FunctionInfo &&Other) { } void TypedefInfo::merge(TypedefInfo &&Other) { - assert(mergeable(Other)); + assert_DISABLED(mergeable(Other)); if (!IsUsing) IsUsing = Other.IsUsing; if (Underlying.Type.Name == "") diff --git a/clang-tools-extra/clang-include-fixer/find-all-symbols/HeaderMapCollector.cpp b/clang-tools-extra/clang-include-fixer/find-all-symbols/HeaderMapCollector.cpp index 6ec49cae2a6e6..a91cb58907fc2 100644 --- a/clang-tools-extra/clang-include-fixer/find-all-symbols/HeaderMapCollector.cpp +++ b/clang-tools-extra/clang-include-fixer/find-all-symbols/HeaderMapCollector.cpp @@ -32,7 +32,7 @@ HeaderMapCollector::getMappedHeader(llvm::StringRef Header) const { for (auto &Entry : RegexHeaderMappingTable) { #ifndef NDEBUG std::string Dummy; - assert(Entry.first.isValid(Dummy) && "Regex should never be invalid!"); + assert_DISABLED(Entry.first.isValid(Dummy) && "Regex should never be invalid!"); #endif if (Entry.first.match(Header)) return Entry.second; diff --git 
a/clang-tools-extra/clang-tidy/misc/RedundantExpressionCheck.cpp b/clang-tools-extra/clang-tidy/misc/RedundantExpressionCheck.cpp index 6bb9a349d69b1..0f252af8b3644 100644 --- a/clang-tools-extra/clang-tidy/misc/RedundantExpressionCheck.cpp +++ b/clang-tools-extra/clang-tidy/misc/RedundantExpressionCheck.cpp @@ -756,7 +756,7 @@ static bool retrieveConstExprFromBothSides(const BinaryOperator *&BinOp, const Expr *&LhsConst, const Expr *&RhsConst, const ASTContext *AstCtx) { - assert(areSidesBinaryConstExpressions(BinOp, AstCtx) && + assert_DISABLED(areSidesBinaryConstExpressions(BinOp, AstCtx) && "Both sides of binary operator must be constant expressions!"); MainOpcode = BinOp->getOpcode(); diff --git a/clang/include/clang/AST/Decl.h b/clang/include/clang/AST/Decl.h index 7ff35d73df599..3a32270cec9c3 100644 --- a/clang/include/clang/AST/Decl.h +++ b/clang/include/clang/AST/Decl.h @@ -5008,8 +5008,8 @@ void Redeclarable::setPreviousDecl(decl_type *PrevDecl) { // First one will point to this one as latest. First->RedeclLink.setLatest(static_cast(this)); - assert(!isa(static_cast(this)) || - cast(static_cast(this))->isLinkageValid()); + assert(!isa(static_cast(this)) || + cast(static_cast(this))->isLinkageValid()); } // Inline function definitions. 
diff --git a/clang/include/clang/AST/DeclTemplate.h b/clang/include/clang/AST/DeclTemplate.h index 0f0c0bf6e4ef4..b36d2920cfaaf 100644 --- a/clang/include/clang/AST/DeclTemplate.h +++ b/clang/include/clang/AST/DeclTemplate.h @@ -233,8 +233,12 @@ class FixedSizeTemplateParameterListStorage SourceLocation RAngleLoc, Expr *RequiresClause) : FixedSizeStorageOwner( - (assert(N == Params.size()), - assert(HasRequiresClause == (RequiresClause != nullptr)), + ([&]() { + ((void)Params); + ((void)RequiresClause); + assert(N == Params.size()); + assert(HasRequiresClause == (RequiresClause != nullptr)); + }(), new (static_cast(&storage)) TemplateParameterList(C, TemplateLoc, LAngleLoc, Params, RAngleLoc, RequiresClause))) {} }; diff --git a/clang/include/clang/AST/LexicallyOrderedRecursiveASTVisitor.h b/clang/include/clang/AST/LexicallyOrderedRecursiveASTVisitor.h index 054220b8a32ca..82fcb500bdfb3 100644 --- a/clang/include/clang/AST/LexicallyOrderedRecursiveASTVisitor.h +++ b/clang/include/clang/AST/LexicallyOrderedRecursiveASTVisitor.h @@ -144,7 +144,7 @@ class LexicallyOrderedRecursiveASTVisitor // but right now we only care about getting the correct lexical parent, so // we can traverse the gathered nested declarations after the declarations // in the decl context. 
- assert(!BaseType::getDerived().shouldTraversePostOrder() && + assert_DISABLED(!BaseType::getDerived().shouldTraversePostOrder() && "post-order traversal is not supported for lexically ordered " "recursive ast visitor"); for (Decl *D : LexicallyNestedDeclarations) { diff --git a/clang/include/clang/Lex/Preprocessor.h b/clang/include/clang/Lex/Preprocessor.h index f3f4de044fc41..1081a6b32b8a4 100644 --- a/clang/include/clang/Lex/Preprocessor.h +++ b/clang/include/clang/Lex/Preprocessor.h @@ -1717,7 +1717,7 @@ class Preprocessor { void EnableBacktrackAtThisPos(bool Unannotated = false); private: - std::pair LastBacktrackPos(); + std::pair LastBacktrackPos() const; CachedTokensTy PopUnannotatedBacktrackTokens(); diff --git a/clang/include/clang/Sema/DelayedDiagnostic.h b/clang/include/clang/Sema/DelayedDiagnostic.h index 0105089a393f1..c1e7e441563a0 100644 --- a/clang/include/clang/Sema/DelayedDiagnostic.h +++ b/clang/include/clang/Sema/DelayedDiagnostic.h @@ -325,7 +325,7 @@ class DelayedDiagnosticPool { /// Add a diagnostic to the current delay pool. 
inline void Sema::DelayedDiagnostics::add(const sema::DelayedDiagnostic &diag) { - assert(shouldDelayDiagnostics() && "trying to delay without pool"); + assert_DISABLED(shouldDelayDiagnostics() && "trying to delay without pool"); CurPool->add(diag); } diff --git a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h index 0825ecbced3f5..c94e9dfc0926f 100644 --- a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h +++ b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h @@ -276,7 +276,7 @@ class NodeBuilder { const NodeBuilderContext &Ctx, bool F = true) : C(Ctx), Finalized(F), Frontier(DstSet) { Frontier.insert(SrcSet); - assert(hasNoSinksInFrontier()); + assert_DISABLED(hasNoSinksInFrontier()); } virtual ~NodeBuilder() = default; @@ -303,7 +303,7 @@ class NodeBuilder { const ExplodedNodeSet &getResults() { finalizeResults(); - assert(checkResults()); + assert_DISABLED(checkResults()); return Frontier; } @@ -312,7 +312,7 @@ class NodeBuilder { /// Iterators through the results frontier. 
iterator begin() { finalizeResults(); - assert(checkResults()); + assert_DISABLED(checkResults()); return Frontier.begin(); } diff --git a/clang/lib/AST/ASTContext.cpp b/clang/lib/AST/ASTContext.cpp index 69892bda42b25..3798a0e2ea012 100644 --- a/clang/lib/AST/ASTContext.cpp +++ b/clang/lib/AST/ASTContext.cpp @@ -1536,7 +1536,7 @@ ASTContext::setInstantiatedFromStaticDataMember(VarDecl *Inst, VarDecl *Tmpl, void ASTContext::setTemplateOrSpecializationInfo(VarDecl *Inst, TemplateOrSpecializationInfo TSI) { - assert(!TemplateOrInstantiation[Inst] && + assert_DISABLED(!TemplateOrInstantiation[Inst] && "Already noted what the variable was instantiated from"); TemplateOrInstantiation[Inst] = TSI; } @@ -1556,7 +1556,7 @@ ASTContext::setInstantiatedFromUsingDecl(NamedDecl *Inst, NamedDecl *Pattern) { isa(Inst) || isa(Inst)) && "instantiation did not produce a using decl"); - assert(!InstantiatedFromUsingDecl[Inst] && "pattern already exists"); + assert_DISABLED(!InstantiatedFromUsingDecl[Inst] && "pattern already exists"); InstantiatedFromUsingDecl[Inst] = Pattern; } @@ -1567,7 +1567,7 @@ ASTContext::getInstantiatedFromUsingEnumDecl(UsingEnumDecl *UUD) { void ASTContext::setInstantiatedFromUsingEnumDecl(UsingEnumDecl *Inst, UsingEnumDecl *Pattern) { - assert(!InstantiatedFromUsingEnumDecl[Inst] && "pattern already exists"); + assert_DISABLED(!InstantiatedFromUsingEnumDecl[Inst] && "pattern already exists"); InstantiatedFromUsingEnumDecl[Inst] = Pattern; } @@ -1579,7 +1579,7 @@ ASTContext::getInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst) { void ASTContext::setInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst, UsingShadowDecl *Pattern) { - assert(!InstantiatedFromUsingShadowDecl[Inst] && "pattern already exists"); + assert_DISABLED(!InstantiatedFromUsingShadowDecl[Inst] && "pattern already exists"); InstantiatedFromUsingShadowDecl[Inst] = Pattern; } @@ -1591,7 +1591,7 @@ void ASTContext::setInstantiatedFromUnnamedFieldDecl(FieldDecl *Inst, FieldDecl *Tmpl) { 
assert(!Inst->getDeclName() && "Instantiated field decl is not unnamed"); assert(!Tmpl->getDeclName() && "Template field decl is not unnamed"); - assert(!InstantiatedFromUnnamedFieldDecl[Inst] && + assert_DISABLED(!InstantiatedFromUnnamedFieldDecl[Inst] && "Already noted what unnamed field was instantiated from"); InstantiatedFromUnnamedFieldDecl[Inst] = Tmpl; diff --git a/clang/lib/AST/ByteCode/Compiler.cpp b/clang/lib/AST/ByteCode/Compiler.cpp index 59e09a44d747b..1fb4868c82beb 100644 --- a/clang/lib/AST/ByteCode/Compiler.cpp +++ b/clang/lib/AST/ByteCode/Compiler.cpp @@ -3454,7 +3454,7 @@ bool Compiler::VisitCXXUuidofExpr(const CXXUuidofExpr *E) { if (!this->emitGetPtrGlobal(*GlobalIndex, E)) return false; - assert(this->getRecord(E->getType())); + assert_DISABLED(this->getRecord(E->getType())); const APValue &V = GuidDecl->getAsAPValue(); if (V.getKind() == APValue::None) diff --git a/clang/lib/AST/Decl.cpp b/clang/lib/AST/Decl.cpp index 86913763ef9ff..4940971ba584c 100644 --- a/clang/lib/AST/Decl.cpp +++ b/clang/lib/AST/Decl.cpp @@ -5014,7 +5014,7 @@ RecordDecl::RecordDecl(Kind DK, TagKind TK, const ASTContext &C, SourceLocation IdLoc, IdentifierInfo *Id, RecordDecl *PrevDecl) : TagDecl(DK, TK, C, DC, IdLoc, Id, PrevDecl, StartLoc) { - assert(classof(static_cast(this)) && "Invalid Kind!"); + assert_DISABLED(classof(static_cast(this)) && "Invalid Kind!"); setHasFlexibleArrayMember(false); setAnonymousStructOrUnion(false); setHasObjectMember(false); diff --git a/clang/lib/AST/DeclTemplate.cpp b/clang/lib/AST/DeclTemplate.cpp index 4a506b7be4564..cbbd265e0d232 100644 --- a/clang/lib/AST/DeclTemplate.cpp +++ b/clang/lib/AST/DeclTemplate.cpp @@ -377,7 +377,7 @@ void RedeclarableTemplateDecl::addSpecializationImpl( if (InsertPos) { #ifndef NDEBUG void *CorrectInsertPos; - assert(!findSpecializationImpl(Specializations, + assert_DISABLED(!findSpecializationImpl(Specializations, CorrectInsertPos, SETraits::getTemplateArgs(Entry)) && InsertPos == CorrectInsertPos && 
diff --git a/clang/lib/AST/ExprConstant.cpp b/clang/lib/AST/ExprConstant.cpp index d664c503655ba..bfedbb6af0592 100644 --- a/clang/lib/AST/ExprConstant.cpp +++ b/clang/lib/AST/ExprConstant.cpp @@ -7212,7 +7212,7 @@ class APValueToBufferConverter { // Write out Val with type Ty into Buffer starting at Offset. bool visit(const APValue &Val, QualType Ty, CharUnits Offset) { - assert((size_t)Offset.getQuantity() <= Buffer.size()); + assert_DISABLED((size_t)Offset.getQuantity() <= Buffer.size()); // As a special case, nullptr_t has an indeterminate value. if (Ty->isNullPtrType()) @@ -10860,7 +10860,7 @@ bool RecordExprEvaluator::VisitCXXStdInitializerListExpr( Array.moveInto(Result.getStructField(1)); } - assert(++Field == Record->field_end() && + assert_DISABLED(++Field == Record->field_end() && "Expected std::initializer_list to only have two fields"); return true; diff --git a/clang/lib/AST/MicrosoftCXXABI.cpp b/clang/lib/AST/MicrosoftCXXABI.cpp index 1c020c3ad4ad5..ea735a791a493 100644 --- a/clang/lib/AST/MicrosoftCXXABI.cpp +++ b/clang/lib/AST/MicrosoftCXXABI.cpp @@ -155,7 +155,7 @@ class MicrosoftCXXABI : public CXXABI { addCopyConstructorForExceptionObject(CXXRecordDecl *RD, CXXConstructorDecl *CD) override { assert(CD != nullptr); - assert(RecordToCopyCtor[RD] == nullptr || RecordToCopyCtor[RD] == CD); + assert_DISABLED(RecordToCopyCtor[RD] == nullptr || RecordToCopyCtor[RD] == CD); RecordToCopyCtor[RD] = CD; } diff --git a/clang/lib/AST/RecordLayoutBuilder.cpp b/clang/lib/AST/RecordLayoutBuilder.cpp index d9bf62c2bbb04..063e93b86a855 100644 --- a/clang/lib/AST/RecordLayoutBuilder.cpp +++ b/clang/lib/AST/RecordLayoutBuilder.cpp @@ -2006,7 +2006,7 @@ void ItaniumRecordLayoutBuilder::LayoutField(const FieldDecl *D, auto performBuiltinTypeAlignmentUpgrade = [&](const BuiltinType *BTy) { if (BTy->getKind() == BuiltinType::Double || BTy->getKind() == BuiltinType::LongDouble) { - assert(PreferredAlign == CharUnits::fromQuantity(4) && + assert_DISABLED(PreferredAlign 
== CharUnits::fromQuantity(4) && "No need to upgrade the alignment value."); PreferredAlign = CharUnits::fromQuantity(8); } diff --git a/clang/lib/Analysis/CFG.cpp b/clang/lib/Analysis/CFG.cpp index f678ac6f2ff36..f2109a2dc465c 100644 --- a/clang/lib/Analysis/CFG.cpp +++ b/clang/lib/Analysis/CFG.cpp @@ -1907,7 +1907,7 @@ void CFGBuilder::addAutomaticObjDestruction(LocalScope::const_iterator B, /// * ScopeEnd for each scope left void CFGBuilder::addScopeExitHandling(LocalScope::const_iterator B, LocalScope::const_iterator E, Stmt *S) { - assert(!B.inSameLocalScope(E)); + assert_DISABLED(!B.inSameLocalScope(E)); if (!BuildOpts.AddLifetime && !BuildOpts.AddScopes) return; diff --git a/clang/lib/Analysis/FlowSensitive/DataflowAnalysisContext.cpp b/clang/lib/Analysis/FlowSensitive/DataflowAnalysisContext.cpp index 4b86daa56d7b5..cc62df3fdd044 100644 --- a/clang/lib/Analysis/FlowSensitive/DataflowAnalysisContext.cpp +++ b/clang/lib/Analysis/FlowSensitive/DataflowAnalysisContext.cpp @@ -93,8 +93,8 @@ RecordStorageLocation &DataflowAnalysisContext::createRecordStorageLocation( QualType Type, RecordStorageLocation::FieldToLoc FieldLocs, RecordStorageLocation::SyntheticFieldMap SyntheticFields) { assert(Type->isRecordType()); - assert(containsSameFields(getModeledFields(Type), FieldLocs)); - assert(getKeys(getSyntheticFields(Type)) == getKeys(SyntheticFields)); + assert_DISABLED(containsSameFields(getModeledFields(Type), FieldLocs)); + assert_DISABLED(getKeys(getSyntheticFields(Type)) == getKeys(SyntheticFields)); RecordStorageLocationCreated = true; return arena().create(Type, std::move(FieldLocs), diff --git a/clang/lib/Analysis/FlowSensitive/DataflowEnvironment.cpp b/clang/lib/Analysis/FlowSensitive/DataflowEnvironment.cpp index e1f68e493f355..eb4bc87e4de92 100644 --- a/clang/lib/Analysis/FlowSensitive/DataflowEnvironment.cpp +++ b/clang/lib/Analysis/FlowSensitive/DataflowEnvironment.cpp @@ -852,7 +852,7 @@ void Environment::setStorageLocation(const ValueDecl &D, 
StorageLocation &Loc) { // are declarations of reference type and `BindingDecl`. For all other // declaration, the storage location should be the stable storage location // returned by `createStorageLocation()`. - assert(D.getType()->isReferenceType() || isa(D) || + assert_DISABLED(D.getType()->isReferenceType() || isa(D) || &Loc == &createStorageLocation(D)); DeclToLoc[&D] = &Loc; } diff --git a/clang/lib/CodeGen/CGBlocks.cpp b/clang/lib/CodeGen/CGBlocks.cpp index 41bb8d19d161e..5966253cf278d 100644 --- a/clang/lib/CodeGen/CGBlocks.cpp +++ b/clang/lib/CodeGen/CGBlocks.cpp @@ -1293,7 +1293,7 @@ static llvm::Constant *buildGlobalBlock(CodeGenModule &CGM, // Callers should detect this case on their own: calling this function // generally requires computing layout information, which is a waste of time // if we've already emitted this block. - assert(!CGM.getAddrOfGlobalBlockIfEmitted(blockInfo.BlockExpression) && + assert_DISABLED(!CGM.getAddrOfGlobalBlockIfEmitted(blockInfo.BlockExpression) && "Refusing to re-emit a global block."); // Generate the constants for the block literal initializer. diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp index e2d03eff8ab4a..93fca7926770d 100644 --- a/clang/lib/CodeGen/CGBuiltin.cpp +++ b/clang/lib/CodeGen/CGBuiltin.cpp @@ -4627,7 +4627,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, // Store the stack pointer to the setjmp buffer. 
Value *StackAddr = Builder.CreateStackSave(); - assert(Buf.emitRawPointer(*this)->getType() == StackAddr->getType()); + assert_DISABLED(Buf.emitRawPointer(*this)->getType() == StackAddr->getType()); Address StackSaveSlot = Builder.CreateConstInBoundsGEP(Buf, 2); Builder.CreateStore(StackAddr, StackSaveSlot); diff --git a/clang/lib/CodeGen/CGCXXABI.cpp b/clang/lib/CodeGen/CGCXXABI.cpp index 7c6dfc3e59d8c..8617888bc3e1d 100644 --- a/clang/lib/CodeGen/CGCXXABI.cpp +++ b/clang/lib/CodeGen/CGCXXABI.cpp @@ -156,7 +156,7 @@ llvm::Value *CGCXXABI::loadIncomingCXXThis(CodeGenFunction &CGF) { void CGCXXABI::setCXXABIThisValue(CodeGenFunction &CGF, llvm::Value *ThisPtr) { /// Initialize the 'this' slot. - assert(getThisDecl(CGF) && "no 'this' variable for function"); + assert_DISABLED(getThisDecl(CGF) && "no 'this' variable for function"); CGF.CXXABIThisValue = ThisPtr; } diff --git a/clang/lib/CodeGen/CGClass.cpp b/clang/lib/CodeGen/CGClass.cpp index 352955749a633..7ba17e693ffb6 100644 --- a/clang/lib/CodeGen/CGClass.cpp +++ b/clang/lib/CodeGen/CGClass.cpp @@ -225,7 +225,7 @@ CodeGenFunction::GetAddressOfDirectBaseInCompleteClass(Address This, const CXXRecordDecl *Base, bool BaseIsVirtual) { // 'this' must be a pointer (in some address space) to Derived. - assert(This.getElementType() == ConvertType(Derived)); + assert_DISABLED(This.getElementType() == ConvertType(Derived)); // Compute the offset of the virtual base. 
CharUnits Offset; diff --git a/clang/lib/CodeGen/CGDecl.cpp b/clang/lib/CodeGen/CGDecl.cpp index 1d0660292cecc..62c3be527154c 100644 --- a/clang/lib/CodeGen/CGDecl.cpp +++ b/clang/lib/CodeGen/CGDecl.cpp @@ -2178,7 +2178,7 @@ CodeGenFunction::getDestroyer(QualType::DestructionKind kind) { void CodeGenFunction::pushEHDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type) { assert(dtorKind && "cannot push destructor for trivial type"); - assert(needsEHCleanup(dtorKind)); + assert_DISABLED(needsEHCleanup(dtorKind)); pushDestroy(EHCleanup, addr, type, getDestroyer(dtorKind), true); } diff --git a/clang/lib/CodeGen/CGDeclCXX.cpp b/clang/lib/CodeGen/CGDeclCXX.cpp index 2c3054605ee75..27fd963a6728d 100644 --- a/clang/lib/CodeGen/CGDeclCXX.cpp +++ b/clang/lib/CodeGen/CGDeclCXX.cpp @@ -736,7 +736,7 @@ void CodeGenModule::EmitCXXModuleInitFunc(Module *Primary) { cast(getCXXABI().getMangleContext()) .mangleModuleInitializer(M, Out); } - assert(!GetGlobalValue(FnName.str()) && + assert_DISABLED(!GetGlobalValue(FnName.str()) && "We should only have one use of the initializer call"); llvm::Function *Fn = llvm::Function::Create( FTy, llvm::Function::ExternalLinkage, FnName.str(), &getModule()); @@ -879,7 +879,7 @@ CodeGenModule::EmitCXXGlobalInitFunc() { cast(getCXXABI().getMangleContext()) .mangleModuleInitializer(M, Out); } - assert(!GetGlobalValue(FnName.str()) && + assert_DISABLED(!GetGlobalValue(FnName.str()) && "We should only have one use of the initializer call"); llvm::Function *Fn = llvm::Function::Create( FTy, llvm::Function::ExternalLinkage, FnName.str(), &getModule()); diff --git a/clang/lib/CodeGen/CGException.cpp b/clang/lib/CodeGen/CGException.cpp index 44a45413dbc45..6f6a9202d4704 100644 --- a/clang/lib/CodeGen/CGException.cpp +++ b/clang/lib/CodeGen/CGException.cpp @@ -876,7 +876,7 @@ llvm::BasicBlock *CodeGenFunction::EmitLandingPad() { continue; case EHScope::Filter: { - assert(I.next() == EHStack.end() && "EH filter is not end of EH stack"); 
+ assert_DISABLED(I.next() == EHStack.end() && "EH filter is not end of EH stack"); assert(!hasCatchAll && "EH filter reached after catch-all"); // Filter scopes get added to the landingpad in weird ways. @@ -1440,9 +1440,9 @@ void CodeGenFunction::FinallyInfo::enter(CodeGenFunction &CGF, const Stmt *body, llvm::FunctionCallee beginCatchFn, llvm::FunctionCallee endCatchFn, llvm::FunctionCallee rethrowFn) { - assert((!!beginCatchFn) == (!!endCatchFn) && + assert_DISABLED((!!beginCatchFn) == (!!endCatchFn) && "begin/end catch functions not paired"); - assert(rethrowFn && "rethrow function is required"); + assert_DISABLED(rethrowFn && "rethrow function is required"); BeginCatchFn = beginCatchFn; @@ -1589,7 +1589,7 @@ llvm::BasicBlock *CodeGenFunction::getTerminateHandler() { } llvm::BasicBlock *CodeGenFunction::getTerminateFunclet() { - assert(EHPersonality::get(*this).usesFuncletPads() && + assert_DISABLED(EHPersonality::get(*this).usesFuncletPads() && "use getTerminateLandingPad for non-funclet EH"); llvm::BasicBlock *&TerminateFunclet = TerminateFunclets[CurrentFuncletPad]; diff --git a/clang/lib/CodeGen/CGExprAgg.cpp b/clang/lib/CodeGen/CGExprAgg.cpp index 2ad6587089f10..aa7e91ec1ae2f 100644 --- a/clang/lib/CodeGen/CGExprAgg.cpp +++ b/clang/lib/CodeGen/CGExprAgg.cpp @@ -464,7 +464,7 @@ AggExprEmitter::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) { CGF.EmitStoreThroughLValue(RValue::get(ArrayEnd), EndOrLength); } - assert(++Field == Record->field_end() && + assert_DISABLED(++Field == Record->field_end() && "Expected std::initializer_list to only have two fields"); } diff --git a/clang/lib/CodeGen/CGExprCXX.cpp b/clang/lib/CodeGen/CGExprCXX.cpp index 648b9b9ed9806..44d1bb92a7b50 100644 --- a/clang/lib/CodeGen/CGExprCXX.cpp +++ b/clang/lib/CodeGen/CGExprCXX.cpp @@ -1736,7 +1736,7 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) { cleanupDominator = Builder.CreateUnreachable(); } - assert((allocSize == allocSizeWithoutCookie) == + 
assert_DISABLED((allocSize == allocSizeWithoutCookie) == CalculateCookiePadding(*this, E).isZero()); if (allocSize != allocSizeWithoutCookie) { assert(E->isArray()); @@ -2129,7 +2129,7 @@ void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) { Ptr.getAlignment(), "del.first"); } - assert(ConvertTypeForMem(DeleteTy) == Ptr.getElementType()); + assert_DISABLED(ConvertTypeForMem(DeleteTy) == Ptr.getElementType()); if (E->isArrayForm()) { EmitArrayDelete(*this, E, Ptr, DeleteTy); diff --git a/clang/lib/CodeGen/CGExprScalar.cpp b/clang/lib/CodeGen/CGExprScalar.cpp index 9e2c2ad5e0250..4c9c173b37e76 100644 --- a/clang/lib/CodeGen/CGExprScalar.cpp +++ b/clang/lib/CodeGen/CGExprScalar.cpp @@ -2940,7 +2940,7 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, canPerformLossyDemotionCheck &= PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck( type, promotedType); - assert((!canPerformLossyDemotionCheck || + assert_DISABLED((!canPerformLossyDemotionCheck || type->isSignedIntegerOrEnumerationType() || promotedType->isSignedIntegerOrEnumerationType() || ConvertType(type)->getScalarSizeInBits() == @@ -5803,7 +5803,7 @@ CodeGenFunction::EmitCheckedInBoundsGEP(llvm::Type *ElemTy, Value *Ptr, GEPOffsetAndOverflow EvaluatedGEP = EmitGEPOffsetInBytes(Ptr, GEPVal, getLLVMContext(), CGM, Builder); - assert((!isa(EvaluatedGEP.TotalOffset) || + assert_DISABLED((!isa(EvaluatedGEP.TotalOffset) || EvaluatedGEP.OffsetOverflows == Builder.getFalse()) && "If the offset got constant-folded, we don't expect that there was an " "overflow."); diff --git a/clang/lib/CodeGen/CGHLSLRuntime.cpp b/clang/lib/CodeGen/CGHLSLRuntime.cpp index 2cce2936fe5ae..12956570be451 100644 --- a/clang/lib/CodeGen/CGHLSLRuntime.cpp +++ b/clang/lib/CodeGen/CGHLSLRuntime.cpp @@ -519,7 +519,7 @@ bool CGHLSLRuntime::needsResourceBindingInitFn() { llvm::Function *CGHLSLRuntime::createResourceBindingInitFn() { // No resources to bind - assert(needsResourceBindingInitFn() && 
"no resources to bind"); + assert_DISABLED(needsResourceBindingInitFn() && "no resources to bind"); LLVMContext &Ctx = CGM.getLLVMContext(); llvm::Type *Int1Ty = llvm::Type::getInt1Ty(Ctx); diff --git a/clang/lib/CodeGen/CGObjCGNU.cpp b/clang/lib/CodeGen/CGObjCGNU.cpp index 7a07284f8a8aa..625c79dec4351 100644 --- a/clang/lib/CodeGen/CGObjCGNU.cpp +++ b/clang/lib/CodeGen/CGObjCGNU.cpp @@ -226,7 +226,7 @@ class CGObjCGNU : public CGObjCRuntime { /// Returns a property name and encoding string. llvm::Constant *MakePropertyEncodingString(const ObjCPropertyDecl *PD, const Decl *Container) { - assert(!isRuntime(ObjCRuntime::GNUstep, 2)); + assert_DISABLED(!isRuntime(ObjCRuntime::GNUstep, 2)); if (isRuntime(ObjCRuntime::GNUstep, 1, 6)) { std::string NameAndAttributes; std::string TypeStr = @@ -2000,7 +2000,7 @@ class CGObjCGNUstep2 : public CGObjCGNUstep { MetaClassPtrAlias->eraseFromParent(); MetaClassPtrAlias = nullptr; } - assert(classStruct->getName() == SymbolForClass(className)); + assert_DISABLED(classStruct->getName() == SymbolForClass(className)); auto classInitRef = new llvm::GlobalVariable(TheModule, classStruct->getType(), false, llvm::GlobalValue::ExternalLinkage, diff --git a/clang/lib/CodeGen/CGObjCMac.cpp b/clang/lib/CodeGen/CGObjCMac.cpp index 1c16d273a5535..930f799104a9f 100644 --- a/clang/lib/CodeGen/CGObjCMac.cpp +++ b/clang/lib/CodeGen/CGObjCMac.cpp @@ -7284,7 +7284,7 @@ CGObjCNonFragileABIMac::EmitVTableMessageSend(CodeGenFunction &CGF, messageRefName += "objc_msgSend_fixup"; } } - assert(fn && "CGObjCNonFragileABIMac::EmitMessageSend"); + assert_DISABLED(fn && "CGObjCNonFragileABIMac::EmitMessageSend"); messageRefName += '_'; // Append the selector name, except use underscores anywhere we diff --git a/clang/lib/CodeGen/CGOpenMPRuntime.cpp b/clang/lib/CodeGen/CGOpenMPRuntime.cpp index c0931e82d9875..c2c9b5ff8ffd5 100644 --- a/clang/lib/CodeGen/CGOpenMPRuntime.cpp +++ b/clang/lib/CodeGen/CGOpenMPRuntime.cpp @@ -11245,7 +11245,7 @@ static void 
EmitDoacrossOrdered(CodeGenFunction &CGF, CodeGenModule &CGM, RTLFn = OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(), OMPRTL___kmpc_doacross_post); } else { - assert(ODK.isSink(C) && "Expect sink modifier."); + assert_DISABLED(ODK.isSink(C) && "Expect sink modifier."); RTLFn = OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(), OMPRTL___kmpc_doacross_wait); } @@ -11857,7 +11857,7 @@ void CGOpenMPRuntime::checkAndEmitLastprivateConditional(CodeGenFunction &CGF, // Special codegen for inner parallel regions. // ((struct.lastprivate.conditional*)&priv_a)->Fired = 1; auto It = LastprivateConditionalToTypes[FoundFn].find(FoundD); - assert(It != LastprivateConditionalToTypes[FoundFn].end() && + assert_DISABLED(It != LastprivateConditionalToTypes[FoundFn].end() && "Lastprivate conditional is not found in outer region."); QualType StructTy = std::get<0>(It->getSecond()); const FieldDecl* FiredDecl = std::get<2>(It->getSecond()); diff --git a/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp b/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp index c66d5d11b0bbf..1bedbcbc08583 100644 --- a/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp +++ b/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp @@ -1259,7 +1259,7 @@ void CGOpenMPRuntimeGPU::emitParallelCall(CodeGenFunction &CGF, else NumThreadsVal = Bld.CreateZExtOrTrunc(NumThreadsVal, CGF.Int32Ty), - assert(IfCondVal && "Expected a value"); + assert_DISABLED(IfCondVal && "Expected a value"); llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc); llvm::Value *Args[] = { RTLoc, diff --git a/clang/lib/CodeGen/CGPointerAuth.cpp b/clang/lib/CodeGen/CGPointerAuth.cpp index 0c63b9d6bb7e7..1dba96fd6516d 100644 --- a/clang/lib/CodeGen/CGPointerAuth.cpp +++ b/clang/lib/CodeGen/CGPointerAuth.cpp @@ -326,7 +326,7 @@ llvm::Constant *CodeGenModule::getConstantSignedPointer( llvm::Constant *Pointer, const PointerAuthSchema &Schema, llvm::Constant *StorageAddress, GlobalDecl SchemaDecl, QualType SchemaType) { - assert(shouldSignPointer(Schema)); + 
assert_DISABLED(shouldSignPointer(Schema)); llvm::ConstantInt *OtherDiscriminator = getPointerAuthOtherDiscriminator(Schema, SchemaDecl, SchemaType); diff --git a/clang/lib/CodeGen/CodeGenFunction.h b/clang/lib/CodeGen/CodeGenFunction.h index 750a6cc24badc..86ac6f3313c29 100644 --- a/clang/lib/CodeGen/CodeGenFunction.h +++ b/clang/lib/CodeGen/CodeGenFunction.h @@ -1557,7 +1557,7 @@ class CodeGenFunction : public CodeGenTypeCache { void emitExit(CodeGenFunction &CGF, OpenMPDirectiveKind Kind, const llvm::function_ref CodeGen) { if (Stack.back().Kind == Kind && getExitBlock().isValid()) { - assert(CGF.getOMPCancelDestination(Kind).isValid()); + assert_DISABLED(CGF.getOMPCancelDestination(Kind).isValid()); assert(CGF.HaveInsertPoint()); assert(!Stack.back().HasBeenEmitted); auto IP = CGF.Builder.saveAndClearIP(); @@ -1584,7 +1584,7 @@ class CodeGenFunction : public CodeGenTypeCache { /// has not be used) + join point for cancel/normal exits. void exit(CodeGenFunction &CGF) { if (getExitBlock().isValid()) { - assert(CGF.getOMPCancelDestination(Stack.back().Kind).isValid()); + assert_DISABLED(CGF.getOMPCancelDestination(Stack.back().Kind).isValid()); bool HaveIP = CGF.HaveInsertPoint(); if (!Stack.back().HasBeenEmitted) { if (HaveIP) diff --git a/clang/lib/CodeGen/CodeGenModule.cpp b/clang/lib/CodeGen/CodeGenModule.cpp index 2bcca5e85bdfe..d51f90828952f 100644 --- a/clang/lib/CodeGen/CodeGenModule.cpp +++ b/clang/lib/CodeGen/CodeGenModule.cpp @@ -3302,11 +3302,11 @@ void CodeGenModule::EmitVTablesOpportunistically() { // is not allowed to create new references to things that need to be emitted // lazily. Note that it also uses fact that we eagerly emitting RTTI. 
- assert((OpportunisticVTables.empty() || shouldOpportunisticallyEmitVTables()) + assert_DISABLED((OpportunisticVTables.empty() || shouldOpportunisticallyEmitVTables()) && "Only emit opportunistic vtables with optimizations"); for (const CXXRecordDecl *RD : OpportunisticVTables) { - assert(getVTables().isVTableExternal(RD) && + assert_DISABLED(getVTables().isVTableExternal(RD) && "This queue should only contain external vtables"); if (getCXXABI().canSpeculativelyEmitVTable(RD)) VTables.GenerateClassData(RD); @@ -3933,7 +3933,7 @@ void CodeGenModule::EmitGlobal(GlobalDecl GD) { addDeferredDeclToEmit(GD); } else if (MustBeEmitted(Global)) { // The value must be emitted, but cannot be emitted eagerly. - assert(!MayBeEmittedEagerly(Global)); + assert_DISABLED(!MayBeEmittedEagerly(Global)); addDeferredDeclToEmit(GD); } else { // Otherwise, remember that we saw a deferred decl with this name. The diff --git a/clang/lib/CodeGen/CoverageMappingGen.cpp b/clang/lib/CodeGen/CoverageMappingGen.cpp index 45518736a2ac3..4b16cf85f0ed9 100644 --- a/clang/lib/CodeGen/CoverageMappingGen.cpp +++ b/clang/lib/CodeGen/CoverageMappingGen.cpp @@ -1065,7 +1065,7 @@ struct CounterCoverageMappingBuilder } assert(SM.isWrittenInSameFile(Region.getBeginLoc(), EndLoc)); - assert(SpellingRegion(SM, Region).isInSourceOrder()); + assert_DISABLED(SpellingRegion(SM, Region).isInSourceOrder()); SourceRegions.push_back(Region); } RegionStack.pop_back(); @@ -1249,7 +1249,7 @@ struct CounterCoverageMappingBuilder if (StartLocs.insert(FileStart).second) { SourceRegions.emplace_back(*ParentCounter, FileStart, getEndOfFileOrMacro(Loc)); - assert(SpellingRegion(SM, SourceRegions.back()).isInSourceOrder()); + assert_DISABLED(SpellingRegion(SM, SourceRegions.back()).isInSourceOrder()); } Loc = getIncludeOrExpansionLoc(Loc); } @@ -1303,7 +1303,7 @@ struct CounterCoverageMappingBuilder bool UnnestStart = StartDepth >= EndDepth; bool UnnestEnd = EndDepth >= StartDepth; if (UnnestEnd) { - 
assert(SM.isWrittenInSameFile(getStartOfFileOrMacro(BeforeLoc), + assert_DISABLED(SM.isWrittenInSameFile(getStartOfFileOrMacro(BeforeLoc), BeforeLoc)); BeforeLoc = getIncludeOrExpansionLoc(BeforeLoc); @@ -1311,7 +1311,7 @@ struct CounterCoverageMappingBuilder EndDepth--; } if (UnnestStart) { - assert(SM.isWrittenInSameFile(AfterLoc, + assert_DISABLED(SM.isWrittenInSameFile(AfterLoc, getEndOfFileOrMacro(AfterLoc))); AfterLoc = getIncludeOrExpansionLoc(AfterLoc); @@ -1363,7 +1363,7 @@ struct CounterCoverageMappingBuilder bool UnnestStart = StartDepth >= EndDepth; bool UnnestEnd = EndDepth >= StartDepth; if (UnnestEnd) { - assert(SM.isWrittenInSameFile(getStartOfFileOrMacro(BeforeLoc), + assert_DISABLED(SM.isWrittenInSameFile(getStartOfFileOrMacro(BeforeLoc), BeforeLoc)); BeforeLoc = getIncludeOrExpansionLoc(BeforeLoc); @@ -1371,7 +1371,7 @@ struct CounterCoverageMappingBuilder EndDepth--; } if (UnnestStart) { - assert(SM.isWrittenInSameFile(StartingLoc, + assert_DISABLED(SM.isWrittenInSameFile(StartingLoc, getStartOfFileOrMacro(StartingLoc))); StartingLoc = getIncludeOrExpansionLoc(StartingLoc); diff --git a/clang/lib/CodeGen/ItaniumCXXABI.cpp b/clang/lib/CodeGen/ItaniumCXXABI.cpp index 9b3c2f1b2af67..9ee0ad773be90 100644 --- a/clang/lib/CodeGen/ItaniumCXXABI.cpp +++ b/clang/lib/CodeGen/ItaniumCXXABI.cpp @@ -2109,7 +2109,7 @@ ItaniumCXXABI::getVTableAddressPoint(BaseSubobject Base, llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructorWithVTT( CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base, const CXXRecordDecl *NearestVBase) { - assert((Base.getBase()->getNumVBases() || NearestVBase != nullptr) && + assert_DISABLED((Base.getBase()->getNumVBases() || NearestVBase != nullptr) && NeedsVTTParameter(CGF.CurGD) && "This class doesn't have VTT"); // Get the secondary vpointer index. 
@@ -2439,7 +2439,7 @@ Address ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF, llvm::Value *NumElements, const CXXNewExpr *expr, QualType ElementType) { - assert(requiresArrayCookie(expr)); + assert_DISABLED(requiresArrayCookie(expr)); unsigned AS = NewPtr.getAddressSpace(); @@ -2449,7 +2449,7 @@ Address ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF, // The size of the cookie. CharUnits CookieSize = std::max(SizeSize, Ctx.getPreferredTypeAlignInChars(ElementType)); - assert(CookieSize == getArrayCookieSizeImpl(ElementType)); + assert_DISABLED(CookieSize == getArrayCookieSizeImpl(ElementType)); // Compute an offset to the cookie. Address CookiePtr = NewPtr; @@ -2523,7 +2523,7 @@ Address ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF, llvm::Value *numElements, const CXXNewExpr *expr, QualType elementType) { - assert(requiresArrayCookie(expr)); + assert_DISABLED(requiresArrayCookie(expr)); // The cookie is always at the start of the buffer. Address cookie = newPtr; diff --git a/clang/lib/CodeGen/MicrosoftCXXABI.cpp b/clang/lib/CodeGen/MicrosoftCXXABI.cpp index 0b0b45ffead92..904df67f85826 100644 --- a/clang/lib/CodeGen/MicrosoftCXXABI.cpp +++ b/clang/lib/CodeGen/MicrosoftCXXABI.cpp @@ -1601,7 +1601,7 @@ void MicrosoftCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) { CGF.Builder.CreateStore(getThisValue(CGF), CGF.ReturnValue); if (isa(MD) && MD->getParent()->getNumVBases()) { - assert(getStructorImplicitParamDecl(CGF) && + assert_DISABLED(getStructorImplicitParamDecl(CGF) && "no implicit parameter for a constructor with virtual bases?"); getStructorImplicitParamValue(CGF) = CGF.Builder.CreateLoad( @@ -1610,7 +1610,7 @@ void MicrosoftCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) { } if (isDeletingDtor(CGF.CurGD)) { - assert(getStructorImplicitParamDecl(CGF) && + assert_DISABLED(getStructorImplicitParamDecl(CGF) && "no implicit parameter for a deleting destructor?"); getStructorImplicitParamValue(CGF) = 
CGF.Builder.CreateLoad( @@ -2332,7 +2332,7 @@ Address MicrosoftCXXABI::InitializeArrayCookie(CodeGenFunction &CGF, llvm::Value *numElements, const CXXNewExpr *expr, QualType elementType) { - assert(requiresArrayCookie(expr)); + assert_DISABLED(requiresArrayCookie(expr)); // The size of the cookie. CharUnits cookieSize = getArrayCookieSizeImpl(elementType); @@ -2843,7 +2843,7 @@ MicrosoftCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) { if (fields.size() == 1) return fields[0]; llvm::Constant *Res = llvm::ConstantStruct::getAnon(fields); - assert(Res->getType() == ConvertMemberPointerType(MPT)); + assert_DISABLED(Res->getType() == ConvertMemberPointerType(MPT)); return Res; } diff --git a/clang/lib/Format/FormatTokenSource.h b/clang/lib/Format/FormatTokenSource.h index 8f00e5f4582c6..ac87acab440c2 100644 --- a/clang/lib/Format/FormatTokenSource.h +++ b/clang/lib/Format/FormatTokenSource.h @@ -214,7 +214,7 @@ class ScopedMacroState : public FormatTokenSource { FormatToken *getNextToken() override { // The \c UnwrappedLineParser guards against this by never calling // \c getNextToken() after it has encountered the first eof token. 
- assert(!eof()); + assert_DISABLED(!eof()); PreviousToken = Token; Token = PreviousTokenSource->getNextToken(); if (eof()) diff --git a/clang/lib/Frontend/CompilerInstance.cpp b/clang/lib/Frontend/CompilerInstance.cpp index 240305b33824b..19acb6d84acb0 100644 --- a/clang/lib/Frontend/CompilerInstance.cpp +++ b/clang/lib/Frontend/CompilerInstance.cpp @@ -1223,7 +1223,7 @@ compileModuleImpl(CompilerInstance &ImportingInstance, SourceLocation ImportLoc, DiagnosticOptions &DiagOpts = Invocation->getDiagnosticOpts(); DiagOpts.VerifyDiagnostics = 0; - assert(ImportingInstance.getInvocation().getModuleHash() == + assert_DISABLED(ImportingInstance.getInvocation().getModuleHash() == Invocation->getModuleHash() && "Module hash mismatch!"); // Construct a compiler instance that will be used to actually create the diff --git a/clang/lib/Lex/ModuleMap.cpp b/clang/lib/Lex/ModuleMap.cpp index 0a02a63deba3d..b3a09e850cf37 100644 --- a/clang/lib/Lex/ModuleMap.cpp +++ b/clang/lib/Lex/ModuleMap.cpp @@ -935,7 +935,7 @@ Module *ModuleMap::createModuleUnitWithKind(SourceLocation Loc, StringRef Name, Module *ModuleMap::createModuleForInterfaceUnit(SourceLocation Loc, StringRef Name) { assert(LangOpts.CurrentModule == Name && "module name mismatch"); - assert(!Modules[Name] && "redefining existing module"); + assert_DISABLED(!Modules[Name] && "redefining existing module"); auto *Result = createModuleUnitWithKind(Loc, Name, Module::ModuleInterfaceUnit); @@ -954,14 +954,14 @@ Module *ModuleMap::createModuleForImplementationUnit(SourceLocation Loc, StringRef Name) { assert(LangOpts.CurrentModule == Name && "module name mismatch"); // The interface for this implementation must exist and be loaded. 
- assert(Modules[Name] && Modules[Name]->Kind == Module::ModuleInterfaceUnit && + assert_DISABLED(Modules[Name] && Modules[Name]->Kind == Module::ModuleInterfaceUnit && "creating implementation module without an interface"); // Create an entry in the modules map to own the implementation unit module. // User module names must not start with a period (so that this cannot clash // with any legal user-defined module name). StringRef IName = ".ImplementationUnit"; - assert(!Modules[IName] && "multiple implementation units?"); + assert_DISABLED(!Modules[IName] && "multiple implementation units?"); auto *Result = createModuleUnitWithKind(Loc, Name, Module::ModuleImplementationUnit); @@ -977,7 +977,7 @@ Module *ModuleMap::createModuleForImplementationUnit(SourceLocation Loc, Module *ModuleMap::createHeaderUnit(SourceLocation Loc, StringRef Name, Module::Header H) { assert(LangOpts.CurrentModule == Name && "module name mismatch"); - assert(!Modules[Name] && "redefining existing module"); + assert_DISABLED(!Modules[Name] && "redefining existing module"); auto *Result = new (ModulesAlloc.Allocate()) Module(ModuleConstructorTag{}, Name, Loc, nullptr, /*IsFramework=*/false, diff --git a/clang/lib/Lex/PPCaching.cpp b/clang/lib/Lex/PPCaching.cpp index cbacda9d31ae2..7e02b37cee56a 100644 --- a/clang/lib/Lex/PPCaching.cpp +++ b/clang/lib/Lex/PPCaching.cpp @@ -15,7 +15,7 @@ using namespace clang; std::pair -Preprocessor::LastBacktrackPos() { +Preprocessor::LastBacktrackPos() const { assert(isBacktrackEnabled()); auto BacktrackPos = BacktrackPositions.back(); bool Unannotated = diff --git a/clang/lib/Parse/ParseCXXInlineMethods.cpp b/clang/lib/Parse/ParseCXXInlineMethods.cpp index b461743833c82..894643ad283c3 100644 --- a/clang/lib/Parse/ParseCXXInlineMethods.cpp +++ b/clang/lib/Parse/ParseCXXInlineMethods.cpp @@ -756,7 +756,7 @@ void Parser::ParseLexedAttributes(ParsingClass &Class) { /// Parse all attributes in LAs, and attach them to Decl D. 
void Parser::ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D, bool EnterScope, bool OnDefinition) { - assert(LAs.parseSoon() && + assert_DISABLED(LAs.parseSoon() && "Attribute list should be marked for immediate parsing."); for (unsigned i = 0, ni = LAs.size(); i < ni; ++i) { if (D) diff --git a/clang/lib/Parse/ParseDecl.cpp b/clang/lib/Parse/ParseDecl.cpp index 31984453487ae..8f4a32a578165 100644 --- a/clang/lib/Parse/ParseDecl.cpp +++ b/clang/lib/Parse/ParseDecl.cpp @@ -1874,7 +1874,7 @@ void Parser::ParseTypeTagForDatatypeAttribute( /// this doesn't appear to actually be an attribute-specifier, and the caller /// should try to parse it. bool Parser::DiagnoseProhibitedCXX11Attribute() { - assert(Tok.is(tok::l_square) && NextToken().is(tok::l_square)); + assert_DISABLED(Tok.is(tok::l_square) && NextToken().is(tok::l_square)); switch (isCXX11AttributeSpecifier(/*Disambiguate*/true)) { case CAK_NotAttributeSpecifier: @@ -1905,7 +1905,7 @@ bool Parser::DiagnoseProhibitedCXX11Attribute() { /// provide a fixit moving them to the right place. void Parser::DiagnoseMisplacedCXX11Attribute(ParsedAttributes &Attrs, SourceLocation CorrectLocation) { - assert((Tok.is(tok::l_square) && NextToken().is(tok::l_square)) || + assert_DISABLED((Tok.is(tok::l_square) && NextToken().is(tok::l_square)) || Tok.is(tok::kw_alignas) || Tok.isRegularKeywordAttribute()); // Consume the attributes. @@ -5035,7 +5035,7 @@ void Parser::ParseStructDeclaration( // `Parser::ParseLexedAttributeList`. 
void Parser::ParseLexedCAttributeList(LateParsedAttrList &LAs, bool EnterScope, ParsedAttributes *OutAttrs) { - assert(LAs.parseSoon() && + assert_DISABLED(LAs.parseSoon() && "Attribute list should be marked for immediate parsing."); for (auto *LA : LAs) { ParseLexedCAttribute(*LA, EnterScope, OutAttrs); @@ -8453,7 +8453,7 @@ void Parser::ParseTypeofSpecifier(DeclSpec &DS) { /// _Atomic ( type-name ) /// void Parser::ParseAtomicSpecifier(DeclSpec &DS) { - assert(Tok.is(tok::kw__Atomic) && NextToken().is(tok::l_paren) && + assert_DISABLED(Tok.is(tok::kw__Atomic) && NextToken().is(tok::l_paren) && "Not an atomic specifier"); SourceLocation StartLoc = ConsumeToken(); diff --git a/clang/lib/Parse/ParseDeclCXX.cpp b/clang/lib/Parse/ParseDeclCXX.cpp index 60aab1411a96c..91c26aec3218c 100644 --- a/clang/lib/Parse/ParseDeclCXX.cpp +++ b/clang/lib/Parse/ParseDeclCXX.cpp @@ -4938,7 +4938,7 @@ void Parser::ParseCXX11AttributeSpecifierInternal(ParsedAttributes &Attrs, return; } - assert(Tok.is(tok::l_square) && NextToken().is(tok::l_square) && + assert_DISABLED(Tok.is(tok::l_square) && NextToken().is(tok::l_square) && "Not a double square bracket attribute list"); SourceLocation OpenLoc = Tok.getLocation(); diff --git a/clang/lib/Parse/ParseExprCXX.cpp b/clang/lib/Parse/ParseExprCXX.cpp index ce3624f366a2a..69e50a84be069 100644 --- a/clang/lib/Parse/ParseExprCXX.cpp +++ b/clang/lib/Parse/ParseExprCXX.cpp @@ -670,7 +670,7 @@ ExprResult Parser::tryParseCXXIdExpression(CXXScopeSpec &SS, } ExprResult Parser::ParseCXXPackIndexingExpression(ExprResult PackIdExpression) { - assert(Tok.is(tok::ellipsis) && NextToken().is(tok::l_square) && + assert_DISABLED(Tok.is(tok::ellipsis) && NextToken().is(tok::l_square) && "expected ...["); SourceLocation EllipsisLoc = ConsumeToken(); BalancedDelimiterTracker T(*this, tok::l_square); @@ -4064,7 +4064,7 @@ Parser::ParseCXXAmbiguousParenExpression(ParenParseOption &ExprType, ColonProtectionRAIIObject &ColonProt) { assert(getLangOpts().CPlusPlus 
&& "Should only be called for C++!"); assert(ExprType == CastExpr && "Compound literals are not ambiguous!"); - assert(isTypeIdInParens() && "Not a type-id!"); + assert_DISABLED(isTypeIdInParens() && "Not a type-id!"); ExprResult Result(true); CastTy = nullptr; diff --git a/clang/lib/Parse/ParseTemplate.cpp b/clang/lib/Parse/ParseTemplate.cpp index 0953cfc3c017e..00b6b67139375 100644 --- a/clang/lib/Parse/ParseTemplate.cpp +++ b/clang/lib/Parse/ParseTemplate.cpp @@ -688,7 +688,7 @@ bool Parser::TryAnnotateTypeConstraint() { /// 'typename' ...[opt][C++0x] identifier[opt] /// 'typename' identifier[opt] '=' type-id NamedDecl *Parser::ParseTypeParameter(unsigned Depth, unsigned Position) { - assert((Tok.isOneOf(tok::kw_class, tok::kw_typename) || + assert_DISABLED((Tok.isOneOf(tok::kw_class, tok::kw_typename) || isTypeConstraintAnnotation()) && "A type-parameter starts with 'class', 'typename' or a " "type-constraint"); diff --git a/clang/lib/Parse/ParseTentative.cpp b/clang/lib/Parse/ParseTentative.cpp index 9f6b4f6118ede..1a63a96277a28 100644 --- a/clang/lib/Parse/ParseTentative.cpp +++ b/clang/lib/Parse/ParseTentative.cpp @@ -454,7 +454,7 @@ struct Parser::ConditionDeclarationOrInitStatementState { switch (IsDecl) { case TPResult::True: markNotExpression(); - assert(resolved() && "can't continue after tentative parsing bails out"); + assert_DISABLED(resolved() && "can't continue after tentative parsing bails out"); break; case TPResult::False: CanBeCondition = CanBeInitStatement = CanBeForRangeDecl = false; @@ -1724,7 +1724,7 @@ Parser::isCXXDeclarationSpecifier(ImplicitTypenameContext AllowImplicitTypename, } // Annotated it, check again. 
- assert(Tok.isNot(tok::annot_cxxscope) || + assert_DISABLED(Tok.isNot(tok::annot_cxxscope) || NextToken().isNot(tok::identifier)); return isCXXDeclarationSpecifier(AllowImplicitTypename, BracedCastResult, InvalidAsDeclSpec); diff --git a/clang/lib/Parse/Parser.cpp b/clang/lib/Parse/Parser.cpp index 04c2f1d380bc4..b3f181ba22dcc 100644 --- a/clang/lib/Parse/Parser.cpp +++ b/clang/lib/Parse/Parser.cpp @@ -2239,7 +2239,7 @@ bool Parser::TryAnnotateTypeOrScopeTokenAfterScopeSpec( bool Parser::TryAnnotateCXXScopeToken(bool EnteringContext) { assert(getLangOpts().CPlusPlus && "Call sites of this function should be guarded by checking for C++"); - assert(MightBeCXXScopeToken() && "Cannot be a type or scope token!"); + assert_DISABLED(MightBeCXXScopeToken() && "Cannot be a type or scope token!"); CXXScopeSpec SS; if (ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr, diff --git a/clang/lib/Sema/SemaCXXScopeSpec.cpp b/clang/lib/Sema/SemaCXXScopeSpec.cpp index 5b2d65247e72e..485817baf269a 100644 --- a/clang/lib/Sema/SemaCXXScopeSpec.cpp +++ b/clang/lib/Sema/SemaCXXScopeSpec.cpp @@ -1043,7 +1043,7 @@ void Sema::ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS) { assert(SS.isSet() && "Parser passed invalid CXXScopeSpec."); if (SS.isInvalid()) return; - assert(!SS.isInvalid() && computeDeclContext(SS, true) && + assert_DISABLED(!SS.isInvalid() && computeDeclContext(SS, true) && "exiting declarator scope we never really entered"); ExitDeclaratorContext(S); } diff --git a/clang/lib/Sema/SemaDecl.cpp b/clang/lib/Sema/SemaDecl.cpp index 25061f02c13f6..f382d0ef85ff3 100644 --- a/clang/lib/Sema/SemaDecl.cpp +++ b/clang/lib/Sema/SemaDecl.cpp @@ -7558,7 +7558,7 @@ NamedDecl *Sema::ActOnVariableDeclarator( if (!TemplateParamLists.empty() && IsMemberSpecialization && CheckTemplateDeclScope(S, TemplateParamLists.back())) return nullptr; - assert((Invalid || + assert_DISABLED((Invalid || D.getName().getKind() != UnqualifiedIdKind::IK_TemplateId) && "should have a 
'template<>' for this decl"); } @@ -16127,7 +16127,7 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body, assert((FD == getCurFunctionDecl(/*AllowLambdas=*/true)) && "Function parsing confused"); } else if (ObjCMethodDecl *MD = dyn_cast_or_null(dcl)) { - assert(MD == getCurMethodDecl() && "Method parsing confused"); + assert_DISABLED(MD == getCurMethodDecl() && "Method parsing confused"); MD->setBody(Body); if (!MD->isInvalidDecl()) { DiagnoseSizeOfParametersAndReturnValue(MD->parameters(), diff --git a/clang/lib/Sema/SemaDeclCXX.cpp b/clang/lib/Sema/SemaDeclCXX.cpp index 1a691c0e1689d..82e2ca02966ec 100644 --- a/clang/lib/Sema/SemaDeclCXX.cpp +++ b/clang/lib/Sema/SemaDeclCXX.cpp @@ -9742,7 +9742,7 @@ bool Sema::ShouldDeleteSpecialMember(CXXMethodDecl *MD, // is treated as certain special member, which may not reflect what special // member MD really is. However inferTargetForImplicitSpecialMember // expects CSM to match MD, therefore recalculate CSM. - assert(ICI || CSM == getSpecialMember(MD)); + assert_DISABLED(ICI || CSM == getSpecialMember(MD)); auto RealCSM = CSM; if (ICI) RealCSM = getSpecialMember(MD); @@ -15442,7 +15442,7 @@ void Sema::DefineImplicitMoveAssignment(SourceLocation CurrentLocation, /*IsArrow=*/false, MemberLookup); MemberBuilder To(ObjectParameter, ObjectType, IsArrow, MemberLookup); - assert(!From.build(*this, Loc)->isLValue() && // could be xvalue or prvalue + assert_DISABLED(!From.build(*this, Loc)->isLValue() && // could be xvalue or prvalue "Member reference with rvalue base must be rvalue except for reference " "members, which aren't allowed for move assignment."); diff --git a/clang/lib/Sema/SemaExprMember.cpp b/clang/lib/Sema/SemaExprMember.cpp index f1ba26f38520a..3e81a6b8f5ac6 100644 --- a/clang/lib/Sema/SemaExprMember.cpp +++ b/clang/lib/Sema/SemaExprMember.cpp @@ -580,7 +580,7 @@ Sema::ActOnDependentMemberExpr(Expr *BaseExpr, QualType BaseType, } } - assert(BaseType->isDependentType() || 
NameInfo.getName().isDependentName() || + assert_DISABLED(BaseType->isDependentType() || NameInfo.getName().isDependentName() || isDependentScopeSpecifier(SS) || (TemplateArgs && llvm::any_of(TemplateArgs->arguments(), [](const TemplateArgumentLoc &Arg) { @@ -1000,7 +1000,7 @@ Sema::BuildMemberReferenceExpr(Expr *BaseExpr, QualType BaseExprType, } R.setBaseObjectType(BaseType); - assert((SS.isEmpty() + assert_DISABLED((SS.isEmpty() ? !BaseType->isDependentType() || computeDeclContext(BaseType) : !isDependentScopeSpecifier(SS) || computeDeclContext(SS)) && "dependent lookup context that isn't the current instantiation?"); diff --git a/clang/lib/Sema/SemaHLSL.cpp b/clang/lib/Sema/SemaHLSL.cpp index 1f6c5b8d4561b..6a1f97af3e959 100644 --- a/clang/lib/Sema/SemaHLSL.cpp +++ b/clang/lib/Sema/SemaHLSL.cpp @@ -110,7 +110,7 @@ static ResourceClass getResourceClass(RegisterType RT) { DeclBindingInfo *ResourceBindings::addDeclBindingInfo(const VarDecl *VD, ResourceClass ResClass) { - assert(getDeclBindingInfo(VD, ResClass) == nullptr && + assert_DISABLED(getDeclBindingInfo(VD, ResClass) == nullptr && "DeclBindingInfo already added"); assert(!hasBindingInfoForDecl(VD) || BindingsList.back().Decl == VD); // VarDecl may have multiple entries for different resource classes. 
diff --git a/clang/lib/Sema/SemaInit.cpp b/clang/lib/Sema/SemaInit.cpp index 573e90aced3ee..3bd171cb9414a 100644 --- a/clang/lib/Sema/SemaInit.cpp +++ b/clang/lib/Sema/SemaInit.cpp @@ -5009,7 +5009,7 @@ static OverloadingResult TryRefInitWithConversionFunction( QualType cv2T2 = Initializer->getType(); QualType T2 = cv2T2.getUnqualifiedType(); - assert(!S.CompareReferenceRelationship(Initializer->getBeginLoc(), T1, T2) && + assert_DISABLED(!S.CompareReferenceRelationship(Initializer->getBeginLoc(), T1, T2) && "Must have incompatible references when binding via conversion"); // Build the candidate set directly in the initialization sequence @@ -8664,7 +8664,7 @@ static void diagnoseListInit(Sema &S, const InitializedEntity &Entity, InitListChecker DiagnoseInitList(S, Entity, InitList, DestType, /*VerifyOnly=*/false, /*TreatUnavailableAsInvalid=*/false); - assert(DiagnoseInitList.HadError() && + assert_DISABLED(DiagnoseInitList.HadError() && "Inconsistent init list check result."); } diff --git a/clang/lib/Sema/SemaLambda.cpp b/clang/lib/Sema/SemaLambda.cpp index e7afa0f4c81fc..2486f417f45d0 100644 --- a/clang/lib/Sema/SemaLambda.cpp +++ b/clang/lib/Sema/SemaLambda.cpp @@ -190,7 +190,7 @@ clang::getStackIndexOfNearestEnclosingCaptureCapableLambda( return NoLambdaIsCaptureCapable; const unsigned IndexOfCaptureReadyLambda = *OptionalStackIndex; - assert(((IndexOfCaptureReadyLambda != (FunctionScopes.size() - 1)) || + assert_DISABLED(((IndexOfCaptureReadyLambda != (FunctionScopes.size() - 1)) || S.getCurGenericLambda()) && "The capture ready lambda for a potential capture can only be the " "current lambda if it is a generic lambda"); diff --git a/clang/lib/Sema/SemaLookup.cpp b/clang/lib/Sema/SemaLookup.cpp index 8468e9a730c2d..24d263631f8c9 100644 --- a/clang/lib/Sema/SemaLookup.cpp +++ b/clang/lib/Sema/SemaLookup.cpp @@ -1938,7 +1938,7 @@ bool Sema::isModuleVisible(const Module *M, bool ModulePrivate) { // FIXME: Return false directly if we don't have an interface 
dependency on the // translation unit containing D. bool LookupResult::isReachableSlow(Sema &SemaRef, NamedDecl *D) { - assert(!isVisible(SemaRef, D) && "Shouldn't call the slow case.\n"); + assert_DISABLED(!isVisible(SemaRef, D) && "Shouldn't call the slow case.\n"); Module *DeclModule = SemaRef.getOwningModule(D); assert(DeclModule && "hidden decl has no owning module"); @@ -2043,7 +2043,7 @@ bool Sema::shouldLinkPossiblyHiddenDecl(LookupResult &R, const NamedDecl *New) { /// and visible. If no declaration of D is visible, returns null. static NamedDecl *findAcceptableDecl(Sema &SemaRef, NamedDecl *D, unsigned IDNS) { - assert(!LookupResult::isAvailableForLookup(SemaRef, D) && "not in slow case"); + assert_DISABLED(!LookupResult::isAvailableForLookup(SemaRef, D) && "not in slow case"); for (auto *RD : D->redecls()) { // Don't bother with extra checks if we already know this one isn't visible. @@ -2064,7 +2064,7 @@ static NamedDecl *findAcceptableDecl(Sema &SemaRef, NamedDecl *D, bool Sema::hasVisibleDeclarationSlow(const NamedDecl *D, llvm::SmallVectorImpl *Modules) { - assert(!isVisible(D) && "not in slow case"); + assert_DISABLED(!isVisible(D) && "not in slow case"); return hasAcceptableDeclarationImpl( *this, D, Modules, [](const NamedDecl *) { return true; }, Sema::AcceptableKind::Visible); @@ -2072,7 +2072,7 @@ bool Sema::hasVisibleDeclarationSlow(const NamedDecl *D, bool Sema::hasReachableDeclarationSlow( const NamedDecl *D, llvm::SmallVectorImpl *Modules) { - assert(!isReachable(D) && "not in slow case"); + assert_DISABLED(!isReachable(D) && "not in slow case"); return hasAcceptableDeclarationImpl( *this, D, Modules, [](const NamedDecl *) { return true; }, Sema::AcceptableKind::Reachable); diff --git a/clang/lib/Sema/SemaOpenMP.cpp b/clang/lib/Sema/SemaOpenMP.cpp index 79e1536288e60..3e3836dd52792 100644 --- a/clang/lib/Sema/SemaOpenMP.cpp +++ b/clang/lib/Sema/SemaOpenMP.cpp @@ -1556,7 +1556,7 @@ void DSAStackTy::addTaskgroupReductionData(const ValueDecl 
*D, SourceRange SR, BinaryOperatorKind BOK) { D = getCanonicalDecl(D); assert(!isStackEmpty() && "Data-sharing attributes stack is empty"); - assert( + assert_DISABLED( getTopOfStack().SharingMap[D].Attributes == OMPC_reduction && "Additional reduction info may be specified only for reduction items."); ReductionData &ReductionData = getTopOfStack().ReductionMap[D]; @@ -1581,7 +1581,7 @@ void DSAStackTy::addTaskgroupReductionData(const ValueDecl *D, SourceRange SR, const Expr *ReductionRef) { D = getCanonicalDecl(D); assert(!isStackEmpty() && "Data-sharing attributes stack is empty"); - assert( + assert_DISABLED( getTopOfStack().SharingMap[D].Attributes == OMPC_reduction && "Additional reduction info may be specified only for reduction items."); ReductionData &ReductionData = getTopOfStack().ReductionMap[D]; @@ -2993,7 +2993,7 @@ static bool FinishOpenMPLinearClause(OMPLinearClause &Clause, DeclRefExpr *IV, static bool finishLinearClauses(Sema &SemaRef, ArrayRef Clauses, OMPLoopBasedDirective::HelperExprs &B, DSAStackTy *Stack) { - assert((SemaRef.CurContext->isDependentContext() || B.builtAll()) && + assert_DISABLED((SemaRef.CurContext->isDependentContext() || B.builtAll()) && "loop exprs were not built"); if (SemaRef.CurContext->isDependentContext()) @@ -10628,7 +10628,7 @@ StmtResult SemaOpenMP::ActOnOpenMPGenericLoopDirective( if (NestedLoopCount == 0) return StmtError(); - assert((SemaRef.CurContext->isDependentContext() || B.builtAll()) && + assert_DISABLED((SemaRef.CurContext->isDependentContext() || B.builtAll()) && "omp loop exprs were not built"); return OMPGenericLoopDirective::Create(getASTContext(), StartLoc, EndLoc, @@ -10658,7 +10658,7 @@ StmtResult SemaOpenMP::ActOnOpenMPTeamsGenericLoopDirective( if (NestedLoopCount == 0) return StmtError(); - assert((SemaRef.CurContext->isDependentContext() || B.builtAll()) && + assert_DISABLED((SemaRef.CurContext->isDependentContext() || B.builtAll()) && "omp loop exprs were not built"); 
DSAStack->setParentTeamsRegionLoc(StartLoc); @@ -10692,7 +10692,7 @@ StmtResult SemaOpenMP::ActOnOpenMPTargetTeamsGenericLoopDirective( if (NestedLoopCount == 0) return StmtError(); - assert((SemaRef.CurContext->isDependentContext() || B.builtAll()) && + assert_DISABLED((SemaRef.CurContext->isDependentContext() || B.builtAll()) && "omp loop exprs were not built"); return OMPTargetTeamsGenericLoopDirective::Create( @@ -10725,7 +10725,7 @@ StmtResult SemaOpenMP::ActOnOpenMPParallelGenericLoopDirective( if (NestedLoopCount == 0) return StmtError(); - assert((SemaRef.CurContext->isDependentContext() || B.builtAll()) && + assert_DISABLED((SemaRef.CurContext->isDependentContext() || B.builtAll()) && "omp loop exprs were not built"); return OMPParallelGenericLoopDirective::Create( @@ -10757,7 +10757,7 @@ StmtResult SemaOpenMP::ActOnOpenMPTargetParallelGenericLoopDirective( if (NestedLoopCount == 0) return StmtError(); - assert((SemaRef.CurContext->isDependentContext() || B.builtAll()) && + assert_DISABLED((SemaRef.CurContext->isDependentContext() || B.builtAll()) && "omp loop exprs were not built"); return OMPTargetParallelGenericLoopDirective::Create( @@ -13265,7 +13265,7 @@ StmtResult SemaOpenMP::ActOnOpenMPTaskLoopDirective( if (NestedLoopCount == 0) return StmtError(); - assert((SemaRef.CurContext->isDependentContext() || B.builtAll()) && + assert_DISABLED((SemaRef.CurContext->isDependentContext() || B.builtAll()) && "omp for loop exprs were not built"); // OpenMP, [2.9.2 taskloop Construct, Restrictions] @@ -13344,7 +13344,7 @@ StmtResult SemaOpenMP::ActOnOpenMPMasterTaskLoopDirective( if (NestedLoopCount == 0) return StmtError(); - assert((SemaRef.CurContext->isDependentContext() || B.builtAll()) && + assert_DISABLED((SemaRef.CurContext->isDependentContext() || B.builtAll()) && "omp for loop exprs were not built"); // OpenMP, [2.9.2 taskloop Construct, Restrictions] @@ -13382,7 +13382,7 @@ StmtResult SemaOpenMP::ActOnOpenMPMaskedTaskLoopDirective( if 
(NestedLoopCount == 0) return StmtError(); - assert((SemaRef.CurContext->isDependentContext() || B.builtAll()) && + assert_DISABLED((SemaRef.CurContext->isDependentContext() || B.builtAll()) && "omp for loop exprs were not built"); // OpenMP, [2.9.2 taskloop Construct, Restrictions] @@ -13504,7 +13504,7 @@ StmtResult SemaOpenMP::ActOnOpenMPParallelMasterTaskLoopDirective( if (NestedLoopCount == 0) return StmtError(); - assert((SemaRef.CurContext->isDependentContext() || B.builtAll()) && + assert_DISABLED((SemaRef.CurContext->isDependentContext() || B.builtAll()) && "omp for loop exprs were not built"); // OpenMP, [2.9.2 taskloop Construct, Restrictions] @@ -13543,7 +13543,7 @@ StmtResult SemaOpenMP::ActOnOpenMPParallelMaskedTaskLoopDirective( if (NestedLoopCount == 0) return StmtError(); - assert((SemaRef.CurContext->isDependentContext() || B.builtAll()) && + assert_DISABLED((SemaRef.CurContext->isDependentContext() || B.builtAll()) && "omp for loop exprs were not built"); // OpenMP, [2.9.2 taskloop Construct, Restrictions] @@ -13660,7 +13660,7 @@ StmtResult SemaOpenMP::ActOnOpenMPDistributeDirective( if (NestedLoopCount == 0) return StmtError(); - assert((SemaRef.CurContext->isDependentContext() || B.builtAll()) && + assert_DISABLED((SemaRef.CurContext->isDependentContext() || B.builtAll()) && "omp for loop exprs were not built"); SemaRef.setFunctionHasBranchProtectedScope(); @@ -13688,7 +13688,7 @@ StmtResult SemaOpenMP::ActOnOpenMPDistributeParallelForDirective( if (NestedLoopCount == 0) return StmtError(); - assert((SemaRef.CurContext->isDependentContext() || B.builtAll()) && + assert_DISABLED((SemaRef.CurContext->isDependentContext() || B.builtAll()) && "omp for loop exprs were not built"); return OMPDistributeParallelForDirective::Create( @@ -13830,7 +13830,7 @@ StmtResult SemaOpenMP::ActOnOpenMPTeamsDistributeDirective( if (NestedLoopCount == 0) return StmtError(); - assert((SemaRef.CurContext->isDependentContext() || B.builtAll()) && + 
assert_DISABLED((SemaRef.CurContext->isDependentContext() || B.builtAll()) && "omp teams distribute loop exprs were not built"); DSAStack->setParentTeamsRegionLoc(StartLoc); @@ -13921,7 +13921,7 @@ StmtResult SemaOpenMP::ActOnOpenMPTeamsDistributeParallelForDirective( if (NestedLoopCount == 0) return StmtError(); - assert((SemaRef.CurContext->isDependentContext() || B.builtAll()) && + assert_DISABLED((SemaRef.CurContext->isDependentContext() || B.builtAll()) && "omp for loop exprs were not built"); DSAStack->setParentTeamsRegionLoc(StartLoc); @@ -13991,7 +13991,7 @@ StmtResult SemaOpenMP::ActOnOpenMPTargetTeamsDistributeDirective( if (NestedLoopCount == 0) return StmtError(); - assert((SemaRef.CurContext->isDependentContext() || B.builtAll()) && + assert_DISABLED((SemaRef.CurContext->isDependentContext() || B.builtAll()) && "omp target teams distribute loop exprs were not built"); return OMPTargetTeamsDistributeDirective::Create( @@ -18011,7 +18011,7 @@ static T filterLookupForUDReductionAndMapper( } static NamedDecl *findAcceptableDecl(Sema &SemaRef, NamedDecl *D) { - assert(!LookupResult::isVisible(SemaRef, D) && "not in slow case"); + assert_DISABLED(!LookupResult::isVisible(SemaRef, D) && "not in slow case"); for (auto *RD : D->redecls()) { // Don't bother with extra checks if we already know this one isn't visible. @@ -19765,7 +19765,7 @@ OMPClause *SemaOpenMP::ActOnOpenMPCopyprivateClause(ArrayRef VarList, // No need to mark vars as copyprivate, they are already threadprivate or // implicitly private. - assert(VD || isOpenMPCapturedDecl(D)); + assert_DISABLED(VD || isOpenMPCapturedDecl(D)); Vars.push_back( VD ? 
RefExpr->IgnoreParens() : buildCapture(SemaRef, D, SimpleRefExpr, /*WithInit=*/false)); diff --git a/clang/lib/Sema/SemaStmt.cpp b/clang/lib/Sema/SemaStmt.cpp index 38ae6d8116c3b..6d870501ca343 100644 --- a/clang/lib/Sema/SemaStmt.cpp +++ b/clang/lib/Sema/SemaStmt.cpp @@ -4009,7 +4009,7 @@ StmtResult Sema::BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, : diag::warn_return_missing_expr; // Note that at this point one of getCurFunctionDecl() or // getCurMethodDecl() must be non-null (see above). - assert((getCurFunctionDecl() || getCurMethodDecl()) && + assert_DISABLED((getCurFunctionDecl() || getCurMethodDecl()) && "Not in a FunctionDecl or ObjCMethodDecl?"); bool IsMethod = FD == nullptr; const NamedDecl *ND = diff --git a/clang/lib/Sema/SemaTemplate.cpp b/clang/lib/Sema/SemaTemplate.cpp index fcf05798d9c70..3dd888c61d850 100644 --- a/clang/lib/Sema/SemaTemplate.cpp +++ b/clang/lib/Sema/SemaTemplate.cpp @@ -4132,7 +4132,7 @@ DeclResult Sema::ActOnVarTemplateSpecialization( StorageClass SC, bool IsPartialSpecialization, bool IsMemberSpecialization) { // D must be variable template id. 
- assert(D.getName().getKind() == UnqualifiedIdKind::IK_TemplateId && + assert_DISABLED(D.getName().getKind() == UnqualifiedIdKind::IK_TemplateId && "Variable template specialization is declared with a template id."); TemplateIdAnnotation *TemplateId = D.getName().TemplateId; diff --git a/clang/lib/Sema/TreeTransform.h b/clang/lib/Sema/TreeTransform.h index d24d8d5335e28..66589d5e0a3cf 100644 --- a/clang/lib/Sema/TreeTransform.h +++ b/clang/lib/Sema/TreeTransform.h @@ -5239,7 +5239,7 @@ TypeSourceInfo *TreeTransform::TransformTSIInObjectScope( TypeLoc TL, QualType ObjectType, NamedDecl *UnqualLookup, CXXScopeSpec &SS) { QualType T = TL.getType(); - assert(!getDerived().AlreadyTransformed(T)); + assert_DISABLED(!getDerived().AlreadyTransformed(T)); TypeLocBuilder TLB; QualType Result; @@ -12418,7 +12418,7 @@ TreeTransform::TransformOffsetOfExpr(OffsetOfExpr *E) { template ExprResult TreeTransform::TransformOpaqueValueExpr(OpaqueValueExpr *E) { - assert((!E->getSourceExpr() || getDerived().AlreadyTransformed(E->getType())) && + assert_DISABLED((!E->getSourceExpr() || getDerived().AlreadyTransformed(E->getType())) && "opaque value expression requires transformation"); return E; } diff --git a/clang/lib/Serialization/ASTReader.cpp b/clang/lib/Serialization/ASTReader.cpp index 7d9170e7f0b47..0bc8e4f0efa41 100644 --- a/clang/lib/Serialization/ASTReader.cpp +++ b/clang/lib/Serialization/ASTReader.cpp @@ -3850,7 +3850,7 @@ llvm::Error ASTReader::ReadASTBlock(ModuleFile &F, DelayedNamespaceOffsetMap[ID] = {LexicalOffset, VisibleOffset}; - assert(!GetExistingDecl(ID) && + assert_DISABLED(!GetExistingDecl(ID) && "We shouldn't load the namespace in the front of delayed " "namespace lexical and visible block"); } diff --git a/clang/lib/Serialization/ASTReaderStmt.cpp b/clang/lib/Serialization/ASTReaderStmt.cpp index 6aaafb2e8d71c..8f9080a348935 100644 --- a/clang/lib/Serialization/ASTReaderStmt.cpp +++ b/clang/lib/Serialization/ASTReaderStmt.cpp @@ -2942,7 +2942,7 @@ Stmt 
*ASTReader::ReadStmtFromStream(ModuleFile &F) { case STMT_REF_PTR: IsStmtReference = true; - assert(StmtEntries.contains(Record[0]) && + assert_DISABLED(StmtEntries.contains(Record[0]) && "No stmt was recorded for this offset reference!"); S = StmtEntries[Record.readInt()]; break; @@ -3483,16 +3483,16 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) { } case STMT_OMP_UNROLL_DIRECTIVE: { - assert(Record[ASTStmtReader::NumStmtFields] == 1 && "Unroll directive accepts only a single loop"); + assert_DISABLED(Record[ASTStmtReader::NumStmtFields] == 1 && "Unroll directive accepts only a single loop"); unsigned NumClauses = Record[ASTStmtReader::NumStmtFields + 1]; S = OMPUnrollDirective::CreateEmpty(Context, NumClauses); break; } case STMT_OMP_REVERSE_DIRECTIVE: { - assert(Record[ASTStmtReader::NumStmtFields] == 1 && + assert_DISABLED(Record[ASTStmtReader::NumStmtFields] == 1 && "Reverse directive accepts only a single loop"); - assert(Record[ASTStmtReader::NumStmtFields + 1] == 0 && + assert_DISABLED(Record[ASTStmtReader::NumStmtFields + 1] == 0 && "Reverse directive has no clauses"); S = OMPReverseDirective::CreateEmpty(Context); break; diff --git a/clang/lib/Serialization/ASTWriter.cpp b/clang/lib/Serialization/ASTWriter.cpp index 494890284d2f2..f129013ba1e9e 100644 --- a/clang/lib/Serialization/ASTWriter.cpp +++ b/clang/lib/Serialization/ASTWriter.cpp @@ -2323,7 +2323,7 @@ void ASTWriter::WriteSourceManagerBlock(SourceManager &SourceMgr, "Writing to AST an overridden file is not supported"); // The source location entry is a file. Emit input file ID. 
- assert(InputFileIDs[*Content->OrigEntry] != 0 && "Missed file entry"); + assert_DISABLED(InputFileIDs[*Content->OrigEntry] != 0 && "Missed file entry"); Record.push_back(InputFileIDs[*Content->OrigEntry]); Record.push_back(getAdjustedNumCreatedFIDs(FID)); @@ -3011,7 +3011,7 @@ void ASTWriter::WriteSubmodules(Module *WritingModule) { uint64_t ParentID = 0; if (Mod->Parent) { - assert(SubmoduleIDs[Mod->Parent] && "Submodule parent not written?"); + assert_DISABLED(SubmoduleIDs[Mod->Parent] && "Submodule parent not written?"); ParentID = SubmoduleIDs[Mod->Parent]; } @@ -4522,7 +4522,7 @@ void ASTWriter::WriteObjCCategories() { Cat = Class->known_categories_begin(), CatEnd = Class->known_categories_end(); Cat != CatEnd; ++Cat, ++Size) { - assert(getDeclID(*Cat).isValid() && "Bogus category"); + assert_DISABLED(getDeclID(*Cat).isValid() && "Bogus category"); AddDeclRef(*Cat, Categories); } diff --git a/clang/lib/Serialization/ASTWriterDecl.cpp b/clang/lib/Serialization/ASTWriterDecl.cpp index b5fe16bf6e787..685dbd475da9b 100644 --- a/clang/lib/Serialization/ASTWriterDecl.cpp +++ b/clang/lib/Serialization/ASTWriterDecl.cpp @@ -2028,7 +2028,7 @@ void ASTDeclWriter::VisitDeclContext(DeclContext *DC) { } const Decl *ASTWriter::getFirstLocalDecl(const Decl *D) { - assert(IsLocalDecl(D) && "expected a local declaration"); + assert_DISABLED(IsLocalDecl(D) && "expected a local declaration"); const Decl *Canon = D->getCanonicalDecl(); if (IsLocalDecl(Canon)) diff --git a/clang/lib/StaticAnalyzer/Checkers/MoveChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/MoveChecker.cpp index 52416e2139914..67fcc7dd3095f 100644 --- a/clang/lib/StaticAnalyzer/Checkers/MoveChecker.cpp +++ b/clang/lib/StaticAnalyzer/Checkers/MoveChecker.cpp @@ -351,7 +351,7 @@ const ExplodedNode *MoveChecker::getMoveLocation(const ExplodedNode *N, void MoveChecker::modelUse(ProgramStateRef State, const MemRegion *Region, const CXXRecordDecl *RD, MisuseKind MK, CheckerContext &C) const { - assert(!C.isDifferent() 
&& "No transitions should have been made by now"); + assert_DISABLED(!C.isDifferent() && "No transitions should have been made by now"); const RegionState *RS = State->get(Region); ObjectKind OK = classifyObject(Region, RD); @@ -489,7 +489,7 @@ void MoveChecker::checkPostCall(const CallEvent &Call, C.addTransition(State); return; } - assert(!C.isDifferent() && "Should not have made transitions on this path!"); + assert_DISABLED(!C.isDifferent() && "Should not have made transitions on this path!"); } bool MoveChecker::isMoveSafeMethod(const CXXMethodDecl *MethodDec) const { diff --git a/clang/lib/StaticAnalyzer/Core/BugReporter.cpp b/clang/lib/StaticAnalyzer/Core/BugReporter.cpp index b67e6cd86c3d6..a5a3fbb054a32 100644 --- a/clang/lib/StaticAnalyzer/Core/BugReporter.cpp +++ b/clang/lib/StaticAnalyzer/Core/BugReporter.cpp @@ -1166,12 +1166,12 @@ void PathDiagnosticBuilder::generatePathDiagnosticsForNode( // call piece to encapsulate the rest of the path pieces. const Decl *Caller = CE->getLocationContext()->getDecl(); Call = PathDiagnosticCallPiece::construct(C.getActivePath(), Caller); - assert(C.getActivePath().size() == 1 && + assert_DISABLED(C.getActivePath().size() == 1 && C.getActivePath().front().get() == Call); // Since we just transferred the path over to the call piece, reset the // mapping of the active path to the current location context. 
- assert(C.isInLocCtxMap(&C.getActivePath()) && + assert_DISABLED(C.isInLocCtxMap(&C.getActivePath()) && "When we ascend to a previously unvisited call, the active path's " "address shouldn't change, but rather should be compacted into " "a single CallEvent!"); diff --git a/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp b/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp index c50db1e0e2f86..cbe2e6011d34e 100644 --- a/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp +++ b/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp @@ -609,7 +609,7 @@ void ExprEngine::handleConstructor(const Expr *E, EvalCallOptions CallOpts; auto C = getCurrentCFGElement().getAs(); - assert(C || getCurrentCFGElement().getAs()); + assert_DISABLED(C || getCurrentCFGElement().getAs()); const ConstructionContext *CC = C ? C->getConstructionContext() : nullptr; const CXXConstructionKind CK = @@ -1116,7 +1116,7 @@ void ExprEngine::VisitCXXNewExpr(const CXXNewExpr *CNE, ExplodedNode *Pred, // initializer. Copy the value over. 
if (const Expr *Init = CNE->getInitializer()) { if (!isa(Init)) { - assert(Bldr.getResults().size() == 1); + assert_DISABLED(Bldr.getResults().size() == 1); Bldr.takeNodes(NewN); evalBind(Dst, CNE, NewN, Result, State->getSVal(Init, LCtx), /*FirstInit=*/IsStandardGlobalOpNewFunction); diff --git a/clang/utils/TableGen/NeonEmitter.cpp b/clang/utils/TableGen/NeonEmitter.cpp index 35c8fcf69910b..becbffb494fcb 100644 --- a/clang/utils/TableGen/NeonEmitter.cpp +++ b/clang/utils/TableGen/NeonEmitter.cpp @@ -588,7 +588,7 @@ class NeonEmitter { void genOverloadTypeCheckCode(raw_ostream &OS, SmallVectorImpl &Defs); bool areRangeChecksCompatible(const ArrayRef ChecksA, - const ArrayRef ChecksB); + const ArrayRef ChecksB) const; void genIntrinsicRangeCheckCode(raw_ostream &OS, SmallVectorImpl &Defs); @@ -2185,7 +2185,7 @@ void NeonEmitter::genOverloadTypeCheckCode(raw_ostream &OS, inline bool NeonEmitter::areRangeChecksCompatible(const ArrayRef ChecksA, - const ArrayRef ChecksB) { + const ArrayRef ChecksB) const { // If multiple intrinsics map to the same builtin, we must ensure that the // intended range checks performed in SemaArm.cpp do not contradict each // other, as these are emitted once per-buitlin. 
diff --git a/llvm/include/llvm/ADT/AllocatorList.h b/llvm/include/llvm/ADT/AllocatorList.h index 04d0afc9d076e..752b1f5a77029 100644 --- a/llvm/include/llvm/ADT/AllocatorList.h +++ b/llvm/include/llvm/ADT/AllocatorList.h @@ -155,8 +155,8 @@ template class AllocatorList : AllocatorT { std::swap(getAlloc(), RHS.getAlloc()); } - bool empty() { return List.empty(); } - size_t size() { return List.size(); } + bool empty() const { return List.empty(); } + size_t size() const { return List.size(); } iterator begin() { return iterator(List.begin()); } iterator end() { return iterator(List.end()); } diff --git a/llvm/include/llvm/ADT/GenericCycleImpl.h b/llvm/include/llvm/ADT/GenericCycleImpl.h index 3d2c5f4288355..d43b7478e9e87 100644 --- a/llvm/include/llvm/ADT/GenericCycleImpl.h +++ b/llvm/include/llvm/ADT/GenericCycleImpl.h @@ -126,13 +126,13 @@ template void GenericCycle::verifyCycle() const { assert(!Blocks.empty() && "Cycle cannot be empty."); DenseSet Blocks; for (BlockT *BB : blocks()) { - assert(Blocks.insert(BB).second); // duplicates in block list? + assert_DISABLED(Blocks.insert(BB).second); // duplicates in block list? } assert(!Entries.empty() && "Cycle must have one or more entries."); DenseSet Entries; for (BlockT *Entry : entries()) { - assert(Entries.insert(Entry).second); // duplicate entry? + assert_DISABLED(Entries.insert(Entry).second); // duplicate entry? 
assert(contains(Entry)); } @@ -577,7 +577,7 @@ void GenericCycleInfo::verifyCycleNest(bool VerifyFull) const { for (CycleT *TopCycle : toplevel_cycles()) { for (CycleT *Cycle : depth_first(TopCycle)) { BlockT *Header = Cycle->getHeader(); - assert(CycleHeaders.insert(Header).second); + assert_DISABLED(const_cast(CycleHeaders).insert(Header).second); if (VerifyFull) Cycle->verifyCycle(); else diff --git a/llvm/include/llvm/ADT/IndexedMap.h b/llvm/include/llvm/ADT/IndexedMap.h index b1ebbdd1bfd54..d9b901371fe25 100644 --- a/llvm/include/llvm/ADT/IndexedMap.h +++ b/llvm/include/llvm/ADT/IndexedMap.h @@ -45,7 +45,7 @@ template > explicit IndexedMap(const T& val) : nullVal_(val) {} typename StorageT::reference operator[](IndexT n) { - assert(toIndex_(n) < storage_.size() && "index out of bounds!"); + assert_DISABLED(toIndex_(n) < storage_.size() && "index out of bounds!"); return storage_[toIndex_(n)]; } diff --git a/llvm/include/llvm/ADT/PointerUnion.h b/llvm/include/llvm/ADT/PointerUnion.h index 7d4ed02b62262..54903f5f25170 100644 --- a/llvm/include/llvm/ADT/PointerUnion.h +++ b/llvm/include/llvm/ADT/PointerUnion.h @@ -226,7 +226,7 @@ bool operator<(PointerUnion lhs, PointerUnion rhs) { template struct CastInfoPointerUnionImpl { using From = PointerUnion; - template static inline bool isPossible(From &F) { + template static inline bool isPossible(const From &F) { return F.Val.getInt() == FirstIndexOfType::value; } diff --git a/llvm/include/llvm/ADT/SmallVector.h b/llvm/include/llvm/ADT/SmallVector.h index bd3e887e36bce..dd509468f41d0 100644 --- a/llvm/include/llvm/ADT/SmallVector.h +++ b/llvm/include/llvm/ADT/SmallVector.h @@ -171,7 +171,7 @@ class SmallVectorTemplateCommon /// Return true unless Elt will be invalidated by resizing the vector to /// NewSize. - bool isSafeToReferenceAfterResize(const void *Elt, size_t NewSize) { + bool isSafeToReferenceAfterResize(const void *Elt, size_t NewSize) const { // Past the end. 
if (LLVM_LIKELY(!isReferenceToStorage(Elt))) return true; @@ -185,7 +185,7 @@ class SmallVectorTemplateCommon } /// Check whether Elt will be invalidated by resizing the vector to NewSize. - void assertSafeToReferenceAfterResize(const void *Elt, size_t NewSize) { + void assertSafeToReferenceAfterResize(const void *Elt, size_t NewSize) const{ assert(isSafeToReferenceAfterResize(Elt, NewSize) && "Attempting to reference an element of the vector in an operation " "that invalidates it"); @@ -193,12 +193,12 @@ class SmallVectorTemplateCommon /// Check whether Elt will be invalidated by increasing the size of the /// vector by N. - void assertSafeToAdd(const void *Elt, size_t N = 1) { + void assertSafeToAdd(const void *Elt, size_t N = 1) const { this->assertSafeToReferenceAfterResize(Elt, this->size() + N); } /// Check whether any part of the range will be invalidated by clearing. - void assertSafeToReferenceAfterClear(const T *From, const T *To) { + void assertSafeToReferenceAfterClear(const T *From, const T *To) const { if (From == To) return; this->assertSafeToReferenceAfterResize(From, 0); @@ -208,10 +208,10 @@ class SmallVectorTemplateCommon class ItTy, std::enable_if_t, T *>::value, bool> = false> - void assertSafeToReferenceAfterClear(ItTy, ItTy) {} + void assertSafeToReferenceAfterClear(ItTy, ItTy) const {} /// Check whether any part of the range will be invalidated by growing. - void assertSafeToAddRange(const T *From, const T *To) { + void assertSafeToAddRange(const T *From, const T *To) const { if (From == To) return; this->assertSafeToAdd(From, To - From); @@ -221,7 +221,7 @@ class SmallVectorTemplateCommon class ItTy, std::enable_if_t, T *>::value, bool> = false> - void assertSafeToAddRange(ItTy, ItTy) {} + void assertSafeToAddRange(ItTy, ItTy) const {} /// Reserve enough space to add one element, and return the updated element /// pointer in case it was a reference to the storage. 
diff --git a/llvm/include/llvm/ADT/StringMap.h b/llvm/include/llvm/ADT/StringMap.h index 9b58af7327391..b04828df0812b 100644 --- a/llvm/include/llvm/ADT/StringMap.h +++ b/llvm/include/llvm/ADT/StringMap.h @@ -454,6 +454,10 @@ class StringMapIterBase return LHS.Ptr == RHS.Ptr; } + friend bool operator!=(const DerivedTy &LHS, const DerivedTy &RHS) { + return LHS.Ptr != RHS.Ptr; + } + DerivedTy &operator++() { // Preincrement ++Ptr; AdvancePastEmptyBuckets(); @@ -489,6 +493,7 @@ class StringMapConstIterator const StringMapEntry &operator*() const { return *static_cast *>(*this->Ptr); } + }; template @@ -510,6 +515,9 @@ class StringMapIterator : public StringMapIterBase, operator StringMapConstIterator() const { return StringMapConstIterator(this->Ptr, true); } + operator base() const { + return *this; + } }; template diff --git a/llvm/include/llvm/Analysis/MemoryProfileInfo.h b/llvm/include/llvm/Analysis/MemoryProfileInfo.h index edbce706953d1..103c4cbeea529 100644 --- a/llvm/include/llvm/Analysis/MemoryProfileInfo.h +++ b/llvm/include/llvm/Analysis/MemoryProfileInfo.h @@ -178,7 +178,7 @@ CallStack::beginAfterSharedPrefix(CallStack &Other) { CallStackIterator Cur = begin(); for (CallStackIterator OtherCur = Other.begin(); Cur != end() && OtherCur != Other.end(); ++Cur, ++OtherCur) - assert(*Cur == *OtherCur); + assert_DISABLED(*Cur == *OtherCur); return Cur; } diff --git a/llvm/include/llvm/CodeGen/IndirectThunks.h b/llvm/include/llvm/CodeGen/IndirectThunks.h index 6c16b326fedd0..be5d12df183b1 100644 --- a/llvm/include/llvm/CodeGen/IndirectThunks.h +++ b/llvm/include/llvm/CodeGen/IndirectThunks.h @@ -119,7 +119,7 @@ template void ThunkInserter::createThunkFunction( MachineModuleInfo &MMI, StringRef Name, bool Comdat, StringRef TargetAttrs) { - assert(Name.starts_with(getDerived().getThunkPrefix()) && + assert_DISABLED(Name.starts_with(getDerived().getThunkPrefix()) && "Created a thunk with an unexpected prefix!"); Module &M = const_cast(*MMI.getModule()); diff --git 
a/llvm/include/llvm/CodeGen/MachineFunction.h b/llvm/include/llvm/CodeGen/MachineFunction.h index 053e7062fb499..1f86f47b2bbf1 100644 --- a/llvm/include/llvm/CodeGen/MachineFunction.h +++ b/llvm/include/llvm/CodeGen/MachineFunction.h @@ -1262,8 +1262,9 @@ class LLVM_ABI MachineFunction { } /// Return true if the landing pad Eh symbol has an associated call site. - bool hasCallSiteLandingPad(MCSymbol *Sym) { - return !LPadToCallSiteMap[Sym].empty(); + bool hasCallSiteLandingPad(MCSymbol *Sym) const { + auto pos = LPadToCallSiteMap.find(Sym); + return pos != LPadToCallSiteMap.end() && !pos->second.empty(); } bool hasAnyCallSiteLabel() const { diff --git a/llvm/include/llvm/CodeGen/PBQP/Graph.h b/llvm/include/llvm/CodeGen/PBQP/Graph.h index 5c802802a8804..2376fdfbfe558 100644 --- a/llvm/include/llvm/CodeGen/PBQP/Graph.h +++ b/llvm/include/llvm/CodeGen/PBQP/Graph.h @@ -203,7 +203,7 @@ namespace PBQP { } EdgeId addConstructedEdge(EdgeEntry E) { - assert(findEdge(E.getN1Id(), E.getN2Id()) == invalidEdgeId() && + assert_DISABLED(findEdge(E.getN1Id(), E.getN2Id()) == invalidEdgeId() && "Attempt to add duplicate edge."); EdgeId EId = 0; if (!FreeEdgeIds.empty()) { diff --git a/llvm/include/llvm/DebugInfo/CodeView/AppendingTypeTableBuilder.h b/llvm/include/llvm/DebugInfo/CodeView/AppendingTypeTableBuilder.h index fa2277343d5d4..c8a35e176fc64 100644 --- a/llvm/include/llvm/DebugInfo/CodeView/AppendingTypeTableBuilder.h +++ b/llvm/include/llvm/DebugInfo/CodeView/AppendingTypeTableBuilder.h @@ -40,9 +40,9 @@ class AppendingTypeTableBuilder : public TypeCollection { std::optional getNext(TypeIndex Prev) override; CVType getType(TypeIndex Index) override; StringRef getTypeName(TypeIndex Index) override; - bool contains(TypeIndex Index) override; - uint32_t size() override; - uint32_t capacity() override; + bool contains(TypeIndex Index) const override; + uint32_t size() const override; + uint32_t capacity() const override; bool replaceType(TypeIndex &Index, CVType Data, bool 
Stabilize) override; // public interface diff --git a/llvm/include/llvm/DebugInfo/CodeView/GlobalTypeTableBuilder.h b/llvm/include/llvm/DebugInfo/CodeView/GlobalTypeTableBuilder.h index 18f16bc66a777..7003e2742cbcf 100644 --- a/llvm/include/llvm/DebugInfo/CodeView/GlobalTypeTableBuilder.h +++ b/llvm/include/llvm/DebugInfo/CodeView/GlobalTypeTableBuilder.h @@ -53,9 +53,9 @@ class GlobalTypeTableBuilder : public TypeCollection { std::optional getNext(TypeIndex Prev) override; CVType getType(TypeIndex Index) override; StringRef getTypeName(TypeIndex Index) override; - bool contains(TypeIndex Index) override; - uint32_t size() override; - uint32_t capacity() override; + bool contains(TypeIndex Index) const override; + uint32_t size() const override; + uint32_t capacity() const override; bool replaceType(TypeIndex &Index, CVType Data, bool Stabilize) override; // public interface diff --git a/llvm/include/llvm/DebugInfo/CodeView/LazyRandomTypeCollection.h b/llvm/include/llvm/DebugInfo/CodeView/LazyRandomTypeCollection.h index 240f7092140c1..637ad85a93271 100644 --- a/llvm/include/llvm/DebugInfo/CodeView/LazyRandomTypeCollection.h +++ b/llvm/include/llvm/DebugInfo/CodeView/LazyRandomTypeCollection.h @@ -72,9 +72,9 @@ class LazyRandomTypeCollection : public TypeCollection { CVType getType(TypeIndex Index) override; StringRef getTypeName(TypeIndex Index) override; - bool contains(TypeIndex Index) override; - uint32_t size() override; - uint32_t capacity() override; + bool contains(TypeIndex Index) const override; + uint32_t size() const override; + uint32_t capacity() const override; std::optional getFirst() override; std::optional getNext(TypeIndex Prev) override; bool replaceType(TypeIndex &Index, CVType Data, bool Stabilize) override; diff --git a/llvm/include/llvm/DebugInfo/CodeView/MergingTypeTableBuilder.h b/llvm/include/llvm/DebugInfo/CodeView/MergingTypeTableBuilder.h index 10bc8f60613c8..8a0cf10fc5cb7 100644 --- 
a/llvm/include/llvm/DebugInfo/CodeView/MergingTypeTableBuilder.h +++ b/llvm/include/llvm/DebugInfo/CodeView/MergingTypeTableBuilder.h @@ -49,9 +49,9 @@ class MergingTypeTableBuilder : public TypeCollection { std::optional getNext(TypeIndex Prev) override; CVType getType(TypeIndex Index) override; StringRef getTypeName(TypeIndex Index) override; - bool contains(TypeIndex Index) override; - uint32_t size() override; - uint32_t capacity() override; + bool contains(TypeIndex Index) const override; + uint32_t size() const override; + uint32_t capacity() const override; bool replaceType(TypeIndex &Index, CVType Data, bool Stabilize) override; // public interface diff --git a/llvm/include/llvm/DebugInfo/CodeView/TypeCollection.h b/llvm/include/llvm/DebugInfo/CodeView/TypeCollection.h index dd082a72125a5..1ec70c161e086 100644 --- a/llvm/include/llvm/DebugInfo/CodeView/TypeCollection.h +++ b/llvm/include/llvm/DebugInfo/CodeView/TypeCollection.h @@ -19,16 +19,16 @@ class TypeCollection { public: virtual ~TypeCollection() = default; - bool empty() { return size() == 0; } + bool empty() const { return size() == 0; } virtual std::optional getFirst() = 0; virtual std::optional getNext(TypeIndex Prev) = 0; virtual CVType getType(TypeIndex Index) = 0; virtual StringRef getTypeName(TypeIndex Index) = 0; - virtual bool contains(TypeIndex Index) = 0; - virtual uint32_t size() = 0; - virtual uint32_t capacity() = 0; + virtual bool contains(TypeIndex Index) const = 0; + virtual uint32_t size() const = 0; + virtual uint32_t capacity() const = 0; virtual bool replaceType(TypeIndex &Index, CVType Data, bool Stabilize) = 0; template void ForEachRecord(TFunc Func) { diff --git a/llvm/include/llvm/DebugInfo/CodeView/TypeTableCollection.h b/llvm/include/llvm/DebugInfo/CodeView/TypeTableCollection.h index 2a389b9ac34e7..7f706df03f180 100644 --- a/llvm/include/llvm/DebugInfo/CodeView/TypeTableCollection.h +++ b/llvm/include/llvm/DebugInfo/CodeView/TypeTableCollection.h @@ -26,9 +26,9 @@ class 
TypeTableCollection : public TypeCollection { CVType getType(TypeIndex Index) override; StringRef getTypeName(TypeIndex Index) override; - bool contains(TypeIndex Index) override; - uint32_t size() override; - uint32_t capacity() override; + bool contains(TypeIndex Index) const override; + uint32_t size() const override; + uint32_t capacity() const override; bool replaceType(TypeIndex &Index, CVType Data, bool Stabilize) override; private: diff --git a/llvm/include/llvm/ExecutionEngine/JITLink/JITLink.h b/llvm/include/llvm/ExecutionEngine/JITLink/JITLink.h index e8a971bbfd2cb..3e56041dc2215 100644 --- a/llvm/include/llvm/ExecutionEngine/JITLink/JITLink.h +++ b/llvm/include/llvm/ExecutionEngine/JITLink/JITLink.h @@ -1522,7 +1522,7 @@ class LinkGraph { "Symbol is not in the externals set"); ExternalSymbols.erase(Sym.getName()); Addressable &Base = *Sym.Base; - assert(llvm::none_of(external_symbols(), + assert_DISABLED(llvm::none_of(external_symbols(), [&](Symbol *AS) { return AS->Base == &Base; }) && "Base addressable still in use"); destroySymbol(Sym); @@ -1537,7 +1537,7 @@ class LinkGraph { "Symbol is not in the absolute symbols set"); AbsoluteSymbols.erase(&Sym); Addressable &Base = *Sym.Base; - assert(llvm::none_of(external_symbols(), + assert_DISABLED(llvm::none_of(external_symbols(), [&](Symbol *AS) { return AS->Base == &Base; }) && "Base addressable still in use"); destroySymbol(Sym); diff --git a/llvm/include/llvm/ExecutionEngine/Orc/LLJIT.h b/llvm/include/llvm/ExecutionEngine/Orc/LLJIT.h index a2364b4515f01..79761a6805161 100644 --- a/llvm/include/llvm/ExecutionEngine/Orc/LLJIT.h +++ b/llvm/include/llvm/ExecutionEngine/Orc/LLJIT.h @@ -349,7 +349,7 @@ class LLJITBuilderSetters { /// Set an ExecutionSession for this instance. 
SetterImpl &setExecutionSession(std::unique_ptr ES) { - assert( + assert_DISABLED( !impl().EPC && "setExecutionSession should not be called if an ExecutorProcessControl " "object has already been set"); diff --git a/llvm/include/llvm/ExecutionEngine/Orc/MachOBuilder.h b/llvm/include/llvm/ExecutionEngine/Orc/MachOBuilder.h index 6ffd286c365ac..b4967144a4907 100644 --- a/llvm/include/llvm/ExecutionEngine/Orc/MachOBuilder.h +++ b/llvm/include/llvm/ExecutionEngine/Orc/MachOBuilder.h @@ -179,17 +179,17 @@ template class MachOBuilder { bool isSymbol() { return Idx != ~0U; } uint32_t getSymbolNum() { - assert(isSymbol() && "Target is not a symbol"); + assert_DISABLED(isSymbol() && "Target is not a symbol"); return SC->SymbolIndexBase + Idx; } uint32_t getSectionId() { - assert(!isSymbol() && "Target is not a section"); + assert_DISABLED(!isSymbol() && "Target is not a section"); return S->SectionNumber; } typename MachOTraits::NList &nlist() { - assert(isSymbol() && "Target is not a symbol"); + assert_DISABLED(isSymbol() && "Target is not a symbol"); return SC->Symbols[Idx]; } diff --git a/llvm/include/llvm/IR/BasicBlock.h b/llvm/include/llvm/IR/BasicBlock.h index c7913e60cea08..3438ebbae5bca 100644 --- a/llvm/include/llvm/IR/BasicBlock.h +++ b/llvm/include/llvm/IR/BasicBlock.h @@ -115,7 +115,7 @@ class BasicBlock final : public Value, // Basic blocks are data objects also /// Fetch the collection of DbgRecords that "trail" after the last instruction /// of this block, see \ref setTrailingDbgRecords. If there are none, returns /// nullptr. - DbgMarker *getTrailingDbgRecords(); + DbgMarker *getTrailingDbgRecords() const; /// Delete any trailing DbgRecords at the end of this block, see /// \ref setTrailingDbgRecords. 
diff --git a/llvm/include/llvm/IR/CFG.h b/llvm/include/llvm/IR/CFG.h index f8ec0971517a9..83bf9752b0b55 100644 --- a/llvm/include/llvm/IR/CFG.h +++ b/llvm/include/llvm/IR/CFG.h @@ -149,7 +149,7 @@ class SuccIterator int Idx; using Self = SuccIterator; - inline bool index_is_valid(int Idx) { + inline bool index_is_valid(int Idx) const { // Note that we specially support the index of zero being valid even in the // face of a null instruction. return Idx >= 0 && (Idx == 0 || Idx <= (int)Inst->getNumSuccessors()); diff --git a/llvm/include/llvm/IR/Metadata.h b/llvm/include/llvm/IR/Metadata.h index 7088276754516..f8d290ce9af6a 100644 --- a/llvm/include/llvm/IR/Metadata.h +++ b/llvm/include/llvm/IR/Metadata.h @@ -943,7 +943,7 @@ class MDOperand { } void untrack() { - assert(static_cast(this) == &MD && "Expected same address"); + assert(static_cast(this) == &MD && "Expected same address"); if (MD) MetadataTracking::untrack(MD); } diff --git a/llvm/include/llvm/IR/PassManager.h b/llvm/include/llvm/IR/PassManager.h index d269221fac070..e2143f5a848d7 100644 --- a/llvm/include/llvm/IR/PassManager.h +++ b/llvm/include/llvm/IR/PassManager.h @@ -442,7 +442,7 @@ template class AnalysisManager { PreservedAnalyses PA = PreservedAnalyses::none(); SmallDenseMap IsResultInvalidated; Invalidator Inv(IsResultInvalidated, AnalysisResults); - assert(!Result->invalidate(IR, PA, Inv) && + assert_DISABLED(!Result->invalidate(IR, PA, Inv) && "Cached result cannot be invalidated"); } diff --git a/llvm/include/llvm/MC/MCParser/MCAsmParser.h b/llvm/include/llvm/MC/MCParser/MCAsmParser.h index faa72d5f3144c..4593d26fe419b 100644 --- a/llvm/include/llvm/MC/MCParser/MCAsmParser.h +++ b/llvm/include/llvm/MC/MCParser/MCAsmParser.h @@ -232,7 +232,7 @@ class MCAsmParser { virtual bool printError(SMLoc L, const Twine &Msg, SMRange Range = std::nullopt) = 0; - bool hasPendingError() { return !PendingErrors.empty(); } + bool hasPendingError() const { return !PendingErrors.empty(); } bool 
printPendingErrors() { bool rv = !PendingErrors.empty(); diff --git a/llvm/include/llvm/Support/Error.h b/llvm/include/llvm/Support/Error.h index cb06ac19f0bb7..cd9dc7aa82546 100644 --- a/llvm/include/llvm/Support/Error.h +++ b/llvm/include/llvm/Support/Error.h @@ -243,6 +243,11 @@ class [[nodiscard]] Error { return getPtr() != nullptr; } + explicit operator bool() const { + // FIXME(EricWF): This is a terrible hack. + return getPtr() != nullptr; + } + /// Check whether one error is a subclass of another. template bool isA() const { return getPtr() && getPtr()->isA(ErrT::classID()); @@ -574,6 +579,11 @@ template class [[nodiscard]] Expected { return !HasError; } + explicit operator bool() const { + // FIXME(EricWF): this is a terrible hack + return !HasError; + } + /// Returns a reference to the stored T value. reference get() { assertIsChecked(); diff --git a/llvm/include/llvm/Support/GenericLoopInfoImpl.h b/llvm/include/llvm/Support/GenericLoopInfoImpl.h index d19022729ace3..126b5505b15a9 100644 --- a/llvm/include/llvm/Support/GenericLoopInfoImpl.h +++ b/llvm/include/llvm/Support/GenericLoopInfoImpl.h @@ -691,7 +691,7 @@ static void compareLoops(const LoopT *L, const LoopT *OtherL, std::vector BBs = L->getBlocks(); std::vector OtherBBs = OtherL->getBlocks(); - assert(compareVectors(BBs, OtherBBs) && + assert_DISABLED(compareVectors(BBs, OtherBBs) && "Mismatched basic blocks in the loops!"); const SmallPtrSetImpl &BlocksSet = L->getBlocksSet(); diff --git a/llvm/include/llvm/Transforms/Utils/SSAUpdaterImpl.h b/llvm/include/llvm/Transforms/Utils/SSAUpdaterImpl.h index 746926e5bee33..688a8e3e9c73c 100644 --- a/llvm/include/llvm/Transforms/Utils/SSAUpdaterImpl.h +++ b/llvm/include/llvm/Transforms/Utils/SSAUpdaterImpl.h @@ -339,7 +339,7 @@ class SSAUpdaterImpl { } // Record Singular value. 
(*AvailableVals)[Info->BB] = Singular; - assert(BBMap[Info->BB] == Info && "Info missed in BBMap?"); + assert_DISABLED(BBMap[Info->BB] == Info && "Info missed in BBMap?"); Info->AvailableVal = Singular; Info->DefBB = Info->Preds[0]->DefBB; return true; diff --git a/llvm/lib/Analysis/AssumptionCache.cpp b/llvm/lib/Analysis/AssumptionCache.cpp index a0e57ab741dfa..502add4b8de10 100644 --- a/llvm/lib/Analysis/AssumptionCache.cpp +++ b/llvm/lib/Analysis/AssumptionCache.cpp @@ -208,7 +208,7 @@ void AssumptionCache::registerAssumption(AssumeInst *CI) { "Cached assumption not inside this function!"); assert(match(cast(VH), m_Intrinsic()) && "Cached something other than a call to @llvm.assume!"); - assert(AssumptionSet.insert(VH).second && + assert_DISABLED(AssumptionSet.insert(VH).second && "Cache contains multiple copies of a call!"); } #endif diff --git a/llvm/lib/Analysis/CGSCCPassManager.cpp b/llvm/lib/Analysis/CGSCCPassManager.cpp index c32739a565541..22fe04606ed4c 100644 --- a/llvm/lib/Analysis/CGSCCPassManager.cpp +++ b/llvm/lib/Analysis/CGSCCPassManager.cpp @@ -832,7 +832,7 @@ incorporateNewSCCRange(const SCCRangeT &NewSCCRange, LazyCallGraph &G, assert(C != &*NewSCCRange.begin() && "Cannot insert new SCCs without changing current SCC!"); C = &*NewSCCRange.begin(); - assert(G.lookupSCC(N) == C && "Failed to update current SCC!"); + assert_DISABLED(G.lookupSCC(N) == C && "Failed to update current SCC!"); // If we had a cached FAM proxy originally, we will want to create more of // them for each SCC that was split off. @@ -1122,7 +1122,7 @@ static LazyCallGraph::SCC &updateCGAndAnalysisManagerForPass( // structures. 
if (FormedCycle) { C = &TargetC; - assert(G.lookupSCC(N) == C && "Failed to update current SCC!"); + assert_DISABLED(G.lookupSCC(N) == C && "Failed to update current SCC!"); // If one of the invalidated SCCs had a cached proxy to a function // analysis manager, we need to create a proxy in the new current SCC as diff --git a/llvm/lib/Analysis/IVDescriptors.cpp b/llvm/lib/Analysis/IVDescriptors.cpp index 76cde01782bb0..aa2979a7aee30 100644 --- a/llvm/lib/Analysis/IVDescriptors.cpp +++ b/llvm/lib/Analysis/IVDescriptors.cpp @@ -1298,7 +1298,7 @@ static bool getCastsForInductionPHI(PredicatedScalarEvolution &PSE, assert(CastInsts.empty() && "CastInsts is expected to be empty."); auto *PN = cast(PhiScev->getValue()); - assert(PSE.getSCEV(PN) == AR && "Unexpected phi node SCEV expression"); + assert_DISABLED(PSE.getSCEV(PN) == AR && "Unexpected phi node SCEV expression"); const Loop *L = AR->getLoop(); // Find any cast instructions that participate in the def-use chain of diff --git a/llvm/lib/Analysis/ImportedFunctionsInliningStatistics.cpp b/llvm/lib/Analysis/ImportedFunctionsInliningStatistics.cpp index 6667affbc0e33..ffec874ec2d8f 100644 --- a/llvm/lib/Analysis/ImportedFunctionsInliningStatistics.cpp +++ b/llvm/lib/Analysis/ImportedFunctionsInliningStatistics.cpp @@ -65,7 +65,7 @@ void ImportedFunctionsInliningStatistics::recordInline(const Function &Caller, if (!CallerNode.Imported) { // We could avoid second lookup, but it would make the code ultra ugly. auto It = NodesMap.find(Caller.getName()); - assert(It != NodesMap.end() && "The node should be already there."); + assert(NodesMapTy::const_iterator(It) != NodesMap.end() && "The node should be already there."); // Save Caller as a starting node for traversal. The string has to be one // from map because Caller can disappear (and function name with it). 
NonImportedCallers.push_back(It->first()); diff --git a/llvm/lib/Analysis/InlineOrder.cpp b/llvm/lib/Analysis/InlineOrder.cpp index f156daa2f126f..4b20c77bde9c4 100644 --- a/llvm/lib/Analysis/InlineOrder.cpp +++ b/llvm/lib/Analysis/InlineOrder.cpp @@ -254,7 +254,7 @@ class PriorityInlineOrder : public InlineOrder> { } T pop() override { - assert(size() > 0); + assert_DISABLED(size() > 0); pop_heap_adjust(); CallBase *CB = Heap.pop_back_val(); diff --git a/llvm/lib/Analysis/LazyCallGraph.cpp b/llvm/lib/Analysis/LazyCallGraph.cpp index 5aa36bfc36d46..4e5a37d5df41a 100644 --- a/llvm/lib/Analysis/LazyCallGraph.cpp +++ b/llvm/lib/Analysis/LazyCallGraph.cpp @@ -586,7 +586,7 @@ updatePostorderSequenceForEdgeInsertion( bool LazyCallGraph::RefSCC::switchInternalEdgeToCall( Node &SourceN, Node &TargetN, function_ref MergeSCCs)> MergeCB) { - assert(!(*SourceN)[TargetN].isCall() && "Must start with a ref edge!"); + assert_DISABLED(!(*SourceN)[TargetN].isCall() && "Must start with a ref edge!"); SmallVector DeletedSCCs; #ifdef EXPENSIVE_CHECKS @@ -732,16 +732,16 @@ bool LazyCallGraph::RefSCC::switchInternalEdgeToCall( void LazyCallGraph::RefSCC::switchTrivialInternalEdgeToRef(Node &SourceN, Node &TargetN) { - assert((*SourceN)[TargetN].isCall() && "Must start with a call edge!"); + assert_DISABLED((*SourceN)[TargetN].isCall() && "Must start with a call edge!"); #ifdef EXPENSIVE_CHECKS verify(); auto VerifyOnExit = make_scope_exit([&]() { verify(); }); #endif - assert(G->lookupRefSCC(SourceN) == this && "Source must be in this RefSCC."); - assert(G->lookupRefSCC(TargetN) == this && "Target must be in this RefSCC."); - assert(G->lookupSCC(SourceN) != G->lookupSCC(TargetN) && + assert_DISABLED(G->lookupRefSCC(SourceN) == this && "Source must be in this RefSCC."); + assert_DISABLED(G->lookupRefSCC(TargetN) == this && "Target must be in this RefSCC."); + assert_DISABLED(G->lookupSCC(SourceN) != G->lookupSCC(TargetN) && "Source and Target must be in separate SCCs for this to be 
trivial!"); // Set the edge kind. @@ -750,18 +750,18 @@ void LazyCallGraph::RefSCC::switchTrivialInternalEdgeToRef(Node &SourceN, iterator_range LazyCallGraph::RefSCC::switchInternalEdgeToRef(Node &SourceN, Node &TargetN) { - assert((*SourceN)[TargetN].isCall() && "Must start with a call edge!"); + assert_DISABLED((*SourceN)[TargetN].isCall() && "Must start with a call edge!"); #ifdef EXPENSIVE_CHECKS verify(); auto VerifyOnExit = make_scope_exit([&]() { verify(); }); #endif - assert(G->lookupRefSCC(SourceN) == this && "Source must be in this RefSCC."); - assert(G->lookupRefSCC(TargetN) == this && "Target must be in this RefSCC."); + assert_DISABLED(G->lookupRefSCC(SourceN) == this && "Source must be in this RefSCC."); + assert_DISABLED(G->lookupRefSCC(TargetN) == this && "Target must be in this RefSCC."); SCC &TargetSCC = *G->lookupSCC(TargetN); - assert(G->lookupSCC(SourceN) == &TargetSCC && "Source and Target must be in " + assert_DISABLED(G->lookupSCC(SourceN) == &TargetSCC && "Source and Target must be in " "the same SCC to require the " "full CG update."); @@ -931,10 +931,10 @@ LazyCallGraph::RefSCC::switchInternalEdgeToRef(Node &SourceN, Node &TargetN) { void LazyCallGraph::RefSCC::switchOutgoingEdgeToCall(Node &SourceN, Node &TargetN) { - assert(!(*SourceN)[TargetN].isCall() && "Must start with a ref edge!"); + assert_DISABLED(!(*SourceN)[TargetN].isCall() && "Must start with a ref edge!"); - assert(G->lookupRefSCC(SourceN) == this && "Source must be in this RefSCC."); - assert(G->lookupRefSCC(TargetN) != this && + assert_DISABLED(G->lookupRefSCC(SourceN) == this && "Source must be in this RefSCC."); + assert_DISABLED(G->lookupRefSCC(TargetN) != this && "Target must not be in this RefSCC."); #ifdef EXPENSIVE_CHECKS assert(G->lookupRefSCC(TargetN)->isDescendantOf(*this) && @@ -952,10 +952,10 @@ void LazyCallGraph::RefSCC::switchOutgoingEdgeToCall(Node &SourceN, void LazyCallGraph::RefSCC::switchOutgoingEdgeToRef(Node &SourceN, Node &TargetN) { - 
assert((*SourceN)[TargetN].isCall() && "Must start with a call edge!"); + assert_DISABLED((*SourceN)[TargetN].isCall() && "Must start with a call edge!"); - assert(G->lookupRefSCC(SourceN) == this && "Source must be in this RefSCC."); - assert(G->lookupRefSCC(TargetN) != this && + assert_DISABLED(G->lookupRefSCC(SourceN) == this && "Source must be in this RefSCC."); + assert_DISABLED(G->lookupRefSCC(TargetN) != this && "Target must not be in this RefSCC."); #ifdef EXPENSIVE_CHECKS assert(G->lookupRefSCC(TargetN)->isDescendantOf(*this) && @@ -973,8 +973,8 @@ void LazyCallGraph::RefSCC::switchOutgoingEdgeToRef(Node &SourceN, void LazyCallGraph::RefSCC::insertInternalRefEdge(Node &SourceN, Node &TargetN) { - assert(G->lookupRefSCC(SourceN) == this && "Source must be in this RefSCC."); - assert(G->lookupRefSCC(TargetN) == this && "Target must be in this RefSCC."); + assert_DISABLED(G->lookupRefSCC(SourceN) == this && "Source must be in this RefSCC."); + assert_DISABLED(G->lookupRefSCC(TargetN) == this && "Target must be in this RefSCC."); SourceN->insertEdgeInternal(TargetN, Edge::Ref); @@ -988,9 +988,9 @@ void LazyCallGraph::RefSCC::insertOutgoingEdge(Node &SourceN, Node &TargetN, // First insert it into the caller. 
SourceN->insertEdgeInternal(TargetN, EK); - assert(G->lookupRefSCC(SourceN) == this && "Source must be in this RefSCC."); + assert_DISABLED(G->lookupRefSCC(SourceN) == this && "Source must be in this RefSCC."); - assert(G->lookupRefSCC(TargetN) != this && + assert_DISABLED(G->lookupRefSCC(TargetN) != this && "Target must not be in this RefSCC."); #ifdef EXPENSIVE_CHECKS assert(G->lookupRefSCC(TargetN)->isDescendantOf(*this) && @@ -1004,7 +1004,7 @@ void LazyCallGraph::RefSCC::insertOutgoingEdge(Node &SourceN, Node &TargetN, SmallVector LazyCallGraph::RefSCC::insertIncomingRefEdge(Node &SourceN, Node &TargetN) { - assert(G->lookupRefSCC(TargetN) == this && "Target must be in this RefSCC."); + assert_DISABLED(G->lookupRefSCC(TargetN) == this && "Target must be in this RefSCC."); RefSCC &SourceC = *G->lookupRefSCC(SourceN); assert(&SourceC != this && "Source must not be in this RefSCC."); #ifdef EXPENSIVE_CHECKS @@ -1145,9 +1145,9 @@ LazyCallGraph::RefSCC::insertIncomingRefEdge(Node &SourceN, Node &TargetN) { } void LazyCallGraph::RefSCC::removeOutgoingEdge(Node &SourceN, Node &TargetN) { - assert(G->lookupRefSCC(SourceN) == this && + assert_DISABLED(G->lookupRefSCC(SourceN) == this && "The source must be a member of this RefSCC."); - assert(G->lookupRefSCC(TargetN) != this && + assert_DISABLED(G->lookupRefSCC(TargetN) != this && "The target must not be a member of this RefSCC"); #ifdef EXPENSIVE_CHECKS @@ -1505,7 +1505,7 @@ void LazyCallGraph::markDeadFunction(Function &F) { // We shouldn't remove library functions as they are never really dead while // the call graph is in use -- every function definition refers to them. 
- assert(!isLibFunction(F) && + assert_DISABLED(!isLibFunction(F) && "Must not remove lib functions from the call graph!"); auto NI = NodeMap.find(&F); @@ -1768,14 +1768,14 @@ void LazyCallGraph::addSplitRefRecursiveFunctions( #ifndef NDEBUG for (Function *F1 : NewFunctions) { - assert(getEdgeKind(OriginalFunction, *F1) == Edge::Kind::Ref && + assert_DISABLED(getEdgeKind(OriginalFunction, *F1) == Edge::Kind::Ref && "Expected ref edges from original function to every new function"); Node &N1 = get(*F1); for (Function *F2 : NewFunctions) { if (F1 == F2) continue; Node &N2 = get(*F2); - assert(!N1->lookup(N2)->isCall() && + assert_DISABLED(!N1->lookup(N2)->isCall() && "Edges between new functions must be ref edges"); } } diff --git a/llvm/lib/Analysis/Loads.cpp b/llvm/lib/Analysis/Loads.cpp index 820b8e96c1d3a..227f7f1dd5e5f 100644 --- a/llvm/lib/Analysis/Loads.cpp +++ b/llvm/lib/Analysis/Loads.cpp @@ -317,7 +317,7 @@ bool llvm::isDereferenceableAndAlignedInLoop( // accessing EltSize bytes at every Step. 
APInt AccessSize = TC * Step->getAPInt(); - assert(SE.isLoopInvariant(AddRec->getStart(), L) && + assert_DISABLED(const_cast(SE).isLoopInvariant(AddRec->getStart(), L) && "implied by addrec definition"); Value *Base = nullptr; if (auto *StartS = dyn_cast(AddRec->getStart())) { diff --git a/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp b/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp index c5fba184cd085..a7042d676116c 100644 --- a/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp +++ b/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp @@ -716,7 +716,7 @@ static void AssertSorted(MemoryDependenceResults::NonLocalDepInfo &Cache, const MemoryDependenceResults::NonLocalDepInfo & MemoryDependenceResults::getNonLocalCallDependency(CallBase *QueryCall) { - assert(getDependency(QueryCall).isNonLocal() && + assert_DISABLED(getDependency(QueryCall).isNonLocal() && "getNonLocalCallDependency should only be used on calls with " "non-local deps!"); PerInstNLInfo &CacheP = NonLocalDepsMap[QueryCall]; diff --git a/llvm/lib/Analysis/MemorySSA.cpp b/llvm/lib/Analysis/MemorySSA.cpp index 1583e0e31efc1..e8768a7798a11 100644 --- a/llvm/lib/Analysis/MemorySSA.cpp +++ b/llvm/lib/Analysis/MemorySSA.cpp @@ -444,7 +444,7 @@ checkClobberSanity(const MemoryAccess *Start, MemoryAccess *ClobberAt, if (MD == Start) continue; - assert(!instructionClobbersQuery(MD, MAP.second, Query.Inst, AA) && + assert_DISABLED(!instructionClobbersQuery(MD, MAP.second, Query.Inst, const_cast(AA)) && "Found clobber before reaching ClobberAt!"); continue; } diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp index 58e23e9556f14..59b86168133ea 100644 --- a/llvm/lib/Analysis/ScalarEvolution.cpp +++ b/llvm/lib/Analysis/ScalarEvolution.cpp @@ -3674,7 +3674,7 @@ ScalarEvolution::getAddRecExpr(SmallVectorImpl &Operands, assert(!Op->getType()->isPointerTy() && "Step must be integer"); } for (const SCEV *Op : Operands) - assert(isAvailableAtLoopEntry(Op, L) && + 
assert_DISABLED(isAvailableAtLoopEntry(Op, L) && "SCEVAddRecExpr operand is not available at loop entry!"); #endif @@ -5615,7 +5615,7 @@ ScalarEvolution::createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI // for each of StartVal and Accum auto getExtendedExpr = [&](const SCEV *Expr, bool CreateSignExtend) -> const SCEV * { - assert(isLoopInvariant(Expr, L) && "Expr is expected to be invariant"); + assert_DISABLED(isLoopInvariant(Expr, L) && "Expr is expected to be invariant"); const SCEV *TruncatedExpr = getTruncateExpr(Expr, TruncTy); const SCEV *ExtendedExpr = CreateSignExtend ? getSignExtendExpr(TruncatedExpr, Expr->getType()) @@ -5782,7 +5782,7 @@ const SCEV *ScalarEvolution::createSimpleAffineAddRec(PHINode *PN, // know that it is *undefined behavior* for BEValueV to // overflow. if (auto *BEInst = dyn_cast(BEValueV)) { - assert(isLoopInvariant(Accum, L) && + assert_DISABLED(isLoopInvariant(Accum, L) && "Accum is defined outside L, but is not invariant?"); if (isAddRecNeverPoison(BEInst, L)) (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags); @@ -8831,7 +8831,7 @@ ScalarEvolution::computeBackedgeTakenCount(const Loop *L, if (EL.SymbolicMaxNotTaken != getCouldNotCompute()) ExitCounts.emplace_back(ExitBB, EL); else { - assert(EL.ExactNotTaken == getCouldNotCompute() && + assert_DISABLED(EL.ExactNotTaken == getCouldNotCompute() && "Exact is known but symbolic isn't?"); ++NumExitCountsNotComputed; } @@ -10949,7 +10949,7 @@ ScalarEvolution::SplitIntoInitAndPostInc(const Loop *L, const SCEV *S) { return { Start, Start }; // Compute post increment SCEV for loop L. 
const SCEV *PostInc = SCEVPostIncRewriter::rewrite(S, L, *this); - assert(PostInc != getCouldNotCompute() && "Unexpected could not compute"); + assert_DISABLED(PostInc != getCouldNotCompute() && "Unexpected could not compute"); return { Start, PostInc }; } @@ -11686,9 +11686,9 @@ bool ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L, return false; // Both LHS and RHS must be available at loop entry. - assert(isAvailableAtLoopEntry(LHS, L) && + assert_DISABLED(isAvailableAtLoopEntry(LHS, L) && "LHS is not available at Loop Entry"); - assert(isAvailableAtLoopEntry(RHS, L) && + assert_DISABLED(isAvailableAtLoopEntry(RHS, L) && "RHS is not available at Loop Entry"); if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS)) @@ -12799,7 +12799,7 @@ bool ScalarEvolution::isImpliedCondOperandsViaRanges(ICmpInst::Predicate Pred, bool ScalarEvolution::canIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride, bool IsSigned) { - assert(isKnownPositive(Stride) && "Positive stride expected!"); + assert_DISABLED(isKnownPositive(Stride) && "Positive stride expected!"); unsigned BitWidth = getTypeSizeInBits(RHS->getType()); const SCEV *One = getOne(Stride->getType()); @@ -13156,9 +13156,9 @@ ScalarEvolution::howManyLessThans(const SCEV *LHS, const SCEV *RHS, // max(End,Start) is End and so the result is as above, and if not // max(End,Start) is Start so we get a backedge count of zero. auto *OrigStartMinusStride = getMinusSCEV(OrigStart, Stride); - assert(isAvailableAtLoopEntry(OrigStartMinusStride, L) && "Must be!"); - assert(isAvailableAtLoopEntry(OrigStart, L) && "Must be!"); - assert(isAvailableAtLoopEntry(OrigRHS, L) && "Must be!"); + assert_DISABLED(isAvailableAtLoopEntry(OrigStartMinusStride, L) && "Must be!"); + assert_DISABLED(isAvailableAtLoopEntry(OrigStart, L) && "Must be!"); + assert_DISABLED(isAvailableAtLoopEntry(OrigRHS, L) && "Must be!"); // Can we prove (max(RHS,Start) > Start - Stride? 
if (isLoopEntryGuardedByCond(L, Cond, OrigStartMinusStride, OrigStart) && isLoopEntryGuardedByCond(L, Cond, OrigStartMinusStride, OrigRHS)) { @@ -15369,7 +15369,7 @@ ScalarEvolution::LoopGuards::collect(const Loop *L, ScalarEvolution &SE) { return MinMaxExpr; auto IsMin = isa(MinMaxExpr) || isa(MinMaxExpr); - assert(SE.isKnownNonNegative(MinMaxLHS) && + assert_DISABLED(SE.isKnownNonNegative(MinMaxLHS) && "Expected non-negative operand!"); auto *DivisibleExpr = IsMin ? GetPreviousSCEVDividesByDivisor(MinMaxLHS, Divisor) diff --git a/llvm/lib/AsmParser/LLParser.cpp b/llvm/lib/AsmParser/LLParser.cpp index 6a2372c975140..314cade9a3e92 100644 --- a/llvm/lib/AsmParser/LLParser.cpp +++ b/llvm/lib/AsmParser/LLParser.cpp @@ -1033,7 +1033,10 @@ bool LLParser::parseStandaloneMetadata() { ToReplace->replaceAllUsesWith(Init); ForwardRefMDNodes.erase(FI); - assert(NumberedMetadata[MetadataID] == Init && "Tracking VH didn't work"); + assert([&]() { + auto pos = NumberedMetadata.find(MetadataID); + return pos != NumberedMetadata.end() && pos->second == Init; }() + && "Tracking VH didn't work"); } else { if (NumberedMetadata.count(MetadataID)) return tokError("Metadata id is already used"); diff --git a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp index 4aea059551ded..f1c2e2a6992c2 100644 --- a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp +++ b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp @@ -779,7 +779,7 @@ class BitcodeReader : public BitcodeReaderBase, public GVMaterializer { // have. 
TypeID = ValueList.getTypeID(ValNo); ResVal = getFnValueByID(ValNo, nullptr, TypeID, ConstExprInsertBB); - assert((!ResVal || ResVal->getType() == getTypeByID(TypeID)) && + assert_DISABLED((!ResVal || ResVal->getType() == getTypeByID(TypeID)) && "Incorrect type ID stored for value"); return ResVal == nullptr; } @@ -1477,7 +1477,7 @@ unsigned BitcodeReader::getVirtualTypeID(Type *Ty, // contained type ID, however the second one will always be the same (i1), // so we don't need to include it in the cache key. This asserts that the // contained types are indeed as expected and there are no collisions. - assert((ChildTypeIDs.empty() || + assert_DISABLED((ChildTypeIDs.empty() || ContainedTypeIDs[It->second] == ChildTypeIDs) && "Incorrect cached contained type IDs"); return It->second; @@ -3746,7 +3746,7 @@ Error BitcodeReader::parseConstants() { } } - assert(V->getType() == getTypeByID(CurTyID) && "Incorrect result type ID"); + assert_DISABLED(V->getType() == getTypeByID(CurTyID) && "Incorrect result type ID"); if (Error Err = ValueList.assignValue(NextCstNo, V, CurTyID)) return Err; ++NextCstNo; @@ -3875,7 +3875,7 @@ Error BitcodeReader::rememberAndSkipFunctionBody() { // Save the current stream state. 
uint64_t CurBit = Stream.GetCurrentBitNo(); - assert( + assert_DISABLED( (DeferredFunctionInfo[Fn] == 0 || DeferredFunctionInfo[Fn] == CurBit) && "Mismatch between VST and scanned function offsets"); DeferredFunctionInfo[Fn] = CurBit; @@ -4872,7 +4872,7 @@ Error BitcodeReader::parseFunctionBody(Function *F) { unsigned FTyID = FunctionTypeIDs[F]; for (Argument &I : F->args()) { unsigned ArgTyID = getContainedTypeID(FTyID, ArgNo + 1); - assert(I.getType() == getTypeByID(ArgTyID) && + assert_DISABLED(I.getType() == getTypeByID(ArgTyID) && "Incorrect fully specified type for Function Argument"); ValueList.push_back(&I, ArgTyID); ++ArgNo; @@ -6836,7 +6836,7 @@ Error BitcodeReader::parseFunctionBody(Function *F) { // Non-void values get registered in the value table for future use. if (!I->getType()->isVoidTy()) { - assert(I->getType() == getTypeByID(ResTypeID) && + assert_DISABLED(I->getType() == getTypeByID(ResTypeID) && "Incorrect result type ID"); if (Error Err = ValueList.assignValue(NextValueNo++, I, ResTypeID)) return Err; diff --git a/llvm/lib/Bitcode/Writer/ValueEnumerator.cpp b/llvm/lib/Bitcode/Writer/ValueEnumerator.cpp index 9f735f77d29dc..22484c831cf18 100644 --- a/llvm/lib/Bitcode/Writer/ValueEnumerator.cpp +++ b/llvm/lib/Bitcode/Writer/ValueEnumerator.cpp @@ -789,7 +789,7 @@ void ValueEnumerator::EnumerateFunctionLocalListMetadata( if (isa(VAM)) { assert(MetadataMap.count(VAM) && "LocalAsMetadata should be enumerated before DIArgList"); - assert(MetadataMap[VAM].F == F && + assert_DISABLED(MetadataMap[VAM].F == F && "Expected LocalAsMetadata in the same function"); } else { assert(isa(VAM) && diff --git a/llvm/lib/CGData/OutlinedHashTreeRecord.cpp b/llvm/lib/CGData/OutlinedHashTreeRecord.cpp index cc760634d7fae..a8b12da73022e 100644 --- a/llvm/lib/CGData/OutlinedHashTreeRecord.cpp +++ b/llvm/lib/CGData/OutlinedHashTreeRecord.cpp @@ -146,7 +146,7 @@ void OutlinedHashTreeRecord::convertFromStableData( IdHashNodeMapTy IdNodeMap; // Initialize the root node at 
0. IdNodeMap[0] = HashTree->getRoot(); - assert(IdNodeMap[0]->Successors.empty()); + assert_DISABLED(IdNodeMap[0]->Successors.empty()); for (auto &P : IdNodeStableMap) { auto Id = P.first; diff --git a/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp b/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp index a692e7aef6268..ab6a006856d1b 100644 --- a/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp @@ -3033,7 +3033,7 @@ void CodeViewDebug::collectLexicalBlockInfo( void CodeViewDebug::endFunctionImpl(const MachineFunction *MF) { const Function &GV = MF->getFunction(); assert(FnDebugInfo.count(&GV)); - assert(CurFn == FnDebugInfo[&GV].get()); + assert_DISABLED(CurFn == FnDebugInfo[&GV].get()); collectVariableInfo(GV.getSubprogram()); diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp b/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp index 20ee50dca499f..a6dcb7f5bab9e 100644 --- a/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp @@ -743,7 +743,7 @@ DIE *DwarfCompileUnit::constructLexicalScopeDIE(LexicalScope *Scope) { auto ScopeDIE = DIE::get(DIEValueAllocator, dwarf::DW_TAG_lexical_block); if (Scope->isAbstractScope()) { - assert(!getAbstractScopeDIEs().count(DS) && + assert_DISABLED(!getAbstractScopeDIEs().count(DS) && "Abstract DIE for this scope exists!"); getAbstractScopeDIEs()[DS] = ScopeDIE; return ScopeDIE; diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp b/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp index 10736305762d2..71ebebde479da 100644 --- a/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp @@ -901,7 +901,7 @@ void DwarfDebug::constructCallSiteEntryDIEs(const DISubprogram &SP, // Ex. 
CALL_INSTRUCTION { // DELAY_SLOT_INSTRUCTION } // LABEL_AFTER_CALL - assert(getLabelAfterInsn(&*CallInstrBundle) == + assert_DISABLED(getLabelAfterInsn(&*CallInstrBundle) == getLabelAfterInsn(&*DelaySlotBundle) && "Call and its successor instruction don't have same label after."); return true; @@ -2217,7 +2217,7 @@ void DwarfDebug::beginFunctionImpl(const MachineFunction *MF) { CurFn = MF; auto *SP = MF->getFunction().getSubprogram(); - assert(LScopes.empty() || SP == LScopes.getCurrentFunctionScope()->getScopeNode()); + assert_DISABLED(LScopes.empty() || SP == LScopes.getCurrentFunctionScope()->getScopeNode()); if (SP->getUnit()->getEmissionKind() == DICompileUnit::NoDebug) return; @@ -2299,7 +2299,7 @@ void DwarfDebug::endFunctionImpl(const MachineFunction *MF) { for (const auto &R : Asm->MBBSectionRanges) addArangeLabel(SymbolCU(&TheCU, R.second.BeginLabel)); - assert(InfoHolder.getScopeVariables().empty()); + assert_DISABLED(InfoHolder.getScopeVariables().empty()); PrevLabel = nullptr; CurFn = nullptr; return; diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.cpp b/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.cpp index f5d2863ae70b7..2075aeef49207 100644 --- a/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.cpp @@ -450,7 +450,7 @@ void DwarfExpression::cancelEntryValue() { // The temporary buffer can't be emptied, so for now just assert that nothing // has been emitted to it. 
- assert(getTemporaryBufferSize() == 0 && + assert_DISABLED(getTemporaryBufferSize() == 0 && "Began emitting entry value block before cancelling entry value"); LocationKind = SavedLocationKind; diff --git a/llvm/lib/CodeGen/AssignmentTrackingAnalysis.cpp b/llvm/lib/CodeGen/AssignmentTrackingAnalysis.cpp index 59257fd6aadd5..fa499f2444483 100644 --- a/llvm/lib/CodeGen/AssignmentTrackingAnalysis.cpp +++ b/llvm/lib/CodeGen/AssignmentTrackingAnalysis.cpp @@ -1287,7 +1287,7 @@ class AssignmentTrackingLowering { Join.VariableIDsInBlock = A.VariableIDsInBlock; Join.VariableIDsInBlock |= B.VariableIDsInBlock; - assert(Join.isValid()); + assert_DISABLED(Join.isValid()); return Join; } }; diff --git a/llvm/lib/CodeGen/BranchFolding.cpp b/llvm/lib/CodeGen/BranchFolding.cpp index 1dc278586f117..4d2c478e38355 100644 --- a/llvm/lib/CodeGen/BranchFolding.cpp +++ b/llvm/lib/CodeGen/BranchFolding.cpp @@ -1311,7 +1311,7 @@ static void copyDebugInfoToSuccessor(const TargetInstrInfo *TII, // branch folding. static void salvageDebugInfoFromEmptyBlock(const TargetInstrInfo *TII, MachineBasicBlock &MBB) { - assert(IsEmptyBlock(&MBB) && "Expected an empty block (except debug info)."); + assert_DISABLED(IsEmptyBlock(&MBB) && "Expected an empty block (except debug info)."); // If this MBB is the only predecessor of a successor it is legal to copy // DBG_VALUE instructions to the beginning of the successor. 
for (MachineBasicBlock *SuccBB : MBB.successors()) diff --git a/llvm/lib/CodeGen/BranchRelaxation.cpp b/llvm/lib/CodeGen/BranchRelaxation.cpp index a762aab43ddd2..7f49ff17e8cee 100644 --- a/llvm/lib/CodeGen/BranchRelaxation.cpp +++ b/llvm/lib/CodeGen/BranchRelaxation.cpp @@ -153,7 +153,7 @@ void BranchRelaxation::verify() { if (MI.getOpcode() == TargetOpcode::FAULTING_OP) continue; MachineBasicBlock *DestBB = TII->getBranchDestBlock(MI); - assert(isBlockInRange(MI, *DestBB) || + assert_DISABLED(isBlockInRange(MI, *DestBB) || RelaxedUnconditionals.contains({&MBB, DestBB})); } } diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp index 67a3590151141..0d39cfcf086a6 100644 --- a/llvm/lib/CodeGen/CodeGenPrepare.cpp +++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp @@ -3989,7 +3989,7 @@ class SimplificationTracker { To = dyn_cast(OldReplacement); OldReplacement = Get(From); } - assert(To && Get(To) == To && "Replacement PHI node is already replaced."); + assert_DISABLED(To && Get(To) == To && "Replacement PHI node is already replaced."); Put(From, To); From->replaceAllUsesWith(To); AllPhiNodes.erase(From); @@ -8052,7 +8052,7 @@ class VectorPromoteHelper { /// Set the instruction that will be combined with the transition. 
void recordCombineInstruction(Instruction *ToBeCombined) { - assert(canCombine(ToBeCombined) && "Unsupported instruction to combine"); + assert_DISABLED(canCombine(ToBeCombined) && "Unsupported instruction to combine"); CombineInst = ToBeCombined; } diff --git a/llvm/lib/CodeGen/ExpandVectorPredication.cpp b/llvm/lib/CodeGen/ExpandVectorPredication.cpp index dd18b524e3f9c..0f3120848afc7 100644 --- a/llvm/lib/CodeGen/ExpandVectorPredication.cpp +++ b/llvm/lib/CodeGen/ExpandVectorPredication.cpp @@ -254,7 +254,7 @@ Value *CachingVPExpander::convertEVLToMask(IRBuilder<> &Builder, Value * CachingVPExpander::expandPredicationInBinaryOperator(IRBuilder<> &Builder, VPIntrinsic &VPI) { - assert((maySpeculateLanes(VPI) || VPI.canIgnoreVectorLengthParam()) && + assert_DISABLED((maySpeculateLanes(VPI) || VPI.canIgnoreVectorLengthParam()) && "Implicitly dropping %evl in non-speculatable operator!"); auto OC = static_cast(*VPI.getFunctionalOpcode()); @@ -319,7 +319,7 @@ Value *CachingVPExpander::expandPredicationToIntCall( Value *CachingVPExpander::expandPredicationToFPCall( IRBuilder<> &Builder, VPIntrinsic &VPI, unsigned UnpredicatedIntrinsicID) { - assert((maySpeculateLanes(VPI) || VPI.canIgnoreVectorLengthParam()) && + assert_DISABLED((maySpeculateLanes(VPI) || VPI.canIgnoreVectorLengthParam()) && "Implicitly dropping %evl in non-speculatable operator!"); switch (UnpredicatedIntrinsicID) { @@ -377,7 +377,7 @@ static Value *getNeutralReductionElement(const VPReductionIntrinsic &VPI, Value * CachingVPExpander::expandPredicationInReduction(IRBuilder<> &Builder, VPReductionIntrinsic &VPI) { - assert((maySpeculateLanes(VPI) || VPI.canIgnoreVectorLengthParam()) && + assert_DISABLED((maySpeculateLanes(VPI) || VPI.canIgnoreVectorLengthParam()) && "Implicitly dropping %evl in non-speculatable operator!"); Value *Mask = VPI.getMaskParam(); @@ -560,7 +560,7 @@ CachingVPExpander::expandPredicationInMemoryIntrinsic(IRBuilder<> &Builder, Value 
*CachingVPExpander::expandPredicationInComparison(IRBuilder<> &Builder, VPCmpIntrinsic &VPI) { - assert((maySpeculateLanes(VPI) || VPI.canIgnoreVectorLengthParam()) && + assert_DISABLED((maySpeculateLanes(VPI) || VPI.canIgnoreVectorLengthParam()) && "Implicitly dropping %evl in non-speculatable operator!"); assert(*VPI.getFunctionalOpcode() == Instruction::ICmp || diff --git a/llvm/lib/CodeGen/FixupStatepointCallerSaved.cpp b/llvm/lib/CodeGen/FixupStatepointCallerSaved.cpp index 3bb9da5f1a37b..6c7607c367b0b 100644 --- a/llvm/lib/CodeGen/FixupStatepointCallerSaved.cpp +++ b/llvm/lib/CodeGen/FixupStatepointCallerSaved.cpp @@ -443,7 +443,7 @@ class StatepointState { MachineInstr *Reload = It->getPrevNode(); int Dummy = 0; (void)Dummy; - assert(TII.isLoadFromStackSlot(*Reload, Dummy) == Reg); + assert_DISABLED(TII.isLoadFromStackSlot(*Reload, Dummy) == Reg); assert(Dummy == FI); MBB->remove(Reload); MBB->insertAfter(It, Reload); diff --git a/llvm/lib/CodeGen/GlobalISel/CSEMIRBuilder.cpp b/llvm/lib/CodeGen/GlobalISel/CSEMIRBuilder.cpp index 547529bbe699a..1c71c1ec4ddcd 100644 --- a/llvm/lib/CodeGen/GlobalISel/CSEMIRBuilder.cpp +++ b/llvm/lib/CodeGen/GlobalISel/CSEMIRBuilder.cpp @@ -147,7 +147,7 @@ bool CSEMIRBuilder::checkCopyToDefsPossible(ArrayRef DstOps) { MachineInstrBuilder CSEMIRBuilder::generateCopiesIfRequired(ArrayRef DstOps, MachineInstrBuilder &MIB) { - assert(checkCopyToDefsPossible(DstOps) && + assert_DISABLED(checkCopyToDefsPossible(DstOps) && "Impossible return a single MIB with copies to multiple defs"); if (DstOps.size() == 1) { const DstOp &Op = DstOps[0]; diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp index 563a826441345..a6887234800f7 100644 --- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp +++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp @@ -2215,7 +2215,7 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID, const DbgLabelInst &DI = cast(CI); 
assert(DI.getLabel() && "Missing label"); - assert(DI.getLabel()->isValidLocationForIntrinsic( + assert_DISABLED(DI.getLabel()->isValidLocationForIntrinsic( MIRBuilder.getDebugLoc()) && "Expected inlined-at fields to agree"); @@ -3584,7 +3584,7 @@ void IRTranslator::translateDbgInfo(const Instruction &Inst, if (DbgLabelRecord *DLR = dyn_cast(&DR)) { MIRBuilder.setDebugLoc(DLR->getDebugLoc()); assert(DLR->getLabel() && "Missing label"); - assert(DLR->getLabel()->isValidLocationForIntrinsic( + assert_DISABLED(DLR->getLabel()->isValidLocationForIntrinsic( MIRBuilder.getDebugLoc()) && "Expected inlined-at fields to agree"); MIRBuilder.buildDbgLabel(DLR->getLabel()); diff --git a/llvm/lib/CodeGen/GlobalISel/InstructionSelect.cpp b/llvm/lib/CodeGen/GlobalISel/InstructionSelect.cpp index 9444ff518ca9c..e9ed75b5998d7 100644 --- a/llvm/lib/CodeGen/GlobalISel/InstructionSelect.cpp +++ b/llvm/lib/CodeGen/GlobalISel/InstructionSelect.cpp @@ -373,7 +373,7 @@ bool InstructionSelect::selectInstr(MachineInstr &MI) { const TargetRegisterClass *DstRC = MRI.getRegClassOrNull(DstReg); if (DstRC) MRI.setRegClass(SrcReg, DstRC); - assert(canReplaceReg(DstReg, SrcReg, MRI) && + assert_DISABLED(canReplaceReg(DstReg, SrcReg, MRI) && "Must be able to replace dst with src!"); MI.eraseFromParent(); MRI.replaceRegWith(DstReg, SrcReg); diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp index 98aece0d68d6e..f0498074a406f 100644 --- a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp +++ b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp @@ -4663,7 +4663,7 @@ LegalizerHelper::LegalizeResult LegalizerHelper::fewerElementsVectorMultiEltType( GenericMachineInstr &MI, unsigned NumElts, std::initializer_list NonVecOpIndices) { - assert(hasSameNumEltsOnAllVectorOperands(MI, MRI, NonVecOpIndices) && + assert_DISABLED(hasSameNumEltsOnAllVectorOperands(MI, MRI, NonVecOpIndices) && "Non-compatible opcode or not specified non-vector operands"); 
unsigned OrigNumElts = MRI.getType(MI.getReg(0)).getNumElements(); diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerInfo.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerInfo.cpp index c9ee35373cd44..721866bf3add4 100644 --- a/llvm/lib/CodeGen/GlobalISel/LegalizerInfo.cpp +++ b/llvm/lib/CodeGen/GlobalISel/LegalizerInfo.cpp @@ -292,7 +292,7 @@ LegalizerInfo::getActionDefinitions(unsigned Opcode) const { LegalizeRuleSet &LegalizerInfo::getActionDefinitionsBuilder(unsigned Opcode) { unsigned OpcodeIdx = getActionDefinitionsIdx(Opcode); auto &Result = RulesForOpcode[OpcodeIdx]; - assert(!Result.isAliasedByAnother() && "Modifying this opcode will modify aliases"); + assert_DISABLED(!Result.isAliasedByAnother() && "Modifying this opcode will modify aliases"); return Result; } diff --git a/llvm/lib/CodeGen/GlobalISel/LoadStoreOpt.cpp b/llvm/lib/CodeGen/GlobalISel/LoadStoreOpt.cpp index 0d0c093648eba..99086cca40c14 100644 --- a/llvm/lib/CodeGen/GlobalISel/LoadStoreOpt.cpp +++ b/llvm/lib/CodeGen/GlobalISel/LoadStoreOpt.cpp @@ -948,7 +948,7 @@ void LoadStoreOpt::initializeStoreMergeTargetInfo(unsigned AddrSpace) { // illegal ones, which would just result in being split again. if (LegalStoreSizes.count(AddrSpace)) { - assert(LegalStoreSizes[AddrSpace].any()); + assert_DISABLED(LegalStoreSizes[AddrSpace].any()); return; // Already cached sizes for this address space. 
} diff --git a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp index 59f2fc633f5de..c273ff41a6ad2 100644 --- a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp +++ b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp @@ -53,7 +53,7 @@ MachineIRBuilder::buildDirectDbgValue(Register Reg, const MDNode *Variable, const MDNode *Expr) { assert(isa(Variable) && "not a variable"); assert(cast(Expr)->isValid() && "not an expression"); - assert( + assert_DISABLED( cast(Variable)->isValidLocationForIntrinsic(getDL()) && "Expected inlined-at fields to agree"); return insertInstr(BuildMI(getMF(), getDL(), @@ -66,7 +66,7 @@ MachineIRBuilder::buildIndirectDbgValue(Register Reg, const MDNode *Variable, const MDNode *Expr) { assert(isa(Variable) && "not a variable"); assert(cast(Expr)->isValid() && "not an expression"); - assert( + assert_DISABLED( cast(Variable)->isValidLocationForIntrinsic(getDL()) && "Expected inlined-at fields to agree"); return insertInstr(BuildMI(getMF(), getDL(), @@ -79,7 +79,7 @@ MachineInstrBuilder MachineIRBuilder::buildFIDbgValue(int FI, const MDNode *Expr) { assert(isa(Variable) && "not a variable"); assert(cast(Expr)->isValid() && "not an expression"); - assert( + assert_DISABLED( cast(Variable)->isValidLocationForIntrinsic(getDL()) && "Expected inlined-at fields to agree"); return insertInstr(buildInstrNoInsert(TargetOpcode::DBG_VALUE) @@ -94,7 +94,7 @@ MachineInstrBuilder MachineIRBuilder::buildConstDbgValue(const Constant &C, const MDNode *Expr) { assert(isa(Variable) && "not a variable"); assert(cast(Expr)->isValid() && "not an expression"); - assert( + assert_DISABLED( cast(Variable)->isValidLocationForIntrinsic(getDL()) && "Expected inlined-at fields to agree"); auto MIB = buildInstrNoInsert(TargetOpcode::DBG_VALUE); diff --git a/llvm/lib/CodeGen/GlobalISel/RegBankSelect.cpp b/llvm/lib/CodeGen/GlobalISel/RegBankSelect.cpp index e386647daa653..9dcfafb8ccd4b 100644 --- 
a/llvm/lib/CodeGen/GlobalISel/RegBankSelect.cpp +++ b/llvm/lib/CodeGen/GlobalISel/RegBankSelect.cpp @@ -324,7 +324,7 @@ void RegBankSelect::tryAvoidingSplit( RegBankSelect::RepairingPlacement &RepairPt, const MachineOperand &MO, const RegisterBankInfo::ValueMapping &ValMapping) const { const MachineInstr &MI = *MO.getParent(); - assert(RepairPt.hasSplit() && "We should not have to adjust for split"); + assert_DISABLED(RepairPt.hasSplit() && "We should not have to adjust for split"); // Splitting should only occur for PHIs or between terminators, // because we only do local repairing. assert((MI.isPHI() || MI.isTerminator()) && "Why do we split?"); diff --git a/llvm/lib/CodeGen/IndirectBrExpandPass.cpp b/llvm/lib/CodeGen/IndirectBrExpandPass.cpp index 05a7387b1232c..f4f9ecebe0703 100644 --- a/llvm/lib/CodeGen/IndirectBrExpandPass.cpp +++ b/llvm/lib/CodeGen/IndirectBrExpandPass.cpp @@ -145,7 +145,7 @@ bool runImpl(Function &F, const TargetLowering *TLI, DomTreeUpdater *DTU) { if (BlockAddressUseIt == BB.use_end()) continue; - assert(std::find_if(std::next(BlockAddressUseIt), BB.use_end(), + assert_DISABLED(std::find_if(std::next(BlockAddressUseIt), BB.use_end(), IsBlockAddressUse) == BB.use_end() && "There should only ever be a single blockaddress use because it is " "a constant and should be uniqued."); diff --git a/llvm/lib/CodeGen/InlineSpiller.cpp b/llvm/lib/CodeGen/InlineSpiller.cpp index d1f67d1a3d4aa..4ef086742cc51 100644 --- a/llvm/lib/CodeGen/InlineSpiller.cpp +++ b/llvm/lib/CodeGen/InlineSpiller.cpp @@ -585,7 +585,7 @@ void InlineSpiller::markValueUsed(LiveInterval *LI, VNInfo *VNI) { if (!SnippetCopies.count(MI)) continue; LiveInterval &SnipLI = LIS.getInterval(MI->getOperand(1).getReg()); - assert(isRegToSpill(SnipLI.reg()) && "Unexpected register in copy"); + assert_DISABLED(isRegToSpill(SnipLI.reg()) && "Unexpected register in copy"); VNInfo *SnipVNI = SnipLI.getVNInfoAt(VNI->def.getRegSlot(true)); assert(SnipVNI && "Snippet undefined before copy"); 
WorkList.push_back(std::make_pair(&SnipLI, SnipVNI)); @@ -1047,7 +1047,7 @@ foldMemoryOperand(ArrayRef> Ops, MI->eraseFromParent(); // Insert any new instructions other than FoldMI into the LIS maps. - assert(!MIS.empty() && "Unexpected empty span of instructions!"); + assert_DISABLED(!MIS.empty() && "Unexpected empty span of instructions!"); for (MachineInstr &MI : MIS) if (&MI != FoldMI) LIS.InsertMachineInstrInMaps(MI); diff --git a/llvm/lib/CodeGen/LiveDebugValues/VarLocBasedImpl.cpp b/llvm/lib/CodeGen/LiveDebugValues/VarLocBasedImpl.cpp index c80b54a4f9121..391330ddd4ed1 100644 --- a/llvm/lib/CodeGen/LiveDebugValues/VarLocBasedImpl.cpp +++ b/llvm/lib/CodeGen/LiveDebugValues/VarLocBasedImpl.cpp @@ -1251,7 +1251,7 @@ void VarLocBasedLDV::getUsedRegs(const VarLocSet &CollectFrom, // We found a VarLoc ID for a VarLoc that lives in a register. Figure out // which register and add it to UsedRegs. uint32_t FoundReg = LocIndex::fromRawInteger(*It).Location; - assert((UsedRegs.empty() || FoundReg != UsedRegs.back()) && + assert_DISABLED((UsedRegs.empty() || FoundReg != UsedRegs.back()) && "Duplicate used reg"); UsedRegs.push_back(FoundReg); diff --git a/llvm/lib/CodeGen/LiveDebugVariables.cpp b/llvm/lib/CodeGen/LiveDebugVariables.cpp index 2ff346d3fd022..12a042345dc2f 100644 --- a/llvm/lib/CodeGen/LiveDebugVariables.cpp +++ b/llvm/lib/CodeGen/LiveDebugVariables.cpp @@ -1687,7 +1687,7 @@ void UserValue::insertDebugValue(MachineBasicBlock *MBB, SlotIndex StartIdx, ++NumInsertedDebugValues; - assert(cast(Variable) + assert_DISABLED(cast(Variable) ->isValidLocationForIntrinsic(getDebugLoc()) && "Expected inlined-at fields to agree"); diff --git a/llvm/lib/CodeGen/LiveInterval.cpp b/llvm/lib/CodeGen/LiveInterval.cpp index 0683353d9cdba..fad818f6e19a8 100644 --- a/llvm/lib/CodeGen/LiveInterval.cpp +++ b/llvm/lib/CodeGen/LiveInterval.cpp @@ -156,7 +156,7 @@ class CalcLiveRangeUtilBase { /// merge and eliminate all segments that this will overlap /// with. 
The iterator is not invalidated. void extendSegmentEndTo(iterator I, SlotIndex NewEnd) { - assert(I != segments().end() && "Not a valid segment!"); + assert_DISABLED(I != segments().end() && "Not a valid segment!"); Segment *S = segmentAt(I); VNInfo *ValNo = I->valno; @@ -184,7 +184,7 @@ class CalcLiveRangeUtilBase { /// by I to start at the specified endpoint. To do this, we should /// merge and eliminate all segments that this will overlap with. iterator extendSegmentStartTo(iterator I, SlotIndex NewStart) { - assert(I != segments().end() && "Not a valid segment!"); + assert_DISABLED(I != segments().end() && "Not a valid segment!"); Segment *S = segmentAt(I); VNInfo *ValNo = I->valno; diff --git a/llvm/lib/CodeGen/MachineBlockPlacement.cpp b/llvm/lib/CodeGen/MachineBlockPlacement.cpp index dd5220b4599f9..33fb22395d842 100644 --- a/llvm/lib/CodeGen/MachineBlockPlacement.cpp +++ b/llvm/lib/CodeGen/MachineBlockPlacement.cpp @@ -1839,7 +1839,7 @@ void MachineBlockPlacement::fillWorkLists( Chain.UnscheduledPredecessors == 0 && "Attempting to place block with unscheduled predecessors in worklist."); for (MachineBasicBlock *ChainBB : Chain) { - assert(BlockToChain[ChainBB] == &Chain && + assert_DISABLED(BlockToChain[ChainBB] == &Chain && "Block in chain doesn't match BlockToChain map."); for (MachineBasicBlock *Pred : ChainBB->predecessors()) { if (BlockFilter && !BlockFilter->count(Pred)) @@ -1864,7 +1864,7 @@ void MachineBlockPlacement::buildChain(const MachineBasicBlock *HeadBB, BlockChain &Chain, BlockFilterSet *BlockFilter) { assert(HeadBB && "BB must not be null.\n"); - assert(BlockToChain[HeadBB] == &Chain && "BlockToChainMap mis-match.\n"); + assert_DISABLED(BlockToChain[HeadBB] == &Chain && "BlockToChainMap mis-match.\n"); MachineFunction::iterator PrevUnplacedBlockIt = F->begin(); BlockFilterSet::iterator PrevUnplacedBlockInFilterIt; if (BlockFilter) @@ -1875,7 +1875,7 @@ void MachineBlockPlacement::buildChain(const MachineBasicBlock *HeadBB, MachineBasicBlock 
*BB = *std::prev(Chain.end()); while (true) { assert(BB && "null block found at end of chain in loop."); - assert(BlockToChain[BB] == &Chain && "BlockToChainMap mis-match in loop."); + assert_DISABLED(BlockToChain[BB] == &Chain && "BlockToChainMap mis-match in loop."); assert(*std::prev(Chain.end()) == BB && "BB Not found at end of chain."); // Look for the best viable successor if there is one to place immediately @@ -2860,7 +2860,7 @@ void MachineBlockPlacement::buildCFGChains() { // Given the exact block placement we chose, we may actually not _need_ to // be able to edit PrevBB's terminator sequence, but not being _able_ to // do that at this point is a bug. - assert((!TII->analyzeBranch(*PrevBB, TBB, FBB, Cond) || + assert_DISABLED((!TII->analyzeBranch(*PrevBB, TBB, FBB, Cond) || !PrevBB->canFallThrough()) && "Unexpected block with un-analyzable fallthrough!"); Cond.clear(); diff --git a/llvm/lib/CodeGen/MachineCopyPropagation.cpp b/llvm/lib/CodeGen/MachineCopyPropagation.cpp index 49ce4b660c3ae..2cd7a826de06c 100644 --- a/llvm/lib/CodeGen/MachineCopyPropagation.cpp +++ b/llvm/lib/CodeGen/MachineCopyPropagation.cpp @@ -1478,7 +1478,7 @@ void MachineCopyPropagation::EliminateSpillageCopies(MachineBasicBlock &MBB) { assert(MaybePrevReload && "Found a valid leader through nullptr should not happend"); L = Leader->second; - assert(SpillChain[L].size() > 0 && + assert_DISABLED(SpillChain[L].size() > 0 && "Existing chain's length should be larger than zero"); } assert(!ChainLeader.count(&MI) && !ChainLeader.count(MaybeSpill) && diff --git a/llvm/lib/CodeGen/MachineTraceMetrics.cpp b/llvm/lib/CodeGen/MachineTraceMetrics.cpp index 92df6b9ab48d7..227c7e7071e8a 100644 --- a/llvm/lib/CodeGen/MachineTraceMetrics.cpp +++ b/llvm/lib/CodeGen/MachineTraceMetrics.cpp @@ -689,7 +689,7 @@ struct DataDep { assert(!DefI.atEnd() && "Register has no defs"); DefMI = DefI->getParent(); DefOp = DefI.getOperandNo(); - assert((++DefI).atEnd() && "Register has multiple defs"); + 
assert_DISABLED((++DefI).atEnd() && "Register has multiple defs"); } }; diff --git a/llvm/lib/CodeGen/PeepholeOptimizer.cpp b/llvm/lib/CodeGen/PeepholeOptimizer.cpp index 746ec0fa9da09..5a9096418aa35 100644 --- a/llvm/lib/CodeGen/PeepholeOptimizer.cpp +++ b/llvm/lib/CodeGen/PeepholeOptimizer.cpp @@ -1228,7 +1228,7 @@ getNewSource(MachineRegisterInfo *MRI, const TargetInstrInfo *TII, /// \pre isCoalescableCopy(*MI) is true. /// \return True, when \p MI has been rewritten. False otherwise. bool PeepholeOptimizer::optimizeCoalescableCopy(MachineInstr &MI) { - assert(isCoalescableCopy(MI) && "Invalid argument"); + assert_DISABLED(isCoalescableCopy(MI) && "Invalid argument"); assert(MI.getDesc().getNumDefs() == 1 && "Coalescer can understand multiple defs?!"); const MachineOperand &MODef = MI.getOperand(0); @@ -1329,7 +1329,7 @@ PeepholeOptimizer::rewriteSource(MachineInstr &CopyLike, /// All COPY instructions created, are inserted in \p LocalMIs. bool PeepholeOptimizer::optimizeUncoalescableCopy( MachineInstr &MI, SmallPtrSetImpl &LocalMIs) { - assert(isUncoalescableCopy(MI) && "Invalid argument"); + assert_DISABLED(isUncoalescableCopy(MI) && "Invalid argument"); UncoalescableRewriter CpyRewriter(MI); // Rewrite each rewritable source by generating new COPYs. 
This works @@ -1631,7 +1631,7 @@ bool PeepholeOptimizer::optimizeRecurrence(MachineInstr &PHI) { SmallSet TargetRegs; for (unsigned Idx = 1; Idx < PHI.getNumOperands(); Idx += 2) { MachineOperand &MO = PHI.getOperand(Idx); - assert(isVirtualRegisterOperand(MO) && "Invalid PHI instruction"); + assert_DISABLED(isVirtualRegisterOperand(MO) && "Invalid PHI instruction"); TargetRegs.insert(MO.getReg()); } diff --git a/llvm/lib/CodeGen/RegAllocGreedy.cpp b/llvm/lib/CodeGen/RegAllocGreedy.cpp index 9cb596be96f99..c481f8770be8f 100644 --- a/llvm/lib/CodeGen/RegAllocGreedy.cpp +++ b/llvm/lib/CodeGen/RegAllocGreedy.cpp @@ -2306,7 +2306,7 @@ void RAGreedy::tryHintRecoloring(const LiveInterval &VirtReg) { // This may be a skipped register. if (!VRM->hasPhys(Reg)) { - assert(!shouldAllocateRegister(Reg) && + assert_DISABLED(!shouldAllocateRegister(Reg) && "We have an unallocated variable which should have been handled"); continue; } diff --git a/llvm/lib/CodeGen/RegisterCoalescer.cpp b/llvm/lib/CodeGen/RegisterCoalescer.cpp index 2e1f498c090d1..81174ea294044 100644 --- a/llvm/lib/CodeGen/RegisterCoalescer.cpp +++ b/llvm/lib/CodeGen/RegisterCoalescer.cpp @@ -3392,7 +3392,7 @@ static bool isDefInSubRange(LiveInterval &LI, SlotIndex Def) { } void JoinVals::pruneMainSegments(LiveInterval &LI, bool &ShrinkMainRange) { - assert(&static_cast(LI) == &LR); + assert_DISABLED(&static_cast(LI) == &LR); for (unsigned i = 0, e = LR.getNumValNums(); i != e; ++i) { if (Vals[i].Resolution != CR_Keep) diff --git a/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp b/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp index a0632eb17e65e..cf7402366ab1b 100644 --- a/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp +++ b/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp @@ -435,7 +435,7 @@ void ScheduleDAGInstrs::addVRegDefDeps(SUnit *SU, unsigned OperIdx) { } if (MO.isDead()) { - assert(deadDefHasNoUse(MO) && "Dead defs should have no uses"); + assert_DISABLED(deadDefHasNoUse(MO) && "Dead defs should have no uses"); } else { // Add data 
dependence to all uses we found so far. const TargetSubtargetInfo &ST = MF.getSubtarget(); diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp index ad2d2ede302af..16982213a5473 100644 --- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp @@ -21388,7 +21388,7 @@ bool DAGCombiner::tryStoreMergeOfLoads(SmallVectorImpl &StoreNodes, SDValue StoreOp = NewLoad; if (NeedRotate) { unsigned LoadWidth = ElementSizeBytes * 8 * 2; - assert(JointMemOpVT == EVT::getIntegerVT(Context, LoadWidth) && + assert_DISABLED(JointMemOpVT == EVT::getIntegerVT(Context, LoadWidth) && "Unexpected type for rotate-able load pair"); SDValue RotAmt = DAG.getShiftAmountConstant(LoadWidth / 2, JointMemOpVT, LoadDL); @@ -25774,7 +25774,7 @@ static SDValue combineShuffleOfSplatVal(ShuffleVectorSDNode *Shuf, // Or sentinel undef, if we know we'd pick a known-undef element. Idx = UndefElts[Idx] ? -1 : *MinNonUndefIdx; } - assert(SplatMask != Shuf->getMask() && "Expected mask to change!"); + assert_DISABLED(SplatMask != Shuf->getMask() && "Expected mask to change!"); return DAG.getVectorShuffle(VT, SDLoc(Shuf), Shuf->getOperand(0), Shuf->getOperand(1), SplatMask); } diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp index cb6d3fe4db8a4..71f6868d8ab04 100644 --- a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp @@ -721,7 +721,7 @@ void DAGTypeLegalizer::SetSoftenedFloat(SDValue Op, SDValue Result) { #ifndef NDEBUG EVT VT = Result.getValueType(); LLVMContext &Ctx = *DAG.getContext(); - assert((VT == EVT::getIntegerVT(Ctx, 80) || + assert_DISABLED((VT == EVT::getIntegerVT(Ctx, 80) || VT == TLI.getTypeToTransformTo(Ctx, Op.getValueType())) && "Invalid type for softened float"); #endif diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp 
index 0770355ec18c0..31e7ad5e46aa0 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp @@ -11749,7 +11749,7 @@ void SelectionDAG::VerifyDAGDivergence() { std::vector TopoOrder; CreateTopologicalOrder(TopoOrder); for (auto *N : TopoOrder) { - assert(calculateDivergence(N) == N->isDivergent() && + assert_DISABLED(calculateDivergence(N) == N->isDivergent() && "Divergence bit inconsistency detected"); } } diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp index 8450553743074..9efc47c2ce5a0 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp @@ -10295,7 +10295,7 @@ void SelectionDAGBuilder::visitInlineAsm(const CallBase &Call, auto CurResultType = ResultTypes.begin(); auto handleRegAssign = [&](SDValue V) { assert(CurResultType != ResultTypes.end() && "Unexpected value"); - assert((*CurResultType)->isSized() && "Unexpected unsized type"); + assert_DISABLED((*CurResultType)->isSized() && "Unexpected unsized type"); EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), *CurResultType); ++CurResultType; // If the type of the inline asm call site return value is different but has diff --git a/llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp index a1f87d2c62573..f5b6371ff4079 100644 --- a/llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp @@ -432,7 +432,7 @@ lowerIncomingStatepointValue(SDValue Incoming, bool RequireSpillSlot, // This handles allocas as arguments to the statepoint (this is only // really meaningful for a deopt value. For GC, we'd be trying to // relocate the address of the alloca itself?) 
- assert(Incoming.getValueType() == Builder.getFrameIndexTy() && + assert_DISABLED(Incoming.getValueType() == Builder.getFrameIndexTy() && "Incoming value is a frame index!"); Ops.push_back(Builder.DAG.getTargetFrameIndex(FI->getIndex(), Builder.getFrameIndexTy())); @@ -677,7 +677,7 @@ lowerStatepointMetaArgs(SmallVectorImpl &Ops, SDValue Incoming = Builder.getValue(V); if (FrameIndexSDNode *FI = dyn_cast(Incoming)) { // This handles allocas as arguments to the statepoint - assert(Incoming.getValueType() == Builder.getFrameIndexTy() && + assert_DISABLED(Incoming.getValueType() == Builder.getFrameIndexTy() && "Incoming value is a frame index!"); Allocas.push_back(Builder.DAG.getTargetFrameIndex( FI->getIndex(), Builder.getFrameIndexTy())); diff --git a/llvm/lib/CodeGen/TargetInstrInfo.cpp b/llvm/lib/CodeGen/TargetInstrInfo.cpp index 38bd0b0ba4114..42c87932351d2 100644 --- a/llvm/lib/CodeGen/TargetInstrInfo.cpp +++ b/llvm/lib/CodeGen/TargetInstrInfo.cpp @@ -176,7 +176,7 @@ MachineInstr *TargetInstrInfo::commuteInstructionImpl(MachineInstr &MI, unsigned CommutableOpIdx1 = Idx1; (void)CommutableOpIdx1; unsigned CommutableOpIdx2 = Idx2; (void)CommutableOpIdx2; - assert(findCommutedOpIndices(MI, CommutableOpIdx1, CommutableOpIdx2) && + assert_DISABLED(findCommutedOpIndices(MI, CommutableOpIdx1, CommutableOpIdx2) && CommutableOpIdx1 == Idx1 && CommutableOpIdx2 == Idx2 && "TargetInstrInfo::CommuteInstructionImpl(): not commutable operands."); assert(MI.getOperand(Idx1).isReg() && MI.getOperand(Idx2).isReg() && diff --git a/llvm/lib/CodeGen/TargetSchedule.cpp b/llvm/lib/CodeGen/TargetSchedule.cpp index ce59b096992d8..54f27e031fe67 100644 --- a/llvm/lib/CodeGen/TargetSchedule.cpp +++ b/llvm/lib/CodeGen/TargetSchedule.cpp @@ -129,7 +129,7 @@ resolveSchedClass(const MachineInstr *MI) const { unsigned NIter = 0; #endif while (SCDesc->isVariant()) { - assert(++NIter < 6 && "Variants are nested deeper than the magic number"); + assert_DISABLED(++NIter < 6 && "Variants are nested 
deeper than the magic number"); SchedClass = STI->resolveSchedClass(SchedClass, MI, this); SCDesc = SchedModel.getSchedClassDesc(SchedClass); diff --git a/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp b/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp index fb6274b09919b..bd982bd8d938f 100644 --- a/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp +++ b/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp @@ -857,12 +857,12 @@ void TwoAddressInstructionImpl::scanUses(Register DstReg) { unsigned FromReg = VirtRegPairs.pop_back_val(); bool isNew = DstRegMap.insert(std::make_pair(FromReg, ToReg)).second; if (!isNew) - assert(DstRegMap[FromReg] == ToReg &&"Can't map to two dst registers!"); + assert_DISABLED(DstRegMap[FromReg] == ToReg &&"Can't map to two dst registers!"); ToReg = FromReg; } bool isNew = DstRegMap.insert(std::make_pair(DstReg, ToReg)).second; if (!isNew) - assert(DstRegMap[DstReg] == ToReg && "Can't map to two dst registers!"); + assert_DISABLED(DstRegMap[DstReg] == ToReg && "Can't map to two dst registers!"); } } @@ -892,7 +892,7 @@ void TwoAddressInstructionImpl::processCopy(MachineInstr *MI) { } else if (!IsDstPhys && IsSrcPhys) { bool isNew = SrcRegMap.insert(std::make_pair(DstReg, SrcReg)).second; if (!isNew) - assert(SrcRegMap[DstReg] == SrcReg && + assert_DISABLED(SrcRegMap[DstReg] == SrcReg && "Can't map to two src physical registers!"); scanUses(DstReg); diff --git a/llvm/lib/DWARFLinker/Parallel/DependencyTracker.cpp b/llvm/lib/DWARFLinker/Parallel/DependencyTracker.cpp index 5e30d9a8b6690..c9714046f10f4 100644 --- a/llvm/lib/DWARFLinker/Parallel/DependencyTracker.cpp +++ b/llvm/lib/DWARFLinker/Parallel/DependencyTracker.cpp @@ -438,7 +438,7 @@ bool DependencyTracker::markDIEEntryAsKeptRec( CompileUnit::DieOutputPlacement Placement = getFinalPlacementForEntry( Entry, isLiveAction(Action) ? 
CompileUnit::PlainDwarf : CompileUnit::TypeTable); - assert((Info.getODRAvailable() || isLiveAction(Action) || + assert_DISABLED((Info.getODRAvailable() || isLiveAction(Action) || Placement == CompileUnit::PlainDwarf) && "Wrong kind of placement for ODR unavailable entry"); diff --git a/llvm/lib/DWARFLinker/Parallel/OutputSections.cpp b/llvm/lib/DWARFLinker/Parallel/OutputSections.cpp index d03f9b40d4902..efa2d5ce6fbd9 100644 --- a/llvm/lib/DWARFLinker/Parallel/OutputSections.cpp +++ b/llvm/lib/DWARFLinker/Parallel/OutputSections.cpp @@ -224,7 +224,7 @@ void SectionDescriptor::apply(uint64_t PatchOffset, dwarf::Form AttrForm, } uint64_t SectionDescriptor::getIntVal(uint64_t PatchOffset, unsigned Size) { - assert(PatchOffset < getContents().size()); + assert_DISABLED(PatchOffset < getContents().size()); switch (Size) { case 1: { return *reinterpret_cast( @@ -249,7 +249,7 @@ uint64_t SectionDescriptor::getIntVal(uint64_t PatchOffset, unsigned Size) { void SectionDescriptor::applyIntVal(uint64_t PatchOffset, uint64_t Val, unsigned Size) { - assert(PatchOffset < getContents().size()); + assert_DISABLED(PatchOffset < getContents().size()); switch (Size) { case 1: { @@ -278,7 +278,7 @@ void SectionDescriptor::applyIntVal(uint64_t PatchOffset, uint64_t Val, } void SectionDescriptor::applyULEB128(uint64_t PatchOffset, uint64_t Val) { - assert(PatchOffset < getContents().size()); + assert_DISABLED(PatchOffset < getContents().size()); uint8_t ULEB[16]; uint8_t DestSize = Format.getDwarfOffsetByteSize() + 1; @@ -290,7 +290,7 @@ void SectionDescriptor::applyULEB128(uint64_t PatchOffset, uint64_t Val) { /// Writes integer value \p Val of SLEB128 format by specified \p PatchOffset. 
void SectionDescriptor::applySLEB128(uint64_t PatchOffset, uint64_t Val) { - assert(PatchOffset < getContents().size()); + assert_DISABLED(PatchOffset < getContents().size()); uint8_t SLEB[16]; uint8_t DestSize = Format.getDwarfOffsetByteSize() + 1; diff --git a/llvm/lib/DebugInfo/CodeView/AppendingTypeTableBuilder.cpp b/llvm/lib/DebugInfo/CodeView/AppendingTypeTableBuilder.cpp index dc32e83369272..4c29abd4d9442 100644 --- a/llvm/lib/DebugInfo/CodeView/AppendingTypeTableBuilder.cpp +++ b/llvm/lib/DebugInfo/CodeView/AppendingTypeTableBuilder.cpp @@ -50,16 +50,16 @@ StringRef AppendingTypeTableBuilder::getTypeName(TypeIndex Index) { llvm_unreachable("Method not implemented"); } -bool AppendingTypeTableBuilder::contains(TypeIndex Index) { +bool AppendingTypeTableBuilder::contains(TypeIndex Index) const { if (Index.isSimple() || Index.isNoneType()) return false; return Index.toArrayIndex() < SeenRecords.size(); } -uint32_t AppendingTypeTableBuilder::size() { return SeenRecords.size(); } +uint32_t AppendingTypeTableBuilder::size() const { return SeenRecords.size(); } -uint32_t AppendingTypeTableBuilder::capacity() { return SeenRecords.size(); } +uint32_t AppendingTypeTableBuilder::capacity() const { return SeenRecords.size(); } ArrayRef> AppendingTypeTableBuilder::records() const { return SeenRecords; diff --git a/llvm/lib/DebugInfo/CodeView/GlobalTypeTableBuilder.cpp b/llvm/lib/DebugInfo/CodeView/GlobalTypeTableBuilder.cpp index 6435e83d7d063..f84893e226cc4 100644 --- a/llvm/lib/DebugInfo/CodeView/GlobalTypeTableBuilder.cpp +++ b/llvm/lib/DebugInfo/CodeView/GlobalTypeTableBuilder.cpp @@ -53,16 +53,16 @@ StringRef GlobalTypeTableBuilder::getTypeName(TypeIndex Index) { llvm_unreachable("Method not implemented"); } -bool GlobalTypeTableBuilder::contains(TypeIndex Index) { +bool GlobalTypeTableBuilder::contains(TypeIndex Index) const { if (Index.isSimple() || Index.isNoneType()) return false; return Index.toArrayIndex() < SeenRecords.size(); } -uint32_t 
GlobalTypeTableBuilder::size() { return SeenRecords.size(); } +uint32_t GlobalTypeTableBuilder::size() const { return SeenRecords.size(); } -uint32_t GlobalTypeTableBuilder::capacity() { return SeenRecords.size(); } +uint32_t GlobalTypeTableBuilder::capacity() const { return SeenRecords.size(); } ArrayRef> GlobalTypeTableBuilder::records() const { return SeenRecords; diff --git a/llvm/lib/DebugInfo/CodeView/LazyRandomTypeCollection.cpp b/llvm/lib/DebugInfo/CodeView/LazyRandomTypeCollection.cpp index e59a0197d6500..ba7625cdf8194 100644 --- a/llvm/lib/DebugInfo/CodeView/LazyRandomTypeCollection.cpp +++ b/llvm/lib/DebugInfo/CodeView/LazyRandomTypeCollection.cpp @@ -130,7 +130,7 @@ StringRef LazyRandomTypeCollection::getTypeName(TypeIndex Index) { return Records[I].Name; } -bool LazyRandomTypeCollection::contains(TypeIndex Index) { +bool LazyRandomTypeCollection::contains(TypeIndex Index) const { if (Index.isSimple() || Index.isNoneType()) return false; @@ -141,9 +141,9 @@ bool LazyRandomTypeCollection::contains(TypeIndex Index) { return true; } -uint32_t LazyRandomTypeCollection::size() { return Count; } +uint32_t LazyRandomTypeCollection::size() const { return Count; } -uint32_t LazyRandomTypeCollection::capacity() { return Records.size(); } +uint32_t LazyRandomTypeCollection::capacity() const { return Records.size(); } Error LazyRandomTypeCollection::ensureTypeExists(TypeIndex TI) { if (contains(TI)) diff --git a/llvm/lib/DebugInfo/CodeView/MergingTypeTableBuilder.cpp b/llvm/lib/DebugInfo/CodeView/MergingTypeTableBuilder.cpp index 67f5d6b00686d..46c849ca10f18 100644 --- a/llvm/lib/DebugInfo/CodeView/MergingTypeTableBuilder.cpp +++ b/llvm/lib/DebugInfo/CodeView/MergingTypeTableBuilder.cpp @@ -54,16 +54,16 @@ StringRef MergingTypeTableBuilder::getTypeName(TypeIndex Index) { llvm_unreachable("Method not implemented"); } -bool MergingTypeTableBuilder::contains(TypeIndex Index) { +bool MergingTypeTableBuilder::contains(TypeIndex Index) const { if (Index.isSimple() || 
Index.isNoneType()) return false; return Index.toArrayIndex() < SeenRecords.size(); } -uint32_t MergingTypeTableBuilder::size() { return SeenRecords.size(); } +uint32_t MergingTypeTableBuilder::size() const { return SeenRecords.size(); } -uint32_t MergingTypeTableBuilder::capacity() { return SeenRecords.size(); } +uint32_t MergingTypeTableBuilder::capacity() const { return SeenRecords.size(); } ArrayRef> MergingTypeTableBuilder::records() const { return SeenRecords; diff --git a/llvm/lib/DebugInfo/CodeView/TypeTableCollection.cpp b/llvm/lib/DebugInfo/CodeView/TypeTableCollection.cpp index 50ac6fc5906d3..92bdcd8840de8 100644 --- a/llvm/lib/DebugInfo/CodeView/TypeTableCollection.cpp +++ b/llvm/lib/DebugInfo/CodeView/TypeTableCollection.cpp @@ -52,13 +52,13 @@ StringRef TypeTableCollection::getTypeName(TypeIndex Index) { return Names[I]; } -bool TypeTableCollection::contains(TypeIndex Index) { +bool TypeTableCollection::contains(TypeIndex Index) const { return Index.toArrayIndex() <= size(); } -uint32_t TypeTableCollection::size() { return Records.size(); } +uint32_t TypeTableCollection::size() const { return Records.size(); } -uint32_t TypeTableCollection::capacity() { return Records.size(); } +uint32_t TypeTableCollection::capacity() const { return Records.size(); } bool TypeTableCollection::replaceType(TypeIndex &Index, CVType Data, bool Stabilize) { diff --git a/llvm/lib/DebugInfo/PDB/Native/SymbolCache.cpp b/llvm/lib/DebugInfo/PDB/Native/SymbolCache.cpp index 463b9ebe3cbff..4b1edc3b49fa7 100644 --- a/llvm/lib/DebugInfo/PDB/Native/SymbolCache.cpp +++ b/llvm/lib/DebugInfo/PDB/Native/SymbolCache.cpp @@ -184,7 +184,7 @@ SymIndexId SymbolCache::findSymbolByTypeIndex(codeview::TypeIndex Index) const { if (!EFD) consumeError(EFD.takeError()); else if (*EFD != Index) { - assert(!isUdtForwardRef(Types.getType(*EFD))); + assert_DISABLED(!isUdtForwardRef(const_cast(Types).getType(*EFD))); SymIndexId Result = findSymbolByTypeIndex(*EFD); // Record a mapping from ForwardRef 
-> SymIndex of complete type so that // we'll take the fast path next time. diff --git a/llvm/lib/ExecutionEngine/ExecutionEngine.cpp b/llvm/lib/ExecutionEngine/ExecutionEngine.cpp index f09975331bba8..5a5f06293c372 100644 --- a/llvm/lib/ExecutionEngine/ExecutionEngine.cpp +++ b/llvm/lib/ExecutionEngine/ExecutionEngine.cpp @@ -452,7 +452,7 @@ int ExecutionEngine::runFunctionAsMain(Function *Fn, if (NumArgs > 1) { // Arg #1 = argv. GVArgs.push_back(PTOGV(CArgv.reset(Fn->getContext(), this, argv))); - assert(!isTargetNullPtr(this, GVTOP(GVArgs[1])) && + assert_DISABLED(!isTargetNullPtr(this, GVTOP(GVArgs[1])) && "argv[0] was null after CreateArgv"); if (NumArgs > 2) { std::vector EnvVars; diff --git a/llvm/lib/ExecutionEngine/JITLink/ELF_aarch32.cpp b/llvm/lib/ExecutionEngine/JITLink/ELF_aarch32.cpp index 866de2cb227c3..cc0e4a29d64d6 100644 --- a/llvm/lib/ExecutionEngine/JITLink/ELF_aarch32.cpp +++ b/llvm/lib/ExecutionEngine/JITLink/ELF_aarch32.cpp @@ -210,7 +210,7 @@ class ELFLinkGraphBuilder_aarch32 orc::ExecutorAddrDiff getRawOffset(const typename ELFT::Sym &Sym, TargetFlagsType Flags) override { - assert((makeTargetFlags(Sym) & Flags) == Flags); + assert_DISABLED((makeTargetFlags(Sym) & Flags) == Flags); static constexpr uint64_t ThumbBit = 0x01; if (Sym.getType() == ELF::STT_FUNC) return Sym.getValue() & ~ThumbBit; diff --git a/llvm/lib/ExecutionEngine/MCJIT/MCJIT.cpp b/llvm/lib/ExecutionEngine/MCJIT/MCJIT.cpp index e3b7db2380bb0..699373c21602b 100644 --- a/llvm/lib/ExecutionEngine/MCJIT/MCJIT.cpp +++ b/llvm/lib/ExecutionEngine/MCJIT/MCJIT.cpp @@ -191,7 +191,7 @@ void MCJIT::generateCodeForModule(Module *M) { std::lock_guard locked(lock); // This must be a module which has already been added to this MCJIT instance. 
- assert(OwnedModules.ownsModule(M) && + assert_DISABLED(OwnedModules.ownsModule(M) && "MCJIT::generateCodeForModule: Unknown module."); // Re-compilation is not supported @@ -272,7 +272,7 @@ void MCJIT::finalizeModule(Module *M) { std::lock_guard locked(lock); // This must be a module which has already been added to this MCJIT instance. - assert(OwnedModules.ownsModule(M) && "MCJIT::finalizeModule: Unknown module."); + assert_DISABLED(OwnedModules.ownsModule(M) && "MCJIT::finalizeModule: Unknown module."); // If the module hasn't been compiled, just do that. if (!OwnedModules.hasModuleBeenLoaded(M)) diff --git a/llvm/lib/ExecutionEngine/Orc/COFFPlatform.cpp b/llvm/lib/ExecutionEngine/Orc/COFFPlatform.cpp index f46cb906bb755..6cfb2e612c97d 100644 --- a/llvm/lib/ExecutionEngine/Orc/COFFPlatform.cpp +++ b/llvm/lib/ExecutionEngine/Orc/COFFPlatform.cpp @@ -793,7 +793,7 @@ Error COFFPlatform::COFFPlatformPlugin::associateJITDylibHeaderSymbol( auto I = llvm::find_if(G.defined_symbols(), [this](jitlink::Symbol *Sym) { return Sym->getName() == *CP.COFFHeaderStartSymbol; }); - assert(I != G.defined_symbols().end() && "Missing COFF header start symbol"); + assert_DISABLED(I != G.defined_symbols().end() && "Missing COFF header start symbol"); auto &JD = MR.getTargetJITDylib(); std::lock_guard Lock(CP.PlatformMutex); diff --git a/llvm/lib/ExecutionEngine/Orc/Core.cpp b/llvm/lib/ExecutionEngine/Orc/Core.cpp index 226216f781fe9..4c59237f795f2 100644 --- a/llvm/lib/ExecutionEngine/Orc/Core.cpp +++ b/llvm/lib/ExecutionEngine/Orc/Core.cpp @@ -1222,7 +1222,7 @@ void JITDylib::dump(raw_ostream &OS) { } } else OS << " none\n"; - assert((Symbols[KV.first].getState() != SymbolState::Ready || + assert_DISABLED((Symbols[KV.first].getState() != SymbolState::Ready || (KV.second.pendingQueries().empty() && !KV.second.DefiningEDU && !KV.second.DependantEDUs.empty())) && "Stale materializing info entry"); @@ -1534,7 +1534,7 @@ Expected> Platform::lookupInitSymbols( std::lock_guard 
Lock(LookupMutex); --Count; if (Result) { - assert(!CompoundResult.count(JD) && + assert_DISABLED(!CompoundResult.count(JD) && "Duplicate JITDylib in lookup?"); CompoundResult[JD] = std::move(*Result); } else @@ -1670,7 +1670,7 @@ JITDylib *ExecutionSession::getJITDylibByName(StringRef Name) { } JITDylib &ExecutionSession::createBareJITDylib(std::string Name) { - assert(!getJITDylibByName(Name) && "JITDylib with that name already exists"); + assert_DISABLED(!getJITDylibByName(Name) && "JITDylib with that name already exists"); return runSessionLocked([&, this]() -> JITDylib & { assert(SessionOpen && "Cannot create JITDylib after session is closed"); JDs.push_back(new JITDylib(*this, std::move(Name))); @@ -3183,7 +3183,7 @@ void ExecutionSession::IL_makeEDUEmitted( assert(JD.MaterializingInfos.count(SymbolStringPtr(Sym)) && "Emitted symbol has no MI"); auto MI = JD.MaterializingInfos[SymbolStringPtr(Sym)]; - assert(MI.takeQueriesMeeting(SymbolState::Emitted).empty() && + assert_DISABLED(MI.takeQueriesMeeting(SymbolState::Emitted).empty() && "Already-emitted symbol has waiting-on-emitted queries"); } #endif // NDEBUG @@ -3218,7 +3218,7 @@ bool ExecutionSession::IL_removeEDUDependence(JITDylib::EmissionDepUnit &EDU, EDUInfosMap &EDUInfos) { assert(EDU.Dependencies.count(&DepJD) && "JD does not appear in Dependencies of DependantEDU"); - assert(EDU.Dependencies[&DepJD].count(DepSym) && + assert_DISABLED(EDU.Dependencies[&DepJD].count(DepSym) && "Symbol does not appear in Dependencies of DependantEDU"); auto &JDDeps = EDU.Dependencies[&DepJD]; JDDeps.erase(DepSym); @@ -3396,13 +3396,13 @@ ExecutionSession::IL_emit(MaterializationResponsibility &MR, for (auto &[Sym, Flags] : EDU->Symbols) { assert(TargetJD.Symbols.count(SymbolStringPtr(Sym)) && "Sym not present in symbol table"); - assert((TargetJD.Symbols[SymbolStringPtr(Sym)].getState() == + assert_DISABLED((TargetJD.Symbols[SymbolStringPtr(Sym)].getState() == SymbolState::Resolved || 
TargetJD.Symbols[SymbolStringPtr(Sym)] .getFlags() .hasMaterializationSideEffectsOnly()) && "Emitting symbol not in the resolved state"); - assert(!TargetJD.Symbols[SymbolStringPtr(Sym)].getFlags().hasError() && + assert_DISABLED(!TargetJD.Symbols[SymbolStringPtr(Sym)].getFlags().hasError() && "Symbol is already in an error state"); auto MII = TargetJD.MaterializingInfos.find(SymbolStringPtr(Sym)); @@ -3474,7 +3474,7 @@ Error ExecutionSession::OL_notifyEmitted( for (auto &Sym : DG.Symbols) { assert(MR.SymbolFlags.count(Sym) && "DG contains dependence for symbol outside this MR"); - assert(Visited.insert(Sym).second && + assert_DISABLED(Visited.insert(Sym).second && "DG contains duplicate entries for Name"); } } diff --git a/llvm/lib/ExecutionEngine/Orc/DebugObjectManagerPlugin.cpp b/llvm/lib/ExecutionEngine/Orc/DebugObjectManagerPlugin.cpp index acbf33888adee..ada935a83eac7 100644 --- a/llvm/lib/ExecutionEngine/Orc/DebugObjectManagerPlugin.cpp +++ b/llvm/lib/ExecutionEngine/Orc/DebugObjectManagerPlugin.cpp @@ -404,7 +404,7 @@ void DebugObjectManagerPlugin::notifyMaterializing( MaterializationResponsibility &MR, LinkGraph &G, JITLinkContext &Ctx, MemoryBufferRef ObjBuffer) { std::lock_guard Lock(PendingObjsLock); - assert(PendingObjs.count(&MR) == 0 && + assert_DISABLED(PendingObjs.count(&MR) == 0 && "Cannot have more than one pending debug object per " "MaterializationResponsibility"); @@ -474,7 +474,7 @@ Error DebugObjectManagerPlugin::notifyEmitted( // Once our tracking info is updated, notifyEmitted() can return and // finish materialization. 
FinalizePromise.set_value(MR.withResourceKeyDo([&](ResourceKey K) { - assert(PendingObjs.count(&MR) && "We still hold PendingObjsLock"); + assert_DISABLED(PendingObjs.count(&MR) && "We still hold PendingObjsLock"); std::lock_guard Lock(RegisteredObjsLock); RegisteredObjs[K].push_back(std::move(PendingObjs[&MR])); PendingObjs.erase(&MR); diff --git a/llvm/lib/ExecutionEngine/Orc/ELFNixPlatform.cpp b/llvm/lib/ExecutionEngine/Orc/ELFNixPlatform.cpp index 610ecbff5c5c4..41ffa86fc5eee 100644 --- a/llvm/lib/ExecutionEngine/Orc/ELFNixPlatform.cpp +++ b/llvm/lib/ExecutionEngine/Orc/ELFNixPlatform.cpp @@ -803,7 +803,7 @@ void ELFNixPlatform::ELFNixPlatformPlugin::addDSOHandleSupportPasses( auto I = llvm::find_if(G.defined_symbols(), [this](jitlink::Symbol *Sym) { return Sym->getName() == *MP.DSOHandleSymbol; }); - assert(I != G.defined_symbols().end() && "Missing DSO handle symbol"); + assert_DISABLED(I != G.defined_symbols().end() && "Missing DSO handle symbol"); { std::lock_guard Lock(MP.PlatformMutex); auto HandleAddr = (*I)->getAddress(); diff --git a/llvm/lib/ExecutionEngine/Orc/ExecutionUtils.cpp b/llvm/lib/ExecutionEngine/Orc/ExecutionUtils.cpp index 1dcf91443d55d..56d693f487b76 100644 --- a/llvm/lib/ExecutionEngine/Orc/ExecutionUtils.cpp +++ b/llvm/lib/ExecutionEngine/Orc/ExecutionUtils.cpp @@ -149,7 +149,7 @@ Error CtorDtorRunner::run() { for (auto &KV : CtorDtorsByPriority) for (auto &Name : KV.second) LookupSet.add(Name); - assert(!LookupSet.containsDuplicates() && + assert_DISABLED(!LookupSet.containsDuplicates() && "Ctor/Dtor list contains duplicates"); auto &ES = JD.getExecutionSession(); diff --git a/llvm/lib/ExecutionEngine/Orc/MachOPlatform.cpp b/llvm/lib/ExecutionEngine/Orc/MachOPlatform.cpp index e5609053c74d7..0c69dc3ed4f33 100644 --- a/llvm/lib/ExecutionEngine/Orc/MachOPlatform.cpp +++ b/llvm/lib/ExecutionEngine/Orc/MachOPlatform.cpp @@ -941,7 +941,7 @@ Error MachOPlatform::MachOPlatformPlugin::associateJITDylibHeaderSymbol( auto I = 
llvm::find_if(G.defined_symbols(), [this](jitlink::Symbol *Sym) { return Sym->getName() == *MP.MachOHeaderStartSymbol; }); - assert(I != G.defined_symbols().end() && "Missing MachO header start symbol"); + assert_DISABLED(I != G.defined_symbols().end() && "Missing MachO header start symbol"); auto &JD = MR.getTargetJITDylib(); std::lock_guard Lock(MP.PlatformMutex); diff --git a/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOAArch64.h b/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOAArch64.h index 701cc3a881496..83fcd5eeddc16 100644 --- a/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOAArch64.h +++ b/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOAArch64.h @@ -459,7 +459,7 @@ class RuntimeDyldMachOAArch64 -StubAlignment; unsigned StubOffset = StubAddress - BaseAddress; Stubs[Value] = StubOffset; - assert(isAligned(getStubAlignment(), StubAddress) && + assert_DISABLED(isAligned(getStubAlignment(), StubAddress) && "GOT entry not aligned"); RelocationEntry GOTRE(RE.SectionID, StubOffset, MachO::ARM64_RELOC_UNSIGNED, Value.Offset, diff --git a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp index be93f9f2e1fdc..d7a6329769931 100644 --- a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp +++ b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp @@ -1143,7 +1143,7 @@ OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::emitKernelLaunch( void OpenMPIRBuilder::emitCancelationCheckImpl(Value *CancelFlag, omp::Directive CanceledDirective, FinalizeCallbackTy ExitCB) { - assert(isLastFinalizationInfoCancellable(CanceledDirective) && + assert_DISABLED(isLastFinalizationInfoCancellable(CanceledDirective) && "Unexpected cancellation!"); // For a cancel barrier we create two new blocks. 
@@ -7419,7 +7419,7 @@ void OpenMPIRBuilder::emitOffloadingArraysArgument(IRBuilderBase &Builder, TargetDataRTArgs &RTArgs, TargetDataInfo &Info, bool ForEndCall) { - assert((!ForEndCall || Info.separateBeginEndCalls()) && + assert_DISABLED((!ForEndCall || Info.separateBeginEndCalls()) && "expected region end call to runtime only when end call is separate"); auto UnqualPtrTy = PointerType::getUnqual(M.getContext()); auto VoidPtrTy = UnqualPtrTy; diff --git a/llvm/lib/IR/BasicBlock.cpp b/llvm/lib/IR/BasicBlock.cpp index 39cefa5280c62..34c965fdf2438 100644 --- a/llvm/lib/IR/BasicBlock.cpp +++ b/llvm/lib/IR/BasicBlock.cpp @@ -1158,7 +1158,7 @@ void BasicBlock::setTrailingDbgRecords(DbgMarker *foo) { getContext().pImpl->setTrailingDbgRecords(this, foo); } -DbgMarker *BasicBlock::getTrailingDbgRecords() { +DbgMarker *BasicBlock::getTrailingDbgRecords() const { return getContext().pImpl->getTrailingDbgRecords(this); } diff --git a/llvm/lib/IR/Constants.cpp b/llvm/lib/IR/Constants.cpp index 7ae397871bdea..fb66a8f4bd17a 100644 --- a/llvm/lib/IR/Constants.cpp +++ b/llvm/lib/IR/Constants.cpp @@ -897,7 +897,7 @@ ConstantInt *ConstantInt::get(LLVMContext &Context, const APInt &V) { IntegerType *ITy = IntegerType::get(Context, V.getBitWidth()); Slot.reset(new ConstantInt(ITy, V)); } - assert(Slot->getType() == IntegerType::get(Context, V.getBitWidth())); + assert_DISABLED(Slot->getType() == IntegerType::get(Context, V.getBitWidth())); return Slot.get(); } diff --git a/llvm/lib/IR/IRBuilder.cpp b/llvm/lib/IR/IRBuilder.cpp index f340f7aafdc76..61cf0960908ab 100644 --- a/llvm/lib/IR/IRBuilder.cpp +++ b/llvm/lib/IR/IRBuilder.cpp @@ -463,7 +463,7 @@ CallInst *IRBuilderBase::CreateLifetimeStart(Value *Ptr, ConstantInt *Size) { if (!Size) Size = getInt64(-1); else - assert(Size->getType() == getInt64Ty() && + assert(Size->getType() == const_cast(this)->getInt64Ty() && "lifetime.start requires the size to be an i64"); Value *Ops[] = { Size, Ptr }; return 
CreateIntrinsic(Intrinsic::lifetime_start, {Ptr->getType()}, Ops); @@ -475,7 +475,7 @@ CallInst *IRBuilderBase::CreateLifetimeEnd(Value *Ptr, ConstantInt *Size) { if (!Size) Size = getInt64(-1); else - assert(Size->getType() == getInt64Ty() && + assert_DISABLED(Size->getType() == getInt64Ty() && "lifetime.end requires the size to be an i64"); Value *Ops[] = { Size, Ptr }; return CreateIntrinsic(Intrinsic::lifetime_end, {Ptr->getType()}, Ops); @@ -488,7 +488,7 @@ CallInst *IRBuilderBase::CreateInvariantStart(Value *Ptr, ConstantInt *Size) { if (!Size) Size = getInt64(-1); else - assert(Size->getType() == getInt64Ty() && + assert_DISABLED(Size->getType() == getInt64Ty() && "invariant.start requires the size to be an i64"); Value *Ops[] = {Size, Ptr}; @@ -520,7 +520,7 @@ CallInst *IRBuilderBase::CreateThreadLocalAddress(Value *Ptr) { CallInst * IRBuilderBase::CreateAssumption(Value *Cond, ArrayRef OpBundles) { - assert(Cond->getType() == getInt1Ty() && + assert_DISABLED(Cond->getType() == getInt1Ty() && "an assumption condition must be of type i1"); Value *Ops[] = { Cond }; diff --git a/llvm/lib/IR/LLVMContextImpl.h b/llvm/lib/IR/LLVMContextImpl.h index 971091f304061..2b88c34b9bd0f 100644 --- a/llvm/lib/IR/LLVMContextImpl.h +++ b/llvm/lib/IR/LLVMContextImpl.h @@ -1721,7 +1721,7 @@ class LLVMContextImpl { TrailingDbgRecords[B] = M; } - DbgMarker *getTrailingDbgRecords(BasicBlock *B) { + DbgMarker *getTrailingDbgRecords(const BasicBlock *B) const { return TrailingDbgRecords.lookup(B); } diff --git a/llvm/lib/IR/Metadata.cpp b/llvm/lib/IR/Metadata.cpp index ea2d1dc8440bc..e000cfc86b6b3 100644 --- a/llvm/lib/IR/Metadata.cpp +++ b/llvm/lib/IR/Metadata.cpp @@ -858,7 +858,7 @@ void MDNode::resolveCycles() { } } -static bool hasSelfReference(MDNode *N) { +static bool hasSelfReference(const MDNode *N) { return llvm::is_contained(N->operands(), N); } diff --git a/llvm/lib/IR/Value.cpp b/llvm/lib/IR/Value.cpp index b2ee75811fbb7..08e1b9e50f752 100644 --- a/llvm/lib/IR/Value.cpp 
+++ b/llvm/lib/IR/Value.cpp @@ -467,7 +467,7 @@ void Value::assertModuleIsMaterializedImpl() const { #ifndef NDEBUG static bool contains(SmallPtrSetImpl &Cache, ConstantExpr *Expr, - Constant *C) { + Constant *C) { if (!Cache.insert(Expr).second) return false; @@ -502,7 +502,7 @@ static bool contains(Value *Expr, Value *V) { void Value::doRAUW(Value *New, ReplaceMetadataUses ReplaceMetaUses) { assert(New && "Value::replaceAllUsesWith() is invalid!"); - assert(!contains(New, this) && + assert(!contains(const_cast(New), const_cast(this)) && "this->replaceAllUsesWith(expr(this)) is NOT valid!"); assert(New->getType() == getType() && "replaceAllUses of value with new value of different type!"); @@ -591,7 +591,7 @@ static void replaceDbgUsesOutsideBlock(Value *V, Value *New, BasicBlock *BB) { // This routine leaves uses within BB. void Value::replaceUsesOutsideBlock(Value *New, BasicBlock *BB) { assert(New && "Value::replaceUsesOutsideBlock(, BB) is invalid!"); - assert(!contains(New, this) && + assert_DISABLED(!contains(New, this) && "this->replaceUsesOutsideBlock(expr(this), BB) is NOT valid!"); assert(New->getType() == getType() && "replaceUses of value with new value of different type!"); diff --git a/llvm/lib/LTO/LTO.cpp b/llvm/lib/LTO/LTO.cpp index 0f53c60851217..e82f843996fa9 100644 --- a/llvm/lib/LTO/LTO.cpp +++ b/llvm/lib/LTO/LTO.cpp @@ -1061,7 +1061,7 @@ Error LTO::addThinLTO(BitcodeModule BM, ArrayRef Syms, auto GUID = GlobalValue::getGUID(GlobalValue::getGlobalIdentifier( Sym.getIRName(), GlobalValue::ExternalLinkage, "")); if (Res.Prevailing) { - assert(ThinLTO.PrevailingModuleForGUID[GUID] == + assert_DISABLED(ThinLTO.PrevailingModuleForGUID[GUID] == BM.getModuleIdentifier()); // For linker redefined symbols (via --wrap or --defsym) we want to diff --git a/llvm/lib/LTO/LTOCodeGenerator.cpp b/llvm/lib/LTO/LTOCodeGenerator.cpp index a192392e04585..2c0c42c61b0c8 100644 --- a/llvm/lib/LTO/LTOCodeGenerator.cpp +++ b/llvm/lib/LTO/LTOCodeGenerator.cpp @@ -243,7 
+243,7 @@ bool LTOCodeGenerator::useAIXSystemAssembler() { } bool LTOCodeGenerator::runAIXSystemAssembler(SmallString<128> &AssemblyFile) { - assert(useAIXSystemAssembler() && + assert_DISABLED(useAIXSystemAssembler() && "Runing AIX system assembler when integrated assembler is available!"); // Set the system assembler path. diff --git a/llvm/lib/MC/MCParser/AsmParser.cpp b/llvm/lib/MC/MCParser/AsmParser.cpp index 4774e5112af53..382a53e2beed7 100644 --- a/llvm/lib/MC/MCParser/AsmParser.cpp +++ b/llvm/lib/MC/MCParser/AsmParser.cpp @@ -5825,7 +5825,7 @@ bool AsmParser::parseDirectiveEndr(SMLoc DirectiveLoc) { // The only .repl that should get here are the ones created by // instantiateMacroLikeBody. - assert(getLexer().is(AsmToken::EndOfStatement)); + assert_DISABLED(getLexer().is(AsmToken::EndOfStatement)); handleMacroExit(); return false; diff --git a/llvm/lib/Passes/StandardInstrumentations.cpp b/llvm/lib/Passes/StandardInstrumentations.cpp index d4866a025c1b4..0fcde98394cae 100644 --- a/llvm/lib/Passes/StandardInstrumentations.cpp +++ b/llvm/lib/Passes/StandardInstrumentations.cpp @@ -1366,7 +1366,7 @@ void PreservedCFGCheckerInstrumentation::registerCallbacks( PIC.registerBeforeNonSkippedPassCallback([this, &MAM, Registered]( StringRef P, Any IR) mutable { #if LLVM_ENABLE_ABI_BREAKING_CHECKS - assert(&PassStack.emplace_back(P)); + assert_DISABLED(&PassStack.emplace_back(P)); #endif (void)this; @@ -1395,7 +1395,7 @@ void PreservedCFGCheckerInstrumentation::registerCallbacks( PIC.registerAfterPassInvalidatedCallback( [this](StringRef P, const PreservedAnalyses &PassPA) { #if LLVM_ENABLE_ABI_BREAKING_CHECKS - assert(PassStack.pop_back_val() == P && + assert_DISABLED(PassStack.pop_back_val() == P && "Before and After callbacks must correspond"); #endif (void)this; @@ -1404,7 +1404,7 @@ void PreservedCFGCheckerInstrumentation::registerCallbacks( PIC.registerAfterPassCallback([this, &MAM](StringRef P, Any IR, const PreservedAnalyses &PassPA) { #if 
LLVM_ENABLE_ABI_BREAKING_CHECKS - assert(PassStack.pop_back_val() == P && + assert_DISABLED(PassStack.pop_back_val() == P && "Before and After callbacks must correspond"); #endif (void)this; diff --git a/llvm/lib/ProfileData/Coverage/CoverageMapping.cpp b/llvm/lib/ProfileData/Coverage/CoverageMapping.cpp index 119e09187b908..731af48793d8b 100644 --- a/llvm/lib/ProfileData/Coverage/CoverageMapping.cpp +++ b/llvm/lib/ProfileData/Coverage/CoverageMapping.cpp @@ -336,7 +336,7 @@ class NextIDsBuilder { #endif for (const auto *Branch : Branches) { const auto &BranchParams = Branch->getBranchParams(); - assert(SeenIDs.insert(BranchParams.ID).second && "Duplicate CondID"); + assert_DISABLED(SeenIDs.insert(BranchParams.ID).second && "Duplicate CondID"); NextIDs[BranchParams.ID] = BranchParams.Conds; } assert(SeenIDs.size() == Branches.size()); @@ -414,7 +414,7 @@ class MCDCRecordProcessor : NextIDsBuilder, mcdc::TVIdxBuilder { } assert(TVIdx < SavedNodes[ID].Width); - assert(TVIdxs.insert(NextTVIdx).second && "Duplicate TVIdx"); + assert_DISABLED(TVIdxs.insert(NextTVIdx).second && "Duplicate TVIdx"); if (!Bitmap[IsVersion11 ? 
DecisionParams.BitmapIdx * CHAR_BIT + TV.getIndex() diff --git a/llvm/lib/ProfileData/Coverage/CoverageMappingWriter.cpp b/llvm/lib/ProfileData/Coverage/CoverageMappingWriter.cpp index adfd22804356e..d00b686f3360b 100644 --- a/llvm/lib/ProfileData/Coverage/CoverageMappingWriter.cpp +++ b/llvm/lib/ProfileData/Coverage/CoverageMappingWriter.cpp @@ -33,7 +33,7 @@ CoverageFilenamesSectionWriter::CoverageFilenamesSectionWriter( #ifndef NDEBUG StringSet<> NameSet; for (StringRef Name : Filenames) - assert(NameSet.insert(Name).second && "Duplicate filename"); + assert_DISABLED(NameSet.insert(Name).second && "Duplicate filename"); #endif } diff --git a/llvm/lib/ProfileData/SampleProfReader.cpp b/llvm/lib/ProfileData/SampleProfReader.cpp index 98c7844378527..3facba84042c6 100644 --- a/llvm/lib/ProfileData/SampleProfReader.cpp +++ b/llvm/lib/ProfileData/SampleProfReader.cpp @@ -1481,7 +1481,7 @@ bool SampleProfileReaderExtBinaryBase::dumpSectionInfo(raw_ostream &OS) { TotalSecsSize += Entry.Size; } uint64_t HeaderSize = SecHdrTable.front().Offset; - assert(HeaderSize + TotalSecsSize == getFileSize() && + assert_DISABLED(HeaderSize + TotalSecsSize == getFileSize() && "Size of 'header + sections' doesn't match the total size of profile"); OS << "Header Size: " << HeaderSize << "\n"; diff --git a/llvm/lib/Support/OptimizedStructLayout.cpp b/llvm/lib/Support/OptimizedStructLayout.cpp index 7b21f927a3462..36be1612b11cd 100644 --- a/llvm/lib/Support/OptimizedStructLayout.cpp +++ b/llvm/lib/Support/OptimizedStructLayout.cpp @@ -329,6 +329,7 @@ llvm::performOptimizedStructLayout(MutableArrayRef Fields) { // to the layout at the given offset. auto addToLayout = [&](AlignmentQueue *Queue, Field *Last, Field *Cur, uint64_t Offset) -> bool { + ((void)LastEnd); assert(Offset == alignTo(LastEnd, Cur->Alignment)); // Splice out. This potentially invalidates Queue. 
@@ -348,6 +349,7 @@ llvm::performOptimizedStructLayout(MutableArrayRef Fields) { // Note that this never fails if EndOffset is not provided. auto tryAddFillerFromQueue = [&](AlignmentQueue *Queue, uint64_t StartOffset, std::optional EndOffset) -> bool { + ((void)LastEnd); assert(Queue->Head); assert(StartOffset == alignTo(LastEnd, Queue->Alignment)); assert(!EndOffset || StartOffset < *EndOffset); diff --git a/llvm/lib/Support/Unix/Signals.inc b/llvm/lib/Support/Unix/Signals.inc index 088ca33e3c8c5..13a6ebc04d2f3 100644 --- a/llvm/lib/Support/Unix/Signals.inc +++ b/llvm/lib/Support/Unix/Signals.inc @@ -689,7 +689,7 @@ static int unwindBacktrace(void **StackTrace, int MaxEntries) { void *IP = (void *)_Unwind_GetIP(Context); if (!IP) return _URC_END_OF_STACK; - + ((void)Entries); assert(Entries < MaxEntries && "recursively called after END_OF_STACK?"); if (Entries >= 0) StackTrace[Entries] = IP; diff --git a/llvm/lib/Support/VirtualFileSystem.cpp b/llvm/lib/Support/VirtualFileSystem.cpp index b3cdaa3eefc90..e5afa77ca973a 100644 --- a/llvm/lib/Support/VirtualFileSystem.cpp +++ b/llvm/lib/Support/VirtualFileSystem.cpp @@ -2757,10 +2757,10 @@ class JSONWriter { llvm::raw_ostream &OS; SmallVector DirStack; - unsigned getDirIndent() { return 4 * DirStack.size(); } - unsigned getFileIndent() { return 4 * (DirStack.size() + 1); } - bool containedIn(StringRef Parent, StringRef Path); - StringRef containedPart(StringRef Parent, StringRef Path); + unsigned getDirIndent() const { return 4 * DirStack.size(); } + unsigned getFileIndent() const { return 4 * (DirStack.size() + 1); } + bool containedIn(StringRef Parent, StringRef Path) const; + StringRef containedPart(StringRef Parent, StringRef Path) const; void startDirectory(StringRef Path); void endDirectory(); void writeEntry(StringRef VPath, StringRef RPath); @@ -2776,7 +2776,7 @@ class JSONWriter { } // namespace -bool JSONWriter::containedIn(StringRef Parent, StringRef Path) { +bool JSONWriter::containedIn(StringRef Parent, 
StringRef Path) const { using namespace llvm::sys; // Compare each path component. @@ -2790,7 +2790,7 @@ bool JSONWriter::containedIn(StringRef Parent, StringRef Path) { return IParent == EParent; } -StringRef JSONWriter::containedPart(StringRef Parent, StringRef Path) { +StringRef JSONWriter::containedPart(StringRef Parent, StringRef Path) const { assert(!Parent.empty()); assert(containedIn(Parent, Path)); return Path.substr(Parent.size() + 1); diff --git a/llvm/lib/Target/AArch64/AArch64FastISel.cpp b/llvm/lib/Target/AArch64/AArch64FastISel.cpp index cbf38f2c57a35..121c3d8743e79 100644 --- a/llvm/lib/Target/AArch64/AArch64FastISel.cpp +++ b/llvm/lib/Target/AArch64/AArch64FastISel.cpp @@ -1147,7 +1147,7 @@ void AArch64FastISel::addLoadStoreOperands(Address &Addr, Addr.setOffsetReg( constrainOperandRegClass(II, Addr.getOffsetReg(), II.getNumDefs()+Idx+1)); if (Addr.getOffsetReg()) { - assert(Addr.getOffset() == 0 && "Unexpected offset"); + assert_DISABLED(Addr.getOffset() == 0 && "Unexpected offset"); bool IsSigned = Addr.getExtendType() == AArch64_AM::SXTW || Addr.getExtendType() == AArch64_AM::SXTX; MIB.addReg(Addr.getReg()); diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp index bbf2f26779545..4f5d4282cfb4a 100644 --- a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp +++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp @@ -2991,7 +2991,7 @@ static void computeCalleeSaveRegisterPairs( (void)CC; // MachO's compact unwind format relies on all registers being stored in // pairs. 
- assert((!produceCompactUnwindFrame(MF) || CC == CallingConv::PreserveMost || + assert_DISABLED((!produceCompactUnwindFrame(MF) || CC == CallingConv::PreserveMost || CC == CallingConv::PreserveAll || CC == CallingConv::CXX_FAST_TLS || CC == CallingConv::Win64 || (Count & 1) == 0) && "Odd number of callee-saved regs to spill!"); @@ -3100,7 +3100,7 @@ static void computeCalleeSaveRegisterPairs( // MachO's compact unwind format relies on all registers being stored in // adjacent register pairs. - assert((!produceCompactUnwindFrame(MF) || CC == CallingConv::PreserveMost || + assert_DISABLED((!produceCompactUnwindFrame(MF) || CC == CallingConv::PreserveMost || CC == CallingConv::PreserveAll || CC == CallingConv::CXX_FAST_TLS || CC == CallingConv::Win64 || (RPI.isPaired() && @@ -3350,7 +3350,7 @@ bool AArch64FrameLowering::spillCalleeSavedRegisters( MF.getSubtarget(); AArch64FunctionInfo *AFI = MF.getInfo(); unsigned PnReg = AFI->getPredicateRegForFillSpill(); - assert((PnReg != 0 && enableMultiVectorSpillFill(Subtarget, MF)) && + assert_DISABLED((PnReg != 0 && enableMultiVectorSpillFill(Subtarget, MF)) && "Expects SVE2.1 or SME2 target and a predicate register"); #ifdef EXPENSIVE_CHECKS auto IsPPR = [](const RegPairInfo &c) { @@ -3528,7 +3528,7 @@ bool AArch64FrameLowering::restoreCalleeSavedRegisters( [[maybe_unused]] const AArch64Subtarget &Subtarget = MF.getSubtarget(); unsigned PnReg = AFI->getPredicateRegForFillSpill(); - assert((PnReg != 0 && enableMultiVectorSpillFill(Subtarget, MF)) && + assert_DISABLED((PnReg != 0 && enableMultiVectorSpillFill(Subtarget, MF)) && "Expects SVE2.1 or SME2 target and a predicate register"); #ifdef EXPENSIVE_CHECKS assert(!(PPRBegin < ZPRBegin) && diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp index 32bc0e7d0d647..c4485eec127be 100644 --- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp +++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp @@ -1778,7 +1778,7 @@ static bool 
canInstrSubstituteCmpInstr(MachineInstr &MI, MachineInstr &CmpInstr, const TargetRegisterInfo &TRI) { // NOTE this assertion guarantees that MI.getOpcode() is add or subtraction // that may or may not set flags. - assert(sForm(MI) != AArch64::INSTRUCTION_LIST_END); + assert_DISABLED(sForm(MI) != AArch64::INSTRUCTION_LIST_END); const unsigned CmpOpcode = CmpInstr.getOpcode(); if (!isADDSRegImm(CmpOpcode) && !isSUBSRegImm(CmpOpcode)) diff --git a/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp b/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp index 1a9e5899892a1..c07d67f6dfa48 100644 --- a/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp +++ b/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp @@ -694,7 +694,7 @@ static MachineOperand &getLdStRegOp(MachineInstr &MI, static bool isLdOffsetInRangeOfSt(MachineInstr &LoadInst, MachineInstr &StoreInst, const AArch64InstrInfo *TII) { - assert(isMatchingStore(LoadInst, StoreInst) && "Expect only matched ld/st."); + assert_DISABLED(isMatchingStore(LoadInst, StoreInst) && "Expect only matched ld/st."); int LoadSize = TII->getMemScale(LoadInst); int StoreSize = TII->getMemScale(StoreInst); int UnscaledStOffset = diff --git a/llvm/lib/Target/AArch64/AArch64PromoteConstant.cpp b/llvm/lib/Target/AArch64/AArch64PromoteConstant.cpp index 9044c94bc4fe5..bcf3f6215c94e 100644 --- a/llvm/lib/Target/AArch64/AArch64PromoteConstant.cpp +++ b/llvm/lib/Target/AArch64/AArch64PromoteConstant.cpp @@ -518,7 +518,7 @@ void AArch64PromoteConstant::insertDefinitions(Function &F, // Update the dominated uses. 
for (auto Use : IPI.second) { #ifndef NDEBUG - assert(DT.dominates(LoadedCst, + assert_DISABLED(DT.dominates(LoadedCst, findInsertionPoint(*Use.first, Use.second)) && "Inserted definition does not dominate all its uses!"); #endif diff --git a/llvm/lib/Target/AArch64/AArch64StackTagging.cpp b/llvm/lib/Target/AArch64/AArch64StackTagging.cpp index a6535a532fff3..3ba23af517dc4 100644 --- a/llvm/lib/Target/AArch64/AArch64StackTagging.cpp +++ b/llvm/lib/Target/AArch64/AArch64StackTagging.cpp @@ -571,7 +571,7 @@ bool AArch64StackTagging::runOnFunction(Function &Fn) { unsigned int NextTag = 0; for (auto &I : SInfo.AllocasToInstrument) { memtag::AllocaInfo &Info = I.second; - assert(Info.AI && SIB.getAllocaInterestingness(*Info.AI) == + assert_DISABLED(Info.AI && SIB.getAllocaInterestingness(*Info.AI) == llvm::memtag::AllocaInterestingness::kInteresting); memtag::alignAndPadAlloca(Info, kTagGranuleSize); AllocaInst *AI = Info.AI; diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp index 93b6ba0595b70..6a487dc8faf29 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp @@ -812,7 +812,7 @@ bool AMDGPUAsmPrinter::runOnMachineFunction(MachineFunction &MF) { false); [[maybe_unused]] int64_t PGMRSrc3; - assert(STM.hasGFX90AInsts() || + assert_DISABLED(STM.hasGFX90AInsts() || (CurrentProgramInfo.ComputePGMRSrc3GFX90A->evaluateAsAbsolute( PGMRSrc3) && static_cast(PGMRSrc3) == 0)); diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInsertDelayAlu.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInsertDelayAlu.cpp index 3f2bb5df8836b..107fc17e148a9 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUInsertDelayAlu.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUInsertDelayAlu.cpp @@ -407,7 +407,7 @@ class AMDGPUInsertDelayAlu : public MachineFunctionPass { } if (Emit) { - assert(State == BlockState[&MBB] && + assert_DISABLED(State == BlockState[&MBB] && "Basic block state should not have changed on final pass!"); } else if 
(State != BlockState[&MBB]) { BlockState[&MBB] = std::move(State); diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegBankSelect.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegBankSelect.cpp index d1985f46b1c44..d2f195f675c13 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPURegBankSelect.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPURegBankSelect.cpp @@ -58,7 +58,7 @@ bool AMDGPURegBankSelect::runOnMachineFunction(MachineFunction &MF) { OptMode = Mode::Fast; init(MF); - assert(checkFunctionIsLegal(MF)); + assert_DISABLED(checkFunctionIsLegal(MF)); const GCNSubtarget &ST = MF.getSubtarget(); MachineCycleInfo &CycleInfo = diff --git a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp index 758f864fd20e6..87b4fcf5c9a88 100644 --- a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp +++ b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp @@ -3157,7 +3157,7 @@ ParseStatus AMDGPUAsmParser::parseImm(OperandVector &Operands, if (isRegister()) return ParseStatus::NoMatch; - assert(!isModifier()); + assert_DISABLED(!isModifier()); if (!HasLit) { HasLit = trySkipId("lit"); diff --git a/llvm/lib/Target/AMDGPU/R600Packetizer.cpp b/llvm/lib/Target/AMDGPU/R600Packetizer.cpp index 28bf6e33384d2..2a27aa5cd374f 100644 --- a/llvm/lib/Target/AMDGPU/R600Packetizer.cpp +++ b/llvm/lib/Target/AMDGPU/R600Packetizer.cpp @@ -326,8 +326,8 @@ bool R600Packetizer::runOnMachineFunction(MachineFunction &Fn) { R600PacketizerList Packetizer(Fn, ST, MLI); // DFA state table should not be empty. 
- assert(Packetizer.getResourceTracker() && "Empty DFA table!"); - assert(Packetizer.getResourceTracker()->getInstrItins()); + assert_DISABLED(Packetizer.getResourceTracker() && "Empty DFA table!"); + assert_DISABLED(Packetizer.getResourceTracker()->getInstrItins()); if (Packetizer.getResourceTracker()->getInstrItins()->isEmpty()) return false; diff --git a/llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp b/llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp index d8697aa2ffe1c..ebf4d49d5a8ad 100644 --- a/llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp +++ b/llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp @@ -134,7 +134,7 @@ static void insertCSRSaves(MachineBasicBlock &SaveBlock, RC, TRI, Register()); if (Indexes) { - assert(std::distance(MIS.begin(), I) == 1); + assert_DISABLED(std::distance(MIS.begin(), I) == 1); MachineInstr &Inst = *std::prev(I); Indexes->insertMachineInstrInMaps(Inst); } diff --git a/llvm/lib/Target/AMDGPU/SIMachineScheduler.cpp b/llvm/lib/Target/AMDGPU/SIMachineScheduler.cpp index 77b4f25021c75..9c56b94c8e2ea 100644 --- a/llvm/lib/Target/AMDGPU/SIMachineScheduler.cpp +++ b/llvm/lib/Target/AMDGPU/SIMachineScheduler.cpp @@ -1649,7 +1649,7 @@ void SIScheduleBlockScheduler::decreaseLiveRegs(SIScheduleBlock *Block, for (unsigned Reg : Regs) { // For now only track virtual registers. std::set::iterator Pos = LiveRegs.find(Reg); - assert (Pos != LiveRegs.end() && // Reg must be live. + assert_DISABLED(Pos != LiveRegs.end() && // Reg must be live. LiveRegsConsumers.find(Reg) != LiveRegsConsumers.end() && LiveRegsConsumers[Reg] >= 1); --LiveRegsConsumers[Reg]; @@ -1675,7 +1675,7 @@ void SIScheduleBlockScheduler::blockScheduled(SIScheduleBlock *Block) { releaseBlockSuccs(Block); for (const auto &RegP : LiveOutRegsNumUsages[Block->getID()]) { // We produce this register, thus it must not be previously alive. 
- assert(LiveRegsConsumers.find(RegP.first) == LiveRegsConsumers.end() || + assert_DISABLED(LiveRegsConsumers.find(RegP.first) == LiveRegsConsumers.end() || LiveRegsConsumers[RegP.first] == 0); LiveRegsConsumers[RegP.first] += RegP.second; } diff --git a/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp b/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp index 1312b44b49bdc..5ab99faf646b0 100644 --- a/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp +++ b/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp @@ -1544,7 +1544,7 @@ void ARMConstantIslands::createNewWater(unsigned CPUserIndex, // We really must not split an IT block. #ifndef NDEBUG Register PredReg; - assert(!isThumb || getITInstrPredicate(*MI, PredReg) == ARMCC::AL); + assert_DISABLED(!isThumb || getITInstrPredicate(*MI, PredReg) == ARMCC::AL); #endif NewMBB = splitBlockBeforeInstr(&*MI); } diff --git a/llvm/lib/Target/ARM/ARMInstructionSelector.cpp b/llvm/lib/Target/ARM/ARMInstructionSelector.cpp index ea18e6652c656..7c37318e10bd5 100644 --- a/llvm/lib/Target/ARM/ARMInstructionSelector.cpp +++ b/llvm/lib/Target/ARM/ARMInstructionSelector.cpp @@ -775,7 +775,7 @@ bool ARMInstructionSelector::selectSelect(MachineInstrBuilder &MIB, // Compare the condition to 1. 
auto CondReg = MIB.getReg(1); - assert(validReg(MRI, CondReg, 1, ARM::GPRRegBankID) && + assert_DISABLED(validReg(MRI, CondReg, 1, ARM::GPRRegBankID) && "Unsupported types for select operation"); auto CmpI = BuildMI(MBB, InsertBefore, DbgLoc, TII.get(Opcodes.TSTri)) .addUse(CondReg) @@ -789,7 +789,7 @@ bool ARMInstructionSelector::selectSelect(MachineInstrBuilder &MIB, auto ResReg = MIB.getReg(0); auto TrueReg = MIB.getReg(2); auto FalseReg = MIB.getReg(3); - assert(validOpRegPair(MRI, ResReg, TrueReg, 32, ARM::GPRRegBankID) && + assert_DISABLED(validOpRegPair(MRI, ResReg, TrueReg, 32, ARM::GPRRegBankID) && validOpRegPair(MRI, TrueReg, FalseReg, 32, ARM::GPRRegBankID) && "Unsupported types for select operation"); auto Mov1I = BuildMI(MBB, InsertBefore, DbgLoc, TII.get(Opcodes.MOVCCr)) diff --git a/llvm/lib/Target/ARM/ARMSLSHardening.cpp b/llvm/lib/Target/ARM/ARMSLSHardening.cpp index d77db17090feb..34266fdfd7c35 100644 --- a/llvm/lib/Target/ARM/ARMSLSHardening.cpp +++ b/llvm/lib/Target/ARM/ARMSLSHardening.cpp @@ -211,7 +211,7 @@ void SLSBLRThunkInserter::populateThunk(MachineFunction &MF) { "ComdatThunks value changed since MF creation"); // FIXME: How to better communicate Register number, rather than through // name and lookup table? 
- assert(MF.getName().starts_with(getThunkPrefix())); + assert_DISABLED(MF.getName().starts_with(getThunkPrefix())); auto ThunkIt = llvm::find_if( SLSBLRThunks, [&MF](auto T) { return T.Name == MF.getName(); }); assert(ThunkIt != std::end(SLSBLRThunks)); diff --git a/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp b/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp index 906519fef45db..5ee3e343a2895 100644 --- a/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp +++ b/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp @@ -240,7 +240,7 @@ class ARMAsmParser : public MCTargetAsmParser { ARMMnemonicSets MS; ARMTargetStreamer &getTargetStreamer() { - assert(getParser().getStreamer().getTargetStreamer() && + assert_DISABLED(getParser().getStreamer().getTargetStreamer() && "do not have a target streamer"); MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer(); return static_cast(TS); @@ -333,7 +333,7 @@ class ARMAsmParser : public MCTargetAsmParser { // Rewind the state of the current IT block, removing the last slot from it. void rewindImplicitITPosition() { - assert(inImplicitITBlock()); + assert_DISABLED(inImplicitITBlock()); assert(ITState.CurPosition > 1); ITState.CurPosition--; unsigned TZ = llvm::countr_zero(ITState.Mask); @@ -346,7 +346,7 @@ class ARMAsmParser : public MCTargetAsmParser { // Rewind the state of the current IT block, removing the last slot from it. // If we were at the first slot, this closes the IT block. void discardImplicitITBlock() { - assert(inImplicitITBlock()); + assert_DISABLED(inImplicitITBlock()); assert(ITState.CurPosition == 1); ITState.CurPosition = ~0U; } @@ -375,8 +375,8 @@ class ARMAsmParser : public MCTargetAsmParser { // Extend the current implicit IT block to have one more slot with the given // condition code. 
void extendImplicitITBlock(ARMCC::CondCodes Cond) { - assert(inImplicitITBlock()); - assert(!isITBlockFull()); + assert_DISABLED(inImplicitITBlock()); + assert_DISABLED(!isITBlockFull()); assert(Cond == ITState.Cond || Cond == ARMCC::getOppositeCondition(ITState.Cond)); unsigned TZ = llvm::countr_zero(ITState.Mask); @@ -392,7 +392,7 @@ class ARMAsmParser : public MCTargetAsmParser { // Create a new implicit IT block with a dummy condition code. void startImplicitITBlock() { - assert(!inITBlock()); + assert_DISABLED(!inITBlock()); ITState.Cond = ARMCC::AL; ITState.Mask = 8; ITState.CurPosition = 1; @@ -404,7 +404,7 @@ class ARMAsmParser : public MCTargetAsmParser { // MCOperand, with a 1 implying 'e', regardless of the low bit of // the condition. void startExplicitITBlock(ARMCC::CondCodes Cond, unsigned Mask) { - assert(!inITBlock()); + assert_DISABLED(!inITBlock()); ITState.Cond = Cond; ITState.Mask = Mask; ITState.CurPosition = 0; @@ -6998,7 +6998,7 @@ void ARMAsmParser::fixupGNULDRDAlias(StringRef Mnemonic, bool ARMAsmParser::CDEConvertDualRegOperand(StringRef Mnemonic, OperandVector &Operands, unsigned MnemonicOpsEndInd) { - assert(MS.isCDEDualRegInstr(Mnemonic)); + assert_DISABLED(MS.isCDEDualRegInstr(Mnemonic)); if (Operands.size() < 3 + MnemonicOpsEndInd) return false; @@ -11009,7 +11009,7 @@ bool ARMAsmParser::processInstruction(MCInst &Inst, case ARM::t2IT: { // Set up the IT block state according to the IT instruction we just // matched. 
- assert(!inITBlock() && "nested IT blocks?!"); + assert_DISABLED(!inITBlock() && "nested IT blocks?!"); startExplicitITBlock(ARMCC::CondCodes(Inst.getOperand(0).getImm()), Inst.getOperand(1).getImm()); break; @@ -11112,7 +11112,7 @@ bool ARMAsmParser::processInstruction(MCInst &Inst, case ARM::MVE_VPTv4s32r: case ARM::MVE_VPTv4f32r: case ARM::MVE_VPTv8f16r: { - assert(!inVPTBlock() && "Nested VPT blocks are not allowed"); + assert_DISABLED(!inVPTBlock() && "Nested VPT blocks are not allowed"); MCOperand &MO = Inst.getOperand(0); VPTState.Mask = MO.getImm(); VPTState.CurPosition = 0; diff --git a/llvm/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp b/llvm/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp index 5c7ba897f5e5a..2e85e462a2f1f 100644 --- a/llvm/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp +++ b/llvm/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp @@ -787,7 +787,7 @@ void ARMTargetELFStreamer::switchVendor(StringRef Vendor) { if (!CurrentVendor.empty()) finishAttributeSection(); - assert(getStreamer().Contents.empty() && + assert_DISABLED(getStreamer().Contents.empty() && ".ARM.attributes should be flushed before changing vendor"); CurrentVendor = Vendor; diff --git a/llvm/lib/Target/Hexagon/HexagonBlockRanges.cpp b/llvm/lib/Target/Hexagon/HexagonBlockRanges.cpp index 0d3b986b96299..afffdbb66184f 100644 --- a/llvm/lib/Target/Hexagon/HexagonBlockRanges.cpp +++ b/llvm/lib/Target/Hexagon/HexagonBlockRanges.cpp @@ -161,7 +161,7 @@ HexagonBlockRanges::InstrIndexMap::InstrIndexMap(MachineBasicBlock &B) for (auto &In : B) { if (In.isDebugInstr()) continue; - assert(getIndex(&In) == IndexType::None && "Instruction already in map"); + assert_DISABLED(getIndex(&In) == IndexType::None && "Instruction already in map"); Map.insert(std::make_pair(Idx, &In)); ++Idx; } diff --git a/llvm/lib/Target/Hexagon/HexagonConstExtenders.cpp b/llvm/lib/Target/Hexagon/HexagonConstExtenders.cpp index 86ce6b4e05ed2..17fdabb55a2ee 100644 --- 
a/llvm/lib/Target/Hexagon/HexagonConstExtenders.cpp +++ b/llvm/lib/Target/Hexagon/HexagonConstExtenders.cpp @@ -670,7 +670,7 @@ RangeTree::Node *RangeTree::rotateLeft(Node *Lower, Node *Higher) { // an unbalanced tree again. if (height(Lower->Left) > height(Lower->Right)) Lower = rotateRight(Lower->Left, Lower); - assert(height(Lower->Left) <= height(Lower->Right)); + assert_DISABLED(height(Lower->Left) <= height(Lower->Right)); Higher->Right = Lower->Left; update(Higher); Lower->Left = Higher; @@ -685,7 +685,7 @@ RangeTree::Node *RangeTree::rotateRight(Node *Lower, Node *Higher) { // an unbalanced tree again. if (height(Lower->Left) < height(Lower->Right)) Lower = rotateLeft(Lower->Right, Lower); - assert(height(Lower->Left) >= height(Lower->Right)); + assert_DISABLED(height(Lower->Left) >= height(Lower->Right)); Higher->Left = Lower->Right; update(Higher); Lower->Right = Higher; diff --git a/llvm/lib/Target/Hexagon/HexagonGenPredicate.cpp b/llvm/lib/Target/Hexagon/HexagonGenPredicate.cpp index 5bb2d7d80ad54..e2a0da4bb2230 100644 --- a/llvm/lib/Target/Hexagon/HexagonGenPredicate.cpp +++ b/llvm/lib/Target/Hexagon/HexagonGenPredicate.cpp @@ -368,7 +368,7 @@ bool HexagonGenPredicate::convertToPredForm(MachineInstr *MI) { LLVM_DEBUG(dbgs() << __func__ << ": " << MI << " " << *MI); unsigned Opc = MI->getOpcode(); - assert(isConvertibleToPredForm(MI)); + assert_DISABLED(isConvertibleToPredForm(MI)); unsigned NumOps = MI->getNumOperands(); for (unsigned i = 0; i < NumOps; ++i) { MachineOperand &MO = MI->getOperand(i); diff --git a/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp b/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp index febbc95ec0db4..36a10e39c3019 100644 --- a/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp +++ b/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp @@ -1784,15 +1784,15 @@ int HexagonDAGToDAGISel::getWeight(SDNode *N) { if (!isOpcodeHandled(N)) return 1; assert(RootWeights.count(N) && "Cannot get weight of unseen root!"); - assert(RootWeights[N] 
!= -1 && "Cannot get weight of unvisited root!"); - assert(RootWeights[N] != -2 && "Cannot get weight of RAWU'd root!"); + assert_DISABLED(RootWeights[N] != -1 && "Cannot get weight of unvisited root!"); + assert_DISABLED(RootWeights[N] != -2 && "Cannot get weight of RAWU'd root!"); return RootWeights[N]; } int HexagonDAGToDAGISel::getHeight(SDNode *N) { if (!isOpcodeHandled(N)) return 0; - assert(RootWeights.count(N) && RootWeights[N] >= 0 && + assert_DISABLED(RootWeights.count(N) && RootWeights[N] >= 0 && "Cannot query height of unvisited/RAUW'd node!"); return RootHeights[N]; } @@ -2044,7 +2044,7 @@ unsigned HexagonDAGToDAGISel::getUsesInFunction(const Value *V) { /// unchanged) SDValue HexagonDAGToDAGISel::balanceSubTree(SDNode *N, bool TopLevel) { assert(RootWeights.count(N) && "Cannot balance non-root node."); - assert(RootWeights[N] != -2 && "This node was RAUW'd!"); + assert_DISABLED(RootWeights[N] != -2 && "This node was RAUW'd!"); assert(!TopLevel || N->getOpcode() == ISD::ADD); // Return early if this node was already visited @@ -2359,7 +2359,7 @@ SDValue HexagonDAGToDAGISel::balanceSubTree(SDNode *N, bool TopLevel) { LLVM_DEBUG(NewNode.dump()); } - assert(Leaves.size() == 1); + assert_DISABLED(Leaves.size() == 1); SDValue NewRoot = Leaves.top().Value; assert(NodeHeights.count(NewRoot)); diff --git a/llvm/lib/Target/Hexagon/HexagonISelLoweringHVX.cpp b/llvm/lib/Target/Hexagon/HexagonISelLoweringHVX.cpp index dd951d7eb8b54..684e96a394ea8 100644 --- a/llvm/lib/Target/Hexagon/HexagonISelLoweringHVX.cpp +++ b/llvm/lib/Target/Hexagon/HexagonISelLoweringHVX.cpp @@ -2702,7 +2702,7 @@ HexagonTargetLowering::ExpandHvxFpToInt(SDValue Op, SelectionDAG &DAG) const { SDValue Op0 = Op.getOperand(0); MVT InpTy = ty(Op0); MVT ResTy = ty(Op); - assert(InpTy.changeTypeToInteger() == ResTy); + assert_DISABLED(InpTy.changeTypeToInteger() == ResTy); // int32_t conv_f32_to_i32(uint32_t inp) { // // s | exp8 | frac23 @@ -2831,7 +2831,7 @@ 
HexagonTargetLowering::ExpandHvxIntToFp(SDValue Op, SelectionDAG &DAG) const { SDValue Op0 = Op.getOperand(0); MVT InpTy = ty(Op0); MVT ResTy = ty(Op); - assert(ResTy.changeTypeToInteger() == InpTy); + assert_DISABLED(ResTy.changeTypeToInteger() == InpTy); // uint32_t vnoc1_rnd(int32_t w) { // int32_t iszero = w == 0; diff --git a/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp b/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp index a92e2b0197d09..f5e09ac323f73 100644 --- a/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp +++ b/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp @@ -225,7 +225,7 @@ bool HexagonPacketizer::runOnMachineFunction(MachineFunction &MF) { HexagonPacketizerList Packetizer(MF, MLI, AA, MBPI, MinOnly); // DFA state table should not be empty. - assert(Packetizer.getResourceTracker() && "Empty DFA table!"); + assert_DISABLED(Packetizer.getResourceTracker() && "Empty DFA table!"); // Loop over all basic blocks and remove KILL pseudo-instructions // These instructions confuse the dependence analysis. Consider: @@ -1334,7 +1334,7 @@ bool HexagonPacketizerList::isLegalToPacketizeTogether(SUnit *SUI, SUnit *SUJ) { MachineBasicBlock::iterator II = I.getIterator(); // Solo instructions cannot go in the packet. 
- assert(!isSoloInstruction(I) && "Unexpected solo instr!"); + assert_DISABLED(!isSoloInstruction(I) && "Unexpected solo instr!"); if (cannotCoexist(I, J)) return false; @@ -1724,7 +1724,7 @@ HexagonPacketizerList::addToPacket(MachineInstr &MI) { CurrentPacketMIs.push_back(&MI); return MII; } - assert(ResourceTracker->canReserveResources(MI)); + assert_DISABLED(ResourceTracker->canReserveResources(MI)); bool ExtMI = HII->isExtended(MI) || HII->isConstExtended(MI); bool Good = true; @@ -1751,16 +1751,16 @@ HexagonPacketizerList::addToPacket(MachineInstr &MI) { if (!Good) { endPacket(MBB, MI); - assert(ResourceTracker->canReserveResources(MI)); + assert_DISABLED(ResourceTracker->canReserveResources(MI)); ResourceTracker->reserveResources(MI); if (ExtMI) { - assert(canReserveResourcesForConstExt()); + assert_DISABLED(canReserveResourcesForConstExt()); tryAllocateResourcesForConstExt(true); } - assert(ResourceTracker->canReserveResources(NvjMI)); + assert_DISABLED(ResourceTracker->canReserveResources(NvjMI)); ResourceTracker->reserveResources(NvjMI); if (ExtNvjMI) { - assert(canReserveResourcesForConstExt()); + assert_DISABLED(canReserveResourcesForConstExt()); reserveResourcesForConstExt(); } } diff --git a/llvm/lib/Target/LoongArch/AsmParser/LoongArchAsmParser.cpp b/llvm/lib/Target/LoongArch/AsmParser/LoongArchAsmParser.cpp index b4b19caed8999..6373e83c9072e 100644 --- a/llvm/lib/Target/LoongArch/AsmParser/LoongArchAsmParser.cpp +++ b/llvm/lib/Target/LoongArch/AsmParser/LoongArchAsmParser.cpp @@ -36,7 +36,7 @@ class LoongArchAsmParser : public MCTargetAsmParser { SMLoc getLoc() const { return getParser().getTok().getLoc(); } bool is64Bit() const { return getSTI().hasFeature(LoongArch::Feature64Bit); } LoongArchTargetStreamer &getTargetStreamer() { - assert(getParser().getStreamer().getTargetStreamer() && + assert_DISABLED(getParser().getStreamer().getTargetStreamer() && "do not have a target streamer"); MCTargetStreamer &TS = 
*getParser().getStreamer().getTargetStreamer(); return static_cast(TS); diff --git a/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp b/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp index 7888c57363ed3..3855672dd76cb 100644 --- a/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp +++ b/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp @@ -135,7 +135,7 @@ namespace { class MipsAsmParser : public MCTargetAsmParser { MipsTargetStreamer &getTargetStreamer() { - assert(getParser().getStreamer().getTargetStreamer() && + assert_DISABLED(getParser().getStreamer().getTargetStreamer() && "do not have a target streamer"); MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer(); return static_cast(TS); @@ -3245,7 +3245,7 @@ bool MipsAsmParser::loadAndAddSymbolAddress(const MCExpr *SymExpr, } else { // We have a case where SrcReg == DstReg and we don't have $at // available. We can't expand this case, so error out appropriately. - assert(SrcReg == DstReg && !canUseATReg() && + assert_DISABLED(SrcReg == DstReg && !canUseATReg() && "Could have expanded dla but didn't?"); reportParseError(IDLoc, "pseudo-instruction requires $at, which is not available"); @@ -3280,7 +3280,7 @@ bool MipsAsmParser::loadAndAddSymbolAddress(const MCExpr *SymExpr, if (UseSrcReg) TOut.emitRRR(Mips::ADDu, DstReg, TmpReg, SrcReg, IDLoc, STI); else - assert( + assert_DISABLED( getContext().getRegisterInfo()->isSuperOrSubRegisterEq(DstReg, TmpReg)); return false; diff --git a/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp b/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp index b5a6c1c6e01df..d137bd24941e2 100644 --- a/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp +++ b/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp @@ -2582,7 +2582,7 @@ void PPCAIXAsmPrinter::emitTracebackTable() { : XCOFF::parseParmsType(ParmsTypeValue, NumberOfFixedParms, NumberOfFPParms); - assert(ParmsType && toString(ParmsType.takeError()).c_str()); + assert_DISABLED(ParmsType && toString(ParmsType.takeError()).c_str()); if (ParmsType) { 
CommentOS << "Parameter type = " << ParmsType.get(); EmitComment(); @@ -2662,7 +2662,7 @@ void PPCAIXAsmPrinter::emitTracebackTable() { Expected> VecParmsType = XCOFF::parseVectorParmsType(VecParmTypeValue, VectorParmsNum); - assert(VecParmsType && toString(VecParmsType.takeError()).c_str()); + assert_DISABLED(VecParmsType && toString(VecParmsType.takeError()).c_str()); if (VecParmsType) { CommentOS << "Vector Parameter type = " << VecParmsType.get(); EmitComment(); diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp index ab31898e262e7..b5c54c33cbb53 100644 --- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp +++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp @@ -7302,7 +7302,7 @@ SDValue PPCTargetLowering::LowerFormalArguments_AIX( (void)OriginalValNo; auto HandleCustomVecRegLoc = [&]() { - assert(I != End && ArgLocs[I].isRegLoc() && ArgLocs[I].needsCustom() && + assert_DISABLED(I != End && ArgLocs[I].isRegLoc() && ArgLocs[I].needsCustom() && "Missing custom RegLoc."); VA = ArgLocs[I++]; assert(VA.getValVT().isVector() && @@ -7718,7 +7718,7 @@ SDValue PPCTargetLowering::LowerCall_AIX( unsigned LoadOffset = 0; auto HandleCustomVecRegLoc = [&]() { assert(I != E && "Unexpected end of CCvalAssigns."); - assert(ArgLocs[I].isRegLoc() && ArgLocs[I].needsCustom() && + assert_DISABLED(ArgLocs[I].isRegLoc() && ArgLocs[I].needsCustom() && "Expected custom RegLoc."); CCValAssign RegVA = ArgLocs[I++]; assert(RegVA.getValNo() == OriginalValNo && diff --git a/llvm/lib/Target/PowerPC/PPCReduceCRLogicals.cpp b/llvm/lib/Target/PowerPC/PPCReduceCRLogicals.cpp index 0bfcba9a52486..771e0262df003 100644 --- a/llvm/lib/Target/PowerPC/PPCReduceCRLogicals.cpp +++ b/llvm/lib/Target/PowerPC/PPCReduceCRLogicals.cpp @@ -138,7 +138,7 @@ struct BlockSplitInfo { /// the branch condition. The branch probabilities will be set if the /// MachineBranchProbabilityInfo isn't null. 
static bool splitMBB(BlockSplitInfo &BSI) { - assert(BSI.allInstrsInSameMBB() && + assert_DISABLED(BSI.allInstrsInSameMBB() && "All instructions must be in the same block."); MachineBasicBlock *ThisMBB = BSI.OrigBranch->getParent(); diff --git a/llvm/lib/Target/PowerPC/PPCVSXCopy.cpp b/llvm/lib/Target/PowerPC/PPCVSXCopy.cpp index 7272e6edefc5e..d203116824606 100644 --- a/llvm/lib/Target/PowerPC/PPCVSXCopy.cpp +++ b/llvm/lib/Target/PowerPC/PPCVSXCopy.cpp @@ -97,7 +97,7 @@ namespace { Changed = true; const TargetRegisterClass *SrcRC = &PPC::VSLRCRegClass; - assert((IsF8Reg(SrcMO.getReg(), MRI) || + assert_DISABLED((IsF8Reg(SrcMO.getReg(), MRI) || IsVSSReg(SrcMO.getReg(), MRI) || IsVSFReg(SrcMO.getReg(), MRI)) && "Unknown source for a VSX copy"); @@ -118,7 +118,7 @@ namespace { Changed = true; const TargetRegisterClass *DstRC = &PPC::VSLRCRegClass; - assert((IsF8Reg(DstMO.getReg(), MRI) || + assert_DISABLED((IsF8Reg(DstMO.getReg(), MRI) || IsVSFReg(DstMO.getReg(), MRI) || IsVSSReg(DstMO.getReg(), MRI)) && "Unknown destination for a VSX copy"); diff --git a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp index 4d46afb8c4ef9..10c18da043401 100644 --- a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp +++ b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp @@ -90,7 +90,7 @@ class RISCVAsmParser : public MCTargetAsmParser { } RISCVTargetStreamer &getTargetStreamer() { - assert(getParser().getStreamer().getTargetStreamer() && + assert_DISABLED(getParser().getStreamer().getTargetStreamer() && "do not have a target streamer"); MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer(); return static_cast(TS); diff --git a/llvm/lib/Target/Sparc/SparcFrameLowering.cpp b/llvm/lib/Target/Sparc/SparcFrameLowering.cpp index fa38c6cbb6ebb..0c740c5566096 100644 --- a/llvm/lib/Target/Sparc/SparcFrameLowering.cpp +++ b/llvm/lib/Target/Sparc/SparcFrameLowering.cpp @@ -369,7 +369,7 @@ void 
SparcFrameLowering::remapRegsForLeafProc(MachineFunction &MF) const { } } - assert(verifyLeafProcRegUse(&MRI)); + assert_DISABLED(verifyLeafProcRegUse(&MRI)); #ifdef EXPENSIVE_CHECKS MF.verify(0, "After LeafProc Remapping"); #endif diff --git a/llvm/lib/Target/SystemZ/AsmParser/SystemZAsmParser.cpp b/llvm/lib/Target/SystemZ/AsmParser/SystemZAsmParser.cpp index b8469a6ba70ea..535894bf986de 100644 --- a/llvm/lib/Target/SystemZ/AsmParser/SystemZAsmParser.cpp +++ b/llvm/lib/Target/SystemZ/AsmParser/SystemZAsmParser.cpp @@ -410,7 +410,7 @@ class SystemZAsmParser : public MCTargetAsmParser { }; SystemZTargetStreamer &getTargetStreamer() { - assert(getParser().getStreamer().getTargetStreamer() && + assert_DISABLED(getParser().getStreamer().getTargetStreamer() && "do not have a target streamer"); MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer(); return static_cast(TS); diff --git a/llvm/lib/Target/SystemZ/SystemZFrameLowering.cpp b/llvm/lib/Target/SystemZ/SystemZFrameLowering.cpp index 8fbd05eab5f6e..025369cda55e5 100644 --- a/llvm/lib/Target/SystemZ/SystemZFrameLowering.cpp +++ b/llvm/lib/Target/SystemZ/SystemZFrameLowering.cpp @@ -1170,7 +1170,7 @@ bool SystemZXPLINKFrameLowering::restoreCalleeSavedRegisters( // this point might hold return values). SystemZ::GPRRegs RestoreGPRs = ZFI->getRestoreGPRRegs(); if (RestoreGPRs.LowGPR) { - assert(isInt<20>(Regs.getStackPointerBias() + RestoreGPRs.GPROffset)); + assert_DISABLED(isInt<20>(Regs.getStackPointerBias() + RestoreGPRs.GPROffset)); if (RestoreGPRs.LowGPR == RestoreGPRs.HighGPR) // Build an LG/L instruction. 
BuildMI(MBB, MBBI, DL, TII->get(SystemZ::LG), RestoreGPRs.LowGPR) diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp index 3e05f3b0180a7..8d923af71117c 100644 --- a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp +++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp @@ -8386,7 +8386,7 @@ SystemZTargetLowering::emitAdjCallStack(MachineInstr &MI, MachineBasicBlock * SystemZTargetLowering::emitSelect(MachineInstr &MI, MachineBasicBlock *MBB) const { - assert(isSelectPseudo(MI) && "Bad call to emitSelect()"); + assert_DISABLED(isSelectPseudo(MI) && "Bad call to emitSelect()"); const SystemZInstrInfo *TII = Subtarget.getInstrInfo(); unsigned CCValid = MI.getOperand(3).getImm(); diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyCFGSort.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyCFGSort.cpp index 04eada18ef0d0..4fc22c29674b0 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyCFGSort.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyCFGSort.cpp @@ -361,7 +361,7 @@ static void sortBlocks(MachineFunction &MF, const MachineLoopInfo &MLI, assert(Pred->getNumber() < MBB.getNumber() && "Non-loop-header predecessors should be topologically sorted"); } - assert(OnStack.insert(Region) && + assert_DISABLED(OnStack.insert(Region) && "Regions should be declared at most once."); } else { @@ -369,13 +369,13 @@ static void sortBlocks(MachineFunction &MF, const MachineLoopInfo &MLI, for (auto *Pred : MBB.predecessors()) assert(Pred->getNumber() < MBB.getNumber() && "Non-loop-header predecessors should be topologically sorted"); - assert(OnStack.count(SRI.getRegionFor(&MBB)) && + assert_DISABLED(OnStack.count(SRI.getRegionFor(&MBB)) && "Blocks must be nested in their regions"); } while (OnStack.size() > 1 && &MBB == SRI.getBottom(OnStack.back())) OnStack.pop_back(); } - assert(OnStack.pop_back_val() == nullptr && + assert_DISABLED(OnStack.pop_back_val() == nullptr && "The function entry block shouldn't actually be a region 
header"); assert(OnStack.empty() && "Control flow stack pushes and pops should be balanced."); diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyCFGStackify.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyCFGStackify.cpp index a5f73fabca354..2ab7b6aa0f7f5 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyCFGStackify.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyCFGStackify.cpp @@ -490,7 +490,7 @@ void WebAssemblyCFGStackify::placeTryMarker(MachineBasicBlock &MBB) { for (auto *Pred : MBB.predecessors()) { if (Pred->getNumber() < MBBNumber) { Header = Header ? MDT.findNearestCommonDominator(Header, Pred) : Pred; - assert(!explicitlyBranchesTo(Pred, &MBB) && + assert_DISABLED(!explicitlyBranchesTo(Pred, &MBB) && "Explicit branch to an EH pad!"); } } @@ -684,7 +684,7 @@ void WebAssemblyCFGStackify::placeTryTableMarker(MachineBasicBlock &MBB) { for (auto *Pred : MBB.predecessors()) { if (Pred->getNumber() < MBBNumber) { Header = Header ? MDT.findNearestCommonDominator(Header, Pred) : Pred; - assert(!explicitlyBranchesTo(Pred, &MBB) && + assert_DISABLED(!explicitlyBranchesTo(Pred, &MBB) && "Explicit branch to an EH pad!"); } } diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyRegColoring.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyRegColoring.cpp index 7dc5c099c1270..c6711bb39e5b5 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyRegColoring.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyRegColoring.cpp @@ -142,7 +142,7 @@ static void undefInvalidDbgValues( Register Reg = LI->reg(); #ifndef NDEBUG // Ensure we don't process the same register twice - assert(SeenRegs.insert(Reg).second); + assert_DISABLED(SeenRegs.insert(Reg).second); #endif auto RegMapIt = DbgVRegToValues.find(Reg); if (RegMapIt == DbgVRegToValues.end()) diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp index 658ee3bb7f24f..3e6e83dda8501 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp +++ 
b/llvm/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp @@ -966,7 +966,7 @@ bool WebAssemblyRegStackify::runOnMachineFunction(MachineFunction &MF) { continue; Register Reg = MO.getReg(); if (MFI.isVRegStackified(Reg)) - assert(Stack.pop_back_val() == Reg && + assert_DISABLED(Stack.pop_back_val() == Reg && "Register stack pop should be paired with a push"); } for (MachineOperand &MO : MI.defs()) { diff --git a/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp b/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp index ae30d4dfc70f5..d4d4399080c96 100644 --- a/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp +++ b/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp @@ -126,7 +126,7 @@ class X86AsmParser : public MCTargetAsmParser { } X86TargetStreamer &getTargetStreamer() { - assert(getParser().getStreamer().getTargetStreamer() && + assert_DISABLED(getParser().getStreamer().getTargetStreamer() && "do not have a target streamer"); MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer(); return static_cast(TS); @@ -2222,7 +2222,7 @@ bool X86AsmParser::ParseIntelInlineAsmIdentifier( const MCExpr *&Val, StringRef &Identifier, InlineAsmIdentifierInfo &Info, bool IsUnevaluatedOperand, SMLoc &End, bool IsParsingOffsetOperator) { MCAsmParser &Parser = getParser(); - assert(isParsingMSInlineAsm() && "Expected to be parsing inline assembly."); + assert_DISABLED(isParsingMSInlineAsm() && "Expected to be parsing inline assembly."); Val = nullptr; StringRef LineBuf(Identifier.data()); diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp index 38ea1f35be2b9..a0ce9c92d6f27 100644 --- a/llvm/lib/Target/X86/X86InstrInfo.cpp +++ b/llvm/lib/Target/X86/X86InstrInfo.cpp @@ -3742,7 +3742,7 @@ bool X86InstrInfo::canMakeTailCallConditional( void X86InstrInfo::replaceBranchWithTailCall( MachineBasicBlock &MBB, SmallVectorImpl &BranchCond, const MachineInstr &TailCall) const { - assert(canMakeTailCallConditional(BranchCond, TailCall)); + 
assert_DISABLED(canMakeTailCallConditional(BranchCond, TailCall)); MachineBasicBlock::iterator I = MBB.end(); while (I != MBB.begin()) { diff --git a/llvm/lib/Target/X86/X86OptimizeLEAs.cpp b/llvm/lib/Target/X86/X86OptimizeLEAs.cpp index 3172896a8f609..2d716a4d78efc 100644 --- a/llvm/lib/Target/X86/X86OptimizeLEAs.cpp +++ b/llvm/lib/Target/X86/X86OptimizeLEAs.cpp @@ -540,7 +540,7 @@ bool X86OptimizeLEAPass::removeRedundantAddrCalc(MemOpMap &LEAs) { InstrPos[DefMI] = InstrPos[&MI] - 1; // Make sure the instructions' position numbers are sane. - assert(((InstrPos[DefMI] == 1 && + assert_DISABLED(((InstrPos[DefMI] == 1 && MachineBasicBlock::iterator(DefMI) == MBB->begin()) || InstrPos[DefMI] > InstrPos[&*std::prev(MachineBasicBlock::iterator(DefMI))]) && @@ -637,7 +637,7 @@ bool X86OptimizeLEAPass::removeRedundantLEAs(MemOpMap &LEAs) { // LEAs should be in occurrence order in the list, so we can freely // replace later LEAs with earlier ones. - assert(calcInstrDist(First, Last) > 0 && + assert_DISABLED(calcInstrDist(First, Last) > 0 && "LEAs must be in occurrence order in the list"); // Check that the Last LEA instruction can be replaced by the First. diff --git a/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp b/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp index 46317cb33776f..92f894dbfeab9 100644 --- a/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp +++ b/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp @@ -815,7 +815,7 @@ X86SpeculativeLoadHardeningPass::tracePredStateThroughCFG( // just skip this and continue. 
continue; - assert(SuccCounts[UncondSucc] == 1 && + assert_DISABLED(SuccCounts[UncondSucc] == 1 && "We should never have more than one edge to the unconditional " "successor at this point because every other edge must have been " "split above!"); @@ -1765,7 +1765,7 @@ void X86SpeculativeLoadHardeningPass::hardenLoadAddr( MachineInstr *X86SpeculativeLoadHardeningPass::sinkPostLoadHardenedInst( MachineInstr &InitialMI, SmallPtrSetImpl &HardenedInstrs) { - assert(X86InstrInfo::isDataInvariantLoad(InitialMI) && + assert_DISABLED(X86InstrInfo::isDataInvariantLoad(InitialMI) && "Cannot get here with a non-invariant load!"); assert(!isEFLAGSDefLive(InitialMI) && "Cannot get here with a data invariant load " @@ -1788,7 +1788,7 @@ MachineInstr *X86SpeculativeLoadHardeningPass::sinkPostLoadHardenedInst( // If we've already decided to harden a non-load, we must have sunk // some other post-load hardened instruction to it and it must itself // be data-invariant. - assert(X86InstrInfo::isDataInvariant(UseMI) && + assert_DISABLED(X86InstrInfo::isDataInvariant(UseMI) && "Data variant instruction being hardened!"); continue; } @@ -1903,7 +1903,7 @@ bool X86SpeculativeLoadHardeningPass::canHardenRegister(Register Reg) { unsigned X86SpeculativeLoadHardeningPass::hardenValueInRegister( Register Reg, MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertPt, const DebugLoc &Loc) { - assert(canHardenRegister(Reg) && "Cannot harden this register!"); + assert_DISABLED(canHardenRegister(Reg) && "Cannot harden this register!"); auto *RC = MRI->getRegClass(Reg); int Bytes = TRI->getRegSizeInBits(*RC) / 8; diff --git a/llvm/lib/Transforms/IPO/Attributor.cpp b/llvm/lib/Transforms/IPO/Attributor.cpp index 99b46591da420..086d8f39f328f 100644 --- a/llvm/lib/Transforms/IPO/Attributor.cpp +++ b/llvm/lib/Transforms/IPO/Attributor.cpp @@ -2919,7 +2919,7 @@ bool Attributor::registerFunctionSignatureRewrite( LLVM_DEBUG(dbgs() << "[Attributor] Register new rewrite of " << Arg << " in " << 
Arg.getParent()->getName() << " with " << ReplacementTypes.size() << " replacements\n"); - assert(isValidFunctionSignatureRewrite(Arg, ReplacementTypes) && + assert_DISABLED(isValidFunctionSignatureRewrite(Arg, ReplacementTypes) && "Cannot register an invalid rewrite"); Function *Fn = Arg.getParent(); diff --git a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp index 5d17ffa61272e..81fb46f8d3c67 100644 --- a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp +++ b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp @@ -1614,7 +1614,7 @@ ChangeStatus AAPointerInfoFloating::updateImpl(Attributor &A) { // The RHS is a reference that may be invalidated by an insertion caused by // the LHS. So we ensure that the side-effect of the LHS happens first. - assert(OffsetInfoMap.contains(CurPtr) && + assert_DISABLED(OffsetInfoMap.contains(CurPtr) && "CurPtr does not exist in the map!"); auto &UsrOI = OffsetInfoMap[Usr]; @@ -1633,7 +1633,7 @@ ChangeStatus AAPointerInfoFloating::updateImpl(Attributor &A) { << "\n"); assert(OffsetInfoMap.count(CurPtr) && "The current pointer offset should have been seeded!"); - assert(!OffsetInfoMap[CurPtr].isUnassigned() && + assert_DISABLED(!OffsetInfoMap[CurPtr].isUnassigned() && "Current pointer should be assigned"); if (ConstantExpr *CE = dyn_cast(Usr)) { @@ -1948,7 +1948,7 @@ ChangeStatus AAPointerInfoFloating::updateImpl(Attributor &A) { }; auto EquivalentUseCB = [&](const Use &OldU, const Use &NewU) { assert(OffsetInfoMap.count(OldU) && "Old use should be known already!"); - assert(!OffsetInfoMap[OldU].isUnassigned() && "Old use should be assinged"); + assert_DISABLED(!OffsetInfoMap[OldU].isUnassigned() && "Old use should be assinged"); if (OffsetInfoMap.count(NewU)) { LLVM_DEBUG({ if (!(OffsetInfoMap[NewU] == OffsetInfoMap[OldU])) { @@ -2094,7 +2094,7 @@ struct AANoUnwindImpl : AANoUnwind { /// See AbstractAttribute::initialize(...). 
void initialize(Attributor &A) override { bool IsKnown; - assert(!AA::hasAssumedIRAttr( + assert_DISABLED(!AA::hasAssumedIRAttr( A, nullptr, getIRPosition(), DepClassTy::NONE, IsKnown)); (void)IsKnown; } @@ -2219,7 +2219,7 @@ struct AANoSyncImpl : AANoSync { /// See AbstractAttribute::initialize(...). void initialize(Attributor &A) override { bool IsKnown; - assert(!AA::hasAssumedIRAttr(A, nullptr, getIRPosition(), + assert_DISABLED(!AA::hasAssumedIRAttr(A, nullptr, getIRPosition(), DepClassTy::NONE, IsKnown)); (void)IsKnown; } @@ -2292,7 +2292,7 @@ struct AANoFreeImpl : public AANoFree { /// See AbstractAttribute::initialize(...). void initialize(Attributor &A) override { bool IsKnown; - assert(!AA::hasAssumedIRAttr(A, nullptr, getIRPosition(), + assert_DISABLED(!AA::hasAssumedIRAttr(A, nullptr, getIRPosition(), DepClassTy::NONE, IsKnown)); (void)IsKnown; } @@ -2744,7 +2744,7 @@ struct AAMustProgressImpl : public AAMustProgress { /// See AbstractAttribute::initialize(...). void initialize(Attributor &A) override { bool IsKnown; - assert(!AA::hasAssumedIRAttr( + assert_DISABLED(!AA::hasAssumedIRAttr( A, nullptr, getIRPosition(), DepClassTy::NONE, IsKnown)); (void)IsKnown; } @@ -2827,7 +2827,7 @@ struct AANoRecurseImpl : public AANoRecurse { /// See AbstractAttribute::initialize(...). void initialize(Attributor &A) override { bool IsKnown; - assert(!AA::hasAssumedIRAttr( + assert_DISABLED(!AA::hasAssumedIRAttr( A, nullptr, getIRPosition(), DepClassTy::NONE, IsKnown)); (void)IsKnown; } @@ -3322,7 +3322,7 @@ struct AAWillReturnImpl : public AAWillReturn { /// See AbstractAttribute::initialize(...). 
void initialize(Attributor &A) override { bool IsKnown; - assert(!AA::hasAssumedIRAttr( + assert_DISABLED(!AA::hasAssumedIRAttr( A, nullptr, getIRPosition(), DepClassTy::NONE, IsKnown)); (void)IsKnown; } @@ -4320,7 +4320,7 @@ struct AAIsDeadFloating : public AAIsDeadValueImpl { return ChangeStatus::CHANGED; } if (auto *FI = dyn_cast(I)) { - assert(isDeadFence(A, *FI)); + assert_DISABLED(isDeadFence(A, *FI)); A.deleteAfterManifest(*FI); return ChangeStatus::CHANGED; } @@ -5505,7 +5505,7 @@ struct AANoReturnImpl : public AANoReturn { /// See AbstractAttribute::initialize(...). void initialize(Attributor &A) override { bool IsKnown; - assert(!AA::hasAssumedIRAttr( + assert_DISABLED(!AA::hasAssumedIRAttr( A, nullptr, getIRPosition(), DepClassTy::NONE, IsKnown)); (void)IsKnown; } @@ -5828,7 +5828,7 @@ struct AANoCaptureImpl : public AANoCapture { /// See AbstractAttribute::initialize(...). void initialize(Attributor &A) override { bool IsKnown; - assert(!AA::hasAssumedIRAttr( + assert_DISABLED(!AA::hasAssumedIRAttr( A, nullptr, getIRPosition(), DepClassTy::NONE, IsKnown)); (void)IsKnown; } @@ -10215,7 +10215,7 @@ struct AANoUndefImpl : AANoUndef { Value &V = getAssociatedValue(); if (isa(V)) indicatePessimisticFixpoint(); - assert(!isImpliedByIR(A, getIRPosition(), Attribute::NoUndef)); + assert_DISABLED(!isImpliedByIR(A, getIRPosition(), Attribute::NoUndef)); } /// See followUsesInMBEC diff --git a/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp b/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp index ed93b4491c50e..f8d28e02dd4c0 100644 --- a/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp +++ b/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp @@ -665,7 +665,7 @@ void DeadArgumentEliminationPass::markValue(const RetOrArg &RA, Liveness L, markLive(RA); break; case MaybeLive: - assert(!isLive(RA) && "Use is already live!"); + assert_DISABLED(!isLive(RA) && "Use is already live!"); for (const auto &MaybeLiveUse : MaybeLiveUses) { if (isLive(MaybeLiveUse)) { 
// A use is live, so this value is live. diff --git a/llvm/lib/Transforms/IPO/ExpandVariadics.cpp b/llvm/lib/Transforms/IPO/ExpandVariadics.cpp index 3121659edadd8..4dae1e3f9d33d 100644 --- a/llvm/lib/Transforms/IPO/ExpandVariadics.cpp +++ b/llvm/lib/Transforms/IPO/ExpandVariadics.cpp @@ -426,7 +426,7 @@ bool ExpandVariadics::runOnFunction(Module &M, IRBuilder<> &Builder, [[maybe_unused]] const bool OriginalFunctionIsDeclaration = OriginalFunction->isDeclaration(); - assert(rewriteABI() || !OriginalFunctionIsDeclaration); + assert(rewriteABI() || !OriginalFunctionIsDeclaration); // Declare a new function and redirect every use to that new function Function *VariadicWrapper = @@ -530,7 +530,7 @@ ExpandVariadics::deriveFixedArityReplacement(Module &M, IRBuilder<> &Builder, // and passes it to the second function. The second function does whatever // the original F does, except that it takes a va_list instead of the ... - assert(expansionApplicableToFunction(M, &F)); + assert(expansionApplicableToFunction(M, &F)); auto &Ctx = M.getContext(); diff --git a/llvm/lib/Transforms/IPO/FunctionImport.cpp b/llvm/lib/Transforms/IPO/FunctionImport.cpp index fee27f72f208b..4e54a645140dc 100644 --- a/llvm/lib/Transforms/IPO/FunctionImport.cpp +++ b/llvm/lib/Transforms/IPO/FunctionImport.cpp @@ -1234,7 +1234,7 @@ void llvm::ComputeCrossModuleImport( ELI.second.insert(NewExports.begin(), NewExports.end()); } - assert(checkVariableImport(Index, ImportLists, ExportLists)); + assert(checkVariableImport(Index, ImportLists, ExportLists)); #ifndef NDEBUG LLVM_DEBUG(dbgs() << "Import/Export lists for " << ImportLists.size() << " modules:\n"); diff --git a/llvm/lib/Transforms/IPO/GlobalOpt.cpp b/llvm/lib/Transforms/IPO/GlobalOpt.cpp index 4647c65a5c850..3e54593aae63c 100644 --- a/llvm/lib/Transforms/IPO/GlobalOpt.cpp +++ b/llvm/lib/Transforms/IPO/GlobalOpt.cpp @@ -2364,7 +2364,7 @@ static bool hasUseOtherThanLLVMUsed(GlobalAlias &GA, const LLVMUsed &U) { if
(GA.use_empty()) // No use at all. return false; - assert((!U.usedCount(&GA) || !U.compilerUsedCount(&GA)) && + assert((!U.usedCount(&GA) || !U.compilerUsedCount(&GA)) && "We should have removed the duplicated " "element from llvm.compiler.used"); if (!GA.hasOneUse()) diff --git a/llvm/lib/Transforms/IPO/IROutliner.cpp b/llvm/lib/Transforms/IPO/IROutliner.cpp index 20d2d46e79ebc..05ff3865dd7c4 100644 --- a/llvm/lib/Transforms/IPO/IROutliner.cpp +++ b/llvm/lib/Transforms/IPO/IROutliner.cpp @@ -815,7 +815,7 @@ static void mapInputsToGVNs(IRSimilarityCandidate &C, auto It = OutputMappings.find(Input); if (It != OutputMappings.end()) Input = It->second; - assert(C.getGVN(Input) && "Could not find a numbering for the given input"); + assert(C.getGVN(Input) && "Could not find a numbering for the given input"); EndInputNumbers.push_back(*C.getGVN(Input)); } } @@ -1214,7 +1214,7 @@ static std::optional getGVNForPHINode(OutlinableRegion &Region, // split the candidate basic blocks. So we use the previous block that it // was split from to find the valid global value numbering for the PHINode.
if (!OGVN) { - assert(Cand.getStartBB() == IncomingBlock && + assert(Cand.getStartBB() == IncomingBlock && "Unknown basic block used in exit path PHINode."); BasicBlock *PrevBlock = nullptr; diff --git a/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp b/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp index 4efd683dfca36..bee2792a93a2c 100644 --- a/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp +++ b/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp @@ -1132,7 +1132,7 @@ template typename CallsiteContextGraph::ContextNode * CallsiteContextGraph::addAllocNode( CallInfo Call, const FuncTy *F) { - assert(!getNodeForAlloc(Call)); + assert(!getNodeForAlloc(Call)); ContextNode *AllocNode = createNewNode(/*IsAllocation=*/true, F, Call); AllocationCallToContextNodeMap[Call] = AllocNode; // Use LastContextId as a uniq id for MIB allocation nodes. @@ -1396,9 +1396,9 @@ void CallsiteContextGraph:: if (Calls.size() == 1) { auto &[Call, Ids, Func, SavedContextIds] = Calls[0]; if (Ids.size() == 1) { - assert(SavedContextIds.empty()); + assert(SavedContextIds.empty()); // It should be this Node - assert(Node == getNodeForStackId(Ids[0])); + assert(Node == getNodeForStackId(Ids[0])); if (Node->Recursive) return; Node->setCall(Call); @@ -2212,7 +2212,7 @@ bool CallsiteContextGraph::partitionCallsByCallee( ContextNode *UnmatchedCalleesNode = nullptr; // Track whether we already assigned original node to a callee. bool UsedOrigNode = false; - assert(NodeToCallingFunc[Node]); + assert(NodeToCallingFunc[Node]); for (auto EI = Node->CalleeEdges.begin(); EI != Node->CalleeEdges.end();) { auto Edge = *EI; if (!Edge->Callee->hasCall()) { @@ -2581,7 +2581,7 @@ bool IndexCallsiteContextGraph::findProfiledCalleeThroughTailCalls( FoundProfiledCalleeMaxDepth = Depth; CreateAndSaveCallsiteInfo(CallEdge.first, FS); // Add FS to FSToVIMap in case it isn't already there.
- assert(!FSToVIMap.count(FS) || FSToVIMap[FS] == FSVI); + assert(!FSToVIMap.count(FS) || FSToVIMap[FS] == FSVI); FSToVIMap[FS] = FSVI; } else if (findProfiledCalleeThroughTailCalls( ProfiledCallee, CallEdge.first, Depth + 1, @@ -2596,7 +2596,7 @@ bool IndexCallsiteContextGraph::findProfiledCalleeThroughTailCalls( FoundSingleCalleeChain = true; CreateAndSaveCallsiteInfo(CallEdge.first, FS); // Add FS to FSToVIMap in case it isn't already there. - assert(!FSToVIMap.count(FS) || FSToVIMap[FS] == FSVI); + assert(!FSToVIMap.count(FS) || FSToVIMap[FS] == FSVI); FSToVIMap[FS] = FSVI; } else if (FoundMultipleCalleeChains) return false; @@ -3558,7 +3558,7 @@ bool CallsiteContextGraph::assignFunctions() { // Record the clone of callsite node assigned to this function clone. FuncCloneToCurNodeCloneMap[FuncClone] = CallsiteClone; - assert(FuncClonesToCallMap.count(FuncClone)); + assert(FuncClonesToCallMap.count(FuncClone)); std::map &CallMap = FuncClonesToCallMap[FuncClone]; CallInfo CallClone(Call); if (CallMap.count(Call)) @@ -3901,7 +3901,7 @@ bool CallsiteContextGraph::assignFunctions() { FuncCloneAssignedToCurCallsiteClone, Call, Clone, AllocationCallToContextNodeMap.count(Call)); } else - assert(FuncCloneToCurNodeCloneMap + assert(FuncCloneToCurNodeCloneMap [FuncCloneAssignedToCurCallsiteClone] == Clone); // Update callers to record function version called. RecordCalleeFuncOfCallsite(Edge->Caller, @@ -4123,7 +4123,7 @@ bool MemProfContextDisambiguation::applyImport(Module &M) { } VMaps = createFunctionClones(F, NumClones, M, ORE, FuncToAliasMap); // The first "clone" is the original copy, which doesn't have a VMap.
- assert(VMaps.size() == NumClones - 1); + assert(VMaps.size() == NumClones - 1); Changed = true; ClonesCreated = true; NumClonesCreated = NumClones; @@ -4300,7 +4300,7 @@ bool MemProfContextDisambiguation::applyImport(Module &M) { continue; LastStackContextId = *ContextIter; assert(StackIdIndexIter != MIBIter->StackIdIndices.end()); - assert(ImportSummary->getStackIdAtIndex(*StackIdIndexIter) == + assert(ImportSummary->getStackIdAtIndex(*StackIdIndexIter) == *ContextIter); StackIdIndexIter++; } diff --git a/llvm/lib/Transforms/IPO/MergeFunctions.cpp b/llvm/lib/Transforms/IPO/MergeFunctions.cpp index b50a700e09038..2bccc278ac39e 100644 --- a/llvm/lib/Transforms/IPO/MergeFunctions.cpp +++ b/llvm/lib/Transforms/IPO/MergeFunctions.cpp @@ -941,7 +941,7 @@ void MergeFunctions::mergeTwoFunctions(Function *F, Function *G) { void MergeFunctions::replaceFunctionInTree(const FunctionNode &FN, Function *G) { Function *F = FN.getFunc(); - assert(FunctionComparator(F, G, &GlobalNumbers).compare() == 0 && + assert(FunctionComparator(F, G, &GlobalNumbers).compare() == 0 && "The two functions must be equal"); auto I = FNodesInTree.find(F); diff --git a/llvm/lib/Transforms/IPO/SampleContextTracker.cpp b/llvm/lib/Transforms/IPO/SampleContextTracker.cpp index f878e3e591a05..2c4b39d3b1784 100644 --- a/llvm/lib/Transforms/IPO/SampleContextTracker.cpp +++ b/llvm/lib/Transforms/IPO/SampleContextTracker.cpp @@ -545,7 +545,7 @@ SampleContextTracker::getTopLevelContextNode(FunctionId FName) { ContextTrieNode & SampleContextTracker::addTopLevelContextNode(FunctionId FName) { - assert(!getTopLevelContextNode(FName) && "Node to add must not exist"); + assert(!getTopLevelContextNode(FName) && "Node to add must not exist"); return *RootContext.getOrCreateChildContext(LineLocation(0, 0), FName); } diff --git a/llvm/lib/Transforms/IPO/SampleProfileMatcher.cpp b/llvm/lib/Transforms/IPO/SampleProfileMatcher.cpp index 0c676e8fb95fd..c6166831efb88 100644 --- 
a/llvm/lib/Transforms/IPO/SampleProfileMatcher.cpp +++ b/llvm/lib/Transforms/IPO/SampleProfileMatcher.cpp @@ -618,7 +618,7 @@ void SampleProfileMatcher::countMismatchCallsites(const FunctionSamples &FS) { isInitialState(MatchStates.begin()->second); for (const auto &I : MatchStates) { TotalProfiledCallsites++; - assert( + assert( (OnInitialState ? isInitialState(I.second) : isFinalState(I.second)) && "Profile matching state is inconsistent"); diff --git a/llvm/lib/Transforms/IPO/ThinLTOBitcodeWriter.cpp b/llvm/lib/Transforms/IPO/ThinLTOBitcodeWriter.cpp index cd0e412bdf353..b2add2245171e 100644 --- a/llvm/lib/Transforms/IPO/ThinLTOBitcodeWriter.cpp +++ b/llvm/lib/Transforms/IPO/ThinLTOBitcodeWriter.cpp @@ -277,7 +277,7 @@ void splitAndWriteThinLTOBitcode( function_ref AARGetter, Module &M) { std::string ModuleId = getUniqueModuleId(&M); if (ModuleId.empty()) { - assert(!enableUnifiedLTO(M)); + assert(!enableUnifiedLTO(M)); // We couldn't generate a module ID for this module, write it out as a // regular LTO module with an index for summary-based dead stripping. 
ProfileSummaryInfo PSI(M); diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp index 21588aca51275..98a124fed699c 100644 --- a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp @@ -63,7 +63,7 @@ namespace { void operator*=(const FAddendCoef &S); void set(short C) { - assert(!insaneIntVal(C) && "Insane coefficient"); + assert(!insaneIntVal(C) && "Insane coefficient"); IsFp = false; IntVal = C; } @@ -302,7 +302,7 @@ void FAddendCoef::operator*=(const FAddendCoef &That) { if (isInt() && That.isInt()) { int Res = IntVal * (int)That.IntVal; - assert(!insaneIntVal(Res) && "Insane int value"); + assert(!insaneIntVal(Res) && "Insane int value"); IntVal = Res; return; } diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp index 07b9405b941d6..45514c7ef57a7 100644 --- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp @@ -4071,12 +4071,12 @@ Instruction *InstCombinerImpl::visitCallBase(CallBase &Call) { for (const GCRelocateInst *Reloc : GCSP.getGCRelocates()) { GCRelocateInst &GCR = *const_cast(Reloc); Value *BasePtr = GCR.getBasePtr(); - assert(Val2Idx.count(BasePtr) && Val2Idx[BasePtr] != NumOfGCLives && + assert(Val2Idx.count(BasePtr) && Val2Idx[BasePtr] != NumOfGCLives && "Missed live gc for base pointer"); auto *OpIntTy1 = GCR.getOperand(1)->getType(); GCR.setOperand(1, ConstantInt::get(OpIntTy1, Val2Idx[BasePtr])); Value *DerivedPtr = GCR.getDerivedPtr(); - assert(Val2Idx.count(DerivedPtr) && Val2Idx[DerivedPtr] != NumOfGCLives && + assert(Val2Idx.count(DerivedPtr) && Val2Idx[DerivedPtr] != NumOfGCLives && "Missed live gc for derived pointer"); auto *OpIntTy2 = GCR.getOperand(2)->getType(); GCR.setOperand(2, ConstantInt::get(OpIntTy2, Val2Idx[DerivedPtr])); diff --git 
a/llvm/lib/Transforms/Instrumentation/ControlHeightReduction.cpp b/llvm/lib/Transforms/Instrumentation/ControlHeightReduction.cpp index 810cbbda66085..99a0e80c3e9be 100644 --- a/llvm/lib/Transforms/Instrumentation/ControlHeightReduction.cpp +++ b/llvm/lib/Transforms/Instrumentation/ControlHeightReduction.cpp @@ -187,9 +187,9 @@ class CHRScope { void append(CHRScope *Next) { assert(RegInfos.size() > 0 && "Empty CHRScope"); assert(Next->RegInfos.size() > 0 && "Empty CHRScope"); - assert(getParentRegion() == Next->getParentRegion() && + assert(getParentRegion() == Next->getParentRegion() && "Must be siblings"); - assert(getExitBlock() == Next->getEntryBlock() && + assert(getExitBlock() == Next->getEntryBlock() && "Must be adjacent"); RegInfos.append(Next->RegInfos.begin(), Next->RegInfos.end()); Subs.append(Next->Subs.begin(), Next->Subs.end()); @@ -948,7 +948,7 @@ void CHR::checkScopeHoistable(CHRScope *Scope) { assert(!DT.dominates(Branch, InsertPoint) && "Branch can't be already above the hoist point"); DenseMap Visited; - assert(checkHoistValue(Branch->getCondition(), InsertPoint, + assert(checkHoistValue(Branch->getCondition(), InsertPoint, DT, Unhoistables, nullptr, Visited) && "checkHoistValue for branch"); } @@ -956,7 +956,7 @@ void CHR::checkScopeHoistable(CHRScope *Scope) { assert(!DT.dominates(SI, InsertPoint) && "SI can't be already above the hoist point"); DenseMap Visited; - assert(checkHoistValue(SI->getCondition(), InsertPoint, DT, + assert(checkHoistValue(SI->getCondition(), InsertPoint, DT, Unhoistables, nullptr, Visited) && "checkHoistValue for selects"); } @@ -1628,7 +1628,7 @@ assertCHRRegionsHaveBiasedBranchOrSelect(CHRScope *Scope) { return false; }; for (RegInfo &RI : Scope->CHRRegions) { - assert(HasBiasedBranchOrSelect(RI, Scope) && + assert(HasBiasedBranchOrSelect(RI, Scope) && "Must have biased branch or select"); } #endif diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp 
b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp index 0208c800a4fc3..1134335a32623 100644 --- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp +++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp @@ -1482,7 +1482,7 @@ struct MemorySanitizerVisitor : public InstVisitor { auto OrigIns = I->OrigIns; // Checks are grouped by the original instruction. We call all // `insertShadowCheck` for an instruction at once. - assert(Done.insert(OrigIns).second); + assert(Done.insert(OrigIns).second); auto J = std::find_if(I + 1, InstrumentationList.end(), [OrigIns](const ShadowOriginAndInsertPoint &R) { return OrigIns != R.OrigIns; diff --git a/llvm/lib/Transforms/Instrumentation/NumericalStabilitySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/NumericalStabilitySanitizer.cpp index 3cefc1a142237..6665fdf26383e 100644 --- a/llvm/lib/Transforms/Instrumentation/NumericalStabilitySanitizer.cpp +++ b/llvm/lib/Transforms/Instrumentation/NumericalStabilitySanitizer.cpp @@ -725,7 +725,7 @@ NumericalStabilitySanitizer::NumericalStabilitySanitizer(Module &M) if (!ClCheckFunctionsFilter.empty()) { Regex R = Regex(ClCheckFunctionsFilter); std::string RegexError; - assert(R.isValid(RegexError)); + assert(R.isValid(RegexError)); CheckFunctionsFilter = std::move(R); } } diff --git a/llvm/lib/Transforms/Instrumentation/PGOCtxProfFlattening.cpp b/llvm/lib/Transforms/Instrumentation/PGOCtxProfFlattening.cpp index ca29d8b7519cb..71039ef878890 100644 --- a/llvm/lib/Transforms/Instrumentation/PGOCtxProfFlattening.cpp +++ b/llvm/lib/Transforms/Instrumentation/PGOCtxProfFlattening.cpp @@ -445,7 +445,7 @@ PreservedAnalyses PGOCtxProfFlatteningPass::run(Module &M, if (F.isDeclaration()) continue; - assert(areAllBBsReachable( + assert(areAllBBsReachable( F, MAM.getResult(M) .getManager()) && "Function has unreacheable basic blocks. 
The expectation was that " diff --git a/llvm/lib/Transforms/Instrumentation/PGOInstrumentation.cpp b/llvm/lib/Transforms/Instrumentation/PGOInstrumentation.cpp index bceb6135cc1f9..4117c99d48594 100644 --- a/llvm/lib/Transforms/Instrumentation/PGOInstrumentation.cpp +++ b/llvm/lib/Transforms/Instrumentation/PGOInstrumentation.cpp @@ -1484,7 +1484,7 @@ void PGOUseFunc::populateCoverage(IndexedInstrProfReader *PGOReader) { while (!CoveredBlocksToProcess.empty()) { auto *CoveredBlock = CoveredBlocksToProcess.top(); - assert(Coverage[CoveredBlock]); + assert(Coverage[CoveredBlock]); CoveredBlocksToProcess.pop(); for (auto *BB : InverseDependencies[CoveredBlock]) { // If CoveredBlock is covered then BB is covered. diff --git a/llvm/lib/Transforms/ObjCARC/ObjCARCOpts.cpp b/llvm/lib/Transforms/ObjCARC/ObjCARCOpts.cpp index a2434675a7b5a..96115e62400cb 100644 --- a/llvm/lib/Transforms/ObjCARC/ObjCARCOpts.cpp +++ b/llvm/lib/Transforms/ObjCARC/ObjCARCOpts.cpp @@ -768,7 +768,7 @@ void ObjCARCOpt::OptimizeIndividualCalls(Function &F) { Instruction *DelayedAutoreleaseRV = nullptr; const Value *DelayedAutoreleaseRVArg = nullptr; auto setDelayedAutoreleaseRV = [&](Instruction *AutoreleaseRV) { - assert(!DelayedAutoreleaseRV || !AutoreleaseRV); + assert(!DelayedAutoreleaseRV || !AutoreleaseRV); DelayedAutoreleaseRV = AutoreleaseRV; DelayedAutoreleaseRVArg = nullptr; }; diff --git a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp index 6fce46a624c9c..a8b38aab8d40c 100644 --- a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp +++ b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp @@ -2097,7 +2097,7 @@ struct DSEState { for (auto OI : IOL) { Instruction *DeadI = OI.first; MemoryLocation Loc = *getLocForWrite(DeadI); - assert(isRemovable(DeadI) && "Expect only removable instruction"); + assert(isRemovable(DeadI) && "Expect only removable instruction"); const Value *Ptr = 
Loc.Ptr->stripPointerCasts(); int64_t DeadStart = 0; diff --git a/llvm/lib/Transforms/Scalar/GVN.cpp b/llvm/lib/Transforms/Scalar/GVN.cpp index 2ba600497e00d..ec475c84466a3 100644 --- a/llvm/lib/Transforms/Scalar/GVN.cpp +++ b/llvm/lib/Transforms/Scalar/GVN.cpp @@ -967,7 +967,7 @@ static bool IsValueFullyAvailableInBlock( case AvailabilityState::SpeculativelyAvailable: // Fix it! State = FixpointState; #ifndef NDEBUG - assert(NewSpeculativelyAvailableBBs.erase(BB) && + assert(NewSpeculativelyAvailableBBs.erase(BB) && "Found a speculatively available successor leftover?"); #endif // Queue successors for further processing. diff --git a/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp b/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp index 104e8ceb79670..95637de07c7b1 100644 --- a/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp +++ b/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp @@ -835,7 +835,7 @@ IntersectSignedRange(ScalarEvolution &SE, auto &R1Value = *R1; // We never return empty ranges from this function, and R1 is supposed to be // a result of intersection. Thus, R1 is never empty. - assert(!R1Value.isEmpty(SE, /* IsSigned */ true) && + assert(!R1Value.isEmpty(SE, /* IsSigned */ true) && "We should never have empty R1!"); // TODO: we could widen the smaller range and have this work; but for now we @@ -864,7 +864,7 @@ IntersectUnsignedRange(ScalarEvolution &SE, auto &R1Value = *R1; // We never return empty ranges from this function, and R1 is supposed to be // a result of intersection. Thus, R1 is never empty. 
- assert(!R1Value.isEmpty(SE, /* IsSigned */ false) && + assert(!R1Value.isEmpty(SE, /* IsSigned */ false) && "We should never have empty R1!"); // TODO: we could widen the smaller range and have this work; but for now we diff --git a/llvm/lib/Transforms/Scalar/LICM.cpp b/llvm/lib/Transforms/Scalar/LICM.cpp index 50f7637b07f6f..d7e42627ba521 100644 --- a/llvm/lib/Transforms/Scalar/LICM.cpp +++ b/llvm/lib/Transforms/Scalar/LICM.cpp @@ -789,7 +789,7 @@ class ControlFlowHoister { return InitialPreheader; } BranchInst *BI = It->first; - assert(std::find_if(++It, HoistableBranches.end(), HasBBAsSuccessor) == + assert(std::find_if(++It, HoistableBranches.end(), HasBBAsSuccessor) == HoistableBranches.end() && "BB is expected to be the target of at most one branch"); diff --git a/llvm/lib/Transforms/Scalar/LoopInterchange.cpp b/llvm/lib/Transforms/Scalar/LoopInterchange.cpp index 949296c3db0de..cea53d4d6a86b 100644 --- a/llvm/lib/Transforms/Scalar/LoopInterchange.cpp +++ b/llvm/lib/Transforms/Scalar/LoopInterchange.cpp @@ -1468,7 +1468,7 @@ static void moveLCSSAPhis(BasicBlock *InnerExit, BasicBlock *InnerHeader, IncIInnerMost->getParent() != InnerHeader) continue; - assert(all_of(P.users(), + assert(all_of(P.users(), [OuterHeader, OuterExit, IncI, InnerHeader](User *U) { return (cast(U)->getParent() == OuterHeader && IncI->getParent() == InnerHeader) || diff --git a/llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp b/llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp index db82f75bad5f3..3270ba5fe1b9f 100644 --- a/llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp +++ b/llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp @@ -542,9 +542,9 @@ class LoadEliminationForLoop { if (!Cand.isDependenceDistanceOfOne(PSE, L)) continue; - assert(isa(PSE.getSCEV(Cand.Load->getPointerOperand())) && + assert(isa(PSE.getSCEV(Cand.Load->getPointerOperand())) && "Loading from something other than indvar?"); - assert( + assert( 
isa(PSE.getSCEV(Cand.Store->getPointerOperand())) && "Storing to something other than indvar?"); diff --git a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp index e55b8f6652e31..9c745fe29ac74 100644 --- a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp +++ b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp @@ -1227,7 +1227,7 @@ class Cost { #endif bool isLoser() { - assert(isValid() && "invalid cost"); + assert(isValid() && "invalid cost"); return C.NumRegs == ~0u; } @@ -1563,7 +1563,7 @@ void Cost::RateFormula(const Formula &F, // If we don't count instruction cost exit here. if (!InsnsCost) { - assert(isValid() && "invalid cost"); + assert(isValid() && "invalid cost"); return; } @@ -1600,7 +1600,7 @@ void Cost::RateFormula(const Formula &F, // BaseAdds adds instructions for unfolded registers. if (LU.Kind != LSRUse::ICmpZero) C.Insns += C.NumBaseAdds - PrevNumBaseAdds; - assert(isValid() && "invalid cost"); + assert(isValid() && "invalid cost"); } /// Set this cost to a losing value. 
@@ -6706,7 +6706,7 @@ static unsigned numLLVMArgOps(SmallVectorImpl &Expr) { template static void updateDVIWithLocation(T &DbgVal, Value *Location, SmallVectorImpl &Ops) { - assert(numLLVMArgOps(Ops) == 0 && "Expected expression that does not " + assert(numLLVMArgOps(Ops) == 0 && "Expected expression that does not " "contain any DW_OP_llvm_arg operands."); DbgVal.setRawLocation(ValueAsMetadata::get(Location)); DbgVal.setExpression(DIExpression::get(DbgVal.getContext(), Ops)); @@ -6718,7 +6718,7 @@ template static void updateDVIWithLocations(T &DbgVal, SmallVectorImpl &Locations, SmallVectorImpl &Ops) { - assert(numLLVMArgOps(Ops) != 0 && + assert(numLLVMArgOps(Ops) != 0 && "Expected expression that references DIArglist locations using " "DW_OP_llvm_arg operands."); SmallVector MetadataLocs; diff --git a/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp b/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp index eaf58ea8dd9d0..29a685a087d39 100644 --- a/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp +++ b/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp @@ -918,7 +918,7 @@ class LowerMatrixIntrinsics { computeShapeInfoForInst(&I, ShapeMap) && "Shape of new instruction doesn't match original shape."); CleanupBinOp(I, A, B); - assert(computeShapeInfoForInst(Add, ShapeMap).value_or(ShapeMap[Add]) == + assert(computeShapeInfoForInst(Add, ShapeMap).value_or(ShapeMap[Add]) == ShapeMap[Add] && "Shape of updated addition doesn't match cached shape."); } diff --git a/llvm/lib/Transforms/Scalar/MergeICmps.cpp b/llvm/lib/Transforms/Scalar/MergeICmps.cpp index 4291f3aee0cd1..061ea564a230c 100644 --- a/llvm/lib/Transforms/Scalar/MergeICmps.cpp +++ b/llvm/lib/Transforms/Scalar/MergeICmps.cpp @@ -267,7 +267,7 @@ void BCECmpBlock::split(BasicBlock *NewParent, AliasAnalysis &AA) const { for (Instruction &Inst : *BB) { if (BlockInsts.count(&Inst)) continue; - assert(canSinkBCECmpInst(&Inst, AA) && "Split unsplittable block"); + 
assert(canSinkBCECmpInst(&Inst, AA) && "Split unsplittable block"); // This is a non-BCE-cmp-block instruction. And it can be separated // from the BCE-cmp-block instruction. OtherInsts.push_back(&Inst); diff --git a/llvm/lib/Transforms/Scalar/MergedLoadStoreMotion.cpp b/llvm/lib/Transforms/Scalar/MergedLoadStoreMotion.cpp index 299239fb70200..47c585ad5b5d8 100644 --- a/llvm/lib/Transforms/Scalar/MergedLoadStoreMotion.cpp +++ b/llvm/lib/Transforms/Scalar/MergedLoadStoreMotion.cpp @@ -109,7 +109,7 @@ class MergedLoadStoreMotion { private: BasicBlock *getDiamondTail(BasicBlock *BB); - bool isDiamondHead(BasicBlock *BB); + bool isDiamondHead(const BasicBlock *BB) const; // Routines for sinking stores StoreInst *canSinkFromBlock(BasicBlock *BB, StoreInst *SI); PHINode *getPHIOperand(BasicBlock *BB, StoreInst *S0, StoreInst *S1); @@ -133,7 +133,7 @@ BasicBlock *MergedLoadStoreMotion::getDiamondTail(BasicBlock *BB) { /// /// True when BB is the head of a diamond (hammock) /// -bool MergedLoadStoreMotion::isDiamondHead(BasicBlock *BB) { +bool MergedLoadStoreMotion::isDiamondHead(const BasicBlock *BB) const { if (!BB) return false; auto *BI = dyn_cast(BB->getTerminator()); diff --git a/llvm/lib/Transforms/Scalar/NewGVN.cpp b/llvm/lib/Transforms/Scalar/NewGVN.cpp index 13d9e8f186b47..e6e35e8928900 100644 --- a/llvm/lib/Transforms/Scalar/NewGVN.cpp +++ b/llvm/lib/Transforms/Scalar/NewGVN.cpp @@ -3056,7 +3056,7 @@ void NewGVN::updateProcessedCount(const Value *V) { ProcessedCount.insert({V, 1}); } else { ++ProcessedCount[V]; - assert(ProcessedCount[V] < 100 && + assert(ProcessedCount[V] < 100 && "Seem to have processed the same Value a lot"); } #endif @@ -3268,7 +3268,7 @@ void NewGVN::verifyMemoryCongruency() const { auto *SecondMUD = dyn_cast(KV.second->getMemoryLeader()); if (FirstMUD && SecondMUD) { SmallPtrSet VisitedMAS; - assert((singleReachablePHIPath(VisitedMAS, FirstMUD, SecondMUD) || + assert((singleReachablePHIPath(VisitedMAS, FirstMUD, 
SecondMUD) || ValueToClass.lookup(FirstMUD->getMemoryInst()) == ValueToClass.lookup(SecondMUD->getMemoryInst())) && "The instructions for these memory operations should have " diff --git a/llvm/lib/Transforms/Scalar/Reassociate.cpp b/llvm/lib/Transforms/Scalar/Reassociate.cpp index e742d2ed12af1..fba57c828c06e 100644 --- a/llvm/lib/Transforms/Scalar/Reassociate.cpp +++ b/llvm/lib/Transforms/Scalar/Reassociate.cpp @@ -442,7 +442,7 @@ static bool LinearizeExprTree(Instruction *I, // If this is a binary operation of the right kind with only one use then // add its operands to the expression. if (BinaryOperator *BO = isReassociableOp(Op, Opcode)) { - assert(Visited.insert(Op).second && "Not first visit!"); + assert(Visited.insert(Op).second && "Not first visit!"); LLVM_DEBUG(dbgs() << "DIRECT ADD: " << *Op << " (" << Weight << ")\n"); Worklist.push_back(std::make_pair(BO, Weight)); continue; @@ -452,7 +452,7 @@ static bool LinearizeExprTree(Instruction *I, LeafMap::iterator It = Leaves.find(Op); if (It == Leaves.end()) { // Not in the leaf map. Must be the first time we saw this operand. - assert(Visited.insert(Op).second && "Not first visit!"); + assert(Visited.insert(Op).second && "Not first visit!"); if (!Op->hasOneUse()) { // This value has uses not accounted for by the expression, so it is // not safe to modify. Mark it as being a leaf. 
diff --git a/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp b/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp index daf8fa28a71e5..7924f7562a5f4 100644 --- a/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp +++ b/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp @@ -631,8 +631,8 @@ static Value *findBaseDefiningValueCached(Value *I, DefiningValueMapTy &Cache, << Cache[I]->getName() << ", is known base = " << KnownBases[I] << "\n"); } - assert(Cache[I] != nullptr); - assert(KnownBases.contains(Cache[I]) && + assert(Cache[I] != nullptr); + assert(KnownBases.contains(Cache[I]) && "Cached value must be present in known bases map"); return Cache[I]; } @@ -3030,7 +3030,7 @@ bool RewriteStatepointsForGC::runOnFunction(Function &F, DominatorTree &DT, const TargetLibraryInfo &TLI) { assert(!F.isDeclaration() && !F.empty() && "need function body to rewrite statepoints in"); - assert(shouldRewriteStatepointsIn(F) && "mismatch in rewrite decision"); + assert(shouldRewriteStatepointsIn(F) && "mismatch in rewrite decision"); auto NeedsRewrite = [&TLI](Instruction &I) { if (const auto *Call = dyn_cast(&I)) { @@ -3285,7 +3285,7 @@ static void computeLiveInValues(DominatorTree &DT, Function &F, #ifndef NDEBUG for (Value *Kill : Data.KillSet[&BB]) - assert(!Data.LiveSet[&BB].count(Kill) && "live set contains kill"); + assert(!Data.LiveSet[&BB].count(Kill) && "live set contains kill"); #endif Data.LiveOut[&BB] = SetVector(); diff --git a/llvm/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp b/llvm/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp index aa3cbc5e4bddc..bbe572363fe8c 100644 --- a/llvm/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp +++ b/llvm/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp @@ -2127,7 +2127,7 @@ void visitDomSubTree(DominatorTree &DT, BasicBlock *BB, CallableT Callable) { // Accumulate the child nodes. 
for (DomTreeNode *ChildN : *N) { - assert(Visited.insert(ChildN).second && + assert(Visited.insert(ChildN).second && "Cannot visit a node twice when walking a tree!"); DomWorklist.push_back(ChildN); } diff --git a/llvm/lib/Transforms/Scalar/SimplifyCFGPass.cpp b/llvm/lib/Transforms/Scalar/SimplifyCFGPass.cpp index daa82a8c368e2..b2ea71e45954f 100644 --- a/llvm/lib/Transforms/Scalar/SimplifyCFGPass.cpp +++ b/llvm/lib/Transforms/Scalar/SimplifyCFGPass.cpp @@ -247,7 +247,7 @@ static bool iterativelySimplifyCFG(Function &F, const TargetTransformInfo &TTI, unsigned IterCnt = 0; (void)IterCnt; while (LocalChange) { - assert(IterCnt++ < 1000 && "Iterative simplification didn't converge!"); + assert(IterCnt++ < 1000 && "Iterative simplification didn't converge!"); LocalChange = false; // Loop over all of the basic blocks and remove them if they are unneeded. diff --git a/llvm/lib/Transforms/Utils/CallPromotionUtils.cpp b/llvm/lib/Transforms/Utils/CallPromotionUtils.cpp index 17cba2e642a19..1d00c9249b363 100644 --- a/llvm/lib/Transforms/Utils/CallPromotionUtils.cpp +++ b/llvm/lib/Transforms/Utils/CallPromotionUtils.cpp @@ -598,9 +598,9 @@ CallBase *llvm::promoteCallWithIfThenElse(CallBase &CB, Function &Callee, auto &DirectBB = *DirectCall.getParent(); auto &IndirectBB = *CB.getParent(); - assert((CtxProfAnalysis::getBBInstrumentation(IndirectBB) == nullptr) && + assert((CtxProfAnalysis::getBBInstrumentation(IndirectBB) == nullptr) && "The ICP direct BB is new, it shouldn't have instrumentation"); - assert((CtxProfAnalysis::getBBInstrumentation(DirectBB) == nullptr) && + assert((CtxProfAnalysis::getBBInstrumentation(DirectBB) == nullptr) && "The ICP indirect BB is new, it shouldn't have instrumentation"); // Allocate counters for the new basic blocks. 
diff --git a/llvm/lib/Transforms/Utils/CloneFunction.cpp b/llvm/lib/Transforms/Utils/CloneFunction.cpp index 5dc82a8dfb2db..bcc240195222e 100644 --- a/llvm/lib/Transforms/Utils/CloneFunction.cpp +++ b/llvm/lib/Transforms/Utils/CloneFunction.cpp @@ -808,7 +808,7 @@ void llvm::CloneAndPruneIntoFromInst(Function *NewFunc, const Function *OldFunc, while ((PN = dyn_cast(I++))) { Value *NV = PoisonValue::get(PN->getType()); PN->replaceAllUsesWith(NV); - assert(VMap[&*OldI] == PN && "VMap mismatch"); + assert(VMap[&*OldI] == PN && "VMap mismatch"); VMap[&*OldI] = NV; PN->eraseFromParent(); ++OldI; diff --git a/llvm/lib/Transforms/Utils/ControlFlowUtils.cpp b/llvm/lib/Transforms/Utils/ControlFlowUtils.cpp index 5ba626fa213ad..5621d91acf052 100644 --- a/llvm/lib/Transforms/Utils/ControlFlowUtils.cpp +++ b/llvm/lib/Transforms/Utils/ControlFlowUtils.cpp @@ -280,7 +280,7 @@ BasicBlock *ControlFlowHub::finalize( for (auto [BB, Succ0, Succ1] : Branches) { #ifndef NDEBUG - assert(Incoming.insert(BB).second && "Duplicate entry for incoming block."); + assert(Incoming.insert(BB).second && "Duplicate entry for incoming block."); #endif if (Succ0) Outgoing.insert(Succ0); diff --git a/llvm/lib/Transforms/Utils/FunctionImportUtils.cpp b/llvm/lib/Transforms/Utils/FunctionImportUtils.cpp index 766c7501550da..d6ac5ed9dc111 100644 --- a/llvm/lib/Transforms/Utils/FunctionImportUtils.cpp +++ b/llvm/lib/Transforms/Utils/FunctionImportUtils.cpp @@ -166,7 +166,7 @@ FunctionImportGlobalProcessing::getLinkage(const GlobalValue *SGV, // linkonce_any/weak_any definition and importing would change the order // they are seen by the linker. The module linking caller needs to enforce // this. - assert(!doImportAsDefinition(SGV)); + assert(!doImportAsDefinition(SGV)); // If imported as a declaration, it becomes external_weak. 
return SGV->getLinkage(); @@ -204,7 +204,7 @@ FunctionImportGlobalProcessing::getLinkage(const GlobalValue *SGV, case GlobalValue::ExternalWeakLinkage: // External weak doesn't apply to definitions, must be a declaration. - assert(!doImportAsDefinition(SGV)); + assert_DISABLED(!doImportAsDefinition(SGV)); // Linkage stays external_weak. return SGV->getLinkage(); @@ -225,7 +225,7 @@ void FunctionImportGlobalProcessing::processGlobalForThinLTO(GlobalValue &GV) { // We should always have a ValueInfo (i.e. GV in index) for definitions when // we are exporting, and also when importing that value. - assert(VI || GV.isDeclaration() || + assert_DISABLED(VI || GV.isDeclaration() || (isPerformingImport() && !doImportAsDefinition(&GV))); // Mark read/write-only variables which can be imported with specific diff --git a/llvm/lib/Transforms/Utils/InlineFunction.cpp b/llvm/lib/Transforms/Utils/InlineFunction.cpp index 4ad426285ce2f..a951c26062b0d 100644 --- a/llvm/lib/Transforms/Utils/InlineFunction.cpp +++ b/llvm/lib/Transforms/Utils/InlineFunction.cpp @@ -445,7 +445,7 @@ static Value *getUnwindDestToken(Instruction *EHPad, // were the case, then we should also have recorded the lack of information // for the descendant that we're coming from. So assert that we don't // find a null entry in the MemoMap for AncestorPad. - assert(!MemoMap.count(AncestorPad) || MemoMap[AncestorPad]); + assert_DISABLED(!MemoMap.count(AncestorPad) || MemoMap[AncestorPad]); auto AncestorMemo = MemoMap.find(AncestorPad); if (AncestorMemo == MemoMap.end()) { UnwindDestToken = getUnwindDestTokenHelper(AncestorPad, MemoMap); @@ -704,7 +704,7 @@ static void HandleInlinedEHPad(InvokeInst *II, BasicBlock *FirstNewBlock, // subsequent calls to getUnwindDestToken, so map the cleanuppad // to short-circuit any such calls and recognize this as an "unwind // to caller" cleanup. 
- assert(!FuncletUnwindMap.count(CleanupPad) || + assert_DISABLED(!FuncletUnwindMap.count(CleanupPad) || isa(FuncletUnwindMap[CleanupPad])); FuncletUnwindMap[CleanupPad] = ConstantTokenNone::get(Caller->getContext()); diff --git a/llvm/lib/Transforms/Utils/LoopConstrainer.cpp b/llvm/lib/Transforms/Utils/LoopConstrainer.cpp index 8f103153059e8..08c6272d3be1e 100644 --- a/llvm/lib/Transforms/Utils/LoopConstrainer.cpp +++ b/llvm/lib/Transforms/Utils/LoopConstrainer.cpp @@ -27,7 +27,7 @@ static bool isSafeDecreasingBound(const SCEV *Start, const SCEV *BoundSCEV, if (!SE.isAvailableAtLoopEntry(BoundSCEV, L)) return false; - assert(SE.isKnownNegative(Step) && "expecting negative step"); + assert_DISABLED(SE.isKnownNegative(Step) && "expecting negative step"); LLVM_DEBUG(dbgs() << "isSafeDecreasingBound with:\n"); LLVM_DEBUG(dbgs() << "Start: " << *Start << "\n"); @@ -169,7 +169,7 @@ LoopStructure::parseLoopStructure(ScalarEvolution &SE, Loop &L, FailureReason = "could not compute latch count"; return std::nullopt; } - assert(SE.getLoopDisposition(MaxBETakenCount, &L) == + assert_DISABLED(SE.getLoopDisposition(MaxBETakenCount, &L) == ScalarEvolution::LoopInvariant && "loop variant exit count doesn't make sense!"); @@ -493,7 +493,7 @@ void LoopConstrainer::cloneLoop(LoopConstrainer::ClonedLoop &Result, BasicBlock *ClonedBB = Result.Blocks[i]; BasicBlock *OriginalBB = OriginalLoop.getBlocks()[i]; - assert(Result.Map[OriginalBB] == ClonedBB && "invariant!"); + assert_DISABLED(Result.Map[OriginalBB] == ClonedBB && "invariant!"); for (Instruction &I : *ClonedBB) RemapInstruction(&I, Result.Map, diff --git a/llvm/lib/Transforms/Utils/LoopPeel.cpp b/llvm/lib/Transforms/Utils/LoopPeel.cpp index 3cbde39b30b4e..311a2db611970 100644 --- a/llvm/lib/Transforms/Utils/LoopPeel.cpp +++ b/llvm/lib/Transforms/Utils/LoopPeel.cpp @@ -218,13 +218,13 @@ PhiAnalyzer::PeelCounter PhiAnalyzer::calculate(const Value &V) { if (const PHINode *Phi = dyn_cast(&V)) { if (Phi->getParent() != 
L.getHeader()) { // Phi is not in header block so Unknown. - assert(IterationsToInvariance[&V] == Unknown && "unexpected value saved"); + assert_DISABLED(IterationsToInvariance[&V] == Unknown && "unexpected value saved"); return Unknown; } // We need to analyze the input from the back edge and add 1. Value *Input = Phi->getIncomingValueForBlock(L.getLoopLatch()); PeelCounter Iterations = calculate(*Input); - assert(IterationsToInvariance[Input] == Iterations && + assert_DISABLED(IterationsToInvariance[Input] == Iterations && "unexpected value saved"); return (IterationsToInvariance[Phi] = addOne(Iterations)); } @@ -246,7 +246,7 @@ PhiAnalyzer::PeelCounter PhiAnalyzer::calculate(const Value &V) { // TODO: handle more expressions // Everything else is Unknown. - assert(IterationsToInvariance[&V] == Unknown && "unexpected value saved"); + assert_DISABLED(IterationsToInvariance[&V] == Unknown && "unexpected value saved"); return Unknown; } diff --git a/llvm/lib/Transforms/Utils/LoopUnroll.cpp b/llvm/lib/Transforms/Utils/LoopUnroll.cpp index b90addcef69e6..7ca4d9bb78f82 100644 --- a/llvm/lib/Transforms/Utils/LoopUnroll.cpp +++ b/llvm/lib/Transforms/Utils/LoopUnroll.cpp @@ -747,7 +747,7 @@ llvm::UnrollLoop(Loop *L, UnrollLoopOptions ULO, LoopInfo *LI, // Eliminate copies of the loop heart intrinsic, if any. 
if (ULO.Heart) { auto it = VMap.find(ULO.Heart); - assert(it != VMap.end()); + assert_DISABLED(it != VMap.end()); Instruction *heartCopy = cast(it->second); heartCopy->eraseFromParent(); VMap.erase(it); @@ -834,7 +834,7 @@ llvm::UnrollLoop(Loop *L, UnrollLoopOptions ULO, LoopInfo *LI, if (L->contains(InValI)) InVal = LastValueMap[InVal]; } - assert(Latches.back() == LastValueMap[LatchBlock] && "bad last latch"); + assert_DISABLED(Latches.back() == LastValueMap[LatchBlock] && "bad last latch"); PN->addIncoming(InVal, Latches.back()); } } diff --git a/llvm/lib/Transforms/Utils/LoopUnrollAndJam.cpp b/llvm/lib/Transforms/Utils/LoopUnrollAndJam.cpp index 2c2400d9dd7a8..a1bd079ba9784 100644 --- a/llvm/lib/Transforms/Utils/LoopUnrollAndJam.cpp +++ b/llvm/lib/Transforms/Utils/LoopUnrollAndJam.cpp @@ -417,7 +417,7 @@ llvm::UnrollAndJamLoop(Loop *L, unsigned Count, unsigned TripCount, auto BBIDom = BBDomNode->getIDom(); BasicBlock *OriginalBBIDom = BBIDom->getBlock(); assert(OriginalBBIDom); - assert(LastValueMap[cast(OriginalBBIDom)]); + assert_DISABLED(LastValueMap[cast(OriginalBBIDom)]); DT->addNewBlock( New, cast(LastValueMap[cast(OriginalBBIDom)])); } diff --git a/llvm/lib/Transforms/Utils/LoopUtils.cpp b/llvm/lib/Transforms/Utils/LoopUtils.cpp index 70047273c3b9a..de86886eb2f9a 100644 --- a/llvm/lib/Transforms/Utils/LoopUtils.cpp +++ b/llvm/lib/Transforms/Utils/LoopUtils.cpp @@ -63,7 +63,8 @@ bool llvm::formDedicatedExitBlocks(Loop *L, DominatorTree *DT, LoopInfo *LI, SmallVector InLoopPredecessors; auto RewriteExit = [&](BasicBlock *BB) { - assert(InLoopPredecessors.empty() && + ((void)InLoopPredecessors); + assert_DISABLED(InLoopPredecessors.empty() && "Must start with an empty predecessors list!"); auto Cleanup = make_scope_exit([&] { InLoopPredecessors.clear(); }); diff --git a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp index 72228b445a8b6..569d8be80d087 100644 --- a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp +++ 
b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp @@ -2569,7 +2569,7 @@ static bool sinkCommonCodeFromPredecessors(BasicBlock *BB, // But, that may make other instructions unprofitable, too. // So, do a backward scan, do any earlier instructions become // unprofitable? - assert( + assert_DISABLED( !ProfitableToSinkInstruction(LRI) && "We already know that the last instruction is unprofitable to sink"); ++LRI; diff --git a/llvm/lib/Transforms/Utils/ValueMapper.cpp b/llvm/lib/Transforms/Utils/ValueMapper.cpp index 3faea48466ba9..62d0478728c80 100644 --- a/llvm/lib/Transforms/Utils/ValueMapper.cpp +++ b/llvm/lib/Transforms/Utils/ValueMapper.cpp @@ -1114,7 +1114,7 @@ void Mapper::mapAppendingVariable(GlobalVariable &GV, Constant *InitPrefix, void Mapper::scheduleMapGlobalInitializer(GlobalVariable &GV, Constant &Init, unsigned MCID) { - assert(AlreadyScheduled.insert(&GV).second && "Should not reschedule"); + assert_DISABLED(AlreadyScheduled.insert(&GV).second && "Should not reschedule"); assert(MCID < MCs.size() && "Invalid mapping context"); WorklistEntry WE; @@ -1130,7 +1130,7 @@ void Mapper::scheduleMapAppendingVariable(GlobalVariable &GV, bool IsOldCtorDtor, ArrayRef NewMembers, unsigned MCID) { - assert(AlreadyScheduled.insert(&GV).second && "Should not reschedule"); + assert_DISABLED(AlreadyScheduled.insert(&GV).second && "Should not reschedule"); assert(MCID < MCs.size() && "Invalid mapping context"); WorklistEntry WE; @@ -1146,7 +1146,7 @@ void Mapper::scheduleMapAppendingVariable(GlobalVariable &GV, void Mapper::scheduleMapAliasOrIFunc(GlobalValue &GV, Constant &Target, unsigned MCID) { - assert(AlreadyScheduled.insert(&GV).second && "Should not reschedule"); + assert_DISABLED(AlreadyScheduled.insert(&GV).second && "Should not reschedule"); assert((isa(GV) || isa(GV)) && "Should be alias or ifunc"); assert(MCID < MCs.size() && "Invalid mapping context"); @@ -1160,7 +1160,7 @@ void Mapper::scheduleMapAliasOrIFunc(GlobalValue &GV, Constant &Target, } void 
Mapper::scheduleRemapFunction(Function &F, unsigned MCID) { - assert(AlreadyScheduled.insert(&F).second && "Should not reschedule"); + assert_DISABLED(AlreadyScheduled.insert(&F).second && "Should not reschedule"); assert(MCID < MCs.size() && "Invalid mapping context"); WorklistEntry WE; diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp index 60a94ca1f86e4..8ac753637df54 100644 --- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp +++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp @@ -2331,7 +2331,7 @@ void InnerLoopVectorizer::scalarizeInstruction(const Instruction *Instr, #if !defined(NDEBUG) // Verify that VPlan type inference results agree with the type of the // generated values. - assert(State.TypeAnalysis.inferScalarType(RepRecipe) == Cloned->getType() && + assert_DISABLED(State.TypeAnalysis.inferScalarType(RepRecipe) == Cloned->getType() && "inferred type and type from generated instructions do not match"); #endif } @@ -2489,7 +2489,7 @@ void InnerLoopVectorizer::emitIterationCountCheck(BasicBlock *Bypass) { #ifndef NDEBUG ScalarEvolution &SE = *PSE.getSE(); const SCEV *TC2OverflowSCEV = SE.applyLoopGuards(SE.getSCEV(LHS), OrigLoop); - assert( + assert_DISABLED( !isIndvarOverflowCheckKnownFalse(Cost, VF * UF) && !SE.isKnownPredicate(CmpInst::getInversePredicate(ICmpInst::ICMP_ULT), TC2OverflowSCEV, SE.getSCEV(Step)) && @@ -6693,7 +6693,7 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I, if (canTruncateToMinimalBitwidth(I, VF)) { Instruction *Op0AsInstruction = dyn_cast(I->getOperand(0)); (void)Op0AsInstruction; - assert((!canTruncateToMinimalBitwidth(Op0AsInstruction, VF) || + assert_DISABLED((!canTruncateToMinimalBitwidth(Op0AsInstruction, VF) || MinBWs[I] == MinBWs[Op0AsInstruction]) && "if both the operand and the compare are marked for " "truncation, they must have the same bitwidth"); @@ -7516,7 +7516,7 @@ VectorizationFactor LoopVectorizationPlanner::computeBestVF() { // 
different VF to be picked by the VPlan-based cost model. VPCostContext CostCtx(CM.TTI, *CM.TLI, Legal->getWidestInductionType(), CM); precomputeCosts(BestPlan, BestFactor.Width, CostCtx); - assert((BestFactor.Width == LegacyVF.Width || + assert_DISABLED((BestFactor.Width == LegacyVF.Width || planContainsAdditionalSimplifications(getPlanFor(BestFactor.Width), CostCtx, OrigLoop)) && " VPlan cost model and legacy cost model disagreed"); @@ -7628,7 +7628,7 @@ DenseMap LoopVectorizationPlanner::executePlan( ElementCount BestVF, unsigned BestUF, VPlan &BestVPlan, InnerLoopVectorizer &ILV, DominatorTree *DT, bool IsEpilogueVectorization, const DenseMap *ExpandedSCEVs) { - assert(BestVPlan.hasVF(BestVF) && + assert_DISABLED(BestVPlan.hasVF(BestVF) && "Trying to execute plan with unsupported VF"); assert(BestVPlan.hasUF(BestUF) && "Trying to execute plan with unsupported UF"); @@ -8301,7 +8301,7 @@ createWidenInductionRecipes(PHINode *Phi, Instruction *PhiOrTrunc, VPlan &Plan, ScalarEvolution &SE, Loop &OrigLoop) { assert(IndDesc.getStartValue() == Phi->getIncomingValueForBlock(OrigLoop.getLoopPreheader())); - assert(SE.isLoopInvariant(IndDesc.getStep(), &OrigLoop) && + assert_DISABLED(SE.isLoopInvariant(IndDesc.getStep(), &OrigLoop) && "step must be loop invariant"); VPValue *Step = diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp index 2afd02dae3a8b..3e8167e9d9eaf 100644 --- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp +++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp @@ -8192,7 +8192,7 @@ void BoUpSLP::buildTree_rec(ArrayRef VL, unsigned Depth, #endif if (!Bundle) { LLVM_DEBUG(dbgs() << "SLP: We are not able to schedule this bundle!\n"); - assert((!BS.getScheduleData(VL0) || + assert_DISABLED((!BS.getScheduleData(VL0) || !BS.getScheduleData(VL0)->isPartOfBundle()) && "tryScheduleBundle should cancelScheduling on failure"); newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx, diff --git 
a/llvm/lib/Transforms/Vectorize/VPRecipeBuilder.h b/llvm/lib/Transforms/Vectorize/VPRecipeBuilder.h index 5d4a3b555981c..4d205c271bbf3 100644 --- a/llvm/lib/Transforms/Vectorize/VPRecipeBuilder.h +++ b/llvm/lib/Transforms/Vectorize/VPRecipeBuilder.h @@ -158,7 +158,7 @@ class VPRecipeBuilder { VPRecipeBase *getRecipe(Instruction *I) { assert(Ingredient2Recipe.count(I) && "Recording this ingredients recipe was not requested"); - assert(Ingredient2Recipe[I] != nullptr && + assert_DISABLED(Ingredient2Recipe[I] != nullptr && "Ingredient doesn't have a recipe"); return Ingredient2Recipe[I]; } diff --git a/llvm/lib/Transforms/Vectorize/VPlan.cpp b/llvm/lib/Transforms/Vectorize/VPlan.cpp index 44ffcb954a284..42e04258911e1 100644 --- a/llvm/lib/Transforms/Vectorize/VPlan.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlan.cpp @@ -240,7 +240,7 @@ Value *VPTransformState::get(VPValue *Def, const VPLane &Lane) { return Data.VPV2Scalars[Def][0]; } - assert(hasVectorValue(Def)); + assert_DISABLED(hasVectorValue(Def)); auto *VecPart = Data.VPV2Vector[Def]; if (!VecPart->getType()->isVectorTy()) { assert(Lane.isFirstLane() && "cannot get lane > 0 for scalar"); @@ -255,7 +255,7 @@ Value *VPTransformState::get(VPValue *Def, const VPLane &Lane) { Value *VPTransformState::get(VPValue *Def, bool NeedsScalar) { if (NeedsScalar) { - assert((VF.isScalar() || Def->isLiveIn() || hasVectorValue(Def) || + assert_DISABLED((VF.isScalar() || Def->isLiveIn() || hasVectorValue(Def) || !vputils::onlyFirstLaneUsed(Def) || (hasScalarValue(Def, VPLane(0)) && Data.VPV2Scalars[Def].size() == 1)) && @@ -451,7 +451,7 @@ VPBasicBlock::createEmptyBasicBlock(VPTransformState::CFGState &CFG) { } void VPIRBasicBlock::execute(VPTransformState *State) { - assert(getHierarchicalSuccessors().size() <= 2 && + assert_DISABLED(getHierarchicalSuccessors().size() <= 2 && "VPIRBasicBlock can have at most two successors at the moment!"); State->Builder.SetInsertPoint(getIRBasicBlock()->getTerminator()); executeRecipes(State, 
getIRBasicBlock()); @@ -708,11 +708,11 @@ static std::pair cloneFrom(VPBlockBase *Entry) { vp_depth_first_shallow(Old2NewVPBlocks[Entry]))) { for (const auto &[OldPred, NewPred] : zip(OldBB->getPredecessors(), NewBB->getPredecessors())) - assert(NewPred == Old2NewVPBlocks[OldPred] && "Different predecessors"); + assert_DISABLED(NewPred == Old2NewVPBlocks[OldPred] && "Different predecessors"); for (const auto &[OldSucc, NewSucc] : zip(OldBB->successors(), NewBB->successors())) - assert(NewSucc == Old2NewVPBlocks[OldSucc] && "Different successors"); + assert_DISABLED(NewSucc == Old2NewVPBlocks[OldSucc] && "Different successors"); } #endif @@ -887,7 +887,7 @@ VPlanPtr VPlan::createInitialVPlan(Type *InductionTy, // uncountable exits whilst also ensuring the symbolic maximum and known // back-edge taken count remain identical for loops with countable exits. const SCEV *BackedgeTakenCountSCEV = PSE.getSymbolicMaxBackedgeTakenCount(); - assert((!isa(BackedgeTakenCountSCEV) && + assert_DISABLED((!isa(BackedgeTakenCountSCEV) && BackedgeTakenCountSCEV == PSE.getBackedgeTakenCount()) && "Invalid loop count"); ScalarEvolution &SE = *PSE.getSE(); diff --git a/llvm/lib/Transforms/Vectorize/VPlan.h b/llvm/lib/Transforms/Vectorize/VPlan.h index f2e6729a2e265..6a154f71c579d 100644 --- a/llvm/lib/Transforms/Vectorize/VPlan.h +++ b/llvm/lib/Transforms/Vectorize/VPlan.h @@ -3733,7 +3733,7 @@ class VPlan { void addVF(ElementCount VF) { VFs.insert(VF); } void setVF(ElementCount VF) { - assert(hasVF(VF) && "Cannot set VF not already in plan"); + assert_DISABLED(hasVF(VF) && "Cannot set VF not already in plan"); VFs.clear(); VFs.insert(VF); } @@ -3782,7 +3782,7 @@ class VPlan { } assert(Value2VPValue.count(V) && "Value does not exist in VPlan"); - assert(Value2VPValue[V]->isLiveIn() && + assert_DISABLED(Value2VPValue[V]->isLiveIn() && "Only live-ins should be in mapping"); return Value2VPValue[V]; } diff --git a/llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp 
b/llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp index 3eb5f3f40f842..e47e8778fd591 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp @@ -24,7 +24,7 @@ Type *VPTypeAnalysis::inferScalarTypeForRecipe(const VPBlendRecipe *R) { Type *ResTy = inferScalarType(R->getIncomingValue(0)); for (unsigned I = 1, E = R->getNumIncomingValues(); I != E; ++I) { VPValue *Inc = R->getIncomingValue(I); - assert(inferScalarType(Inc) == ResTy && + assert_DISABLED(inferScalarType(Inc) == ResTy && "different types inferred for different incoming values"); CachedTypes[Inc] = ResTy; } @@ -38,7 +38,7 @@ Type *VPTypeAnalysis::inferScalarTypeForRecipe(const VPInstruction *R) { Type *ResTy = inferScalarType(R->getOperand(0)); for (unsigned Op = 1; Op != R->getNumOperands(); ++Op) { VPValue *OtherV = R->getOperand(Op); - assert(inferScalarType(OtherV) == ResTy && + assert_DISABLED(inferScalarType(OtherV) == ResTy && "different types inferred for different operands"); CachedTypes[OtherV] = ResTy; } @@ -53,7 +53,7 @@ Type *VPTypeAnalysis::inferScalarTypeForRecipe(const VPInstruction *R) { case Instruction::Select: { Type *ResTy = inferScalarType(R->getOperand(1)); VPValue *OtherV = R->getOperand(2); - assert(inferScalarType(OtherV) == ResTy && + assert_DISABLED(inferScalarType(OtherV) == ResTy && "different types inferred for different operands"); CachedTypes[OtherV] = ResTy; return ResTy; @@ -116,7 +116,7 @@ Type *VPTypeAnalysis::inferScalarTypeForRecipe(const VPWidenRecipe *R) { case Instruction::Or: case Instruction::Xor: { Type *ResTy = inferScalarType(R->getOperand(0)); - assert(ResTy == inferScalarType(R->getOperand(1)) && + assert_DISABLED(ResTy == inferScalarType(R->getOperand(1)) && "types for both operands must match for binary op"); CachedTypes[R->getOperand(1)] = ResTy; return ResTy; @@ -150,7 +150,7 @@ Type *VPTypeAnalysis::inferScalarTypeForRecipe(const VPWidenMemoryRecipe *R) { Type 
*VPTypeAnalysis::inferScalarTypeForRecipe(const VPWidenSelectRecipe *R) { Type *ResTy = inferScalarType(R->getOperand(1)); VPValue *OtherV = R->getOperand(2); - assert(inferScalarType(OtherV) == ResTy && + assert_DISABLED(inferScalarType(OtherV) == ResTy && "different types inferred for different operands"); CachedTypes[OtherV] = ResTy; return ResTy; @@ -182,14 +182,14 @@ Type *VPTypeAnalysis::inferScalarTypeForRecipe(const VPReplicateRecipe *R) { case Instruction::Or: case Instruction::Xor: { Type *ResTy = inferScalarType(R->getOperand(0)); - assert(ResTy == inferScalarType(R->getOperand(1)) && + assert_DISABLED(ResTy == inferScalarType(R->getOperand(1)) && "inferred types for operands of binary op don't match"); CachedTypes[R->getOperand(1)] = ResTy; return ResTy; } case Instruction::Select: { Type *ResTy = inferScalarType(R->getOperand(1)); - assert(ResTy == inferScalarType(R->getOperand(2)) && + assert_DISABLED(ResTy == inferScalarType(R->getOperand(2)) && "inferred types for operands of select op don't match"); CachedTypes[R->getOperand(2)] = ResTy; return ResTy; diff --git a/llvm/lib/Transforms/Vectorize/VPlanHCFGBuilder.cpp b/llvm/lib/Transforms/Vectorize/VPlanHCFGBuilder.cpp index 6e633739fcc3d..c40fd8fab69d6 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanHCFGBuilder.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanHCFGBuilder.cpp @@ -268,7 +268,7 @@ VPValue *PlainCFGBuilder::getOrCreateVPOperand(Value *IRVal) { // For now, we use VPValue to represent A and B and classify both as external // definitions. We may introduce specific VPValue subclasses for them in the // future. - assert(isExternalDef(IRVal) && "Expected external definition as operand."); + assert_DISABLED(isExternalDef(IRVal) && "Expected external definition as operand."); // A and B: Create VPValue and add it to the pool of external definitions and // to the Value->VPValue map. 
diff --git a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp index 0eb4f7c7c88ce..cf3d4ae620e18 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp @@ -1374,7 +1374,7 @@ void VPWidenRecipe::execute(VPTransformState &State) { #if !defined(NDEBUG) // Verify that VPlan type inference results agree with the type of the // generated values. - assert(VectorType::get(State.TypeAnalysis.inferScalarType(this), State.VF) == + assert_DISABLED(VectorType::get(State.TypeAnalysis.inferScalarType(this), State.VF) == State.get(this)->getType() && "inferred type and type from generated instructions do not match"); #endif @@ -1461,7 +1461,7 @@ void VPWidenEVLRecipe::execute(VPTransformState &State) { State.setDebugLocFrom(getDebugLoc()); - assert(State.get(getOperand(0))->getType()->isVectorTy() && + assert_DISABLED(State.get(getOperand(0))->getType()->isVectorTy() && "VPWidenEVLRecipe should not be used for scalars"); VPValue *EVL = getEVL(); @@ -3137,7 +3137,7 @@ void VPWidenPointerInductionRecipe::execute(VPTransformState &State) { "Not a pointer induction according to InductionDescriptor!"); assert(cast(getUnderlyingInstr())->getType()->isPointerTy() && "Unexpected type."); - assert(!onlyScalarsGenerated(State.VF.isScalable()) && + assert_DISABLED(!onlyScalarsGenerated(State.VF.isScalable()) && "Recipe should have been replaced"); auto *IVR = getParent()->getPlan()->getCanonicalIV(); @@ -3198,7 +3198,7 @@ void VPWidenPointerInductionRecipe::execute(VPTransformState &State) { StartOffset = State.Builder.CreateAdd( StartOffset, State.Builder.CreateStepVector(VecPhiType)); - assert(ScalarStepValue == State.get(getOperand(1), VPLane(0)) && + assert_DISABLED(ScalarStepValue == State.get(getOperand(1), VPLane(0)) && "scalar step must be the same across all parts"); Value *GEP = State.Builder.CreateGEP( State.Builder.getInt8Ty(), NewPointerPhi, diff --git 
a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp index 03c4110761ac6..c3b7378339eda 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp @@ -669,7 +669,7 @@ static void recursivelyDeleteDeadRecipes(VPValue *V) { void VPlanTransforms::optimizeForVFAndUF(VPlan &Plan, ElementCount BestVF, unsigned BestUF, PredicatedScalarEvolution &PSE) { - assert(Plan.hasVF(BestVF) && "BestVF is not available in Plan"); + assert_DISABLED(Plan.hasVF(BestVF) && "BestVF is not available in Plan"); assert(Plan.hasUF(BestUF) && "BestUF is not available in Plan"); VPBasicBlock *ExitingVPBB = Plan.getVectorLoopRegion()->getExitingBasicBlock(); @@ -890,7 +890,7 @@ bool VPlanTransforms::adjustFixedOrderRecurrences(VPlan &Plan, while (auto *PrevPhi = dyn_cast_or_null(Previous)) { assert(PrevPhi->getParent() == FOR->getParent()); - assert(SeenPhis.insert(PrevPhi).second); + assert_DISABLED(SeenPhis.insert(PrevPhi).second); Previous = PrevPhi->getBackedgeValue()->getDefiningRecipe(); } @@ -1042,13 +1042,13 @@ static void simplifyRecipe(VPRecipeBase &R, VPTypeAnalysis &TypeInfo) { // accurate by comparing it to freshly computed types. 
VPTypeAnalysis TypeInfo2( R.getParent()->getPlan()->getCanonicalIV()->getScalarType()); - assert(TypeInfo.inferScalarType(A) == TypeInfo2.inferScalarType(A)); + assert_DISABLED(TypeInfo.inferScalarType(A) == TypeInfo2.inferScalarType(A)); for (VPUser *U : A->users()) { auto *R = dyn_cast(U); if (!R) continue; for (VPValue *VPV : R->definedValues()) - assert(TypeInfo.inferScalarType(VPV) == TypeInfo2.inferScalarType(VPV)); + assert_DISABLED(TypeInfo.inferScalarType(VPV) == TypeInfo2.inferScalarType(VPV)); } #endif } @@ -1380,7 +1380,7 @@ static SmallVector collectAllHeaderMasks(VPlan &Plan) { auto *FoundWidenCanonicalIVUser = find_if(Plan.getCanonicalIV()->users(), [](VPUser *U) { return isa(U); }); - assert(count_if(Plan.getCanonicalIV()->users(), + assert_DISABLED(count_if(Plan.getCanonicalIV()->users(), [](VPUser *U) { return isa(U); }) <= 1 && "Must have at most one VPWideCanonicalIVRecipe"); diff --git a/llvm/lib/Transforms/Vectorize/VPlanUnroll.cpp b/llvm/lib/Transforms/Vectorize/VPlanUnroll.cpp index ca78f32506ef7..7e93063d72575 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanUnroll.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanUnroll.cpp @@ -85,7 +85,7 @@ class UnrollState { VPValue *getValueForPart(VPValue *V, unsigned Part) { if (Part == 0 || V->isLiveIn()) return V; - assert((VPV2Parts.contains(V) && VPV2Parts[V].size() >= Part) && + assert_DISABLED((VPV2Parts.contains(V) && VPV2Parts[V].size() >= Part) && "accessed value does not exist"); return VPV2Parts[V][Part - 1]; } diff --git a/llvm/tools/bugpoint/ListReducer.h b/llvm/tools/bugpoint/ListReducer.h index 06f8ddb255346..1b988421bc325 100644 --- a/llvm/tools/bugpoint/ListReducer.h +++ b/llvm/tools/bugpoint/ListReducer.h @@ -99,7 +99,7 @@ template struct ListReducer { // TODO: Previously, this error was ignored and we treated it as if // shuffling hid the bug. This should really either be consumeError if // that behaviour was sensible, or we should propagate the error. 
- assert(!Result.takeError() && "Shuffling caused internal error?"); + assert_DISABLED(!Result.takeError() && "Shuffling caused internal error?"); if (*Result == KeepPrefix) { // If the bug is still here, use the shuffled list. diff --git a/llvm/tools/llvm-exegesis/lib/Clustering.cpp b/llvm/tools/llvm-exegesis/lib/Clustering.cpp index fc79718fdeb22..f1c6c0d9efe4a 100644 --- a/llvm/tools/llvm-exegesis/lib/Clustering.cpp +++ b/llvm/tools/llvm-exegesis/lib/Clustering.cpp @@ -304,7 +304,7 @@ void BenchmarkClustering::stabilize(unsigned NumOpcodes) { [this, &Key](size_t P) { return OpcodeAndConfig(Points_[P]) != Key; }); - assert(std::distance(it, OldCluster.PointIndices.end()) > 0 && + assert_DISABLED(std::distance(it, OldCluster.PointIndices.end()) > 0 && "Should have found at least one bad point"); // Mark to-be-moved points as belonging to the new cluster. for (size_t P : make_range(it, OldCluster.PointIndices.end())) diff --git a/llvm/utils/TableGen/AsmWriterEmitter.cpp b/llvm/utils/TableGen/AsmWriterEmitter.cpp index 3f09564cc0d65..5bcc39838e8fa 100644 --- a/llvm/utils/TableGen/AsmWriterEmitter.cpp +++ b/llvm/utils/TableGen/AsmWriterEmitter.cpp @@ -728,7 +728,7 @@ class IAPrinter { StringRef getResult() { return Result; } - bool isOpMapped(StringRef Op) { return OpMap.find(Op) != OpMap.end(); } + bool isOpMapped(StringRef Op) const { return OpMap.find(Op) != OpMap.end(); } int getOpIndex(StringRef Op) { return OpMap[Op].first; } std::pair &getOpData(StringRef Op) { return OpMap[Op]; } diff --git a/llvm/utils/TableGen/Common/CodeGenDAGPatterns.h b/llvm/utils/TableGen/Common/CodeGenDAGPatterns.h index f85753ff5ac80..f1e3f6c8e9c0d 100644 --- a/llvm/utils/TableGen/Common/CodeGenDAGPatterns.h +++ b/llvm/utils/TableGen/Common/CodeGenDAGPatterns.h @@ -943,6 +943,7 @@ class TreePattern { return Args[i]; } std::vector &getArgList() { return Args; } + const std::vector &getArgList() const { return Args; } CodeGenDAGPatterns &getDAGPatterns() const { return CDP; } diff --git 
a/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTable.cpp b/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTable.cpp index 5de5dd894f84e..7d23bd803b2d3 100644 --- a/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTable.cpp +++ b/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTable.cpp @@ -2134,7 +2134,7 @@ void CustomOperandRenderer::emitRenderOpcodes(MatchTable &Table, //===- BuildMIAction ------------------------------------------------------===// -bool BuildMIAction::canMutate(RuleMatcher &Rule, +bool BuildMIAction::canMutate(const RuleMatcher &Rule, const InstructionMatcher *Insn) const { if (!Insn || Insn->hasVariadicMatcher()) return false; diff --git a/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTable.h b/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTable.h index 00fe073057c5c..0123ceb39e3b4 100644 --- a/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTable.h +++ b/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTable.h @@ -2348,7 +2348,7 @@ class BuildMIAction : public MatchAction { std::vector UnsetFlags; /// True if the instruction can be built solely by mutating the opcode. 
- bool canMutate(RuleMatcher &Rule, const InstructionMatcher *Insn) const; + bool canMutate(const RuleMatcher &Rule, const InstructionMatcher *Insn) const; public: BuildMIAction(unsigned InsnID, const CodeGenInstruction *I) diff --git a/llvm/utils/TableGen/DAGISelMatcherOpt.cpp b/llvm/utils/TableGen/DAGISelMatcherOpt.cpp index 590786bb7fced..fefbe01851ff2 100644 --- a/llvm/utils/TableGen/DAGISelMatcherOpt.cpp +++ b/llvm/utils/TableGen/DAGISelMatcherOpt.cpp @@ -424,7 +424,7 @@ static void FactorNodes(std::unique_ptr &InputMatcherPtr) { SmallVector, 8> Cases; for (unsigned i = 0, e = OptionsToMatch.size(); i != e; ++i) { CheckOpcodeMatcher *COM = cast(OptionsToMatch[i]); - assert(Opcodes.insert(COM->getOpcode().getEnumName()).second && + assert(const_cast<decltype(Opcodes) &>(Opcodes).insert(COM->getOpcode().getEnumName()).second && "Duplicate opcodes not factored?"); Cases.push_back(std::pair(&COM->getOpcode(), COM->takeNext())); delete COM; diff --git a/llvm/utils/TableGen/GlobalISelCombinerEmitter.cpp b/llvm/utils/TableGen/GlobalISelCombinerEmitter.cpp index 424f1ccb067f9..53567feb96f43 100644 --- a/llvm/utils/TableGen/GlobalISelCombinerEmitter.cpp +++ b/llvm/utils/TableGen/GlobalISelCombinerEmitter.cpp @@ -2583,7 +2583,7 @@ void GICombinerEmitter::emitTestSimplePredicate(raw_ostream &OS) { unsigned ExpectedID = 0; (void)ExpectedID; for (const auto &ID : keys(AllCombineRules)) { - assert(ExpectedID++ == ID && "combine rules are not ordered!"); + assert(const_cast<unsigned &>(ExpectedID)++ == ID && "combine rules are not ordered!"); OS << " " << getIsEnabledPredicateEnumName(ID) << EnumeratorSeparator; EnumeratorSeparator = ",\n"; } } diff --git a/llvm/utils/TableGen/RISCVTargetDefEmitter.cpp b/llvm/utils/TableGen/RISCVTargetDefEmitter.cpp index 23496a37d5ea1..b3c6fa9d7d68a 100644 --- a/llvm/utils/TableGen/RISCVTargetDefEmitter.cpp +++ b/llvm/utils/TableGen/RISCVTargetDefEmitter.cpp @@ -218,7 +218,8 @@ static void emitRISCVExtensionBitmask(const RecordKeeper &RK, raw_ostream &OS) { }); #ifndef
NDEBUG - llvm::DenseSet> Seen; + using SeenType = llvm::DenseSet> ; + SeenType Seen; #endif OS << "#ifdef GET_RISCVExtensionBitmaskTable_IMPL\n"; @@ -231,7 +232,7 @@ static void emitRISCVExtensionBitmask(const RecordKeeper &RK, raw_ostream &OS) { ExtName.consume_front("experimental-"); #ifndef NDEBUG - assert(Seen.insert(std::make_pair(GroupIDVal, BitPosVal)).second && + assert(const_cast<SeenType &>(Seen).insert(std::make_pair(GroupIDVal, BitPosVal)).second && "duplicated bitmask"); #endif diff --git a/llvm/utils/TableGen/VTEmitter.cpp b/llvm/utils/TableGen/VTEmitter.cpp index 8f4bcd5fccc73..3b5c3ecaae512 100644 --- a/llvm/utils/TableGen/VTEmitter.cpp +++ b/llvm/utils/TableGen/VTEmitter.cpp @@ -112,8 +112,9 @@ void VTEmitter::run(raw_ostream &OS) { if (Valid) { if (!VTRanges.count(Key)) VTRanges[Key].First = Name; - assert(!VTRanges[Key].Closed && "Gap detected!"); - VTRanges[Key].Last = Name; + auto& RangeVal = VTRanges[Key]; + assert(!RangeVal.Closed && "Gap detected!"); + RangeVal.Last = Name; } else if (VTRanges.count(Key)) { VTRanges[Key].Closed = true; }