diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 484b947bda40..efb736e55e31 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -11,140 +11,4 @@ # See https://llvm.org/docs/DeveloperPolicy.html#maintainers as well as the # Maintainers.* files in the the respective subproject directories. -/libcxx/ @llvm/reviewers-libcxx -/libcxxabi/ @llvm/reviewers-libcxxabi -/libunwind/ @llvm/reviewers-libunwind -/runtimes/ @llvm/reviewers-libcxx - -/llvm/lib/Analysis/BasicAliasAnalysis.cpp @nikic -/llvm/lib/Analysis/InstructionSimplify.cpp @nikic -/llvm/lib/Analysis/LazyValueInfo.cpp @nikic -/llvm/lib/Analysis/ScalarEvolution.cpp @nikic -/llvm/lib/Analysis/ValueTracking.cpp @nikic -/llvm/lib/IR/ConstantRange.cpp @nikic -/llvm/lib/IR/Core.cpp @nikic -/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp @nikic -/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp @nikic -/llvm/lib/Transforms/InstCombine/ @nikic - -/clang/test/CXX/drs/ @Endilll -/clang/www/cxx_dr_status.html @Endilll -/clang/www/make_cxx_dr_status @Endilll - -/clang/include/clang/CIR @lanza @bcardosolopes -/clang/lib/CIR @lanza @bcardosolopes -/clang/tools/cir-* @lanza @bcardosolopes - -/lldb/ @JDevlieghere - -# MLIR Interfaces. 
-/mlir/include/mlir/Interfaces/TilingInterface.* @MaheshRavishankar @nicolasvasilache -/mlir/lib/Interfaces/TilingInterface.* @MaheshRavishankar @nicolasvasilache -/mlir/include/mlir/Interfaces/ValueBoundsOpInterface.* @matthias-springer -/mlir/lib/Interfaces/ValueBoundsOpInterface.* @matthias-springer -/mlir/**/ValueBoundsOpInterfaceImpl.* @matthias-springer -/mlir/include/mlir/Interfaces/RuntimeVerifiableOpInterface.* @matthias-springer -/mlir/lib/Interfaces/RuntimeVerifiableOpInterface.* @matthias-springer -/mlir/**/RuntimeVerifiableOpInterfaceImpl.* @matthias-springer -/mlir/include/mlir/Interfaces/SubsetOpInterface.* @matthias-springer -/mlir/lib/Interfaces/SubsetOpInterface.* @matthias-springer -/mlir/**/SubsetOpInterfaceImpl.* @matthias-springer -/mlir/include/mlir/Interfaces/DestinationStyleOpInterface.* @matthias-springer -/mlir/lib/Interfaces/DestinationStyleOpInterface.* @matthias-springer - -# Bufferization Dialect in MLIR. -/mlir/include/mlir/Dialect/Bufferization @matthias-springer -/mlir/lib/Dialect/Bufferization @matthias-springer -/mlir/**/BufferizableOpInterfaceImpl.* @matthias-springer -/mlir/Dialect/*/Transforms/Bufferize.cpp @matthias-springer - -# Linalg Dialect in MLIR. -/mlir/include/mlir/Dialect/Linalg @dcaballe @nicolasvasilache @rengolin -/mlir/lib/Dialect/Linalg @dcaballe @nicolasvasilache @rengolin -/mlir/lib/Dialect/Linalg/Transforms/DecomposeLinalgOps.cpp @MaheshRavishankar @nicolasvasilache -/mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp @dcaballe @MaheshRavishankar @nicolasvasilache -/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp @MaheshRavishankar @nicolasvasilache -/mlir/lib/Dialect/Linalg/Transforms/DataLayoutPropagation.cpp @hanhanW @nicolasvasilache -/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp @dcaballe @hanhanW @nicolasvasilache -/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp @banach-space @dcaballe @hanhanW @nicolasvasilache @Groverkss - -# MemRef Dialect in MLIR. 
-/mlir/lib/Dialect/MemRef/Transforms/EmulateNarrowType.cpp @MaheshRavishankar @nicolasvasilache - -# Vector Dialect in MLIR. -/mlir/**/*AMX* @aartbik @dcaballe -/mlir/**/*Neon* @banach-space @dcaballe @nicolasvasilache -/mlir/**/*SME* @banach-space @dcaballe @nicolasvasilache -/mlir/**/*SVE* @banach-space @dcaballe @nicolasvasilache -/mlir/**/*VectorInterfaces* @dcaballe @nicolasvasilache -/mlir/**/*VectorToSCF* @banach-space @dcaballe @matthias-springer @nicolasvasilache -/mlir/**/*VectorToLLVM* @banach-space @dcaballe @nicolasvasilache -/mlir/**/*X86Vector* @aartbik @dcaballe @nicolasvasilache -/mlir/include/mlir/Dialect/Vector @banach-space @dcaballe @nicolasvasilache @Groverkss -/mlir/include/mlir/Dialect/Vector/IR @kuhar -/mlir/lib/Dialect/Vector @banach-space @dcaballe @nicolasvasilache @Groverkss -/mlir/lib/Dialect/Vector/Transforms/* @banach-space @dcaballe @hanhanW @nicolasvasilache -/mlir/lib/Dialect/Vector/Transforms/VectorEmulateNarrowType.cpp @banach-space @dcaballe @MaheshRavishankar @nicolasvasilache -/mlir/**/*EmulateNarrowType* @dcaballe @hanhanW - -# Presburger library in MLIR -/mlir/**/*Presburger* @Groverkss @Superty - -# Tensor Dialect in MLIR. -/mlir/lib/Dialect/Tensor/IR/TensorTilingInterfaceImpl.cpp @hanhanW @nicolasvasilache -/mlir/lib/Dialect/Tensor/Transforms/* @hanhanW @nicolasvasilache - -# Transform Dialect in MLIR. -/mlir/include/mlir/Dialect/Transform/* @ftynse @nicolasvasilache -/mlir/lib/Dialect/Transform/* @ftynse @nicolasvasilache -/mlir/**/*TransformOps* @ftynse @nicolasvasilache - -# SPIR-V Dialect in MLIR. -/mlir/**/SPIRV/ @antiagainst @kuhar -/mlir/**/SPIRVTo*/ @antiagainst @kuhar -/mlir/**/*ToSPIRV/ @antiagainst @kuhar -/mlir/tools/mlir-tblgen/SPIRVUtilsGen.cpp @antiagainst @kuhar - -# MLIR Sparsifier. 
-/mlir/**/*SparseTensor*/ @aartbik @PeimingLiu @yinying-lisa-li @matthias-springer - -# MLIR NVGPU Dialect -/mlir/**/NVGPU*/ @grypp -/mlir/test/**/CUDA/ @grypp - -# MLIR NVVM Dialect in MLIR -/mlir/**/LLVMIR/**/BasicPtxBuilderInterface* @grypp -/mlir/**/NVVM* @grypp - -# MLIR Index Dialect -/mlir/**/Index* @mogball - -# MLIR Python Bindings -/mlir/test/python/ @ftynse @makslevental @stellaraccident -/mlir/python/ @ftynse @makslevental @stellaraccident - -# MLIR Mem2Reg/SROA -/mlir/**/Transforms/Mem2Reg.* @moxinilian -/mlir/**/Transforms/SROA.* @moxinilian - -# BOLT -/bolt/ @aaupov @maksfb @rafaelauler @ayermolo @dcci @yota9 - -# Bazel build system. -/utils/bazel/ @rupprecht @keith - -# InstallAPI and TextAPI -/llvm/**/TextAPI/ @cyndyishida -/clang/**/InstallAPI/ @cyndyishida -/clang/tools/clang-installapi/ @cyndyishida - -# ExtractAPI -/clang/**/ExtractAPI @daniel-grumberg @QuietMisdreavus - -# DWARFLinker, dwarfutil, dsymutil -/llvm/**/DWARFLinker/ @JDevlieghere -/llvm/**/dsymutil/ @JDevlieghere -/llvm/**/llvm-dwarfutil/ @JDevlieghere - -# libclang/Python bindings -/clang/bindings/python @DeinAlptraum +* @intel/npu-plugin-llvm-maintain diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 000000000000..355ebf0ad697 --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,15 @@ +## Summary +> Please add a short but exhaustive summary why you think your pull request is useful + +## JIRA ticket + +* E-xxxxx + +## Related PR in NPU Compiler and/or OpenVINO repository with sub-module update + +* PR-xxx + +### Other related tickets +> List tickets for additional work, eg, something was found during review but you agreed to address it in another Jira + +* E-xxxxx diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 8c1dfd39b82c..028c0b2b74eb 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -16,3 +16,48 @@ updates: llvm-docs-requirements: patterns: - "*" + + - package-ecosystem: 
docker + directory: /.github/workflows/containers/github-action-ci + schedule: + interval: daily + + - package-ecosystem: docker + directory: /bolt/utils/docker + schedule: + interval: daily + + - package-ecosystem: nuget + directory: /clang/tools/clang-format-vs/ClangFormat + schedule: + interval: daily + + - package-ecosystem: docker + directory: /clang/tools/clang-fuzzer + schedule: + interval: daily + + - package-ecosystem: docker + directory: /clang/utils/analyzer + schedule: + interval: daily + + - package-ecosystem: pip + directory: /clang/utils/analyzer + schedule: + interval: daily + + - package-ecosystem: pip + directory: /flang/examples/FlangOmpReport + schedule: + interval: daily + + - package-ecosystem: docker + directory: /libc/utils/buildbot + schedule: + interval: daily + + - package-ecosystem: docker + directory: /libcxx/utils/ci + schedule: + interval: daily diff --git a/.github/new-issues-labeler.yml b/.github/new-issues-labeler.yml deleted file mode 100644 index ee7506c1366e..000000000000 --- a/.github/new-issues-labeler.yml +++ /dev/null @@ -1,38 +0,0 @@ -'clang': - - '/\bclang(?!\-)\b/i' - -'clang-format': - - '/\bclang-format/i' - -'clang-tidy': - - '/\bclang-tidy/i' - -'libc++': - - '/libc[+x]{2}(?!\-)/i' - -'libc++abi': - - '/libc[+x]{2}-?abi/i' - -'libc': - - '/\blibc(?![-+])\b/i' - -'flang': - - '/\bflang(?!\-)\b/i' - -'lld': - - '/\blld(?!\-)\b/i' - -'mlir': - - '/\bmlir(?!\-)\b/i' - -'bolt': - - '/\bbolt(?!\-)\b/i' - -'infra:commit-access-request': - - '/Request Commit Access/' - -'false-positive': - - '\bfalse[- ]positive\b' - -'false-negative': - - '\bfalse[- ]negative\b' diff --git a/.github/new-prs-labeler.yml b/.github/new-prs-labeler.yml deleted file mode 100644 index 566308bb3df8..000000000000 --- a/.github/new-prs-labeler.yml +++ /dev/null @@ -1,1040 +0,0 @@ -BOLT: - - bolt/**/* - -ClangIR: - - clang/include/clang/CIR/**/* - - clang/lib/CIR/**/* - - clang/tools/cir-*/**/* - - clang/test/CIR/**/* - -clang:dataflow: - - 
clang/include/clang/Analysis/FlowSensitive/**/* - - clang/lib/Analysis/FlowSensitive/**/* - - clang/unittests/Analysis/FlowSensitive/**/* - - clang/docs/DataFlowAnalysisIntro.md - - clang/docs/DataFlowAnalysisIntroImages/**/* - -clang:frontend: - - clang/lib/AST/**/* - - clang/include/clang/AST/**/* - - clang/lib/Basic/**/* - - clang/include/clang/Basic/**/* - - clang/lib/Interpreter/**/* - - clang/include/clang/Interpreter/**/* - - clang/lib/Lex/**/* - - clang/include/clang/Lex/**/* - - clang/lib/Parse/**/* - - clang/include/clang/Parse/**/* - - clang/lib/Sema/**/* - - clang/include/clang/Sema/**/* - -clang:headers: - - clang/lib/Headers/**/* - -compiler-rt: - - compiler-rt/**/* - -flang: - - flang/**/* - -flang:frontend: - - flang/Parser/**/* - - flang/Evaluate/**/* - - flang/Semantics/**/* - -HLSL: - - clang/*HLSL*/**/* - - clang/**/*HLSL* - - llvm/**/Frontend/HLSL/**/* - -lld: - - lld/**/* - -llvm-lit: - - llvm/utils/lit/**/* - -PGO: - - llvm/**/ProfileData/**/* - - llvm/**/SampleProfile* - - llvm/**/CodeGen/MIRSampleProfile* - - llvm/lib/Transforms/Instrumentation/CGProfile.cpp - - llvm/lib/Transforms/Instrumentation/ControlHeightReduction.cpp - - llvm/lib/Transforms/Instrumentation/IndirectCallPromotion.cpp - - llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp - - llvm/lib/Transforms/Instrumentation/PGO* - - llvm/lib/Transforms/Instrumentation/ValueProfile* - - llvm/test/Instrumentation/InstrProfiling/**/* - - llvm/test/Transforms/PGOProfile/**/* - - llvm/test/Transforms/SampleProfile/**/* - - llvm/**/llvm-profdata/**/* - - llvm/**/llvm-profgen/**/* - -vectorizers: - - llvm/lib/Transforms/Vectorize/**/* - - llvm/include/llvm/Transforms/Vectorize/**/* - -# IMPORTED FROM CODEOWNERS -LTO: - - llvm/*/LTO/** - - llvm/*/Linker/** - - llvm/*/ThinLTO/** - - llvm/lib/Transforms/*/FunctionImport* - - llvm/tools/gold/** - -mc: - - llvm/*/MC/** - -clang:driver: - - clang/*/Driver/** - -compiler-rt:asan: - - compiler-rt/lib/asan/** - - 
compiler-rt/include/sanitizer/asan_interface.h - - compiler-rt/test/asan/** - - compiler-rt/lib/asan_abi/** - - compiler-rt/test/asan_abi/** - -compiler-rt:builtins: - - compiler-rt/lib/builtins/** - - compiler-rt/test/builtins/** - -compiler-rt:cfi: - - compiler-rt/lib/cfi/** - - compiler-rt/test/cfi/** - -compiler-rt:fuzzer: - - compiler-rt/lib/fuzzer/** - - compiler-rt/include/fuzzer/** - - compiler-rt/test/fuzzer/** - -compiler-rt:hwasan: - - compiler-rt/lib/hwasan/** - - compiler-rt/include/sanitizer/hwasan_interface.h - - compiler-rt/test/hwasan/** - -compiler-rt:lsan: - - compiler-rt/lib/lsan/** - - compiler-rt/include/sanitizer/lsan_interface.h - - compiler-rt/test/lsan/** - -compiler-rt:msan: - - compiler-rt/lib/msan/** - - compiler-rt/include/sanitizer/msan_interface.h - - compiler-rt/test/msan/** - -compiler-rt:sanitizer: - - llvm/lib/Transforms/Instrumentation/*Sanitizer* - - compiler-rt/lib/interception/** - - compiler-rt/lib/*san*/** - - compiler-rt/include/sanitizer/** - - compiler-rt/test/*san*/** - - compiler-rt/lib/fuzzer/** - - compiler-rt/include/fuzzer/** - - compiler-rt/test/fuzzer/** - - compiler-rt/lib/scudo/** - - compiler-rt/test/scudo/** - -compiler-rt:scudo: - - compiler-rt/lib/scudo/** - - compiler-rt/test/scudo/** - -compiler-rt:tsan: - - compiler-rt/lib/tsan/** - - compiler-rt/include/sanitizer/tsan_interface.h - - compiler-rt/include/sanitizer/tsan_interface_atomic.h - - compiler-rt/test/tsan/** - -compiler-rt:ubsan: - - compiler-rt/lib/ubsan/** - - compiler-rt/include/sanitizer/ubsan_interface.h - - compiler-rt/test/ubsan/** - - compiler-rt/lib/ubsan_minimal/** - - compiler-rt/test/ubsan_minimal/** - -xray: - - llvm/tools/llvm-xray/** - - compiler-rt/*/xray/** - - clang/include/clang/Basic/XRay* - - clang/lib/Basic/XRay* - - compiler-rt/*/xray/** - - llvm/include/llvm/XRay/** - - llvm/lib/XRay/** - - llvm/tools/llvm-xray/** - - llvm/unittests/XRay/** - - compiler-rt/*/xray/** - -clang:codegen: - - clang/lib/CodeGen/** - - 
clang/include/clang/CodeGen/** - -mlir: - - mlir/** - -mlir:core: - - mlir/include/mlir/Support/** - - mlir/lib/Support/** - - mlir/include/mlir/Parser/** - - mlir/lib/Parser/** - - mlir/include/mlir/IR/** - - mlir/lib/IR/** - - mlir/include/mlir/Bytecode/** - - mlir/lib/Bytecode/** - - mlir/include/mlir/AsmParser/** - - mlir/lib/AsmParser/** - - mlir/include/mlir/Pass/** - - mlir/lib/Pass/** - - mlir/include/mlir/Tools/** - - mlir/lib/Tools/** - - mlir/include/mlir/Reducer/** - - mlir/lib/Reducer/** - - mlir/include/mlir/Transforms/** - - mlir/lib/Transforms/** - - mlir/include/mlir/Debug/** - - mlir/lib/Debug/** - - mlir/tools/** - -mlir:ods: - - mlir/TableGen/** - - mlir/tblgen/** - - mlir/include/mlir/IR/*.td - -mlir:bindings: - - mlir/Bindings/** - -mlir:gpu: - - mlir/**/*GPU*/** - -mlir:amdgpu: - - mlir/**/AMDGPU/** - -mlir:amx: - - mlir/**/AMX/** - -mlir:affine: - - mlir/**/Affine/** - -mlir:arith: - - mlir/**/Arith/** - -mlir:neon: - - mlir/**/ArmNeon/** - -mlir:sme: - - mlir/**/ArmSME/** - -mlir:sve: - - mlir/**/ArmSVE/** - -mlir:async: - - mlir/**/Async/** - - mlir/**/Async/** - -mlir:bufferization: - - mlir/**/Bufferization/** - -mlir:complex: - - mlir/**/Complex/** - -mlir:cf: - - mlir/**/ControlFlow/** - -mlir:dlti: - - mlir/**/DLTI/** - -mlir:emitc: - - mlir/**/*EmitC*/** - - mlir/lib/Target/Cpp/** - -mlir:func: - - mlir/**/Func/** - -mlir:irdl: - - mlir/**/IRDL/** - -mlir:index: - - mlir/**/Index/** - -mlir:llvm: - - mlir/**/LLVM* - - mlir/**/LLVM*/** - -mlir:linalg: - - mlir/**/*linalg/** - - mlir/**/*Linalg/** - -mlir:mlprogram: - - mlir/**/MLProgram/** - -mlir:math: - - mlir/**/Math/** - -mlir:memref: - - mlir/**/MemRef/** - -mlir:nvgpu: - - mlir/**/NVGPU/** - -mlir:openacc: - - mlir/**/*OpenACC* - - mlir/**/*OpenACC*/** - -mlir:openmp: - - mlir/**/*OpenMP* - - mlir/**/*OpenMP*/** - -mlir:pdl: - - mlir/**/PDL/** - -mlir:quant: - - mlir/**/Quant/** - -mlir:scf: - - mlir/**/SCF/** - -mlir:spirv: - - mlir/**/SPIRV/** - - mlir/**/SPIRVTo*/** - - 
mlir/**/*ToSPIRV/** - - mlir/tools/mlir-spirv-cpu-runner/** - - mlir/tools/mlir-vulkan-runner/** - - mlir/tools/mlir-tblgen/SPIRVUtilsGen.cpp - -mlir:shape: - - mlir/**/Shape/** - -mlir:sparse: - - mlir/**/SparseTensor/** - -mlir:tensor: - - mlir/**/Tensor/** - -mlir:tosa: - - mlir/**/*Tosa*/** - -mlir:ub: - - mlir/**/UB/** - -mlir:vector: - - mlir/**/*Vector/** - -mlir:execution-engine: - - mlir/**/ExecutionEngine/** - -mlir:presburger: - - mlir/**/*Presburger*/** - -mlir:python: - - mlir/python/**/* - -mlir:vectorops: - - mlir/**/Vector/**/* - -coroutines: - - clang/docs/DebuggingCoroutines.rst - - clang/lib/Sema/SemaCoroutine.cpp - - clang/lib/CodeGen/CGCoroutine.cpp - - clang/test/CodeGenCoroutines/** - - llvm/docs/Coroutines.rst - - llvm/include/llvm/Transforms/Coroutines/** - - llvm/lib/Transforms/Coroutines/** - - llvm/test/Transforms/Coroutines/* - -clang:modules: - - clang/docs/StandardCPlusPlusModules.rst - - clang/include/clang/AST/AbstractBasicReader.h - - clang/include/clang/AST/AbstractBasicWriter.h - - clang/include/clang/AST/AbstractTypeReader.h - - clang/include/clang/AST/AbstractTypeWriter.h - - clang/include/clang/AST/PropertiesBase.td - - clang/include/clang/AST/ODRHash.h - - clang/include/clang/AST/TypeProperties.td - - clang/include/clang/Basic/Module.h - - clang/include/clang/Frontend/PrecompiledPreamble.h - - clang/include/clang/Lex/ModuleLoader.h - - clang/include/clang/Lex/ModuleMap.h - - clang/include/clang/Serialization/** - - clang/lib/AST/ODRHash.cpp - - clang/lib/AST/StmtProfile.cpp - - clang/lib/Basic/Module.cpp - - clang/lib/Frontend/ModuleDependencyCollector.cpp - - clang/lib/Frontend/PrecompiledPreamble.cpp - - clang/lib/Lex/ModuleMap.cpp - - clang/lib/Sema/SemaModule.cpp - - clang/lib/Serialization/** - - clang/test/CXX/module/** - - clang/test/Modules/** - - clang/unittests/Serialization/* - -clang-tidy: - - clang-tools-extra/clang-tidy/** - - clang-tools-extra/docs/clang-tidy/** - - clang-tools-extra/test/clang-tidy/** - 
-clang-tools-extra: - - clang-tools-extra/** - -tools:llvm-mca: - - llvm/tools/llvm-mca/** - - llvm/include/llvm/MCA/** - - llvm/lib/MCA/** - -clang: - - any: - - clang/** - - '!clang/**/Format/**' - - '!clang/tools/clang-format/**' - -testing-tools: - - llvm/include/llvm/FileCheck/** - - llvm/lib/FileCheck/** - - llvm/test/FileCheck/** - - llvm/unittests/FileCheck/** - - llvm/utils/lit/** - - llvm/utils/split-file/** - - llvm/utils/not/** - - llvm/utils/count/** - - llvm/utils/FileCheck/** - - llvm/docs/CommandGuide/FileCheck.rst - - llvm/docs/CommandGuide/lit.rst - - llvm/docs/TestingGuide.rst - - llvm/test/Other/FileCheck-space.txt - - llvm/utils/UpdateTestChecks/** - - llvm/utils/update*_test_checks.py - -debuginfo: - - clang/lib/CodeGen/CGDebugInfo.* - - llvm/include/llvm/BinaryFormat/Dwarf.* - - llvm/include/llvm/CodeGen/*Debug*.* - - llvm/include/llvm/DebugInfo/** - - llvm/include/llvm/Debuginfod/** - - llvm/include/llvm/Frontend/Debug/** - - llvm/include/llvm/IR/Debug*.* - - llvm/include/llvm/Object/*Debug*.* - - llvm/include/llvm/ObjectYAML/*Debug*.* - - llvm/include/llvm/Transforms/Utils/*Debug*.* - - llvm/include/llvm-c/DebugInfo.h - - llvm/lib/BinaryFormat/Dwarf.cpp - - llvm/lib/CodeGen/AsmPrinter/*Debug*.* - - llvm/lib/CodeGen/AsmPrinter/Dwarf*.* - - llvm/lib/CodeGen/AsmPrinter/DIE*.* - - llvm/lib/CodeGen/LiveDebugValues/** - - llvm/lib/CodeGen/*Debug*.* - - llvm/lib/CodeGen/DwarfEHPrepare.cpp - - llvm/lib/DebugInfo/** - - llvm/lib/Debuginfod/** - - llvm/lib/DWARFLinkerParallel/** - - llvm/lib/IR/Debug*.cpp - - llvm/lib/MC/MCDwarf.cpp - - llvm/lib/Transforms/Utils/*Debug*.* - - llvm/test/DebugInfo/** - - llvm/test/tools/dsymutil/** - - llvm/test/tools/llvm-debuginfo-analyzer/** - - llvm/test/tools/llvm-debuginfod/** - - llvm/test/tools/llvm-debuginfod-find/** - - llvm/test/tools/llvm-dwarfdump/** - - llvm/test/tools/llvm-dwarfutil/** - - llvm/test/tools/llvm-dwp/** - - llvm/test/tools/llvm-gsymutil/** - - llvm/test/tools/llvm-pdbuti/** - - 
llvm/tools/dsymutil/** - - llvm/tools/llvm-debuginfo-analyzer/** - - llvm/tools/llvm-debuginfod/** - - llvm/tools/llvm-debuginfod-find/** - - llvm/tools/llvm-dwarfdump/** - - llvm/tools/llvm-dwarfutil/** - - llvm/tools/llvm-dwp/** - - llvm/tools/llvm-gsymutil/** - - llvm/tools/llvm-pdbutil/** - -github:workflow: - - .github/workflows/** - -cmake: - - cmake/** - - llvm/cmake/** - - runtimes/** - -flang:driver: - - flang/tools/flang-driver/** - - flang/unittests/Frontend/** - - flang/lib/FrontendTool/** - - flang/lib/Frontend/** - - flang/include/flang/Frontend/** - - flang/include/flang/FrontendTool/** - - flang/test/Driver/** - -backend:m68k: - - llvm/lib/Target/M68k/** - - clang/lib/Basic/Targets/M68k.* - - clang/lib/CodeGen/Targets/M68k.cpp - - llvm/test/CodeGen/M68k/** - - llvm/test/MC/Disassembler/M68k/** - - llvm/test/MC/M68k/** - -libc++: - - libcxx/** - - .github/workflows/libcxx-* - -libc++abi: - - libcxxabi/** - -libunwind: - - libunwind/** - -objectyaml: - - llvm/include/llvm/ObjectYAML/** - - llvm/lib/ObjectYAML/** - - llvm/test/tools/obj2yaml/** - - llvm/test/tools/yaml2obj/** - - llvm/tools/obj2yaml/** - - llvm/tools/yaml2obj/** - -clang:analysis: - - clang/include/clang/Analysis/** - - clang/lib/Analysis/** - -clang:static analyzer: - - clang/include/clang/StaticAnalyzer/** - - clang/lib/StaticAnalyzer/** - - clang/tools/scan-build/** - - clang/utils/analyzer/** - - clang/docs/analyzer/** - -pgo: - - llvm/lib/Transforms/Instrumentation/CGProfile.cpp - - llvm/lib/Transforms/Instrumentation/ControlHeightReduction.cpp - - llvm/lib/Transforms/Instrumentation/IndirectCallPromotion.cpp - - llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp - - llvm/lib/Transforms/Instrumentation/PGO* - - llvm/lib/Transforms/Instrumentation/ValueProfile* - - llvm/test/Instrumentation/InstrProfiling/** - - llvm/test/Transforms/PGOProfile/** - - compiler-rt/lib/profile/** - - compiler-rt/lib/memprof/** - - compiler-rt/test/profile/** - - compiler-rt/test/memprof/** - - 
llvm/tools/llvm-profdata/** - - llvm/tools/llvm-profgen/** - - llvm/test/tools/llvm-profdata/** - - llvm/test/tools/llvm-profgen/** - - llvm/unittests/ProfileData/* - -openacc: - - flang/**/OpenACC/** - - flang/include/flang/Lower/OpenACC.h - - flang/docs/OpenACC.md - - flang/lib/Parser/openacc-parsers.cpp - - flang/lib/Lower/OpenACC.cpp - - llvm/**/Frontend/OpenACC/** - - llvm/unittests/Frontend/OpenACCTest.cpp - - mlir/test/Target/LLVMIR/openacc-llvm.mlir - - mlir/**/*OpenACC/** - -flang:runtime: - - flang/runtime/** - -flang:parser: - - flang/**/Parser/** - -flang:semantics: - - flang/**/Evaluate/** - - flang/**/Semantics/** - -flang:fir-hlfir: - - flang/**/Lower/** - - flang/**/Optimizer/** - -flang:codegen: - - flang/**/CodeGen/** - -llvm:globalisel: - - llvm/**/GlobalISel/** - - llvm/utils/TableGen/GlobalISel* - -function-specialization: - - llvm/include/llvm/Transforms/Utils/SCCPSolver.h - - llvm/lib/Transforms/Utils/SCCPSolver.cpp - - llvm/include/llvm/Transforms/IPO/FunctionSpecialization.h - - llvm/lib/Transforms/IPO/FunctionSpecialization.cpp - - llvm/test/Transforms/FunctionSpecialization/* - -libc: - - libc/** - - utils/bazel/llvm-project-overlay/libc/** - -clang-format: - - clang/**/Format/** - - clang/tools/clang-format/** - -flang:openmp: - - flang/test/**/OpenMP/** - - flang/lib/Lower/OpenMP.cpp - - flang/lib/Semantics/resolve-directives.cpp - - flang/lib/Semantics/check-omp-structure.cpp - - flang/lib/Optimizer/Transforms/OMP* - - flang/test/Fir/convert-to-llvm-openmp-and-fir.fir - - flang/test/Lower/OpenMP/** - - flang/test/Transforms/omp* - - mlir/**/*OpenMP* - - mlir/test/Target/LLVMIR/openmp* - - llvm/lib/Frontend/OpenMP/** - - llvm/include/llvm/Frontend/OpenMP/** - - llvm/unittests/Frontend/OpenMP* - -llvm:ir: - - llvm/lib/IR/** - - llvm/include/llvm/IR/** - - llvm/docs/LangRef.rst - - llvm/unittests/IR/** - -llvm:SandboxIR: - - llvm/lib/SandboxIR/** - - llvm/include/llvm/SandboxIR/** - - llvm/docs/SandboxIR.md - - llvm/unittests/SandboxIR/** 
- -llvm:analysis: - - llvm/lib/Analysis/** - - llvm/include/llvm/Analysis/** - - llvm/test/Analysis/** - - llvm/unittests/Analysis/** - -llvm:adt: - - llvm/**/ADT/* - -llvm:support: - - llvm/**/Support/** - -llvm:transforms: - - llvm/lib/Transforms/** - - llvm/include/llvm/Transforms/** - - llvm/test/Transforms/** - - llvm/unittests/Transforms/** - -llvm:instcombine: - - llvm/lib/Analysis/InstructionSimplify.cpp - - llvm/lib/Transforms/InstCombine/** - - llvm/include/llvm/Transforms/InstCombine/ - - llvm/include/llvm/Analysis/InstructionSimplify.h - - llvm/test/Transforms/InstCombine/** - - llvm/test/Transforms/InstSimplify/** - -clangd: - - clang-tools-extra/clangd/** - -hlsl: - - clang/test/ParserHLSL/** - - clang/test/SemaHLSL/** - - clang/test/AST/HLSL/** - - clang/test/CodeGenHLSL/** - - clang/cmake/caches/HLSL.cmake - - clang/include/clang/Basic/HLSL*.h - - clang/include/clang/Sema/HLSL*.h - - clang/docs/HLSL/** - - clang/lib/Driver/ToolChains/HLSL* - - clang/lib/Parse/ParseHLSL.cpp - - clang/lib/Sema/HLSLExternalSemaSource.cpp - - clang/lib/Sema/SemaHLSL.cpp - - clang/lib/CodeGen/CGHLSLRuntime.* - - llvm/include/llvm/Frontend/HLSL/** - - llvm/lib/Frontend/HLSL/** - -llvm:SelectionDAG: - - llvm/include/llvm/CodeGen/SelectionDAG*.h - - llvm/include/llvm/CodeGen/SDNodeProperties.td - - llvm/include/llvm/Target/TargetSelectionDAG.td - - llvm/lib/CodeGen/SelectionDAG/** - - llvm/utils/TableGen/CodeGenDAG* - - llvm/utils/TableGen/DAGISel* - - llvm/include/llvm/CodeGen/DAGCombine.h - - llvm/include/llvm/CodeGen/ISDOpcodes.h - -backend:DirectX: - - '**/*DirectX*' - - '**/*DXIL*' - - '**/*dxil*' - - '**/*DirectX*/**' - - '**/*DXIL*/**' - - '**/*dxil*/**' - - '**/*DXContainer*' - - '**/*DXContainer*/**' - -backend:SPIR-V: - - clang/lib/Driver/ToolChains/SPIRV.* - - clang/lib/Sema/SemaSPIRV.cpp - - clang/include/clang/Sema/SemaSPIRV.h - - clang/include/clang/Basic/BuiltinsSPIRV.td - - clang/test/CodeGenSPIRV/** - - clang/test/SemaSPIRV/** - - llvm/lib/Target/SPIRV/** - 
- llvm/test/CodeGen/SPIRV/** - - llvm/test/Frontend/HLSL/** - - llvm/docs/SPIRVUsage.rst - -mlgo: - - llvm/lib/Analysis/ML* - - llvm/include/llvm/Analysis/ML* - - llvm/lib/Analysis/*Runner.cpp - - llvm/include/llvm/Analysis/*Runner.h - - llvm/unittests/Analysis/ML* - - llvm/lib/Analysis/FunctionPropertiesAnalysis.cpp - - llvm/lib/Analysis/TrainingLogger.cpp - - llvm/include/llvm/Analysis/FunctionPropertiesAnalysis.h - - llvm/include/llvm/Analysis/Utils/TrainingLogger.h - - llvm/test/Analysis/FunctionPropertiesAnalysis/* - - llvm/unittests/Analysis/FunctionPropertiesAnalysisTest.cpp - - llvm/test/Transforms/inline/ML/** - - llvm/lib/CodeGen/ML* - - llvm/unittests/CodeGen/ML* - - llvm/test/CodeGen/MLRegAlloc/** - - llvm/utils/mlgo-utils/** - -tools:llvm-exegesis: - - llvm/tools/llvm-exegesis/** - - llvm/test/tools/llvm-exegesis/** - - llvm/unittests/tools/llvm-exegesis/** - -platform:windows: - - lld/COFF/** - - clang/lib/Driver/MSVC.cpp - - clang/lib/Driver/MinGW.cpp - - llvm/lib/DebugInfo/CodeView/** - - llvm/lib/DebugInfo/PDB/** - - llvm/lib/WindowsDriver/** - - llvm/lib/Support/Windows/** - - llvm/lib/BinaryFormat/COFF.cpp - -llvm:regalloc: - - llvm/**/CodeGen/CalcSpillWeights* - - llvm/**/CodeGen/InlineSpiller* - - llvm/**/CodeGen/InterferenceCache* - - llvm/**/CodeGen/LiveInterval* - - llvm/**/CodeGen/LiveRange* - - llvm/**/CodeGen/LiveReg* - - llvm/**/CodeGen/LiveVariables* - - llvm/**/CodeGen/MachineCopyPropagation* - - llvm/**/CodeGen/PHIElimination* - - llvm/**/CodeGen/ProcessImplicitDefs.cpp - - llvm/**/CodeGen/Register* - - llvm/**/CodeGen/RegUsage* - - llvm/**/CodeGen/RenameIndependentSubregs.cpp - - llvm/**/CodeGen/SlotIndexes.h - - llvm/**/CodeGen/SpillPlacement* - - llvm/**/CodeGen/SplitKit* - - llvm/**/CodeGen/VirtRegMap.h - - llvm/include/PBQP/** - - llvm/include/PBQPRAConstraint.h - - llvm/include/llvm/CodeGen/Spiller.h - - llvm/**/*RegAlloc - -lldb: - - lldb/** - -backend:AMDGPU: - - '**/*amdgpu*' - - '**/*AMDGPU*' - - '**/*amdgpu*/**' - - 
'**/*AMDGPU*/**' - -backend:NVPTX: - - 'llvm/**/*nvvm*' - - 'llvm/**/*NVVM*' - - 'llvm/**/*nvptx*' - - 'llvm/**/*NVPTX*' - - 'llvm/**/*nvvm*/**' - - 'llvm/**/*NVVM*/**' - - 'llvm/**/*nvptx*/**' - - 'llvm/**/*NVPTX*/**' - -backend:RISC-V: - - clang/**/*riscv* - - clang/**/*RISCV* - - llvm/**/*riscv* - - llvm/**/*RISCV* - -backend:Xtensa: - - clang/**/*xtensa* - - clang/**/*Xtensa* - - llvm/**/*xtensa* - - llvm/**/*Xtensa* - -lld:coff: - - lld/**/COFF/** - - lld/Common/** - -lld:elf: - - lld/**/ELF/** - - lld/Common/** - -lld:macho: - - lld/**/MachO/** - - lld/Common/** - -lld:wasm: - - lld/**/wasm/** - - lld/Common/** - -backend:ARM: - - llvm/include/llvm/IR/IntrinsicsARM.td - - llvm/test/MC/ARM/** - - llvm/lib/Target/ARM/** - - llvm/test/CodeGen/ARM/** - - clang/lib/Basic/Targets/ARM* - - clang/lib/Driver/ToolChains/Arch/ARM.* - - clang/lib/CodeGen/Targets/ARM.cpp - - clang/include/clang/Basic/BuiltinsARM* - - llvm/test/MC/DisasemblerARM/** - - clang/include/clang/Sema/SemaARM.h - - clang/lib/Sema/SemaARM.cpp - -backend:AArch64: - - llvm/include/llvm/IR/IntrinsicsAArch64.td - - llvm/test/MC/AArch64/** - - llvm/lib/Target/AArch64/** - - llvm/test/CodeGen/AArch64/** - - clang/lib/Basic/Targets/AArch64* - - clang/lib/Driver/ToolChains/Arch/AArch64.* - - clang/lib/CodeGen/Targets/AArch64.cpp - - clang/include/clang/Basic/BuiltinsAArch64* - - llvm/test/MC/Disassembler/AArch64/** - - clang/include/clang/Sema/SemaARM.h - - clang/lib/Sema/SemaARM.cpp - -backend:Hexagon: - - clang/include/clang/Basic/BuiltinsHexagon*.def - - clang/include/clang/Sema/SemaHexagon.h - - clang/lib/Basic/Targets/Hexagon.* - - clang/lib/CodeGen/Targets/Hexagon.cpp - - clang/lib/Driver/ToolChains/Hexagon.* - - clang/lib/Sema/SemaHexagon.cpp - - lld/ELF/Arch/Hexagon.cpp - - lldb/source/Plugins/ABI/Hexagon/** - - lldb/source/Plugins/DynamicLoader/Hexagon-DYLD/** - - llvm/include/llvm/BinaryFormat/ELFRelocs/Hexagon.def - - llvm/include/llvm/IR/IntrinsicsHexagon* - - llvm/include/llvm/Support/Hexagon* 
- - llvm/lib/Support/Hexagon* - - llvm/lib/Target/Hexagon/** - - llvm/test/CodeGen/Hexagon/** - - llvm/test/CodeGen/*/Hexagon/** - - llvm/test/DebugInfo/*/Hexagon/** - - llvm/test/Transforms/*/Hexagon - - llvm/test/MC/Disassembler/Hexagon/** - - llvm/test/MC/Hexagon/** - - llvm/test/tools/llvm-objdump/ELF/Hexagon/** - -backend:loongarch: - - llvm/include/llvm/IR/IntrinsicsLoongArch.td - - llvm/test/MC/LoongArch/** - - llvm/lib/Target/LoongArch/** - - llvm/test/CodeGen/LoongArch/** - - clang/lib/Basic/Targets/LoongArch* - - clang/lib/Driver/ToolChains/Arch/LoongArch.* - - clang/lib/CodeGen/Targets/LoongArch.cpp - - clang/include/clang/Basic/BuiltinsLoongArch* - - clang/include/clang/Sema/SemaLoongArch.h - - clang/lib/Sema/SemaLoongArch.cpp - -backend:MSP430: - - llvm/include/llvm/IR/IntrinsicsMSP430.td - - llvm/test/MC/MSP430/** - - llvm/lib/Target/MSP430/** - - llvm/test/CodeGen/MSP430/** - - clang/lib/Basic/Targets/MSP430* - - clang/lib/Driver/ToolChains/Arch/MSP430.* - - clang/lib/CodeGen/Targets/MSP430.cpp - - clang/include/clang/Basic/BuiltinsMSP430* - - llvm/test/MC/Disassembler/MSP430/** - -backend:Sparc: - - llvm/include/llvm/IR/IntrinsicsSparc.td - - llvm/test/MC/Sparc/** - - llvm/lib/Target/Sparc/** - - llvm/test/CodeGen/Sparc/** - - clang/lib/Basic/Targets/Sparc* - - clang/lib/Driver/ToolChains/Arch/Sparc.* - - clang/lib/CodeGen/Targets/Sparc.cpp - - clang/include/clang/Basic/BuiltinsSparc* - - llvm/test/MC/Disassembler/Sparc/** - -backend:WebAssembly: - - llvm/lib/Target/WebAssembly/** - - llvm/test/CodeGen/WebAssembly/** - - clang/lib/Basic/Targets/WebAssembly* - - clang/include/clang/Basic/BuiltinsWebAssembly.def - - clang/include/clang/Basic/WebAssemblyReferenceTypes.def - - clang/lib/CodeGen/Targets/WebAssembly* - - llvm/include/llvm/IR/IntinsicsWebAssembly.td - - llvm/include/llvm/Object/Wasm* - - llvm/lib/CodeGen/AsmPrinter/Wasm* - - llvm/lib/CodeGen/Wasm* - - llvm/lib/MC/MCParser/Wasm* - - llvm/lib/MC/Wasm* - - llvm/lib/ObjCopy/wasm/** - - 
llvm/lib/Object/Wasm* - - clang/lib/Driver/Toolchains/WebAssembly* - - clang/lib/Headers/wasm_simd128.h - - clang/test/CodeGen/WebAssembly/** - - clang/test/SemaCXX/*wasm* - - clang/test/Sema/*wasm* - - llvm/include/llvm/BinaryFormat/Wasm.h - - llvm/unittests/Target/WebAssembly/** - - llvm/test/DebugInfo/WebAssembly/** - - llvm/test/MC/WebAssembly/** - - clang/include/clang/Sema/SemaWasm.h - - clang/lib/Sema/SemaLoongWasm.cpp - -backend:X86: - - llvm/include/llvm/IR/IntrinsicsX86.td - - llvm/lib/Target/X86/** - - llvm/test/CodeGen/X86/** - - llvm/test/MC/X86/** - - llvm/test/MC/Disassembler/X86/** - - llvm/test/Analysis/CostModel/X86/** - - llvm/test/tools/llvm-mca/X86/** - - clang/lib/Basic/Targets/X86/** - - clang/lib/Driver/ToolChains/Arch/X86.* - - clang/lib/CodeGen/Targets/X86.* - - clang/lib/Headers/** - - clang/test/CodeGen/X86/** - - clang/include/clang/Basic/BuiltinsX86* - - llvm/include/llvm/Support/X86DisassemblerDecoderCommon.h - - llvm/include/llvm/TargetParser/X86* - - llvm/lib/TargetParser/X86* - - llvm/utils/TableGen/X86* - - clang/include/clang/Sema/SemaX86.h - - clang/lib/Sema/SemaX86.cpp - -backend:PowerPC: - - llvm/include/llvm/BinaryFormat/ELFRelocs/PowerPC* - - llvm/include/llvm/BinaryFormat/XCOFF.h - - llvm/include/llvm/IR/IntrinsicsPowerPC.td - - llvm/lib/CodeGen/AsmPrinter/AIXException.cpp - - llvm/lib/Target/PowerPC/** - - llvm/test/Analysis/**/PowerPC/** - - llvm/test/CodeGen/PowerPC/** - - llvm/test/CodeGen/MIR/PowerPC/** - - llvm/test/DebugInfo/XCOFF/** - - llvm/test/DebugInfo/PowerPC/** - - llvm/test/LTO/PowerPC/** - - llvm/test/MC/Disassembler/PowerPC/** - - llvm/test/MC/PowerPC/** - - llvm/test/MC/XCOFF/** - - llvm/test/Transforms/**/PowerPC/** - - clang/include/clang/Basic/BuiltinsPPC.* - - clang/lib/Basic/Targets/PPC.* - - clang/lib/CodeGen/Targets/PPC.cpp - - clang/lib/Driver/ToolChains/PPC* - - clang/lib/Driver/ToolChains/AIX* - - clang/lib/Driver/ToolChains/Arch/PPC.* - - clang/test/CodeGen/PowerPC/** - - 
clang/include/clang/Sema/SemaPPC.h - - clang/lib/Sema/SemaPPC.cpp - -backend:SystemZ: - - llvm/include/llvm/BinaryFormat/ELFRelocs/SystemZ* - - llvm/include/llvm/BinaryFormat/GOFF.h - - llvm/include/llvm/IR/IntrinsicsSystemZ.td - - llvm/lib/Target/SystemZ/** - - llvm/test/Analysis/**/SystemZ/** - - llvm/test/CodeGen/SystemZ/** - - llvm/test/DebugInfo/SystemZ/** - - llvm/test/ExecutionEngine/**/SystemZ/** - - llvm/test/MC/Disassembler/SystemZ/** - - llvm/test/MC/GOFF/** - - llvm/test/MC/SystemZ/** - - llvm/test/Transforms/**/SystemZ/** - - clang/include/clang/Basic/BuiltinsSystemZ.* - - clang/lib/Basic/Targets/SystemZ.* - - clang/lib/CodeGen/Targets/SystemZ.cpp - - clang/lib/Driver/ToolChains/ZOS* - - clang/lib/Driver/ToolChains/Arch/SystemZ.* - - clang/test/CodeGen/SystemZ/** - - clang/include/clang/Sema/SemaSystemZ.h - - clang/lib/Sema/SemaSystemZ.cpp - -third-party:unittests: - - third-party/unittests/** - -third-party:benchmark: - - third-party/benchmark/** - -llvm:binary-utilities: - - llvm/docs/CommandGuide/llvm-* - - llvm/include/llvm/BinaryFormat/** - - llvm/include/llvm/DebugInfo/Symbolize/** - - llvm/include/llvm/ObjCopy/** - - llvm/include/llvm/Object/** - - llvm/lib/BinaryFormat/** - - llvm/lib/DebugInfo/Symbolize/** - - llvm/lib/ObjCopy/** - - llvm/lib/Object/** - - llvm/test/Object/** - - llvm/test/tools/llvm-ar/** - - llvm/test/tools/llvm-cxxfilt/** - - llvm/test/tools/llvm-nm/** - - llvm/test/tools/llvm-objcopy/** - - llvm/test/tools/llvm-objdump/** - - llvm/test/tools/llvm-readobj/** - - llvm/test/tools/llvm-size/** - - llvm/test/tools/llvm-strings/** - - llvm/test/tools/llvm-symbolizer/** - - llvm/tools/llvm-ar/** - - llvm/tools/llvm-cxxfilt/** - - llvm/tools/llvm-nm/** - - llvm/tools/llvm-objcopy/** - - llvm/tools/llvm-objdump/** - - llvm/tools/llvm-readobj/** - - llvm/tools/llvm-size/** - - llvm/tools/llvm-strings/** - - llvm/tools/llvm-symbolizer/** - -clang:openmp: - - clang/include/clang/Basic/OpenMP* - - clang/include/clang/AST/OpenMPClause.h 
- - clang/include/clang/AST/DeclOpenMP.h - - clang/include/clang/AST/ExprOpenMP.h - - clang/include/clang/AST/StmtOpenMP.h - - clang/lib/AST/DeclOpenMP.cpp - - clang/lib/AST/OpenMPClause.cpp - - clang/lib/AST/StmtOpenMP.cpp - - clang/lib/Headers/openmp_wrappers/** - - clang/lib/Parse/ParseOpenMP.cpp - - clang/lib/Basic/OpenMPKinds.cpp - - clang/lib/Driver/ToolChains/AMDGPUOpenMP.cpp - - clang/lib/Driver/ToolChains/AMDGPUOpenMP.h - - clang/lib/CodeGen/CgStmtOpenMP.cpp - - clang/lib/CodeGen/CGOpenMP* - - clang/lib/Sema/SemaOpenMP.cpp - - clang/test/OpenMP/** - - clang/test/AST/ast-dump-openmp-* - - llvm/lib/Frontend/OpenMP/** - - llvm/lib/Transforms/IPO/OpenMPOpt.cpp - - llvm/include/llvm/Frontend/OpenMP/** - - llvm/include/llvm/Transforms/IPO/OpenMPOpt.h - - llvm/unittests/Frontend/OpenMP* - - llvm/test/Transforms/OpenMP/** - -clang:as-a-library: - - clang/tools/libclang/** - - clang/bindings/** - - clang/include/clang-c/** - - clang/test/LibClang/** - - clang/unittest/libclang/** - -openmp:libomp: - - any: ['openmp/**', '!openmp/libomptarget/**'] - -openmp:libomptarget: - - any: ['openmp/**', '!openmp/runtime/**'] - -bazel: - - utils/bazel/** - -offload: - - offload/** - -tablegen: - - llvm/include/TableGen/** - - llvm/lib/TableGen/** - - llvm/utils/TableGen/** diff --git a/.github/workflows/build-ci-container-windows.yml b/.github/workflows/build-ci-container-windows.yml deleted file mode 100644 index bba34066a97c..000000000000 --- a/.github/workflows/build-ci-container-windows.yml +++ /dev/null @@ -1,75 +0,0 @@ -name: Build Windows CI Container - -permissions: - contents: read - -on: - push: - branches: - - main - paths: - - .github/workflows/build-ci-container-windows.yml - - '.github/workflows/containers/github-action-ci-windows/**' - pull_request: - branches: - - main - paths: - - .github/workflows/build-ci-container-windows.yml - - '.github/workflows/containers/github-action-ci-windows/**' - -jobs: - build-ci-container-windows: - if: github.repository_owner 
== 'llvm' - runs-on: windows-2019 - outputs: - container-name: ${{ steps.vars.outputs.container-name }} - container-name-tag: ${{ steps.vars.outputs.container-name-tag }} - container-filename: ${{ steps.vars.outputs.container-filename }} - steps: - - name: Checkout LLVM - uses: actions/checkout@v4 - with: - sparse-checkout: .github/workflows/containers/github-action-ci-windows - - name: Write Variables - id: vars - run: | - $tag = [int64](Get-Date -UFormat %s) - $container_name="ghcr.io/$env:GITHUB_REPOSITORY_OWNER/ci-windows-2019" - echo "container-name=${container_name}" >> $env:GITHUB_OUTPUT - echo "container-name-tag=${container_name}:${tag}" >> $env:GITHUB_OUTPUT - echo "container-filename=ci-windows-${tag}.tar" >> $env:GITHUB_OUTPUT - - name: Build Container - working-directory: .github/workflows/containers/github-action-ci-windows - run: | - docker build -t ${{ steps.vars.outputs.container-name-tag }} . - - name: Save container image - run: | - docker save ${{ steps.vars.outputs.container-name-tag }} > ${{ steps.vars.outputs.container-filename }} - - name: Upload container image - uses: actions/upload-artifact@v4 - with: - name: container - path: ${{ steps.vars.outputs.container-filename }} - retention-days: 14 - - push-ci-container: - if: github.event_name == 'push' - needs: - - build-ci-container-windows - permissions: - packages: write - runs-on: windows-2019 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - steps: - - name: Download container - uses: actions/download-artifact@v4 - with: - name: container - - name: Push Container - run: | - docker load -i ${{ needs.build-ci-container-windows.outputs.container-filename }} - docker tag ${{ needs.build-ci-container-windows.outputs.container-name-tag }} ${{ needs.build-ci-container-windows.outputs.container-name }}:latest - docker login -u ${{ github.actor }} -p $env:GITHUB_TOKEN ghcr.io - docker push ${{ needs.build-ci-container-windows.outputs.container-name-tag }} - docker push ${{ 
needs.build-ci-container-windows.outputs.container-name }}:latest diff --git a/.github/workflows/build-ci-container.yml b/.github/workflows/build-ci-container.yml deleted file mode 100644 index 8a81d4718646..000000000000 --- a/.github/workflows/build-ci-container.yml +++ /dev/null @@ -1,101 +0,0 @@ -name: Build CI Container - -permissions: - contents: read - -on: - push: - branches: - - main - paths: - - .github/workflows/build-ci-container.yml - - '.github/workflows/containers/github-action-ci/**' - pull_request: - branches: - - main - paths: - - .github/workflows/build-ci-container.yml - - '.github/workflows/containers/github-action-ci/**' - -jobs: - build-ci-container: - if: github.repository_owner == 'llvm' - runs-on: depot-ubuntu-22.04-16 - outputs: - container-name: ${{ steps.vars.outputs.container-name }} - container-name-agent: ${{ steps.vars.outputs.container-name-agent }} - container-name-tag: ${{ steps.vars.outputs.container-name-tag }} - container-name-agent-tag: ${{ steps.vars.outputs.container-name-agent-tag }} - container-filename: ${{ steps.vars.outputs.container-filename }} - container-agent-filename: ${{ steps.vars.outputs.container-agent-filename }} - steps: - - name: Checkout LLVM - uses: actions/checkout@v4 - with: - sparse-checkout: .github/workflows/containers/github-action-ci/ - - name: Write Variables - id: vars - run: | - tag=`date +%s` - container_name="ghcr.io/$GITHUB_REPOSITORY_OWNER/ci-ubuntu-22.04" - echo "container-name=$container_name" >> $GITHUB_OUTPUT - echo "container-name-agent=$container_name-agent" >> $GITHUB_OUTPUT - echo "container-name-tag=$container_name:$tag" >> $GITHUB_OUTPUT - echo "container-name-agent-tag=$container_name-agent:$tag" >> $GITHUB_OUTPUT - echo "container-filename=$(echo $container_name:$tag | sed -e 's/\//-/g' -e 's/:/-/g').tar" >> $GITHUB_OUTPUT - echo "container-agent-filename=$(echo $container_name-agent:$tag | sed -e 's/\//-/g' -e 's/:/-/g').tar" >> $GITHUB_OUTPUT - - name: Build container - 
working-directory: ./.github/workflows/containers/github-action-ci/ - run: | - podman build --target ci-container -t ${{ steps.vars.outputs.container-name-tag }} . - podman build --target ci-container-agent -t ${{ steps.vars.outputs.container-name-agent-tag }} . - - # Save the container so we have it in case the push fails. This also - # allows us to separate the push step into a different job so we can - # maintain minimal permissions while building the container. - - name: Save container image - run: | - podman save ${{ steps.vars.outputs.container-name-tag }} > ${{ steps.vars.outputs.container-filename }} - podman save ${{ steps.vars.outputs.container-name-agent-tag }} > ${{ steps.vars.outputs.container-agent-filename }} - - - name: Upload container image - uses: actions/upload-artifact@v4 - with: - name: container - path: "*.tar" - retention-days: 14 - - - name: Test Container - run: | - for image in ${{ steps.vars.outputs.container-name-tag }}; do - # Use --pull=never to ensure we are testing the just built image. 
- podman run --pull=never --rm -it $image /usr/bin/bash -x -c 'cd $HOME && printf '\''#include \nint main(int argc, char **argv) { std::cout << "Hello\\n"; }'\'' | clang++ -x c++ - && ./a.out | grep Hello' - done - - push-ci-container: - if: github.event_name == 'push' - needs: - - build-ci-container - permissions: - packages: write - runs-on: ubuntu-24.04 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - steps: - - name: Download container - uses: actions/download-artifact@v4 - with: - name: container - - - name: Push Container - run: | - podman load -i ${{ needs.build-ci-container.outputs.container-filename }} - podman tag ${{ needs.build-ci-container.outputs.container-name-tag }} ${{ needs.build-ci-container.outputs.container-name }}:latest - podman login -u ${{ github.actor }} -p $GITHUB_TOKEN ghcr.io - podman push ${{ needs.build-ci-container.outputs.container-name-tag }} - podman push ${{ needs.build-ci-container.outputs.container-name }}:latest - - podman load -i ${{ needs.build-ci-container.outputs.container-agent-filename }} - podman tag ${{ needs.build-ci-container.outputs.container-name-agent-tag }} ${{ needs.build-ci-container.outputs.container-name-agent }}:latest - podman push ${{ needs.build-ci-container.outputs.container-name-agent-tag }} - podman push ${{ needs.build-ci-container.outputs.container-name-agent }}:latest diff --git a/.github/workflows/build-metrics-container.yml b/.github/workflows/build-metrics-container.yml deleted file mode 100644 index 751ab679411d..000000000000 --- a/.github/workflows/build-metrics-container.yml +++ /dev/null @@ -1,78 +0,0 @@ -name: Build Metrics Container - -permissions: - contents: read - -on: - push: - branches: - - main - paths: - - .github/workflows/build-metrics-container.yml - - '.ci/metrics/**' - pull_request: - branches: - - main - paths: - - .github/workflows/build-metrics-container.yml - - '.ci/metrics/**' - -jobs: - build-metrics-container: - if: github.repository_owner == 'llvm' - runs-on: 
ubuntu-latest - outputs: - container-name: ${{ steps.vars.outputs.container-name }} - container-name-tag: ${{ steps.vars.outputs.container-name-tag }} - container-filename: ${{ steps.vars.outputs.container-filename }} - steps: - - name: Checkout LLVM - uses: actions/checkout@v4 - with: - sparse-checkout: .ci/metrics/ - - name: Write Variables - id: vars - run: | - tag=`date +%s` - container_name="ghcr.io/$GITHUB_REPOSITORY_OWNER/metrics" - echo "container-name=$container_name" >> $GITHUB_OUTPUT - echo "container-name-tag=$container_name:$tag" >> $GITHUB_OUTPUT - echo "container-filename=$(echo $container_name:$tag | sed -e 's/\//-/g' -e 's/:/-/g').tar" >> $GITHUB_OUTPUT - - name: Build Container - working-directory: ./.ci/metrics - run: | - podman build -t ${{ steps.vars.outputs.container-name-tag }} -f Dockerfile . - # Save the container so we have it in case the push fails. This also - # allows us to separate the push step into a different job so we can - # maintain minimal permissions while building the container. 
- - name: Save Container Image - run: | - podman save ${{ steps.vars.outputs.container-name-tag }} > ${{ steps.vars.outputs.container-filename }} - - name: Upload Container Image - uses: actions/upload-artifact@v4 - with: - name: container - path: ${{ steps.vars.outputs.container-filename }} - retention-days: 14 - - push-metrics-container: - if: github.event_name == 'push' - needs: - - build-metrics-container - permissions: - packages: write - runs-on: ubuntu-24.04 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - steps: - - name: Download Container - uses: actions/download-artifact@v4 - with: - name: container - - name: Push Container - run: | - podman load -i ${{ needs.build-metrics-container.outputs.container-filename }} - podman tag ${{ needs.build-metrics-container.outputs.container-name-tag }} ${{ needs.build-metrics-container.outputs.container-name }}:latest - podman login -u ${{ github.actor }} -p $GITHUB_TOKEN ghcr.io - podman push ${{ needs.build-metrics-container.outputs.container-name-tag }} - podman push ${{ needs.build-metrics-container.outputs.container-name }}:latest diff --git a/.github/workflows/ci-post-commit-analyzer-run.py b/.github/workflows/ci-post-commit-analyzer-run.py deleted file mode 100644 index e5f52d3b2fa6..000000000000 --- a/.github/workflows/ci-post-commit-analyzer-run.py +++ /dev/null @@ -1,34 +0,0 @@ -import json -import multiprocessing -import os -import re -import subprocess -import sys - - -def run_analyzer(data): - os.chdir(data["directory"]) - command = ( - data["command"] - + f" --analyze --analyzer-output html -o analyzer-results -Xclang -analyzer-config -Xclang max-nodes=75000" - ) - print(command) - subprocess.run(command, shell=True, check=True) - - -def pool_error(e): - print("Error analyzing file:", e) - - -def main(): - db_path = sys.argv[1] - database = json.load(open(db_path)) - - with multiprocessing.Pool() as pool: - pool.map_async(run_analyzer, [k for k in database], error_callback=pool_error) - pool.close() - 
pool.join() - - -if __name__ == "__main__": - main() diff --git a/.github/workflows/ci-post-commit-analyzer.yml b/.github/workflows/ci-post-commit-analyzer.yml deleted file mode 100644 index d614dd07b3a4..000000000000 --- a/.github/workflows/ci-post-commit-analyzer.yml +++ /dev/null @@ -1,95 +0,0 @@ -name: Post-Commit Static Analyzer - -permissions: - contents: read - -on: - push: - branches: - - 'release/**' - paths: - - 'clang/**' - - 'llvm/**' - - '.github/workflows/ci-post-commit-analyzer.yml' - pull_request: - types: - - opened - - synchronize - - reopened - - closed - paths: - - '.github/workflows/ci-post-commit-analyzer.yml' - - '.github/workflows/ci-post-commit-analyzer-run.py' - schedule: - - cron: '30 0 * * *' - -concurrency: - group: >- - llvm-project-${{ github.workflow }}-${{ github.event_name == 'pull_request' && - ( github.event.pull_request.number || github.ref) }} - cancel-in-progress: ${{ startsWith(github.ref, 'refs/pull/') }} - -jobs: - post-commit-analyzer: - if: >- - github.repository_owner == 'llvm' && - github.event.action != 'closed' - runs-on: ubuntu-22.04 - container: - image: 'ghcr.io/llvm/ci-ubuntu-22.04:latest' - env: - LLVM_VERSION: 18 - steps: - - name: Checkout Source - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - - - name: Setup ccache - uses: hendrikmuhs/ccache-action@v1 - with: - # A full build of llvm, clang, lld, and lldb takes about 250MB - # of ccache space. There's not much reason to have more than this, - # because we usually won't need to save cache entries from older - # builds. Also, there is an overall 10GB cache limit, and each - # run creates a new cache entry so we want to ensure that we have - # enough cache space for all the tests to run at once and still - # fit under the 10 GB limit. 
- # Default to 2G to workaround: https://github.com/hendrikmuhs/ccache-action/issues/174 - max-size: 2G - key: post-commit-analyzer - variant: sccache - - - name: Configure - run: | - cmake -B build -S llvm -G Ninja \ - -DLLVM_ENABLE_ASSERTIONS=ON \ - -DLLVM_ENABLE_PROJECTS=clang \ - -DLLVM_BUILD_LLVM_DYLIB=ON \ - -DLLVM_LINK_LLVM_DYLIB=ON \ - -DCMAKE_CXX_COMPILER=clang++ \ - -DCMAKE_C_COMPILER=clang \ - -DCMAKE_CXX_COMPILER_LAUNCHER=sccache \ - -DCMAKE_C_COMPILER_LAUNCHER=sccache \ - -DCMAKE_EXPORT_COMPILE_COMMANDS=ON \ - -DLLVM_INCLUDE_TESTS=OFF \ - -DCLANG_INCLUDE_TESTS=OFF \ - -DCMAKE_BUILD_TYPE=Release - - - name: Build - run: | - # FIXME: We need to build all the generated header files in order to be able to run - # the analyzer on every file. Building libLLVM and libclang is probably overkill for - # this, but it's better than building every target. - ninja -v -C build libLLVM.so libclang.so - - # Run the analyzer. - python3 .github/workflows/ci-post-commit-analyzer-run.py build/compile_commands.json - - scan-build --generate-index-only build/analyzer-results - - - name: Upload Results - uses: actions/upload-artifact@26f96dfa697d77e81fd5907df203aa23a56210a8 #v4.3.0 - if: always() - with: - name: analyzer-results - path: 'build/analyzer-results/*' - diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml new file mode 100644 index 000000000000..0bea0a7a3b5d --- /dev/null +++ b/.github/workflows/codeql.yml @@ -0,0 +1,78 @@ +# For most projects, this workflow file will not need changing; you simply need +# to commit it to your repository. +# +# You may wish to alter this file to override the set of languages analyzed, +# or to provide custom queries or build logic. +# +# ******** NOTE ******** +# We have attempted to detect the languages in your repository. Please check +# the `language` matrix defined below to confirm you have the correct set of +# supported CodeQL languages. 
+# +name: "CodeQL" + +on: + push: + branches: ["npu/release/20.x"] + pull_request: + # The branches below must be a subset of the branches above + branches: ["npu/release/20.x"] + schedule: + - cron: "0 0 * * 1" + +permissions: + contents: read + +jobs: + analyze: + name: Analyze + runs-on: ubuntu-latest + permissions: + actions: read + contents: read + security-events: write + + strategy: + fail-fast: false + matrix: + language: ["javascript", "python", "typescript"] + # CodeQL supports [ $supported-codeql-languages ] + # Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support + + steps: + - name: Harden Runner + uses: step-security/harden-runner@cb605e52c26070c328afc4562f0b4ada7618a84e # v2.10.4 + with: + egress-policy: audit + + - name: Checkout repository + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + + # Initializes the CodeQL tools for scanning. + - name: Initialize CodeQL + uses: github/codeql-action/init@662472033e021d55d94146f66f6058822b0b39fd # v3.27.0 + with: + languages: ${{ matrix.language }} + # If you wish to specify custom queries, you can do so here or in a config file. + # By default, queries listed here will override any specified in a config file. + # Prefix the list here with "+" to use these queries and those in the config file. + + # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). + # If this step fails, then you should remove it and run the build manually (see below) + - name: Autobuild + uses: github/codeql-action/autobuild@662472033e021d55d94146f66f6058822b0b39fd # v3.27.0 + + # â„šī¸ Command-line programs to run using the OS shell. + # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun + + # If the Autobuild fails above, remove it and uncomment the following three lines. + # modify them (or add more) to build your code if your project, please refer to the EXAMPLE below for guidance. 
+ + # - run: | + # echo "Run, Build Application using script" + # ./location_of_script_within_repo/buildscript.sh + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@662472033e021d55d94146f66f6058822b0b39fd # v3.27.0 + with: + category: "/language:${{matrix.language}}" diff --git a/.github/workflows/commit-access-review.py b/.github/workflows/commit-access-review.py deleted file mode 100644 index 4f539fe98004..000000000000 --- a/.github/workflows/commit-access-review.py +++ /dev/null @@ -1,402 +0,0 @@ -#!/usr/bin/env python3 -# ===-- commit-access-review.py --------------------------------------------===# -# -# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -# See https://llvm.org/LICENSE.txt for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -# -# ===------------------------------------------------------------------------===# -# -# ===------------------------------------------------------------------------===# - -import datetime -import github -import re -import requests -import time -import sys -import re - - -class User: - THRESHOLD = 5 - - def __init__(self, name, triage_list): - self.name = name - self.authored = 0 - self.merged = 0 - self.reviewed = 0 - self.triage_list = triage_list - - def add_authored(self, val=1): - self.authored += val - if self.meets_threshold(): - print(self.name, "meets the threshold with authored commits") - del self.triage_list[self.name] - - def set_authored(self, val): - self.authored = 0 - self.add_authored(val) - - def add_merged(self, val=1): - self.merged += val - if self.meets_threshold(): - print(self.name, "meets the threshold with merged commits") - del self.triage_list[self.name] - - def add_reviewed(self, val=1): - self.reviewed += val - if self.meets_threshold(): - print(self.name, "meets the threshold with reviewed commits") - del self.triage_list[self.name] - - def get_total(self): - return self.authored + self.merged + self.reviewed 
- - def meets_threshold(self): - return self.get_total() >= self.THRESHOLD - - def __repr__(self): - return "{} : a: {} m: {} r: {}".format( - self.name, self.authored, self.merged, self.reviewed - ) - - -def check_manual_requests( - gh: github.Github, start_date: datetime.datetime -) -> list[str]: - """ - Return a list of users who have been asked since ``start_date`` if they - want to keep their commit access or if they have applied for commit - access since ``start_date`` - """ - - query = """ - query ($query: String!, $after: String) { - search(query: $query, type: ISSUE, first: 100, after: $after) { - nodes { - ... on Issue { - author { - login - } - body - } - } - pageInfo { - hasNextPage - endCursor - } - } - } - """ - formatted_start_date = start_date.strftime("%Y-%m-%dT%H:%M:%S") - variables = { - "query": f"type:issue created:>{formatted_start_date} org:llvm repo:llvm-project label:infra:commit-access,infra:commit-access-request" - } - - has_next_page = True - users = [] - while has_next_page: - res_header, res_data = gh._Github__requester.graphql_query( - query=query, variables=variables - ) - data = res_data["data"] - for issue in data["search"]["nodes"]: - users.extend([user[1:] for user in re.findall("@[^ ,\n]+", issue["body"])]) - if issue["author"]: - users.append(issue["author"]["login"]) - has_next_page = data["search"]["pageInfo"]["hasNextPage"] - if has_next_page: - variables["after"] = data["search"]["pageInfo"]["endCursor"] - return users - - -def get_num_commits(gh: github.Github, user: str, start_date: datetime.datetime) -> int: - """ - Get number of commits that ``user`` has been made since ``start_date`. - """ - variables = { - "owner": "llvm", - "user": user, - "start_date": start_date.strftime("%Y-%m-%dT%H:%M:%S"), - } - - user_query = """ - query ($user: String!) 
{ - user(login: $user) { - id - } - } - """ - - res_header, res_data = gh._Github__requester.graphql_query( - query=user_query, variables=variables - ) - data = res_data["data"] - variables["user_id"] = data["user"]["id"] - - query = """ - query ($owner: String!, $user_id: ID!, $start_date: GitTimestamp!){ - organization(login: $owner) { - teams(query: "llvm-committers" first:1) { - nodes { - repositories { - nodes { - ref(qualifiedName: "main") { - target { - ... on Commit { - history(since: $start_date, author: {id: $user_id }) { - totalCount - } - } - } - } - } - } - } - } - } - } - """ - count = 0 - res_header, res_data = gh._Github__requester.graphql_query( - query=query, variables=variables - ) - data = res_data["data"] - for repo in data["organization"]["teams"]["nodes"][0]["repositories"]["nodes"]: - count += int(repo["ref"]["target"]["history"]["totalCount"]) - if count >= User.THRESHOLD: - break - return count - - -def is_new_committer_query_repo( - gh: github.Github, user: str, start_date: datetime.datetime -) -> bool: - """ - Determine if ``user`` is a new committer. A new committer can keep their - commit access even if they don't meet the criteria. - """ - variables = { - "user": user, - } - - user_query = """ - query ($user: String!) { - user(login: $user) { - id - } - } - """ - - res_header, res_data = gh._Github__requester.graphql_query( - query=user_query, variables=variables - ) - data = res_data["data"] - variables["owner"] = "llvm" - variables["user_id"] = data["user"]["id"] - variables["start_date"] = start_date.strftime("%Y-%m-%dT%H:%M:%S") - - query = """ - query ($owner: String!, $user_id: ID!){ - organization(login: $owner) { - repository(name: "llvm-project") { - ref(qualifiedName: "main") { - target { - ... 
on Commit { - history(author: {id: $user_id }, first: 5) { - nodes { - committedDate - } - } - } - } - } - } - } - } - """ - - res_header, res_data = gh._Github__requester.graphql_query( - query=query, variables=variables - ) - data = res_data["data"] - repo = data["organization"]["repository"] - commits = repo["ref"]["target"]["history"]["nodes"] - if len(commits) == 0: - return True - committed_date = commits[-1]["committedDate"] - if datetime.datetime.strptime(committed_date, "%Y-%m-%dT%H:%M:%SZ") < start_date: - return False - return True - - -def is_new_committer( - gh: github.Github, user: str, start_date: datetime.datetime -) -> bool: - """ - Wrapper around is_new_commiter_query_repo to handle exceptions. - """ - try: - return is_new_committer_query_repo(gh, user, start_date) - except: - pass - return True - - -def get_review_count( - gh: github.Github, user: str, start_date: datetime.datetime -) -> int: - """ - Return the number of reviews that ``user`` has done since ``start_date``. - """ - query = """ - query ($query: String!) { - search(query: $query, type: ISSUE, first: 5) { - issueCount - } - } - """ - formatted_start_date = start_date.strftime("%Y-%m-%dT%H:%M:%S") - variables = { - "owner": "llvm", - "repo": "llvm-project", - "user": user, - "query": f"type:pr commenter:{user} -author:{user} merged:>{formatted_start_date} org:llvm", - } - - res_header, res_data = gh._Github__requester.graphql_query( - query=query, variables=variables - ) - data = res_data["data"] - return int(data["search"]["issueCount"]) - - -def count_prs(gh: github.Github, triage_list: dict, start_date: datetime.datetime): - """ - Fetch all the merged PRs for the project since ``start_date`` and update - ``triage_list`` with the number of PRs merged for each user. - """ - - query = """ - query ($query: String!, $after: String) { - search(query: $query, type: ISSUE, first: 100, after: $after) { - issueCount, - nodes { - ... 
on PullRequest { - author { - login - } - mergedBy { - login - } - } - } - pageInfo { - hasNextPage - endCursor - } - } - } - """ - date_begin = start_date - date_end = None - while date_begin < datetime.datetime.now(): - date_end = date_begin + datetime.timedelta(days=7) - formatted_date_begin = date_begin.strftime("%Y-%m-%dT%H:%M:%S") - formatted_date_end = date_end.strftime("%Y-%m-%dT%H:%M:%S") - variables = { - "query": f"type:pr is:merged merged:{formatted_date_begin}..{formatted_date_end} org:llvm", - } - has_next_page = True - while has_next_page: - print(variables) - res_header, res_data = gh._Github__requester.graphql_query( - query=query, variables=variables - ) - data = res_data["data"] - for pr in data["search"]["nodes"]: - # Users can be None if the user has been deleted. - if not pr["author"]: - continue - author = pr["author"]["login"] - if author in triage_list: - triage_list[author].add_authored() - - if not pr["mergedBy"]: - continue - merger = pr["mergedBy"]["login"] - if author == merger: - continue - if merger not in triage_list: - continue - triage_list[merger].add_merged() - - has_next_page = data["search"]["pageInfo"]["hasNextPage"] - if has_next_page: - variables["after"] = data["search"]["pageInfo"]["endCursor"] - date_begin = date_end - - -def main(): - token = sys.argv[1] - gh = github.Github(login_or_token=token) - org = gh.get_organization("llvm") - repo = org.get_repo("llvm-project") - one_year_ago = datetime.datetime.now() - datetime.timedelta(days=365) - triage_list = {} - for collaborator in repo.get_collaborators(permission="push"): - triage_list[collaborator.login] = User(collaborator.login, triage_list) - - print("Start:", len(triage_list), "triagers") - # Step 0 Check if users have requested commit access in the last year. 
- for user in check_manual_requests(gh, one_year_ago): - if user in triage_list: - print(user, "requested commit access in the last year.") - del triage_list[user] - print("After Request Check:", len(triage_list), "triagers") - - # Step 1 count all PRs authored or merged - count_prs(gh, triage_list, one_year_ago) - - print("After PRs:", len(triage_list), "triagers") - - if len(triage_list) == 0: - sys.exit(0) - - # Step 2 check for reviews - for user in list(triage_list.keys()): - review_count = get_review_count(gh, user, one_year_ago) - triage_list[user].add_reviewed(review_count) - - print("After Reviews:", len(triage_list), "triagers") - - if len(triage_list) == 0: - sys.exit(0) - - # Step 3 check for number of commits - for user in list(triage_list.keys()): - num_commits = get_num_commits(gh, user, one_year_ago) - # Override the total number of commits to not double count commits and - # authored PRs. - triage_list[user].set_authored(num_commits) - - print("After Commits:", len(triage_list), "triagers") - - # Step 4 check for new committers - for user in list(triage_list.keys()): - print("Checking", user) - if is_new_committer(gh, user, one_year_ago): - print("Removing new committer: ", user) - del triage_list[user] - - print("Complete:", len(triage_list), "triagers") - - with open("triagers.log", "w") as triagers_log: - for user in triage_list: - print(triage_list[user].__repr__()) - triagers_log.write(user + "\n") - - -if __name__ == "__main__": - main() diff --git a/.github/workflows/commit-access-review.yml b/.github/workflows/commit-access-review.yml deleted file mode 100644 index f9195a1863de..000000000000 --- a/.github/workflows/commit-access-review.yml +++ /dev/null @@ -1,34 +0,0 @@ -name: Commit Access Review - -on: - workflow_dispatch: - schedule: - # * is a special character in YAML so you have to quote this string - - cron: '0 7 1 * *' - -permissions: - contents: read - -jobs: - commit-access-review: - if: github.repository_owner == 'llvm' - 
runs-on: ubuntu-22.04 - steps: - - name: Fetch LLVM sources - uses: actions/checkout@v4 - - - name: Install dependencies - run: | - pip install --require-hashes -r ./llvm/utils/git/requirements.txt - - - name: Run Script - env: - GITHUB_TOKEN: ${{ secrets.RELEASE_TASKS_USER_TOKEN }} - run: | - python3 .github/workflows/commit-access-review.py $GITHUB_TOKEN - - - name: Upload Triage List - uses: actions/upload-artifact@26f96dfa697d77e81fd5907df203aa23a56210a8 #v4.3.0 - with: - name: triagers - path: triagers.log diff --git a/.github/workflows/containers/github-action-ci-windows/Dockerfile b/.github/workflows/containers/github-action-ci-windows/Dockerfile deleted file mode 100644 index 9a1fab694c9d..000000000000 --- a/.github/workflows/containers/github-action-ci-windows/Dockerfile +++ /dev/null @@ -1,118 +0,0 @@ -# Agent image for LLVM org cluster. -# .net 4.8 is required by chocolately package manager. -FROM mcr.microsoft.com/dotnet/framework/sdk:4.8-windowsservercore-ltsc2019 - -# Restore the default Windows shell for correct batch processing. -SHELL ["cmd", "/S", "/C"] - -# Download the Build Tools bootstrapper. -ADD https://aka.ms/vs/16/release/vs_buildtools.exe /TEMP/vs_buildtools.exe - -RUN powershell -Command Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1')) - -# Download channel for fixed install. -ARG CHANNEL_URL=https://aka.ms/vs/16/release/channel -ADD ${CHANNEL_URL} /TEMP/VisualStudio.chman - -# Install Build Tools with C++ workload. 
-# - Documentation for docker installation -# https://docs.microsoft.com/en-us/visualstudio/install/build-tools-container?view=vs-2019 -# - Documentation on workloads -# https://docs.microsoft.com/en-us/visualstudio/install/workload-component-id-vs-build-tools?view=vs-2019#c-build-tools -# - Documentation on flags -# https://docs.microsoft.com/en-us/visualstudio/install/use-command-line-parameters-to-install-visual-studio?view=vs-2019 -RUN /TEMP/vs_buildtools.exe --quiet --wait --norestart --nocache \ - --channelUri C:\TEMP\VisualStudio.chman \ - --installChannelUri C:\TEMP\VisualStudio.chman \ - --installPath C:\BuildTools \ - --add Microsoft.VisualStudio.Workload.VCTools \ - --add Microsoft.VisualStudio.Component.VC.ATL \ - --includeRecommended \ - || IF "%ERRORLEVEL%"=="3010" EXIT 0 - -# Register DIA dll (Debug Interface Access) so it can be used to symbolize -# the stack traces. Register dll for 32 and 64 bit. -# see https://developercommunity.visualstudio.com/content/problem/290674/msdia140dll-is-not-registered-on-vs2017-hosts.html - -RUN regsvr32 /S "C:\BuildTools\DIA SDK\bin\amd64\msdia140.dll" & \ - regsvr32 /S "C:\BuildTools\DIA SDK\bin\msdia140.dll" - -# install tools as described in https://llvm.org/docs/GettingStartedVS.html -# and a few more that were not documented... -RUN choco install -y ninja git -# Pin an older version of Python; the current Python 3.10 fails when -# doing "pip install" for the other dependencies, as it fails to find libxml -# while compiling some package. -RUN choco install -y python3 --version 3.9.7 - -# ActivePerl is currently not installable via Chocolatey, see -# http://disq.us/p/2ipditb. Install StrawberryPerl instead. Unfortunately, -# StrawberryPerl not only installs Perl, but also a redundant C/C++ compiler -# toolchain, and a copy of pkg-config which can cause misdetections for other -# built products, see -# https://github.com/StrawberryPerl/Perl-Dist-Strawberry/issues/11 for further -# details. 
Remove the redundant and unnecessary parts of the StrawberryPerl -# install. -RUN choco install -y strawberryperl && \ - rmdir /q /s c:\strawberry\c && \ - del /q c:\strawberry\perl\bin\pkg-config* - -# libcxx requires clang(-cl) to be available -RUN choco install -y sccache llvm -RUN pip install psutil - -RUN curl -LO https://github.com/mstorsjo/llvm-mingw/releases/download/20230320/llvm-mingw-20230320-ucrt-x86_64.zip && \ - powershell Expand-Archive llvm-mingw-*-ucrt-x86_64.zip -DestinationPath . && \ - del llvm-mingw-*-ucrt-x86_64.zip && \ - ren llvm-mingw-20230320-ucrt-x86_64 llvm-mingw - -# configure Python encoding -ENV PYTHONIOENCODING=UTF-8 - -# update the path variable -# C:\Program Files\Git\usr\bin contains a usable bash and other unix tools. -# C:\llvm-mingw\bin contains Clang configured for mingw targets and -# corresponding sysroots. Both the 'llvm' package (with Clang defaulting -# to MSVC targets) and this directory contains executables named -# 'clang.exe' - add this last to let the other one have precedence. -# To use these compilers, use the triple prefixed form, e.g. -# x86_64-w64-mingw32-clang. 
-# C:\buildtools and SDK paths are ones that are set by c:\BuildTools\Common7\Tools\VsDevCmd.bat -arch=amd64 -host_arch=amd64 -RUN powershell -Command \ - [System.Environment]::SetEnvironmentVariable('PATH', \ - [System.Environment]::GetEnvironmentVariable('PATH', 'machine') + ';C:\Program Files\Git\usr\bin;C:\llvm-mingw\bin' \ - + ';C:\BuildTools\Common7\IDE\' \ - + ';C:\BuildTools\Common7\IDE\CommonExt ensions\Microsoft\TeamFoundation\Team Explorer' \ - + ';C:\BuildTools\Common7\IDE\CommonExtensions\Microsoft\CMake\CMake\bin' \ - + ';C:\BuildTools\Common7\IDE\CommonExtensions\Microsoft\CMake\Ninja' \ - + ';C:\BuildTools\Common7\IDE\CommonExtensions\Microsoft\TeamFoundation\Team Explorer' \ - + ';C:\BuildTools\Common7\IDE\CommonExtensions\Microsoft\TestWindow' \ - + ';C:\BuildTools\Common7\IDE\VC\VCPackages' \ - + ';C:\BuildTools\Common7\Tools\' \ - + ';C:\BuildTools\Common7\Tools\devinit' \ - + ';C:\BuildTools\MSBuild\Current\Bin' \ - + ';C:\BuildTools\MSBuild\Current\bin\Roslyn' \ - + ';C:\BuildTools\VC\Tools\MSVC\14.29.30133\bin\HostX64\x64' \ - + ';C:\Program Files (x86)\Microsoft SDKs\Windows\v10.0A\bin\NETFX 4.8 Tools\x64\' \ - + ';C:\Program Files (x86)\Windows Kits\10\bin\10.0.19041.0\x64' \ - + ';C:\Program Files (x86)\Windows Kits\10\bin\x64' \ - + ';C:\Windows\Microsoft.NET\Framework64\v4.0.30319' \ - ,'machine') - -# support long file names during git checkout -RUN git config --system core.longpaths true & \ - git config --global core.autocrlf false - -# handle for debugging of files beeing locked by some processes. 
-RUN choco install -y handle - -RUN pip3 install pywin32 buildbot-worker==2.8.4 - -ARG RUNNER_VERSION=2.322.0 -ENV RUNNER_VERSION=$RUNNER_VERSION - -RUN powershell -Command \ - Invoke-WebRequest -Uri https://github.com/actions/runner/releases/download/v${env:RUNNER_VERSION}/actions-runner-win-x64-${env:RUNNER_VERSION}.zip -OutFile actions-runner-win.zip ; \ - Add-Type -AssemblyName System.IO.Compression.FileSystem ; \ - [System.IO.Compression.ZipFile]::ExtractToDirectory('actions-runner-win.zip', $PWD) ;\ - rm actions-runner-win.zip diff --git a/.github/workflows/containers/github-action-ci/Dockerfile b/.github/workflows/containers/github-action-ci/Dockerfile index 377b8f14402e..dc80159443d5 100644 --- a/.github/workflows/containers/github-action-ci/Dockerfile +++ b/.github/workflows/containers/github-action-ci/Dockerfile @@ -1,106 +1,48 @@ -FROM docker.io/library/ubuntu:22.04 as base -ENV LLVM_SYSROOT=/opt/llvm +FROM docker.io/library/ubuntu:22.04@sha256:0e5e4a57c2499249aafc3b40fcd541e9a456aab7296681a3994d631587203f97 as base +ENV LLVM_SYSROOT=/opt/llvm/ -FROM base as stage1-toolchain -ENV LLVM_VERSION=19.1.5 +FROM base as toolchain +ENV LLVM_MAJOR=17 +ENV LLVM_VERSION=${LLVM_MAJOR}.0.6 +ENV LLVM_DIRNAME=clang+llvm-${LLVM_VERSION}-x86_64-linux-gnu-ubuntu-22.04 +ENV LLVM_FILENAME=${LLVM_DIRNAME}.tar.xz RUN apt-get update && \ apt-get install -y \ - wget \ - gcc \ - g++ \ - cmake \ - ninja-build \ - python3 \ - git \ curl \ - zlib1g-dev + xz-utils -RUN curl -O -L https://github.com/llvm/llvm-project/archive/refs/tags/llvmorg-$LLVM_VERSION.tar.gz && tar -xf llvmorg-$LLVM_VERSION.tar.gz +RUN mkdir -p $LLVM_SYSROOT/bin/ $LLVM_SYSROOT/lib/ -WORKDIR /llvm-project-llvmorg-$LLVM_VERSION +RUN curl -O -L https://github.com/llvm/llvm-project/releases/download/llvmorg-$LLVM_VERSION/$LLVM_FILENAME -# Patch to enable better PGO profile data. -# TODO: Remove this for llvm 20 -ADD https://github.com/llvm/llvm-project/commit/738250989ce516f02f809bdfde474a039c77e81f.patch . 
+RUN tar -C $LLVM_SYSROOT --strip-components=1 -xJf $LLVM_FILENAME \ + $LLVM_DIRNAME/bin/clang \ + $LLVM_DIRNAME/bin/clang++ \ + $LLVM_DIRNAME/bin/clang-cl \ + $LLVM_DIRNAME/bin/clang-$LLVM_MAJOR \ + $LLVM_DIRNAME/bin/lld \ + $LLVM_DIRNAME/bin/ld.lld \ + $LLVM_DIRNAME/lib/clang/ -RUN patch -p1 < 738250989ce516f02f809bdfde474a039c77e81f.patch -RUN cmake -B ./build -G Ninja ./llvm \ - -C ./clang/cmake/caches/BOLT-PGO.cmake \ - -DBOOTSTRAP_LLVM_ENABLE_LLD=ON \ - -DBOOTSTRAP_BOOTSTRAP_LLVM_ENABLE_LLD=ON \ - -DPGO_INSTRUMENT_LTO=Thin \ - -DLLVM_ENABLE_RUNTIMES="compiler-rt" \ - -DCMAKE_INSTALL_PREFIX="$LLVM_SYSROOT" \ - -DLLVM_ENABLE_PROJECTS="bolt;clang;lld;clang-tools-extra" \ - -DLLVM_DISTRIBUTION_COMPONENTS="lld;compiler-rt;clang-format;scan-build" \ - -DCLANG_DEFAULT_LINKER="lld" +FROM base -RUN ninja -C ./build stage2-clang-bolt stage2-install-distribution && ninja -C ./build install-distribution - -FROM base as ci-container - -COPY --from=stage1-toolchain $LLVM_SYSROOT $LLVM_SYSROOT +COPY --from=toolchain $LLVM_SYSROOT $LLVM_SYSROOT # Need to install curl for hendrikmuhs/ccache-action # Need nodejs for some of the GitHub actions. # Need perl-modules for clang analyzer tests. -# Need git for SPIRV-Tools tests. RUN apt-get update && \ - DEBIAN_FRONTEND=noninteractive apt-get install -y \ - binutils \ + apt-get install -y \ + binutils \ cmake \ curl \ - git \ libstdc++-11-dev \ ninja-build \ nodejs \ perl-modules \ - python3-psutil \ - sudo \ - - # These are needed by the premerge pipeline. Pip is used to install - # dependent python packages and ccache is used for build caching. File and - # tzdata are used for tests. - python3-pip \ - ccache \ - file \ - tzdata - -# Install sccache as it is needed by most of the project test workflows and -# cannot be installed by the ccache action when executing as a non-root user. -# TODO(boomanaiden154): This should be switched to being installed with apt -# once we bump to Ubuntu 24.04. 
-RUN curl -L 'https://github.com/mozilla/sccache/releases/download/v0.7.6/sccache-v0.7.6-x86_64-unknown-linux-musl.tar.gz' > /tmp/sccache.tar.gz && \ - echo "2902a5e44c3342132f07b62e70cca75d9b23252922faf3b924f449808cc1ae58 /tmp/sccache.tar.gz" | sha256sum -c && \ - tar xzf /tmp/sccache.tar.gz -O --wildcards '*/sccache' > '/usr/local/bin/sccache' && \ - rm /tmp/sccache.tar.gz && \ - chmod +x /usr/local/bin/sccache + python3-psutil ENV LLVM_SYSROOT=$LLVM_SYSROOT ENV PATH=${LLVM_SYSROOT}/bin:${PATH} - -# Create a new user to avoid test failures related to a lack of expected -# permissions issues in some tests. Set the user id to 1001 as that is the -# user id that Github Actions uses to perform the checkout action. -RUN useradd gha -u 1001 -m -s /bin/bash - -# Also add the user to passwordless sudoers so that we can install software -# later on without having to rebuild the container. -RUN adduser gha sudo -RUN echo '%sudo ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers - -USER gha -WORKDIR /home/gha - -FROM ci-container as ci-container-agent - -ENV GITHUB_RUNNER_VERSION=2.322.0 - -RUN mkdir actions-runner && \ - cd actions-runner && \ - curl -O -L https://github.com/actions/runner/releases/download/v$GITHUB_RUNNER_VERSION/actions-runner-linux-x64-$GITHUB_RUNNER_VERSION.tar.gz && \ - tar xzf ./actions-runner-linux-x64-$GITHUB_RUNNER_VERSION.tar.gz && \ - rm ./actions-runner-linux-x64-$GITHUB_RUNNER_VERSION.tar.gz - diff --git a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml new file mode 100644 index 000000000000..1b495dbce772 --- /dev/null +++ b/.github/workflows/dependency-review.yml @@ -0,0 +1,27 @@ +# Dependency Review Action +# +# This Action will scan dependency manifest files that change as part of a Pull Request, +# surfacing known-vulnerable versions of the packages declared or updated in the PR. +# Once installed, if the workflow run is marked as required, +# PRs introducing known-vulnerable packages will be blocked from merging. 
+# +# Source repository: https://github.com/actions/dependency-review-action +name: 'Dependency Review' +on: [pull_request] + +permissions: + contents: read + +jobs: + dependency-review: + runs-on: ubuntu-latest + steps: + - name: Harden Runner + uses: step-security/harden-runner@cb605e52c26070c328afc4562f0b4ada7618a84e # v2.10.4 + with: + egress-policy: audit + + - name: 'Checkout Repository' + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - name: 'Dependency Review' + uses: actions/dependency-review-action@3b139cfc5fae8b618d3eae3675e383bb1769c019 # v4.5.0 diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml deleted file mode 100644 index b4fa27203236..000000000000 --- a/.github/workflows/docs.yml +++ /dev/null @@ -1,222 +0,0 @@ -# LLVM Documentation CI -# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -# See https://llvm.org/LICENSE.txt for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception - -name: "Test documentation build" - -permissions: - contents: read - -on: - push: - branches: - - 'main' - paths: - - 'llvm/docs/**' - - 'clang/docs/**' - - 'clang/include/clang/Basic/AttrDocs.td' - - 'clang/include/clang/Driver/ClangOptionDocs.td' - - 'clang/include/clang/Basic/DiagnosticDocs.td' - - 'clang-tools-extra/docs/**' - - 'lldb/docs/**' - - 'libunwind/docs/**' - - 'libcxx/docs/**' - - 'libc/docs/**' - - 'lld/docs/**' - - 'openmp/docs/**' - - 'polly/docs/**' - - 'flang/docs/**' - - 'flang/include/flang/Optimizer/Dialect/FIROps.td' - - '.github/workflows/docs.yml' - pull_request: - paths: - - 'llvm/docs/**' - - 'clang/docs/**' - - 'clang/include/clang/Basic/AttrDocs.td' - - 'clang/include/clang/Driver/ClangOptionDocs.td' - - 'clang/include/clang/Basic/DiagnosticDocs.td' - - 'clang-tools-extra/docs/**' - - 'lldb/docs/**' - - 'libunwind/docs/**' - - 'libcxx/docs/**' - - 'libc/docs/**' - - 'lld/docs/**' - - 'openmp/docs/**' - - 'polly/docs/**' - - 'flang/docs/**' - 
- 'flang/include/flang/Optimizer/Dialect/FIROps.td' - - '.github/workflows/docs.yml' - -jobs: - check-docs-build: - name: "Test documentation build" - runs-on: ubuntu-latest - if: github.repository == 'llvm/llvm-project' - steps: - # Don't fetch before checking for file changes to force the file changes - # action to use the Github API in pull requests. If it's a push to a - # branch we can't use the Github API to get the diff, so we need to have - # a local checkout beforehand. - - name: Fetch LLVM sources (Push) - if: ${{ github.event_name == 'push' }} - uses: actions/checkout@v4 - with: - fetch-depth: 1 - - name: Get subprojects that have doc changes - id: docs-changed-subprojects - uses: tj-actions/changed-files@v39 - with: - files_yaml: | - llvm: - - 'llvm/docs/**' - clang: - - 'clang/docs/**' - - 'clang/include/clang/Basic/AttrDocs.td' - - 'clang/include/clang/Driver/ClangOptionDocs.td' - - 'clang/include/clang/Basic/DiagnosticDocs.td' - clang-tools-extra: - - 'clang-tools-extra/docs/**' - lldb: - - 'lldb/docs/**' - libunwind: - - 'libunwind/docs/**' - libcxx: - - 'libcxx/docs/**' - libc: - - 'libc/docs/**' - lld: - - 'lld/docs/**' - openmp: - - 'openmp/docs/**' - polly: - - 'polly/docs/**' - flang: - - 'flang/docs/**' - - 'flang/include/flang/Optimizer/Dialect/FIROps.td' - workflow: - - '.github/workflows/docs.yml' - - name: Fetch LLVM sources (PR) - if: ${{ github.event_name == 'pull_request' }} - uses: actions/checkout@v4 - with: - fetch-depth: 1 - - name: Setup Python env - uses: actions/setup-python@v5 - with: - python-version: '3.11' - cache: 'pip' - cache-dependency-path: 'llvm/docs/requirements-hashed.txt' - - name: Install python dependencies - run: pip install -r llvm/docs/requirements-hashed.txt - - name: Install system dependencies - run: | - sudo apt-get update - # swig and graphviz are lldb specific dependencies - sudo apt-get install -y cmake ninja-build swig graphviz - - name: Setup output folder - run: mkdir built-docs - - name: Build LLVM 
docs - if: | - steps.docs-changed-subprojects.outputs.llvm_any_changed == 'true' || - steps.docs-changed-subprojects.outputs.workflow_any_changed == 'true' - run: | - cmake -B llvm-build -GNinja -DCMAKE_BUILD_TYPE=Release -DLLVM_ENABLE_SPHINX=ON ./llvm - TZ=UTC ninja -C llvm-build docs-llvm-html docs-llvm-man - mkdir built-docs/llvm - cp -r llvm-build/docs/* built-docs/llvm/ - - name: Build Clang docs - if: | - steps.docs-changed-subprojects.outputs.clang_any_changed == 'true' || - steps.docs-changed-subprojects.outputs.workflow_any_changed == 'true' - run: | - cmake -B clang-build -GNinja -DCMAKE_BUILD_TYPE=Release -DLLVM_ENABLE_PROJECTS="clang" -DLLVM_ENABLE_SPHINX=ON ./llvm - TZ=UTC ninja -C clang-build docs-clang-html docs-clang-man - mkdir built-docs/clang - cp -r clang-build/docs/* built-docs/clang/ - - name: Build clang-tools-extra docs - if: | - steps.docs-changed-subprojects.outputs.clang-tools-extra_any_changed == 'true' || - steps.docs-changed-subprojects.outputs.workflow_any_changed == 'true' - run: | - cmake -B clang-tools-extra-build -GNinja -DCMAKE_BUILD_TYPE=Release -DLLVM_ENABLE_PROJECTS="clang;clang-tools-extra" -DLLVM_ENABLE_SPHINX=ON ./llvm - TZ=UTC ninja -C clang-tools-extra-build docs-clang-tools-html docs-clang-tools-man - mkdir built-docs/clang-tools-extra - cp -r clang-tools-extra-build/docs/* built-docs/clang-tools-extra/ - - name: Build LLDB docs - if: | - steps.docs-changed-subprojects.outputs.lldb_any_changed == 'true' || - steps.docs-changed-subprojects.outputs.workflow_any_changed == 'true' - run: | - cmake -B lldb-build -GNinja -DCMAKE_BUILD_TYPE=Release -DLLVM_ENABLE_PROJECTS="clang;lldb" -DLLVM_ENABLE_SPHINX=ON ./llvm - TZ=UTC ninja -C lldb-build docs-lldb-html docs-lldb-man - mkdir built-docs/lldb - cp -r lldb-build/docs/* built-docs/lldb/ - - name: Build libunwind docs - if: | - steps.docs-changed-subprojects.outputs.libunwind_any_changed == 'true' || - steps.docs-changed-subprojects.outputs.workflow_any_changed == 'true' - run: 
| - cmake -B libunwind-build -GNinja -DCMAKE_BUILD_TYPE=Release -DLLVM_ENABLE_RUNTIMES="libunwind" -DLLVM_ENABLE_SPHINX=ON ./runtimes - TZ=UTC ninja -C libunwind-build docs-libunwind-html - mkdir built-docs/libunwind - cp -r libunwind-build/libunwind/docs/* built-docs/libunwind - - name: Build libcxx docs - if: | - steps.docs-changed-subprojects.outputs.libcxx_any_changed == 'true' || - steps.docs-changed-subprojects.outputs.workflow_any_changed == 'true' - run: | - cmake -B libcxx-build -GNinja -DCMAKE_BUILD_TYPE=Release -DLLVM_ENABLE_RUNTIMES="libcxxabi;libcxx;libunwind" -DLLVM_ENABLE_SPHINX=ON ./runtimes - TZ=UTC ninja -C libcxx-build docs-libcxx-html - mkdir built-docs/libcxx - cp -r libcxx-build/libcxx/docs/* built-docs/libcxx/ - - name: Build libc docs - if: | - steps.docs-changed-subprojects.outputs.libc_any_changed == 'true' || - steps.docs-changed-subprojects.outputs.workflow_any_changed == 'true' - run: | - cmake -B libc-build -GNinja -DCMAKE_BUILD_TYPE=Release -DLLVM_ENABLE_RUNTIMES="libc" -DLLVM_ENABLE_SPHINX=ON ./runtimes - TZ=UTC ninja -C libc-build docs-libc-html - mkdir built-docs/libc - cp -r libc-build/libc/docs/* built-docs/libc/ - - name: Build LLD docs - if: | - steps.docs-changed-subprojects.outputs.lld_any_changed == 'true' || - steps.docs-changed-subprojects.outputs.workflow_any_changed == 'true' - run: | - cmake -B lld-build -GNinja -DCMAKE_BUILD_TYPE=Release -DLLVM_ENABLE_PROJECTS="lld" -DLLVM_ENABLE_SPHINX=ON ./llvm - TZ=UTC ninja -C lld-build docs-lld-html - mkdir built-docs/lld - cp -r lld-build/docs/* built-docs/lld/ - - name: Build OpenMP docs - if: | - steps.docs-changed-subprojects.outputs.openmp_any_changed == 'true' || - steps.docs-changed-subprojects.outputs.workflow_any_changed == 'true' - run: | - cmake -B openmp-build -GNinja -DCMAKE_BUILD_TYPE=Release -DLLVM_ENABLE_PROJECTS="clang;openmp" -DLLVM_ENABLE_SPHINX=ON ./llvm - TZ=UTC ninja -C openmp-build docs-openmp-html - mkdir built-docs/openmp - cp -r openmp-build/docs/* 
built-docs/openmp/ - - name: Build Polly docs - if: | - steps.docs-changed-subprojects.outputs.polly_any_changed == 'true' || - steps.docs-changed-subprojects.outputs.workflow_any_changed == 'true' - run: | - cmake -B polly-build -GNinja -DCMAKE_BUILD_TYPE=Release -DLLVM_ENABLE_PROJECTS="polly" -DLLVM_ENABLE_SPHINX=ON ./llvm - TZ=UTC ninja -C polly-build docs-polly-html docs-polly-man - mkdir built-docs/polly - cp -r polly-build/docs/* built-docs/polly/ - - name: Build Flang docs - if: | - steps.docs-changed-subprojects.outputs.flang_any_changed == 'true' || - steps.docs-changed-subprojects.outputs.workflow_any_changed == 'true' - run: | - cmake -B flang-build -GNinja -DCMAKE_BUILD_TYPE=Release -DLLVM_ENABLE_PROJECTS="clang;mlir;flang" -DLLVM_ENABLE_SPHINX=ON ./llvm - TZ=UTC ninja -C flang-build docs-flang-html - mkdir built-docs/flang - cp -r flang-build/docs/* built-docs/flang/ - - name: Upload docs - uses: actions/upload-artifact@v4 - with: - name: docs-output - path: built-docs/ diff --git a/.github/workflows/email-check.yaml b/.github/workflows/email-check.yaml deleted file mode 100644 index 8f32d020975f..000000000000 --- a/.github/workflows/email-check.yaml +++ /dev/null @@ -1,46 +0,0 @@ -name: "Check for private emails used in PRs" - -on: - pull_request: - types: - - opened - -permissions: - contents: read - -jobs: - validate_email: - runs-on: ubuntu-latest - if: github.repository == 'llvm/llvm-project' - steps: - - name: Fetch LLVM sources - uses: actions/checkout@v4 - with: - ref: ${{ github.event.pull_request.head.sha }} - - - name: Extract author email - id: author - run: | - git log -1 - echo "EMAIL=$(git show -s --format='%ae' HEAD~0)" >> $GITHUB_OUTPUT - # Create empty comment file - echo "[]" > comments - - - name: Validate author email - if: ${{ endsWith(steps.author.outputs.EMAIL, 'noreply.github.com') }} - env: - COMMENT: >- - âš ī¸ We detected that you are using a GitHub private e-mail address to contribute to the repo.
- Please turn off [Keep my email addresses private](https://github.com/settings/emails) setting in your account.
- See [LLVM Discourse](https://discourse.llvm.org/t/hidden-emails-on-github-should-we-do-something-about-it) for more information. - run: | - cat << EOF > comments - [{"body" : "$COMMENT"}] - EOF - - - uses: actions/upload-artifact@26f96dfa697d77e81fd5907df203aa23a56210a8 #v4.3.0 - if: always() - with: - name: workflow-args - path: | - comments diff --git a/.github/workflows/get-llvm-version/action.yml b/.github/workflows/get-llvm-version/action.yml deleted file mode 100644 index 2218d926fc13..000000000000 --- a/.github/workflows/get-llvm-version/action.yml +++ /dev/null @@ -1,26 +0,0 @@ -name: Get LLVM Version -description: >- - Get the LLVM version from the llvm-project source tree. This action assumes - the llvm-project sources have already been checked out into GITHUB_WORKSPACE. - -outputs: - major: - description: LLVM major version - value: ${{ steps.version.outputs.major }} - minor: - description: LLVM minor version - value: ${{ steps.version.outputs.minor }} - patch: - description: LLVM patch version - value: ${{ steps.version.outputs.patch }} - -runs: - using: "composite" - steps: - - name: Get Version - shell: bash - id: version - run: | - for v in major minor patch; do - echo "$v=`llvm/utils/release/get-llvm-version.sh --$v`" >> $GITHUB_OUTPUT - done diff --git a/.github/workflows/hlsl-matrix.yaml b/.github/workflows/hlsl-matrix.yaml deleted file mode 100644 index c63a32acd2b3..000000000000 --- a/.github/workflows/hlsl-matrix.yaml +++ /dev/null @@ -1,30 +0,0 @@ -name: HLSL Tests - -permissions: - contents: read - -on: - workflow_dispatch: - pull_request: - branches: - - main - paths: - - llvm/**/DirectX/** - - .github/workflows/hlsl* - - clang/*HLSL*/**/* - - clang/**/*HLSL* - - llvm/**/Frontend/HLSL/**/* - -jobs: - HLSL-Tests: - strategy: - fail-fast: false - matrix: - runs-on: - - hlsl-macos - - uses: ./.github/workflows/hlsl-test-all.yaml - with: - SKU: hlsl-macos - TestTarget: check-hlsl-clang-mtl # TODO: This target changes based on SKU - LLVM-ref: 
${{ github.ref }} diff --git a/.github/workflows/hlsl-test-all.yaml b/.github/workflows/hlsl-test-all.yaml deleted file mode 100644 index 93a1c6d2662d..000000000000 --- a/.github/workflows/hlsl-test-all.yaml +++ /dev/null @@ -1,87 +0,0 @@ -name: HLSL Test - -permissions: - contents: read - -on: - workflow_call: - inputs: - OffloadTest-branch: - description: 'Test Suite Branch' - required: false - default: 'main' - type: string - LLVM-ref: - description: 'LLVM Branch' - required: false - default: 'main' - type: string - SKU: - required: true - type: string - TestTarget: - required: false - default: 'check-hlsl' - type: string - -jobs: - build: - runs-on: ${{ inputs.SKU }} - steps: - - name: Checkout DXC - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - with: - repository: Microsoft/DirectXShaderCompiler - ref: main - path: DXC - submodules: true - - name: Checkout LLVM - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - with: - ref: ${{ inputs.LLVM-branch }} - path: llvm-project - - name: Checkout OffloadTest - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - with: - repository: llvm-beanz/offload-test-suite - ref: main - path: OffloadTest - - name: Checkout Golden Images - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - with: - repository: llvm-beanz/offload-golden-images - ref: main - path: golden-images - - name: Setup Windows - if: runner.os == 'Windows' - uses: llvm/actions/setup-windows@main - with: - arch: amd64 - - name: Build DXC - run: | - cd DXC - mkdir build - cd build - cmake -G Ninja -DCMAKE_BUILD_TYPE=Release -C ${{ github.workspace }}/DXC/cmake/caches/PredefinedParams.cmake -C ${{ github.workspace }}/OffloadTest/cmake/caches/sccache.cmake -DHLSL_DISABLE_SOURCE_GENERATION=On ${{ github.workspace }}/DXC/ - ninja dxv llvm-dis - - name: Build LLVM - run: | - cd llvm-project - mkdir build - cd build - cmake -G Ninja -DDXIL_DIS=${{ github.workspace 
}}/DXC/build/bin/llvm-dis -DLLVM_INCLUDE_DXIL_TESTS=On -DCMAKE_BUILD_TYPE=Release -C ${{ github.workspace }}/llvm-project/clang/cmake/caches/HLSL.cmake -C ${{ github.workspace }}/OffloadTest/cmake/caches/sccache.cmake -DDXC_DIR=${{ github.workspace }}/DXC/build/bin -DLLVM_EXTERNAL_OFFLOADTEST_SOURCE_DIR=${{ github.workspace }}/OffloadTest -DLLVM_EXTERNAL_PROJECTS="OffloadTest" -DLLVM_LIT_ARGS="--xunit-xml-output=testresults.xunit.xml -v" -DGOLDENIMAGE_DIR=${{ github.workspace }}/golden-images ${{ github.workspace }}/llvm-project/llvm/ - ninja hlsl-test-depends llvm-test-depends clang-test-depends - - name: Run HLSL Tests - run: | - cd llvm-project - cd build - ninja check-llvm - ninja check-clang - ninja check-hlsl-unit - ninja ${{ inputs.TestTarget }} - - name: Publish Test Results - uses: EnricoMi/publish-unit-test-result-action/macos@170bf24d20d201b842d7a52403b73ed297e6645b # v2 - if: always() && runner.os == 'macOS' - with: - comment_mode: off - files: llvm-project/build/**/testresults.xunit.xml diff --git a/.github/workflows/issue-release-workflow.yml b/.github/workflows/issue-release-workflow.yml deleted file mode 100644 index 5027d4f3ea6f..000000000000 --- a/.github/workflows/issue-release-workflow.yml +++ /dev/null @@ -1,69 +0,0 @@ -# This contains the workflow definitions that allow users to test backports -# to the release branch using comments on issues. -# -# /cherry-pick <...> -# -# This comment will attempt to cherry-pick the given commits to the latest -# release branch (release/Y.x) and if successful, push the result to a branch -# on github. -# -# /branch // -# -# This comment will create a pull request from to the latest release -# branch. 
- -name: Issue Release Workflow - -permissions: - contents: read - -on: - issue_comment: - types: - - created - - edited - issues: - types: - - opened - -env: - COMMENT_BODY: ${{ github.event.action == 'opened' && github.event.issue.body || github.event.comment.body }} - -jobs: - backport-commits: - name: Backport Commits - runs-on: ubuntu-latest - permissions: - issues: write - pull-requests: write - if: >- - (github.repository == 'llvm/llvm-project') && - !startswith(github.event.comment.body, '') && - contains(github.event.action == 'opened' && github.event.issue.body || github.event.comment.body, '/cherry-pick') - steps: - - name: Fetch LLVM sources - uses: actions/checkout@v4 - with: - repository: llvm/llvm-project - # GitHub stores the token used for checkout and uses it for pushes - # too, but we want to use a different token for pushing, so we need - # to disable persist-credentials here. - persist-credentials: false - fetch-depth: 0 - - - name: Setup Environment - run: | - pip install --require-hashes -r ./llvm/utils/git/requirements.txt - ./llvm/utils/git/github-automation.py --token ${{ github.token }} setup-llvmbot-git - - - name: Backport Commits - run: | - printf "%s" "$COMMENT_BODY" | - ./llvm/utils/git/github-automation.py \ - --repo "$GITHUB_REPOSITORY" \ - --token "${{ secrets.RELEASE_WORKFLOW_PR_CREATE }}" \ - release-workflow \ - --branch-repo-token ${{ secrets.RELEASE_WORKFLOW_PUSH_SECRET }} \ - --issue-number ${{ github.event.issue.number }} \ - --requested-by ${{ (github.event.action == 'opened' && github.event.issue.user.login) || github.event.comment.user.login }} \ - auto diff --git a/.github/workflows/issue-subscriber.yml b/.github/workflows/issue-subscriber.yml deleted file mode 100644 index ef4fdf441819..000000000000 --- a/.github/workflows/issue-subscriber.yml +++ /dev/null @@ -1,37 +0,0 @@ -name: Issue Subscriber - -on: - issues: - types: - - labeled - -permissions: - contents: read - -jobs: - auto-subscribe: - runs-on: ubuntu-latest 
- if: github.repository == 'llvm/llvm-project' - steps: - - name: Checkout Automation Script - uses: actions/checkout@v4 - with: - sparse-checkout: llvm/utils/git/ - ref: main - - - name: Setup Automation Script - working-directory: ./llvm/utils/git/ - run: | - pip install --require-hashes -r requirements.txt - - - name: Update watchers - working-directory: ./llvm/utils/git/ - # https://docs.github.com/en/actions/security-guides/security-hardening-for-github-actions#using-an-intermediate-environment-variable - env: - LABEL_NAME: ${{ github.event.label.name }} - run: | - python3 ./github-automation.py \ - --token '${{ secrets.ISSUE_SUBSCRIBER_TOKEN }}' \ - issue-subscriber \ - --issue-number '${{ github.event.issue.number }}' \ - --label-name "$LABEL_NAME" diff --git a/.github/workflows/issue-write.yml b/.github/workflows/issue-write.yml deleted file mode 100644 index 5334157a7fd2..000000000000 --- a/.github/workflows/issue-write.yml +++ /dev/null @@ -1,157 +0,0 @@ -name: Comment on an issue - -on: - workflow_run: - workflows: - - "Check code formatting" - - "Check for private emails used in PRs" - - "PR Request Release Note" - types: - - completed - -permissions: - contents: read - -jobs: - pr-comment: - runs-on: ubuntu-latest - permissions: - pull-requests: write - if: > - github.event.workflow_run.event == 'pull_request' && - ( - github.event.workflow_run.conclusion == 'success' || - github.event.workflow_run.conclusion == 'failure' - ) - steps: - - name: Fetch Sources - uses: actions/checkout@v4 - with: - sparse-checkout: | - .github/workflows/unprivileged-download-artifact/action.yml - sparse-checkout-cone-mode: false - - name: 'Download artifact' - uses: ./.github/workflows/unprivileged-download-artifact - id: download-artifact - with: - run-id: ${{ github.event.workflow_run.id }} - artifact-name: workflow-args - - - name: 'Comment on PR' - if: steps.download-artifact.outputs.artifact-id != '' - uses: actions/github-script@v3 - with: - github-token: ${{ 
secrets.GITHUB_TOKEN }} - script: | - var fs = require('fs'); - const comments = JSON.parse(fs.readFileSync('./comments')); - if (!comments || comments.length == 0) { - return; - } - - let runInfo = await github.actions.getWorkflowRun({ - owner: context.repo.owner, - repo: context.repo.repo, - run_id: context.payload.workflow_run.id - }); - - console.log(runInfo); - - - // Query to find the number of the pull request that triggered this job. - // The associated pull requests are based off of the branch name, so if - // you create a pull request for a branch, close it, and then create - // another pull request with the same branch, then this query will return - // two associated pull requests. This is why we have to fetch all the - // associated pull requests and then iterate through them to find the - // one that is open. - const gql_query = ` - query($repo_owner : String!, $repo_name : String!, $branch: String!) { - repository(owner: $repo_owner, name: $repo_name) { - ref (qualifiedName: $branch) { - associatedPullRequests(first: 100) { - nodes { - baseRepository { - owner { - login - } - } - number - state - } - } - } - } - } - ` - const gql_variables = { - repo_owner: runInfo.data.head_repository.owner.login, - repo_name: runInfo.data.head_repository.name, - branch: runInfo.data.head_branch - } - const gql_result = await github.graphql(gql_query, gql_variables); - console.log(gql_result); - // If the branch for the PR was deleted before this job has a chance - // to run, then the ref will be null. This can happen if someone: - // 1. Rebase the PR, which triggers some workflow. - // 2. Immediately merges the PR and deletes the branch. - // 3. The workflow finishes and triggers this job. 
- if (!gql_result.repository.ref) { - console.log("Ref has been deleted"); - return; - } - console.log(gql_result.repository.ref.associatedPullRequests.nodes); - - var pr_number = 0; - gql_result.repository.ref.associatedPullRequests.nodes.forEach((pr) => { - - // The largest PR number is the one we care about. The only way - // to have more than one associated pull requests is if all the - // old pull requests are in the closed state. - if (pr.baseRepository.owner.login = context.repo.owner && pr.number > pr_number) { - pr_number = pr.number; - } - }); - if (pr_number == 0) { - console.log("Error retrieving pull request number"); - return; - } - - await comments.forEach(function (comment) { - if (comment.id) { - // Security check: Ensure that this comment was created by - // the github-actions bot, so a malicious input won't overwrite - // a user's comment. - github.issues.getComment({ - owner: context.repo.owner, - repo: context.repo.repo, - comment_id: comment.id - }).then((old_comment) => { - console.log(old_comment); - if (old_comment.data.user.login != "github-actions[bot]") { - console.log("Invalid comment id: " + comment.id); - return; - } - github.issues.updateComment({ - owner: context.repo.owner, - repo: context.repo.repo, - issue_number: pr_number, - comment_id: comment.id, - body: comment.body - }); - }); - } else { - github.issues.createComment({ - owner: context.repo.owner, - repo: context.repo.repo, - issue_number: pr_number, - body: comment.body - }); - } - }); - - - name: Dump comments file - if: >- - always() && - steps.download-artifact.outputs.artifact-id != '' - run: cat comments diff --git a/.github/workflows/libc-fullbuild-tests.yml b/.github/workflows/libc-fullbuild-tests.yml deleted file mode 100644 index 2c88da653aae..000000000000 --- a/.github/workflows/libc-fullbuild-tests.yml +++ /dev/null @@ -1,96 +0,0 @@ -# This workflow is for pre-commit testing of the LLVM-libc project. 
-name: LLVM-libc Pre-commit Fullbuild Tests -permissions: - contents: read -on: - pull_request: - branches: [ "main" ] - paths: - - 'libc/**' - - '.github/workflows/libc-fullbuild-tests.yml' - -jobs: - build: - runs-on: ${{ matrix.os }} - strategy: - fail-fast: false - matrix: - include: - - os: ubuntu-24.04 - ccache-variant: sccache - c_compiler: clang - cpp_compiler: clang++ - # TODO: remove ccache logic when https://github.com/hendrikmuhs/ccache-action/issues/279 is resolved. - - os: ubuntu-24.04-arm - ccache-variant: ccache - c_compiler: clang - cpp_compiler: clang++ - # TODO: add back gcc build when it is fixed - # - c_compiler: gcc - # cpp_compiler: g++ - steps: - - uses: actions/checkout@v4 - - # Libc's build is relatively small comparing with other components of LLVM. - # A fresh fullbuild takes about 190MiB of uncompressed disk space, which can - # be compressed into ~40MiB. Limiting the cache size to 1G should be enough. - # Prefer sccache as it is more modern. - # Do not use direct GHAC access even though it is supported by sccache. GHAC rejects - # frequent small object writes. - - name: Setup ccache - uses: hendrikmuhs/ccache-action@v1.2 - with: - max-size: 1G - key: libc_fullbuild_${{ matrix.c_compiler }} - variant: ${{ matrix.ccache-variant }} - - # Notice: - # - MPFR is required by some of the mathlib tests. - # - Debian has a multilib setup, so we need to symlink the asm directory. 
- # For more information, see https://wiki.debian.org/Multiarch/LibraryPathOverview - - name: Prepare dependencies (Ubuntu) - run: | - sudo apt-get update - sudo apt-get install -y libmpfr-dev libgmp-dev libmpc-dev ninja-build linux-libc-dev - sudo ln -sf /usr/include/$(uname -p)-linux-gnu/asm /usr/include/asm - - - name: Set reusable strings - id: strings - shell: bash - run: | - echo "build-output-dir=${{ github.workspace }}/build" >> "$GITHUB_OUTPUT" - echo "build-install-dir=${{ github.workspace }}/install" >> "$GITHUB_OUTPUT" - - # Configure libc fullbuild with scudo. - # Use MinSizeRel to reduce the size of the build. - - name: Configure CMake - run: > - cmake -B ${{ steps.strings.outputs.build-output-dir }} - -DCMAKE_CXX_COMPILER=${{ matrix.cpp_compiler }} - -DCMAKE_C_COMPILER=${{ matrix.c_compiler }} - -DCMAKE_BUILD_TYPE=MinSizeRel - -DCMAKE_C_COMPILER_LAUNCHER=${{ matrix.ccache-variant }} - -DCMAKE_CXX_COMPILER_LAUNCHER=${{ matrix.ccache-variant }} - -DCMAKE_INSTALL_PREFIX=${{ steps.strings.outputs.build-install-dir }} - -DLLVM_ENABLE_RUNTIMES="libc;compiler-rt" - -DLLVM_LIBC_FULL_BUILD=ON - -DLLVM_LIBC_INCLUDE_SCUDO=ON - -DCOMPILER_RT_BUILD_SCUDO_STANDALONE_WITH_LLVM_LIBC=ON - -DCOMPILER_RT_BUILD_GWP_ASAN=OFF - -DCOMPILER_RT_SCUDO_STANDALONE_BUILD_SHARED=OFF - -G Ninja - -S ${{ github.workspace }}/runtimes - - - name: Build - run: > - cmake - --build ${{ steps.strings.outputs.build-output-dir }} - --parallel - --target install - - - name: Test - run: > - cmake - --build ${{ steps.strings.outputs.build-output-dir }} - --parallel - --target check-libc diff --git a/.github/workflows/libc-overlay-tests.yml b/.github/workflows/libc-overlay-tests.yml deleted file mode 100644 index 0a0916084b18..000000000000 --- a/.github/workflows/libc-overlay-tests.yml +++ /dev/null @@ -1,120 +0,0 @@ -# This workflow is for pre-commit testing of the LLVM-libc project. 
-name: LLVM-libc Pre-commit Overlay Tests -permissions: - contents: read -on: - pull_request: - branches: [ "main" ] - paths: - - 'libc/**' - - '.github/workflows/libc-overlay-tests.yml' - -jobs: - build: - runs-on: ${{ matrix.os }} - strategy: - # Set fail-fast to false to ensure that feedback is delivered for all matrix combinations. - fail-fast: false - matrix: - include: - # TODO: add linux gcc when it is fixed - - os: ubuntu-24.04 - ccache-variant: sccache - compiler: - c_compiler: clang - cpp_compiler: clang++ - # TODO: remove ccache logic when https://github.com/hendrikmuhs/ccache-action/issues/279 is resolved. - - os: ubuntu-24.04-arm - ccache-variant: ccache - compiler: - c_compiler: clang - cpp_compiler: clang++ - - os: windows-2022 - ccache-variant: sccache - compiler: - c_compiler: clang-cl - cpp_compiler: clang-cl - - os: windows-2025 - ccache-variant: sccache - compiler: - c_compiler: clang-cl - cpp_compiler: clang-cl - - os: macos-14 - ccache-variant: sccache - compiler: - c_compiler: clang - cpp_compiler: clang++ - - steps: - - uses: actions/checkout@v4 - - # Libc's build is relatively small comparing with other components of LLVM. - # A fresh linux overlay takes about 180MiB of uncompressed disk space, which can - # be compressed into ~40MiB. MacOS and Windows overlay builds are less than 10MiB - # after compression. Limiting the cache size to 1G should be enough. - # Prefer sccache as it is modern and it has a guarantee to work with MSVC. - # Do not use direct GHAC access even though it is supported by sccache. GHAC rejects - # frequent small object writes. - - name: Setup ccache - uses: hendrikmuhs/ccache-action@v1 - with: - max-size: 1G - key: libc_overlay_build_${{ matrix.os }}_${{ matrix.compiler.c_compiler }} - variant: ${{ matrix.ccache-variant }} - - # MPFR is required by some of the mathlib tests. 
- - name: Prepare dependencies (Ubuntu) - if: runner.os == 'Linux' - run: | - sudo apt-get update - sudo apt-get install -y libmpfr-dev libgmp-dev libmpc-dev ninja-build - - # Chocolatey is shipped with Windows runners. Windows Server 2025 recommends WinGet. - # Consider migrating to WinGet when Windows Server 2025 is available. - - name: Prepare dependencies (Windows) - if: runner.os == 'Windows' - run: | - choco install ninja - - - name: Prepare dependencies (macOS) - if: runner.os == 'macOS' - run: | - brew install ninja - - - name: Set reusable strings - id: strings - shell: bash - run: | - echo "build-output-dir=${{ github.workspace }}/build" >> "$GITHUB_OUTPUT" - - # Use MinSizeRel to reduce the size of the build. - # Notice that CMP0141=NEW and MSVC_DEBUG_INFORMATION_FORMAT=Embedded are required - # by the sccache tool. - - name: Configure CMake - run: > - cmake -B ${{ steps.strings.outputs.build-output-dir }} - -DCMAKE_CXX_COMPILER=${{ matrix.compiler.cpp_compiler }} - -DCMAKE_C_COMPILER=${{ matrix.compiler.c_compiler }} - -DCMAKE_BUILD_TYPE=MinSizeRel - -DCMAKE_C_COMPILER_LAUNCHER=${{ matrix.ccache-variant }} - -DCMAKE_CXX_COMPILER_LAUNCHER=${{ matrix.ccache-variant }} - -DCMAKE_POLICY_DEFAULT_CMP0141=NEW - -DCMAKE_MSVC_DEBUG_INFORMATION_FORMAT=Embedded - -DLLVM_ENABLE_RUNTIMES=libc - -G Ninja - -S ${{ github.workspace }}/runtimes - - - name: Build - run: > - cmake - --build ${{ steps.strings.outputs.build-output-dir }} - --parallel - --config MinSizeRel - --target libc - - - name: Test - run: > - cmake - --build ${{ steps.strings.outputs.build-output-dir }} - --parallel - --target check-libc diff --git a/.github/workflows/libclang-abi-tests.yml b/.github/workflows/libclang-abi-tests.yml deleted file mode 100644 index 65cffccff776..000000000000 --- a/.github/workflows/libclang-abi-tests.yml +++ /dev/null @@ -1,171 +0,0 @@ -name: libclang ABI Tests - -permissions: - contents: read - -on: - workflow_dispatch: - push: - branches: - - 'release/**' - paths: - - 
'clang/**' - - '.github/workflows/libclang-abi-tests.yml' - pull_request: - branches: - - 'release/**' - paths: - - 'clang/**' - - '.github/workflows/libclang-abi-tests.yml' - -concurrency: - # Skip intermediate builds: always. - # Cancel intermediate builds: only if it is a pull request build. - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: ${{ startsWith(github.ref, 'refs/pull/') }} - -jobs: - abi-dump-setup: - if: github.repository_owner == 'llvm' - runs-on: ubuntu-latest - outputs: - BASELINE_REF: ${{ steps.vars.outputs.BASELINE_REF }} - ABI_HEADERS: ${{ steps.vars.outputs.ABI_HEADERS }} - ABI_LIBS: ${{ steps.vars.outputs.ABI_LIBS }} - BASELINE_VERSION_MAJOR: ${{ steps.vars.outputs.BASELINE_VERSION_MAJOR }} - LLVM_VERSION_MAJOR: ${{ steps.version.outputs.major }} - LLVM_VERSION_MINOR: ${{ steps.version.outputs.minor }} - LLVM_VERSION_PATCH: ${{ steps.version.outputs.patch }} - steps: - - name: Checkout source - uses: actions/checkout@v4 - with: - fetch-depth: 250 - - - name: Get LLVM version - id: version - uses: ./.github/workflows/get-llvm-version - - - name: Setup Variables - id: vars - run: | - remote_repo='https://github.com/llvm/llvm-project' - if [ ${{ steps.version.outputs.patch }} -eq 0 ]; then - major_version=$(( ${{ steps.version.outputs.major }} - 1)) - baseline_ref="llvmorg-$major_version.1.0" - - # If there is a minor release, we want to use that as the base line. 
- minor_ref=$(git ls-remote --refs -t "$remote_repo" llvmorg-"$major_version".[1-9].[0-9] | tail -n1 | grep -o 'llvmorg-.\+' || true) - if [ -n "$minor_ref" ]; then - baseline_ref="$minor_ref" - else - # Check if we have a release candidate - rc_ref=$(git ls-remote --refs -t "$remote_repo" llvmorg-"$major_version".[1-9].[0-9]-rc* | tail -n1 | grep -o 'llvmorg-.\+' || true) - if [ -n "$rc_ref" ]; then - baseline_ref="$rc_ref" - fi - fi - { - echo "BASELINE_VERSION_MAJOR=$major_version" - echo "BASELINE_REF=$baseline_ref" - echo "ABI_HEADERS=clang-c" - echo "ABI_LIBS=libclang.so" - } >> "$GITHUB_OUTPUT" - else - { - echo "BASELINE_VERSION_MAJOR=${{ steps.version.outputs.major }}" - echo "BASELINE_REF=llvmorg-${{ steps.version.outputs.major }}.1.0" - echo "ABI_HEADERS=." - echo "ABI_LIBS=libclang.so libclang-cpp.so" - } >> "$GITHUB_OUTPUT" - fi - - abi-dump: - if: github.repository_owner == 'llvm' - needs: abi-dump-setup - runs-on: ubuntu-latest - strategy: - matrix: - name: - - build-baseline - - build-latest - include: - - name: build-baseline - llvm_version_major: ${{ needs.abi-dump-setup.outputs.BASELINE_VERSION_MAJOR }} - ref: ${{ needs.abi-dump-setup.outputs.BASELINE_REF }} - repo: llvm/llvm-project - - name: build-latest - llvm_version_major: ${{ needs.abi-dump-setup.outputs.LLVM_VERSION_MAJOR }} - ref: ${{ github.sha }} - repo: ${{ github.repository }} - steps: - - name: Install Ninja - uses: llvm/actions/install-ninja@main - - name: Install abi-compliance-checker - run: | - sudo apt-get update - sudo apt-get install -y abi-dumper autoconf pkg-config - - name: Install universal-ctags - run: | - git clone https://github.com/universal-ctags/ctags.git - cd ctags - ./autogen.sh - ./configure - sudo make install - - name: Download source code - uses: llvm/actions/get-llvm-project-src@main - with: - ref: ${{ matrix.ref }} - repo: ${{ matrix.repo }} - - name: Configure - run: | - mkdir install - cmake -B build -S llvm -G Ninja -DLLVM_ENABLE_PROJECTS=clang 
-DCMAKE_BUILD_TYPE=Debug -DLLVM_TARGETS_TO_BUILD="" -DLLVM_BUILD_LLVM_DYLIB=ON -DLLVM_LINK_LLVM_DYLIB=ON -DCMAKE_C_FLAGS_DEBUG="-g1 -Og" -DCMAKE_CXX_FLAGS_DEBUG="-g1 -Og" -DCMAKE_INSTALL_PREFIX="$(pwd)"/install llvm - - name: Build - run: ninja -C build/ ${{ needs.abi-dump-setup.outputs.ABI_LIBS }} install-clang-headers - - name: Dump ABI - run: | - parallel abi-dumper -lver ${{ matrix.ref }} -skip-cxx -public-headers ./install/include/${{ needs.abi-dump-setup.outputs.ABI_HEADERS }} -o {}-${{ matrix.ref }}.abi ./build/lib/{} ::: ${{ needs.abi-dump-setup.outputs.ABI_LIBS }} - for lib in ${{ needs.abi-dump-setup.outputs.ABI_LIBS }}; do - # Remove symbol versioning from dumps, so we can compare across major versions. - sed -i 's/LLVM_[0-9]\+/LLVM_NOVERSION/' $lib-${{ matrix.ref }}.abi - done - - name: Upload ABI file - uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # 4.6.0 - with: - name: ${{ matrix.name }} - path: '*${{ matrix.ref }}.abi' - - abi-compare: - if: github.repository_owner == 'llvm' - runs-on: ubuntu-latest - needs: - - abi-dump-setup - - abi-dump - steps: - - name: Download baseline - uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # 4.1.8 - with: - name: build-baseline - path: build-baseline - - name: Download latest - uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # 4.1.8 - with: - name: build-latest - path: build-latest - - - name: Install abi-compliance-checker - run: | - sudo apt-get update - sudo apt-get install -y abi-compliance-checker - - name: Compare ABI - run: | - for lib in ${{ needs.abi-dump-setup.outputs.ABI_LIBS }}; do - abi-compliance-checker -lib $lib -old build-baseline/$lib*.abi -new build-latest/$lib*.abi - done - - name: Upload ABI Comparison - if: always() - uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # 4.6.0 - with: - name: compat-report-${{ github.sha }} - path: compat_reports/ diff --git 
a/.github/workflows/libclang-python-tests.yml b/.github/workflows/libclang-python-tests.yml deleted file mode 100644 index d8f58c5b8d1c..000000000000 --- a/.github/workflows/libclang-python-tests.yml +++ /dev/null @@ -1,41 +0,0 @@ -name: Libclang Python Binding Tests - -permissions: - contents: read - -on: - workflow_dispatch: - push: - branches: - - 'main' - paths: - - 'clang/bindings/python/**' - - 'clang/tools/libclang/**' - - 'clang/CMakeList.txt' - - '.github/workflows/libclang-python-tests.yml' - - '.github/workflows/llvm-project-tests.yml' - pull_request: - paths: - - 'clang/bindings/python/**' - - 'clang/tools/libclang/**' - - 'clang/CMakeList.txt' - - '.github/workflows/libclang-python-tests.yml' - - '.github/workflows/llvm-project-tests.yml' - -jobs: - check-clang-python: - # Build libclang and then run the libclang Python binding's unit tests. - name: Build and run Python unit tests - if: github.repository == 'llvm/llvm-project' - strategy: - fail-fast: false - matrix: - python-version: ["3.8", "3.11"] - uses: ./.github/workflows/llvm-project-tests.yml - with: - build_target: check-clang-python - projects: clang - # There is an issue running on "windows-2019". - # See https://github.com/llvm/llvm-project/issues/76601#issuecomment-1873049082. - os_list: '["ubuntu-22.04"]' - python_version: ${{ matrix.python-version }} diff --git a/.github/workflows/libcxx-build-and-test.yaml b/.github/workflows/libcxx-build-and-test.yaml deleted file mode 100644 index 3346c1322a07..000000000000 --- a/.github/workflows/libcxx-build-and-test.yaml +++ /dev/null @@ -1,277 +0,0 @@ -# This file defines pre-commit CI for libc++, libc++abi, and libunwind (on Github). -# -# We split the configurations in multiple stages with the intent of saving compute time -# when a job fails early in the pipeline. This is why the jobs are marked as `continue-on-error: false`. -# We try to run the CI configurations with the most signal in the first stage. 
-# -# Stages 1 & 2 are meant to be "smoke tests", and are meant to catch most build/test failures quickly and without using -# too many resources. -# Stage 3 is "everything else", and is meant to catch breakages on more niche or unique configurations. -# -# Therefore, we "fail-fast" for any failures during stages 1 & 2, meaning any job failing cancels all other running jobs, -# under the assumption that if the "smoke tests" fail, then the other configurations will likely fail in the same way. -# However, stage 3 does not fail fast, as it's more likely that any one job failing is a flake or a configuration-specific -# -name: Build and Test libc++ -on: - pull_request: - paths: - - 'libcxx/**' - - 'libcxxabi/**' - - 'libunwind/**' - - 'runtimes/**' - - 'cmake/**' - - '.github/workflows/libcxx-build-and-test.yaml' - schedule: - # Run nightly at 08:00 UTC (aka 00:00 Pacific, aka 03:00 Eastern) - - cron: '0 8 * * *' - -permissions: - contents: read # Default everything to read-only - -concurrency: - group: ${{ github.workflow }}-${{ github.event.pull_request.number }} - cancel-in-progress: true - -jobs: - stage1: - if: github.repository_owner == 'llvm' - runs-on: libcxx-self-hosted-linux - container: ghcr.io/llvm/libcxx-linux-builder:d8a0709b1090350a7fe3604d8ab78c7d62f10698 - continue-on-error: false - strategy: - fail-fast: false - matrix: - config: [ - 'frozen-cxx03-headers', - 'generic-cxx03', - 'generic-cxx26', - 'generic-modules' - ] - cc: [ 'clang-20' ] - cxx: [ 'clang++-20' ] - include: - - config: 'generic-gcc' - cc: 'gcc-14' - cxx: 'g++-14' - steps: - - uses: actions/checkout@v4 - - name: ${{ matrix.config }}.${{ matrix.cxx }} - run: libcxx/utils/ci/run-buildbot ${{ matrix.config }} - env: - CC: ${{ matrix.cc }} - CXX: ${{ matrix.cxx }} - - uses: actions/upload-artifact@26f96dfa697d77e81fd5907df203aa23a56210a8 # v4.3.0 - if: always() - with: - name: ${{ matrix.config }}-${{ matrix.cxx }}-results - path: | - **/test-results.xml - **/*.abilist - 
**/CMakeConfigureLog.yaml - **/CMakeError.log - **/CMakeOutput.log - **/crash_diagnostics/* - stage2: - if: github.repository_owner == 'llvm' - runs-on: libcxx-self-hosted-linux - container: ghcr.io/llvm/libcxx-linux-builder:d8a0709b1090350a7fe3604d8ab78c7d62f10698 - needs: [ stage1 ] - continue-on-error: false - strategy: - fail-fast: false - matrix: - config: [ - 'generic-cxx11', - 'generic-cxx14', - 'generic-cxx17', - 'generic-cxx20', - 'generic-cxx23' - ] - cc: [ 'clang-20' ] - cxx: [ 'clang++-20' ] - include: - - config: 'generic-gcc-cxx11' - cc: 'gcc-14' - cxx: 'g++-14' - - config: 'generic-cxx23' - cc: 'clang-18' - cxx: 'clang++-18' - - config: 'generic-cxx26' - cc: 'clang-19' - cxx: 'clang++-19' - steps: - - uses: actions/checkout@v4 - - name: ${{ matrix.config }} - run: libcxx/utils/ci/run-buildbot ${{ matrix.config }} - env: - CC: ${{ matrix.cc }} - CXX: ${{ matrix.cxx }} - - uses: actions/upload-artifact@26f96dfa697d77e81fd5907df203aa23a56210a8 # v4.3.0 - if: always() # Upload artifacts even if the build or test suite fails - with: - name: ${{ matrix.config }}-${{ matrix.cxx }}-results - path: | - **/test-results.xml - **/*.abilist - **/CMakeConfigureLog.yaml - **/CMakeError.log - **/CMakeOutput.log - **/crash_diagnostics/* - stage3: - if: github.repository_owner == 'llvm' - needs: [ stage1, stage2 ] - continue-on-error: false - strategy: - fail-fast: false - max-parallel: 8 - matrix: - config: [ - 'generic-abi-unstable', - 'generic-hardening-mode-debug', - 'generic-hardening-mode-extensive', - 'generic-hardening-mode-fast', - 'generic-hardening-mode-fast-with-abi-breaks', - 'generic-merged', - 'generic-modules-lsv', - 'generic-no-exceptions', - 'generic-no-experimental', - 'generic-no-filesystem', - 'generic-no-localization', - 'generic-no-terminal', - 'generic-no-random_device', - 'generic-no-threads', - 'generic-no-tzdb', - 'generic-no-unicode', - 'generic-no-wide-characters', - 'generic-no-rtti', - 'generic-optimized-speed', - 'generic-static', - 
'bootstrapping-build' - ] - machine: [ 'libcxx-self-hosted-linux' ] - include: - - config: 'generic-cxx26' - machine: libcxx-self-hosted-linux - - config: 'generic-asan' - machine: libcxx-self-hosted-linux - - config: 'generic-tsan' - machine: libcxx-self-hosted-linux - - config: 'generic-ubsan' - machine: libcxx-self-hosted-linux - # Use a larger machine for MSAN to avoid timeout and memory allocation issues. - - config: 'generic-msan' - machine: libcxx-self-hosted-linux - runs-on: ${{ matrix.machine }} - container: ghcr.io/llvm/libcxx-linux-builder:d8a0709b1090350a7fe3604d8ab78c7d62f10698 - steps: - - uses: actions/checkout@v4 - - name: ${{ matrix.config }} - run: libcxx/utils/ci/run-buildbot ${{ matrix.config }} - env: - CC: clang-20 - CXX: clang++-20 - - uses: actions/upload-artifact@26f96dfa697d77e81fd5907df203aa23a56210a8 # v4.3.0 - if: always() - with: - name: ${{ matrix.config }}-results - path: | - **/test-results.xml - **/*.abilist - **/CMakeConfigureLog.yaml - **/CMakeError.log - **/CMakeOutput.log - **/crash_diagnostics/* - - macos: - needs: [ stage1 ] - strategy: - fail-fast: false - matrix: - include: - - config: generic-cxx03 - os: macos-15 - - config: generic-cxx23 - os: macos-15 - - config: generic-modules - os: macos-15 - - config: apple-configuration - os: macos-15 - - config: apple-system - os: macos-13 - - config: apple-system-hardened - os: macos-13 - runs-on: ${{ matrix.os }} - steps: - - uses: actions/checkout@v4 - - uses: maxim-lobanov/setup-xcode@v1 - with: - xcode-version: 'latest' - - uses: seanmiddleditch/gha-setup-ninja@master - - name: Build and test - run: | - python3 -m venv .venv - source .venv/bin/activate - python -m pip install psutil - bash libcxx/utils/ci/run-buildbot ${{ matrix.config }} - - uses: actions/upload-artifact@26f96dfa697d77e81fd5907df203aa23a56210a8 # v4.3.0 - if: always() # Upload artifacts even if the build or test suite fails - with: - name: macos-${{ matrix.config }}-results - path: | - **/test-results.xml - 
**/*.abilist - **/CMakeConfigureLog.yaml - **/CMakeError.log - **/CMakeOutput.log - **/crash_diagnostics/* - - windows: - runs-on: windows-2022 - needs: [ stage1 ] - strategy: - fail-fast: false - matrix: - include: - - { config: clang-cl-dll, mingw: false } - - { config: clang-cl-static, mingw: false } - - { config: clang-cl-no-vcruntime, mingw: false } - - { config: clang-cl-debug, mingw: false } - - { config: clang-cl-static-crt, mingw: false } - - { config: mingw-dll, mingw: true } - - { config: mingw-static, mingw: true } - - { config: mingw-dll-i686, mingw: true } - - { config: mingw-incomplete-sysroot, mingw: true } - steps: - - uses: actions/checkout@v4 - - name: Install dependencies - run: | - choco install -y ninja - pip install psutil - - name: Install a current LLVM - if: ${{ matrix.mingw != true }} - run: | - choco install -y llvm --version=19.1.7 --allow-downgrade - - name: Install llvm-mingw - if: ${{ matrix.mingw == true }} - run: | - curl -LO https://github.com/mstorsjo/llvm-mingw/releases/download/20250114/llvm-mingw-20250114-ucrt-x86_64.zip - powershell Expand-Archive llvm-mingw*.zip -DestinationPath . 
- del llvm-mingw*.zip - mv llvm-mingw* c:\llvm-mingw - echo "c:\llvm-mingw\bin" | Out-File -FilePath $Env:GITHUB_PATH -Encoding utf8 -Append - - name: Simulate a from-scratch build of llvm-mingw - if: ${{ matrix.config == 'mingw-incomplete-sysroot' }} - run: | - rm -r c:\llvm-mingw\include\c++ - rm -r c:\llvm-mingw\*-w64-mingw32\lib\libc++* - rm -r c:\llvm-mingw\*-w64-mingw32\lib\libunwind* - - name: Add Git Bash to the path - run: | - echo "c:\Program Files\Git\usr\bin" | Out-File -FilePath $Env:GITHUB_PATH -Encoding utf8 -Append - - name: Set up the MSVC dev environment - if: ${{ matrix.mingw != true }} - uses: ilammy/msvc-dev-cmd@v1 - - name: Build and test - run: | - bash libcxx/utils/ci/run-buildbot ${{ matrix.config }} diff --git a/.github/workflows/libcxx-build-containers.yml b/.github/workflows/libcxx-build-containers.yml deleted file mode 100644 index 2d040f712ce5..000000000000 --- a/.github/workflows/libcxx-build-containers.yml +++ /dev/null @@ -1,71 +0,0 @@ -# This file defines an action that builds the various Docker images used to run -# libc++ CI whenever modifications to those Docker files are pushed to `main`. -# -# The images are pushed to the LLVM package registry at https://github.com/orgs/llvm/packages -# and tagged appropriately. The selection of which Docker image version is used by the libc++ -# CI nodes at any given point is controlled from the workflow files themselves. 
- -name: Build Docker images for libc++ CI - -permissions: - contents: read - packages: write - -on: - push: - branches: - - main - paths: - - 'libcxx/utils/ci/**' - - '.github/workflows/libcxx-build-containers.yml' - pull_request: - branches: - - main - paths: - - 'libcxx/utils/ci/**' - - '.github/workflows/libcxx-build-containers.yml' - -jobs: - build-and-push: - runs-on: ubuntu-latest - if: github.repository_owner == 'llvm' - permissions: - packages: write - - steps: - - uses: actions/checkout@v4 - - - name: Build the Linux builder image - working-directory: libcxx/utils/ci - run: docker compose build actions-builder - env: - TAG: ${{ github.sha }} - - # - name: Build the Android builder image - # working-directory: libcxx/utils/ci - # run: docker compose build android-buildkite-builder - # env: - # TAG: ${{ github.sha }} - - - name: Log in to GitHub Container Registry - uses: docker/login-action@v3 - with: - registry: ghcr.io - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - - name: Push the Linux builder image - if: github.event_name == 'push' - working-directory: libcxx/utils/ci - run: | - docker compose push actions-builder - env: - TAG: ${{ github.sha }} - - # - name: Push the Android builder image - # if: github.event_name == 'push' - # working-directory: libcxx/utils/ci - # run: | - # docker compose push android-buildkite-builder - # env: - # TAG: ${{ github.sha }} diff --git a/.github/workflows/libcxx-check-generated-files.yml b/.github/workflows/libcxx-check-generated-files.yml deleted file mode 100644 index 570055624b2a..000000000000 --- a/.github/workflows/libcxx-check-generated-files.yml +++ /dev/null @@ -1,24 +0,0 @@ -name: "Check libc++ generated files" -on: - pull_request: - paths: - - 'libcxx/**' - -permissions: - contents: read - -jobs: - check_generated_files: - runs-on: ubuntu-latest - steps: - - name: Fetch LLVM sources - uses: actions/checkout@v4 - - - name: Install dependencies - uses: aminya/setup-cpp@v1 - with: - 
clangformat: 17.0.1 - ninja: true - - - name: Check generated files - run: libcxx/utils/ci/run-buildbot check-generated-output diff --git a/.github/workflows/libcxx-restart-preempted-jobs.yaml b/.github/workflows/libcxx-restart-preempted-jobs.yaml deleted file mode 100644 index e7e3772d4de2..000000000000 --- a/.github/workflows/libcxx-restart-preempted-jobs.yaml +++ /dev/null @@ -1,243 +0,0 @@ -name: Restart Preempted Libc++ Workflow - -# The libc++ builders run on preemptable VMs, which can be shutdown at any time. -# This workflow identifies when a workflow run was canceled due to the VM being preempted, -# and restarts the workflow run. - -# We identify a canceled workflow run by checking the annotations of the check runs in the check suite, -# which should contain the message "The runner has received a shutdown signal." - -# Note: If a job is both preempted and also contains a non-preemption failure, we do not restart the workflow. - -on: - workflow_run: - workflows: [Build and Test libc\+\+] - types: - - completed - -permissions: - contents: read - -jobs: - restart: - if: github.repository_owner == 'llvm' && (github.event.workflow_run.conclusion == 'failure' || github.event.workflow_run.conclusion == 'cancelled') - name: "Restart Job" - permissions: - statuses: read - checks: write - actions: write - runs-on: ubuntu-latest - steps: - - name: "Restart Job" - uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea #v7.0.1 - with: - script: | - const failure_regex = /Process completed with exit code 1./ - const preemption_regex = /The runner has received a shutdown signal/ - - const wf_run = context.payload.workflow_run - core.notice(`Running on "${wf_run.display_title}" by @${wf_run.actor.login} (event: ${wf_run.event})\nWorkflow run URL: ${wf_run.html_url}`) - - - async function create_check_run(conclusion, message) { - // Create a check run on the given workflow run to indicate if - // we are restarting the workflow or not. 
- if (conclusion != 'success' && conclusion != 'skipped' && conclusion != 'neutral') { - core.setFailed('Invalid conclusion: ' + conclusion) - } - await github.rest.checks.create({ - owner: context.repo.owner, - repo: context.repo.repo, - name: 'Restart Preempted Job', - head_sha: wf_run.head_sha, - status: 'completed', - conclusion: conclusion, - output: { - title: 'Restarted Preempted Job', - summary: message - } - }) - } - - console.log('Listing check runs for suite') - const check_suites = await github.rest.checks.listForSuite({ - owner: context.repo.owner, - repo: context.repo.repo, - check_suite_id: context.payload.workflow_run.check_suite_id, - per_page: 100 // FIXME: We don't have 100 check runs yet, but we should handle this better. - }) - - check_run_ids = []; - for (check_run of check_suites.data.check_runs) { - console.log('Checking check run: ' + check_run.id); - if (check_run.status != 'completed') { - console.log('Check run was not completed. Skipping.'); - continue; - } - if (check_run.conclusion != 'failure' && check_run.conclusion != 'cancelled') { - console.log('Check run had conclusion: ' + check_run.conclusion + '. Skipping.'); - continue; - } - check_run_ids.push(check_run.id); - } - - has_preempted_job = false; - - for (check_run_id of check_run_ids) { - console.log('Listing annotations for check run: ' + check_run_id); - - annotations = await github.rest.checks.listAnnotations({ - owner: context.repo.owner, - repo: context.repo.repo, - check_run_id: check_run_id - }) - - // For temporary debugging purposes to see the structure of the annotations. 
- console.log(annotations); - - has_failed_job = false; - saved_failure_message = null; - - for (annotation of annotations.data) { - if (annotation.annotation_level != 'failure') { - continue; - } - - const preemption_match = annotation.message.match(preemption_regex); - - if (preemption_match != null) { - console.log('Found preemption message: ' + annotation.message); - has_preempted_job = true; - } - - const failure_match = annotation.message.match(failure_regex); - if (failure_match != null) { - has_failed_job = true; - saved_failure_message = annotation.message; - } - } - if (has_failed_job && (! has_preempted_job)) { - // We only want to restart the workflow if all of the failures were due to preemption. - // We don't want to restart the workflow if there were other failures. - // - // However, libcxx runners running inside docker containers produce both a preemption message and failure message. - // - // The desired approach is to ignore failure messages which appear on the same job as a preemption message - // (An job is a single run with a specific configuration, ex generic-gcc, gcc-14). - // - // However, it's unclear that this code achieves the desired approach, and it may ignore all failures - // if a preemption message is found at all on any run. - // - // For now, it's more important to restart preempted workflows than to avoid restarting workflows with - // non-preemption failures. - // - // TODO Figure this out. - core.notice('Choosing not to rerun workflow because we found a non-preemption failure' + - 'Failure message: "' + saved_failure_message + '"'); - await create_check_run('skipped', 'Choosing not to rerun workflow because we found a non-preemption failure\n' - + 'Failure message: ' + saved_failure_message) - return; - } - } - - if (!has_preempted_job) { - core.notice('No preempted jobs found. Not restarting workflow.'); - await create_check_run('neutral', 'No preempted jobs found. 
Not restarting workflow.') - return; - } - - core.notice("Restarted workflow: " + context.payload.workflow_run.id); - await github.rest.actions.reRunWorkflowFailedJobs({ - owner: context.repo.owner, - repo: context.repo.repo, - run_id: context.payload.workflow_run.id - }) - await create_check_run('success', 'Restarted workflow run due to preempted job') - - restart-test: - if: github.repository_owner == 'llvm' && (github.event.workflow_run.conclusion == 'failure' || github.event.workflow_run.conclusion == 'cancelled') && github.event.actor.login == 'ldionne' # TESTING ONLY - name: "Restart Job (test)" - permissions: - statuses: read - checks: write - actions: write - runs-on: ubuntu-latest - steps: - - name: "Restart Job (test)" - uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea #v7.0.1 - with: - script: | - const FAILURE_REGEX = /Process completed with exit code 1./ - const PREEMPTION_REGEX = /(The runner has received a shutdown signal)|(The operation was canceled)/ - - function log(msg) { - core.notice(msg) - } - - const wf_run = context.payload.workflow_run - log(`Running on "${wf_run.display_title}" by @${wf_run.actor.login} (event: ${wf_run.event})\nWorkflow run URL: ${wf_run.html_url}`) - - log('Listing check runs for suite') - const check_suites = await github.rest.checks.listForSuite({ - owner: context.repo.owner, - repo: context.repo.repo, - check_suite_id: context.payload.workflow_run.check_suite_id, - per_page: 100 // FIXME: We don't have 100 check runs yet, but we should handle this better. - }) - - preemptions = []; - legitimate_failures = []; - for (check_run of check_suites.data.check_runs) { - log(`Checking check run: ${check_run.id}`); - if (check_run.status != 'completed') { - log('Check run was not completed. Skipping.'); - continue; - } - - if (check_run.conclusion != 'failure' && check_run.conclusion != 'cancelled') { - log(`Check run had conclusion: ${check_run.conclusion}. 
Skipping.`); - continue; - } - - annotations = await github.rest.checks.listAnnotations({ - owner: context.repo.owner, - repo: context.repo.repo, - check_run_id: check_run.id - }) - - preemption_annotation = annotations.data.find(function(annotation) { - return annotation.annotation_level == 'failure' && - annotation.message.match(PREEMPTION_REGEX) != null; - }); - if (preemption_annotation != null) { - log(`Found preemption message: ${preemption_annotation.message}`); - preemptions.push(check_run); - break; - } - - failure_annotation = annotations.data.find(function(annotation) { - return annotation.annotation_level == 'failure' && - annotation.message.match(FAILURE_REGEX) != null; - }); - if (failure_annotation != null) { - log(`Found legitimate failure annotation: ${failure_annotation.message}`); - legitimate_failures.push(check_run); - break; - } - } - - if (preemptions) { - log('Found some preempted jobs'); - if (legitimate_failures) { - log('Also found some legitimate failures, so not restarting the workflow.'); - } else { - log('Did not find any legitimate failures. Restarting workflow.'); - await github.rest.actions.reRunWorkflowFailedJobs({ - owner: context.repo.owner, - repo: context.repo.repo, - run_id: context.payload.workflow_run.id - }) - } - } else { - log('Did not find any preempted jobs. 
Not restarting the workflow.'); - } diff --git a/.github/workflows/linux-precommit.yml b/.github/workflows/linux-precommit.yml new file mode 100644 index 000000000000..6ceb52033708 --- /dev/null +++ b/.github/workflows/linux-precommit.yml @@ -0,0 +1,95 @@ +name: "Linux precommit" + +permissions: + contents: read + +on: + push: + branches: [ "npu/release/20.x" ] + pull_request: + branches: [ "npu/release/20.x" ] + workflow_dispatch: + branches: [ "npu/release/20.x" ] + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: ${{ startsWith(github.ref, 'refs/pull/') }} + + +jobs: + Build: + name: Build and Test + runs-on: ubuntu-latest + defaults: + run: + shell: bash + env: + CMAKE_BUILD_TYPE: 'Release' + LLVM_ENABLE_ASSERTIONS: 'ON' + NPU_PLUGIN_LLVM_PROJECT: llvm + NPU_PLUGIN_LLVM_PROJECT_BUILD_DIR: llvm/build + NPU_PLUGIN_LLVM_PROJECT_INSTALL_DIR: llvm/install + steps: + - name: Clone NPU Plugin LLVM sources + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: 'true' + - name: Print system info + run: | + # Install pre-requisites for Fedora + if [[ -e /etc/fedora-release ]]; then + yum update -y -q && yum install -y -q procps + fi + echo "System: ${{ runner.os }}" + echo "System Architecture: ${{ runner.arch }}" + echo "CPU Info: "; lscpu + echo "RAM Info: "; free -h --si + echo "MEMORY Info: "; df -h + + - name: Configure CMake + run: | + cmake \ + -B ${NPU_PLUGIN_LLVM_PROJECT_BUILD_DIR} \ + -S ${NPU_PLUGIN_LLVM_PROJECT} \ + -DCMAKE_INSTALL_PREFIX=${NPU_PLUGIN_LLVM_PROJECT_INSTALL_DIR} \ + -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} \ + -DLLVM_ENABLE_WARNINGS=OFF \ + -DLLVM_ENABLE_BINDINGS=OFF \ + -DLLVM_ENABLE_RTTI=ON \ + -DLLVM_ENABLE_EH=ON \ + -DLLVM_ENABLE_BACKTRACES=ON \ + -DLLVM_ENABLE_CRASH_OVERRIDES=ON \ + -DLLVM_ENABLE_PROJECTS="mlir" \ + -DLLVM_ENABLE_ASSERTIONS=${LLVM_ENABLE_ASSERTIONS} \ + -DLLVM_INCLUDE_TESTS=ON \ + -DLLVM_INCLUDE_BENCHMARKS=OFF \ + -DLLVM_TARGETS_TO_BUILD="host" \ 
+ -DLLVM_ENABLE_TERMINFO=OFF \ + -DLLVM_BUILD_EXAMPLES=OFF \ + -DLLVM_INCLUDE_EXAMPLES=OFF \ + -DLLVM_BUILD_TOOLS=OFF \ + -DLLVM_BUILD_UTILS=ON \ + -DLLVM_INSTALL_UTILS=ON \ + + - name: Build + run: | + cmake \ + --build ${NPU_PLUGIN_LLVM_PROJECT_BUILD_DIR} \ + --config ${CMAKE_BUILD_TYPE} \ + --parallel $(nproc) + + - name: Test LLVM + run: | + cmake \ + --build ${NPU_PLUGIN_LLVM_PROJECT_BUILD_DIR} \ + --config ${CMAKE_BUILD_TYPE} \ + --parallel $(nproc) \ + --target check-llvm + + - name: Test MLIR + run: | + cmake \ + --build ${NPU_PLUGIN_LLVM_PROJECT_BUILD_DIR} \ + --config ${CMAKE_BUILD_TYPE} \ + --parallel $(nproc) \ + --target check-mlir diff --git a/.github/workflows/llvm-bugs.yml b/.github/workflows/llvm-bugs.yml deleted file mode 100644 index c392078fa452..000000000000 --- a/.github/workflows/llvm-bugs.yml +++ /dev/null @@ -1,63 +0,0 @@ -name: LLVM Bugs notifier - -permissions: - contents: read - issues: read - -on: - issues: - types: - - opened - -jobs: - auto-subscribe: - runs-on: ubuntu-latest - if: github.repository == 'llvm/llvm-project' - steps: - - uses: actions/setup-node@v4 - with: - node-version: 18 - check-latest: true - - run: npm install mailgun.js form-data - - name: Send notification - uses: actions/github-script@v6 - env: - MAILGUN_API_KEY: ${{ secrets.LLVM_BUGS_KEY }} - with: - script: | - const Mailgun = require('mailgun.js'); - const formData = require('form-data'); - - const mailgun = new Mailgun(formData); - const DOMAIN = 'email.llvm.org'; - - const mg = mailgun.client({ username: 'api', key: process.env.MAILGUN_API_KEY }); - - github.rest.issues.get({ - issue_number: context.issue.number, - owner: context.repo.owner, - repo: context.repo.repo - }) - .then((issue) => { - const payload = { - author : issue.data.user.login, - issue : issue.data.number, - title : issue.data.title, - url : issue.data.html_url, - labels : issue.data.labels.map((label) => label.name), - assignee : issue.data.assignees.map((assignee) => assignee.login), - 
body : issue.data.body - }; - - const data = { - from: 'LLVM Bugs ', - to: 'llvm-bugs@lists.llvm.org', - subject: `[Bug ${issue.data.number}] ${issue.data.title}`, - template: 'new-github-issue', - 'o:tracking-clicks': 'no', - 'h:X-Mailgun-Variables': JSON.stringify(payload) - }; - - return mg.messages.create(DOMAIN, data); - }) - .then((msg) => console.log(msg)); diff --git a/.github/workflows/llvm-project-tests.yml b/.github/workflows/llvm-project-tests.yml deleted file mode 100644 index 4ff84c511250..000000000000 --- a/.github/workflows/llvm-project-tests.yml +++ /dev/null @@ -1,154 +0,0 @@ -name: LLVM Project Tests - -permissions: - contents: read - -on: - workflow_dispatch: - inputs: - build_target: - required: false - projects: - required: false - extra_cmake_args: - required: false - os_list: - required: false - default: '["ubuntu-latest", "windows-2019", "macOS-13"]' - python_version: - required: false - type: string - default: '3.11' - workflow_call: - inputs: - build_target: - required: false - type: string - default: "all" - - projects: - required: true - type: string - - extra_cmake_args: - required: false - type: string - - os_list: - required: false - type: string - # Use windows-2019 due to: - # https://developercommunity.visualstudio.com/t/Prev-Issue---with-__assume-isnan-/1597317 - # Use ubuntu-22.04 rather than ubuntu-latest to match the ubuntu - # version in the CI container. Without this, setup-python tries - # to install a python version linked against a newer version of glibc. - # TODO(boomanaiden154): Bump the Ubuntu version once the version in the - # container is bumped. - default: '["ubuntu-22.04", "windows-2019", "macOS-13"]' - - python_version: - required: false - type: string - default: '3.11' - -concurrency: - # Skip intermediate builds: always. - # Cancel intermediate builds: only if it is a pull request build. 
- # If the group name here is the same as the group name in the workflow that includes - # this one, then the action will try to wait on itself and get stuck. - group: llvm-project-${{ github.workflow }}-${{ inputs.projects }}-${{ inputs.python_version }}${{ github.ref }} - cancel-in-progress: ${{ startsWith(github.ref, 'refs/pull/') }} - -jobs: - lit-tests: - name: Lit Tests - runs-on: ${{ matrix.os }} - container: - image: ${{(startsWith(matrix.os, 'ubuntu') && 'ghcr.io/llvm/ci-ubuntu-22.04:latest') || null}} - volumes: - - /mnt/:/mnt/ - strategy: - fail-fast: false - matrix: - os: ${{ fromJSON(inputs.os_list) }} - steps: - - name: Setup Windows - if: startsWith(matrix.os, 'windows') - uses: llvm/actions/setup-windows@main - with: - arch: amd64 - # On Windows, starting with win19/20220814.1, cmake choose the 32-bit - # python3.10.6 libraries instead of the 64-bit libraries when building - # lldb. Using this setup-python action to make 3.10 the default - # python fixes this. - - name: Setup Python - uses: actions/setup-python@v5 - with: - python-version: ${{ inputs.python_version }} - - name: Install Ninja - if: runner.os != 'Linux' - uses: llvm/actions/install-ninja@main - # actions/checkout deletes any existing files in the new git directory, - # so this needs to either run before ccache-action or it has to use - # clean: false. - - uses: actions/checkout@v4 - with: - fetch-depth: 250 - - name: Setup ccache - uses: hendrikmuhs/ccache-action@v1 - with: - # A full build of llvm, clang, lld, and lldb takes about 250MB - # of ccache space. There's not much reason to have more than this, - # because we usually won't need to save cache entries from older - # builds. Also, there is an overall 10GB cache limit, and each - # run creates a new cache entry so we want to ensure that we have - # enough cache space for all the tests to run at once and still - # fit under the 10 GB limit. 
- # Default to 2G to workaround: https://github.com/hendrikmuhs/ccache-action/issues/174 - max-size: 2G - key: ${{ matrix.os }} - variant: sccache - - name: Build and Test - env: - # Workaround for https://github.com/actions/virtual-environments/issues/5900. - # This should be a no-op for non-mac OSes - PKG_CONFIG_PATH: /usr/local/Homebrew/Library/Homebrew/os/mac/pkgconfig//12 - shell: bash - id: build-llvm - run: | - if [ "${{ runner.os }}" == "Linux" ]; then - builddir="/mnt/build/" - sudo mkdir -p $builddir - sudo chown gha $builddir - extra_cmake_args="-DCMAKE_CXX_COMPILER=clang++ -DCMAKE_C_COMPILER=clang" - else - builddir="$(pwd)"/build - fi - if [ "${{ runner.os }}" == "macOS" ]; then - # Workaround test failure on some lld tests on MacOS - # https://github.com/llvm/llvm-project/issues/81967 - extra_cmake_args="-DLLVM_DISABLE_ASSEMBLY_FILES=ON" - fi - echo "llvm-builddir=$builddir" >> "$GITHUB_OUTPUT" - cmake -G Ninja \ - -B "$builddir" \ - -S llvm \ - -DLLVM_ENABLE_PROJECTS="${{ inputs.projects }}" \ - -DCMAKE_BUILD_TYPE=Release \ - -DLLVM_ENABLE_ASSERTIONS=ON \ - -DLLDB_INCLUDE_TESTS=OFF \ - -DLIBCLC_TARGETS_TO_BUILD="amdgcn--;amdgcn--amdhsa;r600--;nvptx--;nvptx64--;nvptx--nvidiacl;nvptx64--nvidiacl" \ - -DCMAKE_C_COMPILER_LAUNCHER=sccache \ - -DCMAKE_CXX_COMPILER_LAUNCHER=sccache \ - $extra_cmake_args \ - ${{ inputs.extra_cmake_args }} - ninja -C "$builddir" '${{ inputs.build_target }}' - - - name: Build and Test libclc - if: "!startsWith(matrix.os, 'windows') && contains(inputs.projects, 'libclc')" - env: - LLVM_BUILDDIR: ${{ steps.build-llvm.outputs.llvm-builddir }} - run: | - # The libclc tests don't have a generated check target so all we can - # do is build it. 
- ninja -C "$LLVM_BUILDDIR" diff --git a/.github/workflows/llvm-project-workflow-tests.yml b/.github/workflows/llvm-project-workflow-tests.yml deleted file mode 100644 index a2539b279be0..000000000000 --- a/.github/workflows/llvm-project-workflow-tests.yml +++ /dev/null @@ -1,32 +0,0 @@ -# This workflow will test the llvm-project-tests workflow in PRs -# targetting the main branch. Since this workflow doesn't normally -# run on main PRs, we need some way to test it to ensure new updates -# don't break it. - -name: LLVM Workflow Test - -permissions: - contents: read - -on: - pull_request: - branches: - - 'main' - paths: - - '.github/workflows/llvm-project-tests.yml' - - '.github/workflows/llvm-project-workflow-tests.yml' - -concurrency: - # Skip intermediate builds: always. - # Cancel intermediate builds: only if it is a pull request build. - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: ${{ startsWith(github.ref, 'refs/pull/') }} - -jobs: - llvm-test: - if: github.repository_owner == 'llvm' - name: Build and Test - uses: ./.github/workflows/llvm-project-tests.yml - with: - build_target: check-all - projects: clang;lld;libclc;lldb diff --git a/.github/workflows/llvm-tests.yml b/.github/workflows/llvm-tests.yml deleted file mode 100644 index e2ca2ff44890..000000000000 --- a/.github/workflows/llvm-tests.yml +++ /dev/null @@ -1,185 +0,0 @@ -name: LLVM Tests - -permissions: - contents: read - -on: - workflow_dispatch: - push: - branches: - - 'release/**' - paths: - - 'llvm/**' - - '.github/workflows/llvm-tests.yml' - pull_request: - branches: - - 'release/**' - paths: - - 'llvm/**' - - '.github/workflows/llvm-tests.yml' - -concurrency: - # Skip intermediate builds: always. - # Cancel intermediate builds: only if it is a pull request build. 
- group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: ${{ startsWith(github.ref, 'refs/pull/') }} - -jobs: - abi-dump-setup: - if: github.repository_owner == 'llvm' - runs-on: ubuntu-latest - outputs: - BASELINE_REF: ${{ steps.vars.outputs.BASELINE_REF }} - ABI_HEADERS: ${{ steps.vars.outputs.ABI_HEADERS }} - BASELINE_VERSION_MAJOR: ${{ steps.vars.outputs.BASELINE_VERSION_MAJOR }} - BASELINE_VERSION_MINOR: ${{ steps.vars.outputs.BASELINE_VERSION_MINOR }} - LLVM_VERSION_MAJOR: ${{ steps.version.outputs.major }} - LLVM_VERSION_MINOR: ${{ steps.version.outputs.minor }} - LLVM_VERSION_PATCH: ${{ steps.version.outputs.patch }} - steps: - - name: Checkout source - uses: actions/checkout@v4 - with: - fetch-depth: 250 - - - name: Get LLVM version - id: version - uses: ./.github/workflows/get-llvm-version - - - name: Setup Variables - id: vars - run: | - # C++ ABI: - # 18.1.0 we aren't doing ABI checks. - # 18.1.1 We want to check 18.1.0. - # C ABI: - # 18.1.0 We want to check 17.0.x - # 18.1.1 We want to check 18.1.0 - echo "BASELINE_VERSION_MINOR=1" >> "$GITHUB_OUTPUT" - if [ ${{ steps.version.outputs.patch }} -eq 0 ]; then - { - echo "BASELINE_VERSION_MAJOR=$(( ${{ steps.version.outputs.major }} - 1))" - echo "ABI_HEADERS=llvm-c" - } >> "$GITHUB_OUTPUT" - else - { - echo "BASELINE_VERSION_MAJOR=${{ steps.version.outputs.major }}" - echo "ABI_HEADERS=." 
- } >> "$GITHUB_OUTPUT" - fi - - abi-dump: - if: github.repository_owner == 'llvm' - needs: abi-dump-setup - runs-on: ubuntu-latest - strategy: - matrix: - name: - - build-baseline - - build-latest - include: - - name: build-baseline - llvm_version_major: ${{ needs.abi-dump-setup.outputs.BASELINE_VERSION_MAJOR }} - ref: llvmorg-${{ needs.abi-dump-setup.outputs.BASELINE_VERSION_MAJOR }}.${{ needs.abi-dump-setup.outputs.BASELINE_VERSION_MINOR }}.0 - repo: llvm/llvm-project - - name: build-latest - llvm_version_major: ${{ needs.abi-dump-setup.outputs.LLVM_VERSION_MAJOR }} - ref: ${{ github.sha }} - repo: ${{ github.repository }} - steps: - - name: Install Ninja - uses: llvm/actions/install-ninja@main - - name: Install abi-compliance-checker - run: | - sudo apt-get update - sudo apt-get -y install abi-dumper autoconf pkg-config - - name: Install universal-ctags - run: | - git clone https://github.com/universal-ctags/ctags.git - cd ctags - ./autogen.sh - ./configure - sudo make install - - name: Download source code - uses: llvm/actions/get-llvm-project-src@main - with: - ref: ${{ matrix.ref }} - repo: ${{ matrix.repo }} - - name: Configure - run: | - mkdir install - cmake -B build -G Ninja -DCMAKE_BUILD_TYPE=Debug -DLLVM_TARGETS_TO_BUILD="" -DLLVM_BUILD_LLVM_DYLIB=ON -DCMAKE_C_FLAGS_DEBUG="-g1 -Og" -DCMAKE_CXX_FLAGS_DEBUG="-g1 -Og" -DCMAKE_INSTALL_PREFIX="$(pwd)"/install llvm - - name: Build - # Need to run install-LLVM twice to ensure the symlink is installed (this is a bug). - run: | - ninja -C build install-LLVM - ninja -C build install-LLVM - ninja -C build install-llvm-headers - - name: Dump ABI - run: | - if [ "${{ needs.abi-dump-setup.outputs.ABI_HEADERS }}" = "llvm-c" ]; then - nm ./install/lib/libLLVM.so | awk "/T _LLVM/ || /T LLVM/ { print $3 }" | sort -u | sed -e "s/^_//g" | cut -d ' ' -f 3 > llvm.symbols - # Even though the -symbols-list option doesn't seem to filter out the symbols, I believe it speeds up processing, so I'm leaving it in. 
- export EXTRA_ARGS="-symbols-list llvm.symbols" - else - touch llvm.symbols - fi - abi-dumper $EXTRA_ARGS -lver ${{ matrix.ref }} -skip-cxx -public-headers ./install/include/${{ needs.abi-dump-setup.outputs.ABI_HEADERS }} -o ${{ matrix.ref }}.abi ./install/lib/libLLVM.so - # Remove symbol versioning from dumps, so we can compare across major versions. - sed -i 's/LLVM_${{ matrix.llvm_version_major }}/LLVM_NOVERSION/' ${{ matrix.ref }}.abi - - name: Upload ABI file - uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # 4.6.0 - with: - name: ${{ matrix.name }} - path: ${{ matrix.ref }}.abi - - - name: Upload symbol list file - if: matrix.name == 'build-baseline' - uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # 4.6.0 - with: - name: symbol-list - path: llvm.symbols - - abi-compare: - if: github.repository_owner == 'llvm' - runs-on: ubuntu-latest - needs: - - abi-dump-setup - - abi-dump - steps: - - name: Download baseline - uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # 4.1.8 - with: - name: build-baseline - path: build-baseline - - name: Download latest - uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # 4.1.8 - with: - name: build-latest - path: build-latest - - name: Download symbol list - uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # 4.1.8 - with: - name: symbol-list - path: symbol-list - - - name: Install abi-compliance-checker - run: | - sudo apt-get update - sudo apt-get -y install abi-compliance-checker - - name: Compare ABI - run: | - if [ -s symbol-list/llvm.symbols ]; then - # This option doesn't seem to work with the ABI dumper, so passing it here. - export EXTRA_ARGS="-symbols-list symbol-list/llvm.symbols" - fi - # FIXME: Reading of gzip'd abi files on the GitHub runners stop - # working some time in March of 2021, likely due to a change in the - # runner's environment. 
- abi-compliance-checker $EXTRA_ARGS -l libLLVM.so -old build-baseline/*.abi -new build-latest/*.abi || test "${{ needs.abi-dump-setup.outputs.ABI_HEADERS }}" = "llvm-c" - - name: Upload ABI Comparison - if: always() - uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # 4.6.0 - with: - name: compat-report-${{ github.sha }} - path: compat_reports/ diff --git a/.github/workflows/merged-prs.yml b/.github/workflows/merged-prs.yml deleted file mode 100644 index e29afd4097f9..000000000000 --- a/.github/workflows/merged-prs.yml +++ /dev/null @@ -1,41 +0,0 @@ -name: "Add buildbot information to first PRs from new contributors" - -permissions: - contents: read - -on: - # It's safe to use pull_request_target here, because we aren't checking out - # code from the pull request branch. - # See https://securitylab.github.com/research/github-actions-preventing-pwn-requests/ - pull_request_target: - types: - - closed - -jobs: - buildbot_comment: - runs-on: ubuntu-latest - permissions: - pull-requests: write - if: >- - (github.repository == 'llvm/llvm-project') && - (github.event.pull_request.merged == true) - steps: - - name: Checkout Automation Script - uses: actions/checkout@v4 - with: - sparse-checkout: llvm/utils/git/ - ref: main - - - name: Setup Automation Script - working-directory: ./llvm/utils/git/ - run: | - pip install --require-hashes -r requirements.txt - - - name: Add Buildbot information comment - working-directory: ./llvm/utils/git/ - run: | - python3 ./github-automation.py \ - --token '${{ secrets.GITHUB_TOKEN }}' \ - pr-buildbot-information \ - --issue-number "${{ github.event.pull_request.number }}" \ - --author "${{ github.event.pull_request.user.login }}" diff --git a/.github/workflows/new-issues.yml b/.github/workflows/new-issues.yml deleted file mode 100644 index 3cac57e26851..000000000000 --- a/.github/workflows/new-issues.yml +++ /dev/null @@ -1,23 +0,0 @@ -name: Labeling new issues -on: - issues: - types: ['opened'] - -permissions: - 
contents: read - -jobs: - automate-issues-labels: - permissions: - issues: write - runs-on: ubuntu-latest - if: github.repository == 'llvm/llvm-project' - steps: - - uses: llvm/actions/issue-labeler@main - with: - repo-token: ${{ secrets.ISSUE_SUBSCRIBER_TOKEN }} - configuration-path: .github/new-issues-labeler.yml - include-title: 1 - include-body: 0 - sync-labels: 0 - enable-versioned-regex: 0 diff --git a/.github/workflows/new-prs.yml b/.github/workflows/new-prs.yml deleted file mode 100644 index 88175d6f8d64..000000000000 --- a/.github/workflows/new-prs.yml +++ /dev/null @@ -1,75 +0,0 @@ -name: "Labelling new pull requests" - -permissions: - contents: read - -on: - # It's safe to use pull_request_target here, because we aren't checking out - # code from the pull request branch. - # See https://securitylab.github.com/research/github-actions-preventing-pwn-requests/ - pull_request_target: - types: - - opened - - reopened - - ready_for_review - - synchronize - -jobs: - greeter: - runs-on: ubuntu-latest - permissions: - pull-requests: write - # Only comment on PRs that have been opened for the first time, by someone - # new to LLVM or to GitHub as a whole. Ideally we'd look for FIRST_TIMER - # or FIRST_TIME_CONTRIBUTOR, but this does not appear to work. Instead check - # that we do not have any of the other author associations. - # See https://docs.github.com/en/webhooks/webhook-events-and-payloads?actionType=opened#pull_request - # for all the possible values. 
- if: >- - (github.repository == 'llvm/llvm-project') && - (github.event.action == 'opened') && - (github.event.pull_request.author_association != 'COLLABORATOR') && - (github.event.pull_request.author_association != 'CONTRIBUTOR') && - (github.event.pull_request.author_association != 'MANNEQUIN') && - (github.event.pull_request.author_association != 'MEMBER') && - (github.event.pull_request.author_association != 'OWNER') - steps: - - name: Checkout Automation Script - uses: actions/checkout@v4 - with: - sparse-checkout: llvm/utils/git/ - ref: main - - - name: Setup Automation Script - working-directory: ./llvm/utils/git/ - run: | - pip install --require-hashes -r requirements.txt - - - name: Greet Author - working-directory: ./llvm/utils/git/ - run: | - python3 ./github-automation.py \ - --token '${{ secrets.GITHUB_TOKEN }}' \ - pr-greeter \ - --issue-number "${{ github.event.pull_request.number }}" - - automate-prs-labels: - # Greet first so that only the author gets that notification. - needs: greeter - runs-on: ubuntu-latest - # Ignore PRs with more than 10 commits. Pull requests with a lot of - # commits tend to be accidents usually when someone made a mistake while trying - # to rebase. We want to ignore these pull requests to avoid excessive - # notifications. - # always() means that even if greeter is skipped, this job will run. 
- if: > - always() && github.repository == 'llvm/llvm-project' && - github.event.pull_request.draft == false && - github.event.pull_request.commits < 10 - steps: - - uses: actions/labeler@v4 - with: - configuration-path: .github/new-prs-labeler.yml - # workaround for https://github.com/actions/labeler/issues/112 - sync-labels: '' - repo-token: ${{ secrets.ISSUE_SUBSCRIBER_TOKEN }} diff --git a/.github/workflows/pr-code-format.yml b/.github/workflows/pr-code-format.yml index 0e6180acf4a4..f3c389e52f75 100644 --- a/.github/workflows/pr-code-format.yml +++ b/.github/workflows/pr-code-format.yml @@ -1,49 +1,38 @@ name: "Check code formatting" -permissions: - contents: read - on: - pull_request: + pull_request_target: branches: - - main - - 'users/**' + - npu/release/20.x + +permissions: + contents: read jobs: code_formatter: - runs-on: ubuntu-latest - timeout-minutes: 30 - concurrency: - group: ${{ github.workflow }}-${{ github.event.pull_request.number }} - cancel-in-progress: true - if: github.repository == 'llvm/llvm-project' + runs-on: ubuntu-24.04 + if: (github.repository == 'llvm/llvm-project' || github.repository == 'intel/npu-plugin-llvm') && !contains(github.event.pull_request.labels.*.name, 'disable-lint') steps: - name: Fetch LLVM sources - uses: actions/checkout@v4 - with: - ref: ${{ github.event.pull_request.head.sha }} - - - name: Checkout through merge base - uses: rmacklin/fetch-through-merge-base@v0 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 with: - base_ref: ${{ github.event.pull_request.base.ref }} - head_ref: ${{ github.event.pull_request.head.sha }} - deepen_length: 500 + fetch-depth: 2 - name: Get changed files id: changed-files - uses: tj-actions/changed-files@v39 + uses: step-security/changed-files@3dbe17c78367e7d60f00d78ae6781a35be47b4a1 # v45.0.1 with: separator: "," skip_initial_fetch: true + base_sha: 'HEAD~1' + sha: 'HEAD' - # We need to pull the script from the main branch, so that we ensure - # we get the 
latest version of this script. + # We need to make sure that we aren't executing/using any code from the + # PR for security reasons as we're using pull_request_target. Checkout + # the target branch with the necessary files. - name: Fetch code formatting utils - uses: actions/checkout@v4 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 with: - repository: ${{ github.repository }} - ref: ${{ github.base_ref }} sparse-checkout: | llvm/utils/git/requirements_formatting.txt llvm/utils/git/code-format-helper.py @@ -58,12 +47,12 @@ jobs: echo "$CHANGED_FILES" - name: Install clang-format - uses: aminya/setup-cpp@v1 + uses: aminya/setup-cpp@17c11551771948abc5752bbf3183482567c7caf0 # v1.1.1 with: - clangformat: 19.1.6 + clangformat: 20.1.8 - name: Setup Python env - uses: actions/setup-python@v5 + uses: actions/setup-python@42375524e23c412d93fb67b49958b491fce71c38 # v5.4.0 with: python-version: '3.11' cache: 'pip' @@ -78,24 +67,10 @@ jobs: START_REV: ${{ github.event.pull_request.base.sha }} END_REV: ${{ github.event.pull_request.head.sha }} CHANGED_FILES: ${{ steps.changed-files.outputs.all_changed_files }} - # TODO(boomanaiden154): Once clang v18 is released, we should be able - # to take advantage of the new --diff_from_common_commit option - # explicitly in code-format-helper.py and not have to diff starting at - # the merge base. - # Create an empty comments file so the pr-write job doesn't fail. 
run: | - echo "[]" > comments && python ./code-format-tools/llvm/utils/git/code-format-helper.py \ - --write-comment-to-file \ --token ${{ secrets.GITHUB_TOKEN }} \ --issue-number $GITHUB_PR_NUMBER \ - --start-rev $(git merge-base $START_REV $END_REV) \ - --end-rev $END_REV \ + --start-rev HEAD~1 \ + --end-rev HEAD \ --changed-files "$CHANGED_FILES" - - - uses: actions/upload-artifact@26f96dfa697d77e81fd5907df203aa23a56210a8 #v4.3.0 - if: always() - with: - name: workflow-args - path: | - comments diff --git a/.github/workflows/pr-request-release-note.yml b/.github/workflows/pr-request-release-note.yml deleted file mode 100644 index 2fa501dda16b..000000000000 --- a/.github/workflows/pr-request-release-note.yml +++ /dev/null @@ -1,49 +0,0 @@ -name: PR Request Release Note - -permissions: - contents: read - -on: - pull_request: - types: - - closed - -jobs: - request-release-note: - if: >- - github.repository_owner == 'llvm' && - startsWith(github.ref, 'refs/heads/release') - - runs-on: ubuntu-latest - steps: - # We need to pull the script from the main branch, so that we ensure - # we get the latest version of this script. - - name: Checkout Scripts - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - with: - sparse-checkout: | - llvm/utils/git/requirements.txt - llvm/utils/git/github-automation.py - sparse-checkout-cone-mode: false - - - name: Install Dependencies - run: | - pip install --require-hashes -r llvm/utils/git/requirements.txt - - - name: Request Release Note - env: - # We need to use an llvmbot token here, because we are mentioning a user. 
- GITHUB_TOKEN: ${{ github.token }} - run: | - python3 llvm/utils/git/github-automation.py \ - --repo "$GITHUB_REPOSITORY" \ - --token "$GITHUB_TOKEN" \ - request-release-note \ - --pr-number ${{ github.event.pull_request.number}} - - - uses: actions/upload-artifact@26f96dfa697d77e81fd5907df203aa23a56210a8 #v4.3.0 - if: always() - with: - name: workflow-args - path: | - comments diff --git a/.github/workflows/pr-subscriber.yml b/.github/workflows/pr-subscriber.yml deleted file mode 100644 index 272d3e2f9ef8..000000000000 --- a/.github/workflows/pr-subscriber.yml +++ /dev/null @@ -1,34 +0,0 @@ -name: PR Subscriber - -on: - pull_request_target: - types: - - labeled - -permissions: - contents: read - -jobs: - auto-subscribe: - runs-on: ubuntu-latest - if: github.repository == 'llvm/llvm-project' - steps: - - name: Checkout Automation Script - uses: actions/checkout@v4 - with: - sparse-checkout: llvm/utils/git/ - ref: main - - - name: Setup Automation Script - working-directory: ./llvm/utils/git/ - run: | - pip install --require-hashes -r requirements.txt - - - name: Update watchers - working-directory: ./llvm/utils/git/ - run: | - python3 ./github-automation.py \ - --token '${{ secrets.ISSUE_SUBSCRIBER_TOKEN }}' \ - pr-subscriber \ - --issue-number "${{ github.event.number }}" \ - --label-name "${{ github.event.label.name }}" diff --git a/.github/workflows/premerge.yaml b/.github/workflows/premerge.yaml deleted file mode 100644 index 9b6a1236823d..000000000000 --- a/.github/workflows/premerge.yaml +++ /dev/null @@ -1,212 +0,0 @@ -name: LLVM Premerge Checks - -permissions: - contents: read - -on: - pull_request: - types: - - opened - - synchronize - - reopened - # When a PR is closed, we still start this workflow, but then skip - # all the jobs, which makes it effectively a no-op. The reason to - # do this is that it allows us to take advantage of concurrency groups - # to cancel in progress CI jobs whenever the PR is closed. 
- - closed - push: - branches: - - 'main' - - 'release/**' - -concurrency: - group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }} - cancel-in-progress: true - -jobs: - premerge-checks-linux: - if: >- - false && github.repository_owner == 'llvm' && - (github.event_name != 'pull_request' || github.event.action != 'closed') - runs-on: llvm-premerge-linux-runners - steps: - - name: Checkout LLVM - uses: actions/checkout@v4 - with: - fetch-depth: 2 - - name: Setup ccache - uses: hendrikmuhs/ccache-action@v1.2.14 - with: - max-size: "2000M" - - name: Build and Test - # Mark the job as a success even if the step fails so that people do - # not get notified while the new premerge pipeline is in an - # experimental state. - # TODO(boomanaiden154): Remove this once the pipeline is stable and we - # are ready for people to start recieving notifications. - continue-on-error: true - run: | - git config --global --add safe.directory '*' - - modified_files=$(git diff --name-only HEAD~1...HEAD) - modified_dirs=$(echo "$modified_files" | cut -d'/' -f1 | sort -u) - - echo $modified_files - echo $modified_dirs - - . 
./.ci/compute-projects.sh - - all_projects="bolt clang clang-tools-extra compiler-rt cross-project-tests flang libc libclc lld lldb llvm mlir openmp polly pstl" - modified_projects="$(keep-modified-projects ${all_projects})" - - linux_projects_to_test=$(exclude-linux $(compute-projects-to-test 0 ${modified_projects})) - linux_check_targets=$(check-targets ${linux_projects_to_test} | sort | uniq) - linux_projects=$(add-dependencies ${linux_projects_to_test} | sort | uniq) - - linux_runtimes_to_test=$(compute-runtimes-to-test ${linux_projects_to_test}) - linux_runtime_check_targets=$(check-targets ${linux_runtimes_to_test} | sort | uniq) - linux_runtimes=$(echo ${linux_runtimes_to_test} | sort | uniq) - - if [[ "${linux_projects}" == "" ]]; then - echo "No projects to build" - exit 0 - fi - - echo "Building projects: ${linux_projects}" - echo "Running project checks targets: ${linux_check_targets}" - echo "Building runtimes: ${linux_runtimes}" - echo "Running runtimes checks targets: ${linux_runtime_check_targets}" - - export CC=/opt/llvm/bin/clang - export CXX=/opt/llvm/bin/clang++ - - ./.ci/monolithic-linux.sh "$(echo ${linux_projects} | tr ' ' ';')" "$(echo ${linux_check_targets})" "$(echo ${linux_runtimes} | tr ' ' ';')" "$(echo ${linux_runtime_check_targets})" - - premerge-checks-windows: - if: >- - false && github.repository_owner == 'llvm' && - (github.event_name != 'pull_request' || github.event.action != 'closed') - runs-on: llvm-premerge-windows-runners - defaults: - run: - shell: bash - steps: - - name: Checkout LLVM - uses: actions/checkout@v4 - with: - fetch-depth: 2 - - name: Setup ccache - uses: hendrikmuhs/ccache-action@v1.2.14 - with: - variant: "sccache" - max-size: "2000M" - - name: Compute Projects - id: vars - run: | - modified_files=$(git diff --name-only HEAD~1...HEAD) - modified_dirs=$(echo "$modified_files" | cut -d'/' -f1 | sort | uniq) - - echo $modified_files - echo $modified_dirs - - . 
./.ci/compute-projects.sh - - all_projects="bolt clang clang-tools-extra compiler-rt cross-project-tests flang libc libclc lld lldb llvm mlir openmp polly pstl" - modified_projects="$(keep-modified-projects ${all_projects})" - - windows_projects_to_test=$(exclude-windows $(compute-projects-to-test 1 ${modified_projects})) - windows_check_targets=$(check-targets ${windows_projects_to_test} | sort | uniq | tr -d '\r' | tr '\n' ' ') - windows_projects=$(add-dependencies ${windows_projects_to_test} | sort | uniq | tr -d '\r' | tr '\n' ';') - - if [[ "${windows_projects}" == "" ]]; then - echo "No projects to build" - fi - - echo "Building projects: ${windows_projects}" - echo "Running project checks targets: ${windows_check_targets}" - - echo "windows-projects=${windows_projects}" >> $GITHUB_OUTPUT - echo "windows-check-targets=${windows_check_targets}" >> $GITHUB_OUTPUT - - name: Build and Test - # Mark the job as a success even if the step fails so that people do - # not get notified while the new premerge pipeline is in an - # experimental state. - # TODO(boomanaiden154): Remove this once the pipeline is stable and we - # are ready for people to start recieving notifications. 
- continue-on-error: true - if: ${{ steps.vars.outputs.windows-projects != '' }} - shell: cmd - run: | - set MAX_PARALLEL_COMPILE_JOBS=64 - set MAX_PARALLEL_LINK_JOBS=64 - call C:\\BuildTools\\Common7\\Tools\\VsDevCmd.bat -arch=amd64 -host_arch=amd64 - bash .ci/monolithic-windows.sh "${{ steps.vars.outputs.windows-projects }}" "${{ steps.vars.outputs.windows-check-targets }}" - - permerge-check-macos: - runs-on: macos-14 - if: >- - github.repository_owner == 'llvm' && - (startswith(github.ref_name, 'release/') || - startswith(github.base_ref, 'release/')) && - (github.event_name != 'pull_request' || github.event.action != 'closed') - steps: - - name: Checkout LLVM - uses: actions/checkout@v4 - with: - fetch-depth: 2 - - name: Setup ccache - uses: hendrikmuhs/ccache-action@v1.2.14 - with: - max-size: "2000M" - - name: Install Ninja - uses: llvm/actions/install-ninja@main - - name: Build and Test - run: | - modified_files=$(git diff --name-only HEAD~1...HEAD) - modified_dirs=$(echo "$modified_files" | cut -d'/' -f1 | sort -u) - - echo $modified_files - echo $modified_dirs - - . ./.ci/compute-projects.sh - - all_projects="clang clang-tools-extra lld lldb llvm mlir" - modified_projects="$(keep-modified-projects ${all_projects})" - - # We have to disable the runtimes builds due to https://github.com/llvm/llvm-project/issues/90568 - # and the lldb tests depend on libcxx, so we need to skip them. 
- mac_check_targets=$(check-targets ${modified_projects} | sort | uniq | tr '\n' ' ' | sed -e 's/check-lldb //g') - mac_projects=$(add-dependencies ${modified_projects} | sort | uniq | tr '\n' ' ') - - mac_runtimes_to_test=$(compute-runtimes-to-test ${modified_projects}) - mac_runtime_check_targets=$(check-targets ${mac_runtimes_to_test} | sort | uniq | tr '\n' ' ') - mac_runtimes=$(echo ${mac_runtimes_to_test} | tr ' ' '\n' | sort | uniq | tr '\n' ' ') - - if [[ "${mac_projects}" == "" ]]; then - echo "No projects to build" - exit 0 - fi - - echo "Projects to test: ${modified_projects}" - echo "Runtimes to test: ${mac_runtimes_to_test}" - echo "Building projects: ${mac_projects}" - echo "Running project checks targets: ${mac_check_targets}" - echo "Building runtimes: ${mac_runtimes}" - echo "Running runtimes checks targets: ${mac_runtime_check_targets}" - - # -DLLVM_DISABLE_ASSEMBLY_FILES=ON is for - # https://github.com/llvm/llvm-project/issues/81967 - # Disable sharding in lit so that the LIT_XFAIL environment var works. - cmake -G Ninja \ - -B build \ - -S llvm \ - -DLLVM_ENABLE_PROJECTS="$(echo ${mac_projects} | tr ' ' ';')" \ - -DLLVM_DISABLE_ASSEMBLY_FILES=ON \ - -DCMAKE_BUILD_TYPE=Release \ - -DLLDB_INCLUDE_TESTS=OFF \ - -DLLVM_ENABLE_ASSERTIONS=ON \ - -DCMAKE_C_COMPILER_LAUNCHER=ccache \ - -DCMAKE_CXX_COMPILER_LAUNCHER=ccache - - # The libcxx tests fail, so we are skipping the runtime targets. 
- ninja -C build $mac_check_targets diff --git a/.github/workflows/release-asset-audit.py b/.github/workflows/release-asset-audit.py deleted file mode 100644 index cf6ad7fbbe14..000000000000 --- a/.github/workflows/release-asset-audit.py +++ /dev/null @@ -1,66 +0,0 @@ -import github -import sys - -_SPECIAL_CASE_BINARIES = { - "keith": {"clang+llvm-18.1.8-arm64-apple-macos11.tar.xz"}, -} - - -def _is_valid(uploader_name, valid_uploaders, asset_name): - if uploader_name in valid_uploaders: - return True - - if uploader_name in _SPECIAL_CASE_BINARIES: - return asset_name in _SPECIAL_CASE_BINARIES[uploader_name] - - return False - - -def main(): - token = sys.argv[1] - - gh = github.Github(login_or_token=token) - repo = gh.get_repo("llvm/llvm-project") - - uploaders = set( - [ - "DimitryAndric", - "stefanp-ibm", - "lei137", - "omjavaid", - "nicolerabjohn", - "amy-kwan", - "mandlebug", - "zmodem", - "androm3da", - "tru", - "rovka", - "rorth", - "quinnlp", - "kamaub", - "abrisco", - "jakeegan", - "maryammo", - "tstellar", - "github-actions[bot]", - ] - ) - - for release in repo.get_releases(): - print("Release:", release.title) - for asset in release.get_assets(): - created_at = asset.created_at - updated_at = ( - "" if asset.created_at == asset.updated_at else asset.updated_at - ) - print( - f"{asset.name} : {asset.uploader.login} [{created_at} {updated_at}] ( {asset.download_count} )" - ) - if not _is_valid(asset.uploader.login, uploaders, asset.name): - with open('comment', 'w') as file: - file.write(f'@{asset.uploader.login} is not a valid uploader.') - sys.exit(1) - - -if __name__ == "__main__": - main() diff --git a/.github/workflows/release-asset-audit.yml b/.github/workflows/release-asset-audit.yml deleted file mode 100644 index 018c5d542f32..000000000000 --- a/.github/workflows/release-asset-audit.yml +++ /dev/null @@ -1,54 +0,0 @@ -name: Release Asset Audit - -on: - workflow_dispatch: - release: - schedule: - # * is a special character in YAML so you have to 
quote this string - # Run once an hour - - cron: '5 * * * *' - - pull_request: - paths: - - ".github/workflows/release-asset-audit.py" - - ".github/workflows/release-asset-audit.yml" - -permissions: - contents: read # Default everything to read-only - -jobs: - audit: - name: "Release Asset Audit" - runs-on: ubuntu-22.04 - if: github.repository == 'llvm/llvm-project' - steps: - - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 #v4.1.6 - - name: "Run Audit Script" - env: - GITHUB_TOKEN: ${{ github.token }} - run: | - pip install --require-hashes -r ./llvm/utils/git/requirements.txt - python3 ./.github/workflows/release-asset-audit.py $GITHUB_TOKEN - - name: "File Issue" - if: >- - github.event_name != 'pull_request' && - failure() - uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea #v7.0.1 - with: - github-token: ${{ secrets.ISSUE_SUBSCRIBER_TOKEN }} - script: | - var fs = require('fs'); - var body = '' - if (fs.existsSync('./comment')) { - body = fs.readFileSync('./comment') + "\n\n"; - } - body = body + `\n\nhttps://github.com/${context.repo.owner}/${context.repo.repo}/actions/runs/${context.runId}` - - const issue = await github.rest.issues.create({ - owner: context.repo.owner, - repo: context.repo.repo, - title: "Release Asset Audit Failed", - labels: ['infrastructure'], - body: body - }); - console.log(issue); diff --git a/.github/workflows/release-binaries-all.yml b/.github/workflows/release-binaries-all.yml deleted file mode 100644 index fd4694ebea32..000000000000 --- a/.github/workflows/release-binaries-all.yml +++ /dev/null @@ -1,103 +0,0 @@ -name: Release Binaries All - -permissions: - contents: read # Default everything to read-only - -on: - workflow_dispatch: - inputs: - release-version: - description: 'Release Version' - required: true - type: string - upload: - description: 'Upload binaries to the release page' - required: true - default: false - type: boolean - - workflow_call: - inputs: - release-version: - 
description: 'Release Version' - required: true - type: string - upload: - description: 'Upload binaries to the release page' - required: true - default: false - type: boolean - secrets: - RELEASE_TASKS_USER_TOKEN: - description: "Secret used to check user permissions." - required: false - - pull_request: - types: - - opened - - synchronize - - reopened - # When a PR is closed, we still start this workflow, but then skip - # all the jobs, which makes it effectively a no-op. The reason to - # do this is that it allows us to take advantage of concurrency groups - # to cancel in progress CI jobs whenever the PR is closed. - - closed - paths: - - '.github/workflows/release-binaries-all.yml' - - '.github/workflows/release-binaries.yml' - - '.github/workflows/release-binaries-setup-stage/*' - - '.github/workflows/release-binaries-save-stage/*' - - 'clang/cmake/caches/Release.cmake' - -concurrency: - group: ${{ github.workflow }}-${{ github.event.pull_request.number || 'dispatch' }} - cancel-in-progress: True - -jobs: - setup-variables: - if: >- - (github.event_name != 'pull_request' || github.event.action != 'closed') - runs-on: ubuntu-22.04 - outputs: - release-version: ${{ steps.vars.outputs.release-version }} - upload: ${{ steps.vars.outputs.upload }} - steps: - - shell: bash - id: vars - run: | - upload="${{ inputs.upload }}" - release_version="${{ inputs.release-version }}" - if [ "${{ github.event_name }}" = "pull_request" ]; then - upload="false" - release_version="" - fi - echo "release-version=$release_version" >> "$GITHUB_OUTPUT" - echo "upload=$upload" >> "$GITHUB_OUTPUT" - - release-binaries-all: - name: Build Release Binaries - needs: - - setup-variables - permissions: - contents: write # For release uploads - id-token: write # For artifact attestations - attestations: write # For artifact attestations - strategy: - fail-fast: false - matrix: - runs-on: - - ubuntu-22.04 - - ubuntu-22.04-arm - - macos-13 - - macos-14 - - uses: 
./.github/workflows/release-binaries.yml - with: - release-version: "${{ needs.setup-variables.outputs.release-version }}" - upload: ${{ needs.setup-variables.outputs.upload == 'true'}} - runs-on: "${{ matrix.runs-on }}" - secrets: - # This will be empty for pull_request events, but that's fine, because - # the release-binaries workflow does not use this secret for the - # pull_request event. - RELEASE_TASKS_USER_TOKEN: ${{ secrets.RELEASE_TASKS_USER_TOKEN }} diff --git a/.github/workflows/release-binaries-save-stage/action.yml b/.github/workflows/release-binaries-save-stage/action.yml deleted file mode 100644 index f08088c7bc56..000000000000 --- a/.github/workflows/release-binaries-save-stage/action.yml +++ /dev/null @@ -1,44 +0,0 @@ -name: Save Stage -description: >- - Upload the source and binary directories from a build stage so that they - can be re-used in the next stage. This action is used to the release - binaries workflow into multiple stages to avoid the 6 hour timeout on - the GitHub hosted runners. -inputs: - build-prefix: - description: "Directory containing the build directory." - required: true - type: 'string' - -permissions: - contents: read - -runs: - using: "composite" - steps: - # We need to create an archive of the build directory, because it has too - # many files to upload. - - name: Package Build and Source Directories - shell: bash - run: | - # Remove .git/config to avoid leaking GITHUB_TOKEN stored there. - # See https://unit42.paloaltonetworks.com/github-repo-artifacts-leak-tokens/ - rm -Rf .git/config - # Windows does not support symlinks, so we need to dereference them. - tar --exclude build/ ${{ (runner.os == 'Windows' && '-h') || '' }} -c . | zstd -T0 -c > ../llvm-project.tar.zst - mv ../llvm-project.tar.zst . 
- tar -C ${{ inputs.build-prefix }} -c build/ | zstd -T0 -c > build.tar.zst - - - name: Upload Stage 1 Source - uses: actions/upload-artifact@26f96dfa697d77e81fd5907df203aa23a56210a8 #v4.3.0 - with: - name: ${{ runner.os }}-${{ runner.arch }}-${{ github.job }}-source - path: llvm-project.tar.zst - retention-days: 2 - - - name: Upload Stage 1 Build Dir - uses: actions/upload-artifact@26f96dfa697d77e81fd5907df203aa23a56210a8 #v4.3.0 - with: - name: ${{ runner.os}}-${{ runner.arch }}-${{ github.job }}-build - path: build.tar.zst - retention-days: 2 diff --git a/.github/workflows/release-binaries-setup-stage/action.yml b/.github/workflows/release-binaries-setup-stage/action.yml deleted file mode 100644 index f5e5db27e659..000000000000 --- a/.github/workflows/release-binaries-setup-stage/action.yml +++ /dev/null @@ -1,59 +0,0 @@ -name: Setup Stage -description: >- - Setup the next stage of the release binaries workflow. This sets up the - environment correctly for a new stage of the release binaries workflow - and also restores the source and build directory from the previous stage. - -inputs: - previous-artifact: - description: >- - A unique descriptor for the artifact from the previous stage. This will - be used to construct the final artifact pattern, which is: - $RUNNER_OS-$RUNNER_ARCH-$PREVIOUS_ARTIFACT-* - required: false - type: 'string' - -outputs: - build-prefix: - description: "Directory containing the build directory." 
- value: ${{ steps.build-prefix.outputs.build-prefix }} - -runs: - using: "composite" - steps: - - name: Install Ninja - uses: llvm/actions/install-ninja@22e9f909d35b50bd1181709564bfe816eaeaae81 # main - - - name: Setup Windows - if: startsWith(runner.os, 'Windows') - uses: llvm/actions/setup-windows@main - with: - arch: amd64 - - - name: Set Build Prefix - id: build-prefix - shell: bash - run: | - build_prefix=`pwd` - if [ "${{ runner.os }}" = "Linux" ]; then - sudo chown $USER:$USER /mnt/ - build_prefix=/mnt/ - fi - echo "build-prefix=$build_prefix" >> $GITHUB_OUTPUT - - - name: Download Previous Stage Artifact - if: ${{ inputs.previous-artifact }} - id: download - uses: actions/download-artifact@6b208ae046db98c579e8a3aa621ab581ff575935 # v4.1.1 - with: - pattern: ${{ runner.os }}-${{ runner.arch }}-${{ inputs.previous-artifact }}-* - merge-multiple: true - - - name: Unpack Artifact - if: ${{ steps.download.outputs.download-path }} - shell: bash - run: | - tar --zstd -xf llvm-project.tar.zst - rm llvm-project.tar.zst - tar --zstd -C ${{ steps.build-prefix.outputs.build-prefix}} -xf build.tar.zst - rm build.tar.zst diff --git a/.github/workflows/release-binaries.yml b/.github/workflows/release-binaries.yml deleted file mode 100644 index 231dd26e54ae..000000000000 --- a/.github/workflows/release-binaries.yml +++ /dev/null @@ -1,355 +0,0 @@ -name: Release Binaries - -on: - workflow_dispatch: - inputs: - release-version: - description: 'Release Version' - required: false - type: string - upload: - description: 'Upload binaries to the release page' - required: true - default: false - type: boolean - runs-on: - description: "Runner to use for the build" - required: true - type: choice - options: - - ubuntu-22.04 - - ubuntu-22.04-arm - - macos-13 - - macos-14 - - workflow_call: - inputs: - release-version: - description: 'Release Version' - required: false - type: string - upload: - description: 'Upload binaries to the release page' - required: true - default: false - 
type: boolean - runs-on: - description: "Runner to use for the build" - required: true - type: string - secrets: - RELEASE_TASKS_USER_TOKEN: - description: "Secret used to check user permissions." - required: false - - -permissions: - contents: read # Default everything to read-only - -jobs: - prepare: - name: Prepare to build binaries - runs-on: ${{ inputs.runs-on }} - if: github.repository_owner == 'llvm' - outputs: - release-version: ${{ steps.vars.outputs.release-version }} - ref: ${{ steps.vars.outputs.ref }} - upload: ${{ steps.vars.outputs.upload }} - target-cmake-flags: ${{ steps.vars.outputs.target-cmake-flags }} - ccache: ${{ steps.vars.outputs.ccache }} - build-flang: ${{ steps.vars.outputs.build-flang }} - release-binary-basename: ${{ steps.vars.outputs.release-binary-basename }} - release-binary-filename: ${{ steps.vars.outputs.release-binary-filename }} - build-runs-on: ${{ steps.vars.outputs.build-runs-on }} - test-runs-on: ${{ steps.vars.outputs.build-runs-on }} - - steps: - # It's good practice to use setup-python, but this is also required on macos-14 - # due to https://github.com/actions/runner-images/issues/10385 - - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f - with: - python-version: '3.12' - - - name: Checkout LLVM - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - - - name: Install Dependencies - shell: bash - run: | - pip install --require-hashes -r ./llvm/utils/git/requirements.txt - - - name: Check Permissions - if: github.event_name != 'pull_request' - env: - GITHUB_TOKEN: ${{ github.token }} - USER_TOKEN: ${{ secrets.RELEASE_TASKS_USER_TOKEN }} - shell: bash - run: | - ./llvm/utils/release/./github-upload-release.py --token "$GITHUB_TOKEN" --user "$GITHUB_ACTOR" --user-token "$USER_TOKEN" check-permissions - - - name: Collect Variables - id: vars - shell: bash - # In order for the test-release.sh script to run correctly, the LLVM - # source needs to be at the following location relative 
to the build dir: - # | X.Y.Z-rcN | ./rcN/llvm-project - # | X.Y.Z | ./final/llvm-project - # - # We also need to set divergent flags based on the release version: - # | X.Y.Z-rcN | -rc N -test-asserts - # | X.Y.Z | -final - run: | - trimmed=$(echo ${{ inputs.release-version }} | xargs) - if [ -n "$trimmed" ]; then - release_version="$trimmed" - ref="llvmorg-$release_version" - else - release_version="${{ (github.event_name == 'pull_request' && format('PR{0}', github.event.pull_request.number)) || 'CI'}}-$GITHUB_SHA" - ref="$GITHUB_SHA" - fi - if [ -n "${{ inputs.upload }}" ]; then - upload="${{ inputs.upload }}" - else - upload="false" - fi - echo "release-version=$release_version">> $GITHUB_OUTPUT - echo "ref=$ref" >> $GITHUB_OUTPUT - echo "upload=$upload" >> $GITHUB_OUTPUT - - release_binary_basename="LLVM-$release_version-$RUNNER_OS-$RUNNER_ARCH" - echo "release-binary-basename=$release_binary_basename" >> $GITHUB_OUTPUT - echo "release-binary-filename=$release_binary_basename.tar.xz" >> $GITHUB_OUTPUT - - target="$RUNNER_OS-$RUNNER_ARCH" - # The hendrikmuhs/ccache-action action does not support installing sccache - # on arm64 Linux. - if [ "$target" = "Linux-ARM64" ]; then - echo ccache=ccache >> $GITHUB_OUTPUT - else - echo ccache=sccache >> $GITHUB_OUTPUT - fi - - # The macOS builds try to cross compile some libraries so we need to - # add extra CMake args to disable them. - # See https://github.com/llvm/llvm-project/issues/99767 - if [ "$RUNNER_OS" = "macOS" ]; then - target_cmake_flags="$target_cmake_flags -DBOOTSTRAP_BOOTSTRAP_COMPILER_RT_ENABLE_IOS=OFF" - if [ "$RUNNER_ARCH" = "ARM64" ]; then - arches=arm64 - else - arches=x86_64 - # Disable Flang builds on macOS x86_64. The FortranLower library takes - # 2-3 hours to build on macOS, much slower than on Linux. - # The long build time causes the release build to time out on x86_64, - # so we need to disable flang there. 
- target_cmake_flags="$target_cmake_flags -DLLVM_RELEASE_ENABLE_PROJECTS='clang;lld;lldb;clang-tools-extra;polly;mlir'" - fi - target_cmake_flags="$target_cmake_flags -DBOOTSTRAP_BOOTSTRAP_DARWIN_osx_ARCHS=$arches -DBOOTSTRAP_BOOTSTRAP_DARWIN_osx_BUILTIN_ARCHS=$arches" - fi - - build_flang="true" - - if [ "$RUNNER_OS" = "Windows" ]; then - # The build times out on Windows, so we need to disable LTO. - target_cmake_flags="$target_cmake_flags -DLLVM_RELEASE_ENABLE_LTO=OFF" - fi - - echo "target-cmake-flags=$target_cmake_flags" >> $GITHUB_OUTPUT - echo "build-flang=$build_flang" >> $GITHUB_OUTPUT - case "${{ inputs.runs-on }}" in - ubuntu-22.04*) - build_runs_on="depot-${{ inputs.runs-on }}-16" - test_runs_on=$build_runs_on - ;; - macos-13) - if [ "$GITHUB_EVENT_NAME" = "pull_request" ]; then - build_runs_on="${{ inputs.runs-on }}" - else - build_runs_on="macos-13-large" - fi - test_runs_on="${{ inputs.runs-on }}" - ;; - macos-14) - if [ "$GITHUB_EVENT_NAME" = "pull_request" ]; then - build_runs_on="${{ inputs.runs-on }}" - else - build_runs_on="depot-macos-14" - fi - test_runs_on="${{ inputs.runs-on }}" - ;; - *) - test_runs_on="${{ inputs.runs-on }}" - build_runs_on=$test_runs_on - ;; - esac - echo "build-runs-on=$build_runs_on" >> $GITHUB_OUTPUT - echo "test-runs-on=$test_runs_on" >> $GITHUB_OUTPUT - - build-release-package: - name: "Build Release Package" - needs: prepare - if: github.repository_owner == 'llvm' - runs-on: ${{ needs.prepare.outputs.build-runs-on }} - steps: - - - name: Checkout Actions - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - with: - ref: ${{ (github.event_name == 'pull_request' && github.sha) || 'main' }} - sparse-checkout: | - .github/workflows/ - sparse-checkout-cone-mode: false - # Check out outside of working directory so the source checkout doesn't - # remove it. - path: workflows - - # actions/checkout does not support paths outside of the GITHUB_WORKSPACE. 
- # Also, anything that we put inside of GITHUB_WORKSPACE will be overwritten - # by future actions/checkout steps. Therefore, in order to checkout the - # latest actions from main, we need to first checkout out the actions inside of - # GITHUB_WORKSPACE (see previous step), then use actions/checkout to checkout - # the code being built and the move the actions from main back into GITHUB_WORKSPACE, - # becasue the uses on composite actions only reads workflows from inside GITHUB_WORKSPACE. - - shell: bash - run: mv workflows ../workflows-main - - - name: Checkout LLVM - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - with: - ref: ${{ needs.prepare.outputs.ref }} - - - name: Copy main workflows - shell: bash - run: | - mv ../workflows-main . - - - name: Setup Stage - id: setup-stage - uses: ./workflows-main/.github/workflows/release-binaries-setup-stage - - - name: Configure - id: build - shell: bash - env: - CCACHE_BIN: ${{ needs.prepare.outputs.ccache }} - run: | - # There were some issues on the ARM64 MacOS runners with trying to build x86 object, - # so we need to set some extra cmake flags to disable this. - cmake -G Ninja -S llvm -B ${{ steps.setup-stage.outputs.build-prefix }}/build \ - ${{ needs.prepare.outputs.target-cmake-flags }} \ - -C clang/cmake/caches/Release.cmake \ - -DBOOTSTRAP_LLVM_PARALLEL_LINK_JOBS=1 \ - -DBOOTSTRAP_BOOTSTRAP_CPACK_PACKAGE_FILE_NAME="${{ needs.prepare.outputs.release-binary-basename }}" - - - name: Build - shell: bash - run: | - ninja -v -C ${{ steps.setup-stage.outputs.build-prefix }}/build stage2-package - release_dir=`find ${{ steps.setup-stage.outputs.build-prefix }}/build -iname 'stage2-bins'` - mv $release_dir/${{ needs.prepare.outputs.release-binary-filename }} . 
- - - uses: actions/upload-artifact@26f96dfa697d77e81fd5907df203aa23a56210a8 #v4.3.0 - with: - name: ${{ runner.os }}-${{ runner.arch }}-release-binary - # Due to path differences on Windows when running in bash vs running on node, - # we need to search for files in the current workspace. - path: | - ${{ needs.prepare.outputs.release-binary-filename }} - - # Clean up some build files to reduce size of artifact. - - name: Clean Up Build Directory - shell: bash - run: | - find ${{ steps.setup-stage.outputs.build-prefix }}/build -iname ${{ needs.prepare.outputs.release-binary-filename }} -delete - find ${{ steps.setup-stage.outputs.build-prefix }}/build -iname _CPack_Packages -prune -exec rm -r {} + - - - name: Save Stage - uses: ./workflows-main/.github/workflows/release-binaries-save-stage - with: - build-prefix: ${{ steps.setup-stage.outputs.build-prefix }} - - upload-release-binaries: - name: "Upload Release Binaries" - needs: - - prepare - - build-release-package - if: >- - github.event_name != 'pull_request' && - needs.prepare.outputs.upload == 'true' - runs-on: ubuntu-22.04 - permissions: - contents: write # For release uploads - id-token: write # For artifact attestations - attestations: write # For artifact attestations - - steps: - - name: Checkout Release Scripts - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - with: - sparse-checkout: | - llvm/utils/release/github-upload-release.py - llvm/utils/git/requirements.txt - sparse-checkout-cone-mode: false - - - name: 'Download artifact' - uses: actions/download-artifact@6b208ae046db98c579e8a3aa621ab581ff575935 # v4.1.1 - with: - pattern: '*-release-binary' - merge-multiple: true - - - name: Attest Build Provenance - id: provenance - uses: actions/attest-build-provenance@897ed5eab6ed058a474202017ada7f40bfa52940 # v1.0.0 - with: - subject-path: ${{ needs.prepare.outputs.release-binary-filename }} - - - name: Rename attestation file - run: - mv ${{ steps.provenance.outputs.bundle-path }} 
${{ needs.prepare.outputs.release-binary-filename }}.jsonl - - - name: Upload Build Provenance - uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 #v4.3.3 - with: - name: ${{ needs.prepare.outputs.release-binary-filename }}-attestation - path: ${{ needs.prepare.outputs.release-binary-filename }}.jsonl - - - name: Install Python Requirements - run: | - pip install --require-hashes -r ./llvm/utils/git/requirements.txt - - - name: Upload Release - shell: bash - run: | - ./llvm/utils/release/github-upload-release.py \ - --token ${{ github.token }} \ - --release ${{ needs.prepare.outputs.release-version }} \ - upload \ - --files ${{ needs.prepare.outputs.release-binary-filename }}* - - test-release: - name: "Test Release" - needs: - - prepare - - build-release-package - if: >- - github.repository_owner == 'llvm' - runs-on: ${{ needs.prepare.outputs.test-runs-on }} - steps: - - name: Checkout Actions - uses: actions/checkout@v4 - with: - ref: ${{ (github.event_name == 'pull_request' && github.sha) || 'main' }} - sparse-checkout: | - .github/workflows/ - sparse-checkout-cone-mode: false - path: workflows - - name: Setup Stage - id: setup-stage - uses: ./workflows/.github/workflows/release-binaries-setup-stage - with: - previous-artifact: build-release-package - - - name: Run Tests - shell: bash - run: | - ninja -C ${{ steps.setup-stage.outputs.build-prefix }}/build stage2-check-all diff --git a/.github/workflows/release-documentation.yml b/.github/workflows/release-documentation.yml deleted file mode 100644 index 09e21585bfc5..000000000000 --- a/.github/workflows/release-documentation.yml +++ /dev/null @@ -1,91 +0,0 @@ -name: Release Documentation - -permissions: - contents: read - -on: - workflow_dispatch: - inputs: - release-version: - description: 'Release Version' - required: true - type: string - upload: - description: 'Upload documentation' - required: false - type: boolean - - workflow_call: - inputs: - release-version: - description: 'Release 
Version' - required: true - type: string - upload: - description: 'Upload documentation' - required: false - type: boolean - -jobs: - release-documentation: - name: Build and Upload Release Documentation - runs-on: ubuntu-latest - env: - upload: ${{ inputs.upload && !contains(inputs.release-version, 'rc') }} - steps: - - name: Checkout LLVM - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - - - name: Setup Python env - uses: actions/setup-python@v5 - with: - cache: 'pip' - cache-dependency-path: './llvm/docs/requirements.txt' - - - name: Install Dependencies - run: | - sudo apt-get update - sudo apt-get install -y \ - graphviz \ - python3-github \ - ninja-build \ - texlive-font-utils - pip3 install --user -r ./llvm/docs/requirements.txt - - - name: Build Documentation - env: - GITHUB_TOKEN: ${{ github.token }} - run: | - ./llvm/utils/release/build-docs.sh -release "${{ inputs.release-version }}" -no-doxygen - - - name: Create Release Notes Artifact - uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # 4.6.0 - with: - name: release-notes - path: docs-build/html-export/ - - - name: Clone www-releases - if: env.upload - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - with: - repository: ${{ github.repository_owner }}/www-releases - ref: main - fetch-depth: 0 - path: www-releases - persist-credentials: false - - - name: Upload Release Notes - if: env.upload - env: - GH_TOKEN: ${{ secrets.WWW_RELEASES_TOKEN }} - run: | - mkdir -p www-releases/${{ inputs.release-version }} - mv ./docs-build/html-export/* www-releases/${{ inputs.release-version }} - cd www-releases - git checkout -b ${{ inputs.release-version }} - git add ${{ inputs.release-version }} - git config user.email "llvmbot@llvm.org" - git config user.name "llvmbot" - git commit -a -m "Add ${{ inputs.release-version }} documentation" - git push --force "https://$GH_TOKEN@github.com/llvmbot/www-releases.git" HEAD:refs/heads/${{ 
inputs.release-version }} - gh pr create -f -B main -H ${{ inputs.release-version }} -R llvmbot/www-releases diff --git a/.github/workflows/release-doxygen.yml b/.github/workflows/release-doxygen.yml deleted file mode 100644 index ea95e5bb12b2..000000000000 --- a/.github/workflows/release-doxygen.yml +++ /dev/null @@ -1,72 +0,0 @@ -name: Release Doxygen - -permissions: - contents: read - -on: - workflow_dispatch: - inputs: - release-version: - description: 'Release Version' - required: true - type: string - upload: - description: 'Upload documentation' - required: false - type: boolean - - workflow_call: - inputs: - release-version: - description: 'Release Version' - required: true - type: string - upload: - description: 'Upload documentation' - required: false - type: boolean - secrets: - RELEASE_TASKS_USER_TOKEN: - description: "Secret used to check user permissions." - required: false - -jobs: - release-doxygen: - name: Build and Upload Release Doxygen - runs-on: ubuntu-latest - permissions: - contents: write - env: - upload: ${{ inputs.upload && !contains(inputs.release-version, 'rc') }} - steps: - - name: Checkout LLVM - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - - - name: Setup Python env - uses: actions/setup-python@v5 - with: - cache: 'pip' - cache-dependency-path: './llvm/docs/requirements.txt' - - - name: Install Dependencies - run: | - sudo apt-get update - sudo apt-get install -y \ - doxygen \ - graphviz \ - python3-github \ - ninja-build \ - texlive-font-utils - pip3 install --user -r ./llvm/docs/requirements.txt - - - name: Build Doxygen - run: | - ./llvm/utils/release/build-docs.sh -release "${{ inputs.release-version }}" -no-sphinx - - - name: Upload Doxygen - if: env.upload - env: - GITHUB_TOKEN: ${{ github.token }} - USER_TOKEN: ${{ secrets.RELEASE_TASKS_USER_TOKEN }} - run: | - ./llvm/utils/release/github-upload-release.py --token "$GITHUB_TOKEN" --release "${{ inputs.release-version }}" --user "${{ github.actor 
}}" --user-token "$USER_TOKEN" upload --files ./*doxygen*.tar.xz diff --git a/.github/workflows/release-lit.yml b/.github/workflows/release-lit.yml deleted file mode 100644 index 9d6f3140e688..000000000000 --- a/.github/workflows/release-lit.yml +++ /dev/null @@ -1,79 +0,0 @@ -name: Release Lit - -permissions: - contents: read - -on: - workflow_dispatch: - inputs: - release-version: - description: 'Release Version' - required: true - type: string - - workflow_call: - inputs: - release-version: - description: 'Release Version' - required: true - type: string - secrets: - RELEASE_TASKS_USER_TOKEN: - description: "Secret used to check user permissions." - required: false - -jobs: - release-lit: - name: Release Lit - runs-on: ubuntu-latest - steps: - - name: Checkout LLVM - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - with: - ref: "llvmorg-${{ inputs.release-version }}" - - - name: Install dependencies - run: | - sudo apt-get update - sudo apt-get install -y python3-setuptools python3-psutil python3-github - - - name: Check Permissions - env: - GITHUB_TOKEN: ${{ github.token }} - USER_TOKEN: ${{ secrets.RELEASE_TASKS_USER_TOKEN }} - run: | - ./llvm/utils/release/./github-upload-release.py --token "$GITHUB_TOKEN" --user ${{ github.actor }} --user-token "$USER_TOKEN" check-permissions - - - name: Setup Cpp - uses: aminya/setup-cpp@v1 - with: - compiler: llvm-16.0.6 - cmake: true - ninja: true - - - name: Test lit - run: | - mkdir build && cd build - export FILECHECK_OPTS='-dump-input-filter=all -vv -color' - cmake ../llvm -DCMAKE_BUILD_TYPE=Release -G Ninja - ninja -v -j $(nproc) check-lit - - - name: Package lit - run: | - cd llvm/utils/lit - # Remove 'dev' suffix from lit version. 
- sed -i 's/ + "dev"//g' lit/__init__.py - python3 setup.py sdist bdist_wheel - - - name: Upload lit to test.pypi.org - uses: pypa/gh-action-pypi-publish@release/v1 - with: - password: ${{ secrets.LLVM_LIT_TEST_PYPI_API_TOKEN }} - repository-url: https://test.pypi.org/legacy/ - packages-dir: llvm/utils/lit/dist/ - - - name: Upload lit to pypi.org - uses: pypa/gh-action-pypi-publish@release/v1 - with: - password: ${{ secrets.LLVM_LIT_PYPI_API_TOKEN }} - packages-dir: llvm/utils/lit/dist/ diff --git a/.github/workflows/release-sources.yml b/.github/workflows/release-sources.yml deleted file mode 100644 index a6c86823f99d..000000000000 --- a/.github/workflows/release-sources.yml +++ /dev/null @@ -1,108 +0,0 @@ -name: Release Sources - -permissions: - contents: read - -on: - workflow_dispatch: - inputs: - release-version: - description: Release Version - required: true - type: string - workflow_call: - inputs: - release-version: - description: Release Version - required: true - type: string - secrets: - RELEASE_TASKS_USER_TOKEN: - description: "Secret used to check user permissions." - required: false - # Run on pull_requests for testing purposes. - pull_request: - paths: - - '.github/workflows/release-sources.yml' - types: - - opened - - synchronize - - reopened - # When a PR is closed, we still start this workflow, but then skip - # all the jobs, which makes it effectively a no-op. The reason to - # do this is that it allows us to take advantage of concurrency groups - # to cancel in progress CI jobs whenever the PR is closed. 
- - closed - -concurrency: - group: ${{ github.workflow }}-${{ inputs.release-version || github.event.pull_request.number }} - cancel-in-progress: True - -jobs: - inputs: - name: Collect Job Inputs - if: >- - github.repository_owner == 'llvm' && - github.event.action != 'closed' - outputs: - ref: ${{ steps.inputs.outputs.ref }} - export-args: ${{ steps.inputs.outputs.export-args }} - runs-on: ubuntu-latest - steps: - - id: inputs - run: | - ref=${{ (inputs.release-version && format('llvmorg-{0}', inputs.release-version)) || github.sha }} - if [ -n "${{ inputs.release-version }}" ]; then - export_args="-release ${{ inputs.release-version }} -final" - else - export_args="-git-ref ${{ github.sha }}" - fi - echo "ref=$ref" >> $GITHUB_OUTPUT - echo "export-args=$export_args" >> $GITHUB_OUTPUT - - release-sources: - name: Package Release Sources - if: github.repository_owner == 'llvm' - runs-on: ubuntu-latest - needs: - - inputs - permissions: - id-token: write - attestations: write - steps: - - name: Checkout LLVM - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - with: - ref: ${{ needs.inputs.outputs.ref }} - fetch-tags: true - - name: Install Dependencies - run: | - pip install --require-hashes -r ./llvm/utils/git/requirements.txt - - - name: Check Permissions - if: github.event_name != 'pull_request' - env: - GITHUB_TOKEN: ${{ github.token }} - USER_TOKEN: ${{ secrets.RELEASE_TASKS_USER_TOKEN }} - run: | - ./llvm/utils/release/./github-upload-release.py --token "$GITHUB_TOKEN" --user ${{ github.actor }} --user-token "$USER_TOKEN" check-permissions - - name: Create Tarballs - run: | - ./llvm/utils/release/export.sh ${{ needs.inputs.outputs.export-args }} - - name: Attest Build Provenance - if: github.event_name != 'pull_request' - id: provenance - uses: actions/attest-build-provenance@897ed5eab6ed058a474202017ada7f40bfa52940 # v1.0.0 - with: - subject-path: "*.xz" - - if: github.event_name != 'pull_request' - run: | - mv ${{ 
steps.provenance.outputs.bundle-path }} . - - name: Create Tarball Artifacts - uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 #v4.3.3 - with: - path: | - *.xz - attestation.jsonl - - diff --git a/.github/workflows/release-tasks.yml b/.github/workflows/release-tasks.yml deleted file mode 100644 index 52076ea1821b..000000000000 --- a/.github/workflows/release-tasks.yml +++ /dev/null @@ -1,113 +0,0 @@ -name: Release Task - -permissions: - contents: read - -on: - push: - tags: - # The regex support here is limited, so just match everything that starts with llvmorg- and filter later. - - 'llvmorg-*' - -jobs: - validate-tag: - name: Validate Tag - runs-on: ubuntu-latest - if: github.repository == 'llvm/llvm-project' - outputs: - release-version: ${{ steps.validate-tag.outputs.release-version }} - steps: - - name: Validate Tag - id: validate-tag - run: | - echo "${{ github.ref_name }}" | grep -e '^llvmorg-[0-9]\+\.[0-9]\+\.[0-9]\+\(-rc[0-9]\+\)\?$' - release_version=$(echo "${{ github.ref_name }}" | sed 's/llvmorg-//g') - echo "release-version=$release_version" >> "$GITHUB_OUTPUT" - - release-create: - name: Create a New Release - runs-on: ubuntu-latest - permissions: - contents: write # For creating the release. 
- needs: validate-tag - - steps: - - name: Install Dependencies - run: | - sudo apt-get update - sudo apt-get install python3-github - - - name: Checkout LLVM - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - - - name: Create Release - env: - GITHUB_TOKEN: ${{ github.token }} - USER_TOKEN: ${{ secrets.RELEASE_TASKS_USER_TOKEN }} - run: | - ./llvm/utils/release/./github-upload-release.py --token "$GITHUB_TOKEN" --release ${{ needs.validate-tag.outputs.release-version }} --user ${{ github.actor }} --user-token "$USER_TOKEN" create - release-documentation: - name: Build and Upload Release Documentation - needs: - - validate-tag - uses: ./.github/workflows/release-documentation.yml - with: - release-version: ${{ needs.validate-tag.outputs.release-version }} - upload: true - - release-doxygen: - name: Build and Upload Release Doxygen - permissions: - contents: write - needs: - - validate-tag - - release-create - uses: ./.github/workflows/release-doxygen.yml - with: - release-version: ${{ needs.validate-tag.outputs.release-version }} - upload: true - # Called workflows don't have access to secrets by default, so we need to explicitly pass secrets that we use. - secrets: - RELEASE_TASKS_USER_TOKEN: ${{ secrets.RELEASE_TASKS_USER_TOKEN }} - - release-lit: - name: Release Lit - needs: validate-tag - uses: ./.github/workflows/release-lit.yml - with: - release-version: ${{ needs.validate-tag.outputs.release-version }} - # Called workflows don't have access to secrets by default, so we need to explicitly pass secrets that we use. 
- secrets: - RELEASE_TASKS_USER_TOKEN: ${{ secrets.RELEASE_TASKS_USER_TOKEN }} - - release-binaries: - name: Build Release Binaries - permissions: - contents: write - id-token: write - attestations: write - needs: - - validate-tag - - release-create - uses: ./.github/workflows/release-binaries-all.yml - with: - release-version: ${{ needs.validate-tag.outputs.release-version }} - upload: true - # Called workflows don't have access to secrets by default, so we need to explicitly pass secrets that we use. - secrets: - RELEASE_TASKS_USER_TOKEN: ${{ secrets.RELEASE_TASKS_USER_TOKEN }} - - release-sources: - name: Package Release Sources - permissions: - contents: read - id-token: write - attestations: write - needs: - - validate-tag - uses: ./.github/workflows/release-sources.yml - with: - release-version: ${{ needs.validate-tag.outputs.release-version }} - # Called workflows don't have access to secrets by default, so we need to explicitly pass secrets that we use. - secrets: - RELEASE_TASKS_USER_TOKEN: ${{ secrets.RELEASE_TASKS_USER_TOKEN }} diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml index ff61cf83a6af..2d96a0a67825 100644 --- a/.github/workflows/scorecard.yml +++ b/.github/workflows/scorecard.yml @@ -2,7 +2,7 @@ # by a third-party and are governed by separate terms of service, privacy # policy, and support documentation. -# Check current LLVM-Project results here: https://securityscorecards.dev/viewer/?uri=github.com/llvm/llvm-project +# Check current LLVM-Project results here: https://securityscorecards.dev/viewer/?uri=github.com/intel/npu-plugin-llvm name: Scorecard supply-chain security on: @@ -13,50 +13,63 @@ on: # https://github.com/ossf/scorecard/blob/main/docs/checks.md#maintained schedule: - cron: '38 20 * * *' + push: + branches: [ "npu/release/19.x" ] # Declare default permissions as read only. 
-permissions: - contents: read +permissions: read-all jobs: analysis: name: Scorecard analysis runs-on: ubuntu-latest - if: github.repository == 'llvm/llvm-project' permissions: # Needed to upload the results to code-scanning dashboard. security-events: write # Needed to publish results and get a badge (see publish_results below). - id-token: write + id-token: write + # Uncomment the permissions below if installing in a private repository. + # contents: read + # actions: read steps: - name: "Checkout code" - uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # v3.1.0 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 with: persist-credentials: false - name: "Run analysis" - uses: ossf/scorecard-action@0864cf19026789058feabb7e87baa5f140aac736 # v2.3.1 + uses: ossf/scorecard-action@62b2cac7ed8198b15735ed49ab1e5cf35480ba46 # v2.4.0 with: results_file: results.sarif results_format: sarif + # (Optional) "write" PAT token. Uncomment the `repo_token` line below if: + # - you want to enable the Branch-Protection check on a *public* repository, or + # - you are installing Scorecard on a *private* repository + # To create the PAT, follow the steps in https://github.com/ossf/scorecard-action?tab=readme-ov-file#authentication-with-fine-grained-pat-optional. + # repo_token: ${{ secrets.SCORECARD_TOKEN }} + # Public repositories: # - Publish results to OpenSSF REST API for easy access by consumers # - Allows the repository to include the Scorecard badge. - # - See https://github.com/ossf/scorecard-action#publishing-results. + # - See https://github.com/ossf/scorecard-action#publishing-results. + # For private repositories: + # - `publish_results` will always be set to `false`, regardless + # of the value entered here. publish_results: true # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF # format to the repository Actions tab. 
- name: "Upload artifact" - uses: actions/upload-artifact@3cea5372237819ed00197afe530f5a7ea3e805c8 # v3.1.0 + uses: actions/upload-artifact@c24449f33cd45d4826c6702db7e49f7cdb9b551d # v3.pre.node20 with: name: SARIF file path: results.sarif retention-days: 5 - # Upload the results to GitHub's code scanning dashboard. + # Upload the results to GitHub's code scanning dashboard (optional). + # Commenting out will disable upload of results to your repo's Code Scanning dashboard - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@17573ee1cc1b9d061760f3a006fc4aac4f944fd5 # v2.2.4 + uses: github/codeql-action/upload-sarif@v3 with: - sarif_file: results.sarif + sarif_file: results.sarif \ No newline at end of file diff --git a/.github/workflows/set-release-binary-outputs.sh b/.github/workflows/set-release-binary-outputs.sh deleted file mode 100644 index 14d0798364e9..000000000000 --- a/.github/workflows/set-release-binary-outputs.sh +++ /dev/null @@ -1,34 +0,0 @@ -# Usage: set-release-binary-outputs.sh - -set -e - -if [ -z "$GITHUB_OUTPUT" ]; then - export GITHUB_OUTPUT=`mktemp` - echo "Warning: Environment variable GITHUB_OUTPUT is not set." - echo "Writing output variables to $GITHUB_OUTPUT" -fi - -tag=$1 -upload=$2 - -if echo $tag | grep -e '^[0-9a-f]\+$'; then - # This is a plain commit. - # TODO: Don't hardcode this. - release_version="18" - upload='false' - ref="$tag" - -else - - pattern='^llvmorg-[0-9]\+\.[0-9]\+\.[0-9]\+\(-rc[0-9]\+\)\?$' - echo "$tag" | grep -e $pattern - if [ $? 
!= 0 ]; then - echo "ERROR: Tag '$tag' doesn't match pattern: $pattern" - exit 1 - fi - release_version=`echo "$tag" | sed 's/llvmorg-//g'` - release=`echo "$release_version" | sed 's/-.*//g'` -fi -echo "release-version=$release_version" >> $GITHUB_OUTPUT -echo "upload=$upload" >> $GITHUB_OUTPUT -echo "ref=$tag" >> $GITHUB_OUTPUT diff --git a/.github/workflows/spirv-tests.yml b/.github/workflows/spirv-tests.yml deleted file mode 100644 index ea466dc6c52e..000000000000 --- a/.github/workflows/spirv-tests.yml +++ /dev/null @@ -1,29 +0,0 @@ -name: SPIR-V Tests - -permissions: - contents: read - -on: - workflow_dispatch: - pull_request: - paths: - - 'llvm/lib/Target/SPIRV/**' - - 'llvm/test/CodeGen/SPIRV/**' - - '.github/workflows/spirv-tests.yml' - -concurrency: - # Skip intermediate builds: always. - # Cancel intermediate builds: only if it is a pull request build. - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: ${{ startsWith(github.ref, 'refs/pull/') }} - -jobs: - check_spirv: - if: github.repository_owner == 'llvm' - name: Test SPIR-V - uses: ./.github/workflows/llvm-project-tests.yml - with: - build_target: check-llvm-codegen-spirv - projects: - extra_cmake_args: '-DLLVM_TARGETS_TO_BUILD="SPIRV" -DLLVM_INCLUDE_SPIRV_TOOLS_TESTS=ON' - os_list: '["ubuntu-22.04"]' diff --git a/.github/workflows/unprivileged-download-artifact/action.yml b/.github/workflows/unprivileged-download-artifact/action.yml deleted file mode 100644 index 9d8fb59a67c0..000000000000 --- a/.github/workflows/unprivileged-download-artifact/action.yml +++ /dev/null @@ -1,81 +0,0 @@ -name: Unprivileged Download Artifact -description: >- - Download artifacts from another workflow run without using an access token. -inputs: - run-id: - description: >- - The run-id for the workflow run that you want to download the artifact - from. If ommitted it will download the most recently created artifact - from the repo with the artifact-name. 
- required: false - artifact-name: - desciption: The name of the artifact to download. - required: true - - -outputs: - filename: - description: >- - The filename of the downloaded artifact or the empty string if the - artifact was not found. - value: ${{ steps.download-artifact.outputs.filename }} - artifact-id: - description: "The id of the artifact being downloaded." - value: ${{ steps.artifact-url.outputs.id }} - - -runs: - using: "composite" - steps: - - uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea #v7.0.1 - id: artifact-url - with: - script: | - var response; - if (!"${{ inputs.run-id }}") { - response = await github.rest.actions.listArtifactsForRepo({ - owner: context.repo.owner, - repo: context.repo.repo, - name: "${{ inputs.artifact-name }}" - }) - } else { - response = await github.rest.actions.listWorkflowRunArtifacts({ - owner: context.repo.owner, - repo: context.repo.repo, - run_id: "${{ inputs.run-id }}", - name: "${{ inputs.artifact-name }}" - }) - } - - console.log(response) - - for (artifact of response.data.artifacts) { - console.log(artifact); - } - - if (response.data.artifacts.length == 0) { - console.log("Could not find artifact ${{ inputs.artifact-name }} for workflow run ${{ inputs.run-id }}") - return; - } - - const url_response = await github.rest.actions.downloadArtifact({ - owner: context.repo.owner, - repo: context.repo.repo, - artifact_id: response.data.artifacts[0].id, - archive_format: "zip" - }) - - core.setOutput("url", url_response.url); - core.setOutput("id", response.data.artifacts[0].id); - - - shell: bash - if: steps.artifact-url.outputs.url != '' - id: download-artifact - run: | - curl -L -o ${{ inputs.artifact-name }}.zip "${{ steps.artifact-url.outputs.url }}" - echo "filename=${{ inputs.artifact-name }}.zip" >> $GITHUB_OUTPUT - - - shell: bash - if: steps.download-artifact.outputs.filename != '' - run: | - unzip ${{ steps.download-artifact.outputs.filename }} diff --git 
a/.github/workflows/version-check.py b/.github/workflows/version-check.py deleted file mode 100755 index f75fd5030088..000000000000 --- a/.github/workflows/version-check.py +++ /dev/null @@ -1,36 +0,0 @@ -#!/usr/bin/python3 - -from git import Repo -import re -import sys - - -def get_version_from_tag(tag): - m = re.match("llvmorg-([0-9]+)\.([0-9]+)\.([0-9]+)(-rc[0-9]+)?$", tag) - if m: - if m.lastindex == 4: - # We have an rc tag. - return m.group(1, 2, 3) - # We have a final release tag. - return (m.group(1), m.group(2), str(int(m.group(3)) + 1)) - - m = re.match("llvmorg-([0-9]+)-init", tag) - if m: - return (m.group(1), "1", "0") - - raise Exception(f"error: Tag is not valid: {tag}") - - -version = sys.argv[1] - -repo = Repo() - -tag = repo.git.describe(tags=True, abbrev=0) -expected_version = ".".join(get_version_from_tag(tag)) - -if version != expected_version: - print("error: Expected version", expected_version, "but found version", version) - sys.exit(1) - -print("Versions match:", version, expected_version) -sys.exit(0) diff --git a/.github/workflows/version-check.yml b/.github/workflows/version-check.yml deleted file mode 100644 index 894e07d323ca..000000000000 --- a/.github/workflows/version-check.yml +++ /dev/null @@ -1,31 +0,0 @@ -name: LLVM Project Version Check - -on: - push: - branches: - - 'release/**' - pull_request: - branches: - - 'release/**' - -permissions: - contents: read - -jobs: - version_check: - if: github.repository_owner == 'llvm' - runs-on: ubuntu-latest - steps: - - name: Fetch LLVM sources - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - - name: Install dependencies - run: | - pip install --require-hashes -r ./llvm/utils/git/requirements.txt - - - name: Version Check - run: | - version=$(grep -o 'LLVM_VERSION_\(MAJOR\|MINOR\|PATCH\) [0-9]\+' cmake/Modules/LLVMVersion.cmake | cut -d ' ' -f 2 | tr "\n" "." 
| sed 's/.$//g') - .github/workflows/version-check.py "$version" diff --git a/README.md b/README.md index a9b29ecbc1a3..9936d03334e3 100644 --- a/README.md +++ b/README.md @@ -1,8 +1,7 @@ # The LLVM Compiler Infrastructure -[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/llvm/llvm-project/badge)](https://securityscorecards.dev/viewer/?uri=github.com/llvm/llvm-project) -[![OpenSSF Best Practices](https://www.bestpractices.dev/projects/8273/badge)](https://www.bestpractices.dev/projects/8273) -[![libc++](https://github.com/llvm/llvm-project/actions/workflows/libcxx-build-and-test.yaml/badge.svg?branch=main&event=schedule)](https://github.com/llvm/llvm-project/actions/workflows/libcxx-build-and-test.yaml?query=event%3Aschedule) +[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/intel/npu-plugin-llvm/badge)](https://securityscorecards.dev/viewer/?uri=github.com/intel/npu-plugin-llvm) +[![Linux precommit](https://github.com/intel/npu-plugin-llvm/actions/workflows/linux-precommit.yml/badge.svg?branch=npu%2Frelease%2F19.x&event=push)](https://github.com/intel/npu-plugin-llvm/actions/workflows/linux-precommit.yml?query=event%3Apush) Welcome to the LLVM project! 
diff --git a/lldb/tools/lldb-dap/package-lock.json b/lldb/tools/lldb-dap/package-lock.json index d1cb6d00ecf5..1cc3c63a03dd 100644 --- a/lldb/tools/lldb-dap/package-lock.json +++ b/lldb/tools/lldb-dap/package-lock.json @@ -361,9 +361,9 @@ "dev": true }, "node_modules/brace-expansion": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", "dev": true, "dependencies": { "balanced-match": "^1.0.0", @@ -1495,10 +1495,11 @@ } }, "node_modules/tar-fs": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/tar-fs/-/tar-fs-2.1.1.tgz", - "integrity": "sha512-V0r2Y9scmbDRLCNex/+hYzvp/zyYjvFbHPNgVTKfQvVrb6guiE/fxP+XblDNR011utopbkex2nM4dHNV6GDsng==", + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/tar-fs/-/tar-fs-2.1.3.tgz", + "integrity": "sha512-090nwYJDmlhwFwEW3QQl+vaNnxsO2yVsd45eTKRBzSzu+hlb1w2K9inVq5b0ngXuLVqQ4ApvsUHHnu/zQNkWAg==", "dev": true, + "license": "MIT", "optional": true, "dependencies": { "chownr": "^1.1.1", diff --git a/llvm/cmake/modules/HandleLLVMOptions.cmake b/llvm/cmake/modules/HandleLLVMOptions.cmake index 5db06ccdadbe..171d96bd84bc 100644 --- a/llvm/cmake/modules/HandleLLVMOptions.cmake +++ b/llvm/cmake/modules/HandleLLVMOptions.cmake @@ -787,6 +787,16 @@ if (MSVC) # Enable warnings if (LLVM_ENABLE_WARNINGS) + # Remove all -wd flag to enable warnings + if (NOT CLANG_CL) + set(msvc_warning_flags + # Promoted warnings. + -w14062 # Promote 'enumerator in switch of enum is not handled' to level 1 warning. + + # Promoted warnings to errors. + -we4238 # Promote 'nonstandard extension used : class rvalue used as lvalue' to error. 
+ ) + endif(NOT CLANG_CL) # Put /W4 in front of all the -we flags. cl.exe doesn't care, but for # clang-cl having /W4 after the -we flags will re-enable the warnings # disabled by -we. diff --git a/llvm/docs/requirements-hashed.txt b/llvm/docs/requirements-hashed.txt index 07e051ca4a8b..7d71293c881e 100644 --- a/llvm/docs/requirements-hashed.txt +++ b/llvm/docs/requirements-hashed.txt @@ -1,150 +1,156 @@ # -# This file is autogenerated by pip-compile with Python 3.11 +# This file is autogenerated by pip-compile with Python 3.13 # by the following command: # -# pip-compile --generate-hashes --output-file=requirements-hashed.txt requirements.txt +# pip-compile --generate-hashes --output-file=llvm/docs/requirements-hashed.txt llvm/docs/requirements.txt # -alabaster==0.7.13 \ - --hash=sha256:1ee19aca801bbabb5ba3f5f258e4422dfa86f82f3e9cefb0859b283cdd7f62a3 \ - --hash=sha256:a27a4a084d5e690e16e01e03ad2b2e552c61a65469419b907243193de1a84ae2 +alabaster==0.7.16 \ + --hash=sha256:75a8b99c28a5dad50dd7f8ccdd447a121ddb3892da9e53d1ca5cca3106d58d65 \ + --hash=sha256:b46733c07dce03ae4e150330b975c75737fa60f0a7c591b6c8bf4928a28e2c92 # via sphinx -babel==2.14.0 \ - --hash=sha256:6919867db036398ba21eb5c7a0f6b28ab8cbc3ae7a73a44ebe34ae74a4e7d363 \ - --hash=sha256:efb1a25b7118e67ce3a259bed20545c29cb68be8ad2c784c83689981b7a57287 +babel==2.17.0 \ + --hash=sha256:0c54cffb19f690cdcc52a3b50bcbf71e07a808d1c80d549f2459b9d2cf0afb9d \ + --hash=sha256:4d0b53093fdfb4b21c92b5213dba5a1b23885afa8383709427046b21c366e5f2 # via sphinx -beautifulsoup4==4.12.2 \ - --hash=sha256:492bbc69dca35d12daac71c4db1bfff0c876c00ef4a2ffacce226d4638eb72da \ - --hash=sha256:bd2520ca0d9d7d12694a53d44ac482d181b4ec1888909b035a3dbf40d0f57d4a +beautifulsoup4==4.13.4 \ + --hash=sha256:9bbbb14bfde9d79f38b8cd5f8c7c85f4b8f2523190ebed90e950a8dea4cb1c4b \ + --hash=sha256:dbb3c4e1ceae6aefebdaf2423247260cd062430a410e38c66f2baa50a8437195 # via furo -certifi==2023.11.17 \ - 
--hash=sha256:9b469f3a900bf28dc19b8cfbf8019bf47f7fdd1a65a1d4ffb98fc14166beb4d1 \ - --hash=sha256:e036ab49d5b79556f99cfc2d9320b34cfbe5be05c5871b51de9329f0603b0474 +certifi==2024.7.4 \ + --hash=sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b \ + --hash=sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90 # via requests -charset-normalizer==3.3.2 \ - --hash=sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027 \ - --hash=sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087 \ - --hash=sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786 \ - --hash=sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8 \ - --hash=sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09 \ - --hash=sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185 \ - --hash=sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574 \ - --hash=sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e \ - --hash=sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519 \ - --hash=sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898 \ - --hash=sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269 \ - --hash=sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3 \ - --hash=sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f \ - --hash=sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6 \ - --hash=sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8 \ - --hash=sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a \ - --hash=sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73 \ - --hash=sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc \ - --hash=sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714 \ - 
--hash=sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2 \ - --hash=sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc \ - --hash=sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce \ - --hash=sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d \ - --hash=sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e \ - --hash=sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6 \ - --hash=sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269 \ - --hash=sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96 \ - --hash=sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d \ - --hash=sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a \ - --hash=sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4 \ - --hash=sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77 \ - --hash=sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d \ - --hash=sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0 \ - --hash=sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed \ - --hash=sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068 \ - --hash=sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac \ - --hash=sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25 \ - --hash=sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8 \ - --hash=sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab \ - --hash=sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26 \ - --hash=sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2 \ - --hash=sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db \ - --hash=sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f \ - 
--hash=sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5 \ - --hash=sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99 \ - --hash=sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c \ - --hash=sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d \ - --hash=sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811 \ - --hash=sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa \ - --hash=sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a \ - --hash=sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03 \ - --hash=sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b \ - --hash=sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04 \ - --hash=sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c \ - --hash=sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001 \ - --hash=sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458 \ - --hash=sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389 \ - --hash=sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99 \ - --hash=sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985 \ - --hash=sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537 \ - --hash=sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238 \ - --hash=sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f \ - --hash=sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d \ - --hash=sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796 \ - --hash=sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a \ - --hash=sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143 \ - --hash=sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8 \ - 
--hash=sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c \ - --hash=sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5 \ - --hash=sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5 \ - --hash=sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711 \ - --hash=sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4 \ - --hash=sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6 \ - --hash=sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c \ - --hash=sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7 \ - --hash=sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4 \ - --hash=sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b \ - --hash=sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae \ - --hash=sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12 \ - --hash=sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c \ - --hash=sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae \ - --hash=sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8 \ - --hash=sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887 \ - --hash=sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b \ - --hash=sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4 \ - --hash=sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f \ - --hash=sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5 \ - --hash=sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33 \ - --hash=sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519 \ - --hash=sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561 +charset-normalizer==3.4.2 \ + 
--hash=sha256:005fa3432484527f9732ebd315da8da8001593e2cf46a3d817669f062c3d9ed4 \ + --hash=sha256:046595208aae0120559a67693ecc65dd75d46f7bf687f159127046628178dc45 \ + --hash=sha256:0c29de6a1a95f24b9a1aa7aefd27d2487263f00dfd55a77719b530788f75cff7 \ + --hash=sha256:0c8c57f84ccfc871a48a47321cfa49ae1df56cd1d965a09abe84066f6853b9c0 \ + --hash=sha256:0f5d9ed7f254402c9e7d35d2f5972c9bbea9040e99cd2861bd77dc68263277c7 \ + --hash=sha256:18dd2e350387c87dabe711b86f83c9c78af772c748904d372ade190b5c7c9d4d \ + --hash=sha256:1b1bde144d98e446b056ef98e59c256e9294f6b74d7af6846bf5ffdafd687a7d \ + --hash=sha256:1c95a1e2902a8b722868587c0e1184ad5c55631de5afc0eb96bc4b0d738092c0 \ + --hash=sha256:1cad5f45b3146325bb38d6855642f6fd609c3f7cad4dbaf75549bf3b904d3184 \ + --hash=sha256:21b2899062867b0e1fde9b724f8aecb1af14f2778d69aacd1a5a1853a597a5db \ + --hash=sha256:24498ba8ed6c2e0b56d4acbf83f2d989720a93b41d712ebd4f4979660db4417b \ + --hash=sha256:25a23ea5c7edc53e0f29bae2c44fcb5a1aa10591aae107f2a2b2583a9c5cbc64 \ + --hash=sha256:289200a18fa698949d2b39c671c2cc7a24d44096784e76614899a7ccf2574b7b \ + --hash=sha256:28a1005facc94196e1fb3e82a3d442a9d9110b8434fc1ded7a24a2983c9888d8 \ + --hash=sha256:32fc0341d72e0f73f80acb0a2c94216bd704f4f0bce10aedea38f30502b271ff \ + --hash=sha256:36b31da18b8890a76ec181c3cf44326bf2c48e36d393ca1b72b3f484113ea344 \ + --hash=sha256:3c21d4fca343c805a52c0c78edc01e3477f6dd1ad7c47653241cf2a206d4fc58 \ + --hash=sha256:3fddb7e2c84ac87ac3a947cb4e66d143ca5863ef48e4a5ecb83bd48619e4634e \ + --hash=sha256:43e0933a0eff183ee85833f341ec567c0980dae57c464d8a508e1b2ceb336471 \ + --hash=sha256:4a476b06fbcf359ad25d34a057b7219281286ae2477cc5ff5e3f70a246971148 \ + --hash=sha256:4e594135de17ab3866138f496755f302b72157d115086d100c3f19370839dd3a \ + --hash=sha256:50bf98d5e563b83cc29471fa114366e6806bc06bc7a25fd59641e41445327836 \ + --hash=sha256:5a9979887252a82fefd3d3ed2a8e3b937a7a809f65dcb1e068b090e165bbe99e \ + --hash=sha256:5baececa9ecba31eff645232d59845c07aa030f0c81ee70184a90d35099a0e63 \ + 
--hash=sha256:5bf4545e3b962767e5c06fe1738f951f77d27967cb2caa64c28be7c4563e162c \ + --hash=sha256:6333b3aa5a12c26b2a4d4e7335a28f1475e0e5e17d69d55141ee3cab736f66d1 \ + --hash=sha256:65c981bdbd3f57670af8b59777cbfae75364b483fa8a9f420f08094531d54a01 \ + --hash=sha256:68a328e5f55ec37c57f19ebb1fdc56a248db2e3e9ad769919a58672958e8f366 \ + --hash=sha256:6a0289e4589e8bdfef02a80478f1dfcb14f0ab696b5a00e1f4b8a14a307a3c58 \ + --hash=sha256:6b66f92b17849b85cad91259efc341dce9c1af48e2173bf38a85c6329f1033e5 \ + --hash=sha256:6c9379d65defcab82d07b2a9dfbfc2e95bc8fe0ebb1b176a3190230a3ef0e07c \ + --hash=sha256:6fc1f5b51fa4cecaa18f2bd7a003f3dd039dd615cd69a2afd6d3b19aed6775f2 \ + --hash=sha256:70f7172939fdf8790425ba31915bfbe8335030f05b9913d7ae00a87d4395620a \ + --hash=sha256:721c76e84fe669be19c5791da68232ca2e05ba5185575086e384352e2c309597 \ + --hash=sha256:7222ffd5e4de8e57e03ce2cef95a4c43c98fcb72ad86909abdfc2c17d227fc1b \ + --hash=sha256:75d10d37a47afee94919c4fab4c22b9bc2a8bf7d4f46f87363bcf0573f3ff4f5 \ + --hash=sha256:76af085e67e56c8816c3ccf256ebd136def2ed9654525348cfa744b6802b69eb \ + --hash=sha256:770cab594ecf99ae64c236bc9ee3439c3f46be49796e265ce0cc8bc17b10294f \ + --hash=sha256:7a6ab32f7210554a96cd9e33abe3ddd86732beeafc7a28e9955cdf22ffadbab0 \ + --hash=sha256:7c48ed483eb946e6c04ccbe02c6b4d1d48e51944b6db70f697e089c193404941 \ + --hash=sha256:7f56930ab0abd1c45cd15be65cc741c28b1c9a34876ce8c17a2fa107810c0af0 \ + --hash=sha256:8075c35cd58273fee266c58c0c9b670947c19df5fb98e7b66710e04ad4e9ff86 \ + --hash=sha256:8272b73e1c5603666618805fe821edba66892e2870058c94c53147602eab29c7 \ + --hash=sha256:82d8fd25b7f4675d0c47cf95b594d4e7b158aca33b76aa63d07186e13c0e0ab7 \ + --hash=sha256:844da2b5728b5ce0e32d863af26f32b5ce61bc4273a9c720a9f3aa9df73b1455 \ + --hash=sha256:8755483f3c00d6c9a77f490c17e6ab0c8729e39e6390328e42521ef175380ae6 \ + --hash=sha256:915f3849a011c1f593ab99092f3cecfcb4d65d8feb4a64cf1bf2d22074dc0ec4 \ + --hash=sha256:926ca93accd5d36ccdabd803392ddc3e03e6d4cd1cf17deff3b989ab8e9dbcf0 \ + 
--hash=sha256:982bb1e8b4ffda883b3d0a521e23abcd6fd17418f6d2c4118d257a10199c0ce3 \ + --hash=sha256:98f862da73774290f251b9df8d11161b6cf25b599a66baf087c1ffe340e9bfd1 \ + --hash=sha256:9cbfacf36cb0ec2897ce0ebc5d08ca44213af24265bd56eca54bee7923c48fd6 \ + --hash=sha256:a370b3e078e418187da8c3674eddb9d983ec09445c99a3a263c2011993522981 \ + --hash=sha256:a955b438e62efdf7e0b7b52a64dc5c3396e2634baa62471768a64bc2adb73d5c \ + --hash=sha256:aa6af9e7d59f9c12b33ae4e9450619cf2488e2bbe9b44030905877f0b2324980 \ + --hash=sha256:aa88ca0b1932e93f2d961bf3addbb2db902198dca337d88c89e1559e066e7645 \ + --hash=sha256:aaeeb6a479c7667fbe1099af9617c83aaca22182d6cf8c53966491a0f1b7ffb7 \ + --hash=sha256:aaf27faa992bfee0264dc1f03f4c75e9fcdda66a519db6b957a3f826e285cf12 \ + --hash=sha256:b2680962a4848b3c4f155dc2ee64505a9c57186d0d56b43123b17ca3de18f0fa \ + --hash=sha256:b2d318c11350e10662026ad0eb71bb51c7812fc8590825304ae0bdd4ac283acd \ + --hash=sha256:b33de11b92e9f75a2b545d6e9b6f37e398d86c3e9e9653c4864eb7e89c5773ef \ + --hash=sha256:b3daeac64d5b371dea99714f08ffc2c208522ec6b06fbc7866a450dd446f5c0f \ + --hash=sha256:be1e352acbe3c78727a16a455126d9ff83ea2dfdcbc83148d2982305a04714c2 \ + --hash=sha256:bee093bf902e1d8fc0ac143c88902c3dfc8941f7ea1d6a8dd2bcb786d33db03d \ + --hash=sha256:c72fbbe68c6f32f251bdc08b8611c7b3060612236e960ef848e0a517ddbe76c5 \ + --hash=sha256:c9e36a97bee9b86ef9a1cf7bb96747eb7a15c2f22bdb5b516434b00f2a599f02 \ + --hash=sha256:cddf7bd982eaa998934a91f69d182aec997c6c468898efe6679af88283b498d3 \ + --hash=sha256:cf713fe9a71ef6fd5adf7a79670135081cd4431c2943864757f0fa3a65b1fafd \ + --hash=sha256:d11b54acf878eef558599658b0ffca78138c8c3655cf4f3a4a673c437e67732e \ + --hash=sha256:d41c4d287cfc69060fa91cae9683eacffad989f1a10811995fa309df656ec214 \ + --hash=sha256:d524ba3f1581b35c03cb42beebab4a13e6cdad7b36246bd22541fa585a56cccd \ + --hash=sha256:daac4765328a919a805fa5e2720f3e94767abd632ae410a9062dff5412bae65a \ + --hash=sha256:db4c7bf0e07fc3b7d89ac2a5880a6a8062056801b83ff56d8464b70f65482b6c \ + 
--hash=sha256:dc7039885fa1baf9be153a0626e337aa7ec8bf96b0128605fb0d77788ddc1681 \ + --hash=sha256:dccab8d5fa1ef9bfba0590ecf4d46df048d18ffe3eec01eeb73a42e0d9e7a8ba \ + --hash=sha256:dedb8adb91d11846ee08bec4c8236c8549ac721c245678282dcb06b221aab59f \ + --hash=sha256:e45ba65510e2647721e35323d6ef54c7974959f6081b58d4ef5d87c60c84919a \ + --hash=sha256:e53efc7c7cee4c1e70661e2e112ca46a575f90ed9ae3fef200f2a25e954f4b28 \ + --hash=sha256:e635b87f01ebc977342e2697d05b56632f5f879a4f15955dfe8cef2448b51691 \ + --hash=sha256:e70e990b2137b29dc5564715de1e12701815dacc1d056308e2b17e9095372a82 \ + --hash=sha256:e8082b26888e2f8b36a042a58307d5b917ef2b1cacab921ad3323ef91901c71a \ + --hash=sha256:e8323a9b031aa0393768b87f04b4164a40037fb2a3c11ac06a03ffecd3618027 \ + --hash=sha256:e92fca20c46e9f5e1bb485887d074918b13543b1c2a1185e69bb8d17ab6236a7 \ + --hash=sha256:eb30abc20df9ab0814b5a2524f23d75dcf83cde762c161917a2b4b7b55b1e518 \ + --hash=sha256:eba9904b0f38a143592d9fc0e19e2df0fa2e41c3c3745554761c5f6447eedabf \ + --hash=sha256:ef8de666d6179b009dce7bcb2ad4c4a779f113f12caf8dc77f0162c29d20490b \ + --hash=sha256:efd387a49825780ff861998cd959767800d54f8308936b21025326de4b5a42b9 \ + --hash=sha256:f0aa37f3c979cf2546b73e8222bbfa3dc07a641585340179d768068e3455e544 \ + --hash=sha256:f4074c5a429281bf056ddd4c5d3b740ebca4d43ffffe2ef4bf4d2d05114299da \ + --hash=sha256:f69a27e45c43520f5487f27627059b64aaf160415589230992cec34c5e18a509 \ + --hash=sha256:fb707f3e15060adf5b7ada797624a6c6e0138e2a26baa089df64c68ee98e040f \ + --hash=sha256:fcbe676a55d7445b22c10967bceaaf0ee69407fbe0ece4d032b6eb8d4565982a \ + --hash=sha256:fdb20a30fe1175ecabed17cbf7812f7b804b8a315a25f24678bcdf120a90077f # via requests +colorama==0.4.6 \ + --hash=sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44 \ + --hash=sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6 + # via sphinx commonmark==0.9.1 \ --hash=sha256:452f9dc859be7f06631ddcb328b6919c67984aca654e5fefb3914d54691aed60 \ 
--hash=sha256:da2f38c92590f83de410ba1a3cbceafbc74fee9def35f9251ba9a971d6d66fd9 # via recommonmark -docutils==0.20.1 \ - --hash=sha256:96f387a2c5562db4476f09f13bbab2192e764cac08ebbf3a34a95d9b1e4a59d6 \ - --hash=sha256:f08a4e276c3a1583a86dce3e34aba3fe04d02bba2dd51ed16106244e8a923e3b +docutils==0.21.2 \ + --hash=sha256:3a6b18732edf182daa3cd12775bbb338cf5691468f91eeeb109deff6ebfa986f \ + --hash=sha256:dafca5b9e384f0e419294eb4d2ff9fa826435bf15f15b7bd45723e8ad76811b2 # via - # -r requirements.txt + # -r llvm/docs/requirements.txt # myst-parser # recommonmark # sphinx -furo==2024.1.29 \ - --hash=sha256:3548be2cef45a32f8cdc0272d415fcb3e5fa6a0eb4ddfe21df3ecf1fe45a13cf \ - --hash=sha256:4d6b2fe3f10a6e36eb9cc24c1e7beb38d7a23fc7b3c382867503b7fcac8a1e02 - # via -r requirements.txt -idna==3.6 \ - --hash=sha256:9ecdbbd083b06798ae1e86adcbfe8ab1479cf864e4ee30fe4e46a003d12491ca \ - --hash=sha256:c05567e9c24a6b9faaa835c4821bad0590fbb9d5779e7caa6e1cc4978e7eb24f +furo==2024.5.6 \ + --hash=sha256:490a00d08c0a37ecc90de03ae9227e8eb5d6f7f750edf9807f398a2bdf2358de \ + --hash=sha256:81f205a6605ebccbb883350432b4831c0196dd3d1bc92f61e1f459045b3d2b0b + # via -r llvm/docs/requirements.txt +idna==3.10 \ + --hash=sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9 \ + --hash=sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3 # via requests imagesize==1.4.1 \ --hash=sha256:0d8d18d08f840c19d0ee7ca1fd82490fdc3729b7ac93f49870406ddde8ef8d8b \ --hash=sha256:69150444affb9cb0d5cc5a92b3676f0b2fb7cd9ae39e947a5e11a36b4497cd4a # via sphinx -jinja2==3.1.2 \ - --hash=sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852 \ - --hash=sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61 +jinja2==3.1.6 \ + --hash=sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d \ + --hash=sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67 # via # myst-parser # sphinx -markdown==3.5.1 \ - 
--hash=sha256:5874b47d4ee3f0b14d764324d2c94c03ea66bee56f2d929da9f2508d65e722dc \ - --hash=sha256:b65d7beb248dc22f2e8a31fb706d93798093c308dc1aba295aedeb9d41a813bd +markdown==3.8.2 \ + --hash=sha256:247b9a70dd12e27f67431ce62523e675b866d254f900c4fe75ce3dda62237c45 \ + --hash=sha256:5c83764dbd4e00bdd94d85a19b8d55ccca20fe35b2e678a1422b380324dd5f24 # via sphinx-markdown-tables markdown-it-py==3.0.0 \ --hash=sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1 \ @@ -152,163 +158,167 @@ markdown-it-py==3.0.0 \ # via # mdit-py-plugins # myst-parser -markupsafe==2.1.3 \ - --hash=sha256:05fb21170423db021895e1ea1e1f3ab3adb85d1c2333cbc2310f2a26bc77272e \ - --hash=sha256:0a4e4a1aff6c7ac4cd55792abf96c915634c2b97e3cc1c7129578aa68ebd754e \ - --hash=sha256:10bbfe99883db80bdbaff2dcf681dfc6533a614f700da1287707e8a5d78a8431 \ - --hash=sha256:134da1eca9ec0ae528110ccc9e48041e0828d79f24121a1a146161103c76e686 \ - --hash=sha256:14ff806850827afd6b07a5f32bd917fb7f45b046ba40c57abdb636674a8b559c \ - --hash=sha256:1577735524cdad32f9f694208aa75e422adba74f1baee7551620e43a3141f559 \ - --hash=sha256:1b40069d487e7edb2676d3fbdb2b0829ffa2cd63a2ec26c4938b2d34391b4ecc \ - --hash=sha256:1b8dd8c3fd14349433c79fa8abeb573a55fc0fdd769133baac1f5e07abf54aeb \ - --hash=sha256:1f67c7038d560d92149c060157d623c542173016c4babc0c1913cca0564b9939 \ - --hash=sha256:282c2cb35b5b673bbcadb33a585408104df04f14b2d9b01d4c345a3b92861c2c \ - --hash=sha256:2c1b19b3aaacc6e57b7e25710ff571c24d6c3613a45e905b1fde04d691b98ee0 \ - --hash=sha256:2ef12179d3a291be237280175b542c07a36e7f60718296278d8593d21ca937d4 \ - --hash=sha256:338ae27d6b8745585f87218a3f23f1512dbf52c26c28e322dbe54bcede54ccb9 \ - --hash=sha256:3c0fae6c3be832a0a0473ac912810b2877c8cb9d76ca48de1ed31e1c68386575 \ - --hash=sha256:3fd4abcb888d15a94f32b75d8fd18ee162ca0c064f35b11134be77050296d6ba \ - --hash=sha256:42de32b22b6b804f42c5d98be4f7e5e977ecdd9ee9b660fda1a3edf03b11792d \ - --hash=sha256:47d4f1c5f80fc62fdd7777d0d40a2e9dda0a05883ab11374334f6c4de38adffd \ - 
--hash=sha256:504b320cd4b7eff6f968eddf81127112db685e81f7e36e75f9f84f0df46041c3 \ - --hash=sha256:525808b8019e36eb524b8c68acdd63a37e75714eac50e988180b169d64480a00 \ - --hash=sha256:56d9f2ecac662ca1611d183feb03a3fa4406469dafe241673d521dd5ae92a155 \ - --hash=sha256:5bbe06f8eeafd38e5d0a4894ffec89378b6c6a625ff57e3028921f8ff59318ac \ - --hash=sha256:65c1a9bcdadc6c28eecee2c119465aebff8f7a584dd719facdd9e825ec61ab52 \ - --hash=sha256:68e78619a61ecf91e76aa3e6e8e33fc4894a2bebe93410754bd28fce0a8a4f9f \ - --hash=sha256:69c0f17e9f5a7afdf2cc9fb2d1ce6aabdb3bafb7f38017c0b77862bcec2bbad8 \ - --hash=sha256:6b2b56950d93e41f33b4223ead100ea0fe11f8e6ee5f641eb753ce4b77a7042b \ - --hash=sha256:715d3562f79d540f251b99ebd6d8baa547118974341db04f5ad06d5ea3eb8007 \ - --hash=sha256:787003c0ddb00500e49a10f2844fac87aa6ce977b90b0feaaf9de23c22508b24 \ - --hash=sha256:7ef3cb2ebbf91e330e3bb937efada0edd9003683db6b57bb108c4001f37a02ea \ - --hash=sha256:8023faf4e01efadfa183e863fefde0046de576c6f14659e8782065bcece22198 \ - --hash=sha256:8758846a7e80910096950b67071243da3e5a20ed2546e6392603c096778d48e0 \ - --hash=sha256:8afafd99945ead6e075b973fefa56379c5b5c53fd8937dad92c662da5d8fd5ee \ - --hash=sha256:8c41976a29d078bb235fea9b2ecd3da465df42a562910f9022f1a03107bd02be \ - --hash=sha256:8e254ae696c88d98da6555f5ace2279cf7cd5b3f52be2b5cf97feafe883b58d2 \ - --hash=sha256:8f9293864fe09b8149f0cc42ce56e3f0e54de883a9de90cd427f191c346eb2e1 \ - --hash=sha256:9402b03f1a1b4dc4c19845e5c749e3ab82d5078d16a2a4c2cd2df62d57bb0707 \ - --hash=sha256:962f82a3086483f5e5f64dbad880d31038b698494799b097bc59c2edf392fce6 \ - --hash=sha256:9aad3c1755095ce347e26488214ef77e0485a3c34a50c5a5e2471dff60b9dd9c \ - --hash=sha256:9dcdfd0eaf283af041973bff14a2e143b8bd64e069f4c383416ecd79a81aab58 \ - --hash=sha256:aa57bd9cf8ae831a362185ee444e15a93ecb2e344c8e52e4d721ea3ab6ef1823 \ - --hash=sha256:aa7bd130efab1c280bed0f45501b7c8795f9fdbeb02e965371bbef3523627779 \ - --hash=sha256:ab4a0df41e7c16a1392727727e7998a467472d0ad65f3ad5e6e765015df08636 \ - 
--hash=sha256:ad9e82fb8f09ade1c3e1b996a6337afac2b8b9e365f926f5a61aacc71adc5b3c \ - --hash=sha256:af598ed32d6ae86f1b747b82783958b1a4ab8f617b06fe68795c7f026abbdcad \ - --hash=sha256:b076b6226fb84157e3f7c971a47ff3a679d837cf338547532ab866c57930dbee \ - --hash=sha256:b7ff0f54cb4ff66dd38bebd335a38e2c22c41a8ee45aa608efc890ac3e3931bc \ - --hash=sha256:bfce63a9e7834b12b87c64d6b155fdd9b3b96191b6bd334bf37db7ff1fe457f2 \ - --hash=sha256:c011a4149cfbcf9f03994ec2edffcb8b1dc2d2aede7ca243746df97a5d41ce48 \ - --hash=sha256:c9c804664ebe8f83a211cace637506669e7890fec1b4195b505c214e50dd4eb7 \ - --hash=sha256:ca379055a47383d02a5400cb0d110cef0a776fc644cda797db0c5696cfd7e18e \ - --hash=sha256:cb0932dc158471523c9637e807d9bfb93e06a95cbf010f1a38b98623b929ef2b \ - --hash=sha256:cd0f502fe016460680cd20aaa5a76d241d6f35a1c3350c474bac1273803893fa \ - --hash=sha256:ceb01949af7121f9fc39f7d27f91be8546f3fb112c608bc4029aef0bab86a2a5 \ - --hash=sha256:d080e0a5eb2529460b30190fcfcc4199bd7f827663f858a226a81bc27beaa97e \ - --hash=sha256:dd15ff04ffd7e05ffcb7fe79f1b98041b8ea30ae9234aed2a9168b5797c3effb \ - --hash=sha256:df0be2b576a7abbf737b1575f048c23fb1d769f267ec4358296f31c2479db8f9 \ - --hash=sha256:e09031c87a1e51556fdcb46e5bd4f59dfb743061cf93c4d6831bf894f125eb57 \ - --hash=sha256:e4dd52d80b8c83fdce44e12478ad2e85c64ea965e75d66dbeafb0a3e77308fcc \ - --hash=sha256:f698de3fd0c4e6972b92290a45bd9b1536bffe8c6759c62471efaa8acb4c37bc \ - --hash=sha256:fec21693218efe39aa7f8599346e90c705afa52c5b31ae019b2e57e8f6542bb2 \ - --hash=sha256:ffcc3f7c66b5f5b7931a5aa68fc9cecc51e685ef90282f4a82f0f5e9b704ad11 +markupsafe==3.0.2 \ + --hash=sha256:0bff5e0ae4ef2e1ae4fdf2dfd5b76c75e5c2fa4132d05fc1b0dabcd20c7e28c4 \ + --hash=sha256:0f4ca02bea9a23221c0182836703cbf8930c5e9454bacce27e767509fa286a30 \ + --hash=sha256:1225beacc926f536dc82e45f8a4d68502949dc67eea90eab715dea3a21c1b5f0 \ + --hash=sha256:131a3c7689c85f5ad20f9f6fb1b866f402c445b220c19fe4308c0b147ccd2ad9 \ + 
--hash=sha256:15ab75ef81add55874e7ab7055e9c397312385bd9ced94920f2802310c930396 \ + --hash=sha256:1a9d3f5f0901fdec14d8d2f66ef7d035f2157240a433441719ac9a3fba440b13 \ + --hash=sha256:1c99d261bd2d5f6b59325c92c73df481e05e57f19837bdca8413b9eac4bd8028 \ + --hash=sha256:1e084f686b92e5b83186b07e8a17fc09e38fff551f3602b249881fec658d3eca \ + --hash=sha256:2181e67807fc2fa785d0592dc2d6206c019b9502410671cc905d132a92866557 \ + --hash=sha256:2cb8438c3cbb25e220c2ab33bb226559e7afb3baec11c4f218ffa7308603c832 \ + --hash=sha256:3169b1eefae027567d1ce6ee7cae382c57fe26e82775f460f0b2778beaad66c0 \ + --hash=sha256:3809ede931876f5b2ec92eef964286840ed3540dadf803dd570c3b7e13141a3b \ + --hash=sha256:38a9ef736c01fccdd6600705b09dc574584b89bea478200c5fbf112a6b0d5579 \ + --hash=sha256:3d79d162e7be8f996986c064d1c7c817f6df3a77fe3d6859f6f9e7be4b8c213a \ + --hash=sha256:444dcda765c8a838eaae23112db52f1efaf750daddb2d9ca300bcae1039adc5c \ + --hash=sha256:48032821bbdf20f5799ff537c7ac3d1fba0ba032cfc06194faffa8cda8b560ff \ + --hash=sha256:4aa4e5faecf353ed117801a068ebab7b7e09ffb6e1d5e412dc852e0da018126c \ + --hash=sha256:52305740fe773d09cffb16f8ed0427942901f00adedac82ec8b67752f58a1b22 \ + --hash=sha256:569511d3b58c8791ab4c2e1285575265991e6d8f8700c7be0e88f86cb0672094 \ + --hash=sha256:57cb5a3cf367aeb1d316576250f65edec5bb3be939e9247ae594b4bcbc317dfb \ + --hash=sha256:5b02fb34468b6aaa40dfc198d813a641e3a63b98c2b05a16b9f80b7ec314185e \ + --hash=sha256:6381026f158fdb7c72a168278597a5e3a5222e83ea18f543112b2662a9b699c5 \ + --hash=sha256:6af100e168aa82a50e186c82875a5893c5597a0c1ccdb0d8b40240b1f28b969a \ + --hash=sha256:6c89876f41da747c8d3677a2b540fb32ef5715f97b66eeb0c6b66f5e3ef6f59d \ + --hash=sha256:6e296a513ca3d94054c2c881cc913116e90fd030ad1c656b3869762b754f5f8a \ + --hash=sha256:70a87b411535ccad5ef2f1df5136506a10775d267e197e4cf531ced10537bd6b \ + --hash=sha256:7e94c425039cde14257288fd61dcfb01963e658efbc0ff54f5306b06054700f8 \ + --hash=sha256:846ade7b71e3536c4e56b386c2a47adf5741d2d8b94ec9dc3e92e5e1ee1e2225 \ + 
--hash=sha256:88416bd1e65dcea10bc7569faacb2c20ce071dd1f87539ca2ab364bf6231393c \ + --hash=sha256:88b49a3b9ff31e19998750c38e030fc7bb937398b1f78cfa599aaef92d693144 \ + --hash=sha256:8c4e8c3ce11e1f92f6536ff07154f9d49677ebaaafc32db9db4620bc11ed480f \ + --hash=sha256:8e06879fc22a25ca47312fbe7c8264eb0b662f6db27cb2d3bbbc74b1df4b9b87 \ + --hash=sha256:9025b4018f3a1314059769c7bf15441064b2207cb3f065e6ea1e7359cb46db9d \ + --hash=sha256:93335ca3812df2f366e80509ae119189886b0f3c2b81325d39efdb84a1e2ae93 \ + --hash=sha256:9778bd8ab0a994ebf6f84c2b949e65736d5575320a17ae8984a77fab08db94cf \ + --hash=sha256:9e2d922824181480953426608b81967de705c3cef4d1af983af849d7bd619158 \ + --hash=sha256:a123e330ef0853c6e822384873bef7507557d8e4a082961e1defa947aa59ba84 \ + --hash=sha256:a904af0a6162c73e3edcb969eeeb53a63ceeb5d8cf642fade7d39e7963a22ddb \ + --hash=sha256:ad10d3ded218f1039f11a75f8091880239651b52e9bb592ca27de44eed242a48 \ + --hash=sha256:b424c77b206d63d500bcb69fa55ed8d0e6a3774056bdc4839fc9298a7edca171 \ + --hash=sha256:b5a6b3ada725cea8a5e634536b1b01c30bcdcd7f9c6fff4151548d5bf6b3a36c \ + --hash=sha256:ba8062ed2cf21c07a9e295d5b8a2a5ce678b913b45fdf68c32d95d6c1291e0b6 \ + --hash=sha256:ba9527cdd4c926ed0760bc301f6728ef34d841f405abf9d4f959c478421e4efd \ + --hash=sha256:bbcb445fa71794da8f178f0f6d66789a28d7319071af7a496d4d507ed566270d \ + --hash=sha256:bcf3e58998965654fdaff38e58584d8937aa3096ab5354d493c77d1fdd66d7a1 \ + --hash=sha256:c0ef13eaeee5b615fb07c9a7dadb38eac06a0608b41570d8ade51c56539e509d \ + --hash=sha256:cabc348d87e913db6ab4aa100f01b08f481097838bdddf7c7a84b7575b7309ca \ + --hash=sha256:cdb82a876c47801bb54a690c5ae105a46b392ac6099881cdfb9f6e95e4014c6a \ + --hash=sha256:cfad01eed2c2e0c01fd0ecd2ef42c492f7f93902e39a42fc9ee1692961443a29 \ + --hash=sha256:d16a81a06776313e817c951135cf7340a3e91e8c1ff2fac444cfd75fffa04afe \ + --hash=sha256:d8213e09c917a951de9d09ecee036d5c7d36cb6cb7dbaece4c71a60d79fb9798 \ + --hash=sha256:e07c3764494e3776c602c1e78e298937c3315ccc9043ead7e685b7f2b8d47b3c \ + 
--hash=sha256:e17c96c14e19278594aa4841ec148115f9c7615a47382ecb6b82bd8fea3ab0c8 \ + --hash=sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f \ + --hash=sha256:e6a2a455bd412959b57a172ce6328d2dd1f01cb2135efda2e4576e8a23fa3b0f \ + --hash=sha256:eaa0a10b7f72326f1372a713e73c3f739b524b3af41feb43e4921cb529f5929a \ + --hash=sha256:eb7972a85c54febfb25b5c4b4f3af4dcc731994c7da0d8a0b4a6eb0640e1d178 \ + --hash=sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0 \ + --hash=sha256:f3818cb119498c0678015754eba762e0d61e5b52d34c8b13d770f0719f7b1d79 \ + --hash=sha256:f8b3d067f2e40fe93e1ccdd6b2e1d16c43140e76f02fb1319a05cf2b79d99430 \ + --hash=sha256:fcabf5ff6eea076f859677f5f0b6b5c1a51e70a376b0579e0eadef8db48c6b50 # via jinja2 -mdit-py-plugins==0.4.0 \ - --hash=sha256:b51b3bb70691f57f974e257e367107857a93b36f322a9e6d44ca5bf28ec2def9 \ - --hash=sha256:d8ab27e9aed6c38aa716819fedfde15ca275715955f8a185a8e1cf90fb1d2c1b +mdit-py-plugins==0.4.2 \ + --hash=sha256:0c673c3f889399a33b95e88d2f0d111b4447bdfea7f237dab2d488f459835636 \ + --hash=sha256:5f2cd1fdb606ddf152d37ec30e46101a60512bc0e5fa1a7002c36647b09e26b5 # via myst-parser mdurl==0.1.2 \ --hash=sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8 \ --hash=sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba # via markdown-it-py -myst-parser==2.0.0 \ - --hash=sha256:7c36344ae39c8e740dad7fdabf5aa6fc4897a813083c6cc9990044eb93656b14 \ - --hash=sha256:ea929a67a6a0b1683cdbe19b8d2e724cd7643f8aa3e7bb18dd65beac3483bead - # via -r requirements.txt -packaging==23.2 \ - --hash=sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5 \ - --hash=sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7 +myst-parser==3.0.1 \ + --hash=sha256:6457aaa33a5d474aca678b8ead9b3dc298e89c68e67012e73146ea6fd54babf1 \ + --hash=sha256:88f0cb406cb363b077d176b51c476f62d60604d68a8dcdf4832e080441301a87 + # via -r llvm/docs/requirements.txt +packaging==25.0 \ + 
--hash=sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484 \ + --hash=sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f # via sphinx -pygments==2.17.2 \ - --hash=sha256:b27c2826c47d0f3219f29554824c30c5e8945175d888647acd804ddd04af846c \ - --hash=sha256:da46cec9fd2de5be3a8a784f434e4c4ab670b4ff54d605c4c2717e9d49c4c367 +pygments==2.19.2 \ + --hash=sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887 \ + --hash=sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b # via # furo # sphinx -pyyaml==6.0.1 \ - --hash=sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5 \ - --hash=sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc \ - --hash=sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df \ - --hash=sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741 \ - --hash=sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206 \ - --hash=sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27 \ - --hash=sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595 \ - --hash=sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62 \ - --hash=sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98 \ - --hash=sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696 \ - --hash=sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290 \ - --hash=sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9 \ - --hash=sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d \ - --hash=sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6 \ - --hash=sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867 \ - --hash=sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47 \ - --hash=sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486 \ - 
--hash=sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6 \ - --hash=sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3 \ - --hash=sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007 \ - --hash=sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938 \ - --hash=sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0 \ - --hash=sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c \ - --hash=sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735 \ - --hash=sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d \ - --hash=sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28 \ - --hash=sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4 \ - --hash=sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba \ - --hash=sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8 \ - --hash=sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5 \ - --hash=sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd \ - --hash=sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3 \ - --hash=sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0 \ - --hash=sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515 \ - --hash=sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c \ - --hash=sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c \ - --hash=sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924 \ - --hash=sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34 \ - --hash=sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43 \ - --hash=sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859 \ - --hash=sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673 \ - 
--hash=sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54 \ - --hash=sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a \ - --hash=sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b \ - --hash=sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab \ - --hash=sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa \ - --hash=sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c \ - --hash=sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585 \ - --hash=sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d \ - --hash=sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f +pyyaml==6.0.2 \ + --hash=sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff \ + --hash=sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48 \ + --hash=sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086 \ + --hash=sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e \ + --hash=sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133 \ + --hash=sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5 \ + --hash=sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484 \ + --hash=sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee \ + --hash=sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5 \ + --hash=sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68 \ + --hash=sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a \ + --hash=sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf \ + --hash=sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99 \ + --hash=sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8 \ + 
--hash=sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85 \ + --hash=sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19 \ + --hash=sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc \ + --hash=sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a \ + --hash=sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1 \ + --hash=sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317 \ + --hash=sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c \ + --hash=sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631 \ + --hash=sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d \ + --hash=sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652 \ + --hash=sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5 \ + --hash=sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e \ + --hash=sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b \ + --hash=sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8 \ + --hash=sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476 \ + --hash=sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706 \ + --hash=sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563 \ + --hash=sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237 \ + --hash=sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b \ + --hash=sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083 \ + --hash=sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180 \ + --hash=sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425 \ + --hash=sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e \ + --hash=sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f \ + 
--hash=sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725 \ + --hash=sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183 \ + --hash=sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab \ + --hash=sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774 \ + --hash=sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725 \ + --hash=sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e \ + --hash=sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5 \ + --hash=sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d \ + --hash=sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290 \ + --hash=sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44 \ + --hash=sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed \ + --hash=sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4 \ + --hash=sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba \ + --hash=sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12 \ + --hash=sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4 # via myst-parser recommonmark==0.7.1 \ --hash=sha256:1b1db69af0231efce3fa21b94ff627ea33dee7079a01dd0a7f8482c3da148b3f \ --hash=sha256:bdb4db649f2222dcd8d2d844f0006b958d627f732415d399791ee436a3686d67 - # via -r requirements.txt -requests==2.31.0 \ - --hash=sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f \ - --hash=sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1 + # via -r llvm/docs/requirements.txt +requests==2.32.4 \ + --hash=sha256:27babd3cda2a6d50b30443204ee89830707d396671944c998b5975b031ac2b2c \ + --hash=sha256:27d0316682c8a29834d3264820024b62a36942083d52caf2f14c0591336d3422 # via sphinx -snowballstemmer==2.2.0 \ - --hash=sha256:09b16deb8547d3412ad7b590689584cd0fe25ec8db3be37788be3810cbf19cb1 \ - 
--hash=sha256:c8e1716e83cc398ae16824e5572ae04e0d9fc2c6b985fb0f900f5f0c96ecba1a +snowballstemmer==3.0.1 \ + --hash=sha256:6cd7b3897da8d6c9ffb968a6781fa6532dce9c3618a4b127d920dab764a19064 \ + --hash=sha256:6d5eeeec8e9f84d4d56b847692bacf79bc2c8e90c7f80ca4444ff8b6f2e52895 # via sphinx -soupsieve==2.5 \ - --hash=sha256:5663d5a7b3bfaeee0bc4372e7fc48f9cff4940b3eec54a6451cc5299f1097690 \ - --hash=sha256:eaa337ff55a1579b6549dc679565eac1e3d000563bcb1c8ab0d0fefbc0c2cdc7 +soupsieve==2.7 \ + --hash=sha256:6e60cc5c1ffaf1cebcc12e8188320b72071e922c2e897f737cadce79ad5d30c4 \ + --hash=sha256:ad282f9b6926286d2ead4750552c8a6142bc4c783fd66b0293547c8fe6ae126a # via beautifulsoup4 -sphinx==7.2.6 \ - --hash=sha256:1e09160a40b956dc623c910118fa636da93bd3ca0b9876a7b3df90f07d691560 \ - --hash=sha256:9a5160e1ea90688d5963ba09a2dcd8bdd526620edbb65c328728f1b2228d5ab5 +sphinx==7.3.7 \ + --hash=sha256:413f75440be4cacf328f580b4274ada4565fb2187d696a84970c23f77b64d8c3 \ + --hash=sha256:a4a7db75ed37531c05002d56ed6948d4c42f473a36f46e1382b0bd76ca9627bc # via - # -r requirements.txt + # -r llvm/docs/requirements.txt # furo # myst-parser # recommonmark @@ -322,7 +332,7 @@ sphinx==7.2.6 \ sphinx-automodapi==0.17.0 \ --hash=sha256:4d029cb79eef29413e94ab01bb0177ebd2d5ba86e9789b73575afe9c06ae1501 \ --hash=sha256:7ccdadad57add4aa9149d9f2bb5cf28c8f8b590280b4735b1156ea8355c423a1 - # via -r requirements.txt + # via -r llvm/docs/requirements.txt sphinx-basic-ng==1.0.0b2 \ --hash=sha256:9ec55a47c90c8c002b5960c57492ec3021f5193cb26cebc2dc4ea226848651c9 \ --hash=sha256:eb09aedbabfb650607e9b4b68c9d240b90b1e1be221d6ad71d61c52e29f7932b @@ -330,11 +340,11 @@ sphinx-basic-ng==1.0.0b2 \ sphinx-bootstrap-theme==0.8.1 \ --hash=sha256:683e3b735448dadd0149f76edecf95ff4bd9157787e9e77e0d048ca6f1d680df \ --hash=sha256:6ef36206c211846ea6cbdb45bc85645578e7c62d0a883361181708f8b6ea743b - # via -r requirements.txt + # via -r llvm/docs/requirements.txt sphinx-markdown-tables==0.0.17 \ 
--hash=sha256:2bd0c30779653e4dd120300cbd9ca412c480738cc2241f6dea477a883f299e04 \ --hash=sha256:6bc6d3d400eaccfeebd288446bc08dd83083367c58b85d40fe6c12d77ef592f1 - # via -r requirements.txt + # via -r llvm/docs/requirements.txt sphinx-reredirects==0.1.2 \ --hash=sha256:3a22161771aadd448bb608a4fe7277252182a337af53c18372b7104531d71489 \ --hash=sha256:a0e7213304759b01edc22f032f1715a1c61176fc8f167164e7a52b9feec9ac64 @@ -343,29 +353,33 @@ sphinxcontrib-applehelp==1.0.8 \ --hash=sha256:c40a4f96f3776c4393d933412053962fac2b84f4c99a7982ba42e09576a70619 \ --hash=sha256:cb61eb0ec1b61f349e5cc36b2028e9e7ca765be05e49641c97241274753067b4 # via - # -r requirements.txt + # -r llvm/docs/requirements.txt # sphinx -sphinxcontrib-devhelp==1.0.5 \ - --hash=sha256:63b41e0d38207ca40ebbeabcf4d8e51f76c03e78cd61abe118cf4435c73d4212 \ - --hash=sha256:fe8009aed765188f08fcaadbb3ea0d90ce8ae2d76710b7e29ea7d047177dae2f +sphinxcontrib-devhelp==2.0.0 \ + --hash=sha256:411f5d96d445d1d73bb5d52133377b4248ec79db5c793ce7dbe59e074b4dd1ad \ + --hash=sha256:aefb8b83854e4b0998877524d1029fd3e6879210422ee3780459e28a1f03a8a2 # via sphinx -sphinxcontrib-htmlhelp==2.0.4 \ - --hash=sha256:6c26a118a05b76000738429b724a0568dbde5b72391a688577da08f11891092a \ - --hash=sha256:8001661c077a73c29beaf4a79968d0726103c5605e27db92b9ebed8bab1359e9 +sphinxcontrib-htmlhelp==2.1.0 \ + --hash=sha256:166759820b47002d22914d64a075ce08f4c46818e17cfc9470a9786b759b19f8 \ + --hash=sha256:c9e2916ace8aad64cc13a0d233ee22317f2b9025b9cf3295249fa985cc7082e9 # via sphinx sphinxcontrib-jsmath==1.0.1 \ --hash=sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178 \ --hash=sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8 # via sphinx -sphinxcontrib-qthelp==1.0.6 \ - --hash=sha256:62b9d1a186ab7f5ee3356d906f648cacb7a6bdb94d201ee7adf26db55092982d \ - --hash=sha256:bf76886ee7470b934e363da7a954ea2825650013d367728588732c7350f49ea4 +sphinxcontrib-qthelp==2.0.0 \ + 
--hash=sha256:4fe7d0ac8fc171045be623aba3e2a8f613f8682731f9153bb2e40ece16b9bbab \ + --hash=sha256:b18a828cdba941ccd6ee8445dbe72ffa3ef8cbe7505d8cd1fa0d42d3f2d5f3eb # via sphinx -sphinxcontrib-serializinghtml==1.1.9 \ - --hash=sha256:0c64ff898339e1fac29abd2bf5f11078f3ec413cfe9c046d3120d7ca65530b54 \ - --hash=sha256:9b36e503703ff04f20e9675771df105e58aa029cfcbc23b8ed716019b7416ae1 +sphinxcontrib-serializinghtml==2.0.0 \ + --hash=sha256:6e2cb0eef194e10c27ec0023bfeb25badbbb5868244cf5bc5bdc04e4464bf331 \ + --hash=sha256:e9d912827f872c029017a53f0ef2180b327c3f7fd23c87229f7a8e8b70031d4d # via sphinx -urllib3==2.1.0 \ - --hash=sha256:55901e917a5896a349ff771be919f8bd99aff50b79fe58fec595eb37bbc56bb3 \ - --hash=sha256:df7aa8afb0148fa78488e7899b2c59b5f4ffcfa82e6c54ccb9dd37c1d7b52d54 +typing-extensions==4.14.1 \ + --hash=sha256:38b39f4aeeab64884ce9f74c94263ef78f3c22467c8724005483154c26648d36 \ + --hash=sha256:d1e1e3b58374dc93031d6eda2420a48ea44a36c2b4766a4fdeb3710755731d76 + # via beautifulsoup4 +urllib3==2.5.0 \ + --hash=sha256:3fc47733c7e419d4bc3f6b3dc2b4f890bb743906a30d56ba4a5bfa4bbff92760 \ + --hash=sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc # via requests diff --git a/llvm/docs/requirements.txt b/llvm/docs/requirements.txt index 50cf8ef58e75..85bc7a0f5386 100644 --- a/llvm/docs/requirements.txt +++ b/llvm/docs/requirements.txt @@ -1,10 +1,10 @@ -sphinx==7.2.6 -docutils==0.20.1 +sphinx==7.3.7 +docutils==0.21.2 sphinx-markdown-tables==0.0.17 recommonmark==0.7.1 sphinx-automodapi==0.17.0 sphinx-bootstrap-theme==0.8.1 sphinxcontrib-applehelp==1.0.8 sphinx-reredirects==0.1.2 -furo==2024.1.29 -myst-parser==2.0.0 +furo==2024.5.6 +myst-parser==3.0.1 diff --git a/llvm/include/llvm/ADT/FunctionExtras.h b/llvm/include/llvm/ADT/FunctionExtras.h index d92868e3715f..33ad1db48716 100644 --- a/llvm/include/llvm/ADT/FunctionExtras.h +++ b/llvm/include/llvm/ADT/FunctionExtras.h @@ -153,7 +153,7 @@ template class UniqueFunctionBase { void *StoragePtr; size_t Size; 
size_t Alignment; - } OutOfLineStorage; + } OutOfLineStorage = {}; static_assert( sizeof(OutOfLineStorageT) <= InlineStorageSize, "Should always use all of the out-of-line storage for inline storage!"); diff --git a/llvm/lib/DebugInfo/DWARF/DWARFVerifier.cpp b/llvm/lib/DebugInfo/DWARF/DWARFVerifier.cpp index 8bf513538de7..42ae28e22d6b 100644 --- a/llvm/lib/DebugInfo/DWARF/DWARFVerifier.cpp +++ b/llvm/lib/DebugInfo/DWARF/DWARFVerifier.cpp @@ -1434,8 +1434,8 @@ unsigned DWARFVerifier::verifyNameIndexAttribute( } if (AttrEnc.Index == dwarf::DW_IDX_parent) { - constexpr static auto AllowedForms = {dwarf::Form::DW_FORM_flag_present, - dwarf::Form::DW_FORM_ref4}; + static constexpr dwarf::Form AllowedForms[] = {dwarf::Form::DW_FORM_flag_present, + dwarf::Form::DW_FORM_ref4}; if (!is_contained(AllowedForms, AttrEnc.Form)) { ErrorCategory.Report("Unexpected NameIndex Abbreviation", [&]() { error() << formatv( diff --git a/llvm/lib/Support/Windows/Threading.inc b/llvm/lib/Support/Windows/Threading.inc index d862dbd7f71c..9c584415e0cb 100644 --- a/llvm/lib/Support/Windows/Threading.inc +++ b/llvm/lib/Support/Windows/Threading.inc @@ -42,6 +42,9 @@ void llvm_thread_join_impl(HANDLE hThread) { if (::WaitForSingleObject(hThread, INFINITE) == WAIT_FAILED) { ReportLastErrorFatal("WaitForSingleObject failed"); } + if (::CloseHandle(hThread) == FALSE) { + ReportLastErrorFatal("CloseHandle failed"); + } } void llvm_thread_detach_impl(HANDLE hThread) { diff --git a/llvm/utils/git/requirements.txt b/llvm/utils/git/requirements.txt index bbb9059b6b26..a0be1dcf58b7 100644 --- a/llvm/utils/git/requirements.txt +++ b/llvm/utils/git/requirements.txt @@ -1,14 +1,14 @@ # -# This file is autogenerated by pip-compile with Python 3.11 +# This file is autogenerated by pip-compile with Python 3.13 # by the following command: # -# pip-compile --generate-hashes --output-file=requirements.txt requirements.txt.in +# pip-compile --generate-hashes --output-file=llvm/utils/git/requirements.txt 
llvm/utils/git/requirements.txt.in # -certifi==2024.8.30 \ - --hash=sha256:922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8 \ - --hash=sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9 +certifi==2025.6.15 \ + --hash=sha256:2e0c7ce7cb5d8f8634ca55d2ba7e6ec2689a2fd6537d8dec1296a477a4910057 \ + --hash=sha256:d747aa5a8b9bbbb1bb8c22bb13e22bd1f18e9796defa16bab421f7f7a317323b # via - # -r requirements.txt.in + # -r llvm/utils/git/requirements.txt.in # requests cffi==1.17.1 \ --hash=sha256:045d61c734659cc045141be4bae381a41d89b741f795af1dd018bfb532fd0df8 \ @@ -81,142 +81,154 @@ cffi==1.17.1 \ # via # cryptography # pynacl -charset-normalizer==3.3.2 \ - --hash=sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027 \ - --hash=sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087 \ - --hash=sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786 \ - --hash=sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8 \ - --hash=sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09 \ - --hash=sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185 \ - --hash=sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574 \ - --hash=sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e \ - --hash=sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519 \ - --hash=sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898 \ - --hash=sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269 \ - --hash=sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3 \ - --hash=sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f \ - --hash=sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6 \ - --hash=sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8 \ - 
--hash=sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a \ - --hash=sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73 \ - --hash=sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc \ - --hash=sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714 \ - --hash=sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2 \ - --hash=sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc \ - --hash=sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce \ - --hash=sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d \ - --hash=sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e \ - --hash=sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6 \ - --hash=sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269 \ - --hash=sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96 \ - --hash=sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d \ - --hash=sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a \ - --hash=sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4 \ - --hash=sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77 \ - --hash=sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d \ - --hash=sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0 \ - --hash=sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed \ - --hash=sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068 \ - --hash=sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac \ - --hash=sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25 \ - --hash=sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8 \ - --hash=sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab \ - 
--hash=sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26 \ - --hash=sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2 \ - --hash=sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db \ - --hash=sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f \ - --hash=sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5 \ - --hash=sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99 \ - --hash=sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c \ - --hash=sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d \ - --hash=sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811 \ - --hash=sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa \ - --hash=sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a \ - --hash=sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03 \ - --hash=sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b \ - --hash=sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04 \ - --hash=sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c \ - --hash=sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001 \ - --hash=sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458 \ - --hash=sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389 \ - --hash=sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99 \ - --hash=sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985 \ - --hash=sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537 \ - --hash=sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238 \ - --hash=sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f \ - --hash=sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d \ - 
--hash=sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796 \ - --hash=sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a \ - --hash=sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143 \ - --hash=sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8 \ - --hash=sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c \ - --hash=sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5 \ - --hash=sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5 \ - --hash=sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711 \ - --hash=sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4 \ - --hash=sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6 \ - --hash=sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c \ - --hash=sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7 \ - --hash=sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4 \ - --hash=sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b \ - --hash=sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae \ - --hash=sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12 \ - --hash=sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c \ - --hash=sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae \ - --hash=sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8 \ - --hash=sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887 \ - --hash=sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b \ - --hash=sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4 \ - --hash=sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f \ - --hash=sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5 \ - 
--hash=sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33 \ - --hash=sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519 \ - --hash=sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561 +charset-normalizer==3.4.2 \ + --hash=sha256:005fa3432484527f9732ebd315da8da8001593e2cf46a3d817669f062c3d9ed4 \ + --hash=sha256:046595208aae0120559a67693ecc65dd75d46f7bf687f159127046628178dc45 \ + --hash=sha256:0c29de6a1a95f24b9a1aa7aefd27d2487263f00dfd55a77719b530788f75cff7 \ + --hash=sha256:0c8c57f84ccfc871a48a47321cfa49ae1df56cd1d965a09abe84066f6853b9c0 \ + --hash=sha256:0f5d9ed7f254402c9e7d35d2f5972c9bbea9040e99cd2861bd77dc68263277c7 \ + --hash=sha256:18dd2e350387c87dabe711b86f83c9c78af772c748904d372ade190b5c7c9d4d \ + --hash=sha256:1b1bde144d98e446b056ef98e59c256e9294f6b74d7af6846bf5ffdafd687a7d \ + --hash=sha256:1c95a1e2902a8b722868587c0e1184ad5c55631de5afc0eb96bc4b0d738092c0 \ + --hash=sha256:1cad5f45b3146325bb38d6855642f6fd609c3f7cad4dbaf75549bf3b904d3184 \ + --hash=sha256:21b2899062867b0e1fde9b724f8aecb1af14f2778d69aacd1a5a1853a597a5db \ + --hash=sha256:24498ba8ed6c2e0b56d4acbf83f2d989720a93b41d712ebd4f4979660db4417b \ + --hash=sha256:25a23ea5c7edc53e0f29bae2c44fcb5a1aa10591aae107f2a2b2583a9c5cbc64 \ + --hash=sha256:289200a18fa698949d2b39c671c2cc7a24d44096784e76614899a7ccf2574b7b \ + --hash=sha256:28a1005facc94196e1fb3e82a3d442a9d9110b8434fc1ded7a24a2983c9888d8 \ + --hash=sha256:32fc0341d72e0f73f80acb0a2c94216bd704f4f0bce10aedea38f30502b271ff \ + --hash=sha256:36b31da18b8890a76ec181c3cf44326bf2c48e36d393ca1b72b3f484113ea344 \ + --hash=sha256:3c21d4fca343c805a52c0c78edc01e3477f6dd1ad7c47653241cf2a206d4fc58 \ + --hash=sha256:3fddb7e2c84ac87ac3a947cb4e66d143ca5863ef48e4a5ecb83bd48619e4634e \ + --hash=sha256:43e0933a0eff183ee85833f341ec567c0980dae57c464d8a508e1b2ceb336471 \ + --hash=sha256:4a476b06fbcf359ad25d34a057b7219281286ae2477cc5ff5e3f70a246971148 \ + 
--hash=sha256:4e594135de17ab3866138f496755f302b72157d115086d100c3f19370839dd3a \ + --hash=sha256:50bf98d5e563b83cc29471fa114366e6806bc06bc7a25fd59641e41445327836 \ + --hash=sha256:5a9979887252a82fefd3d3ed2a8e3b937a7a809f65dcb1e068b090e165bbe99e \ + --hash=sha256:5baececa9ecba31eff645232d59845c07aa030f0c81ee70184a90d35099a0e63 \ + --hash=sha256:5bf4545e3b962767e5c06fe1738f951f77d27967cb2caa64c28be7c4563e162c \ + --hash=sha256:6333b3aa5a12c26b2a4d4e7335a28f1475e0e5e17d69d55141ee3cab736f66d1 \ + --hash=sha256:65c981bdbd3f57670af8b59777cbfae75364b483fa8a9f420f08094531d54a01 \ + --hash=sha256:68a328e5f55ec37c57f19ebb1fdc56a248db2e3e9ad769919a58672958e8f366 \ + --hash=sha256:6a0289e4589e8bdfef02a80478f1dfcb14f0ab696b5a00e1f4b8a14a307a3c58 \ + --hash=sha256:6b66f92b17849b85cad91259efc341dce9c1af48e2173bf38a85c6329f1033e5 \ + --hash=sha256:6c9379d65defcab82d07b2a9dfbfc2e95bc8fe0ebb1b176a3190230a3ef0e07c \ + --hash=sha256:6fc1f5b51fa4cecaa18f2bd7a003f3dd039dd615cd69a2afd6d3b19aed6775f2 \ + --hash=sha256:70f7172939fdf8790425ba31915bfbe8335030f05b9913d7ae00a87d4395620a \ + --hash=sha256:721c76e84fe669be19c5791da68232ca2e05ba5185575086e384352e2c309597 \ + --hash=sha256:7222ffd5e4de8e57e03ce2cef95a4c43c98fcb72ad86909abdfc2c17d227fc1b \ + --hash=sha256:75d10d37a47afee94919c4fab4c22b9bc2a8bf7d4f46f87363bcf0573f3ff4f5 \ + --hash=sha256:76af085e67e56c8816c3ccf256ebd136def2ed9654525348cfa744b6802b69eb \ + --hash=sha256:770cab594ecf99ae64c236bc9ee3439c3f46be49796e265ce0cc8bc17b10294f \ + --hash=sha256:7a6ab32f7210554a96cd9e33abe3ddd86732beeafc7a28e9955cdf22ffadbab0 \ + --hash=sha256:7c48ed483eb946e6c04ccbe02c6b4d1d48e51944b6db70f697e089c193404941 \ + --hash=sha256:7f56930ab0abd1c45cd15be65cc741c28b1c9a34876ce8c17a2fa107810c0af0 \ + --hash=sha256:8075c35cd58273fee266c58c0c9b670947c19df5fb98e7b66710e04ad4e9ff86 \ + --hash=sha256:8272b73e1c5603666618805fe821edba66892e2870058c94c53147602eab29c7 \ + --hash=sha256:82d8fd25b7f4675d0c47cf95b594d4e7b158aca33b76aa63d07186e13c0e0ab7 \ + 
--hash=sha256:844da2b5728b5ce0e32d863af26f32b5ce61bc4273a9c720a9f3aa9df73b1455 \ + --hash=sha256:8755483f3c00d6c9a77f490c17e6ab0c8729e39e6390328e42521ef175380ae6 \ + --hash=sha256:915f3849a011c1f593ab99092f3cecfcb4d65d8feb4a64cf1bf2d22074dc0ec4 \ + --hash=sha256:926ca93accd5d36ccdabd803392ddc3e03e6d4cd1cf17deff3b989ab8e9dbcf0 \ + --hash=sha256:982bb1e8b4ffda883b3d0a521e23abcd6fd17418f6d2c4118d257a10199c0ce3 \ + --hash=sha256:98f862da73774290f251b9df8d11161b6cf25b599a66baf087c1ffe340e9bfd1 \ + --hash=sha256:9cbfacf36cb0ec2897ce0ebc5d08ca44213af24265bd56eca54bee7923c48fd6 \ + --hash=sha256:a370b3e078e418187da8c3674eddb9d983ec09445c99a3a263c2011993522981 \ + --hash=sha256:a955b438e62efdf7e0b7b52a64dc5c3396e2634baa62471768a64bc2adb73d5c \ + --hash=sha256:aa6af9e7d59f9c12b33ae4e9450619cf2488e2bbe9b44030905877f0b2324980 \ + --hash=sha256:aa88ca0b1932e93f2d961bf3addbb2db902198dca337d88c89e1559e066e7645 \ + --hash=sha256:aaeeb6a479c7667fbe1099af9617c83aaca22182d6cf8c53966491a0f1b7ffb7 \ + --hash=sha256:aaf27faa992bfee0264dc1f03f4c75e9fcdda66a519db6b957a3f826e285cf12 \ + --hash=sha256:b2680962a4848b3c4f155dc2ee64505a9c57186d0d56b43123b17ca3de18f0fa \ + --hash=sha256:b2d318c11350e10662026ad0eb71bb51c7812fc8590825304ae0bdd4ac283acd \ + --hash=sha256:b33de11b92e9f75a2b545d6e9b6f37e398d86c3e9e9653c4864eb7e89c5773ef \ + --hash=sha256:b3daeac64d5b371dea99714f08ffc2c208522ec6b06fbc7866a450dd446f5c0f \ + --hash=sha256:be1e352acbe3c78727a16a455126d9ff83ea2dfdcbc83148d2982305a04714c2 \ + --hash=sha256:bee093bf902e1d8fc0ac143c88902c3dfc8941f7ea1d6a8dd2bcb786d33db03d \ + --hash=sha256:c72fbbe68c6f32f251bdc08b8611c7b3060612236e960ef848e0a517ddbe76c5 \ + --hash=sha256:c9e36a97bee9b86ef9a1cf7bb96747eb7a15c2f22bdb5b516434b00f2a599f02 \ + --hash=sha256:cddf7bd982eaa998934a91f69d182aec997c6c468898efe6679af88283b498d3 \ + --hash=sha256:cf713fe9a71ef6fd5adf7a79670135081cd4431c2943864757f0fa3a65b1fafd \ + --hash=sha256:d11b54acf878eef558599658b0ffca78138c8c3655cf4f3a4a673c437e67732e \ + 
--hash=sha256:d41c4d287cfc69060fa91cae9683eacffad989f1a10811995fa309df656ec214 \ + --hash=sha256:d524ba3f1581b35c03cb42beebab4a13e6cdad7b36246bd22541fa585a56cccd \ + --hash=sha256:daac4765328a919a805fa5e2720f3e94767abd632ae410a9062dff5412bae65a \ + --hash=sha256:db4c7bf0e07fc3b7d89ac2a5880a6a8062056801b83ff56d8464b70f65482b6c \ + --hash=sha256:dc7039885fa1baf9be153a0626e337aa7ec8bf96b0128605fb0d77788ddc1681 \ + --hash=sha256:dccab8d5fa1ef9bfba0590ecf4d46df048d18ffe3eec01eeb73a42e0d9e7a8ba \ + --hash=sha256:dedb8adb91d11846ee08bec4c8236c8549ac721c245678282dcb06b221aab59f \ + --hash=sha256:e45ba65510e2647721e35323d6ef54c7974959f6081b58d4ef5d87c60c84919a \ + --hash=sha256:e53efc7c7cee4c1e70661e2e112ca46a575f90ed9ae3fef200f2a25e954f4b28 \ + --hash=sha256:e635b87f01ebc977342e2697d05b56632f5f879a4f15955dfe8cef2448b51691 \ + --hash=sha256:e70e990b2137b29dc5564715de1e12701815dacc1d056308e2b17e9095372a82 \ + --hash=sha256:e8082b26888e2f8b36a042a58307d5b917ef2b1cacab921ad3323ef91901c71a \ + --hash=sha256:e8323a9b031aa0393768b87f04b4164a40037fb2a3c11ac06a03ffecd3618027 \ + --hash=sha256:e92fca20c46e9f5e1bb485887d074918b13543b1c2a1185e69bb8d17ab6236a7 \ + --hash=sha256:eb30abc20df9ab0814b5a2524f23d75dcf83cde762c161917a2b4b7b55b1e518 \ + --hash=sha256:eba9904b0f38a143592d9fc0e19e2df0fa2e41c3c3745554761c5f6447eedabf \ + --hash=sha256:ef8de666d6179b009dce7bcb2ad4c4a779f113f12caf8dc77f0162c29d20490b \ + --hash=sha256:efd387a49825780ff861998cd959767800d54f8308936b21025326de4b5a42b9 \ + --hash=sha256:f0aa37f3c979cf2546b73e8222bbfa3dc07a641585340179d768068e3455e544 \ + --hash=sha256:f4074c5a429281bf056ddd4c5d3b740ebca4d43ffffe2ef4bf4d2d05114299da \ + --hash=sha256:f69a27e45c43520f5487f27627059b64aaf160415589230992cec34c5e18a509 \ + --hash=sha256:fb707f3e15060adf5b7ada797624a6c6e0138e2a26baa089df64c68ee98e040f \ + --hash=sha256:fcbe676a55d7445b22c10967bceaaf0ee69407fbe0ece4d032b6eb8d4565982a \ + --hash=sha256:fdb20a30fe1175ecabed17cbf7812f7b804b8a315a25f24678bcdf120a90077f # via 
requests -cryptography==43.0.1 \ - --hash=sha256:014f58110f53237ace6a408b5beb6c427b64e084eb451ef25a28308270086494 \ - --hash=sha256:1bbcce1a551e262dfbafb6e6252f1ae36a248e615ca44ba302df077a846a8806 \ - --hash=sha256:203e92a75716d8cfb491dc47c79e17d0d9207ccffcbcb35f598fbe463ae3444d \ - --hash=sha256:27e613d7077ac613e399270253259d9d53872aaf657471473ebfc9a52935c062 \ - --hash=sha256:2bd51274dcd59f09dd952afb696bf9c61a7a49dfc764c04dd33ef7a6b502a1e2 \ - --hash=sha256:38926c50cff6f533f8a2dae3d7f19541432610d114a70808f0926d5aaa7121e4 \ - --hash=sha256:511f4273808ab590912a93ddb4e3914dfd8a388fed883361b02dea3791f292e1 \ - --hash=sha256:58d4e9129985185a06d849aa6df265bdd5a74ca6e1b736a77959b498e0505b85 \ - --hash=sha256:5b43d1ea6b378b54a1dc99dd8a2b5be47658fe9a7ce0a58ff0b55f4b43ef2b84 \ - --hash=sha256:61ec41068b7b74268fa86e3e9e12b9f0c21fcf65434571dbb13d954bceb08042 \ - --hash=sha256:666ae11966643886c2987b3b721899d250855718d6d9ce41b521252a17985f4d \ - --hash=sha256:68aaecc4178e90719e95298515979814bda0cbada1256a4485414860bd7ab962 \ - --hash=sha256:7c05650fe8023c5ed0d46793d4b7d7e6cd9c04e68eabe5b0aeea836e37bdcec2 \ - --hash=sha256:80eda8b3e173f0f247f711eef62be51b599b5d425c429b5d4ca6a05e9e856baa \ - --hash=sha256:8385d98f6a3bf8bb2d65a73e17ed87a3ba84f6991c155691c51112075f9ffc5d \ - --hash=sha256:88cce104c36870d70c49c7c8fd22885875d950d9ee6ab54df2745f83ba0dc365 \ - --hash=sha256:9d3cdb25fa98afdd3d0892d132b8d7139e2c087da1712041f6b762e4f807cc96 \ - --hash=sha256:a575913fb06e05e6b4b814d7f7468c2c660e8bb16d8d5a1faf9b33ccc569dd47 \ - --hash=sha256:ac119bb76b9faa00f48128b7f5679e1d8d437365c5d26f1c2c3f0da4ce1b553d \ - --hash=sha256:c1332724be35d23a854994ff0b66530119500b6053d0bd3363265f7e5e77288d \ - --hash=sha256:d03a475165f3134f773d1388aeb19c2d25ba88b6a9733c5c590b9ff7bbfa2e0c \ - --hash=sha256:d75601ad10b059ec832e78823b348bfa1a59f6b8d545db3a24fd44362a1564cb \ - --hash=sha256:de41fd81a41e53267cb020bb3a7212861da53a7d39f863585d13ea11049cf277 \ - 
--hash=sha256:e710bf40870f4db63c3d7d929aa9e09e4e7ee219e703f949ec4073b4294f6172 \ - --hash=sha256:ea25acb556320250756e53f9e20a4177515f012c9eaea17eb7587a8c4d8ae034 \ - --hash=sha256:f98bf604c82c416bc829e490c700ca1553eafdf2912a91e23a79d97d9801372a \ - --hash=sha256:fba1007b3ef89946dbbb515aeeb41e30203b004f0b4b00e5e16078b518563289 +cryptography==45.0.5 \ + --hash=sha256:0027d566d65a38497bc37e0dd7c2f8ceda73597d2ac9ba93810204f56f52ebc7 \ + --hash=sha256:101ee65078f6dd3e5a028d4f19c07ffa4dd22cce6a20eaa160f8b5219911e7d8 \ + --hash=sha256:12e55281d993a793b0e883066f590c1ae1e802e3acb67f8b442e721e475e6463 \ + --hash=sha256:14d96584701a887763384f3c47f0ca7c1cce322aa1c31172680eb596b890ec30 \ + --hash=sha256:1e1da5accc0c750056c556a93c3e9cb828970206c68867712ca5805e46dc806f \ + --hash=sha256:206210d03c1193f4e1ff681d22885181d47efa1ab3018766a7b32a7b3d6e6afd \ + --hash=sha256:2089cc8f70a6e454601525e5bf2779e665d7865af002a5dec8d14e561002e135 \ + --hash=sha256:3a264aae5f7fbb089dbc01e0242d3b67dffe3e6292e1f5182122bdf58e65215d \ + --hash=sha256:3af26738f2db354aafe492fb3869e955b12b2ef2e16908c8b9cb928128d42c57 \ + --hash=sha256:3fcfbefc4a7f332dece7272a88e410f611e79458fab97b5efe14e54fe476f4fd \ + --hash=sha256:460f8c39ba66af7db0545a8c6f2eabcbc5a5528fc1cf6c3fa9a1e44cec33385e \ + --hash=sha256:57c816dfbd1659a367831baca4b775b2a5b43c003daf52e9d57e1d30bc2e1b0e \ + --hash=sha256:5aa1e32983d4443e310f726ee4b071ab7569f58eedfdd65e9675484a4eb67bd1 \ + --hash=sha256:6ff8728d8d890b3dda5765276d1bc6fb099252915a2cd3aff960c4c195745dd0 \ + --hash=sha256:7259038202a47fdecee7e62e0fd0b0738b6daa335354396c6ddebdbe1206af2a \ + --hash=sha256:72e76caa004ab63accdf26023fccd1d087f6d90ec6048ff33ad0445abf7f605a \ + --hash=sha256:7760c1c2e1a7084153a0f68fab76e754083b126a47d0117c9ed15e69e2103492 \ + --hash=sha256:8c4a6ff8a30e9e3d38ac0539e9a9e02540ab3f827a3394f8852432f6b0ea152e \ + --hash=sha256:9024beb59aca9d31d36fcdc1604dd9bbeed0a55bface9f1908df19178e2f116e \ + 
--hash=sha256:90cb0a7bb35959f37e23303b7eed0a32280510030daba3f7fdfbb65defde6a97 \ + --hash=sha256:91098f02ca81579c85f66df8a588c78f331ca19089763d733e34ad359f474174 \ + --hash=sha256:926c3ea71a6043921050eaa639137e13dbe7b4ab25800932a8498364fc1abec9 \ + --hash=sha256:982518cd64c54fcada9d7e5cf28eabd3ee76bd03ab18e08a48cad7e8b6f31b18 \ + --hash=sha256:9b4cf6318915dccfe218e69bbec417fdd7c7185aa7aab139a2c0beb7468c89f0 \ + --hash=sha256:ad0caded895a00261a5b4aa9af828baede54638754b51955a0ac75576b831b27 \ + --hash=sha256:b85980d1e345fe769cfc57c57db2b59cff5464ee0c045d52c0df087e926fbe63 \ + --hash=sha256:b8fa8b0a35a9982a3c60ec79905ba5bb090fc0b9addcfd3dc2dd04267e45f25e \ + --hash=sha256:b9e38e0a83cd51e07f5a48ff9691cae95a79bea28fe4ded168a8e5c6c77e819d \ + --hash=sha256:bd4c45986472694e5121084c6ebbd112aa919a25e783b87eb95953c9573906d6 \ + --hash=sha256:be97d3a19c16a9be00edf79dca949c8fa7eff621763666a145f9f9535a5d7f42 \ + --hash=sha256:c648025b6840fe62e57107e0a25f604db740e728bd67da4f6f060f03017d5097 \ + --hash=sha256:d05a38884db2ba215218745f0781775806bde4f32e07b135348355fe8e4991d9 \ + --hash=sha256:dd420e577921c8c2d31289536c386aaa30140b473835e97f83bc71ea9d2baf2d \ + --hash=sha256:e357286c1b76403dd384d938f93c46b2b058ed4dfcdce64a770f0537ed3feb6f \ + --hash=sha256:e6c00130ed423201c5bc5544c23359141660b07999ad82e34e7bb8f882bb78e0 \ + --hash=sha256:e74d30ec9c7cb2f404af331d5b4099a9b322a8a6b25c4632755c8757345baac5 \ + --hash=sha256:f3562c2f23c612f2e4a6964a61d942f891d29ee320edb62ff48ffb99f3de9ae8 # via pyjwt -deprecated==1.2.14 \ - --hash=sha256:6fac8b097794a90302bdbb17b9b815e732d3c4720583ff1b198499d78470466c \ - --hash=sha256:e5323eb936458dccc2582dc6f9c322c852a775a27065ff2b0c4970b9d53d01b3 +deprecated==1.2.18 \ + --hash=sha256:422b6f6d859da6f2ef57857761bfb392480502a64c3028ca9bbe86085d72115d \ + --hash=sha256:bd5011788200372a32418f888e326a09ff80d0214bd961147cfed01b5c018eec # via pygithub -gitdb==4.0.11 \ - --hash=sha256:81a3407ddd2ee8df444cbacea00e2d038e40150acfa3001696fe0dcf1d3adfa4 \ - 
--hash=sha256:bf5421126136d6d0af55bc1e7c1af1c397a34f5b7bd79e776cd3e89785c2b04b +gitdb==4.0.12 \ + --hash=sha256:5ef71f855d191a3326fcfbc0d5da835f26b13fbcba60c32c21091c349ffdb571 \ + --hash=sha256:67073e15955400952c6565cc3e707c554a4eea2e428946f7a4c162fab9bd9bcf # via gitpython -gitpython==3.1.43 \ - --hash=sha256:35f314a9f878467f5453cc1fee295c3e18e52f1b99f10f6cf5b1682e968a9e7c \ - --hash=sha256:eec7ec56b92aad751f9912a73404bc02ba212a23adb2c7098ee668417051a1ff - # via -r requirements.txt.in -idna==3.8 \ - --hash=sha256:050b4e5baadcd44d760cedbd2b8e639f2ff89bbc7a5730fcc662954303377aac \ - --hash=sha256:d838c2c0ed6fced7693d5e8ab8e734d5f8fda53a039c0164afb0b82e771e3603 +gitpython==3.1.44 \ + --hash=sha256:9e0e10cda9bed1ee64bc9a6de50e7e38a9c9943241cd7f585f6df3ed28011110 \ + --hash=sha256:c87e30b26253bf5418b01b0660f818967f3c503193838337fe5e573331249269 + # via -r llvm/utils/git/requirements.txt.in +idna==3.10 \ + --hash=sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9 \ + --hash=sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3 # via requests pycparser==2.22 \ --hash=sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6 \ @@ -225,10 +237,10 @@ pycparser==2.22 \ pygithub==2.4.0 \ --hash=sha256:6601e22627e87bac192f1e2e39c6e6f69a43152cfb8f307cee575879320b3051 \ --hash=sha256:81935aa4bdc939fba98fee1cb47422c09157c56a27966476ff92775602b9ee24 - # via -r requirements.txt.in -pyjwt[crypto]==2.9.0 \ - --hash=sha256:3b02fb0f44517787776cf48f2ae25d8e14f300e6d7545a4315cee571a415e850 \ - --hash=sha256:7e1e5b56cc735432a7369cbfa0efe50fa113ebecdc04ae6922deba8b84582d0c + # via -r llvm/utils/git/requirements.txt.in +pyjwt[crypto]==2.10.1 \ + --hash=sha256:3cc5772eb20009233caf06e9d8a0577824723b44e6648ee0a2aedb6cf9381953 \ + --hash=sha256:dcdd193e30abefd5debf142f9adfcdd2b58004e644f25406ffaebd50bd98dacb # via pygithub pynacl==1.5.0 \ --hash=sha256:06b8f6fa7f5de8d5d2f7573fe8c863c051225a27b61e6860fd047b1775807858 \ @@ -242,93 +254,102 @@ 
pynacl==1.5.0 \ --hash=sha256:a422368fc821589c228f4c49438a368831cb5bbc0eab5ebe1d7fac9dded6567b \ --hash=sha256:e46dae94e34b085175f8abb3b0aaa7da40767865ac82c928eeb9e57e1ea8a543 # via pygithub -requests==2.32.3 \ - --hash=sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760 \ - --hash=sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6 +requests==2.32.4 \ + --hash=sha256:27babd3cda2a6d50b30443204ee89830707d396671944c998b5975b031ac2b2c \ + --hash=sha256:27d0316682c8a29834d3264820024b62a36942083d52caf2f14c0591336d3422 # via pygithub -smmap==5.0.1 \ - --hash=sha256:dceeb6c0028fdb6734471eb07c0cd2aae706ccaecab45965ee83f11c8d3b1f62 \ - --hash=sha256:e6d8668fa5f93e706934a62d7b4db19c8d9eb8cf2adbb75ef1b675aa332b69da +smmap==5.0.2 \ + --hash=sha256:26ea65a03958fa0c8a1c7e8c7a58fdc77221b8910f6be2131affade476898ad5 \ + --hash=sha256:b30115f0def7d7531d22a0fb6502488d879e75b260a9db4d0819cfb25403af5e # via gitdb typing-extensions==4.12.2 \ --hash=sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d \ --hash=sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8 # via pygithub -urllib3==2.2.3 \ - --hash=sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac \ - --hash=sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9 - # via - # pygithub - # requests -wrapt==1.16.0 \ - --hash=sha256:0d2691979e93d06a95a26257adb7bfd0c93818e89b1406f5a28f36e0d8c1e1fc \ - --hash=sha256:14d7dc606219cdd7405133c713f2c218d4252f2a469003f8c46bb92d5d095d81 \ - --hash=sha256:1a5db485fe2de4403f13fafdc231b0dbae5eca4359232d2efc79025527375b09 \ - --hash=sha256:1acd723ee2a8826f3d53910255643e33673e1d11db84ce5880675954183ec47e \ - --hash=sha256:1ca9b6085e4f866bd584fb135a041bfc32cab916e69f714a7d1d397f8c4891ca \ - --hash=sha256:1dd50a2696ff89f57bd8847647a1c363b687d3d796dc30d4dd4a9d1689a706f0 \ - --hash=sha256:2076fad65c6736184e77d7d4729b63a6d1ae0b70da4868adeec40989858eb3fb \ - 
--hash=sha256:2a88e6010048489cda82b1326889ec075a8c856c2e6a256072b28eaee3ccf487 \ - --hash=sha256:3ebf019be5c09d400cf7b024aa52b1f3aeebeff51550d007e92c3c1c4afc2a40 \ - --hash=sha256:418abb18146475c310d7a6dc71143d6f7adec5b004ac9ce08dc7a34e2babdc5c \ - --hash=sha256:43aa59eadec7890d9958748db829df269f0368521ba6dc68cc172d5d03ed8060 \ - --hash=sha256:44a2754372e32ab315734c6c73b24351d06e77ffff6ae27d2ecf14cf3d229202 \ - --hash=sha256:490b0ee15c1a55be9c1bd8609b8cecd60e325f0575fc98f50058eae366e01f41 \ - --hash=sha256:49aac49dc4782cb04f58986e81ea0b4768e4ff197b57324dcbd7699c5dfb40b9 \ - --hash=sha256:5eb404d89131ec9b4f748fa5cfb5346802e5ee8836f57d516576e61f304f3b7b \ - --hash=sha256:5f15814a33e42b04e3de432e573aa557f9f0f56458745c2074952f564c50e664 \ - --hash=sha256:5f370f952971e7d17c7d1ead40e49f32345a7f7a5373571ef44d800d06b1899d \ - --hash=sha256:66027d667efe95cc4fa945af59f92c5a02c6f5bb6012bff9e60542c74c75c362 \ - --hash=sha256:66dfbaa7cfa3eb707bbfcd46dab2bc6207b005cbc9caa2199bcbc81d95071a00 \ - --hash=sha256:685f568fa5e627e93f3b52fda002c7ed2fa1800b50ce51f6ed1d572d8ab3e7fc \ - --hash=sha256:6906c4100a8fcbf2fa735f6059214bb13b97f75b1a61777fcf6432121ef12ef1 \ - --hash=sha256:6a42cd0cfa8ffc1915aef79cb4284f6383d8a3e9dcca70c445dcfdd639d51267 \ - --hash=sha256:6dcfcffe73710be01d90cae08c3e548d90932d37b39ef83969ae135d36ef3956 \ - --hash=sha256:6f6eac2360f2d543cc875a0e5efd413b6cbd483cb3ad7ebf888884a6e0d2e966 \ - --hash=sha256:72554a23c78a8e7aa02abbd699d129eead8b147a23c56e08d08dfc29cfdddca1 \ - --hash=sha256:73870c364c11f03ed072dda68ff7aea6d2a3a5c3fe250d917a429c7432e15228 \ - --hash=sha256:73aa7d98215d39b8455f103de64391cb79dfcad601701a3aa0dddacf74911d72 \ - --hash=sha256:75ea7d0ee2a15733684badb16de6794894ed9c55aa5e9903260922f0482e687d \ - --hash=sha256:7bd2d7ff69a2cac767fbf7a2b206add2e9a210e57947dd7ce03e25d03d2de292 \ - --hash=sha256:807cc8543a477ab7422f1120a217054f958a66ef7314f76dd9e77d3f02cdccd0 \ - --hash=sha256:8e9723528b9f787dc59168369e42ae1c3b0d3fadb2f1a71de14531d321ee05b0 \ - 
--hash=sha256:9090c9e676d5236a6948330e83cb89969f433b1943a558968f659ead07cb3b36 \ - --hash=sha256:9153ed35fc5e4fa3b2fe97bddaa7cbec0ed22412b85bcdaf54aeba92ea37428c \ - --hash=sha256:9159485323798c8dc530a224bd3ffcf76659319ccc7bbd52e01e73bd0241a0c5 \ - --hash=sha256:941988b89b4fd6b41c3f0bfb20e92bd23746579736b7343283297c4c8cbae68f \ - --hash=sha256:94265b00870aa407bd0cbcfd536f17ecde43b94fb8d228560a1e9d3041462d73 \ - --hash=sha256:98b5e1f498a8ca1858a1cdbffb023bfd954da4e3fa2c0cb5853d40014557248b \ - --hash=sha256:9b201ae332c3637a42f02d1045e1d0cccfdc41f1f2f801dafbaa7e9b4797bfc2 \ - --hash=sha256:a0ea261ce52b5952bf669684a251a66df239ec6d441ccb59ec7afa882265d593 \ - --hash=sha256:a33a747400b94b6d6b8a165e4480264a64a78c8a4c734b62136062e9a248dd39 \ - --hash=sha256:a452f9ca3e3267cd4d0fcf2edd0d035b1934ac2bd7e0e57ac91ad6b95c0c6389 \ - --hash=sha256:a86373cf37cd7764f2201b76496aba58a52e76dedfaa698ef9e9688bfd9e41cf \ - --hash=sha256:ac83a914ebaf589b69f7d0a1277602ff494e21f4c2f743313414378f8f50a4cf \ - --hash=sha256:aefbc4cb0a54f91af643660a0a150ce2c090d3652cf4052a5397fb2de549cd89 \ - --hash=sha256:b3646eefa23daeba62643a58aac816945cadc0afaf21800a1421eeba5f6cfb9c \ - --hash=sha256:b47cfad9e9bbbed2339081f4e346c93ecd7ab504299403320bf85f7f85c7d46c \ - --hash=sha256:b935ae30c6e7400022b50f8d359c03ed233d45b725cfdd299462f41ee5ffba6f \ - --hash=sha256:bb2dee3874a500de01c93d5c71415fcaef1d858370d405824783e7a8ef5db440 \ - --hash=sha256:bc57efac2da352a51cc4658878a68d2b1b67dbe9d33c36cb826ca449d80a8465 \ - --hash=sha256:bf5703fdeb350e36885f2875d853ce13172ae281c56e509f4e6eca049bdfb136 \ - --hash=sha256:c31f72b1b6624c9d863fc095da460802f43a7c6868c5dda140f51da24fd47d7b \ - --hash=sha256:c5cd603b575ebceca7da5a3a251e69561bec509e0b46e4993e1cac402b7247b8 \ - --hash=sha256:d2efee35b4b0a347e0d99d28e884dfd82797852d62fcd7ebdeee26f3ceb72cf3 \ - --hash=sha256:d462f28826f4657968ae51d2181a074dfe03c200d6131690b7d65d55b0f360f8 \ - --hash=sha256:d5e49454f19ef621089e204f862388d29e6e8d8b162efce05208913dde5b9ad6 \ - 
--hash=sha256:da4813f751142436b075ed7aa012a8778aa43a99f7b36afe9b742d3ed8bdc95e \ - --hash=sha256:db2e408d983b0e61e238cf579c09ef7020560441906ca990fe8412153e3b291f \ - --hash=sha256:db98ad84a55eb09b3c32a96c576476777e87c520a34e2519d3e59c44710c002c \ - --hash=sha256:dbed418ba5c3dce92619656802cc5355cb679e58d0d89b50f116e4a9d5a9603e \ - --hash=sha256:dcdba5c86e368442528f7060039eda390cc4091bfd1dca41e8046af7c910dda8 \ - --hash=sha256:decbfa2f618fa8ed81c95ee18a387ff973143c656ef800c9f24fb7e9c16054e2 \ - --hash=sha256:e4fdb9275308292e880dcbeb12546df7f3e0f96c6b41197e0cf37d2826359020 \ - --hash=sha256:eb1b046be06b0fce7249f1d025cd359b4b80fc1c3e24ad9eca33e0dcdb2e4a35 \ - --hash=sha256:eb6e651000a19c96f452c85132811d25e9264d836951022d6e81df2fff38337d \ - --hash=sha256:ed867c42c268f876097248e05b6117a65bcd1e63b779e916fe2e33cd6fd0d3c3 \ - --hash=sha256:edfad1d29c73f9b863ebe7082ae9321374ccb10879eeabc84ba3b69f2579d537 \ - --hash=sha256:f2058f813d4f2b5e3a9eb2eb3faf8f1d99b81c3e51aeda4b168406443e8ba809 \ - --hash=sha256:f6b2d0c6703c988d334f297aa5df18c45e97b0af3679bb75059e0e0bd8b1069d \ - --hash=sha256:f8212564d49c50eb4565e502814f694e240c55551a5f1bc841d4fcaabb0a9b8a \ - --hash=sha256:ffa565331890b90056c01db69c0fe634a776f8019c143a5ae265f9c6bc4bd6d4 +urllib3==2.5.0 \ + --hash=sha256:3fc47733c7e419d4bc3f6b3dc2b4f890bb743906a30d56ba4a5bfa4bbff92760 \ + --hash=sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc + # via requests +types-cryptography==3.3.23.2 + # via pyjwt +wrapt==1.17.2 \ + --hash=sha256:08e7ce672e35efa54c5024936e559469436f8b8096253404faeb54d2a878416f \ + --hash=sha256:0a6e821770cf99cc586d33833b2ff32faebdbe886bd6322395606cf55153246c \ + --hash=sha256:0b929ac182f5ace000d459c59c2c9c33047e20e935f8e39371fa6e3b85d56f4a \ + --hash=sha256:129a150f5c445165ff941fc02ee27df65940fcb8a22a61828b1853c98763a64b \ + --hash=sha256:13e6afb7fe71fe7485a4550a8844cc9ffbe263c0f1a1eea569bc7091d4898555 \ + --hash=sha256:1473400e5b2733e58b396a04eb7f35f541e1fb976d0c0724d0223dd607e0f74c \ 
+ --hash=sha256:18983c537e04d11cf027fbb60a1e8dfd5190e2b60cc27bc0808e653e7b218d1b \ + --hash=sha256:1a7ed2d9d039bd41e889f6fb9364554052ca21ce823580f6a07c4ec245c1f5d6 \ + --hash=sha256:1e1fe0e6ab7775fd842bc39e86f6dcfc4507ab0ffe206093e76d61cde37225c8 \ + --hash=sha256:1fb5699e4464afe5c7e65fa51d4f99e0b2eadcc176e4aa33600a3df7801d6662 \ + --hash=sha256:2696993ee1eebd20b8e4ee4356483c4cb696066ddc24bd70bcbb80fa56ff9061 \ + --hash=sha256:35621ae4c00e056adb0009f8e86e28eb4a41a4bfa8f9bfa9fca7d343fe94f998 \ + --hash=sha256:36ccae62f64235cf8ddb682073a60519426fdd4725524ae38874adf72b5f2aeb \ + --hash=sha256:3cedbfa9c940fdad3e6e941db7138e26ce8aad38ab5fe9dcfadfed9db7a54e62 \ + --hash=sha256:3d57c572081fed831ad2d26fd430d565b76aa277ed1d30ff4d40670b1c0dd984 \ + --hash=sha256:3fc7cb4c1c744f8c05cd5f9438a3caa6ab94ce8344e952d7c45a8ed59dd88392 \ + --hash=sha256:4011d137b9955791f9084749cba9a367c68d50ab8d11d64c50ba1688c9b457f2 \ + --hash=sha256:40d615e4fe22f4ad3528448c193b218e077656ca9ccb22ce2cb20db730f8d306 \ + --hash=sha256:410a92fefd2e0e10d26210e1dfb4a876ddaf8439ef60d6434f21ef8d87efc5b7 \ + --hash=sha256:41388e9d4d1522446fe79d3213196bd9e3b301a336965b9e27ca2788ebd122f3 \ + --hash=sha256:468090021f391fe0056ad3e807e3d9034e0fd01adcd3bdfba977b6fdf4213ea9 \ + --hash=sha256:49703ce2ddc220df165bd2962f8e03b84c89fee2d65e1c24a7defff6f988f4d6 \ + --hash=sha256:4a721d3c943dae44f8e243b380cb645a709ba5bd35d3ad27bc2ed947e9c68192 \ + --hash=sha256:4afd5814270fdf6380616b321fd31435a462019d834f83c8611a0ce7484c7317 \ + --hash=sha256:4c82b8785d98cdd9fed4cac84d765d234ed3251bd6afe34cb7ac523cb93e8b4f \ + --hash=sha256:4db983e7bca53819efdbd64590ee96c9213894272c776966ca6306b73e4affda \ + --hash=sha256:582530701bff1dec6779efa00c516496968edd851fba224fbd86e46cc6b73563 \ + --hash=sha256:58455b79ec2661c3600e65c0a716955adc2410f7383755d537584b0de41b1d8a \ + --hash=sha256:58705da316756681ad3c9c73fd15499aa4d8c69f9fd38dc8a35e06c12468582f \ + --hash=sha256:5bb1d0dbf99411f3d871deb6faa9aabb9d4e744d67dcaaa05399af89d847a91d \ + 
--hash=sha256:5c803c401ea1c1c18de70a06a6f79fcc9c5acfc79133e9869e730ad7f8ad8ef9 \ + --hash=sha256:5cbabee4f083b6b4cd282f5b817a867cf0b1028c54d445b7ec7cfe6505057cf8 \ + --hash=sha256:612dff5db80beef9e649c6d803a8d50c409082f1fedc9dbcdfde2983b2025b82 \ + --hash=sha256:62c2caa1585c82b3f7a7ab56afef7b3602021d6da34fbc1cf234ff139fed3cd9 \ + --hash=sha256:69606d7bb691b50a4240ce6b22ebb319c1cfb164e5f6569835058196e0f3a845 \ + --hash=sha256:6d9187b01bebc3875bac9b087948a2bccefe464a7d8f627cf6e48b1bbae30f82 \ + --hash=sha256:6ed6ffac43aecfe6d86ec5b74b06a5be33d5bb9243d055141e8cabb12aa08125 \ + --hash=sha256:703919b1633412ab54bcf920ab388735832fdcb9f9a00ae49387f0fe67dad504 \ + --hash=sha256:766d8bbefcb9e00c3ac3b000d9acc51f1b399513f44d77dfe0eb026ad7c9a19b \ + --hash=sha256:80dd7db6a7cb57ffbc279c4394246414ec99537ae81ffd702443335a61dbf3a7 \ + --hash=sha256:8112e52c5822fc4253f3901b676c55ddf288614dc7011634e2719718eaa187dc \ + --hash=sha256:8c8b293cd65ad716d13d8dd3624e42e5a19cc2a2f1acc74b30c2c13f15cb61a6 \ + --hash=sha256:8fdbdb757d5390f7c675e558fd3186d590973244fab0c5fe63d373ade3e99d40 \ + --hash=sha256:91bd7d1773e64019f9288b7a5101f3ae50d3d8e6b1de7edee9c2ccc1d32f0c0a \ + --hash=sha256:95c658736ec15602da0ed73f312d410117723914a5c91a14ee4cdd72f1d790b3 \ + --hash=sha256:99039fa9e6306880572915728d7f6c24a86ec57b0a83f6b2491e1d8ab0235b9a \ + --hash=sha256:9a2bce789a5ea90e51a02dfcc39e31b7f1e662bc3317979aa7e5538e3a034f72 \ + --hash=sha256:9a7d15bbd2bc99e92e39f49a04653062ee6085c0e18b3b7512a4f2fe91f2d681 \ + --hash=sha256:9abc77a4ce4c6f2a3168ff34b1da9b0f311a8f1cfd694ec96b0603dff1c79438 \ + --hash=sha256:9e8659775f1adf02eb1e6f109751268e493c73716ca5761f8acb695e52a756ae \ + --hash=sha256:9fee687dce376205d9a494e9c121e27183b2a3df18037f89d69bd7b35bcf59e2 \ + --hash=sha256:a5aaeff38654462bc4b09023918b7f21790efb807f54c000a39d41d69cf552cb \ + --hash=sha256:a604bf7a053f8362d27eb9fefd2097f82600b856d5abe996d623babd067b1ab5 \ + --hash=sha256:abbb9e76177c35d4e8568e58650aa6926040d6a9f6f03435b7a522bf1c487f9a \ + 
--hash=sha256:acc130bc0375999da18e3d19e5a86403667ac0c4042a094fefb7eec8ebac7cf3 \ + --hash=sha256:b18f2d1533a71f069c7f82d524a52599053d4c7166e9dd374ae2136b7f40f7c8 \ + --hash=sha256:b4e42a40a5e164cbfdb7b386c966a588b1047558a990981ace551ed7e12ca9c2 \ + --hash=sha256:b5e251054542ae57ac7f3fba5d10bfff615b6c2fb09abeb37d2f1463f841ae22 \ + --hash=sha256:b60fb58b90c6d63779cb0c0c54eeb38941bae3ecf7a73c764c52c88c2dcb9d72 \ + --hash=sha256:b870b5df5b71d8c3359d21be8f0d6c485fa0ebdb6477dda51a1ea54a9b558061 \ + --hash=sha256:ba0f0eb61ef00ea10e00eb53a9129501f52385c44853dbd6c4ad3f403603083f \ + --hash=sha256:bb87745b2e6dc56361bfde481d5a378dc314b252a98d7dd19a651a3fa58f24a9 \ + --hash=sha256:bb90fb8bda722a1b9d48ac1e6c38f923ea757b3baf8ebd0c82e09c5c1a0e7a04 \ + --hash=sha256:bc570b5f14a79734437cb7b0500376b6b791153314986074486e0b0fa8d71d98 \ + --hash=sha256:c86563182421896d73858e08e1db93afdd2b947a70064b813d515d66549e15f9 \ + --hash=sha256:c958bcfd59bacc2d0249dcfe575e71da54f9dcf4a8bdf89c4cb9a68a1170d73f \ + --hash=sha256:d18a4865f46b8579d44e4fe1e2bcbc6472ad83d98e22a26c963d46e4c125ef0b \ + --hash=sha256:d5e2439eecc762cd85e7bd37161d4714aa03a33c5ba884e26c81559817ca0925 \ + --hash=sha256:e3890b508a23299083e065f435a492b5435eba6e304a7114d2f919d400888cc6 \ + --hash=sha256:e496a8ce2c256da1eb98bd15803a79bee00fc351f5dfb9ea82594a3f058309e0 \ + --hash=sha256:e8b2816ebef96d83657b56306152a93909a83f23994f4b30ad4573b00bd11bb9 \ + --hash=sha256:eaf675418ed6b3b31c7a989fd007fa7c3be66ce14e5c3b27336383604c9da85c \ + --hash=sha256:ec89ed91f2fa8e3f52ae53cd3cf640d6feff92ba90d62236a81e4e563ac0e991 \ + --hash=sha256:ecc840861360ba9d176d413a5489b9a0aff6d6303d7e733e2c4623cfa26904a6 \ + --hash=sha256:f09b286faeff3c750a879d336fb6d8713206fc97af3adc14def0cdd349df6000 \ + --hash=sha256:f393cda562f79828f38a819f4788641ac7c4085f30f1ce1a68672baa686482bb \ + --hash=sha256:f917c1180fdb8623c2b75a99192f4025e412597c50b2ac870f156de8fb101119 \ + --hash=sha256:fc78a84e2dfbc27afe4b2bd7c80c8db9bca75cc5b85df52bfe634596a1da846b \ + 
--hash=sha256:ff04ef6eec3eee8a5efef2401495967a916feaa353643defcc03fc74fe213b58 # via deprecated diff --git a/llvm/utils/git/requirements_formatting.txt b/llvm/utils/git/requirements_formatting.txt index 18e2626c7946..bc9e48008fa6 100644 --- a/llvm/utils/git/requirements_formatting.txt +++ b/llvm/utils/git/requirements_formatting.txt @@ -1,372 +1,54 @@ # -# This file is autogenerated by pip-compile with Python 3.11 +# This file is autogenerated by pip-compile with Python 3.13 # by the following command: # -# pip-compile --generate-hashes --output-file=requirements_formatting.txt requirements_formatting.txt.in +# pip-compile --output-file=llvm/utils/git/requirements_formatting.txt llvm/utils/git/requirements_formatting.txt.in # -black==23.12.1 \ - --hash=sha256:0808494f2b2df923ffc5723ed3c7b096bd76341f6213989759287611e9837d50 \ - --hash=sha256:1fa88a0f74e50e4487477bc0bb900c6781dbddfdfa32691e780bf854c3b4a47f \ - --hash=sha256:25e57fd232a6d6ff3f4478a6fd0580838e47c93c83eaf1ccc92d4faf27112c4e \ - --hash=sha256:2d9e13db441c509a3763a7a3d9a49ccc1b4e974a47be4e08ade2a228876500ec \ - --hash=sha256:3e1b38b3135fd4c025c28c55ddfc236b05af657828a8a6abe5deec419a0b7055 \ - --hash=sha256:3fa4be75ef2a6b96ea8d92b1587dd8cb3a35c7e3d51f0738ced0781c3aa3a5a3 \ - --hash=sha256:4ce3ef14ebe8d9509188014d96af1c456a910d5b5cbf434a09fef7e024b3d0d5 \ - --hash=sha256:4f0031eaa7b921db76decd73636ef3a12c942ed367d8c3841a0739412b260a54 \ - --hash=sha256:602cfb1196dc692424c70b6507593a2b29aac0547c1be9a1d1365f0d964c353b \ - --hash=sha256:6d1bd9c210f8b109b1762ec9fd36592fdd528485aadb3f5849b2740ef17e674e \ - --hash=sha256:78baad24af0f033958cad29731e27363183e140962595def56423e626f4bee3e \ - --hash=sha256:8d4df77958a622f9b5a4c96edb4b8c0034f8434032ab11077ec6c56ae9f384ba \ - --hash=sha256:97e56155c6b737854e60a9ab1c598ff2533d57e7506d97af5481141671abf3ea \ - --hash=sha256:9c4352800f14be5b4864016882cdba10755bd50805c95f728011bcb47a4afd59 \ - --hash=sha256:a4d6a9668e45ad99d2f8ec70d5c8c04ef4f32f648ef39048d010b0689832ec6d \ 
- --hash=sha256:a920b569dc6b3472513ba6ddea21f440d4b4c699494d2e972a1753cdc25df7b0 \ - --hash=sha256:ae76c22bde5cbb6bfd211ec343ded2163bba7883c7bc77f6b756a1049436fbb9 \ - --hash=sha256:b18fb2ae6c4bb63eebe5be6bd869ba2f14fd0259bda7d18a46b764d8fb86298a \ - --hash=sha256:c04b6d9d20e9c13f43eee8ea87d44156b8505ca8a3c878773f68b4e4812a421e \ - --hash=sha256:c88b3711d12905b74206227109272673edce0cb29f27e1385f33b0163c414bba \ - --hash=sha256:dd15245c8b68fe2b6bd0f32c1556509d11bb33aec9b5d0866dd8e2ed3dba09c2 \ - --hash=sha256:e0aaf6041986767a5e0ce663c7a2f0e9eaf21e6ff87a5f95cbf3675bfd4c41d2 +black==24.10.0 # via - # -r requirements_formatting.txt.in + # -r llvm/utils/git/requirements_formatting.txt.in # darker -certifi==2024.7.4 \ - --hash=sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b \ - --hash=sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90 +certifi==2025.6.15 # via requests -cffi==1.17.0 \ - --hash=sha256:011aff3524d578a9412c8b3cfaa50f2c0bd78e03eb7af7aa5e0df59b158efb2f \ - --hash=sha256:0a048d4f6630113e54bb4b77e315e1ba32a5a31512c31a273807d0027a7e69ab \ - --hash=sha256:0bb15e7acf8ab35ca8b24b90af52c8b391690ef5c4aec3d31f38f0d37d2cc499 \ - --hash=sha256:0d46ee4764b88b91f16661a8befc6bfb24806d885e27436fdc292ed7e6f6d058 \ - --hash=sha256:0e60821d312f99d3e1569202518dddf10ae547e799d75aef3bca3a2d9e8ee693 \ - --hash=sha256:0fdacad9e0d9fc23e519efd5ea24a70348305e8d7d85ecbb1a5fa66dc834e7fb \ - --hash=sha256:14b9cbc8f7ac98a739558eb86fabc283d4d564dafed50216e7f7ee62d0d25377 \ - --hash=sha256:17c6d6d3260c7f2d94f657e6872591fe8733872a86ed1345bda872cfc8c74885 \ - --hash=sha256:1a2ddbac59dc3716bc79f27906c010406155031a1c801410f1bafff17ea304d2 \ - --hash=sha256:2404f3de742f47cb62d023f0ba7c5a916c9c653d5b368cc966382ae4e57da401 \ - --hash=sha256:24658baf6224d8f280e827f0a50c46ad819ec8ba380a42448e24459daf809cf4 \ - --hash=sha256:24aa705a5f5bd3a8bcfa4d123f03413de5d86e497435693b638cbffb7d5d8a1b \ - 
--hash=sha256:2770bb0d5e3cc0e31e7318db06efcbcdb7b31bcb1a70086d3177692a02256f59 \ - --hash=sha256:331ad15c39c9fe9186ceaf87203a9ecf5ae0ba2538c9e898e3a6967e8ad3db6f \ - --hash=sha256:3aa9d43b02a0c681f0bfbc12d476d47b2b2b6a3f9287f11ee42989a268a1833c \ - --hash=sha256:41f4915e09218744d8bae14759f983e466ab69b178de38066f7579892ff2a555 \ - --hash=sha256:4304d4416ff032ed50ad6bb87416d802e67139e31c0bde4628f36a47a3164bfa \ - --hash=sha256:435a22d00ec7d7ea533db494da8581b05977f9c37338c80bc86314bec2619424 \ - --hash=sha256:45f7cd36186db767d803b1473b3c659d57a23b5fa491ad83c6d40f2af58e4dbb \ - --hash=sha256:48b389b1fd5144603d61d752afd7167dfd205973a43151ae5045b35793232aa2 \ - --hash=sha256:4e67d26532bfd8b7f7c05d5a766d6f437b362c1bf203a3a5ce3593a645e870b8 \ - --hash=sha256:516a405f174fd3b88829eabfe4bb296ac602d6a0f68e0d64d5ac9456194a5b7e \ - --hash=sha256:5ba5c243f4004c750836f81606a9fcb7841f8874ad8f3bf204ff5e56332b72b9 \ - --hash=sha256:5bdc0f1f610d067c70aa3737ed06e2726fd9d6f7bfee4a351f4c40b6831f4e82 \ - --hash=sha256:6107e445faf057c118d5050560695e46d272e5301feffda3c41849641222a828 \ - --hash=sha256:6327b572f5770293fc062a7ec04160e89741e8552bf1c358d1a23eba68166759 \ - --hash=sha256:669b29a9eca6146465cc574659058ed949748f0809a2582d1f1a324eb91054dc \ - --hash=sha256:6ce01337d23884b21c03869d2f68c5523d43174d4fc405490eb0091057943118 \ - --hash=sha256:6d872186c1617d143969defeadac5a904e6e374183e07977eedef9c07c8953bf \ - --hash=sha256:6f76a90c345796c01d85e6332e81cab6d70de83b829cf1d9762d0a3da59c7932 \ - --hash=sha256:70d2aa9fb00cf52034feac4b913181a6e10356019b18ef89bc7c12a283bf5f5a \ - --hash=sha256:7cbc78dc018596315d4e7841c8c3a7ae31cc4d638c9b627f87d52e8abaaf2d29 \ - --hash=sha256:856bf0924d24e7f93b8aee12a3a1095c34085600aa805693fb7f5d1962393206 \ - --hash=sha256:8a98748ed1a1df4ee1d6f927e151ed6c1a09d5ec21684de879c7ea6aa96f58f2 \ - --hash=sha256:93a7350f6706b31f457c1457d3a3259ff9071a66f312ae64dc024f049055f72c \ - --hash=sha256:964823b2fc77b55355999ade496c54dde161c621cb1f6eac61dc30ed1b63cd4c \ - 
--hash=sha256:a003ac9edc22d99ae1286b0875c460351f4e101f8c9d9d2576e78d7e048f64e0 \ - --hash=sha256:a0ce71725cacc9ebf839630772b07eeec220cbb5f03be1399e0457a1464f8e1a \ - --hash=sha256:a47eef975d2b8b721775a0fa286f50eab535b9d56c70a6e62842134cf7841195 \ - --hash=sha256:a8b5b9712783415695663bd463990e2f00c6750562e6ad1d28e072a611c5f2a6 \ - --hash=sha256:a9015f5b8af1bb6837a3fcb0cdf3b874fe3385ff6274e8b7925d81ccaec3c5c9 \ - --hash=sha256:aec510255ce690d240f7cb23d7114f6b351c733a74c279a84def763660a2c3bc \ - --hash=sha256:b00e7bcd71caa0282cbe3c90966f738e2db91e64092a877c3ff7f19a1628fdcb \ - --hash=sha256:b50aaac7d05c2c26dfd50c3321199f019ba76bb650e346a6ef3616306eed67b0 \ - --hash=sha256:b7b6ea9e36d32582cda3465f54c4b454f62f23cb083ebc7a94e2ca6ef011c3a7 \ - --hash=sha256:bb9333f58fc3a2296fb1d54576138d4cf5d496a2cc118422bd77835e6ae0b9cb \ - --hash=sha256:c1c13185b90bbd3f8b5963cd8ce7ad4ff441924c31e23c975cb150e27c2bf67a \ - --hash=sha256:c3b8bd3133cd50f6b637bb4322822c94c5ce4bf0d724ed5ae70afce62187c492 \ - --hash=sha256:c5d97162c196ce54af6700949ddf9409e9833ef1003b4741c2b39ef46f1d9720 \ - --hash=sha256:c815270206f983309915a6844fe994b2fa47e5d05c4c4cef267c3b30e34dbe42 \ - --hash=sha256:cab2eba3830bf4f6d91e2d6718e0e1c14a2f5ad1af68a89d24ace0c6b17cced7 \ - --hash=sha256:d1df34588123fcc88c872f5acb6f74ae59e9d182a2707097f9e28275ec26a12d \ - --hash=sha256:d6bdcd415ba87846fd317bee0774e412e8792832e7805938987e4ede1d13046d \ - --hash=sha256:db9a30ec064129d605d0f1aedc93e00894b9334ec74ba9c6bdd08147434b33eb \ - --hash=sha256:dbc183e7bef690c9abe5ea67b7b60fdbca81aa8da43468287dae7b5c046107d4 \ - --hash=sha256:dca802c8db0720ce1c49cce1149ff7b06e91ba15fa84b1d59144fef1a1bc7ac2 \ - --hash=sha256:dec6b307ce928e8e112a6bb9921a1cb00a0e14979bf28b98e084a4b8a742bd9b \ - --hash=sha256:df8bb0010fdd0a743b7542589223a2816bdde4d94bb5ad67884348fa2c1c67e8 \ - --hash=sha256:e4094c7b464cf0a858e75cd14b03509e84789abf7b79f8537e6a72152109c76e \ - --hash=sha256:e4760a68cab57bfaa628938e9c2971137e05ce48e762a9cb53b76c9b569f1204 \ - 
--hash=sha256:eb09b82377233b902d4c3fbeeb7ad731cdab579c6c6fda1f763cd779139e47c3 \ - --hash=sha256:eb862356ee9391dc5a0b3cbc00f416b48c1b9a52d252d898e5b7696a5f9fe150 \ - --hash=sha256:ef9528915df81b8f4c7612b19b8628214c65c9b7f74db2e34a646a0a2a0da2d4 \ - --hash=sha256:f3157624b7558b914cb039fd1af735e5e8049a87c817cc215109ad1c8779df76 \ - --hash=sha256:f3e0992f23bbb0be00a921eae5363329253c3b86287db27092461c887b791e5e \ - --hash=sha256:f9338cc05451f1942d0d8203ec2c346c830f8e86469903d5126c1f0a13a2bcbb \ - --hash=sha256:ffef8fd58a36fb5f1196919638f73dd3ae0db1a878982b27a9a5a176ede4ba91 +cffi==1.17.1 # via # cryptography # pynacl -charset-normalizer==3.3.2 \ - --hash=sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027 \ - --hash=sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087 \ - --hash=sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786 \ - --hash=sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8 \ - --hash=sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09 \ - --hash=sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185 \ - --hash=sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574 \ - --hash=sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e \ - --hash=sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519 \ - --hash=sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898 \ - --hash=sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269 \ - --hash=sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3 \ - --hash=sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f \ - --hash=sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6 \ - --hash=sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8 \ - --hash=sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a \ - 
--hash=sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73 \ - --hash=sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc \ - --hash=sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714 \ - --hash=sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2 \ - --hash=sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc \ - --hash=sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce \ - --hash=sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d \ - --hash=sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e \ - --hash=sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6 \ - --hash=sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269 \ - --hash=sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96 \ - --hash=sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d \ - --hash=sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a \ - --hash=sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4 \ - --hash=sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77 \ - --hash=sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d \ - --hash=sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0 \ - --hash=sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed \ - --hash=sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068 \ - --hash=sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac \ - --hash=sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25 \ - --hash=sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8 \ - --hash=sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab \ - --hash=sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26 \ - 
--hash=sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2 \ - --hash=sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db \ - --hash=sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f \ - --hash=sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5 \ - --hash=sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99 \ - --hash=sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c \ - --hash=sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d \ - --hash=sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811 \ - --hash=sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa \ - --hash=sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a \ - --hash=sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03 \ - --hash=sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b \ - --hash=sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04 \ - --hash=sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c \ - --hash=sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001 \ - --hash=sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458 \ - --hash=sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389 \ - --hash=sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99 \ - --hash=sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985 \ - --hash=sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537 \ - --hash=sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238 \ - --hash=sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f \ - --hash=sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d \ - --hash=sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796 \ - 
--hash=sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a \ - --hash=sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143 \ - --hash=sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8 \ - --hash=sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c \ - --hash=sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5 \ - --hash=sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5 \ - --hash=sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711 \ - --hash=sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4 \ - --hash=sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6 \ - --hash=sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c \ - --hash=sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7 \ - --hash=sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4 \ - --hash=sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b \ - --hash=sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae \ - --hash=sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12 \ - --hash=sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c \ - --hash=sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae \ - --hash=sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8 \ - --hash=sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887 \ - --hash=sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b \ - --hash=sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4 \ - --hash=sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f \ - --hash=sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5 \ - --hash=sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33 \ - 
--hash=sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519 \ - --hash=sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561 +charset-normalizer==3.4.2 # via requests -click==8.1.7 \ - --hash=sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28 \ - --hash=sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de +click==8.2.1 # via black -colorama==0.4.6 \ - --hash=sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44 \ - --hash=sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6 +colorama==0.4.6 # via click -cryptography==43.0.0 \ - --hash=sha256:0663585d02f76929792470451a5ba64424acc3cd5227b03921dab0e2f27b1709 \ - --hash=sha256:08a24a7070b2b6804c1940ff0f910ff728932a9d0e80e7814234269f9d46d069 \ - --hash=sha256:232ce02943a579095a339ac4b390fbbe97f5b5d5d107f8a08260ea2768be8cc2 \ - --hash=sha256:2905ccf93a8a2a416f3ec01b1a7911c3fe4073ef35640e7ee5296754e30b762b \ - --hash=sha256:299d3da8e00b7e2b54bb02ef58d73cd5f55fb31f33ebbf33bd00d9aa6807df7e \ - --hash=sha256:2c6d112bf61c5ef44042c253e4859b3cbbb50df2f78fa8fae6747a7814484a70 \ - --hash=sha256:31e44a986ceccec3d0498e16f3d27b2ee5fdf69ce2ab89b52eaad1d2f33d8778 \ - --hash=sha256:3d9a1eca329405219b605fac09ecfc09ac09e595d6def650a437523fcd08dd22 \ - --hash=sha256:3dcdedae5c7710b9f97ac6bba7e1052b95c7083c9d0e9df96e02a1932e777895 \ - --hash=sha256:47ca71115e545954e6c1d207dd13461ab81f4eccfcb1345eac874828b5e3eaaf \ - --hash=sha256:4a997df8c1c2aae1e1e5ac49c2e4f610ad037fc5a3aadc7b64e39dea42249431 \ - --hash=sha256:51956cf8730665e2bdf8ddb8da0056f699c1a5715648c1b0144670c1ba00b48f \ - --hash=sha256:5bcb8a5620008a8034d39bce21dc3e23735dfdb6a33a06974739bfa04f853947 \ - --hash=sha256:64c3f16e2a4fc51c0d06af28441881f98c5d91009b8caaff40cf3548089e9c74 \ - --hash=sha256:6e2b11c55d260d03a8cf29ac9b5e0608d35f08077d8c087be96287f43af3ccdc \ - --hash=sha256:7b3f5fe74a5ca32d4d0f302ffe6680fcc5c28f8ef0dc0ae8f40c0f3a1b4fca66 \ - 
--hash=sha256:844b6d608374e7d08f4f6e6f9f7b951f9256db41421917dfb2d003dde4cd6b66 \ - --hash=sha256:9a8d6802e0825767476f62aafed40532bd435e8a5f7d23bd8b4f5fd04cc80ecf \ - --hash=sha256:aae4d918f6b180a8ab8bf6511a419473d107df4dbb4225c7b48c5c9602c38c7f \ - --hash=sha256:ac1955ce000cb29ab40def14fd1bbfa7af2017cca696ee696925615cafd0dce5 \ - --hash=sha256:b88075ada2d51aa9f18283532c9f60e72170041bba88d7f37e49cbb10275299e \ - --hash=sha256:cb013933d4c127349b3948aa8aaf2f12c0353ad0eccd715ca789c8a0f671646f \ - --hash=sha256:cc70b4b581f28d0a254d006f26949245e3657d40d8857066c2ae22a61222ef55 \ - --hash=sha256:e9c5266c432a1e23738d178e51c2c7a5e2ddf790f248be939448c0ba2021f9d1 \ - --hash=sha256:ea9e57f8ea880eeea38ab5abf9fbe39f923544d7884228ec67d666abd60f5a47 \ - --hash=sha256:ee0c405832ade84d4de74b9029bedb7b31200600fa524d218fc29bfa371e97f5 \ - --hash=sha256:fdcb265de28585de5b859ae13e3846a8e805268a823a12a4da2597f1f5afc9f0 +cryptography==45.0.5 # via pyjwt -darker==1.7.2 \ - --hash=sha256:ec5b7c382d9537611c164f3ecca2e1b8a7923bc5a02bf22f6e7f6c8bcbdf593a \ - --hash=sha256:ec9d130ab2a0f7fa49ab68a08fd231a5bec66147ecbbf94c92a1f33d97b5ef6f - # via -r requirements_formatting.txt.in -deprecated==1.2.14 \ - --hash=sha256:6fac8b097794a90302bdbb17b9b815e732d3c4720583ff1b198499d78470466c \ - --hash=sha256:e5323eb936458dccc2582dc6f9c322c852a775a27065ff2b0c4970b9d53d01b3 +darker==1.7.2 + # via -r llvm/utils/git/requirements_formatting.txt.in +deprecated==1.2.18 # via pygithub -idna==3.8 \ - --hash=sha256:050b4e5baadcd44d760cedbd2b8e639f2ff89bbc7a5730fcc662954303377aac \ - --hash=sha256:d838c2c0ed6fced7693d5e8ab8e734d5f8fda53a039c0164afb0b82e771e3603 +idna==3.10 # via requests -mypy-extensions==1.0.0 \ - --hash=sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d \ - --hash=sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782 +mypy-extensions==1.1.0 # via black -packaging==24.1 \ - --hash=sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002 \ - 
--hash=sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124 +packaging==25.0 # via black -pathspec==0.12.1 \ - --hash=sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08 \ - --hash=sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712 +pathspec==0.12.1 # via black -platformdirs==4.2.2 \ - --hash=sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee \ - --hash=sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3 +platformdirs==4.3.8 # via black -pycparser==2.22 \ - --hash=sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6 \ - --hash=sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc +pycparser==2.22 # via cffi -pygithub==1.59.1 \ - --hash=sha256:3d87a822e6c868142f0c2c4bf16cce4696b5a7a4d142a7bd160e1bdf75bc54a9 \ - --hash=sha256:c44e3a121c15bf9d3a5cc98d94c9a047a5132a9b01d22264627f58ade9ddc217 - # via -r requirements_formatting.txt.in -pyjwt[crypto]==2.9.0 \ - --hash=sha256:3b02fb0f44517787776cf48f2ae25d8e14f300e6d7545a4315cee571a415e850 \ - --hash=sha256:7e1e5b56cc735432a7369cbfa0efe50fa113ebecdc04ae6922deba8b84582d0c +pygithub==1.59.1 + # via -r llvm/utils/git/requirements_formatting.txt.in +pyjwt[crypto]==2.10.1 # via pygithub -pynacl==1.5.0 \ - --hash=sha256:06b8f6fa7f5de8d5d2f7573fe8c863c051225a27b61e6860fd047b1775807858 \ - --hash=sha256:0c84947a22519e013607c9be43706dd42513f9e6ae5d39d3613ca1e142fba44d \ - --hash=sha256:20f42270d27e1b6a29f54032090b972d97f0a1b0948cc52392041ef7831fee93 \ - --hash=sha256:401002a4aaa07c9414132aaed7f6836ff98f59277a234704ff66878c2ee4a0d1 \ - --hash=sha256:52cb72a79269189d4e0dc537556f4740f7f0a9ec41c1322598799b0bdad4ef92 \ - --hash=sha256:61f642bf2378713e2c2e1de73444a3778e5f0a38be6fee0fe532fe30060282ff \ - --hash=sha256:8ac7448f09ab85811607bdd21ec2464495ac8b7c66d146bf545b0f08fb9220ba \ - --hash=sha256:a36d4a9dda1f19ce6e03c9a784a2921a4b726b02e1c736600ca9c22029474394 \ - 
--hash=sha256:a422368fc821589c228f4c49438a368831cb5bbc0eab5ebe1d7fac9dded6567b \ - --hash=sha256:e46dae94e34b085175f8abb3b0aaa7da40767865ac82c928eeb9e57e1ea8a543 +pynacl==1.5.0 # via pygithub -requests==2.32.3 \ - --hash=sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760 \ - --hash=sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6 +requests==2.32.4 # via pygithub -toml==0.10.2 \ - --hash=sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b \ - --hash=sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f +toml==0.10.2 # via darker -urllib3==2.2.2 \ - --hash=sha256:a448b2f64d686155468037e1ace9f2d2199776e17f0a46610480d311f73e3472 \ - --hash=sha256:dd505485549a7a552833da5e6063639d0d177c04f23bc3864e41e5dc5f612168 +urllib3==2.5.0 # via requests -wrapt==1.16.0 \ - --hash=sha256:0d2691979e93d06a95a26257adb7bfd0c93818e89b1406f5a28f36e0d8c1e1fc \ - --hash=sha256:14d7dc606219cdd7405133c713f2c218d4252f2a469003f8c46bb92d5d095d81 \ - --hash=sha256:1a5db485fe2de4403f13fafdc231b0dbae5eca4359232d2efc79025527375b09 \ - --hash=sha256:1acd723ee2a8826f3d53910255643e33673e1d11db84ce5880675954183ec47e \ - --hash=sha256:1ca9b6085e4f866bd584fb135a041bfc32cab916e69f714a7d1d397f8c4891ca \ - --hash=sha256:1dd50a2696ff89f57bd8847647a1c363b687d3d796dc30d4dd4a9d1689a706f0 \ - --hash=sha256:2076fad65c6736184e77d7d4729b63a6d1ae0b70da4868adeec40989858eb3fb \ - --hash=sha256:2a88e6010048489cda82b1326889ec075a8c856c2e6a256072b28eaee3ccf487 \ - --hash=sha256:3ebf019be5c09d400cf7b024aa52b1f3aeebeff51550d007e92c3c1c4afc2a40 \ - --hash=sha256:418abb18146475c310d7a6dc71143d6f7adec5b004ac9ce08dc7a34e2babdc5c \ - --hash=sha256:43aa59eadec7890d9958748db829df269f0368521ba6dc68cc172d5d03ed8060 \ - --hash=sha256:44a2754372e32ab315734c6c73b24351d06e77ffff6ae27d2ecf14cf3d229202 \ - --hash=sha256:490b0ee15c1a55be9c1bd8609b8cecd60e325f0575fc98f50058eae366e01f41 \ - 
--hash=sha256:49aac49dc4782cb04f58986e81ea0b4768e4ff197b57324dcbd7699c5dfb40b9 \ - --hash=sha256:5eb404d89131ec9b4f748fa5cfb5346802e5ee8836f57d516576e61f304f3b7b \ - --hash=sha256:5f15814a33e42b04e3de432e573aa557f9f0f56458745c2074952f564c50e664 \ - --hash=sha256:5f370f952971e7d17c7d1ead40e49f32345a7f7a5373571ef44d800d06b1899d \ - --hash=sha256:66027d667efe95cc4fa945af59f92c5a02c6f5bb6012bff9e60542c74c75c362 \ - --hash=sha256:66dfbaa7cfa3eb707bbfcd46dab2bc6207b005cbc9caa2199bcbc81d95071a00 \ - --hash=sha256:685f568fa5e627e93f3b52fda002c7ed2fa1800b50ce51f6ed1d572d8ab3e7fc \ - --hash=sha256:6906c4100a8fcbf2fa735f6059214bb13b97f75b1a61777fcf6432121ef12ef1 \ - --hash=sha256:6a42cd0cfa8ffc1915aef79cb4284f6383d8a3e9dcca70c445dcfdd639d51267 \ - --hash=sha256:6dcfcffe73710be01d90cae08c3e548d90932d37b39ef83969ae135d36ef3956 \ - --hash=sha256:6f6eac2360f2d543cc875a0e5efd413b6cbd483cb3ad7ebf888884a6e0d2e966 \ - --hash=sha256:72554a23c78a8e7aa02abbd699d129eead8b147a23c56e08d08dfc29cfdddca1 \ - --hash=sha256:73870c364c11f03ed072dda68ff7aea6d2a3a5c3fe250d917a429c7432e15228 \ - --hash=sha256:73aa7d98215d39b8455f103de64391cb79dfcad601701a3aa0dddacf74911d72 \ - --hash=sha256:75ea7d0ee2a15733684badb16de6794894ed9c55aa5e9903260922f0482e687d \ - --hash=sha256:7bd2d7ff69a2cac767fbf7a2b206add2e9a210e57947dd7ce03e25d03d2de292 \ - --hash=sha256:807cc8543a477ab7422f1120a217054f958a66ef7314f76dd9e77d3f02cdccd0 \ - --hash=sha256:8e9723528b9f787dc59168369e42ae1c3b0d3fadb2f1a71de14531d321ee05b0 \ - --hash=sha256:9090c9e676d5236a6948330e83cb89969f433b1943a558968f659ead07cb3b36 \ - --hash=sha256:9153ed35fc5e4fa3b2fe97bddaa7cbec0ed22412b85bcdaf54aeba92ea37428c \ - --hash=sha256:9159485323798c8dc530a224bd3ffcf76659319ccc7bbd52e01e73bd0241a0c5 \ - --hash=sha256:941988b89b4fd6b41c3f0bfb20e92bd23746579736b7343283297c4c8cbae68f \ - --hash=sha256:94265b00870aa407bd0cbcfd536f17ecde43b94fb8d228560a1e9d3041462d73 \ - --hash=sha256:98b5e1f498a8ca1858a1cdbffb023bfd954da4e3fa2c0cb5853d40014557248b \ - 
--hash=sha256:9b201ae332c3637a42f02d1045e1d0cccfdc41f1f2f801dafbaa7e9b4797bfc2 \ - --hash=sha256:a0ea261ce52b5952bf669684a251a66df239ec6d441ccb59ec7afa882265d593 \ - --hash=sha256:a33a747400b94b6d6b8a165e4480264a64a78c8a4c734b62136062e9a248dd39 \ - --hash=sha256:a452f9ca3e3267cd4d0fcf2edd0d035b1934ac2bd7e0e57ac91ad6b95c0c6389 \ - --hash=sha256:a86373cf37cd7764f2201b76496aba58a52e76dedfaa698ef9e9688bfd9e41cf \ - --hash=sha256:ac83a914ebaf589b69f7d0a1277602ff494e21f4c2f743313414378f8f50a4cf \ - --hash=sha256:aefbc4cb0a54f91af643660a0a150ce2c090d3652cf4052a5397fb2de549cd89 \ - --hash=sha256:b3646eefa23daeba62643a58aac816945cadc0afaf21800a1421eeba5f6cfb9c \ - --hash=sha256:b47cfad9e9bbbed2339081f4e346c93ecd7ab504299403320bf85f7f85c7d46c \ - --hash=sha256:b935ae30c6e7400022b50f8d359c03ed233d45b725cfdd299462f41ee5ffba6f \ - --hash=sha256:bb2dee3874a500de01c93d5c71415fcaef1d858370d405824783e7a8ef5db440 \ - --hash=sha256:bc57efac2da352a51cc4658878a68d2b1b67dbe9d33c36cb826ca449d80a8465 \ - --hash=sha256:bf5703fdeb350e36885f2875d853ce13172ae281c56e509f4e6eca049bdfb136 \ - --hash=sha256:c31f72b1b6624c9d863fc095da460802f43a7c6868c5dda140f51da24fd47d7b \ - --hash=sha256:c5cd603b575ebceca7da5a3a251e69561bec509e0b46e4993e1cac402b7247b8 \ - --hash=sha256:d2efee35b4b0a347e0d99d28e884dfd82797852d62fcd7ebdeee26f3ceb72cf3 \ - --hash=sha256:d462f28826f4657968ae51d2181a074dfe03c200d6131690b7d65d55b0f360f8 \ - --hash=sha256:d5e49454f19ef621089e204f862388d29e6e8d8b162efce05208913dde5b9ad6 \ - --hash=sha256:da4813f751142436b075ed7aa012a8778aa43a99f7b36afe9b742d3ed8bdc95e \ - --hash=sha256:db2e408d983b0e61e238cf579c09ef7020560441906ca990fe8412153e3b291f \ - --hash=sha256:db98ad84a55eb09b3c32a96c576476777e87c520a34e2519d3e59c44710c002c \ - --hash=sha256:dbed418ba5c3dce92619656802cc5355cb679e58d0d89b50f116e4a9d5a9603e \ - --hash=sha256:dcdba5c86e368442528f7060039eda390cc4091bfd1dca41e8046af7c910dda8 \ - --hash=sha256:decbfa2f618fa8ed81c95ee18a387ff973143c656ef800c9f24fb7e9c16054e2 \ - 
--hash=sha256:e4fdb9275308292e880dcbeb12546df7f3e0f96c6b41197e0cf37d2826359020 \ - --hash=sha256:eb1b046be06b0fce7249f1d025cd359b4b80fc1c3e24ad9eca33e0dcdb2e4a35 \ - --hash=sha256:eb6e651000a19c96f452c85132811d25e9264d836951022d6e81df2fff38337d \ - --hash=sha256:ed867c42c268f876097248e05b6117a65bcd1e63b779e916fe2e33cd6fd0d3c3 \ - --hash=sha256:edfad1d29c73f9b863ebe7082ae9321374ccb10879eeabc84ba3b69f2579d537 \ - --hash=sha256:f2058f813d4f2b5e3a9eb2eb3faf8f1d99b81c3e51aeda4b168406443e8ba809 \ - --hash=sha256:f6b2d0c6703c988d334f297aa5df18c45e97b0af3679bb75059e0e0bd8b1069d \ - --hash=sha256:f8212564d49c50eb4565e502814f694e240c55551a5f1bc841d4fcaabb0a9b8a \ - --hash=sha256:ffa565331890b90056c01db69c0fe634a776f8019c143a5ae265f9c6bc4bd6d4 +wrapt==1.17.2 # via deprecated diff --git a/llvm/utils/git/requirements_formatting.txt.in b/llvm/utils/git/requirements_formatting.txt.in index 4aac571af1cf..244cb435a01b 100644 --- a/llvm/utils/git/requirements_formatting.txt.in +++ b/llvm/utils/git/requirements_formatting.txt.in @@ -1,3 +1,3 @@ -black~=23.0 +black~=24.3 darker==1.7.2 PyGithub==1.59.1 diff --git a/llvm/utils/vscode/llvm/package-lock.json b/llvm/utils/vscode/llvm/package-lock.json index 9559768c1919..ec888c3eb0cf 100644 --- a/llvm/utils/vscode/llvm/package-lock.json +++ b/llvm/utils/vscode/llvm/package-lock.json @@ -93,9 +93,9 @@ "dev": true }, "node_modules/brace-expansion": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", "dev": true, "dependencies": { "balanced-match": "^1.0.0", @@ -358,9 +358,9 @@ } }, "node_modules/semver": { - "version": "5.7.1", - "resolved": 
"https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", - "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", + "version": "5.7.2", + "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.2.tgz", + "integrity": "sha512-cBznnQ9KjJqU67B52RMC65CMarK2600WFnbkcaiwWq3xy/5haFJlshgnpjovMVJ+Hff49d8GEn0b87C5pDQ10g==", "dev": true, "bin": { "semver": "bin/semver" diff --git a/mlir/CMakeLists.txt b/mlir/CMakeLists.txt index 9e786154a2b4..e7bd52c150d6 100644 --- a/mlir/CMakeLists.txt +++ b/mlir/CMakeLists.txt @@ -217,6 +217,24 @@ set(MLIR_PDLL_TABLEGEN_TARGET "${MLIR_PDLL_TABLEGEN_TARGET}" CACHE INTERNAL "") set(MLIR_SRC_SHARDER_TABLEGEN_EXE "${MLIR_SRC_SHARDER_TABLEGEN_EXE}" CACHE INTERNAL "") set(MLIR_SRC_SHARDER_TABLEGEN_TARGET "${MLIR_SRC_SHARDER_TABLEGEN_TARGET}" CACHE INTERNAL "") +# XeGPU Dialect Option (Default OFF) +option(MLIR_DIALECT_XEGPU_ENABLE + "Enable the XeGPU dialect." + OFF) + +if(MLIR_DIALECT_XEGPU_ENABLE) + add_compile_definitions(MLIR_DIALECT_XEGPU_ENABLE) +endif() + +# TosaToTensor Conversion Option (Default OFF) +option(MLIR_CONVERSION_TOSATOTENSOR_ENABLE + "Enable TosaToTensor conversion" + OFF) + +if(MLIR_CONVERSION_TOSATOTENSOR_ENABLE) + add_compile_definitions(MLIR_CONVERSION_TOSATOTENSOR_ENABLE) +endif() + add_subdirectory(include/mlir) add_subdirectory(lib) # C API needs all dialects for registration, but should be built before tests. 
diff --git a/mlir/cmake/modules/AddMLIR.cmake b/mlir/cmake/modules/AddMLIR.cmake index 2fc84a5b2879..9df1f062cff8 100644 --- a/mlir/cmake/modules/AddMLIR.cmake +++ b/mlir/cmake/modules/AddMLIR.cmake @@ -196,6 +196,14 @@ function(add_mlir_interface interface) add_dependencies(mlir-generic-headers MLIR${interface}IncGen) endfunction() +# Declare a dialect in the include directory +function(add_mlir_type_interface interface) + set(LLVM_TARGET_DEFINITIONS ${interface}.td) + mlir_tablegen(${interface}.h.inc -gen-type-interface-decls) + mlir_tablegen(${interface}.cpp.inc -gen-type-interface-defs) + add_public_tablegen_target(MLIR${interface}IncGen) + add_dependencies(mlir-generic-headers MLIR${interface}IncGen) +endfunction() # Generate Documentation function(add_mlir_doc doc_filename output_file output_directory command) diff --git a/mlir/include/mlir-c/Dialect/Quant.h b/mlir/include/mlir-c/Dialect/Quant.h index a7d98dc3c1a7..dc0989e53344 100644 --- a/mlir/include/mlir-c/Dialect/Quant.h +++ b/mlir/include/mlir-c/Dialect/Quant.h @@ -172,6 +172,47 @@ mlirUniformQuantizedPerAxisTypeGetQuantizedDimension(MlirType type); MLIR_CAPI_EXPORTED bool mlirUniformQuantizedPerAxisTypeIsFixedPoint(MlirType type); +//===---------------------------------------------------------------------===// +// UniformQuantizedSubChannelType +//===---------------------------------------------------------------------===// + +/// Returns `true` if the given type is a UniformQuantizedSubChannel. +MLIR_CAPI_EXPORTED bool +mlirTypeIsAUniformQuantizedSubChannelType(MlirType type); + +/// Creates a UniformQuantizedSubChannelType with the given parameters. +/// +/// The type is owned by the context. `scalesAttr` and `zeroPointsAttr` must be +/// DenseElementsAttrs. `quantizedDimensions` and `blockSizes` +/// point to `blockSizeInfoLength` number of elements, describing respectively +/// the quantization axis and corresponding block size. 
+MLIR_CAPI_EXPORTED MlirType mlirUniformQuantizedSubChannelTypeGet( + unsigned flags, MlirType storageType, MlirType expressedType, + MlirAttribute scalesAttr, MlirAttribute zeroPointsAttr, + intptr_t blockSizeInfoLength, int32_t *quantizedDimensions, + int64_t *blockSizes, int64_t storageTypeMin, int64_t storageTypeMax); + +/// Returns the number of block sizes provided in type. +MLIR_CAPI_EXPORTED intptr_t +mlirUniformQuantizedSubChannelTypeGetNumBlockSizes(MlirType type); + +/// Returns the quantized dimension at the given position. +MLIR_CAPI_EXPORTED int32_t +mlirUniformQuantizedSubChannelTypeGetQuantizedDimension(MlirType type, + intptr_t pos); + +/// Returns the block size at the given position. +MLIR_CAPI_EXPORTED int64_t +mlirUniformQuantizedSubChannelTypeGetBlockSize(MlirType type, intptr_t pos); + +/// Returns the scales of the quantized type. +MLIR_CAPI_EXPORTED MlirAttribute +mlirUniformQuantizedSubChannelTypeGetScales(MlirType type); + +/// Returns the zero-points of the quantized type. 
+MLIR_CAPI_EXPORTED MlirAttribute +mlirUniformQuantizedSubChannelTypeGetZeroPoints(MlirType type); + //===---------------------------------------------------------------------===// // CalibratedQuantizedType //===---------------------------------------------------------------------===// diff --git a/mlir/include/mlir/Conversion/CMakeLists.txt b/mlir/include/mlir/Conversion/CMakeLists.txt index 9f76ab659215..0f452883c5f3 100644 --- a/mlir/include/mlir/Conversion/CMakeLists.txt +++ b/mlir/include/mlir/Conversion/CMakeLists.txt @@ -7,4 +7,10 @@ add_public_tablegen_target(MLIRConversionPassIncGen) add_mlir_doc(Passes ConversionPasses ./ -gen-pass-doc) +if(MLIR_CONVERSION_TOSATOTENSOR_ENABLE) + add_subdirectory(TosaToTensor) +endif() +if(MLIR_DIALECT_XEGPU_ENABLE) + add_subdirectory(VectorToXeGPU) +endif() add_subdirectory(ConvertToLLVM) diff --git a/mlir/include/mlir/Conversion/Passes.h b/mlir/include/mlir/Conversion/Passes.h index e9761c20642c..5e22bf0b2946 100644 --- a/mlir/include/mlir/Conversion/Passes.h +++ b/mlir/include/mlir/Conversion/Passes.h @@ -71,7 +71,9 @@ #include "mlir/Conversion/TosaToLinalg/TosaToLinalg.h" #include "mlir/Conversion/TosaToMLProgram/TosaToMLProgram.h" #include "mlir/Conversion/TosaToSCF/TosaToSCF.h" +#ifdef MLIR_CONVERSION_TOSATOTENSOR_ENABLE #include "mlir/Conversion/TosaToTensor/TosaToTensor.h" +#endif #include "mlir/Conversion/UBToLLVM/UBToLLVM.h" #include "mlir/Conversion/UBToSPIRV/UBToSPIRV.h" #include "mlir/Conversion/VectorToArmSME/VectorToArmSME.h" @@ -79,7 +81,9 @@ #include "mlir/Conversion/VectorToLLVM/ConvertVectorToLLVMPass.h" #include "mlir/Conversion/VectorToSCF/VectorToSCF.h" #include "mlir/Conversion/VectorToSPIRV/VectorToSPIRVPass.h" +#ifdef MLIR_DIALECT_XEGPU_ENABLE #include "mlir/Conversion/VectorToXeGPU/VectorToXeGPU.h" +#endif namespace mlir { diff --git a/mlir/include/mlir/Conversion/Passes.td b/mlir/include/mlir/Conversion/Passes.td index 4cd6c17e3379..08007790b6a0 100644 --- 
a/mlir/include/mlir/Conversion/Passes.td +++ b/mlir/include/mlir/Conversion/Passes.td @@ -1259,7 +1259,7 @@ def TosaToSCF : Pass<"tosa-to-scf"> { //===----------------------------------------------------------------------===// // TosaToTensor //===----------------------------------------------------------------------===// - +#ifdef MLIR_CONVERSION_TOSATOTENSOR_ENABLE def TosaToTensor : Pass<"tosa-to-tensor"> { let summary = "Lower TOSA to the Tensor dialect"; let dependentDialects = [ @@ -1272,6 +1272,7 @@ def TosaToTensor : Pass<"tosa-to-tensor"> { let constructor = "tosa::createTosaToTensor()"; } +#endif //===----------------------------------------------------------------------===// // UBToLLVM @@ -1464,6 +1465,7 @@ def ConvertVectorToSPIRV : Pass<"convert-vector-to-spirv"> { // VectorToXeGPU //===----------------------------------------------------------------------===// +#ifdef MLIR_DIALECT_XEGPU_ENABLE def ConvertVectorToXeGPU : Pass<"convert-vector-to-xegpu"> { let summary = "Lower the operations from the vector dialect into the XeGPU " "dialect"; @@ -1473,5 +1475,6 @@ def ConvertVectorToXeGPU : Pass<"convert-vector-to-xegpu"> { "vector::VectorDialect", "xegpu::XeGPUDialect" ]; } +#endif #endif // MLIR_CONVERSION_PASSES diff --git a/mlir/include/mlir/Conversion/TosaToTensor/CMakeLists.txt b/mlir/include/mlir/Conversion/TosaToTensor/CMakeLists.txt new file mode 100644 index 000000000000..b754397270b9 --- /dev/null +++ b/mlir/include/mlir/Conversion/TosaToTensor/CMakeLists.txt @@ -0,0 +1,7 @@ +if(MLIR_CONVERSION_TOSATOTENSOR_ENABLE) + set(LLVM_TARGET_DEFINITIONS Passes.td) + mlir_tablegen(Passes.h.inc -gen-pass-decls -name TosaToTensor) + mlir_tablegen(Passes.capi.h.inc -gen-pass-capi-header --prefix TosaToTensor) + mlir_tablegen(Passes.capi.cpp.inc -gen-pass-capi-impl --prefix TosaToTensor) + add_public_tablegen_target(MLIRTosaToTensorPassIncGen) +endif() \ No newline at end of file diff --git a/mlir/include/mlir/Dialect/CMakeLists.txt 
b/mlir/include/mlir/Dialect/CMakeLists.txt index f71023519733..df4a7238e55c 100644 --- a/mlir/include/mlir/Dialect/CMakeLists.txt +++ b/mlir/include/mlir/Dialect/CMakeLists.txt @@ -42,4 +42,6 @@ add_subdirectory(UB) add_subdirectory(Utils) add_subdirectory(Vector) add_subdirectory(X86Vector) -add_subdirectory(XeGPU) +if(MLIR_DIALECT_XEGPU_ENABLE) + add_subdirectory(XeGPU) +endif() diff --git a/mlir/include/mlir/Dialect/IRDL/IR/CMakeLists.txt b/mlir/include/mlir/Dialect/IRDL/IR/CMakeLists.txt index ec3e06bedae0..861db0ca9667 100644 --- a/mlir/include/mlir/Dialect/IRDL/IR/CMakeLists.txt +++ b/mlir/include/mlir/Dialect/IRDL/IR/CMakeLists.txt @@ -1,5 +1,5 @@ add_mlir_dialect(IRDL irdl) -add_mlir_doc(IRDLOps IRDL Dialects/ -gen-dialect-doc) +add_mlir_doc(IRDLOps IRDL Dialects/ -gen-dialect-doc -dialect=irdl) # Add IRDL interfaces set(LLVM_TARGET_DEFINITIONS IRDLInterfaces.td) diff --git a/mlir/include/mlir/Dialect/Quant/IR/QuantBase.td b/mlir/include/mlir/Dialect/Quant/IR/QuantBase.td index 791cb9de48d0..9c215cb1e257 100644 --- a/mlir/include/mlir/Dialect/Quant/IR/QuantBase.td +++ b/mlir/include/mlir/Dialect/Quant/IR/QuantBase.td @@ -40,13 +40,17 @@ def Quant_Dialect : Dialect { encodes the necessary information for (lossy) round-trip conversion between an expressed and a stored value. - The `quant.uniform` type has two variants: per-layer quantization and - per-channel (or per-axis) quantization. In per-layer quantization, the - quantization information affects an entire tensor uniformly. Conversely, in - per-channel quantization, the data type encodes the specific tensor axis - that serves as the channel and includes quantization information for each - individual channel within the tensor. Below are the specific syntactic and - semantic considerations for each modality. + The `quant.uniform` type has three variants: per-layer quantization, + per-channel (or per-axis) quantization, and sub-channel (or blockwise) + quantization. 
In per-layer quantization, the quantization information + affects an entire tensor uniformly. Conversely, in per-channel + quantization, the data type encodes the specific tensor axis that serves + as the channel and includes quantization information for each individual + channel within the tensor. Sub-channel quantization is a generalization + of per-tensor and per-channel quantization, where the quantization + parameters are defined for blocks of elements along one or more + dimensions of the tensor. Below are the specific syntactic and semantic + considerations for each modality. ### Per-layer quantization @@ -145,7 +149,7 @@ def Quant_Dialect : Dialect { ``` // A 2x3x4 tensor contains 8-bit signed integers representing 32-bit // floats. Dimension 1 of the tensor acts as the channel dimension. Its - // size 3 matches the number of provided scale values. Tensor elemenets at + // size 3 matches the number of provided scale values. Tensor elements at // positions [*][0][*], [*][1][*], and [*][2][*] use scales 3.0, 4.0, and // 5.0, respectively. tensor<2x3x4x!quant.uniform> @@ -159,6 +163,72 @@ def Quant_Dialect : Dialect { tensor> ``` + ### Sub-channel quantization + + Sub-channel quantization, also known as blockwise quantization, provides + finer-grained control than per-tensor or per-channel quantization. It + divides a tensor into blocks of elements, each with its own quantization + parameters (scale and zero point). This is particularly useful when + different regions of a tensor exhibit distinct value ranges. + + The `!quant.uniform` type represents sub-channel quantization with the + following syntax: + + ``` + `!quant.uniform` `<` + storedType (`<` storageMin `:` storageMax `>`)? `:` + expressedType `:` blockSizeInfo + scaleZeroTensor `>` + + blockSizeInfo ::= `{` `}` | `{` axisBlock (`,` axisBlock)*)? 
`}` + axisBlock ::= axis `:` blockSize + scaleZeroTensor ::= scaleZeroDenseExp | scaleZeroList + scaleZeroDenseExp ::= `{` scaleZeroTensor (`,` scaleZeroTensor)* `}` + scaleZeroList ::= scaleZero (`,` scaleZero)* + scaleZero ::= scale (`:` zeroPoint)? + + scaleZeroTensor ::= scale-zero-dense-exp | scale-zero-list + scale-zero-dense-exp ::= `{` scale-zero-tensor (`,` scale-zero-tensor)* `}` + scale-zero-list ::= scale (`:` zeroPoint)? (`,` scale (`:` zeroPoint)?)* + ``` + + The `blockSize` field specifies the size of the blocks along dimension + `axis` of the tensor. The `scale` and `zeroPoint` fields specify the + quantization parameters for a particular block. Specifically, the tensor + element at position [i0...iN] uses + `scaleZeroTensor[i/blockSize0...i/blockSizeN].scale` and + `scaleZeroTensor[i/blockSize0...i/blockSizeN].zeroPoint` as scale + and zeroPoint respectively. + + Here are some examples: + + ``` + // A 3x4 tensor of i8 values representing f32 values, quantized + // along axis-0 and axis-1 with block sizes 1 and 2, + // respectively. As a result, the shape of the scales (or zero-points) will + // be `[3,4]/[1,2] = [3,2]`, which essentially represents the number of + // blocks along each axis. Tensor elements at positions + // [0][0] and [0][1] use scale `s00` and zero point `z00`, + // [0][2] and [0][3] use scale `s01` and zero point `z01`, + // [1][0] and [1][1] use scale `s10` and zero point `z10`, + // [1][2] and [1][3] use scale `s11` and zero point `z11`, + // [2][0] and [2][1] use scale `s20` and zero point `z20`, + // [2][2] and [2][3] use scale `s21` and zero point `z21`, + tensor<3x4x!quant.uniform> + + // A 2D dynamically sized tensor contains u16 values + // representing f32 values. Since the shape of the quantization + // parameters (i.e. scales and zero-points) is given as [2,2] and + // the blocks-sizes are given as [1,2], the shape of the tensor is expected + // to be [2,4] (= [2,2] * [1,2]) at runtime. 
Tensor elements at positions + // [0][0] and [0][1] use scale `s00` and zero point `z00`, + // [0][2] and [0][3] use scale `s01` and zero point `z01`, + // [1][0] and [1][1] use scale `s10` and zero point `z10`, + // [1][2] and [1][3] use scale `s11` and zero point `z11`, + tensor> + ``` ## Per-axis quantization integrity @@ -170,7 +240,7 @@ def Quant_Dialect : Dialect { respected in any context in which the `!quant.uniform` data type is used, such as the header of a `func.func` op, or the input of an arithmetic operation. - + - A quantized type with per-channel quantization information must be the element type of a tensor container type, and may not occur directly as the data type of a scalar value. @@ -209,6 +279,110 @@ def Quant_Dialect : Dialect { // Correct. The quantized type now includes 3 scale values, matching the // size of dimension 1 of the result tensor. %result = quant.qcast %input : tensor to tensor> + + ## Sub-channel quantization integrity + + When type `!quant.uniform` contains sub-channel quantization information, + the following rules are enforced. For efficiency, these rules are actively + enforced by the verifiers of `quant` dialect ops, but they must be + respected in any context in which the `!quant.uniform` data type is used, + such as the header of a `func.func` op, or the input of an arithmetic + operation. + + - A quantized type with sub-channel quantization information must be the + element type of a tensor container type, and may not occur directly as + the data type of a scalar value. + + ``` + // Incorrect. Type !quant.uniform specifies sub-channel quantization for a + // scalar type. + %result = quant.qcast %input : f32 to !quant.uniform + + // Correct. Type `!quant.uniform` with sub-channel quantization is wrapped + // in a `tensor` type. + %result = quant.qcast %input : tensor<2x2xf32> to + tensor<2x2x!quant.uniform> + ``` + + - The tensor containing the sub-channel quantized type must be ranked. + + ``` + // Incorrect. 
Type !quant.uniform specifies sub-channel quantization for a + // unranked tensor type. + %result = quant.qcast %input : tensor<*xf32> to + tensor<*x!quant.uniform> + ``` + + - The axis for which a block size is specified should be valid for a tensor + of a given rank. Block sizes can be specified for a subset of axes. + Any unspecified block size for an axis i defaults to the tensor dimension + size of that axis (shape(tensor)[i]). + + ``` + // Incorrect. The block-size is specified for axis 2 which is greater than + // the rank of the tensor. + %result = quant.qcast %input : tensor<2x2xf32> to + tensor<2x2x!quant.uniform> + + // Incorrect. The block-size is specified for a negative axis. + %result = quant.qcast %input : tensor<2x2xf32> to + tensor<2x2x!quant.uniform> + + // Correct. The block size for axis 1 is skipped which should be assumed as + // 2, the dim-size of tensor at axis 1. + %result = quant.qcast %input : tensor<6x2xf32> to + tensor<6x2x!quant.uniform> + + // Correct. The block size for all the axes are skipped making the + // sub-channel type essentially a per-tensor type. + %result = quant.qcast %input : tensor<6x2xf32> to + tensor<6x2x!quant.uniform> + ``` + + - Block size for a particular axis should be a positive integer and should + be less than the dimension size of the tensor along that axis. + + ``` + // Incorrect. The block size for axis 0 is -1. + %result = quant.qcast %input : tensor<6x2xf32> to + tensor<6x2x!quant.uniform> + + // Incorrect. The block size for axis 0 is 8 which is greater than the + // dimension size of tensor at axis 0 (which is 6). + %result = quant.qcast %input : tensor<6x2xf32> to + tensor<6x2x!quant.uniform> + + // Correct. The block size for axis 0 is now 3. + %result = quant.qcast %input : tensor<6x2xf32> to + tensor<6x2x!quant.uniform> + ``` + + - shape(tensor) % blockSizes = 0 where blockSizes = [block sizes for + axis i in [0, 1, ..., rank(tensor)-1]]. + + ``` + // Incorrect. 
The block size for axis 0 is 4 and the corresponding + // dimension size is 6 and 6 % 4 != 0. + %result = quant.qcast %input : tensor<6x2xf32> to + tensor<6x2x!quant.uniform> + + // Correct. The block size for axis 0 is now 3 making 6 % 3 = 0. + %result = quant.qcast %input : tensor<6x2xf32> to + tensor<6x2x!quant.uniform> + ``` + + - shape(scales) = shape(zeroPoints) = shape(tensor) / blockSizes. + + ``` + // Incorrect. shape(tensor) = [6,2], blockSizes = [3,2], but + // shape(scales) is [1,2] which is not equal to [6,2]/[3,2]. + %result = quant.qcast %input : tensor<6x2xf32> to + tensor<6x2x!quant.uniform> + + // Correct. shape(tensor) = [6,2], blockSizes = [3,2], and + // shape(scales) equals [6,2]/[3,2]. + %result = quant.qcast %input : tensor<6x2xf32> to + tensor<6x2x!quant.uniform> ``` }]; let cppNamespace = "::mlir::quant"; @@ -226,6 +400,34 @@ class quant_ScalarOrTensorOf : def quant_QuantizedType : Type($_self)">, "quantized type">; + +def quant_QuantileQuantizedType : + DialectType($_self)">, + "QuantileQuantizedType">; + +def quant_QuantileQuantizedValueType : + quant_ScalarOrTensorOf; + +// UniformQuantizedPerAxisType +def quant_UniformQuantizedPerAxisType : + DialectType($_self)">, + "UniformQuantizedPerAxisType">; + +// QuantileQuantizedPerAxisType +def quant_QuantileQuantizedPerAxisType : + DialectType($_self)">, + "QuantileQuantizedPerAxisType">; + +// Predicate for detecting a container or primitive of UniformQuantizedPerAxisType. +def quant_UniformQuantizedPerAxisValueType : + quant_ScalarOrTensorOf; + +// Predicate for detecting a container or primitive of QuantileQuantizedPerAxisType. +def quant_QuantileQuantizedPerAxisValueType : + quant_ScalarOrTensorOf; def quant_ScalarType : Type:$quantizedDimensions, + Array:$blockSizes, DenseElementsAttr:$scales, + DenseElementsAttr:$zeroPoints)> { + // Note: builder order differs from bytecode. 
+ let cBuilder = [{ + get<$_resultType>(context, flags, storageType, expressedType, scales, + zeroPoints, llvm::to_vector(llvm::map_range(quantizedDimensions, + [](int64_t dim) { return static_cast(dim);})), blockSizes, + storageTypeMin, storageTypeMax) + }]; +} + +def QuantileQuantizedType: DialectType<(type + VarInt:$flags, + Type:$storageType, + Type:$quantileType, + Type:$expressedType, + Array:$quantiles, + DoubleAPFloat:$scale, + SignedVarInt:$zeroPoint, + SignedVarInt:$storageTypeMin, + SignedVarInt:$storageTypeMax +)>; + +def QuantileQuantizedPerAxisType: DialectType<(type + VarInt:$flags, + Type:$storageType, + Type:$quantileType, + Type:$expressedType, + VarInt:$quantizedDimension, + SignedVarInt:$storageTypeMin, + SignedVarInt:$storageTypeMax, + Array:$quantiles, + Array:$scales, + Array:$zeroPoints +)> { + // Note: builder order differs from bytecode. + let cBuilder = [{ + get<$_resultType>(context, flags, storageType, quantileType, expressedType, quantiles, scales, + zeroPoints, quantizedDimension, storageTypeMin, storageTypeMax) + }]; +} + /// This enum contains marker codes used to indicate which attribute is /// currently being decoded, and how it should be decoded. The order of these /// codes should generally be unchanged, as any changes will inevitably break /// compatibility with older bytecode. 
def QuantDialectTypes : DialectTypes<"Quant"> { - let elems = [ - ReservedOrDead, - AnyQuantizedType, - AnyQuantizedTypeWithExpressedType, - CalibratedQuantizedType, - UniformQuantizedType, - UniformQuantizedPerAxisType - ]; + let elems = [ReservedOrDead, AnyQuantizedType, + AnyQuantizedTypeWithExpressedType, CalibratedQuantizedType, + UniformQuantizedType, UniformQuantizedPerAxisType, + QuantileQuantizedType, + QuantileQuantizedPerAxisType, + UniformQuantizedSubChannelType]; } -#endif // QUANT_BYTECODE \ No newline at end of file +#endif // QUANT_BYTECODE diff --git a/mlir/include/mlir/Dialect/Quant/IR/QuantTypes.h b/mlir/include/mlir/Dialect/Quant/IR/QuantTypes.h index 43440ba623b9..324380fd388f 100644 --- a/mlir/include/mlir/Dialect/Quant/IR/QuantTypes.h +++ b/mlir/include/mlir/Dialect/Quant/IR/QuantTypes.h @@ -23,8 +23,11 @@ namespace detail { struct QuantizedTypeStorage; struct AnyQuantizedTypeStorage; +struct UniformQuantizedSubChannelTypeStorage; struct UniformQuantizedTypeStorage; struct UniformQuantizedPerAxisTypeStorage; +struct QuantileQuantizedTypeStorage; +struct QuantileQuantizedPerAxisTypeStorage; struct CalibratedQuantizedTypeStorage; } // namespace detail @@ -82,6 +85,18 @@ class QuantizedType : public Type { return llvm::maxUIntN(integralWidth); } + static constexpr int64_t getDefaultMaximumForF8E4M3FN() { return 448; } + + static constexpr int64_t getDefaultMinimumForF8E4M3FN() { + return -getDefaultMaximumForF8E4M3FN(); + } + + static constexpr int64_t getDefaultMaximumForF8E5M2() { return 57344; } + + static constexpr int64_t getDefaultMinimumForF8E5M2() { + return -getDefaultMaximumForF8E5M2(); + } + /// Gets the original expressed type that this quantized type approximates. /// Note that this presumes that the quantized type was always derived from /// a floating point type, which in the broadest definition, is not true (i.e. 
@@ -286,6 +301,8 @@ class UniformQuantizedType int64_t zeroPoint, int64_t storageTypeMin, int64_t storageTypeMax); + static bool classof(mlir::Type type); + /// Gets the scale term. The scale designates the difference between the real /// values corresponding to consecutive quantized values differing by 1. double getScale() const; @@ -349,6 +366,8 @@ class UniformQuantizedPerAxisType int32_t quantizedDimension, int64_t storageTypeMin, int64_t storageTypeMax); + static bool classof(mlir::Type type); + /// Gets the quantization scales. The scales designate the difference between /// the real values corresponding to consecutive quantized values differing /// by 1. The ith scale corresponds to the ith slice in the @@ -382,6 +401,278 @@ class UniformQuantizedPerAxisType } }; +/// Represents sub-channel (also known as blockwise quantization). +/// +/// Syntax synopsis: +/// UniformQuantizedSubChannelType ::= '!quant.uniform' '<' +/// storageType ('<' storageMin ':' storageMax '>')? ':' +/// expressedType ':' BlockSizeInfo ',' ScaleZeroTensor '>' +/// BlockSizeInfo: '{' '}' | '{' AxisBlock (',' AxisBlock)* '}' +/// AxisBlock ::= AxisSpec ':' BlockSizeSpec +/// ScaleZeroTensor ::= ScaleZeroDenseExp | ScaleZeroList +/// ScaleZeroDenseExp ::= '{' ScaleZeroTensor (',' ScaleZeroTensor)* '}' +/// ScaleZeroList ::= ScaleZero (',' ScaleZero)* +/// ScaleZero ::= Scale (':' ZeroPoint)? +/// +/// StorageType: 'i'|'u' NumBits +/// ExpressedType: 'f16', 'f32', 'bf16', 'f64' +/// AxisSpec: An integer value +/// BlockSizeSpec: An integer value +/// Scale: An attribute (usually floating-point value) +/// ZeroPoint: An attribute (usually integer value) +class UniformQuantizedSubChannelType + : public Type::TypeBase { +public: + using Base::Base; + using Base::getChecked; + + static constexpr StringLiteral name = "quant.uniform_sub_channel"; + + /// Gets an instance of the type with all parameters specified but not + /// checked. 
+ static UniformQuantizedSubChannelType + get(unsigned flags, Type storageType, Type expressedType, + DenseElementsAttr scales, DenseElementsAttr zeroPoints, + ArrayRef quantizedDimensions, ArrayRef blockSizes, + int64_t storageTypeMin, int64_t storageTypeMax); + + /// Gets an instance of the type with all specified parameters checked. + /// Returns a nullptr convertible type on failure. + static UniformQuantizedSubChannelType + getChecked(function_ref emitError, unsigned flags, + Type storageType, Type expressedType, DenseElementsAttr scales, + DenseElementsAttr zeroPoints, + ArrayRef quantizedDimensions, + ArrayRef blockSizes, int64_t storageTypeMin, + int64_t storageTypeMax); + + /// Verifies construction invariants and issues errors/warnings. + static LogicalResult + verifyInvariants(function_ref emitError, unsigned flags, + Type storageType, Type expressedType, + DenseElementsAttr scales, DenseElementsAttr zeroPoints, + ArrayRef quantizedDimensions, + ArrayRef blockSizes, int64_t storageTypeMin, + int64_t storageTypeMax); + + /// Gets the quantization scales. The scales are organized in a + /// multi-dimensional tensor. The size of each dimension in the scales tensor + /// is determined by the number of blocks along the corresponding dimension in + /// the quantized data tensor. + /// + /// For example, if the quantized data tensor has shape [X0, X1, ..., XR-1] + /// and the block sizes are [B0, B1, ..., BR-1], then the scales tensor will + /// have shape [X0/B0, X1/B1, ..., XR-1/BR-1]. + /// + /// The scale value for a specific element in the quantized data tensor at + /// position [i0, i1, ..., iR-1] is determined by accessing the corresponding + /// element in the scales tensor at position [i0/B0, i1/B1, ..., iR-1/BR-1]. + DenseElementsAttr getScales() const; + + /// Gets the quantization zero-points. The zero-points are organized in a + /// multi-dimensional tensor. 
The size of each dimension in the zero-point + /// tensor is determined by the number of blocks along the corresponding + /// dimension in the quantized data tensor. + /// + /// For example, if the quantized data tensor has shape [X0, X1, ..., XR-1] + /// and the block sizes are [B0, B1, ..., BR-1], then the zero-point tensor + /// will have shape [X0/B0, X1/B1, ..., XR-1/BR-1]. + /// + /// The zero-point value for a specific element in the quantized data tensor + /// at position [i0, i1, ..., iR-1] is determined by accessing the + /// corresponding element in the zero-point tensor at position [i0/B0, i1/B1, + /// ..., iR-1/BR-1]. + DenseElementsAttr getZeroPoints() const; + + /// Gets the quantized dimensions. Each element in the returned list + /// represents an axis of the quantized data tensor that has a specified block + /// size. The order of elements corresponds to the order of block sizes + /// returned by `getBlockSizes()`. + /// + /// It means that the data tensor is quantized along the `i`-th dimension in + /// the returned list using the `i`-th block size from `getBlockSizes()`. + /// + /// Note that the type expression does not have to specify the block size for + /// all axes in the data tensor. Any unspecified block size for an axis `i` + /// defaults to the tensor dimension size of that axis. + /// + /// For example, for a quantized type: + /// `tensor<8x4x2x!quant.uniform` + /// + /// `getQuantizedDimensions()` returns [1, 0]. + /// `getBlockSizes()` returns [2, 8]. + /// + /// This indicates that: + /// * Axis 1 (second dimension) is quantized with a block size of 2. + /// * Axis 0 (first dimension) is quantized with a block size of 8. + /// Since axis 2 is not specified, it implicitly has a block size equal to + /// the size of the third dimension (which is 2 in this case). + ArrayRef getQuantizedDimensions() const; + + /// Gets the block sizes for the quantized dimensions. 
The `i`-th element in + /// the returned list corresponds to the block size for the `i`-th dimension + /// in the list returned by `getQuantizedDimensions()`. + /// + /// See `getQuantizedDimensions()` for more details and examples. + ArrayRef getBlockSizes() const; + + /// Gets the block size information. This returns a list of pairs, where each + /// pair represents a quantized dimension and its corresponding block size. + /// + /// For example, for the type: + /// `tensor<8x4x!quant.uniform` + /// + /// This method returns: + /// `[(1, 2), (0, 8)]` + /// + /// This list indicates that axis 1 has a block size of 2, and axis 0 has a + /// block size of 8. + const SmallVector> getBlockSizeInfo() const; +}; + +/// QuantileQuantizedType derives from UniformQuantizedType and adds to it a +/// look up table array of quantile values. The type of the data in the look up table is determined by +/// the quantileType member: supported quantileType types are integer/unsigned/hf8/bf8/f16/bf16/f32/f64. +/// +/// Syntax synopsis: +/// Per-layer, all parameters expressed: +/// !quant +/// Per-layer, optional parameters omitted: +/// !quant +/// +/// StorageType: 'i'|'u' NumBits +/// QuantileType: 'i'|'u' NumBits, 'hf8', 'bf8', 'f16', 'bf16', 'f32', 'f64' +/// ExpressedType: 'f16', 'f32', 'bf16', 'f64' +/// Quantiles: Quantile+ +/// Quantile: A legal double value +/// Scale: A legal double value +/// ZeroPoint: An integer value +class QuantileQuantizedType + : public Type::TypeBase { +public: + using Base::Base; + using Base::getChecked; + + static constexpr StringLiteral name = "quant.quantile"; + + /// Gets an instance of the type with all parameters specified but not + /// checked. 
+ static QuantileQuantizedType get(unsigned flags, Type storageType, + Type quantileType, Type expressedType, + ArrayRef quantiles, double scale, + int64_t zeroPoint, int64_t storageTypeMin, + int64_t storageTypeMax); + + static QuantileQuantizedType + getChecked(function_ref emitError, unsigned flags, + Type storageType, Type quantileType, Type expressedType, + ArrayRef quantiles, double scale, int64_t zeroPoint, + int64_t storageTypeMin, int64_t storageTypeMax); + + /// Verifies construction invariants and issues errors/warnings. + static LogicalResult verifyInvariants(function_ref emitError, + unsigned flags, Type storageType, + Type quantileType, Type expressedType, + ArrayRef quantiles, double scale, + int64_t zeroPoint, int64_t storageTypeMin, + int64_t storageTypeMax); + + static bool classof(mlir::Type type); + + /// Gets the quantileType + Type getQuantileType() const; + + /// Gets the quantileType bit width + unsigned getQuantileTypeIntegralWidth() const; + + /// Gets the quantile values + ArrayRef getQuantiles() const; + + // Fixed point values are real numbers divided by a scale. + // Currently, only signed storage types are treated as fixed point. + // A fixed point value can be obtained from an affine value by subtracting + // the zeroPoint. + // In the future, this may be explicit versus implied by type and zeroPoint. + bool isFixedPoint() const { return isSigned() && getZeroPoint() == 0; } +}; + +/// Represents per-axis QuantileQuantizedType (also known as per-channel +/// quantization). The type of the data in the look up table is determined by the +/// quantileType member: supported quantileType types are integer/unsigned/hf8/bf8/f16/bf16/f32/f64. 
+/// +/// Syntax synopsis: +/// Per-axis, all parameters expressed: +/// !quant +/// Per-axis, optional parameters omitted: +/// !quant +/// +/// StorageType: 'i'|'u' NumBits +/// QuantileType: 'i'|'u' NumBits, 'hf8', 'bf8', 'f16', 'bf16', 'f32', 'f64' +/// ExpressedType: 'f16', 'f32', 'bf16', 'f64' +/// QuantizedDim: An integer value +/// Quantiles: Quantile+ +/// Quantile: A legal double value +/// QuantParams: (Scale ':' ZeroPoint)+ +/// Scale: A legal double value +/// ZeroPoint: An integer value +class QuantileQuantizedPerAxisType + : public Type::TypeBase { +public: + using Base::Base; + using Base::getChecked; + + static constexpr StringLiteral name = "quant.quantile_per_axis"; + + /// Gets an instance of the type with all parameters specified but not + /// checked. + static QuantileQuantizedPerAxisType + get(unsigned flags, Type storageType, Type quantileType, Type expressedType, + ArrayRef quantiles, ArrayRef scales, + ArrayRef zeroPoints, int32_t quantizedDimension, + int64_t storageTypeMin, int64_t storageTypeMax); + + /// Gets an instance of the type with all specified parameters checked. + /// Returns a nullptr convertible type on failure. + static QuantileQuantizedPerAxisType + getChecked(function_ref emitError, unsigned flags, + Type storageType, Type quantileType, Type expressedType, + ArrayRef quantiles, ArrayRef scales, + ArrayRef zeroPoints, int32_t quantizedDimension, + int64_t storageTypeMin, int64_t storageTypeMax); + + /// Verifies construction invariants and issues errors/warnings. 
+ static LogicalResult + verifyInvariants(function_ref emitError, unsigned flags, + Type storageType, Type quantileType, Type expressedType, + ArrayRef quantiles, ArrayRef scales, + ArrayRef zeroPoints, int32_t quantizedDimension, + int64_t storageTypeMin, int64_t storageTypeMax); + + static bool classof(mlir::Type type); + + /// Gets the quantileType + Type getQuantileType() const; + + /// Gets the quantileType bit width + unsigned getQuantileTypeIntegralWidth() const; + + /// Gets the quantile values + ArrayRef getQuantiles() const; + + /// Fixed point values are real numbers divided by a scale. + /// Currently, only signed storage types are treated as fixed point. + /// A fixed point value can be obtained from an affine value by subtracting + /// the zeroPoint. + /// In the future, this may be explicit versus implied by type and zeroPoint. + bool isFixedPoint() const { + return isSigned() && !llvm::is_contained(getZeroPoints(), 0); + } +}; + /// A quantized type that infers its range from given min/max values. /// /// Typical syntax: diff --git a/mlir/include/mlir/Dialect/Quant/Transforms/Passes.td b/mlir/include/mlir/Dialect/Quant/Transforms/Passes.td index b25296d4db5a..a62315c0395f 100644 --- a/mlir/include/mlir/Dialect/Quant/Transforms/Passes.td +++ b/mlir/include/mlir/Dialect/Quant/Transforms/Passes.td @@ -31,6 +31,44 @@ def LowerQuantOps : Pass<"lower-quant-ops", "func::FuncOp"> { ]; } +def NormalizeQuantTypes : Pass<"normalize-quant-types", "func::FuncOp"> { + let summary = "Normalize generic quantized types to specific quantized types"; + let description = [{ + This pass converts generic quantized types in the `quant` dialect to more + specific types when possible. + + The following conversions are performed: + + 1. Sub-channel to per-axis: If the shape of the scales tensor of sub-channel + quantized type has all but one non-one value, it is converted to a + per-axis quantized type. 
+ + For example: + + * `!quant.uniform` + -> `!quant.uniform` + * `tensor>` + -> `tensor>` + + 2. Sub-channel to per-tensor: If a sub-channel quantized type has only + one scale or zero-point, it is converted to a per-tensor + quantized type. + + For example: + + * `!quant.uniform` + -> `!quant.uniform` + * `tensor>` + -> `tensor>` + + The rationale for these conversions is that the decompositions / handling of + more precise quantized types tends to be more efficient than treating + everything as subchannel type. + + }]; + let dependentDialects = ["func::FuncDialect", "quant::QuantDialect"]; +} + def StripFuncQuantTypes : Pass<"strip-func-quant-types"> { let summary = "Strip quantized types from function headers"; let description = [{ diff --git a/mlir/include/mlir/IR/BuiltinTypeInterfaces.td b/mlir/include/mlir/IR/BuiltinTypeInterfaces.td index 8aa2c5557015..5559995aa261 100644 --- a/mlir/include/mlir/IR/BuiltinTypeInterfaces.td +++ b/mlir/include/mlir/IR/BuiltinTypeInterfaces.td @@ -143,24 +143,20 @@ def ShapedTypeInterface : TypeInterface<"ShapedType"> { /// Return the number of elements present in the given shape. static int64_t getNumElements(ArrayRef shape); + }]; + let extraSharedClassDeclaration = [{ /// Return a clone of this type with the given new shape and element type. - /// The returned type is ranked, even if this type is unranked. auto clone(::llvm::ArrayRef shape, Type elementType) { - return cloneWith(shape, elementType); + return $_type.cloneWith(shape, elementType); } - /// Return a clone of this type with the given new shape. The returned type - /// is ranked, even if this type is unranked. + /// Return a clone of this type with the given new shape. auto clone(::llvm::ArrayRef shape) { - return cloneWith(shape, getElementType()); + return $_type.cloneWith(shape, $_type.getElementType()); } - }]; - let extraSharedClassDeclaration = [{ - /// Return a clone of this type with the given new element type. 
The - /// returned type is ranked if and only if this type is ranked. In that - /// case, the returned type has the same shape as this type. + /// Return a clone of this type with the given new element type. auto clone(::mlir::Type elementType) { return $_type.cloneWith(/*shape=*/std::nullopt, elementType); } @@ -227,4 +223,68 @@ def ShapedTypeInterface : TypeInterface<"ShapedType"> { }]; } +//===----------------------------------------------------------------------===// +// TensorTypeInterface +//===----------------------------------------------------------------------===// + +def TensorTypeInterface : TypeInterface<"TensorType", [ + ShapedTypeInterface + ] + > { + let cppNamespace = "::mlir"; + let description = [{ + This interface provides a shared interface for ranked and unranked type + and customized tensor types. + + This class attaches the ShapedTypeInterface to act as a mixin to + provide many useful utility functions. + }]; + + let extraClassDeclaration = [{ + // Return true if the specified element type is ok in a tensor. + static bool isValidElementType(::mlir::Type type); + }]; + + let extraClassOf = [{ + return $_type.hasTrait<::mlir::TensorType::Trait>(); + }]; + +} + +//===----------------------------------------------------------------------===// +// BaseMemRefTypeInterface +//===----------------------------------------------------------------------===// + +def BaseMemRefTypeInterface : TypeInterface<"BaseMemRefType", [ + ShapedTypeInterface + ] + > { + let cppNamespace = "::mlir"; + let description = [{ + This interface provides a shared interface for ranked and unranked memref and + customized memref types. + + This interface attaches the ShapedTypeInterface to act as a mixin to + provide many useful utility functions. + }]; + + let methods = [ + InterfaceMethod<[{ + Returns the memory space in which data referred to by this memref resides. 
+ }], + "::mlir::Attribute", "getMemorySpace">, + ]; + + let extraClassDeclaration = [{ + // Return true if the specified element type is ok in a memref. + static bool isValidElementType(::mlir::Type type); + + unsigned getMemorySpaceAsInt() const; + }]; + + let extraClassOf = [{ + return $_type.hasTrait<::mlir::BaseMemRefType::Trait>(); + }]; +} + #endif // MLIR_IR_BUILTINTYPEINTERFACES_TD_ diff --git a/mlir/include/mlir/IR/BuiltinTypes.h b/mlir/include/mlir/IR/BuiltinTypes.h index df1e02732617..5e65587aa4e5 100644 --- a/mlir/include/mlir/IR/BuiltinTypes.h +++ b/mlir/include/mlir/IR/BuiltinTypes.h @@ -43,119 +43,19 @@ template class ValueSemantics : public TypeTrait::TraitBase {}; -//===----------------------------------------------------------------------===// -// TensorType -//===----------------------------------------------------------------------===// - -/// Tensor types represent multi-dimensional arrays, and have two variants: -/// RankedTensorType and UnrankedTensorType. -/// Note: This class attaches the ShapedType trait to act as a mixin to -/// provide many useful utility functions. This inheritance has no effect -/// on derived tensor types. -class TensorType : public Type, public ShapedType::Trait { -public: - using Type::Type; - - /// Returns the element type of this tensor type. - Type getElementType() const; - - /// Returns if this type is ranked, i.e. it has a known number of dimensions. - bool hasRank() const; - - /// Returns the shape of this tensor type. - ArrayRef getShape() const; - - /// Clone this type with the given shape and element type. If the - /// provided shape is `std::nullopt`, the current shape of the type is used. - TensorType cloneWith(std::optional> shape, - Type elementType) const; - - // Make sure that base class overloads are visible. - using ShapedType::Trait::clone; - - /// Return a clone of this type with the given new shape and element type. - /// The returned type is ranked, even if this type is unranked. 
- RankedTensorType clone(ArrayRef shape, Type elementType) const; - - /// Return a clone of this type with the given new shape. The returned type - /// is ranked, even if this type is unranked. - RankedTensorType clone(ArrayRef shape) const; - - /// Return true if the specified element type is ok in a tensor. - static bool isValidElementType(Type type); - - /// Methods for support type inquiry through isa, cast, and dyn_cast. - static bool classof(Type type); - - /// Allow implicit conversion to ShapedType. - operator ShapedType() const { return llvm::cast(*this); } -}; - -//===----------------------------------------------------------------------===// -// BaseMemRefType -//===----------------------------------------------------------------------===// - -/// This class provides a shared interface for ranked and unranked memref types. -/// Note: This class attaches the ShapedType trait to act as a mixin to -/// provide many useful utility functions. This inheritance has no effect -/// on derived memref types. -class BaseMemRefType : public Type, public ShapedType::Trait { -public: - using Type::Type; - - /// Returns the element type of this memref type. - Type getElementType() const; - - /// Returns if this type is ranked, i.e. it has a known number of dimensions. - bool hasRank() const; - - /// Returns the shape of this memref type. - ArrayRef getShape() const; - - /// Clone this type with the given shape and element type. If the - /// provided shape is `std::nullopt`, the current shape of the type is used. - BaseMemRefType cloneWith(std::optional> shape, - Type elementType) const; - - // Make sure that base class overloads are visible. - using ShapedType::Trait::clone; - - /// Return a clone of this type with the given new shape and element type. - /// The returned type is ranked, even if this type is unranked. - MemRefType clone(ArrayRef shape, Type elementType) const; - - /// Return a clone of this type with the given new shape. 
The returned type - /// is ranked, even if this type is unranked. - MemRefType clone(ArrayRef shape) const; - - /// Return true if the specified element type is ok in a memref. - static bool isValidElementType(Type type); - - /// Methods for support type inquiry through isa, cast, and dyn_cast. - static bool classof(Type type); - - /// Returns the memory space in which data referred to by this memref resides. - Attribute getMemorySpace() const; - - /// [deprecated] Returns the memory space in old raw integer representation. - /// New `Attribute getMemorySpace()` method should be used instead. - unsigned getMemorySpaceAsInt() const; - - /// Allow implicit conversion to ShapedType. - operator ShapedType() const { return llvm::cast(*this); } -}; - } // namespace mlir //===----------------------------------------------------------------------===// // Tablegen Type Declarations //===----------------------------------------------------------------------===// +// Include QuantizationInterface before BuiltinTypes to resolve dependencies +#include "mlir/IR/QuantizationInterface.h" + #define GET_TYPEDEF_CLASSES #include "mlir/IR/BuiltinTypes.h.inc" namespace mlir { -#include "mlir/IR/BuiltinTypeConstraints.h.inc" //===----------------------------------------------------------------------===// // MemRefType @@ -353,7 +253,7 @@ enum class SliceVerificationResult { /// code. 
SliceVerificationResult isRankReducedType(ShapedType originalType, ShapedType candidateReducedType); - + //===----------------------------------------------------------------------===// // Convenience wrappers for VectorType // @@ -390,10 +290,6 @@ class FixedVectorType : public VectorType { // Deferred Method Definitions //===----------------------------------------------------------------------===// -inline bool BaseMemRefType::classof(Type type) { - return llvm::isa(type); -} - inline bool BaseMemRefType::isValidElementType(Type type) { return type.isIntOrIndexOrFloat() || llvm::isa( @@ -401,14 +297,37 @@ inline bool BaseMemRefType::isValidElementType(Type type) { llvm::isa(type); } -inline bool TensorType::classof(Type type) { - return llvm::isa(type); -} - //===----------------------------------------------------------------------===// // Type Utilities //===----------------------------------------------------------------------===// +/// Returns the strides of the MemRef if the layout map is in strided form. +/// MemRefs with a layout map in strided form include: +/// 1. empty or identity layout map, in which case the stride information is +/// the canonical form computed from sizes; +/// 2. a StridedLayoutAttr layout; +/// 3. any other layout that be converted into a single affine map layout of +/// the form `K + k0 * d0 + ... kn * dn`, where K and ki's are constants or +/// symbols. +/// +/// A stride specification is a list of integer values that are either static +/// or dynamic (encoded with ShapedType::kDynamic). Strides encode +/// the distance in the number of elements between successive entries along a +/// particular dimension. +LogicalResult getStridesAndOffset(MemRefType t, + SmallVectorImpl &strides, + int64_t &offset); + +/// Wrapper around getStridesAndOffset(MemRefType, SmallVectorImpl, +/// int64_t) that will assert if the logical result is not succeeded. 
+std::pair, int64_t> getStridesAndOffset(MemRefType t); + +/// Return a version of `t` with identity layout if it can be determined +/// statically that the layout is the canonical contiguous strided layout. +/// Otherwise pass `t`'s layout into `simplifyAffineMap` and return a copy of +/// `t` with simplified layout. +MemRefType canonicalizeStridedLayout(MemRefType t); + /// Given MemRef `sizes` that are either static or dynamic, returns the /// canonical "contiguous" strides AffineExpr. Strides are multiplicative and /// once a dynamic dimension is encountered, all canonical strides become @@ -431,6 +350,24 @@ AffineExpr makeCanonicalStridedLayoutExpr(ArrayRef sizes, /// where `exprs` is {d0, d1, .., d_(sizes.size()-1)} AffineExpr makeCanonicalStridedLayoutExpr(ArrayRef sizes, MLIRContext *context); + +/// Return "true" if the layout for `t` is compatible with strided semantics. +bool isStrided(MemRefType t); + +/// Return "true" if the last dimension of the given type has a static unit +/// stride. Also return "true" for types with no strides. +bool isLastMemrefDimUnitStride(MemRefType type); + +/// Return "true" if the last N dimensions of the given type are contiguous. +/// +/// Examples: +/// - memref<5x4x3x2xi8, strided<[24, 6, 2, 1]> is contiguous when +/// considering both _all_ and _only_ the trailing 3 dims, +/// - memref<5x4x3x2xi8, strided<[48, 6, 2, 1]> is _only_ contiguous when +/// considering the trailing 3 dims. 
+/// +bool trailingNDimsContiguous(MemRefType type, int64_t n); + } // namespace mlir #endif // MLIR_IR_BUILTINTYPES_H diff --git a/mlir/include/mlir/IR/BuiltinTypes.td b/mlir/include/mlir/IR/BuiltinTypes.td index e5a2ae81da0c..7c8d28291f37 100644 --- a/mlir/include/mlir/IR/BuiltinTypes.td +++ b/mlir/include/mlir/IR/BuiltinTypes.td @@ -18,6 +18,7 @@ include "mlir/IR/AttrTypeBase.td" include "mlir/IR/BuiltinDialect.td" include "mlir/IR/BuiltinTypeInterfaces.td" include "mlir/IR/CommonTypeConstraints.td" +include "mlir/IR/QuantizationInterface.td" // TODO: Currently the types defined in this file are prefixed with `Builtin_`. // This is to differentiate the types here with the ones in OpBase.td. We should @@ -79,7 +80,7 @@ def Builtin_Complex : Builtin_Type<"Complex", "complex"> { //===----------------------------------------------------------------------===// // Base class for Builtin dialect float types. -class Builtin_FloatType traits = [], list declaredInterfaceMethods = []> : Builtin_Type declaredInterfaceMethods = []> - : Builtin_FloatType { + : Builtin_FloatType { let extraClassDeclaration = [{ static }] # name # [{Type get(MLIRContext *context); }]; @@ -99,7 +100,8 @@ class Builtin_CachedFloatType { +def Builtin_Float8E5M2 : Builtin_FloatType<"Float8E5M2", "f8E5M2", + [QuantizationInterface]> { let summary = "8-bit floating point with 2 bit mantissa"; let description = [{ An 8-bit floating point type with 1 sign bit, 5 bits exponent and 2 bits @@ -115,6 +117,21 @@ def Builtin_Float8E5M2 : Builtin_FloatType<"Float8E5M2", "f8E5M2"> { Described in: https://arxiv.org/abs/2209.05433 }]; + + let extraClassDeclaration = [{ + /// QuantizationInterface method implementations + bool isStorageSigned() const { return true; } + unsigned getStorageWidth() const { return 8; } + int64_t getDefaultMaximum([[maybe_unused]] bool isSigned, [[maybe_unused]] unsigned integralWidth) const { + return 448; + } + int64_t getDefaultMinimum(bool isSigned, unsigned integralWidth) const { 
+ return -getDefaultMaximum(isSigned, integralWidth); + } + std::string printStorageType([[maybe_unused]] bool isSigned, [[maybe_unused]] unsigned storageWidth) const { + return "f8E5M2"; + } + }]; } //===----------------------------------------------------------------------===// @@ -139,7 +156,8 @@ def Builtin_Float8E4M3 : Builtin_FloatType<"Float8E4M3", "f8E4M3"> { //===----------------------------------------------------------------------===// // Float8E4M3FNType -def Builtin_Float8E4M3FN : Builtin_FloatType<"Float8E4M3FN", "f8E4M3FN"> { +def Builtin_Float8E4M3FN : Builtin_FloatType<"Float8E4M3FN", "f8E4M3FN", + [QuantizationInterface]> { let summary = "8-bit floating point with 3 bit mantissa"; let description = [{ An 8-bit floating point type with 1 sign bit, 4 bits exponent and 3 bits @@ -156,6 +174,21 @@ def Builtin_Float8E4M3FN : Builtin_FloatType<"Float8E4M3FN", "f8E4M3FN"> { Described in: https://arxiv.org/abs/2209.05433 }]; + + let extraClassDeclaration = [{ + /// QuantizationInterface method implementations + bool isStorageSigned() const { return true; } + unsigned getStorageWidth() const { return 8; } + int64_t getDefaultMaximum([[maybe_unused]] bool isSigned, [[maybe_unused]] unsigned integralWidth) const { + return 57344; + } + int64_t getDefaultMinimum(bool isSigned, unsigned integralWidth) const{ + return -getDefaultMaximum(isSigned, integralWidth); + } + std::string printStorageType([[maybe_unused]] bool isSigned, [[maybe_unused]] unsigned storageWidth) const { + return "f8E4M3FN"; + } + }]; } //===----------------------------------------------------------------------===// @@ -477,7 +510,8 @@ def Builtin_Index : Builtin_Type<"Index", "index"> { // IntegerType //===----------------------------------------------------------------------===// -def Builtin_Integer : Builtin_Type<"Integer", "integer"> { +def Builtin_Integer : Builtin_Type<"Integer", "integer", + [QuantizationInterface]> { let summary = "Integer type with arbitrary precision up to a 
fixed limit"; let description = [{ Syntax: @@ -534,6 +568,25 @@ def Builtin_Integer : Builtin_Type<"Integer", "integer"> { /// Integer representation maximal bitwidth. /// Note: This is aligned with the maximum width of llvm::IntegerType. static constexpr unsigned kMaxWidth = (1 << 24) - 1; + + /// QuantizationInterface method implementations + bool isStorageSigned() const { return !isUnsigned(); } + unsigned getStorageWidth() const { return getWidth(); } + int64_t getDefaultMinimum(bool isSigned, unsigned integralWidth) const { + if (isSigned) { + return llvm::minIntN(integralWidth); + } + return 0; + } + int64_t getDefaultMaximum(bool isSigned, unsigned integralWidth) const { + if (isSigned) { + return llvm::maxIntN(integralWidth); + } + return llvm::maxUIntN(integralWidth); + } + std::string printStorageType(bool isSigned, unsigned storageWidth) const { + return (isSigned ? "i" : "u") + std::to_string(storageWidth); + } }]; } @@ -542,8 +595,8 @@ def Builtin_Integer : Builtin_Type<"Integer", "integer"> { //===----------------------------------------------------------------------===// def Builtin_MemRef : Builtin_Type<"MemRef", "memref", [ - ShapedTypeInterface - ], "BaseMemRefType"> { + BaseMemRefTypeInterface + ]> { let summary = "Shaped reference to a region of memory"; let description = [{ Syntax: @@ -794,7 +847,7 @@ def Builtin_MemRef : Builtin_Type<"MemRef", "memref", [ "unsigned":$memorySpaceInd)> ]; let extraClassDeclaration = [{ - using BaseMemRefType::clone; + using ShapedType::Trait::clone; using ShapedType::Trait::getElementTypeBitWidth; using ShapedType::Trait::getRank; using ShapedType::Trait::getNumElements; @@ -828,6 +881,14 @@ def Builtin_MemRef : Builtin_Type<"MemRef", "memref", [ /// New `Attribute getMemorySpace()` method should be used instead. unsigned getMemorySpaceAsInt() const; + /// Returns if this type is ranked (always true). 
+ bool hasRank() const { return true; } + + /// Returns a clone of this type with the given shape and element + /// type. If a shape is not provided, the current shape of the type is used. + MemRefType cloneWith(std::optional> shape, + Type elementType) const; + /// Returns the strides of the MemRef if the layout map is in strided form. /// MemRefs with a layout map in strided form include: /// 1. empty or identity layout map, in which case the stride information @@ -934,8 +995,8 @@ def Builtin_Opaque : Builtin_Type<"Opaque", "opaque"> { //===----------------------------------------------------------------------===// def Builtin_RankedTensor : Builtin_Type<"RankedTensor", "tensor", [ - ShapedTypeInterface, ValueSemantics - ], "TensorType"> { + TensorTypeInterface, ValueSemantics + ]> { let summary = "Multi-dimensional array with a fixed number of dimensions"; let description = [{ Syntax: @@ -1016,7 +1077,7 @@ def Builtin_RankedTensor : Builtin_Type<"RankedTensor", "tensor", [ }]> ]; let extraClassDeclaration = [{ - using TensorType::clone; + using ShapedType::Trait::clone; using ShapedType::Trait::getElementTypeBitWidth; using ShapedType::Trait::getRank; using ShapedType::Trait::getNumElements; @@ -1030,11 +1091,12 @@ def Builtin_RankedTensor : Builtin_Type<"RankedTensor", "tensor", [ /// Arguments that are passed into the builder must outlive the builder. class Builder; - /// Return a clone of this type with the given new element type and the same - /// shape as this type. - RankedTensorType clone(::mlir::Type elementType) { - return ::llvm::cast(cloneWith(getShape(), elementType)); - } + /// Returns if this type is ranked (always true). + bool hasRank() const { return true; } + + /// Returns a clone of this type with the given shape and element type. 
+ RankedTensorType cloneWith(std::optional> shape, + Type elementType) const; }]; let skipDefaultBuilders = 1; let genVerifyDecl = 1; @@ -1112,8 +1174,8 @@ def Builtin_Tuple : Builtin_Type<"Tuple", "tuple"> { //===----------------------------------------------------------------------===// def Builtin_UnrankedMemRef : Builtin_Type<"UnrankedMemRef", "unranked_memref", [ - ShapedTypeInterface - ], "BaseMemRefType"> { + BaseMemRefTypeInterface + ]> { let summary = "Shaped reference, with unknown rank, to a region of memory"; let description = [{ Syntax: @@ -1159,7 +1221,7 @@ def Builtin_UnrankedMemRef : Builtin_Type<"UnrankedMemRef", "unranked_memref", [ }]> ]; let extraClassDeclaration = [{ - using BaseMemRefType::clone; + using ShapedType::Trait::clone; using ShapedType::Trait::getElementTypeBitWidth; using ShapedType::Trait::getRank; using ShapedType::Trait::getNumElements; @@ -1175,11 +1237,12 @@ def Builtin_UnrankedMemRef : Builtin_Type<"UnrankedMemRef", "unranked_memref", [ /// New `Attribute getMemorySpace()` method should be used instead. unsigned getMemorySpaceAsInt() const; - /// Return a clone of this type with the given new element type and the same - /// shape as this type. - MemRefType clone(::mlir::Type elementType) { - return ::llvm::cast(cloneWith(getShape(), elementType)); - } + /// Returns if this type is ranked (always false). + bool hasRank() const { return false; } + + /// Returns a clone of this type with the given shape and element type. 
+ BaseMemRefType cloneWith(std::optional> shape, + Type elementType) const; }]; let skipDefaultBuilders = 1; let genVerifyDecl = 1; @@ -1190,8 +1253,8 @@ def Builtin_UnrankedMemRef : Builtin_Type<"UnrankedMemRef", "unranked_memref", [ //===----------------------------------------------------------------------===// def Builtin_UnrankedTensor : Builtin_Type<"UnrankedTensor", "unranked_tensor", [ - ShapedTypeInterface, ValueSemantics - ], "TensorType"> { + TensorTypeInterface, ValueSemantics + ]> { let summary = "Multi-dimensional array with unknown dimensions"; let description = [{ Syntax: @@ -1218,7 +1281,7 @@ def Builtin_UnrankedTensor : Builtin_Type<"UnrankedTensor", "unranked_tensor", [ }]> ]; let extraClassDeclaration = [{ - using TensorType::clone; + using ShapedType::Trait::clone; using ShapedType::Trait::getElementTypeBitWidth; using ShapedType::Trait::getRank; using ShapedType::Trait::getNumElements; @@ -1229,6 +1292,13 @@ def Builtin_UnrankedTensor : Builtin_Type<"UnrankedTensor", "unranked_tensor", [ using ShapedType::Trait::getDynamicDimIndex; ArrayRef getShape() const { return std::nullopt; } + + /// Returns if this type is ranked (always false). + bool hasRank() const { return false; } + + /// Returns a clone of this type with the given shape and element type. 
+ TensorType cloneWith(std::optional> shape, + Type elementType) const; }]; let skipDefaultBuilders = 1; let genVerifyDecl = 1; @@ -1238,10 +1308,6 @@ def Builtin_UnrankedTensor : Builtin_Type<"UnrankedTensor", "unranked_tensor", [ // VectorType //===----------------------------------------------------------------------===// -def Builtin_VectorTypeElementType : AnyTypeOf<[AnyInteger, Index, AnyFloat]> { - let cppFunctionName = "isValidVectorTypeElementType"; -} - def Builtin_Vector : Builtin_Type<"Vector", "vector", [ShapedTypeInterface, ValueSemantics], "Type"> { let summary = "Multi-dimensional SIMD vector type"; @@ -1292,7 +1358,7 @@ def Builtin_Vector : Builtin_Type<"Vector", "vector", }]; let parameters = (ins ArrayRefParameter<"int64_t">:$shape, - Builtin_VectorTypeElementType:$elementType, + "Type":$elementType, ArrayRefParameter<"bool">:$scalableDims ); let builders = [ @@ -1316,8 +1382,11 @@ def Builtin_Vector : Builtin_Type<"Vector", "vector", class Builder; /// Returns true if the given type can be used as an element of a vector - /// type. See "Builtin_VectorTypeElementType" for allowed types. - static bool isValidElementType(Type t); + /// type. In particular, vectors can consist of integer, index, or float + /// primitives. + static bool isValidElementType(Type t) { + return ::llvm::isa(t); + } /// Returns true if the vector contains scalable dimensions. 
bool isScalable() const { diff --git a/mlir/include/mlir/IR/CMakeLists.txt b/mlir/include/mlir/IR/CMakeLists.txt index 0c7937dfd69e..6eff1e73590d 100644 --- a/mlir/include/mlir/IR/CMakeLists.txt +++ b/mlir/include/mlir/IR/CMakeLists.txt @@ -9,6 +9,8 @@ mlir_tablegen(OpAsmTypeInterface.cpp.inc -gen-type-interface-defs) add_public_tablegen_target(MLIROpAsmInterfaceIncGen) add_dependencies(mlir-generic-headers MLIROpAsmInterfaceIncGen) +add_mlir_type_interface(QuantizationInterface) + set(LLVM_TARGET_DEFINITIONS BuiltinAttributes.td) mlir_tablegen(BuiltinAttributes.h.inc -gen-attrdef-decls) mlir_tablegen(BuiltinAttributes.cpp.inc -gen-attrdef-defs) diff --git a/mlir/include/mlir/IR/DialectResourceBlobManager.h b/mlir/include/mlir/IR/DialectResourceBlobManager.h index e3f32b7a9ab5..6c30efde306e 100644 --- a/mlir/include/mlir/IR/DialectResourceBlobManager.h +++ b/mlir/include/mlir/IR/DialectResourceBlobManager.h @@ -93,9 +93,14 @@ class DialectResourceBlobManager { return HandleT(&entry, dialect); } + /// Provide access to all the registered blobs via a callable. During access + /// the blob map is guaranteed to remain unchanged. + void getBlobMap(llvm::function_ref &)> + accessor) const; + private: /// A mutex to protect access to the blob map. - llvm::sys::SmartRWMutex blobMapLock; + mutable llvm::sys::SmartRWMutex blobMapLock; /// The internal map of tracked blobs. StringMap stores entries in distinct /// allocations, so we can freely take references to the data without fear of diff --git a/mlir/include/mlir/IR/OpImplementation.h b/mlir/include/mlir/IR/OpImplementation.h index d9c925a9c56e..e4ab72217de2 100644 --- a/mlir/include/mlir/IR/OpImplementation.h +++ b/mlir/include/mlir/IR/OpImplementation.h @@ -116,6 +116,16 @@ class AsmPrinter { /// Return the raw output stream used by this printer. virtual raw_ostream &getStream() const; + /// Print a newline and indent the printer to the start of the current + /// operation. 
+ virtual void printNewline(); + + /// Increase indentation. + virtual void increaseIndent(); + + /// Decrease indentation. + virtual void decreaseIndent(); + /// Print the given floating point value in a stabilized form that can be /// roundtripped through the IR. This is the companion to the 'parseFloat' /// hook on the AsmParser. @@ -417,16 +427,6 @@ class OpAsmPrinter : public AsmPrinter { /// Print a loc(...) specifier if printing debug info is enabled. virtual void printOptionalLocationSpecifier(Location loc) = 0; - /// Print a newline and indent the printer to the start of the current - /// operation. - virtual void printNewline() = 0; - - /// Increase indentation. - virtual void increaseIndent() = 0; - - /// Decrease indentation. - virtual void decreaseIndent() = 0; - /// Print a block argument in the usual format of: /// %ssaName : type {attr1=42} loc("here") /// where location printing is controlled by the standard internal option. @@ -1602,14 +1602,20 @@ class OpAsmParser : public AsmParser { SmallVectorImpl &result) { size_t operandSize = llvm::range_size(operands); size_t typeSize = llvm::range_size(types); - if (operandSize != typeSize) + if (typeSize != 0 && operandSize != typeSize) return emitError(loc) << "number of operands and types do not match: got " << operandSize << " operands and " << typeSize << " types"; - for (auto [operand, type] : llvm::zip_equal(operands, types)) - if (resolveOperand(operand, type, result)) - return failure(); + if (typeSize == 0) { + for (auto it : operands) + if (resolveOperand(it, Type(), result)) + return failure(); + } else { + for (auto [operand, type] : llvm::zip_equal(operands, types)) + if (resolveOperand(operand, type, result)) + return failure(); + } return success(); } diff --git a/mlir/include/mlir/IR/QuantizationInterface.h b/mlir/include/mlir/IR/QuantizationInterface.h new file mode 100644 index 000000000000..00a3a9484808 --- /dev/null +++ b/mlir/include/mlir/IR/QuantizationInterface.h @@ -0,0 +1,23 @@ 
+//===- QuantizationInterface.h - Quantile Float Interfaces --------*- C++ +//-*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef MLIR_IR_QuantizationInterface_H +#define MLIR_IR_QuantizationInterface_H + +#include "mlir/IR/Types.h" + +// Forward declarations for the types we need in the implementation +namespace mlir { +class IntegerType; +class FloatType; +} // namespace mlir + +#include "mlir/IR/QuantizationInterface.h.inc" + +#endif // MLIR_IR_QuantizationInterface_H diff --git a/mlir/include/mlir/IR/QuantizationInterface.td b/mlir/include/mlir/IR/QuantizationInterface.td new file mode 100644 index 000000000000..9be6753cff2b --- /dev/null +++ b/mlir/include/mlir/IR/QuantizationInterface.td @@ -0,0 +1,45 @@ +#ifndef MLIR_IR_QUANTIZATIONINTERFACE +#define MLIR_IR_QUANTIZATIONINTERFACE + +include "mlir/IR/OpBase.td" + +def QuantizationInterface : TypeInterface<"QuantizationInterface"> { + let description = [{ + Interface for types that can be used as quantile storage types. + This interface provides methods to determine storage characteristics + like width and signedness for quantization purposes. + }]; + let cppNamespace = "::mlir"; + + let methods = [ + InterfaceMethod<[{ + Get the storage type width in bits. + Returns the number of bits used to store values of this type. + }], + "unsigned", "getStorageWidth", (ins)>, + + InterfaceMethod<[{ + Check if the storage type is signed. + Returns true if the type represents signed values, false for unsigned. + }], + "bool", "isStorageSigned", (ins)>, + + InterfaceMethod<[{ + Get the default minimum value for the storage type. 
+ }], + "int64_t", "getDefaultMinimum", (ins "bool":$isSigned, "unsigned":$integralWidth)>, + + InterfaceMethod<[{ + Get the default maximum value for the storage type. + }], + "int64_t", "getDefaultMaximum", (ins "bool":$isSigned, "unsigned":$integralWidth)>, + + InterfaceMethod<[{ + Get the name of the storage type. + }], + "std::string", "printStorageType", (ins "bool":$isSigned, "unsigned":$storageWidth)> + ]; + +} + +#endif // MLIR_IR_QUANTIZATIONINTERFACE diff --git a/mlir/include/mlir/IR/SymbolInterfaces.td b/mlir/include/mlir/IR/SymbolInterfaces.td index a8b04d045311..f715d82199bd 100644 --- a/mlir/include/mlir/IR/SymbolInterfaces.td +++ b/mlir/include/mlir/IR/SymbolInterfaces.td @@ -171,11 +171,6 @@ def Symbol : OpInterface<"SymbolOpInterface"> { if (concreteOp.isDeclaration() && concreteOp.isPublic()) return concreteOp.emitOpError("symbol declaration cannot have public " "visibility"); - auto parent = $_op->getParentOp(); - if (parent && !parent->hasTrait() && parent->isRegistered()) { - return concreteOp.emitOpError("symbol's parent must have the SymbolTable " - "trait"); - } return success(); }]; @@ -227,4 +222,6 @@ def SymbolUserOpInterface : OpInterface<"SymbolUserOpInterface"> { // Op defines a symbol table. 
def SymbolTable : NativeOpTrait<"SymbolTable">; +def SymbolContainer : NativeOpTrait<"SymbolContainer">; + #endif // MLIR_IR_SYMBOLINTERFACES diff --git a/mlir/include/mlir/IR/SymbolTable.h b/mlir/include/mlir/IR/SymbolTable.h index 597c6a9a1d89..e7c50691308e 100644 --- a/mlir/include/mlir/IR/SymbolTable.h +++ b/mlir/include/mlir/IR/SymbolTable.h @@ -259,6 +259,12 @@ class SymbolTable { StringAttr newSymbolName, Region *from); + static LogicalResult replaceAllSymbolUses(Operation *oldSymbol, + SymbolRefAttr newSymbolName, + Operation *from); + static LogicalResult replaceAllSymbolUses(Operation *oldSymbol, + SymbolRefAttr newSymbolName, + Region *from); private: Operation *symbolTableOp; @@ -405,6 +411,7 @@ class SymbolUserMap { /// Replace all of the uses of the given symbol with `newSymbolName`. void replaceAllUsesWith(Operation *symbol, StringAttr newSymbolName); + void replaceAllUsesWith(Operation *symbol, SymbolRefAttr newSymbolName); private: /// A reference to the symbol table used to construct this map. @@ -465,6 +472,40 @@ class SymbolTable : public TraitBase { } }; +template +class SymbolContainer : public TraitBase { + public: + static LogicalResult verifyTrait(Operation *op) { + return mlir::success(); //TODO::implement + } + + /// Look up a symbol with the specified name, returning null if no such + /// name exists. Symbol names never include the @ on them. Note: This + /// performs a linear scan of held symbols. 
+ Operation *lookupSymbol(StringAttr name) { + return mlir::SymbolTable::lookupSymbolIn(this->getOperation(), name); + } + template + T lookupSymbol(StringAttr name) { + return dyn_cast_or_null(lookupSymbol(name)); + } + Operation *lookupSymbol(SymbolRefAttr symbol) { + return mlir::SymbolTable::lookupSymbolIn(this->getOperation(), symbol); + } + template + T lookupSymbol(SymbolRefAttr symbol) { + return dyn_cast_or_null(lookupSymbol(symbol)); + } + + Operation *lookupSymbol(StringRef name) { + return mlir::SymbolTable::lookupSymbolIn(this->getOperation(), name); + } + template + T lookupSymbol(StringRef name) { + return dyn_cast_or_null(lookupSymbol(name)); + } +}; + } // namespace OpTrait //===----------------------------------------------------------------------===// diff --git a/mlir/include/mlir/InitAllDialects.h b/mlir/include/mlir/InitAllDialects.h index 0da82825c828..c5f661c41068 100644 --- a/mlir/include/mlir/InitAllDialects.h +++ b/mlir/include/mlir/InitAllDialects.h @@ -94,7 +94,9 @@ #include "mlir/Dialect/Vector/Transforms/BufferizableOpInterfaceImpl.h" #include "mlir/Dialect/Vector/Transforms/SubsetOpInterfaceImpl.h" #include "mlir/Dialect/X86Vector/X86VectorDialect.h" +#ifdef MLIR_DIALECT_XEGPU_ENABLE #include "mlir/Dialect/XeGPU/IR/XeGPU.h" +#endif #include "mlir/IR/Dialect.h" #include "mlir/Interfaces/CastInterfaces.h" #include "mlir/Target/LLVM/NVVM/Target.h" @@ -149,10 +151,13 @@ inline void registerAllDialects(DialectRegistry ®istry) { transform::TransformDialect, ub::UBDialect, vector::VectorDialect, - x86vector::X86VectorDialect, - xegpu::XeGPUDialect>(); + x86vector::X86VectorDialect>(); // clang-format on +#ifdef MLIR_DIALECT_XEGPU_ENABLE + register.insert(); +#endif + // Register all external models. 
affine::registerValueBoundsOpInterfaceExternalModels(registry); arith::registerBufferDeallocationOpInterfaceExternalModels(registry); diff --git a/mlir/include/mlir/InitAllPasses.h b/mlir/include/mlir/InitAllPasses.h index dd8b292a8734..385a24575a1f 100644 --- a/mlir/include/mlir/InitAllPasses.h +++ b/mlir/include/mlir/InitAllPasses.h @@ -45,7 +45,9 @@ #include "mlir/Dialect/Tosa/Transforms/Passes.h" #include "mlir/Dialect/Transform/Transforms/Passes.h" #include "mlir/Dialect/Vector/Transforms/Passes.h" +#ifdef MLIR_DIALECT_XEGPU_ENABLE #include "mlir/Dialect/XeGPU/Transforms/Passes.h" +#endif #include "mlir/Transforms/Passes.h" #include @@ -94,7 +96,10 @@ inline void registerAllPasses() { arm_sme::registerArmSMEPasses(); arm_sve::registerArmSVEPasses(); emitc::registerEmitCPasses(); + +#ifdef MLIR_DIALECT_XEGPU_ENABLE xegpu::registerXeGPUPasses(); +#endif // Dialect pipelines bufferization::registerBufferizationPipelines(); diff --git a/mlir/include/mlir/Interfaces/SideEffectInterfaces.h b/mlir/include/mlir/Interfaces/SideEffectInterfaces.h index aef7ec622fe4..e8d1d2514a9f 100644 --- a/mlir/include/mlir/Interfaces/SideEffectInterfaces.h +++ b/mlir/include/mlir/Interfaces/SideEffectInterfaces.h @@ -147,7 +147,7 @@ class EffectInstance { effectOnFullRegion(effectOnFullRegion) {} template ::value, + llvm::is_one_of::value, bool> = true> EffectInstance(EffectT *effect, T value, Resource *resource = DefaultResource::get()) @@ -155,7 +155,7 @@ class EffectInstance { effectOnFullRegion(false) {} template ::value, + llvm::is_one_of::value, bool> = true> EffectInstance(EffectT *effect, T value, int stage, bool effectOnFullRegion, Resource *resource = DefaultResource::get()) @@ -223,6 +223,9 @@ class EffectInstance { if (OpResult result = llvm::dyn_cast_if_present(value)) { return result; } + if (Value result = llvm::dyn_cast_if_present(value)) { + return result; + } return cast_if_present(value); } @@ -264,7 +267,7 @@ class EffectInstance { /// The Symbol, OpOperand, 
OpResult or BlockArgument that the effect applies /// to. This is optionally null. - PointerUnion value; + PointerUnion value; /// Additional parameters of the effect instance. An attribute is used for /// type-safe structured storage and context-based uniquing. Concrete effects diff --git a/mlir/include/mlir/Pass/Pass.h b/mlir/include/mlir/Pass/Pass.h index 7725a3a2910b..e8afc8e9e916 100644 --- a/mlir/include/mlir/Pass/Pass.h +++ b/mlir/include/mlir/Pass/Pass.h @@ -296,6 +296,10 @@ class Pass { /// Copy the option values from 'other', which is another instance of this /// pass. void copyOptionValuesFrom(const Pass *other); + + /// Copy the option values from 'other', which are PassPipeline options. + /// Here we copy only those options that have the same argument name. + void copyOptionValuesFrom(const detail::PassOptions &other); private: /// Out of line virtual method to ensure vtables and metadata are emitted to a diff --git a/mlir/include/mlir/Pass/PassOptions.h b/mlir/include/mlir/Pass/PassOptions.h index b5a9c25e3bac..dd9aa024c449 100644 --- a/mlir/include/mlir/Pass/PassOptions.h +++ b/mlir/include/mlir/Pass/PassOptions.h @@ -342,6 +342,9 @@ class PassOptions : protected llvm::cl::SubCommand { /// Copy the option values from 'other' into 'this', where 'other' has the /// same options as 'this'. void copyOptionValuesFrom(const PassOptions &other); + + /// Copy only those options that have the same argument name. + void matchAndCopyOptionValuesFrom(const PassOptions &otherPassOptions); /// Parse options out as key=value pairs that can then be handed off to the /// `llvm::cl` command line passing infrastructure. 
Everything is space @@ -380,7 +383,7 @@ class PassOptions : protected llvm::cl::SubCommand { /// ListOption someListFlag{*this, "flag-name", llvm::cl::desc("...")}; /// }; template -class PassPipelineOptions : public detail::PassOptions { +class PassPipelineOptions : public virtual detail::PassOptions { public: /// Factory that parses the provided options and returns a unique_ptr to the /// struct. diff --git a/mlir/include/mlir/Tools/mlir-opt/MlirOptMain.h b/mlir/include/mlir/Tools/mlir-opt/MlirOptMain.h index 09bd86b9581d..c003cbd95ee6 100644 --- a/mlir/include/mlir/Tools/mlir-opt/MlirOptMain.h +++ b/mlir/include/mlir/Tools/mlir-opt/MlirOptMain.h @@ -294,6 +294,11 @@ class MlirOptMainConfig { std::string generateReproducerFileFlag = ""; }; +/// VPUX-specific method to get value for arch kind from command line +/// and register HW-specific passes and pipelines +using AdditionalRegistrationFn = + std::function; + /// This defines the function type used to setup the pass manager. This can be /// used to pass in a callback to setup a default pass pipeline to be applied on /// the loaded IR. @@ -321,18 +326,12 @@ LogicalResult MlirOptMain(llvm::raw_ostream &outputStream, /// Implementation for tools like `mlir-opt`. /// - toolName is used for the header displayed by `--help`. /// - registry should contain all the dialects that can be parsed in the source. +/// - additionalRegistration will be called before the main command line parsing +/// to perform additional registrations. LogicalResult MlirOptMain(int argc, char **argv, llvm::StringRef toolName, - DialectRegistry ®istry); - -/// Implementation for tools like `mlir-opt`. -/// This function can be used with registerAndParseCLIOptions so that -/// CLI options can be accessed before running MlirOptMain. -/// - inputFilename is the name of the input mlir file. -/// - outputFilename is the name of the output file. -/// - registry should contain all the dialects that can be parsed in the source. 
-LogicalResult MlirOptMain(int argc, char **argv, llvm::StringRef inputFilename, - llvm::StringRef outputFilename, - DialectRegistry ®istry); + DialectRegistry ®istry, + const AdditionalRegistrationFn &additionalRegistration + = [](llvm::StringRef){}); /// Helper wrapper to return the result of MlirOptMain directly from main. /// diff --git a/mlir/include/mlir/Transforms/InliningUtils.h b/mlir/include/mlir/Transforms/InliningUtils.h index becfe9b047ef..1b70db7f6ef8 100644 --- a/mlir/include/mlir/Transforms/InliningUtils.h +++ b/mlir/include/mlir/Transforms/InliningUtils.h @@ -172,6 +172,14 @@ class DialectInlinerInterface return result; } + /// Hook to cleanup IR before erase call op + virtual void eraseCall(Operation *call) const; + + /// Hook to get proper place where callable region will be inlined + /// By default returns block of the call operation + virtual std::tuple + getInlineBlockAndPoint(Operation *call) const; + /// Process a set of blocks that have been inlined for a call. This callback /// is invoked before inlined terminator operations have been processed. virtual void processInlinedCallBlocks( diff --git a/mlir/lib/AsmParser/Parser.cpp b/mlir/lib/AsmParser/Parser.cpp index eccb3241012a..a368fa45d1da 100644 --- a/mlir/lib/AsmParser/Parser.cpp +++ b/mlir/lib/AsmParser/Parser.cpp @@ -1065,16 +1065,24 @@ Value OperationParser::resolveSSAUse(UnresolvedOperand useInfo, Type type) { // If we have already seen a value of this name, return it. if (useInfo.number < entries.size() && entries[useInfo.number].value) { Value result = entries[useInfo.number].value; + // Check that the type matches the other uses. 
- if (result.getType() == type) - return maybeRecordUse(result); - - emitError(useInfo.location, "use of value '") - .append(useInfo.name, - "' expects different type than prior uses: ", type, " vs ", - result.getType()) - .attachNote(getEncodedSourceLocation(entries[useInfo.number].loc)) - .append("prior use here"); + if (type && result.getType() != type) { + emitError(useInfo.location, "use of value '") + .append(useInfo.name, + "' expects different type than prior uses: ", type, " vs ", + result.getType()) + .attachNote(getEncodedSourceLocation(entries[useInfo.number].loc)) + .append("prior use here"); + return nullptr; + } + + return maybeRecordUse(result); + } + + if (!type) { + emitError(useInfo.location, "forward reference of value '") + .append(useInfo.name, "' requires explicit type specification"); return nullptr; } diff --git a/mlir/lib/Bindings/Python/DialectQuant.cpp b/mlir/lib/Bindings/Python/DialectQuant.cpp index 29f19c9c5006..55571cd1e50a 100644 --- a/mlir/lib/Bindings/Python/DialectQuant.cpp +++ b/mlir/lib/Bindings/Python/DialectQuant.cpp @@ -9,10 +9,11 @@ #include #include +#include "mlir-c/BuiltinAttributes.h" #include "mlir-c/Dialect/Quant.h" #include "mlir-c/IR.h" -#include "mlir/Bindings/Python/NanobindAdaptors.h" #include "mlir/Bindings/Python/Nanobind.h" +#include "mlir/Bindings/Python/NanobindAdaptors.h" namespace nb = nanobind; using namespace llvm; @@ -284,6 +285,79 @@ static void populateDialectQuantSubmodule(const nb::module_ &m) { }, "Fixed point values are real numbers divided by a scale."); + //===-------------------------------------------------------------------===// + // UniformQuantizedSubChannelType + //===-------------------------------------------------------------------===// + auto uniformQuantizedSubChannelType = mlir_type_subclass( + m, "UniformQuantizedSubChannelType", + mlirTypeIsAUniformQuantizedSubChannelType, quantizedType.get_class()); + uniformQuantizedSubChannelType.def_classmethod( + "get", + [](nb::object cls, 
unsigned flags, MlirType storageType, + MlirType expressedType, MlirAttribute scales, MlirAttribute zeroPoints, + std::vector quantizedDimensions, + std::vector blockSizes, int64_t storageTypeMin, + int64_t storageTypeMax) { + return cls(mlirUniformQuantizedSubChannelTypeGet( + flags, storageType, expressedType, scales, zeroPoints, + static_cast(blockSizes.size()), + quantizedDimensions.data(), blockSizes.data(), storageTypeMin, + storageTypeMax)); + }, + "Gets an instance of UniformQuantizedSubChannel in the same context as " + "the provided storage type.", + nb::arg("cls"), nb::arg("flags"), nb::arg("storage_type"), + nb::arg("expressed_type"), nb::arg("scales"), nb::arg("zero_points"), + nb::arg("quantized_dimensions"), nb::arg("block_sizes"), + nb::arg("storage_type_min"), nb::arg("storage_type_max")); + uniformQuantizedSubChannelType.def_property_readonly( + "quantized_dimensions", + [](MlirType type) { + intptr_t nDim = + mlirUniformQuantizedSubChannelTypeGetNumBlockSizes(type); + std::vector quantizedDimensions; + quantizedDimensions.reserve(nDim); + for (intptr_t i = 0; i < nDim; ++i) { + quantizedDimensions.push_back( + mlirUniformQuantizedSubChannelTypeGetQuantizedDimension(type, i)); + } + return quantizedDimensions; + }, + "Gets the quantized dimensions. Each element in the returned list " + "represents an axis of the quantized data tensor that has a specified " + "block size. The order of elements corresponds to the order of block " + "sizes returned by 'block_sizes' method. 
It means that the data tensor " + "is quantized along the i-th dimension in the returned list using the " + "i-th block size from block_sizes method."); + uniformQuantizedSubChannelType.def_property_readonly( + "block_sizes", + [](MlirType type) { + intptr_t nDim = + mlirUniformQuantizedSubChannelTypeGetNumBlockSizes(type); + std::vector blockSizes; + blockSizes.reserve(nDim); + for (intptr_t i = 0; i < nDim; ++i) { + blockSizes.push_back( + mlirUniformQuantizedSubChannelTypeGetBlockSize(type, i)); + } + return blockSizes; + }, + "Gets the block sizes for the quantized dimensions. The i-th element in " + "the returned list corresponds to the block size for the i-th dimension " + "in the list returned by quantized_dimensions method."); + uniformQuantizedSubChannelType.def_property_readonly( + "scales", + [](MlirType type) -> MlirAttribute { + return mlirUniformQuantizedSubChannelTypeGetScales(type); + }, + "The scales of the quantized type."); + uniformQuantizedSubChannelType.def_property_readonly( + "zero_points", + [](MlirType type) -> MlirAttribute { + return mlirUniformQuantizedSubChannelTypeGetZeroPoints(type); + }, + "The zero points of the quantized type."); + //===-------------------------------------------------------------------===// // CalibratedQuantizedType //===-------------------------------------------------------------------===// diff --git a/mlir/lib/CAPI/Dialect/Quant.cpp b/mlir/lib/CAPI/Dialect/Quant.cpp index c94dbb5692fd..01a6a948f1dc 100644 --- a/mlir/lib/CAPI/Dialect/Quant.cpp +++ b/mlir/lib/CAPI/Dialect/Quant.cpp @@ -7,6 +7,7 @@ //===----------------------------------------------------------------------===// #include "mlir-c/Dialect/Quant.h" +#include "mlir-c/BuiltinAttributes.h" #include "mlir/CAPI/Registration.h" #include "mlir/Dialect/Quant/IR/Quant.h" #include "mlir/Dialect/Quant/IR/QuantTypes.h" @@ -194,6 +195,61 @@ bool mlirUniformQuantizedPerAxisTypeIsFixedPoint(MlirType type) { return cast(unwrap(type)).isFixedPoint(); } 
+//===---------------------------------------------------------------------===// +// UniformQuantizedSubChannelType +//===---------------------------------------------------------------------===// + +bool mlirTypeIsAUniformQuantizedSubChannelType(MlirType type) { + return isa(unwrap(type)); +} + +MlirType mlirUniformQuantizedSubChannelTypeGet( + unsigned flags, MlirType storageType, MlirType expressedType, + MlirAttribute scalesAttr, MlirAttribute zeroPointsAttr, intptr_t nDims, + int32_t *quantizedDimensions, int64_t *blockSizes, int64_t storageTypeMin, + int64_t storageTypeMax) { + auto scales = dyn_cast(unwrap(scalesAttr)); + auto zeroPoints = dyn_cast(unwrap(zeroPointsAttr)); + + if (!scales || !zeroPoints) { + return {}; + } + + return wrap(quant::UniformQuantizedSubChannelType::get( + flags, unwrap(storageType), unwrap(expressedType), scales, zeroPoints, + llvm::ArrayRef(quantizedDimensions, nDims), + llvm::ArrayRef(blockSizes, nDims), storageTypeMin, + storageTypeMax)); +} + +intptr_t mlirUniformQuantizedSubChannelTypeGetNumBlockSizes(MlirType type) { + return cast(unwrap(type)) + .getBlockSizes() + .size(); +} + +int32_t mlirUniformQuantizedSubChannelTypeGetQuantizedDimension(MlirType type, + intptr_t pos) { + return cast(unwrap(type)) + .getQuantizedDimensions()[pos]; +} + +int64_t mlirUniformQuantizedSubChannelTypeGetBlockSize(MlirType type, + intptr_t pos) { + return cast(unwrap(type)) + .getBlockSizes()[pos]; +} + +MlirAttribute mlirUniformQuantizedSubChannelTypeGetScales(MlirType type) { + return wrap( + cast(unwrap(type)).getScales()); +} + +MlirAttribute mlirUniformQuantizedSubChannelTypeGetZeroPoints(MlirType type) { + return wrap(cast(unwrap(type)) + .getZeroPoints()); +} + //===---------------------------------------------------------------------===// // CalibratedQuantizedType //===---------------------------------------------------------------------===// diff --git a/mlir/lib/Conversion/CMakeLists.txt b/mlir/lib/Conversion/CMakeLists.txt index 
a570978f0375..81d6eb320a32 100644 --- a/mlir/lib/Conversion/CMakeLists.txt +++ b/mlir/lib/Conversion/CMakeLists.txt @@ -62,7 +62,9 @@ add_subdirectory(TosaToArith) add_subdirectory(TosaToLinalg) add_subdirectory(TosaToMLProgram) add_subdirectory(TosaToSCF) -add_subdirectory(TosaToTensor) +if(MLIR_CONVERSION_TOSATOTENSOR_ENABLE) + add_subdirectory(TosaToTensor) +endif() add_subdirectory(UBToLLVM) add_subdirectory(UBToSPIRV) add_subdirectory(VectorToArmSME) @@ -70,4 +72,6 @@ add_subdirectory(VectorToGPU) add_subdirectory(VectorToLLVM) add_subdirectory(VectorToSCF) add_subdirectory(VectorToSPIRV) -add_subdirectory(VectorToXeGPU) +if(MLIR_DIALECT_XEGPU_ENABLE) + add_subdirectory(VectorToXeGPU) +endif() diff --git a/mlir/lib/Conversion/TosaToTensor/CMakeLists.txt b/mlir/lib/Conversion/TosaToTensor/CMakeLists.txt index 2870baa20757..992b2f816478 100644 --- a/mlir/lib/Conversion/TosaToTensor/CMakeLists.txt +++ b/mlir/lib/Conversion/TosaToTensor/CMakeLists.txt @@ -1,20 +1,22 @@ -add_mlir_conversion_library(MLIRTosaToTensor - TosaToTensor.cpp - TosaToTensorPass.cpp +if(MLIR_CONVERSION_TOSATOTENSOR_ENABLE) + add_mlir_conversion_library(MLIRTosaToTensor + TosaToTensor.cpp + TosaToTensorPass.cpp - ADDITIONAL_HEADER_DIRS - ${MLIR_MAIN_INCLUDE_DIR}/mlir/Dialect/Tosa - ${MLIR_MAIN_INCLUDE_DIR}/mlir/IR + ADDITIONAL_HEADER_DIRS + ${MLIR_MAIN_INCLUDE_DIR}/mlir/Dialect/Tosa + ${MLIR_MAIN_INCLUDE_DIR}/mlir/IR - DEPENDS - MLIRConversionPassIncGen + DEPENDS + MLIRConversionPassIncGen - LINK_LIBS PUBLIC - MLIRTensorDialect - MLIRTensorUtils - MLIRIR - MLIRPass - MLIRTosaDialect - MLIRTosaTransforms - MLIRSupport - ) + LINK_LIBS PUBLIC + MLIRTensorDialect + MLIRTensorUtils + MLIRIR + MLIRPass + MLIRTosaDialect + MLIRTosaTransforms + MLIRSupport + ) +endif() diff --git a/mlir/lib/Conversion/VectorToXeGPU/CMakeLists.txt b/mlir/lib/Conversion/VectorToXeGPU/CMakeLists.txt index 567083da0023..b90c874ef56b 100644 --- a/mlir/lib/Conversion/VectorToXeGPU/CMakeLists.txt +++ 
b/mlir/lib/Conversion/VectorToXeGPU/CMakeLists.txt @@ -1,16 +1,18 @@ -add_mlir_conversion_library(MLIRVectorToXeGPU - VectorToXeGPU.cpp +if(MLIR_DIALECT_XEGPU_ENABLE) + add_mlir_conversion_library(MLIRVectorToXeGPU + VectorToXeGPU.cpp - ADDITIONAL_HEADER_DIRS - ${MLIR_MAIN_INCLUDE_DIR}/mlir/Conversion/VectorToXeGPU + ADDITIONAL_HEADER_DIRS + ${MLIR_MAIN_INCLUDE_DIR}/mlir/Conversion/VectorToXeGPU - DEPENDS - MLIRConversionPassIncGen + DEPENDS + MLIRConversionPassIncGen - LINK_LIBS PUBLIC - MLIRArithDialect - MLIRMemRefDialect - MLIRTransforms - MLIRVectorDialect - MLIRXeGPUDialect - ) + LINK_LIBS PUBLIC + MLIRArithDialect + MLIRMemRefDialect + MLIRTransforms + MLIRVectorDialect + MLIRXeGPUDialect + ) +endif() \ No newline at end of file diff --git a/mlir/lib/Dialect/Arith/IR/CMakeLists.txt b/mlir/lib/Dialect/Arith/IR/CMakeLists.txt index 4beb99ccfdfb..ffcdf46d7a80 100644 --- a/mlir/lib/Dialect/Arith/IR/CMakeLists.txt +++ b/mlir/lib/Dialect/Arith/IR/CMakeLists.txt @@ -22,6 +22,7 @@ add_mlir_dialect_library(MLIRArithDialect MLIRArithOpsInterfacesIncGen LINK_LIBS PUBLIC + MLIRTransformUtils MLIRCastInterfaces MLIRDialect MLIRInferIntRangeCommon diff --git a/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp b/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp index 1eb27e44810b..7967f2973e6a 100644 --- a/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp +++ b/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp @@ -719,8 +719,7 @@ void bufferization::replaceOpWithBufferizedValues(RewriterBase &rewriter, if (llvm::isa(opResult.getType())) { // The OpResult is a tensor. Such values are replaced with memrefs during // bufferization. - assert((llvm::isa(replacement.getType()) || - llvm::isa(replacement.getType())) && + assert(llvm::isa(replacement.getType()) && "tensor op result should be replaced with a memref value"); // The existing uses of the OpResult still expect a tensor. Insert a // ToTensorOp. 
Throughout bufferization, this ToTensorOp will gradually diff --git a/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp b/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp index 6be55a1d2822..1ee5345cff41 100644 --- a/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp +++ b/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp @@ -14,6 +14,7 @@ #include "mlir/Dialect/SparseTensor/IR/SparseTensor.h" #include "mlir/Dialect/Tensor/IR/Tensor.h" #include "mlir/IR/Matchers.h" +#include "mlir/IR/BuiltinTypeInterfaces.h" #include using namespace mlir; diff --git a/mlir/lib/Dialect/CMakeLists.txt b/mlir/lib/Dialect/CMakeLists.txt index 80b0ef068d96..30d24df93a58 100644 --- a/mlir/lib/Dialect/CMakeLists.txt +++ b/mlir/lib/Dialect/CMakeLists.txt @@ -42,7 +42,9 @@ add_subdirectory(UB) add_subdirectory(Utils) add_subdirectory(Vector) add_subdirectory(X86Vector) -add_subdirectory(XeGPU) +if(MLIR_DIALECT_XEGPU_ENABLE) + add_subdirectory(XeGPU) +endif() set(LLVM_OPTIONAL_SOURCES Traits.cpp diff --git a/mlir/lib/Dialect/LLVMIR/CMakeLists.txt b/mlir/lib/Dialect/LLVMIR/CMakeLists.txt index c9a3b9729456..8437f2630c05 100644 --- a/mlir/lib/Dialect/LLVMIR/CMakeLists.txt +++ b/mlir/lib/Dialect/LLVMIR/CMakeLists.txt @@ -28,6 +28,7 @@ add_mlir_dialect_library(MLIRLLVMDialect Core LINK_LIBS PUBLIC + MLIRTransformUtils MLIRCallInterfaces MLIRControlFlowInterfaces MLIRDataLayoutInterfaces diff --git a/mlir/lib/Dialect/NVGPU/TransformOps/NVGPUTransformOps.cpp b/mlir/lib/Dialect/NVGPU/TransformOps/NVGPUTransformOps.cpp index 556922a64b09..c35a2cf35c2f 100644 --- a/mlir/lib/Dialect/NVGPU/TransformOps/NVGPUTransformOps.cpp +++ b/mlir/lib/Dialect/NVGPU/TransformOps/NVGPUTransformOps.cpp @@ -153,7 +153,7 @@ DiagnosedSilenceableFailure transform::CreateAsyncGroupsOp::applyToOne( /// Returns true if the given type has the default memory space. 
static bool hasDefaultMemorySpace(BaseMemRefType type) { - return !type.getMemorySpace() || type.getMemorySpaceAsInt() == 0; + return !type.getMemorySpace(); } /// Returns true if the given type has the shared (workgroup) memory space. diff --git a/mlir/lib/Dialect/Quant/IR/QuantDialectBytecode.cpp b/mlir/lib/Dialect/Quant/IR/QuantDialectBytecode.cpp index 6a4ac310eb05..44ec0c517d56 100644 --- a/mlir/lib/Dialect/Quant/IR/QuantDialectBytecode.cpp +++ b/mlir/lib/Dialect/Quant/IR/QuantDialectBytecode.cpp @@ -13,6 +13,7 @@ #include "mlir/Dialect/Quant/IR/QuantTypes.h" #include "mlir/IR/Diagnostics.h" #include "llvm/ADT/APFloat.h" +#include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/TypeSwitch.h" diff --git a/mlir/lib/Dialect/Quant/IR/QuantOps.cpp b/mlir/lib/Dialect/Quant/IR/QuantOps.cpp index c584903f3a15..804fe38b2bbb 100644 --- a/mlir/lib/Dialect/Quant/IR/QuantOps.cpp +++ b/mlir/lib/Dialect/Quant/IR/QuantOps.cpp @@ -25,22 +25,17 @@ namespace { // Verify the integrity of per-axis quantization information, if present. // -// - quantizedType -// Any quantized type. Any quantized type with no per-axis quantization is -// ignored. +// - uniformQuantizedPerAxisType +// A quantized type with per-axis quantization. // // - containerType // Original input or result type of the operation using the provided quantized // type. Used to ensure that the quantized type appears within a tensor and // that the tensor is compatible with per-axis quantization information. 
// -LogicalResult verifyPerAxisQuantization(Operation *op, - QuantizedType quantizedType, - Type containerType) { - auto quantizedPerAxisType = dyn_cast(quantizedType); - if (!quantizedPerAxisType) - return success(); - +LogicalResult verifyPerAxisQuantization( + Operation *op, UniformQuantizedPerAxisType uniformQuantizedPerAxisType, + Type containerType) { auto tensorType = dyn_cast(containerType); if (!tensorType) return op->emitError("scalar types may not use per-axis quantization"); @@ -48,19 +43,112 @@ LogicalResult verifyPerAxisQuantization(Operation *op, if (!tensorType.hasRank()) return success(); - int64_t quantizedDimension = quantizedPerAxisType.getQuantizedDimension(); - if (quantizedDimension >= tensorType.getRank()) + int32_t quantizedDimension = + uniformQuantizedPerAxisType.getQuantizedDimension(); + if ((int64_t)quantizedDimension >= tensorType.getRank()) return op->emitError("quantized dimension must be less than tensor rank"); int64_t quantizedDimensionSize = tensorType.getDimSize(quantizedDimension); if (quantizedDimensionSize != ShapedType::kDynamic && - quantizedDimensionSize != (int64_t)quantizedPerAxisType.getScales().size()) + quantizedDimensionSize != + (int64_t)uniformQuantizedPerAxisType.getScales().size()) return op->emitError( "quantized dimension size does not match number of scales"); return success(); } +// Verifies that the sub-channel quantization parameters are consistent with +// the given container type. The function checks the following: +// +// - The container type must be a ranked tensor type. +// - Each quantized dimension must be less than the rank of the tensor. +// - The size of each dimension at the quantized dimension must be divisible +// by the corresponding block size. +// - The scale dimension size at each axis index should match the tensor +// dimension at the index divided by the corresponding block size. 
+// +// The `uniformQuantizedSubChannelType` argument provides the sub-channel +// quantization parameters, and the `containerType` argument specifies the +// type of the container holding the quantized data. +// +LogicalResult verifySubChannelQuantization( + Operation *op, + UniformQuantizedSubChannelType uniformQuantizedSubChannelType, + Type containerType) { + auto tensorType = dyn_cast(containerType); + if (!tensorType) + return op->emitError("scalar types may not use sub-channel quantization"); + + if (!tensorType.hasRank()) + return op->emitError( + "tensor containing the sub-channel quantized type must be ranked"); + + const SmallVector> &blockSizeInfo = + uniformQuantizedSubChannelType.getBlockSizeInfo(); + auto shape = tensorType.getShape(); + + // The dimension size of scale for an axis which is not specified as quantized + // dimension should be 1. + SmallVector expectedScaleShape(tensorType.getShape().size(), 1); + for (auto [quantizedDimension, blockSize] : blockSizeInfo) { + if (quantizedDimension >= tensorType.getRank()) + return op->emitError() + << "quantized dimension " << quantizedDimension + << " must be less than tensor rank " << tensorType.getRank(); + if (!tensorType.isDynamicDim(quantizedDimension) && + tensorType.getDimSize(quantizedDimension) % blockSize != 0) + return op->emitError() + << "tensor dimension size " + << tensorType.getDimSize(quantizedDimension) << " at axis " + << quantizedDimension + << " must be divisible by the corresponding block size " + << blockSize; + if (tensorType.isDynamicDim(quantizedDimension)) + expectedScaleShape[quantizedDimension] = ShapedType::kDynamic; + else + expectedScaleShape[quantizedDimension] = + tensorType.getDimSize(quantizedDimension) / blockSize; + } + + // Block sizes must be greater than 0 and divide the corresponding dimension + // size. 
While a block size b must be less than or equal to the corresponding + // dimension size d, this constraint is implicitly enforced by requiring that + // d % b == 0 when d != 0. + // + // However, a problem arises when d = 0. The divisibility constraint allows b + // to be any value, potentially violating the requirement that b <= d. + // Furthermore, if b is unspecified (implicitly equal to d), it violates the + // constraint that b > 0. + // + // Therefore, we explicitly disallow the case where d = 0 to maintain + // consistency and avoid these issues. + if (llvm::find(tensorType.getShape(), 0) != tensorType.getShape().end()) { + return op->emitError() << "tensor dimension size of zero is not allowed " + "with sub-channel quantization"; + } + + auto scaleShape = + uniformQuantizedSubChannelType.getScales().getType().getShape(); + if (scaleShape.size() != shape.size()) { + return op->emitError() << "Rank of scales " << scaleShape.size() + << " must match " + << "the rank of the tensor " << shape.size(); + } + + for (auto [index, scaleDim] : llvm::enumerate(expectedScaleShape)) { + if (expectedScaleShape[index] != ShapedType::kDynamic && + expectedScaleShape[index] != scaleShape[index]) + return op->emitError() << "dimension size " << scaleDim + << " of scales tensor at axis " << index + << " should match (tensor dimension at axis / " + "block sizes at axis) = " + << expectedScaleShape[index]; + } + + return success(); +} + // Common verification logic for 'quant.dcast' and 'quant.qcast' ops. // // - quantizedType @@ -80,8 +168,20 @@ LogicalResult verifyQuantizationOp(Operation *op, QuantizedType quantizedType, return op->emitError( "expressed type in quantized type expected to match float type"); - // Veriy integrity of per-axis quantization information, if present. - return verifyPerAxisQuantization(op, quantizedType, containerType); + // Verify integrity of per-axis quantization information, if present. 
+ if (auto quantizedPerAxisType = + dyn_cast(quantizedType)) { + return verifyPerAxisQuantization(op, quantizedPerAxisType, containerType); + } + + if (auto quantizedSubChannelType = + dyn_cast(quantizedType)) { + return verifySubChannelQuantization(op, quantizedSubChannelType, + containerType); + } + + // At this point the type is UniformQuantizedType + return success(); } } // namespace @@ -93,7 +193,8 @@ LogicalResult verifyQuantizationOp(Operation *op, QuantizedType quantizedType, void QuantDialect::initialize() { addTypes(); + UniformQuantizedPerAxisType, QuantileQuantizedType, + QuantileQuantizedPerAxisType, UniformQuantizedSubChannelType>(); addOperations< #define GET_OP_LIST #include "mlir/Dialect/Quant/IR/QuantOps.cpp.inc" @@ -175,7 +276,20 @@ LogicalResult StorageCastOp::verify() { // Verify integrity of per-axis quantization information, if available. While // the quantization type may appear in the input or the result, their tensor // shapes are guaranteed to be identical at this point. 
- return verifyPerAxisQuantization(*this, quantizedType, getInput().getType()); + if (auto quantizedPerAxisType = + dyn_cast(quantizedType)) { + return verifyPerAxisQuantization(*this, quantizedPerAxisType, + getInput().getType()); + } + + if (auto quantizedSunChannelType = + dyn_cast(quantizedType)) { + return verifySubChannelQuantization(*this, quantizedSunChannelType, + getInput().getType()); + } + + // At this point the type is UniformQuantizedType + return success(); } OpFoldResult StorageCastOp::fold(FoldAdaptor adaptor) { diff --git a/mlir/lib/Dialect/Quant/IR/QuantTypes.cpp b/mlir/lib/Dialect/Quant/IR/QuantTypes.cpp index 7c0d36964865..714fb44beea9 100644 --- a/mlir/lib/Dialect/Quant/IR/QuantTypes.cpp +++ b/mlir/lib/Dialect/Quant/IR/QuantTypes.cpp @@ -6,15 +6,17 @@ // //===----------------------------------------------------------------------===// +#include "mlir/Dialect/Quant/IR/QuantTypes.h" #include "TypeDetail.h" #include "mlir/Dialect/Quant/IR/Quant.h" -#include "mlir/Dialect/Quant/IR/QuantTypes.h" #include "mlir/IR/BuiltinTypes.h" #include "mlir/IR/MLIRContext.h" +#include "mlir/IR/QuantizationInterface.h" #include "llvm/ADT/StringRef.h" #include "llvm/ADT/Twine.h" #include "llvm/Support/MathExtras.h" +#include using namespace mlir; using namespace mlir::quant; @@ -34,7 +36,7 @@ double getMaxScale(Type expressedType) { return APFloat::getLargest(floatType.getFloatSemantics()).convertToDouble(); } -} // namespace +} // namespace unsigned QuantizedType::getFlags() const { return static_cast(impl)->flags; @@ -49,31 +51,42 @@ QuantizedType::verifyInvariants(function_ref emitError, unsigned flags, Type storageType, Type expressedType, int64_t storageTypeMin, int64_t storageTypeMax) { - // Verify that the storage type is integral. - // This restriction may be lifted at some point in favor of using bf16 - // or f16 as exact representations on hardware where that is advantageous. 
- auto intStorageType = llvm::dyn_cast(storageType); - if (!intStorageType) - return emitError() << "storage type must be integral"; - unsigned integralWidth = intStorageType.getWidth(); - - // Verify storage width. - if (integralWidth == 0 || integralWidth > MaxStorageBits) - return emitError() << "illegal storage type size: " << integralWidth; - // Verify storageTypeMin and storageTypeMax. + std::cout << "verify QuantizedType" << std::endl; bool isSigned = (flags & QuantizationFlags::Signed) == QuantizationFlags::Signed; - int64_t defaultIntegerMin = - getDefaultMinimumForInteger(isSigned, integralWidth); - int64_t defaultIntegerMax = - getDefaultMaximumForInteger(isSigned, integralWidth); - if (storageTypeMax - storageTypeMin <= 0 || - storageTypeMin < defaultIntegerMin || - storageTypeMax > defaultIntegerMax) { + + // Integral storage type width checks + if (storageType.isa()) { + unsigned integralWidth = + llvm::dyn_cast(storageType).getWidth(); + + if (integralWidth == 0 || integralWidth > MaxStorageBits) + return emitError() << "illegal storage type size: " << integralWidth; + } + + std::cout << "Before quantile cast" << std::endl; + int64_t defaultMin, defaultMax; + if (auto quantizationInterface = + llvm::dyn_cast(storageType)) { + // const auto width = llvm::dyn_cast(storageType).getWidth(); + const auto width = quantizationInterface.getStorageWidth(); + defaultMin = quantizationInterface.getDefaultMinimum(isSigned, width); + defaultMax = quantizationInterface.getDefaultMaximum(isSigned, width); + std::cout << "defaultMin: " << defaultMin << ", defaultMax: " << defaultMax + << std::endl; + } else { + return emitError() << "illegal storage type, supported types are: integral " + "types, Float8E4M3FNType and Float8E5M2Type "; + } + + // Verify storageTypeMin and storageTypeMax. 
+ if (storageTypeMax - storageTypeMin <= 0 || storageTypeMin < defaultMin || + storageTypeMax > defaultMax) { return emitError() << "illegal storage min and storage max: (" << storageTypeMin << ":" << storageTypeMax << ")"; } + std::cout << "verify QuantizedType END" << std::endl; return success(); } @@ -82,10 +95,28 @@ Type QuantizedType::getStorageType() const { } int64_t QuantizedType::getStorageTypeMin() const { + Type storageType = static_cast(impl)->storageType; + + if (auto quantizationInterface = + llvm::dyn_cast(storageType)) { + unsigned storageWidth = quantizationInterface.getStorageWidth(); + bool isSigned = quantizationInterface.isStorageSigned(); + return quantizationInterface.getDefaultMinimum(isSigned, storageWidth); + } + return static_cast(impl)->storageTypeMin; } int64_t QuantizedType::getStorageTypeMax() const { + Type storageType = static_cast(impl)->storageType; + + if (auto quantizationInterface = + llvm::dyn_cast(storageType)) { + unsigned storageWidth = quantizationInterface.getStorageWidth(); + bool isSigned = quantizationInterface.isStorageSigned(); + return quantizationInterface.getDefaultMaximum(isSigned, storageWidth); + } + return static_cast(impl)->storageTypeMax; } @@ -103,7 +134,14 @@ bool QuantizedType::hasStorageTypeBounds() const { unsigned QuantizedType::getStorageTypeIntegralWidth() const { // NOTE: If ever supporting non-integral storage types, some other scheme // for determining the width will be needed. 
- return static_cast(impl)->storageType.getIntOrFloatBitWidth(); + Type storageType = static_cast(impl)->storageType; + + if (auto quantizationInterface = + llvm::dyn_cast(storageType)) { + return quantizationInterface.getStorageWidth(); + } + + return storageType.getIntOrFloatBitWidth(); } Type QuantizedType::getExpressedType() const { @@ -285,6 +323,7 @@ UniformQuantizedType UniformQuantizedType::get(unsigned flags, Type storageType, int64_t zeroPoint, int64_t storageTypeMin, int64_t storageTypeMax) { + std::cout << "Creating UniformQuantizedType" << std::endl; return Base::get(storageType.getContext(), flags, storageType, expressedType, scale, zeroPoint, storageTypeMin, storageTypeMax); } @@ -293,6 +332,7 @@ UniformQuantizedType UniformQuantizedType::getChecked( function_ref emitError, unsigned flags, Type storageType, Type expressedType, double scale, int64_t zeroPoint, int64_t storageTypeMin, int64_t storageTypeMax) { + std::cout << "getChecked UniformQuantizedType" << std::endl; return Base::getChecked(emitError, storageType.getContext(), flags, storageType, expressedType, scale, zeroPoint, storageTypeMin, storageTypeMax); @@ -302,6 +342,7 @@ LogicalResult UniformQuantizedType::verifyInvariants( function_ref emitError, unsigned flags, Type storageType, Type expressedType, double scale, int64_t zeroPoint, int64_t storageTypeMin, int64_t storageTypeMax) { + std::cout << "verifying UniformQuantizedType" << std::endl; if (failed(QuantizedType::verifyInvariants(emitError, flags, storageType, expressedType, storageTypeMin, storageTypeMax))) { @@ -320,15 +361,18 @@ LogicalResult UniformQuantizedType::verifyInvariants( return emitError() << "expressed type must be floating point"; // Verify scale. 
- double minScale = getMinScale(expressedType); - double maxScale = getMaxScale(expressedType); - if (scale < minScale || scale > maxScale) - return emitError() << "scale out of expressed type range [" << minScale - << ", " << maxScale << "]"; + if (std::isinf(scale) || std::isnan(scale)) + return emitError() << "illegal scale: " << scale; + std::cout << "verifying UniformQuantizedType END" << std::endl; return success(); } +bool UniformQuantizedType::classof(mlir::Type type) { + return type.getTypeID() == mlir::TypeID::get() || + type.getTypeID() == mlir::TypeID::get(); +} + double UniformQuantizedType::getScale() const { return getImpl()->scale; } int64_t UniformQuantizedType::getZeroPoint() const { @@ -383,12 +427,9 @@ LogicalResult UniformQuantizedPerAxisType::verifyInvariants( << scales.size() << ", " << zeroPoints.size(); // Verify scale. - double minScale = getMinScale(expressedType); - double maxScale = getMaxScale(expressedType); for (double scale : scales) { - if (scale < minScale || scale > maxScale) - return emitError() << "scale out of expressed type range [" << minScale - << ", " << maxScale << "]"; + if (std::isinf(scale) || std::isnan(scale)) + return emitError() << "illegal scale: " << scale; } // Verify quantized dimension. 
@@ -398,6 +439,11 @@ LogicalResult UniformQuantizedPerAxisType::verifyInvariants( return success(); } +bool UniformQuantizedPerAxisType::classof(mlir::Type type) { + return type.getTypeID() == mlir::TypeID::get() || + type.getTypeID() == mlir::TypeID::get(); +} + ArrayRef UniformQuantizedPerAxisType::getScales() const { return getImpl()->getScales(); } @@ -410,6 +456,291 @@ int32_t UniformQuantizedPerAxisType::getQuantizedDimension() const { return getImpl()->quantizedDimension; } +UniformQuantizedSubChannelType UniformQuantizedSubChannelType::get( + unsigned flags, Type storageType, Type expressedType, + DenseElementsAttr scales, DenseElementsAttr zeroPoints, + ArrayRef quantizedDimensions, ArrayRef blockSizes, + int64_t storageTypeMin, int64_t storageTypeMax) { + return Base::get(storageType.getContext(), flags, storageType, expressedType, + scales, zeroPoints, quantizedDimensions, blockSizes, + storageTypeMin, storageTypeMax); +} + +UniformQuantizedSubChannelType UniformQuantizedSubChannelType::getChecked( + function_ref emitError, unsigned flags, + Type storageType, Type expressedType, DenseElementsAttr scales, + DenseElementsAttr zeroPoints, ArrayRef quantizedDimensions, + ArrayRef blockSizes, int64_t storageTypeMin, + int64_t storageTypeMax) { + return Base::getChecked(emitError, storageType.getContext(), flags, + storageType, expressedType, scales, zeroPoints, + quantizedDimensions, blockSizes, storageTypeMin, + storageTypeMax); +} + +LogicalResult UniformQuantizedSubChannelType::verifyInvariants( + function_ref emitError, unsigned flags, + Type storageType, Type expressedType, DenseElementsAttr scales, + DenseElementsAttr zeroPoints, ArrayRef quantizedDimensions, + ArrayRef blockSizes, int64_t storageTypeMin, + int64_t storageTypeMax) { + if (failed(QuantizedType::verifyInvariants(emitError, flags, storageType, + expressedType, storageTypeMin, + storageTypeMax))) { + return failure(); + } + + // Uniform quantization requires fully expressed parameters, 
including + // expressed type. + if (!expressedType) + return emitError() << "uniform quantization requires expressed type"; + + // Verify that the expressed type is floating point. + // If this restriction is ever eliminated, the parser/printer must be + // extended. + if (!llvm::isa(expressedType)) + return emitError() << "expressed type must be floating point"; + + // Verify scale type to match expressedType. + if (scales.getType().getElementType() != expressedType) { + return emitError() << "type of scale values " + << scales.getType().getElementType() + << " must match the expressed type " << expressedType; + } + + // Verify zero-point type to match storageType. + if (zeroPoints.getType().getElementType() != storageType) { + return emitError() << "type of zero point values " + << zeroPoints.getType().getElementType() + << " must match the storage type " << storageType; + } + + // Ensure that the shape of scales and zeroPoints match. + if (scales.getType().getShape() != zeroPoints.getType().getShape()) + return emitError() << "shape of scales and zeroPoints (" + << scales.getType().getShape() << " vs " + << zeroPoints.getType().getShape() << ") does not match"; + + // Ensure that the number of quantized-dimensions and block-sizes match. + if (quantizedDimensions.size() != blockSizes.size()) + return emitError() << "number of quantized dimensions and block sizes (" + << scales.size() << " vs " << zeroPoints.size() + << ") does not match"; + + // Verify quantized dimension. + for (auto quantizedDimension : quantizedDimensions) { + if (quantizedDimension < 0) + return emitError() << "illegal quantized dimension: " + << quantizedDimension; + } + + // Verify block sizes. 
+ for (auto blockSize : blockSizes) { + if (blockSize <= 0) + return emitError() << "illegal block size: " << blockSize; + } + + return success(); +} + +DenseElementsAttr UniformQuantizedSubChannelType::getScales() const { + return getImpl()->getScales(); +} + +DenseElementsAttr UniformQuantizedSubChannelType::getZeroPoints() const { + return getImpl()->getZeroPoints(); +} + +ArrayRef +UniformQuantizedSubChannelType::getQuantizedDimensions() const { + return getImpl()->getQuantizedDimensions(); +} + +ArrayRef UniformQuantizedSubChannelType::getBlockSizes() const { + return getImpl()->getBlockSizes(); +} + +const SmallVector> +UniformQuantizedSubChannelType::getBlockSizeInfo() const { + SmallVector> result; + result.reserve(getQuantizedDimensions().size()); + + for (auto [dim, size] : + llvm::zip(getQuantizedDimensions(), getBlockSizes())) { + result.push_back({dim, size}); + } + + return result; +} + +QuantileQuantizedType +QuantileQuantizedType::get(unsigned flags, Type storageType, Type quantileType, + Type expressedType, ArrayRef quantiles, + double scale, int64_t zeroPoint, + int64_t storageTypeMin, int64_t storageTypeMax) { + return Base::get(storageType.getContext(), flags, storageType, quantileType, + expressedType, quantiles, scale, zeroPoint, storageTypeMin, + storageTypeMax); +} + +QuantileQuantizedType QuantileQuantizedType::getChecked( + function_ref emitError, unsigned flags, + Type storageType, Type quantileType, Type expressedType, + ArrayRef quantiles, double scale, int64_t zeroPoint, + int64_t storageTypeMin, int64_t storageTypeMax) { + return Base::getChecked(emitError, storageType.getContext(), flags, + storageType, quantileType, expressedType, quantiles, + scale, zeroPoint, storageTypeMin, storageTypeMax); +} +LogicalResult QuantileQuantizedType::verifyInvariants( + function_ref emitError, unsigned flags, + Type storageType, Type quantileType, Type expressedType, + ArrayRef quantiles, double scale, int64_t zeroPoint, + int64_t storageTypeMin, 
int64_t storageTypeMax) { + if (failed(UniformQuantizedType::verifyInvariants( + emitError, flags, storageType, expressedType, scale, zeroPoint, + storageTypeMin, storageTypeMax))) { + return failure(); + } + + unsigned typeWidth{}; + if (storageType.isa()) { + typeWidth = llvm::dyn_cast(storageType).getWidth(); + } else if (storageType.isa() || + storageType.isa()) { + // Both Float8E5M2Type and Float8E4M3FNType derive from FloatType. + typeWidth = llvm::dyn_cast(storageType).getWidth(); + } else { + return emitError() << "illegal storage type, supported types are: integral " + "types, Float8E4M3FNType and Float8E5M2Type "; + } + + const size_t storageTypeRange = storageTypeMax - storageTypeMin + 1; + const size_t typeWidthSize = 1 << typeWidth; + const size_t expectedSize = + (storageTypeRange < typeWidthSize) ? storageTypeRange : typeWidthSize; + + const auto quantileArraySize = quantiles.size(); + if (quantileArraySize != expectedSize) { + return emitError() << "quantiles array size needs to be equal to " + "2^(bit_size(storageType)), or (storageTypeMax - " + "storageTypeMin + 1) when max and min differ from " + "the type limits; expected: " + << expectedSize << ", found: " << quantileArraySize; + } + + // Verify quantiles + for (double quantile : quantiles) { + if (std::isinf(quantile) || std::isnan(quantile)) { + return emitError() << "illegal quantile value: " << quantile; + } + } + + return success(); +} + +bool QuantileQuantizedType::classof(mlir::Type type) { + return type.getTypeID() == mlir::TypeID::get(); +} + +Type QuantileQuantizedType::getQuantileType() const { + return getImpl()->quantileType; +} + +unsigned QuantileQuantizedType::getQuantileTypeIntegralWidth() const { + return getImpl()->getQuantileType().getIntOrFloatBitWidth(); +} + +ArrayRef QuantileQuantizedType::getQuantiles() const { + return getImpl()->getQuantiles(); +} + +QuantileQuantizedPerAxisType QuantileQuantizedPerAxisType::get( + unsigned flags, Type storageType, Type quantileType, 
Type expressedType, + ArrayRef quantiles, ArrayRef scales, + ArrayRef zeroPoints, int32_t quantizedDimension, + int64_t storageTypeMin, int64_t storageTypeMax) { + return Base::get(storageType.getContext(), flags, storageType, quantileType, + expressedType, quantiles, scales, zeroPoints, + quantizedDimension, storageTypeMin, storageTypeMax); +} + +QuantileQuantizedPerAxisType QuantileQuantizedPerAxisType::getChecked( + function_ref emitError, unsigned flags, + Type storageType, Type quantileType, Type expressedType, + ArrayRef quantiles, ArrayRef scales, + ArrayRef zeroPoints, int32_t quantizedDimension, + int64_t storageTypeMin, int64_t storageTypeMax) { + return Base::getChecked(emitError, storageType.getContext(), flags, + storageType, quantileType, expressedType, quantiles, + scales, zeroPoints, quantizedDimension, + storageTypeMin, storageTypeMax); +} + +LogicalResult QuantileQuantizedPerAxisType::verifyInvariants( + function_ref emitError, unsigned flags, + Type storageType, Type quantileType, Type expressedType, + ArrayRef quantiles, ArrayRef scales, + ArrayRef zeroPoints, int32_t quantizedDimension, + int64_t storageTypeMin, int64_t storageTypeMax) { + if (failed(UniformQuantizedPerAxisType::verifyInvariants( + emitError, flags, storageType, expressedType, scales, zeroPoints, + quantizedDimension, storageTypeMin, storageTypeMax))) { + return failure(); + } + + unsigned typeWidth{}; + if (storageType.isa()) { + typeWidth = llvm::dyn_cast(storageType).getWidth(); + } else if (storageType.isa() || + storageType.isa()) { + // Both Float8E5M2Type and Float8E4M3FNType derive from FloatType. 
+ typeWidth = llvm::dyn_cast(storageType).getWidth(); + } else { + return emitError() << "illegal storage type, supported types are: integral " + "types, Float8E4M3FNType and Float8E5M2Type "; + } + + const size_t storageTypeRange = storageTypeMax - storageTypeMin + 1; + const size_t typeWidthSize = 1 << typeWidth; + const size_t expectedSize = + (storageTypeRange < typeWidthSize) ? storageTypeRange : typeWidthSize; + + const auto quantileArraySize = quantiles.size(); + if (quantileArraySize != expectedSize) { + return emitError() << "quantiles array size needs to be equal to " + "2^(bit_size(storageType)), or (storageTypeMax - " + "storageTypeMin + 1) when max and min differ from " + "the type limits; expected: " + << expectedSize << ", found: " << quantileArraySize; + } + + // Verify quantiles + for (double quantile : quantiles) { + if (std::isinf(quantile) || std::isnan(quantile)) { + return emitError() << "illegal quantile value: " << quantile; + } + } + + return success(); +} + +bool QuantileQuantizedPerAxisType::classof(mlir::Type type) { + return type.getTypeID() == mlir::TypeID::get(); +} + +Type QuantileQuantizedPerAxisType::getQuantileType() const { + return getImpl()->quantileType; +} + +unsigned QuantileQuantizedPerAxisType::getQuantileTypeIntegralWidth() const { + return getImpl()->getQuantileType().getIntOrFloatBitWidth(); +} + +ArrayRef QuantileQuantizedPerAxisType::getQuantiles() const { + return getImpl()->getQuantiles(); +} + CalibratedQuantizedType CalibratedQuantizedType::get(Type expressedType, double min, double max) { return Base::get(expressedType.getContext(), expressedType, min, max); diff --git a/mlir/lib/Dialect/Quant/IR/TypeDetail.h b/mlir/lib/Dialect/Quant/IR/TypeDetail.h index ef098811927c..7734e50b65b9 100644 --- a/mlir/lib/Dialect/Quant/IR/TypeDetail.h +++ b/mlir/lib/Dialect/Quant/IR/TypeDetail.h @@ -9,6 +9,7 @@ #ifndef TYPE_DETAIL_H_ #define TYPE_DETAIL_H_ +#include "mlir/IR/BuiltinAttributes.h" #include "mlir/IR/BuiltinTypes.h" 
#include "mlir/IR/TypeSupport.h" #include "mlir/IR/Types.h" @@ -253,6 +254,281 @@ struct UniformQuantizedPerAxisTypeStorage : public QuantizedTypeStorage { int32_t quantizedDimension; }; +struct UniformQuantizedSubChannelTypeStorage : public QuantizedTypeStorage { + struct KeyTy { + KeyTy(unsigned flags, Type storageType, Type expressedType, + DenseElementsAttr scales, DenseElementsAttr zeroPoints, + ArrayRef quantizedDimensions, ArrayRef blockSizes, + int64_t storageTypeMin, int64_t storageTypeMax) + : flags(flags), storageType(storageType), expressedType(expressedType), + scales(scales), zeroPoints(zeroPoints), + quantizedDimensions(quantizedDimensions), blockSizes(blockSizes), + storageTypeMin(storageTypeMin), storageTypeMax(storageTypeMax) {} + /// Flags corresponding to the bitmapped enum QuantizationFlags::FlagValue. + unsigned flags; + + // Integral type for the storage point representation. + Type storageType; + + // Floating point type that the quantized type approximates. + Type expressedType; + + DenseElementsAttr scales; + DenseElementsAttr zeroPoints; + ArrayRef quantizedDimensions; + ArrayRef blockSizes; + int64_t storageTypeMin; + int64_t storageTypeMax; + + DenseElementsAttr getScales() const { return scales; } + + DenseElementsAttr getZeroPoints() const { return zeroPoints; } + + // Check for equality of two structures that share KeyTy data members + // (by name). 
+ template + static bool genericIsEqual(const T &lhs, const U &rhs) { + return lhs.flags == rhs.flags && lhs.storageType == rhs.storageType && + lhs.expressedType == rhs.expressedType && + lhs.scales == rhs.scales && lhs.zeroPoints == rhs.zeroPoints && + lhs.quantizedDimensions == rhs.quantizedDimensions && + lhs.blockSizes == rhs.blockSizes && + lhs.storageTypeMin == rhs.storageTypeMin && + lhs.storageTypeMax == rhs.storageTypeMax; + } + + bool operator==(const KeyTy &other) const { + return genericIsEqual(*this, other); + } + + unsigned getHashValue() const { + // Hash the scalar attributes. + unsigned hash = llvm::hash_combine(flags, storageType, expressedType, + storageTypeMin, storageTypeMax); + + // Hash the scales. + for (auto scaleAttr : scales.getValues()) { + hash = llvm::hash_combine( + hash, llvm::bit_cast(scaleAttr.convertToDouble())); + } + + // Hash the zero points. (Assumed to be integers, adjust if needed). + for (auto zeroPointAttr : zeroPoints.getValues()) { + hash = llvm::hash_combine(hash, zeroPointAttr.getSExtValue()); + } + + // Hash the quantized dimensions and block sizes. + hash = llvm::hash_combine( + hash, + llvm::hash_combine_range(quantizedDimensions.begin(), + quantizedDimensions.end()), + llvm::hash_combine_range(blockSizes.begin(), blockSizes.end())); + + return hash; + } + }; + + // We pass scales and zeroPoints in directly rather than relying on KeyTy + // because we have to create new reallocated versions in `construct` below. 
+ UniformQuantizedSubChannelTypeStorage(const KeyTy &key, + DenseElementsAttr scales, + DenseElementsAttr zeroPoints, + ArrayRef quantizedDimensions, + ArrayRef blockSizes) + : QuantizedTypeStorage(key.flags, key.storageType, key.expressedType, + key.storageTypeMin, key.storageTypeMax), + scales(scales), zeroPoints(zeroPoints), + quantizedDimensions(quantizedDimensions), blockSizes(blockSizes) {} + + bool operator==(const KeyTy &key) const { + return KeyTy::genericIsEqual(*this, key); + } + + /// Construction. + static UniformQuantizedSubChannelTypeStorage * + construct(TypeStorageAllocator &allocator, const KeyTy &key) { + DenseElementsAttr scales = key.scales; + DenseElementsAttr zeroPoints = key.zeroPoints; + ArrayRef quantizedDimensions = + allocator.copyInto(key.quantizedDimensions); + ArrayRef blockSizes = allocator.copyInto(key.blockSizes); + return new (allocator.allocate()) + UniformQuantizedSubChannelTypeStorage(key, scales, zeroPoints, + quantizedDimensions, blockSizes); + } + + static unsigned hashKey(const KeyTy &key) { return key.getHashValue(); } + + DenseElementsAttr getScales() const { return scales; } + + DenseElementsAttr getZeroPoints() const { return zeroPoints; } + + ArrayRef getQuantizedDimensions() const { + return quantizedDimensions; + } + + ArrayRef getBlockSizes() const { return blockSizes; } + + DenseElementsAttr scales; + DenseElementsAttr zeroPoints; + ArrayRef quantizedDimensions; + ArrayRef blockSizes; +}; + +struct QuantileQuantizedTypeStorage : public UniformQuantizedTypeStorage { + struct KeyTy : public UniformQuantizedTypeStorage::KeyTy { + KeyTy(unsigned flags, Type storageType, Type quantileType, + Type expressedType, ArrayRef quantiles, double scale, + int64_t zeroPoint, int64_t storageTypeMin, int64_t storageTypeMax) + : UniformQuantizedTypeStorage::KeyTy(flags, storageType, expressedType, + scale, zeroPoint, storageTypeMin, + storageTypeMax), + quantileType(quantileType), quantiles(quantiles) {} + + Type quantileType; + 
ArrayRef quantiles; + Type getQuantileType() const { return quantileType; } + ArrayRef getQuantiles() const { return quantiles; } + + // Check for equality of two structures that share KeyTy data members + // (by name). + template + static bool genericIsEqual(const T &lhs, const U &rhs) { + return UniformQuantizedTypeStorage::KeyTy::genericIsEqual(lhs, rhs) && + lhs.getQuantileType() == rhs.getQuantileType() && + lhs.getQuantiles() == rhs.getQuantiles(); + } + + bool operator==(const KeyTy &other) const { + return genericIsEqual(*this, other); + } + + unsigned getHashValue() const { + int64_t scaleBits = llvm::bit_cast(scale); + int64_t *quantilesCast = llvm::bit_cast(quantiles.data()); + ArrayRef quantilesBits(quantilesCast, quantiles.size()); + return llvm::hash_combine( + flags, storageType, quantileType, expressedType, + llvm::hash_combine_range(quantilesBits.begin(), quantilesBits.end()), + scaleBits, zeroPoint, storageTypeMin, storageTypeMax); + } + }; + + QuantileQuantizedTypeStorage(const KeyTy &key, ArrayRef quantiles) + : UniformQuantizedTypeStorage(key), quantileType(key.getQuantileType()), + quantilesElements(quantiles.data()), + quantilesParamsSize(quantiles.size()) {} + + bool operator==(const KeyTy &key) const { + return KeyTy::genericIsEqual(*this, key); + } + + /// Construction. 
+ static QuantileQuantizedTypeStorage * + construct(TypeStorageAllocator &allocator, const KeyTy &key) { + ArrayRef quantiles = allocator.copyInto(key.quantiles); + return new (allocator.allocate()) + QuantileQuantizedTypeStorage(key, quantiles); + } + + static unsigned hashKey(const KeyTy &key) { return key.getHashValue(); } + + Type getQuantileType() const { return quantileType; } + + ArrayRef getQuantiles() const { + return ArrayRef(quantilesElements, quantilesParamsSize); + } + + Type quantileType; + const double *quantilesElements; + unsigned quantilesParamsSize; +}; + +struct QuantileQuantizedPerAxisTypeStorage + : public UniformQuantizedPerAxisTypeStorage { + struct KeyTy : public UniformQuantizedPerAxisTypeStorage::KeyTy { + KeyTy(unsigned flags, Type storageType, Type quantileType, + Type expressedType, ArrayRef quantiles, + ArrayRef scales, ArrayRef zeroPoints, + int32_t quantizedDimension, int64_t storageTypeMin, + int64_t storageTypeMax) + : UniformQuantizedPerAxisTypeStorage::KeyTy( + flags, storageType, expressedType, scales, zeroPoints, + quantizedDimension, storageTypeMin, storageTypeMax), + quantileType(quantileType), quantiles(quantiles) {} + + Type quantileType; + ArrayRef quantiles; + Type getQuantileType() const { return quantileType; } + ArrayRef getQuantiles() const { return quantiles; } + + // Check for equality of two structures that share KeyTy data members + // (by name). 
+ template + static bool genericIsEqual(const T &lhs, const U &rhs) { + return UniformQuantizedPerAxisTypeStorage::KeyTy::genericIsEqual(lhs, + rhs) && + lhs.getQuantileType() == rhs.getQuantileType() && + lhs.getQuantiles() == rhs.getQuantiles(); + } + + bool operator==(const KeyTy &other) const { + return genericIsEqual(*this, other); + } + + unsigned getHashValue() const { + int64_t *scalesCast = llvm::bit_cast(scales.data()); + ArrayRef scalesBits(scalesCast, scales.size()); + int64_t *quantilesCast = llvm::bit_cast(quantiles.data()); + ArrayRef quantilesBits(quantilesCast, quantiles.size()); + return llvm::hash_combine( + flags, storageType, quantileType, expressedType, + llvm::hash_combine_range(quantilesBits.begin(), quantilesBits.end()), + llvm::hash_combine_range(scalesBits.begin(), scalesBits.end()), + llvm::hash_combine_range(zeroPoints.begin(), zeroPoints.end()), + storageTypeMin, storageTypeMax); + } + }; + + // We pass quantiles, scales and zeroPoints in directly rather than relying on + // KeyTy because we have to create new reallocated versions in `construct` + // below. + QuantileQuantizedPerAxisTypeStorage(const KeyTy &key, + ArrayRef quantiles, + ArrayRef scales, + ArrayRef zeroPoints) + : UniformQuantizedPerAxisTypeStorage(key, scales, zeroPoints), + quantileType(key.getQuantileType()), + quantilesElements(quantiles.data()), + quantilesParamsSize(quantiles.size()) {} + + bool operator==(const KeyTy &key) const { + return KeyTy::genericIsEqual(*this, key); + } + + /// Construction. 
+ static QuantileQuantizedPerAxisTypeStorage * + construct(TypeStorageAllocator &allocator, const KeyTy &key) { + ArrayRef quantiles = allocator.copyInto(key.quantiles); + ArrayRef scales = allocator.copyInto(key.scales); + ArrayRef zeroPoints = allocator.copyInto(key.zeroPoints); + return new (allocator.allocate()) + QuantileQuantizedPerAxisTypeStorage(key, quantiles, scales, zeroPoints); + } + + static unsigned hashKey(const KeyTy &key) { return key.getHashValue(); } + + Type getQuantileType() const { return quantileType; } + + ArrayRef getQuantiles() const { + return ArrayRef(quantilesElements, quantilesParamsSize); + } + + Type quantileType; + const double *quantilesElements; + unsigned quantilesParamsSize; +}; // namespace detail + struct CalibratedQuantizedTypeStorage : public QuantizedTypeStorage { struct KeyTy { KeyTy(Type expressedType, double min, double max) diff --git a/mlir/lib/Dialect/Quant/IR/TypeParser.cpp b/mlir/lib/Dialect/Quant/IR/TypeParser.cpp index 851763d8942e..2ee1e96ae706 100644 --- a/mlir/lib/Dialect/Quant/IR/TypeParser.cpp +++ b/mlir/lib/Dialect/Quant/IR/TypeParser.cpp @@ -11,41 +11,60 @@ #include "mlir/IR/BuiltinTypes.h" #include "mlir/IR/DialectImplementation.h" #include "mlir/IR/Location.h" +#include "mlir/IR/QuantizationInterface.h" #include "mlir/IR/Types.h" #include "llvm/ADT/APFloat.h" #include "llvm/Support/Format.h" #include "llvm/Support/MathExtras.h" #include "llvm/Support/SourceMgr.h" #include "llvm/Support/raw_ostream.h" - +#include using namespace mlir; using namespace quant; -static IntegerType parseStorageType(DialectAsmParser &parser, bool &isSigned) { +static Type parseStorageType(DialectAsmParser &parser, bool &isSigned) { auto typeLoc = parser.getCurrentLocation(); - IntegerType type; + Type type; // Parse storage type (alpha_ident, integer_literal). 
StringRef identifier; unsigned storageTypeWidth = 0; + // type.print(llvm::outs()); + std::cout << "BEFORE " << std::endl; + // llvm::outs() << "\n"; + OptionalParseResult result = parser.parseOptionalType(type); if (result.has_value()) { if (!succeeded(*result)) return nullptr; - isSigned = !type.isUnsigned(); - storageTypeWidth = type.getWidth(); - } else if (succeeded(parser.parseKeyword(&identifier))) { - // Otherwise, this must be an unsigned integer (`u` integer-literal). - if (!identifier.consume_front("u")) { - parser.emitError(typeLoc, "illegal storage type prefix"); + + if (auto quantizationInterface = + llvm::dyn_cast(type)) { + isSigned = + quantizationInterface.isStorageSigned(); // Change name or logic + storageTypeWidth = quantizationInterface.getStorageWidth(); + std::cout << llvm::dyn_cast(type).isSignless() + << " HERE: " << isSigned << " " << storageTypeWidth + << std::endl; + } else { + parser.emitError(typeLoc, "illegal quantized storage type alias"); return nullptr; } - if (identifier.getAsInteger(10, storageTypeWidth)) { - parser.emitError(typeLoc, "expected storage type width"); + } else if (succeeded(parser.parseKeyword(&identifier))) { + // Otherwise, this must be an unsigned integer (`u` integer-literal) + if (identifier.consume_front("u")) { + if (identifier.getAsInteger(10, storageTypeWidth)) { + parser.emitError(typeLoc, "expected storage type width"); + return nullptr; + } + isSigned = false; + type = parser.getBuilder().getIntegerType(storageTypeWidth); + + } else { + parser.emitError(typeLoc, "illegal quantized storage type alias"); return nullptr; } - isSigned = false; - type = parser.getBuilder().getIntegerType(storageTypeWidth); + } else { return nullptr; } @@ -60,35 +79,91 @@ static IntegerType parseStorageType(DialectAsmParser &parser, bool &isSigned) { return type; } -static ParseResult parseStorageRange(DialectAsmParser &parser, - IntegerType storageType, bool isSigned, - int64_t &storageTypeMin, +static Type 
parseQuantileType(DialectAsmParser &parser) { + auto typeLoc = parser.getCurrentLocation(); + Type type; + + // Parse storage type (alpha_ident, integer_literal). + StringRef identifier; + unsigned storageTypeWidth = 0; + OptionalParseResult result = parser.parseOptionalType(type); + if (result.has_value()) { + if (!succeeded(*result)) + return nullptr; + + if (!type.isa() && !type.isa()) { + parser.emitError(typeLoc, "illegal quantile type alias"); + return nullptr; + } + } else if (succeeded(parser.parseKeyword(&identifier))) { + // Otherwise, this must be an unsigned integer (`u` integer-literal) + if (identifier.consume_front("u")) { + if (identifier.getAsInteger(10, storageTypeWidth)) { + parser.emitError(typeLoc, "expected quantile type width"); + return nullptr; + } + constexpr bool isSigned = false; + type = parser.getBuilder().getIntegerType(storageTypeWidth, isSigned); + + } else { + parser.emitError(typeLoc, "illegal quantile type alias"); + return nullptr; + } + } else { + return nullptr; + } + + return type; +} + +static ParseResult +checkStorageRange(DialectAsmParser &parser, int64_t storageTypeMin, + int64_t storageTypeMax, int64_t defaultStorageTypeMin, + int64_t defaultStorageTypeMax, SMLoc minLoc, SMLoc maxLoc) { + if (storageTypeMin < defaultStorageTypeMin) { + return parser.emitError(minLoc, "illegal storage type minimum: ") + << storageTypeMin; + } + if (storageTypeMax > defaultStorageTypeMax) { + return parser.emitError(maxLoc, "illegal storage type maximum: ") + << storageTypeMax; + } + return success(); +} + +static ParseResult parseStorageRange(DialectAsmParser &parser, Type storageType, + bool isSigned, int64_t &storageTypeMin, int64_t &storageTypeMax) { - int64_t defaultIntegerMin = QuantizedType::getDefaultMinimumForInteger( - isSigned, storageType.getWidth()); - int64_t defaultIntegerMax = QuantizedType::getDefaultMaximumForInteger( - isSigned, storageType.getWidth()); + int64_t defaultMin, defaultMax; + if (auto quantizationInterface = 
+ llvm::dyn_cast(storageType)) { + // const auto width = llvm::dyn_cast(storageType).getWidth(); + const auto width = quantizationInterface.getStorageWidth(); + + defaultMin = quantizationInterface.getDefaultMinimum(isSigned, width); + defaultMax = quantizationInterface.getDefaultMaximum(isSigned, width); + } else { + defaultMin = std::numeric_limits::max(); + defaultMax = std::numeric_limits::min(); + } + if (failed(parser.parseOptionalLess())) { - storageTypeMin = defaultIntegerMin; - storageTypeMax = defaultIntegerMax; + storageTypeMin = defaultMin; + storageTypeMax = defaultMax; return success(); } // Explicit storage min and storage max. + // F8 min and max values are integers, so parseInteger() is used. SMLoc minLoc = parser.getCurrentLocation(), maxLoc; if (parser.parseInteger(storageTypeMin) || parser.parseColon() || parser.getCurrentLocation(&maxLoc) || parser.parseInteger(storageTypeMax) || parser.parseGreater()) return failure(); - if (storageTypeMin < defaultIntegerMin) { - return parser.emitError(minLoc, "illegal storage type minimum: ") - << storageTypeMin; - } - if (storageTypeMax > defaultIntegerMax) { - return parser.emitError(maxLoc, "illegal storage type maximum: ") - << storageTypeMax; - } - return success(); + + std::cout << "parseStorageRange OK" << std::endl; + return checkStorageRange(parser, storageTypeMin, storageTypeMax, defaultMin, + defaultMax, minLoc, maxLoc); } static FloatType parseExpressedTypeAndRange(DialectAsmParser &parser, @@ -118,7 +193,7 @@ static FloatType parseExpressedTypeAndRange(DialectAsmParser &parser, /// storage-type ::= (`i` | `u`) integer-literal /// expressed-type-spec ::= `:` `f` integer-literal static Type parseAnyType(DialectAsmParser &parser) { - IntegerType storageType; + Type storageType; FloatType expressedType; unsigned typeFlags = 0; int64_t storageTypeMin; @@ -159,46 +234,203 @@ static Type parseAnyType(DialectAsmParser &parser) { typeFlags, storageType, expressedType, storageTypeMin, storageTypeMax); } 
-static ParseResult parseQuantParams(DialectAsmParser &parser, double &scale, +/// Checks if the given scale value is within the valid range of the expressed +/// type. The `expressedType` argument is the floating-point type used for +/// expressing the quantized values, and `scale` is the double value to check. +LogicalResult +isScaleInExpressedTypeRange(function_ref emitError, + Type expressedType, double scale) { + auto floatType = cast(expressedType); + double minScale = + APFloat::getLargest(floatType.getFloatSemantics(), true).convertToDouble(); + double maxScale = + APFloat::getLargest(floatType.getFloatSemantics()).convertToDouble(); + if (scale < minScale || scale > maxScale) + return emitError() << "scale " << scale << " out of expressed type range [" + << minScale << ", " << maxScale << "]"; + return success(); +} + +/// Parses a quantization parameter, which is either a scale value (float) or a +/// scale-zero point pair (float:integer). `expressedType`, expressing the type +/// of scale values, is used to validate the scale. The parsed scale and zero +/// point (if any) are stored in `scale` and `zeroPoint`. +static ParseResult parseQuantParams(DialectAsmParser &parser, + Type expressedType, double &scale, int64_t &zeroPoint) { - // scale[:zeroPoint]? - // scale. - if (parser.parseFloat(scale)) + + if (parser.parseFloat(scale)) { + return failure(); + } + + if (failed(isScaleInExpressedTypeRange( + [&]() { return parser.emitError(parser.getCurrentLocation()); }, + expressedType, scale))) { return failure(); + } - // zero point. zeroPoint = 0; if (failed(parser.parseOptionalColon())) { - // Default zero point. return success(); } return parser.parseInteger(zeroPoint); } -/// Parses a UniformQuantizedType. +/// Parses block size information for sub-channel quantization, assuming the +/// leading '{' has already been parsed. The block size information is provided +/// as a comma-separated list of "Axis:BlockSize" pairs, terminated by a '}'. 
+/// +/// The parsed axis indices are stored in `quantizedDimensions`, and the +/// corresponding block sizes are stored in `blockSizes`. +static ParseResult +parseBlockSizeInfoUntilRBrace(DialectAsmParser &parser, + SmallVectorImpl &quantizedDimensions, + SmallVectorImpl &blockSizes) { + // Empty block-sizes info. + if (succeeded(parser.parseOptionalRBrace())) { + return success(); + } + + auto parseBlockSizeElements = [&]() -> ParseResult { + quantizedDimensions.resize(quantizedDimensions.size() + 1); + blockSizes.resize(blockSizes.size() + 1); + if (parser.parseInteger(quantizedDimensions.back()) || + parser.parseColon() || parser.parseInteger(blockSizes.back())) + return failure(); + return success(); + }; + + if (parser.parseCommaSeparatedList(parseBlockSizeElements) || + parser.parseRBrace()) { + return failure(); + } + + return success(); +} + +/// Parses a bracketed list of quantization parameters, returning the dimensions +/// of the parsed sub-tensors in `dims`. The dimension of the list is prepended +/// to the dimensions of the sub-tensors. This function assumes that the initial +/// left brace has already been parsed. For example: +/// +/// parseQuantParamListUntilRBrace(1.0:1, 2.0:4, 3.0:4}) -> Success, +/// dims = [3], scales = [1.0, 2.0, 3.0], zeroPoints = [1, 4, 4] +/// +/// parseQuantParamListUntilRBrace({1.0, 2.0}, {3.0:1, 4.0:9}}) -> Success, +/// dims = [2, 2], scales = [1.0, 2.0, 3.0, 4.0], zeroPoints = [0, 0, 1, +/// 9] +/// +/// This function expects all sub-tensors to have the same rank. 
+static ParseResult +parseQuantParamListUntilRBrace(DialectAsmParser &parser, Type expressedType, + SmallVectorImpl &scales, + SmallVectorImpl &zeroPoints, + SmallVectorImpl &dims) { + auto checkDims = [&](const SmallVectorImpl &prevDims, + const SmallVectorImpl &newDims) -> ParseResult { + if (prevDims == newDims) + return success(); + return parser.emitError(parser.getCurrentLocation()) + << "tensor literal is invalid; ranks are not consistent " + "between elements"; + }; + + bool first = true; + SmallVector newDims; + unsigned size = 0; + + auto parseOneElement = [&]() -> ParseResult { + SmallVector thisDims; + if (succeeded(parser.parseOptionalLBrace())) { + if (parseQuantParamListUntilRBrace(parser, expressedType, scales, + zeroPoints, thisDims)) + return failure(); + } else { + zeroPoints.resize(zeroPoints.size() + 1); + scales.resize(scales.size() + 1); + if (parseQuantParams(parser, expressedType, scales.back(), + zeroPoints.back())) { + return failure(); + } + } + ++size; + if (!first) + return checkDims(newDims, thisDims); + newDims = thisDims; + first = false; + return success(); + }; + + if (parser.parseCommaSeparatedList(parseOneElement) || parser.parseRBrace()) { + return failure(); + } + + // Return the sublists' dimensions with 'size' prepended. + dims.clear(); + dims.push_back(size); + dims.append(newDims.begin(), newDims.end()); + + return success(); +} + +/// Parses a UniformQuantizedType or a QuantileQuantizedType. /// /// uniform_type ::= uniform_per_layer /// | uniform_per_axis +/// | uniform_sub_channel /// uniform_per_layer ::= `uniform<` storage-spec expressed-type-spec /// `,` scale-zero `>` /// uniform_per_axis ::= `uniform<` storage-spec expressed-type-spec -/// axis-spec `,` scale-zero-list `>` +/// axis-spec `,` `{` scale-zero-list `}` `>` +/// uniform_sub_channel ::= `uniform<` storage-spec expressed-type-spec +/// block-size-info `,` scale-zero-tensor `>` /// storage-spec ::= storage-type (`<` storage-range `>`)? 
/// storage-range ::= integer-literal `:` integer-literal /// storage-type ::= (`i` | `u`) integer-literal /// expressed-type-spec ::= `:` `f` integer-literal /// axis-spec ::= `:` integer-literal -/// scale-zero ::= float-literal `:` integer-literal -/// scale-zero-list ::= `{` scale-zero (`,` scale-zero)* `}` -static Type parseUniformType(DialectAsmParser &parser) { - IntegerType storageType; +/// scale-zero ::= scale (`:` zero-point)? +/// scale ::= float-literal +/// zero-point ::= integer-literal +/// scale-zero-list ::= scale-zero (`,` scale-zero)* +/// block-size-info ::= `{` `}` | `{` axis-block `:` (`,` axis-block)* `}` +/// axis-block ::= axis-spec `:` block-size-spec +/// block-size-spec ::= integer-literal +/// scale-zero-tensor ::= scale-zero-dense-exp | scale-zero-list +/// scale-zero-dense-exp ::= `{` +/// scale-zero-tensor (`,` scale-zero-tensor)* +/// `}` +/// +/// quantile_type ::= quantile_per_layer +/// | quantile_per_axis +/// quantile_per_layer ::= `quantile<` storage-spec quantile-type-spec +/// expressed-type-spec `,` quantiles-list `,` +/// scale-zero `>` +/// quantile_per_axis ::= `quantile<` storage-spec quantile-type-spec +/// expressed-type-spec axis-spec `,` quantiles-list +/// scale-zero-list `>` +/// storage-spec ::= storage-type (`<` storage-range `>`)? 
+/// storage-range ::= integer-literal `:` integer-literal +/// storage-type ::= (`i` | `u`) integer-literal +/// quantile-type-spec ::= `:` ((`i` | `u` | `f`) integer-literal | `f8E5M2` | +/// `f8E4M3FN`) +/// expressed-type-spec ::= `:` `f` integer-literal axis-spec ::= +/// `:` integer-literal quantiles-list ::= `{` quantile (`,` quantile)* `}` +/// scale-zero ::= `:` float-literal `:` integer-literal +/// scale-zero-list ::= `:` `{` scale-zero (`,` scale-zero)* `}` +static Type parseUniformType(DialectAsmParser &parser, bool isQuantile) { + Type storageType; + Type quantileType; FloatType expressedType; unsigned typeFlags = 0; int64_t storageTypeMin; int64_t storageTypeMax; bool isPerAxis = false; - int32_t quantizedDimension; + bool isSubChannel = false; + SmallVector quantizedDimensions; + SmallVector blockSizes; + SmallVector quantiles; SmallVector scales; SmallVector zeroPoints; @@ -223,16 +455,38 @@ static Type parseUniformType(DialectAsmParser &parser) { return nullptr; } + // quantile type. + if (isQuantile) { + if (parser.parseColon()) { + return nullptr; + } + quantileType = parseQuantileType(parser); + if (!quantileType) { + return nullptr; + } + } + // Expressed type. if (parser.parseColon() || parser.parseType(expressedType)) { return nullptr; } - // Optionally parse quantized dimension for per-axis quantization. + // Optionally parse quantized dimension for per-axis or sub-channel + // quantization. if (succeeded(parser.parseOptionalColon())) { - if (parser.parseInteger(quantizedDimension)) - return nullptr; - isPerAxis = true; + if (succeeded(parser.parseOptionalLBrace())) { + isSubChannel = true; + if (parseBlockSizeInfoUntilRBrace(parser, quantizedDimensions, + blockSizes)) { + return nullptr; + } + } else { + isPerAxis = true; + quantizedDimensions.resize(1); + if (parser.parseInteger(quantizedDimensions.back())) { + return nullptr; + } + } } // Comma leading into range_spec. 
@@ -240,26 +494,43 @@ static Type parseUniformType(DialectAsmParser &parser) { return nullptr; } - // Parameter specification. - // For per-axis, ranges are in a {} delimitted list. - if (isPerAxis) { + // Quantile list + if (isQuantile) { if (parser.parseLBrace()) { return nullptr; } + + do { + quantiles.emplace_back(); + if (parser.parseFloat(quantiles.back())) { + return nullptr; + } + } while (succeeded(parser.parseOptionalComma())); + + if (parser.parseRBrace()) { + return nullptr; + } + + if (parser.parseColon()) { + return nullptr; + } } - // Parse scales/zeroPoints. - SMLoc scaleZPLoc = parser.getCurrentLocation(); - do { - scales.resize(scales.size() + 1); + // Quantization parameter (scales/zeroPoints) specification. + bool isPerTensor = !isPerAxis && !isSubChannel; + SmallVector dims; + if (isPerTensor) { zeroPoints.resize(zeroPoints.size() + 1); - if (parseQuantParams(parser, scales.back(), zeroPoints.back())) { + scales.resize(scales.size() + 1); + if (parseQuantParams(parser, expressedType, scales.back(), + zeroPoints.back())) { return nullptr; } - } while (isPerAxis && succeeded(parser.parseOptionalComma())); - if (isPerAxis) { - if (parser.parseRBrace()) { + } else { + if (parser.parseLBrace() || + parseQuantParamListUntilRBrace(parser, expressedType, scales, + zeroPoints, dims)) { return nullptr; } } @@ -268,19 +539,48 @@ static Type parseUniformType(DialectAsmParser &parser) { return nullptr; } - if (!isPerAxis && scales.size() > 1) { - return (parser.emitError(scaleZPLoc, - "multiple scales/zeroPoints provided, but " - "quantizedDimension wasn't specified"), - nullptr); + if (isQuantile) { + ArrayRef quantilesRef(quantiles.begin(), quantiles.end()); + + if (isPerAxis) { + ArrayRef scalesRef(scales.begin(), scales.end()); + ArrayRef zeroPointsRef(zeroPoints.begin(), zeroPoints.end()); + + return parser.getChecked( + typeFlags, storageType, quantileType, expressedType, quantilesRef, + scalesRef, zeroPointsRef, quantizedDimensions[0], 
storageTypeMin, + storageTypeMax); + } + + return parser.getChecked( + typeFlags, storageType, quantileType, expressedType, quantilesRef, + scales.front(), zeroPoints.front(), storageTypeMin, storageTypeMax); } if (isPerAxis) { - ArrayRef scalesRef(scales.begin(), scales.end()); - ArrayRef zeroPointsRef(zeroPoints.begin(), zeroPoints.end()); return parser.getChecked( + typeFlags, storageType, expressedType, scales, zeroPoints, + quantizedDimensions[0], storageTypeMin, storageTypeMax); + } else if (isSubChannel) { + SmallVector apFloatScales = + llvm::to_vector(llvm::map_range(scales, [&](double scale) -> APFloat { + APFloat apFloatScale(scale); + bool unused; + apFloatScale.convert(expressedType.getFloatSemantics(), + APFloat::rmNearestTiesToEven, &unused); + return apFloatScale; + })); + SmallVector apIntZeroPoints = llvm::to_vector( + llvm::map_range(zeroPoints, [&](int64_t zeroPoint) -> APInt { + return APInt(storageType.getIntOrFloatBitWidth(), zeroPoint); + })); + auto scalesRef = mlir::DenseElementsAttr::get( + RankedTensorType::get(dims, expressedType), apFloatScales); + auto zeroPointsRef = mlir::DenseElementsAttr::get( + RankedTensorType::get(dims, storageType), apIntZeroPoints); + return parser.getChecked( typeFlags, storageType, expressedType, scalesRef, zeroPointsRef, - quantizedDimension, storageTypeMin, storageTypeMax); + quantizedDimensions, blockSizes, storageTypeMin, storageTypeMax); } return parser.getChecked( @@ -318,13 +618,16 @@ static Type parseCalibratedType(DialectAsmParser &parser) { /// Parse a type registered to this dialect. Type QuantDialect::parseType(DialectAsmParser &parser) const { + // All types start with an identifier that we switch on. 
StringRef typeNameSpelling; if (failed(parser.parseKeyword(&typeNameSpelling))) return nullptr; if (typeNameSpelling == "uniform") - return parseUniformType(parser); + return parseUniformType(parser, false); + if (typeNameSpelling == "quantile") + return parseUniformType(parser, true); if (typeNameSpelling == "any") return parseAnyType(parser); if (typeNameSpelling == "calibrated") @@ -339,19 +642,47 @@ static void printStorageType(QuantizedType type, DialectAsmPrinter &out) { // storage type unsigned storageWidth = type.getStorageTypeIntegralWidth(); bool isSigned = type.isSigned(); - if (isSigned) { - out << "i" << storageWidth; + int64_t defaultMin, defaultMax; + + if (auto quantizationInterface = + llvm::dyn_cast(type.getStorageType())) { + out << quantizationInterface.printStorageType(isSigned, storageWidth); + + defaultMin = + quantizationInterface.getDefaultMinimum(isSigned, storageWidth); + defaultMax = + quantizationInterface.getDefaultMaximum(isSigned, storageWidth); + } else { - out << "u" << storageWidth; + defaultMin = std::numeric_limits::max(); + defaultMax = std::numeric_limits::min(); } - // storageTypeMin and storageTypeMax if not default. 
- if (type.hasStorageTypeBounds()) { + if (defaultMin != type.getStorageTypeMin() || + defaultMax != type.getStorageTypeMax()) { out << "<" << type.getStorageTypeMin() << ":" << type.getStorageTypeMax() << ">"; } } +static void printQuantileType(Type quantileType, DialectAsmPrinter &out) { + if (auto intType = llvm::dyn_cast(quantileType)) { + const unsigned storageTypeWidth = intType.getWidth(); + if (intType.isUnsigned()) { + out << ":u" << storageTypeWidth; + } else { + out << ":i" << storageTypeWidth; + } + } else if (quantileType.isa()) { + out << ":f8E5M2"; + } else if (quantileType.isa()) { + out << ":f8E4M3FN"; + } else { + // Float types + out << ":" << quantileType; + } +} + static void printQuantParams(double scale, int64_t zeroPoint, DialectAsmPrinter &out) { out << scale; @@ -360,6 +691,17 @@ static void printQuantParams(double scale, int64_t zeroPoint, } } +static void +printBlockSizeInfo(ArrayRef> blockSizeInfo, + DialectAsmPrinter &out) { + out << "{"; + llvm::interleaveComma( + llvm::seq(0, blockSizeInfo.size()), out, [&](size_t index) { + out << blockSizeInfo[index].first << ":" << blockSizeInfo[index].second; + }); + out << "}"; +} + /// Helper that prints a AnyQuantizedType. static void printAnyQuantizedType(AnyQuantizedType type, DialectAsmPrinter &out) { @@ -405,6 +747,124 @@ static void printUniformQuantizedPerAxisType(UniformQuantizedPerAxisType type, out << "}>"; } +/// Prints quantization parameters as a nested list of `scale`[:`zero_point`] +/// elements. The nesting corresponds to the `shape` dimensions. +/// +/// Elements are delimited by commas, and the inner dimensions are enclosed in +/// braces. `zero_point` is only printed if it is non-zero. 
For example: +/// +/// printDenseQuantizationParameters(scales=[1.0, 2.0, 3.0, 4.0], +/// zeroPoints=[0, 0, 1, 9], +/// shape=[2, 2]) +/// +/// would print: +/// +/// {{1.0, 2.0}, {3.0:1, 4.0:9}} +void printDenseQuantizationParameters(ArrayRef scales, + ArrayRef zeroPoints, + ArrayRef shape, + DialectAsmPrinter &out) { + int64_t rank = shape.size(); + SmallVector counter(rank, 0); + unsigned openBrackets = 0; + + auto incrementCounterAndDelimit = [&]() { + ++counter[rank - 1]; + for (unsigned i = rank - 1; i > 0; --i) { + if (counter[i] >= shape[i]) { + counter[i] = 0; + ++counter[i - 1]; + --openBrackets; + out << '}'; + } + } + }; + + for (unsigned idx = 0, e = scales.size(); idx < e; ++idx) { + if (idx != 0) + out << ", "; + while (openBrackets++ < rank) + out << '{'; + openBrackets = rank; + out << scales[idx]; + if (zeroPoints[idx] != 0) { + out << ":" << zeroPoints[idx]; + } + incrementCounterAndDelimit(); + } + while (openBrackets-- > 0) + out << '}'; +} + +/// Helper that prints a UniformQuantizedSubChannelType. +static void +printUniformQuantizedSubChannelType(UniformQuantizedSubChannelType type, + DialectAsmPrinter &out) { + out << "uniform<"; + printStorageType(type, out); + out << ":" << type.getExpressedType() << ":"; + printBlockSizeInfo(type.getBlockSizeInfo(), out); + out << ", "; + + auto scalesItr = type.getScales().getValues(); + auto zeroPointsItr = type.getZeroPoints().getValues(); + SmallVector scales(scalesItr.begin(), scalesItr.end()); + SmallVector zeroPoints(zeroPointsItr.begin(), zeroPointsItr.end()); + printDenseQuantizationParameters(scales, zeroPoints, + type.getScales().getType().getShape(), out); + out << ">"; +} + +/// Helper that prints a QuantileQuantizedType. 
+static void printQuantileQuantizedType(QuantileQuantizedType type, + DialectAsmPrinter &out) { + out << "quantile<"; + printStorageType(type, out); + printQuantileType(type.getQuantileType(), out); + out << ":" << type.getExpressedType() << ", "; + + // scheme specific parameters + ArrayRef quantiles = type.getQuantiles(); + out << "{"; + llvm::interleave( + llvm::seq(0, quantiles.size()), out, + [&](size_t index) { out << quantiles[index]; }, ","); + out << "}:"; + + printQuantParams(type.getScale(), type.getZeroPoint(), out); + out << ">"; +} + +/// Helper that prints a QuantileQuantizedPerAxisType. +static void printQuantileQuantizedPerAxisType(QuantileQuantizedPerAxisType type, + DialectAsmPrinter &out) { + out << "quantile<"; + printStorageType(type, out); + printQuantileType(type.getQuantileType(), out); + out << ":" << type.getExpressedType() << ":"; + out << type.getQuantizedDimension(); + out << ", "; + + // scheme specific parameters + ArrayRef quantiles = type.getQuantiles(); + out << "{"; + llvm::interleave( + llvm::seq(0, quantiles.size()), out, + [&](size_t index) { out << quantiles[index]; }, ","); + out << "}:"; + + ArrayRef scales = type.getScales(); + ArrayRef zeroPoints = type.getZeroPoints(); + out << "{"; + llvm::interleave( + llvm::seq(0, scales.size()), out, + [&](size_t index) { + printQuantParams(scales[index], zeroPoints[index], out); + }, + ","); + out << "}>"; +} + /// Helper that prints a CalibratedQuantizedType. 
static void printCalibratedQuantizedType(CalibratedQuantizedType type, DialectAsmPrinter &out) { @@ -417,10 +877,18 @@ static void printCalibratedQuantizedType(CalibratedQuantizedType type, void QuantDialect::printType(Type type, DialectAsmPrinter &os) const { if (auto anyType = llvm::dyn_cast(type)) printAnyQuantizedType(anyType, os); + else if (auto uniformType = llvm::dyn_cast(type)) + printQuantileQuantizedType(uniformType, os); + else if (auto perAxisType = + llvm::dyn_cast(type)) + printQuantileQuantizedPerAxisType(perAxisType, os); else if (auto uniformType = llvm::dyn_cast(type)) printUniformQuantizedType(uniformType, os); else if (auto perAxisType = llvm::dyn_cast(type)) printUniformQuantizedPerAxisType(perAxisType, os); + else if (auto perAxisType = + llvm::dyn_cast(type)) + printUniformQuantizedSubChannelType(perAxisType, os); else if (auto calibratedType = llvm::dyn_cast(type)) printCalibratedQuantizedType(calibratedType, os); else diff --git a/mlir/lib/Dialect/Quant/Transforms/CMakeLists.txt b/mlir/lib/Dialect/Quant/Transforms/CMakeLists.txt index 2fd4a41999d4..825d11992d30 100644 --- a/mlir/lib/Dialect/Quant/Transforms/CMakeLists.txt +++ b/mlir/lib/Dialect/Quant/Transforms/CMakeLists.txt @@ -1,5 +1,6 @@ add_mlir_dialect_library(MLIRQuantTransforms LowerQuantOps.cpp + NormalizeQuantTypes.cpp StripFuncQuantTypes.cpp ADDITIONAL_HEADER_DIRS diff --git a/mlir/lib/Dialect/Quant/Transforms/LowerQuantOps.cpp b/mlir/lib/Dialect/Quant/Transforms/LowerQuantOps.cpp index 4adeb9218ff8..c2dbcde1aeba 100644 --- a/mlir/lib/Dialect/Quant/Transforms/LowerQuantOps.cpp +++ b/mlir/lib/Dialect/Quant/Transforms/LowerQuantOps.cpp @@ -38,11 +38,11 @@ Type getScalarType(Type inputType) { return inputType; } -// Return the shape of an input value as a list of attributes (static dimensions) -// and values (dynamic dimensions). If 'input' is a scalar, an empty list is -// returned. If 'input' is a tensor, its shape is returned. 
-SmallVector -getScalarOrTensorShape(OpBuilder &builder, Location loc, Value input) { +// Return the shape of an input value as a list of attributes (static +// dimensions) and values (dynamic dimensions). If 'input' is a scalar, an empty +// list is returned. If 'input' is a tensor, its shape is returned. +SmallVector getScalarOrTensorShape(OpBuilder &builder, + Location loc, Value input) { if (isa(input.getType())) return tensor::getMixedSizes(builder, loc, input); return {}; @@ -100,16 +100,16 @@ std::pair flattenUnrankedTensor(OpBuilder &builder, Location loc, // Turn input size into 1D tensor auto flatShapeType = shape::getExtentTensorType(context, 1); - auto flatInputShape = builder.create( - loc, flatShapeType, inputSize); + auto flatInputShape = + builder.create(loc, flatShapeType, inputSize); // Reshape input tensor into 1D auto inputType = cast(input.getType()); auto elementType = inputType.getElementType(); auto flatInputType = RankedTensorType::get({ShapedType::kDynamic}, elementType); - auto flatInput = builder.create( - loc, flatInputType, input, flatInputShape); + auto flatInput = builder.create(loc, flatInputType, input, + flatInputShape); return std::make_pair(flatInput, inputShape); } @@ -135,11 +135,9 @@ std::pair flattenUnrankedTensor(OpBuilder &builder, Location loc, // - inputShape // 1D extent tensor containing the shape of the original unranked input. 
// -std::pair flattenUnrankedTensorAroundAxis(OpBuilder &builder, - Location loc, - Value input, - int64_t axis, - int64_t axisSize) { +std::pair +flattenUnrankedTensorAroundAxis(OpBuilder &builder, Location loc, Value input, + int64_t axis, int64_t axisSize) { // Get full tensor shape auto *context = builder.getContext(); auto indexType = builder.getIndexType(); @@ -149,16 +147,20 @@ std::pair flattenUnrankedTensorAroundAxis(OpBuilder &builder, // Get shape and sizes on left and right of axis auto axisValue = builder.create(loc, axis); auto axisNextValue = builder.create(loc, axis + 1); - auto shapeLeft = builder.create( - loc, TypeRange{shapeType, shapeType}, inputShape, axisValue) - .getResult(0); - auto sizeLeft = builder.create( - loc, indexType, shapeLeft); - auto shapeRight = builder.create( - loc, TypeRange{shapeType, shapeType}, inputShape, axisNextValue) - .getResult(1); - auto sizeRight = builder.create( - loc, indexType, shapeRight); + auto shapeLeft = + builder + .create(loc, TypeRange{shapeType, shapeType}, + inputShape, axisValue) + .getResult(0); + auto sizeLeft = + builder.create(loc, indexType, shapeLeft); + auto shapeRight = + builder + .create(loc, TypeRange{shapeType, shapeType}, + inputShape, axisNextValue) + .getResult(1); + auto sizeRight = + builder.create(loc, indexType, shapeRight); // Compute flat input shape as a 3-element 1D tensor auto axisSizeValue = builder.create(loc, axisSize); @@ -171,8 +173,8 @@ std::pair flattenUnrankedTensorAroundAxis(OpBuilder &builder, auto elementType = inputType.getElementType(); auto flatInputType = RankedTensorType::get( {ShapedType::kDynamic, axisSize, ShapedType::kDynamic}, elementType); - auto flatInput = builder.create( - loc, flatInputType, input, flatInputShape); + auto flatInput = builder.create(loc, flatInputType, input, + flatInputShape); return std::make_pair(flatInput, inputShape); } @@ -190,7 +192,8 @@ Value restoreUnrankedTensorShape(OpBuilder &builder, Location loc, Value input, auto 
inputType = cast(input.getType()); auto elementType = inputType.getElementType(); auto unrankedType = UnrankedTensorType::get(elementType); - return builder.create(loc, unrankedType, input, inputShape); + return builder.create(loc, unrankedType, input, + inputShape); } // Create a tensor constant containing all scales in a per-channel quantized @@ -209,7 +212,8 @@ Value materializePerChannelScales(OpBuilder &builder, Location loc, auto scaleAttrs = llvm::map_to_vector(scales, [&](double scale) -> Attribute { return builder.getFloatAttr(expressedType, scale); }); - auto tensorType = RankedTensorType::get({(int64_t) scales.size()}, expressedType); + auto tensorType = + RankedTensorType::get({(int64_t)scales.size()}, expressedType); auto scalesAttr = DenseElementsAttr::get(tensorType, scaleAttrs); return builder.create(loc, tensorType, scalesAttr); } @@ -228,9 +232,8 @@ Value materializePerChannelZeroPoints( UniformQuantizedPerAxisType quantizedType) { auto zeroPoints = quantizedType.getZeroPoints(); auto storageType = quantizedType.getStorageType(); - auto zeroPointAttrs = llvm::map_to_vector( - zeroPoints, - [&](int64_t zeroPoint) -> Attribute { + auto zeroPointAttrs = + llvm::map_to_vector(zeroPoints, [&](int64_t zeroPoint) -> Attribute { return builder.getIntegerAttr(storageType, zeroPoint); }); auto tensorType = @@ -239,6 +242,54 @@ Value materializePerChannelZeroPoints( return builder.create(loc, tensorType, zeroPointsAttr); } +// Create a tensor constant containing all scales in a sub-channel quantized +// type. 
Example: +// +// !quant.uniform +// +// produces +// +// %cst = arith.constant dense<[[2.0, 3.0], [4.0, 5.0]]> : tensor<2x2xf32> +// +Value materializeSubChannelScales( + OpBuilder &builder, Location loc, + UniformQuantizedSubChannelType quantizedType) { + auto scales = quantizedType.getScales(); + auto expressedType = quantizedType.getExpressedType(); + auto scaleAttrs = llvm::map_to_vector( + scales.getValues(), [&](APFloat scale) -> Attribute { + return builder.getFloatAttr(expressedType, scale); + }); + auto tensorType = + RankedTensorType::get(scales.getType().getShape(), expressedType); + auto scalesAttr = DenseElementsAttr::get(tensorType, scaleAttrs); + return builder.create(loc, tensorType, scalesAttr); +} + +// Create a tensor constant containing all zero points in a sub-channel +// quantized type. Example: +// +// !quant.uniform +// +// produces +// +// %cst = arith.constant dense<[[10, 20], [30, 40]]> : tensor<2x2xi8> +// +Value materializeSubChannelZeroPoints( + OpBuilder &builder, Location loc, + UniformQuantizedSubChannelType quantizedType) { + auto zeroPoints = quantizedType.getZeroPoints(); + auto storageType = quantizedType.getStorageType(); + auto zeroPointAttrs = llvm::map_to_vector( + zeroPoints.getValues(), [&](APInt zeroPoint) -> Attribute { + return builder.getIntegerAttr(storageType, zeroPoint); + }); + auto tensorType = + RankedTensorType::get(zeroPoints.getType().getShape(), storageType); + auto zeroPointsAttr = DenseElementsAttr::get(tensorType, zeroPointAttrs); + return builder.create(loc, tensorType, zeroPointsAttr); +} + // Clamp the given scalar or tensor input using the storage bounds encoded in // the given quantized type, if present. // @@ -299,7 +350,7 @@ Value convertIntegerToFloat(OpBuilder &builder, Location loc, Value input, return builder.create(loc, resultType, input); } -// Quantize a scalar or ranked tensor value. The stored value is clamped using +// Quantize a scalar or ranked tensor value. 
The stored value is clamped using // the storage bounds encoded in the given quantized type. // // See function 'convertRanked()' below for a description of the arguments. @@ -308,8 +359,7 @@ Value quantizeValue(OpBuilder &builder, Location loc, Value input, Value zeroPoint, QuantizedType quantizedType) { // Convert scale to tensor if necessary auto inputType = input.getType(); - scale = getScalarOrTensorConstant( - builder, loc, scale, inputType, inputShape); + scale = getScalarOrTensorConstant(builder, loc, scale, inputType, inputShape); // Scale input auto scaledValue = builder.create(loc, input, scale); @@ -322,8 +372,7 @@ Value quantizeValue(OpBuilder &builder, Location loc, Value input, inputShape); // Convert zero point from storage to expressed type - zeroPoint = convertIntegerToFloat(builder, loc, zeroPoint, - scale.getType(), + zeroPoint = convertIntegerToFloat(builder, loc, zeroPoint, scale.getType(), quantizedType.isSigned()); // Add zero point to stored value @@ -334,9 +383,9 @@ Value quantizeValue(OpBuilder &builder, Location loc, Value input, // Convert stored value to storage type auto storageScalarOrTensorType = getScalarOrTensorType(quantizedType.getStorageType(), inputType); - auto storedValueInt = convertFloatToInteger( - builder, loc, storedValueFloat, storageScalarOrTensorType, - quantizedType.isSigned()); + auto storedValueInt = convertFloatToInteger(builder, loc, storedValueFloat, + storageScalarOrTensorType, + quantizedType.isSigned()); // Clamp stored value it if the storage type is bound auto storedValueClamped = clampScalarOrTensor(builder, loc, storedValueInt, @@ -352,12 +401,11 @@ Value dequantizeValue(OpBuilder &builder, Location loc, Value input, Value zeroPoint, QuantizedType quantizedType) { // Convert scale to tensor if necessary auto inputType = input.getType(); - scale = getScalarOrTensorConstant( - builder, loc, scale, inputType, inputShape); + scale = getScalarOrTensorConstant(builder, loc, scale, inputType, inputShape); // 
Convert stored value to float - auto result = convertIntegerToFloat( - builder, loc, input, scale.getType(), quantizedType.isSigned()); + auto result = convertIntegerToFloat(builder, loc, input, scale.getType(), + quantizedType.isSigned()); // Skip unnecessary computations if no zero point is given if (!matchPattern(zeroPoint, m_Zero())) { @@ -366,8 +414,7 @@ Value dequantizeValue(OpBuilder &builder, Location loc, Value input, inputShape); // Convert zero point from storage to expressed type - zeroPoint = convertIntegerToFloat(builder, loc, zeroPoint, - scale.getType(), + zeroPoint = convertIntegerToFloat(builder, loc, zeroPoint, scale.getType(), quantizedType.isSigned()); // Subtract zero point to stored value @@ -501,35 +548,33 @@ Value convertPerChannelRanked(OpBuilder &builder, Location loc, Operation *op, auto initShape = tensor::getMixedSizes(builder, loc, input); Value init = builder.create(loc, initShape, elementType); - SmallVector iteratorTypes( - inputRank, utils::IteratorType::parallel); + SmallVector iteratorTypes(inputRank, + utils::IteratorType::parallel); auto channelAxisAffineMap = AffineMap::get( inputRank, 0, builder.getAffineDimExpr(channelAxis), context); SmallVector indexingMaps{ - builder.getMultiDimIdentityMap(inputRank), - channelAxisAffineMap, - channelAxisAffineMap, - builder.getMultiDimIdentityMap(inputRank) - }; - auto result = builder.create( - loc, - init.getType(), // resultType - ValueRange{input, scales, zeroPoints}, // inputs - ValueRange{init}, // outputs - indexingMaps, - iteratorTypes, - [&](OpBuilder& builder, Location loc, ValueRange args) { - assert(args.size() == 4); - auto input = args[0]; - auto scale = args[1]; - auto zeroPoint = args[2]; - - auto result = convertRanked(builder, loc, op, input, {}, scale, - zeroPoint, quantizedType); - - builder.create(loc, result); - }) - .getResult(0); + builder.getMultiDimIdentityMap(inputRank), channelAxisAffineMap, + channelAxisAffineMap, builder.getMultiDimIdentityMap(inputRank)}; 
+ auto result = builder + .create( + loc, + init.getType(), // resultType + ValueRange{input, scales, zeroPoints}, // inputs + ValueRange{init}, // outputs + indexingMaps, iteratorTypes, + [&](OpBuilder &builder, Location loc, ValueRange args) { + assert(args.size() == 4); + auto input = args[0]; + auto scale = args[1]; + auto zeroPoint = args[2]; + + auto result = + convertRanked(builder, loc, op, input, {}, scale, + zeroPoint, quantizedType); + + builder.create(loc, result); + }) + .getResult(0); return result; } @@ -551,7 +596,7 @@ Value convertPerChannel(OpBuilder &builder, Location loc, Operation *op, // Flatten unranked tensor into a 3D ranked tensor if necessary bool isUnranked = isa(input.getType()); int64_t channelAxis = quantizedType.getQuantizedDimension(); - int64_t channelAxisSize = (int64_t) quantizedType.getScales().size(); + int64_t channelAxisSize = (int64_t)quantizedType.getScales().size(); Value inputShape; if (isUnranked) { std::tie(input, inputShape) = flattenUnrankedTensorAroundAxis( @@ -570,6 +615,73 @@ Value convertPerChannel(OpBuilder &builder, Location loc, Operation *op, return result; } +// Convert an operation using sub-channel quantization. +// +// - op +// 'quant.dcast' or 'quant.qcast' op. +// +// - input +// Scalar, ranked tensor. +// +// - quantizedType +// Sub-channel quantized type. +// +Value convertSubChannel(OpBuilder &builder, Location loc, Operation *op, + Value input, + UniformQuantizedSubChannelType quantizedType) { + auto *context = builder.getContext(); + + auto inputType = cast(input.getType()); + auto inputRank = inputType.getRank(); + + auto scales = materializeSubChannelScales(builder, loc, quantizedType); + auto zeroPoints = + materializeSubChannelZeroPoints(builder, loc, quantizedType); + + auto elementType = isa(inputType.getElementType()) + ? 
quantizedType.getStorageType() + : quantizedType.getExpressedType(); + auto initShape = tensor::getMixedSizes(builder, loc, input); + Value init = builder.create(loc, initShape, elementType); + + SmallVector iteratorTypes(inputRank, + utils::IteratorType::parallel); + const SmallVector> &blockSizeInfo = + quantizedType.getBlockSizeInfo(); + SmallVector affineExprs(inputRank, + builder.getAffineConstantExpr(0)); + for (auto [quantizedDimension, blockSize] : blockSizeInfo) { + affineExprs[quantizedDimension] = + builder.getAffineDimExpr(quantizedDimension).floorDiv(blockSize); + } + auto affineMap = AffineMap::get(inputRank, 0, affineExprs, context); + SmallVector indexingMaps{ + builder.getMultiDimIdentityMap(inputRank), affineMap, affineMap, + builder.getMultiDimIdentityMap(inputRank)}; + auto result = builder + .create( + loc, + init.getType(), // resultType + ValueRange{input, scales, zeroPoints}, // inputs + ValueRange{init}, // outputs + indexingMaps, iteratorTypes, + [&](OpBuilder &builder, Location loc, ValueRange args) { + assert(args.size() == 4); + auto input = args[0]; + auto scale = args[1]; + auto zeroPoint = args[2]; + + auto result = + convertRanked(builder, loc, op, input, {}, scale, + zeroPoint, quantizedType); + + builder.create(loc, result); + }) + .getResult(0); + + return result; +} + // Convert a quantization operation. 
// // - op @@ -593,11 +705,17 @@ Value convertQuantized(OpBuilder &builder, Location loc, Operation *op, return convertPerChannel(builder, loc, op, input, uniformQuantizedPerAxisType); + if (auto uniformQuantizedSubChannelType = + dyn_cast(quantizedType)) + return convertSubChannel(builder, loc, op, input, + uniformQuantizedSubChannelType); + llvm_unreachable("unexpected quantized type"); } // Lowering pattern for 'quant.dcast' -struct DequantizeCastOpConversion : public OpConversionPattern { +struct DequantizeCastOpConversion + : public OpConversionPattern { using OpConversionPattern::OpConversionPattern; LogicalResult @@ -622,7 +740,8 @@ struct DequantizeCastOpConversion : public OpConversionPattern { +struct QuantizeCastOpConversion + : public OpConversionPattern { using OpConversionPattern::OpConversionPattern; LogicalResult @@ -650,12 +769,8 @@ struct LowerQuantOps : public impl::LowerQuantOpsBase { ConversionTarget target(getContext()); target.addLegalOp(); target.addIllegalDialect(); - target.addLegalDialect< - arith::ArithDialect, - linalg::LinalgDialect, - shape::ShapeDialect, - tensor::TensorDialect - >(); + target.addLegalDialect(); if (failed(applyPartialConversion(getOperation(), target, std::move(patterns)))) @@ -666,10 +781,8 @@ struct LowerQuantOps : public impl::LowerQuantOpsBase { } // namespace void populateLowerQuantOpsPatterns(RewritePatternSet &patterns) { - patterns.add< - DequantizeCastOpConversion, - QuantizeCastOpConversion - >(patterns.getContext()); + patterns.add( + patterns.getContext()); } } // namespace quant diff --git a/mlir/lib/Dialect/Quant/Transforms/NormalizeQuantTypes.cpp b/mlir/lib/Dialect/Quant/Transforms/NormalizeQuantTypes.cpp new file mode 100644 index 000000000000..030cf0779437 --- /dev/null +++ b/mlir/lib/Dialect/Quant/Transforms/NormalizeQuantTypes.cpp @@ -0,0 +1,179 @@ +//===- NormalizeQuantTypes.cpp - Normalize quantized types +//----------------------===// +// +// Part of the LLVM Project, under the Apache License 
v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// Normalize generic quantized types to specific quantized types +// +//===----------------------------------------------------------------------===// + +#include "mlir/Dialect/Func/IR/FuncOps.h" +#include "mlir/Dialect/Func/Transforms/FuncConversions.h" +#include "mlir/Dialect/Quant/IR/Quant.h" +#include "mlir/Dialect/Quant/IR/QuantTypes.h" +#include "mlir/Dialect/Quant/Transforms/Passes.h" +#include "mlir/Dialect/Tensor/IR/Tensor.h" +#include "mlir/Transforms/DialectConversion.h" + +namespace mlir { +namespace quant { + +#define GEN_PASS_DEF_NORMALIZEQUANTTYPES +#include "mlir/Dialect/Quant/Transforms/Passes.h.inc" + +namespace { + +/// Returns true if the given sub-channel quantized type is convertible to a +/// per-tensor quantized type. This is true if the sub-channel type has only +/// one scale and one zero point. +/// +/// Assumes that `tensorType` is a tensor with element type +/// `quant::UniformQuantizedSubChannelType`. +static bool isConvertibleToPerTensor(TensorType tensorType) { + return cast(tensorType.getElementType()) + .getScales() + .getType() + .getNumElements() == 1; +} + +/// Returns true if the given sub-channel quantized type is convertible to a +/// per-axis quantized type. This is true if the shape of the scales tensor has +/// all but one non-one value. +/// +/// Assumes that `tensorType` is a tensor with element type +/// `quant::UniformQuantizedSubChannelType`. 
+static bool isConvertibleToPerAxis(TensorType tensorType) { + auto shape = cast(tensorType.getElementType()) + .getScales() + .getType() + .getShape(); + return llvm::count_if(shape, [](int64_t dim) { return dim != 1; }) == 1; +} + +/// This class defines a type converter that converts sub-channel quantized +/// types to per-tensor or per-axis quantized types whenever possible. +class NormalizedQuantTypesConverter : public TypeConverter { + + static Type convertType(Type type) { + auto tensorType = dyn_cast(type); + if (!tensorType) { + return type; + } + + auto subChannelType = + dyn_cast(tensorType.getElementType()); + if (!subChannelType) { + return type; + } + + if (isConvertibleToPerTensor(tensorType)) { + double scale = + subChannelType.getScales().getValues()[0].convertToDouble(); + int64_t zeroPoint = + subChannelType.getZeroPoints().getValues()[0].getSExtValue(); + auto perTensorType = UniformQuantizedType::get( + subChannelType.getFlags(), subChannelType.getStorageType(), + subChannelType.getExpressedType(), scale, zeroPoint, + subChannelType.getStorageTypeMin(), + subChannelType.getStorageTypeMax()); + return tensorType.clone(perTensorType); + } + + if (isConvertibleToPerAxis(tensorType)) { + auto shape = subChannelType.getScales().getType().getShape(); + auto quantizedDimItr = + llvm::find_if(shape, [](int64_t dim) { return dim != 1; }); + auto scales = llvm::to_vector(llvm::map_range( + subChannelType.getScales().getValues(), + [](APFloat scale) { return scale.convertToDouble(); })); + auto zeroPoints = llvm::to_vector(llvm::map_range( + subChannelType.getZeroPoints().getValues(), + [](APInt zeroPoint) { return zeroPoint.getSExtValue(); })); + auto perAxisType = UniformQuantizedPerAxisType::get( + subChannelType.getFlags(), subChannelType.getStorageType(), + subChannelType.getExpressedType(), scales, zeroPoints, + quantizedDimItr - shape.begin(), subChannelType.getStorageTypeMin(), + subChannelType.getStorageTypeMax()); + return 
tensorType.clone(perAxisType); + } + return type; + } + +public: + explicit NormalizedQuantTypesConverter() { addConversion(convertType); } +}; + +/// This class implements a conversion pattern that converts any generic +/// operation with sub-channel quantized types to an equivalent operation with +/// per-tensor or per-axis quantized types. +class ConvertGenericOpwithSubChannelType : public ConversionPattern { +public: + ConvertGenericOpwithSubChannelType(TypeConverter &typeConverter, + MLIRContext *context) + : ConversionPattern(typeConverter, MatchAnyOpTypeTag{}, 0, context) {} + + LogicalResult + matchAndRewrite(Operation *op, ArrayRef operands, + ConversionPatternRewriter &rewriter) const final { + SmallVector resultTypes; + if (failed(typeConverter->convertTypes(op->getResultTypes(), resultTypes))) + return failure(); + + auto *newOp = Operation::create( + op->getLoc(), op->getName(), resultTypes, operands, op->getAttrs(), + op->getPropertiesStorage(), op->getSuccessors(), op->getNumRegions()); + for (auto regions : llvm::zip(op->getRegions(), newOp->getRegions())) { + Region &before = std::get<0>(regions); + Region &parent = std::get<1>(regions); + rewriter.inlineRegionBefore(before, parent, parent.end()); + if (failed(rewriter.convertRegionTypes(&parent, *typeConverter))) + return failure(); + } + rewriter.insert(newOp); + rewriter.replaceOp(op, newOp->getResults()); + return success(); + } +}; + +// Conversion pass +class NormalizeQuantTypes + : public impl::NormalizeQuantTypesBase { +public: + void runOnOperation() override { + + auto *context = &getContext(); + + NormalizedQuantTypesConverter typeConverter; + ConversionTarget target(*context); + + // Determine legal operations. 
+ target.addDynamicallyLegalOp([&](func::FuncOp op) { + return typeConverter.isSignatureLegal(op.getFunctionType()) && + typeConverter.isLegal(&op.getBody()); + }); + target.markUnknownOpDynamicallyLegal([&](Operation *op) { + return typeConverter.isLegal(op->getOperandTypes()) && + typeConverter.isLegal(op->getResultTypes()); + }); + + // Register conversion patterns + RewritePatternSet patterns(context); + populateFunctionOpInterfaceTypeConversionPattern( + patterns, typeConverter); + patterns.add(typeConverter, context); + + // Apply conversion + if (failed( + applyFullConversion(getOperation(), target, std::move(patterns)))) + signalPassFailure(); + } +}; + +} // namespace + +} // namespace quant +} // namespace mlir diff --git a/mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp b/mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp index ed3ba321b37a..dca3a8146195 100644 --- a/mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp +++ b/mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp @@ -77,9 +77,9 @@ struct CastOpInterface // Case 3: Ranked tensor -> ranked tensor. The offsets and strides do not // change. 
auto rankedResultType = cast(castOp.getType()); - return MemRefType::get( + return cast(MemRefType::get( rankedResultType.getShape(), rankedResultType.getElementType(), - llvm::cast(*maybeSrcBufferType).getLayout(), memorySpace); + llvm::cast(*maybeSrcBufferType).getLayout(), memorySpace)); } LogicalResult bufferize(Operation *op, RewriterBase &rewriter, @@ -157,8 +157,8 @@ struct CollapseShapeOpInterface tensorResultType, srcBufferType.getMemorySpace()); } - return memref::CollapseShapeOp::computeCollapsedType( - srcBufferType, collapseShapeOp.getReassociationIndices()); + return cast(memref::CollapseShapeOp::computeCollapsedType( + srcBufferType, collapseShapeOp.getReassociationIndices())); } LogicalResult bufferize(Operation *op, RewriterBase &rewriter, @@ -325,7 +325,7 @@ struct ExpandShapeOpInterface expandShapeOp.getReassociationIndices()); if (failed(maybeResultType)) return failure(); - return *maybeResultType; + return cast(*maybeResultType); } LogicalResult bufferize(Operation *op, RewriterBase &rewriter, @@ -748,9 +748,9 @@ struct PadOpInterface if (failed(maybeSrcBufferType)) return failure(); MemRefLayoutAttrInterface layout; - return MemRefType::get(padOp.getResultType().getShape(), + return cast(MemRefType::get(padOp.getResultType().getShape(), padOp.getResultType().getElementType(), layout, - maybeSrcBufferType->getMemorySpace()); + maybeSrcBufferType->getMemorySpace())); } LogicalResult bufferize(Operation *op, RewriterBase &rewriter, diff --git a/mlir/lib/Dialect/UB/IR/CMakeLists.txt b/mlir/lib/Dialect/UB/IR/CMakeLists.txt index 84125ea0b571..ef9cf277b8ba 100644 --- a/mlir/lib/Dialect/UB/IR/CMakeLists.txt +++ b/mlir/lib/Dialect/UB/IR/CMakeLists.txt @@ -9,5 +9,6 @@ add_mlir_dialect_library(MLIRUBDialect MLIRUBOpsInterfacesIncGen LINK_LIBS PUBLIC + MLIRTransformUtils MLIRIR ) diff --git a/mlir/lib/IR/AsmPrinter.cpp b/mlir/lib/IR/AsmPrinter.cpp index fa4a1b4b72b0..4071e816a54f 100644 --- a/mlir/lib/IR/AsmPrinter.cpp +++ b/mlir/lib/IR/AsmPrinter.cpp 
@@ -412,6 +412,19 @@ class AsmPrinter::Impl { /// Returns the output stream of the printer. raw_ostream &getStream() { return os; } + + /// Print a newline and indent the printer to the start of the current + /// operation. + void printNewline() { + os << newLine; + os.indent(currentIndent); + } + + /// Increase indentation. + void increaseIndent() { currentIndent += indentWidth; } + + /// Decrease indentation. + void decreaseIndent() { currentIndent -= indentWidth; } template inline void interleaveComma(const Container &c, UnaryFunctor eachFn) const { @@ -527,6 +540,12 @@ class AsmPrinter::Impl { /// A tracker for the number of new lines emitted during printing. NewLineCounter newLine; + + /// The number of spaces used for indenting nested operations. + const static unsigned indentWidth = 2; + + /// This is the current indentation level for nested structures. + unsigned currentIndent = 0; }; } // namespace mlir @@ -982,6 +1001,9 @@ class DummyAliasDialectAsmPrinter : public DialectAsmPrinter { /// The following are hooks of `DialectAsmPrinter` that are not necessary for /// determining potential aliases. + void printNewline() override {} + void increaseIndent() override {} + void decreaseIndent() override {} void printFloat(const APFloat &) override {} void printKeywordOrString(StringRef) override {} void printString(StringRef) override {} @@ -2805,6 +2827,12 @@ void AsmPrinter::Impl::printDialectAttribute(Attribute attr) { { llvm::raw_string_ostream attrNameStr(attrName); Impl subPrinter(attrNameStr, state); + + // The values of currentIndent and newLine are assigned to the created subprinter, + // so that the indent level and number of printed lines can be tracked. 
+ subPrinter.currentIndent = currentIndent; + subPrinter.newLine = newLine; + DialectAsmPrinter printer(subPrinter); dialect.printAttribute(attr, printer); } @@ -2819,6 +2847,12 @@ void AsmPrinter::Impl::printDialectType(Type type) { { llvm::raw_string_ostream typeNameStr(typeName); Impl subPrinter(typeNameStr, state); + + // The values of currentIndent and newLine are assigned to the created subprinter, + // so that the indent level and number of printed lines can be tracked. + subPrinter.currentIndent = currentIndent; + subPrinter.newLine = newLine; + DialectAsmPrinter printer(subPrinter); dialect.printType(type, printer); } @@ -2859,6 +2893,21 @@ raw_ostream &AsmPrinter::getStream() const { return impl->getStream(); } +void AsmPrinter::printNewline() { + assert(impl && "expected AsmPrinter::printNewLine to be overriden"); + impl->printNewline(); +} + +void AsmPrinter::increaseIndent() { + assert(impl && "expected AsmPrinter::increaseIndent to be overriden"); + impl->increaseIndent(); +} + +void AsmPrinter::decreaseIndent() { + assert(impl && "expected AsmPrinter::decreaseIndent to be overriden"); + impl->decreaseIndent(); +} + /// Print the given floating point value in a stablized form. void AsmPrinter::printFloat(const APFloat &value) { assert(impl && "expected AsmPrinter::printFloat to be overriden"); @@ -3184,19 +3233,6 @@ class OperationPrinter : public AsmPrinter::Impl, private OpAsmPrinter { printTrailingLocation(loc); } - /// Print a newline and indent the printer to the start of the current - /// operation. - void printNewline() override { - os << newLine; - os.indent(currentIndent); - } - - /// Increase indentation. - void increaseIndent() override { currentIndent += indentWidth; } - - /// Decrease indentation. - void decreaseIndent() override { currentIndent -= indentWidth; } - /// Print a block argument in the usual format of: /// %ssaName : type {attr1=42} loc("here") /// where location printing is controlled by the standard internal option. 
@@ -3322,12 +3358,6 @@ class OperationPrinter : public AsmPrinter::Impl, private OpAsmPrinter { // top-level we start with "builtin" as the default, so that the top-level // `module` operation prints as-is. SmallVector defaultDialectStack{"builtin"}; - - /// The number of spaces used for indenting nested operations. - const static unsigned indentWidth = 2; - - // This is the current indentation level for nested structures. - unsigned currentIndent = 0; }; } // namespace diff --git a/mlir/lib/IR/BuiltinTypes.cpp b/mlir/lib/IR/BuiltinTypes.cpp index 3924d082f062..f8ac5b748302 100644 --- a/mlir/lib/IR/BuiltinTypes.cpp +++ b/mlir/lib/IR/BuiltinTypes.cpp @@ -210,10 +210,6 @@ LogicalResult OpaqueType::verify(function_ref emitError, // VectorType //===----------------------------------------------------------------------===// -bool VectorType::isValidElementType(Type t) { - return isValidVectorTypeElementType(t); -} - LogicalResult VectorType::verify(function_ref emitError, ArrayRef shape, Type elementType, ArrayRef scalableDims) { @@ -252,49 +248,6 @@ VectorType VectorType::cloneWith(std::optional> shape, getScalableDims()); } -//===----------------------------------------------------------------------===// -// TensorType -//===----------------------------------------------------------------------===// - -Type TensorType::getElementType() const { - return llvm::TypeSwitch(*this) - .Case( - [](auto type) { return type.getElementType(); }); -} - -bool TensorType::hasRank() const { - return !llvm::isa(*this); -} - -ArrayRef TensorType::getShape() const { - return llvm::cast(*this).getShape(); -} - -TensorType TensorType::cloneWith(std::optional> shape, - Type elementType) const { - if (llvm::dyn_cast(*this)) { - if (shape) - return RankedTensorType::get(*shape, elementType); - return UnrankedTensorType::get(elementType); - } - - auto rankedTy = llvm::cast(*this); - if (!shape) - return RankedTensorType::get(rankedTy.getShape(), elementType, - rankedTy.getEncoding()); - 
return RankedTensorType::get(shape.value_or(rankedTy.getShape()), elementType, - rankedTy.getEncoding()); -} - -RankedTensorType TensorType::clone(::llvm::ArrayRef shape, - Type elementType) const { - return ::llvm::cast(cloneWith(shape, elementType)); -} - -RankedTensorType TensorType::clone(::llvm::ArrayRef shape) const { - return ::llvm::cast(cloneWith(shape, getElementType())); -} - // Check if "elementType" can be an element type of a tensor. static LogicalResult checkTensorElementType(function_ref emitError, @@ -318,6 +271,12 @@ bool TensorType::isValidElementType(Type type) { // RankedTensorType //===----------------------------------------------------------------------===// +RankedTensorType RankedTensorType::cloneWith(std::optional> shape, + Type elementType) const { + return RankedTensorType::get(shape.value_or(getShape()), elementType, + getEncoding()); +} + LogicalResult RankedTensorType::verify(function_ref emitError, ArrayRef shape, Type elementType, @@ -335,6 +294,14 @@ RankedTensorType::verify(function_ref emitError, // UnrankedTensorType //===----------------------------------------------------------------------===// +TensorType UnrankedTensorType::cloneWith(std::optional> shape, + Type elementType) const { + if (shape) + return RankedTensorType::get(*shape, elementType); + + return UnrankedTensorType::get(elementType); +} + LogicalResult UnrankedTensorType::verify(function_ref emitError, Type elementType) { @@ -342,55 +309,9 @@ UnrankedTensorType::verify(function_ref emitError, } //===----------------------------------------------------------------------===// -// BaseMemRefType +// BaseMemRefTypeInterface //===----------------------------------------------------------------------===// -Type BaseMemRefType::getElementType() const { - return llvm::TypeSwitch(*this) - .Case( - [](auto type) { return type.getElementType(); }); -} - -bool BaseMemRefType::hasRank() const { - return !llvm::isa(*this); -} - -ArrayRef BaseMemRefType::getShape() const { - 
return llvm::cast(*this).getShape(); -} - -BaseMemRefType BaseMemRefType::cloneWith(std::optional> shape, - Type elementType) const { - if (llvm::dyn_cast(*this)) { - if (!shape) - return UnrankedMemRefType::get(elementType, getMemorySpace()); - MemRefType::Builder builder(*shape, elementType); - builder.setMemorySpace(getMemorySpace()); - return builder; - } - - MemRefType::Builder builder(llvm::cast(*this)); - if (shape) - builder.setShape(*shape); - builder.setElementType(elementType); - return builder; -} - -MemRefType BaseMemRefType::clone(::llvm::ArrayRef shape, - Type elementType) const { - return ::llvm::cast(cloneWith(shape, elementType)); -} - -MemRefType BaseMemRefType::clone(::llvm::ArrayRef shape) const { - return ::llvm::cast(cloneWith(shape, getElementType())); -} - -Attribute BaseMemRefType::getMemorySpace() const { - if (auto rankedMemRefTy = llvm::dyn_cast(*this)) - return rankedMemRefTy.getMemorySpace(); - return llvm::cast(*this).getMemorySpace(); -} - unsigned BaseMemRefType::getMemorySpaceAsInt() const { if (auto rankedMemRefTy = llvm::dyn_cast(*this)) return rankedMemRefTy.getMemorySpaceAsInt(); @@ -474,7 +395,7 @@ bool mlir::detail::isSupportedMemorySpace(Attribute memorySpace) { return true; // Supported built-in attributes. - if (llvm::isa(memorySpace)) + if (llvm::isa(memorySpace)) return true; // Allow custom dialect attributes. 
@@ -510,6 +431,15 @@ unsigned mlir::detail::getMemorySpaceAsInt(Attribute memorySpace) { return static_cast(llvm::cast(memorySpace).getInt()); } +MemRefType MemRefType::cloneWith(std::optional> shape, + Type elementType) const { + MemRefType::Builder builder(llvm::cast(*this)); + if (shape) + builder.setShape(*shape); + builder.setElementType(elementType); + return builder; +} + unsigned MemRefType::getMemorySpaceAsInt() const { return detail::getMemorySpaceAsInt(getMemorySpace()); } @@ -834,7 +764,7 @@ static LogicalResult getStridesAndOffset(MemRefType t, } LogicalResult MemRefType::getStridesAndOffset(SmallVectorImpl &strides, - int64_t &offset) { + int64_t &offset) { // Happy path: the type uses the strided layout directly. if (auto strided = llvm::dyn_cast(getLayout())) { llvm::append_range(strides, strided.getStrides()); @@ -888,6 +818,15 @@ bool MemRefType::isLastDimUnitStride() { // UnrankedMemRefType //===----------------------------------------------------------------------===// +BaseMemRefType UnrankedMemRefType::cloneWith(std::optional> shape, + Type elementType) const { + if (!shape) + return UnrankedMemRefType::get(elementType, getMemorySpace()); + MemRefType::Builder builder(*shape, elementType); + builder.setMemorySpace(getMemorySpace()); + return (MemRefType)builder; +} + unsigned UnrankedMemRefType::getMemorySpaceAsInt() const { return detail::getMemorySpaceAsInt(getMemorySpace()); } diff --git a/mlir/lib/IR/CMakeLists.txt b/mlir/lib/IR/CMakeLists.txt index 4cabac185171..6ef9f4faa583 100644 --- a/mlir/lib/IR/CMakeLists.txt +++ b/mlir/lib/IR/CMakeLists.txt @@ -30,6 +30,7 @@ add_mlir_library(MLIRIR Operation.cpp OperationSupport.cpp PatternMatch.cpp + QuantizationInterface.cpp Region.cpp RegionKindInterface.cpp SymbolTable.cpp @@ -65,6 +66,7 @@ add_mlir_library(MLIRIR MLIRSideEffectInterfacesIncGen MLIRSymbolInterfacesIncGen MLIRTensorEncodingIncGen + MLIRQuantizationInterfaceIncGen LINK_LIBS PUBLIC MLIRSupport diff --git 
a/mlir/lib/IR/DialectResourceBlobManager.cpp b/mlir/lib/IR/DialectResourceBlobManager.cpp index b83b31e30ef1..83cc1879241d 100644 --- a/mlir/lib/IR/DialectResourceBlobManager.cpp +++ b/mlir/lib/IR/DialectResourceBlobManager.cpp @@ -63,3 +63,11 @@ auto DialectResourceBlobManager::insert(StringRef name, nameStorage.resize(name.size() + 1); } while (true); } + +void DialectResourceBlobManager::getBlobMap( + llvm::function_ref &)> accessor) + const { + llvm::sys::SmartScopedReader reader(blobMapLock); + + accessor(blobMap); +} diff --git a/mlir/lib/IR/QuantizationInterface.cpp b/mlir/lib/IR/QuantizationInterface.cpp new file mode 100644 index 000000000000..72a432b4906f --- /dev/null +++ b/mlir/lib/IR/QuantizationInterface.cpp @@ -0,0 +1,22 @@ +//===- QuantizationInterface.cpp +//------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "mlir/Dialect/Quant/IR/QuantTypes.h" +#include "mlir/IR/BuiltinTypes.h" +#include "mlir/IR/Diagnostics.h" +#include "llvm/ADT/Sequence.h" + +using namespace mlir; +using namespace mlir::detail; + +//===----------------------------------------------------------------------===// +/// Tablegen Interface Definitions +//===----------------------------------------------------------------------===// + +#include "mlir/IR/QuantizationInterface.cpp.inc" diff --git a/mlir/lib/IR/SymbolTable.cpp b/mlir/lib/IR/SymbolTable.cpp index 71adfc467611..3e2d6cd01427 100644 --- a/mlir/lib/IR/SymbolTable.cpp +++ b/mlir/lib/IR/SymbolTable.cpp @@ -56,8 +56,9 @@ collectValidReferencesFor(Operation *symbol, StringAttr symbolName, StringAttr symbolNameId = StringAttr::get(ctx, SymbolTable::getSymbolAttrName()); do { - // Each parent of 'symbol' should define a symbol table. 
- if (!symbolTableOp->hasTrait()) + // Each parent of 'symbol' should define a symbol table or be a symbol container + if (!symbolTableOp->hasTrait() && + !symbolTableOp->hasTrait()) return failure(); // Each parent of 'symbol' should also be a symbol. StringAttr symbolTableName = getNameIfSymbol(symbolTableOp, symbolNameId); @@ -117,7 +118,7 @@ walkSymbolTable(Operation *op, /// Build a symbol table with the symbols within the given operation. SymbolTable::SymbolTable(Operation *symbolTableOp) : symbolTableOp(symbolTableOp) { - assert(symbolTableOp->hasTrait() && + assert((symbolTableOp->hasTrait() || symbolTableOp->hasTrait()) && "expected operation to have SymbolTable trait"); assert(symbolTableOp->getNumRegions() == 1 && "expected operation to have a single region"); @@ -384,7 +385,7 @@ void SymbolTable::walkSymbolTables( /// was found. Operation *SymbolTable::lookupSymbolIn(Operation *symbolTableOp, StringAttr symbol) { - assert(symbolTableOp->hasTrait()); + assert(symbolTableOp->hasTrait() || symbolTableOp->hasTrait()); Region ®ion = symbolTableOp->getRegion(0); if (region.empty()) return nullptr; @@ -425,7 +426,7 @@ static LogicalResult lookupSymbolInImpl( return success(); // Verify that the root is also a symbol table. - if (!symbolTableOp->hasTrait()) + if (!symbolTableOp->hasTrait() && !symbolTableOp->hasTrait()) return failure(); // Otherwise, lookup each of the nested non-leaf references and ensure that @@ -702,7 +703,7 @@ static SmallVector collectSymbolScopes(Operation *symbol, Operation *limitIt = symbol->getParentOp(); for (size_t i = 0, e = references.size(); i != e; ++i, limitIt = limitIt->getParentOp()) { - assert(limitIt->hasTrait()); + assert(limitIt->hasTrait() || limitIt->hasTrait()); scopes.push_back({references[i], &limitIt->getRegion(0)}); } return scopes; @@ -866,23 +867,26 @@ bool SymbolTable::symbolKnownUseEmpty(Operation *symbol, Region *from) { /// Generates a new symbol reference attribute with a new leaf reference. 
static SymbolRefAttr generateNewRefAttr(SymbolRefAttr oldAttr, - FlatSymbolRefAttr newLeafAttr) { + SymbolRefAttr newLeafAttr) { if (llvm::isa(oldAttr)) return newLeafAttr; auto nestedRefs = llvm::to_vector<2>(oldAttr.getNestedReferences()); - nestedRefs.back() = newLeafAttr; + nestedRefs.back() = FlatSymbolRefAttr::get(newLeafAttr.getRootReference()); + + nestedRefs.append(newLeafAttr.getNestedReferences().begin(), + newLeafAttr.getNestedReferences().end()); + return SymbolRefAttr::get(oldAttr.getRootReference(), nestedRefs); } /// The implementation of SymbolTable::replaceAllSymbolUses below. template static LogicalResult -replaceAllSymbolUsesImpl(SymbolT symbol, StringAttr newSymbol, IRUnitT *limit) { +replaceAllSymbolUsesImpl(SymbolT symbol, SymbolRefAttr newSymbol, IRUnitT *limit) { // Generate a new attribute to replace the given attribute. - FlatSymbolRefAttr newLeafAttr = FlatSymbolRefAttr::get(newSymbol); for (SymbolScope &scope : collectSymbolScopes(symbol, limit)) { SymbolRefAttr oldAttr = scope.symbol; - SymbolRefAttr newAttr = generateNewRefAttr(scope.symbol, newLeafAttr); + SymbolRefAttr newAttr = generateNewRefAttr(scope.symbol, newSymbol); AttrTypeReplacer replacer; replacer.addReplacement( [&](SymbolRefAttr attr) -> std::pair { @@ -895,11 +899,12 @@ replaceAllSymbolUsesImpl(SymbolT symbol, StringAttr newSymbol, IRUnitT *limit) { auto oldNestedRefs = oldAttr.getNestedReferences(); auto nestedRefs = attr.getNestedReferences(); if (oldNestedRefs.empty()) - return {SymbolRefAttr::get(newSymbol, nestedRefs), - WalkResult::skip()}; + return {newAttr, WalkResult::skip()}; auto newNestedRefs = llvm::to_vector<4>(nestedRefs); - newNestedRefs[oldNestedRefs.size() - 1] = newLeafAttr; + newNestedRefs[oldNestedRefs.size() - 1] = FlatSymbolRefAttr::get(newAttr.getRootReference()); + newNestedRefs.append(newAttr.getNestedReferences().begin(), + newAttr.getNestedReferences().end()); return {SymbolRefAttr::get(attr.getRootReference(), newNestedRefs), 
WalkResult::skip()}; } @@ -924,21 +929,38 @@ replaceAllSymbolUsesImpl(SymbolT symbol, StringAttr newSymbol, IRUnitT *limit) { LogicalResult SymbolTable::replaceAllSymbolUses(StringAttr oldSymbol, StringAttr newSymbol, Operation *from) { - return replaceAllSymbolUsesImpl(oldSymbol, newSymbol, from); + auto newSymRef = mlir::FlatSymbolRefAttr::get(newSymbol); + return replaceAllSymbolUsesImpl(oldSymbol, newSymRef, from); } LogicalResult SymbolTable::replaceAllSymbolUses(Operation *oldSymbol, StringAttr newSymbol, Operation *from) { - return replaceAllSymbolUsesImpl(oldSymbol, newSymbol, from); + auto newSymRef = mlir::FlatSymbolRefAttr::get(newSymbol); + return replaceAllSymbolUsesImpl(oldSymbol, newSymRef, from); } LogicalResult SymbolTable::replaceAllSymbolUses(StringAttr oldSymbol, StringAttr newSymbol, Region *from) { - return replaceAllSymbolUsesImpl(oldSymbol, newSymbol, from); + auto newSymRef = mlir::FlatSymbolRefAttr::get(newSymbol); + return replaceAllSymbolUsesImpl(oldSymbol, newSymRef, from); } LogicalResult SymbolTable::replaceAllSymbolUses(Operation *oldSymbol, StringAttr newSymbol, Region *from) { + auto newSymRef = mlir::FlatSymbolRefAttr::get(newSymbol); + return replaceAllSymbolUsesImpl(oldSymbol, newSymRef, from); +} + +LogicalResult SymbolTable::replaceAllSymbolUses(Operation* oldSymbol, + SymbolRefAttr newSymbol, + Operation* from) { + return replaceAllSymbolUsesImpl(oldSymbol, newSymbol, from); +} + + +LogicalResult SymbolTable::replaceAllSymbolUses(Operation* oldSymbol, + SymbolRefAttr newSymbol, + Region* from) { return replaceAllSymbolUsesImpl(oldSymbol, newSymbol, from); } @@ -1076,7 +1098,7 @@ SymbolUserMap::SymbolUserMap(SymbolTableCollection &symbolTable, } void SymbolUserMap::replaceAllUsesWith(Operation *symbol, - StringAttr newSymbolName) { + SymbolRefAttr newSymbolName) { auto it = symbolToUsers.find(symbol); if (it == symbolToUsers.end()) return; @@ -1103,6 +1125,11 @@ void SymbolUserMap::replaceAllUsesWith(Operation *symbol, } } 
+void SymbolUserMap::replaceAllUsesWith(Operation *symbol, + StringAttr newSymbolName) { + replaceAllUsesWith(symbol, mlir::FlatSymbolRefAttr::get(newSymbolName)); +} + //===----------------------------------------------------------------------===// // Visibility parsing implementation. //===----------------------------------------------------------------------===// diff --git a/mlir/lib/Pass/Pass.cpp b/mlir/lib/Pass/Pass.cpp index 6fd51c1e3cb5..d9ef75508499 100644 --- a/mlir/lib/Pass/Pass.cpp +++ b/mlir/lib/Pass/Pass.cpp @@ -77,6 +77,12 @@ void Pass::copyOptionValuesFrom(const Pass *other) { passOptions.copyOptionValuesFrom(other->passOptions); } +/// Copy the option values from 'other', which are PassPipeline options. +/// Here we copy only those options that have the same argument name. +void Pass::copyOptionValuesFrom(const PassOptions &other) { + passOptions.matchAndCopyOptionValuesFrom(other); +} + /// Prints out the pass in the textual representation of pipelines. If this is /// an adaptor pass, print its pass managers. void Pass::printAsTextualPipeline(raw_ostream &os) { diff --git a/mlir/lib/Pass/PassRegistry.cpp b/mlir/lib/Pass/PassRegistry.cpp index ece2fdaed0df..c382c6c7098e 100644 --- a/mlir/lib/Pass/PassRegistry.cpp +++ b/mlir/lib/Pass/PassRegistry.cpp @@ -277,6 +277,19 @@ void detail::PassOptions::copyOptionValuesFrom(const PassOptions &other) { std::get<0>(optionsIt)->copyValueFrom(*std::get<1>(optionsIt)); } +/// Copy only those options that have the same argument name. +void detail::PassOptions::matchAndCopyOptionValuesFrom(const PassOptions &other) { + for (auto* optionsIt : other.options) { + const auto& it = llvm::find_if(options, [&](OptionBase * option) { + return option->getArgStr() == optionsIt->getArgStr(); + }); + + if (it != options.end()) { + (*it)->copyValueFrom(*optionsIt); + } + } +} + /// Parse in the next argument from the given options string. 
Returns a tuple /// containing [the key of the option, the value of the option, updated /// `options` string pointing after the parsed option]. diff --git a/mlir/lib/Tools/mlir-opt/MlirOptMain.cpp b/mlir/lib/Tools/mlir-opt/MlirOptMain.cpp index 9bbf91de1830..df8109f7ca0e 100644 --- a/mlir/lib/Tools/mlir-opt/MlirOptMain.cpp +++ b/mlir/lib/Tools/mlir-opt/MlirOptMain.cpp @@ -630,13 +630,41 @@ LogicalResult mlir::MlirOptMain(llvm::raw_ostream &outputStream, config.outputSplitMarker()); } -LogicalResult mlir::MlirOptMain(int argc, char **argv, - llvm::StringRef inputFilename, - llvm::StringRef outputFilename, - DialectRegistry ®istry) { +LogicalResult mlir::MlirOptMain(int argc, char **argv, llvm::StringRef toolName, + DialectRegistry ®istry, + const AdditionalRegistrationFn &additionalRegistration) { + static cl::opt inputFilename( + cl::Positional, cl::desc(""), cl::init("-")); + + static cl::opt outputFilename("o", cl::desc("Output filename"), + cl::value_desc("filename"), + cl::init("-")); InitLLVM y(argc, argv); + // Register any command line options. + registerAsmPrinterCLOptions(); + registerMLIRContextCLOptions(); + registerPassManagerCLOptions(); + registerDefaultTimingManagerCLOptions(); + tracing::DebugCounter::registerCLOptions(); + + // Build the list of dialects as a header for the --help message. + std::string helpHeader = (toolName + "\nAvailable Dialects: ").str(); + { + llvm::raw_string_ostream os(helpHeader); + interleaveComma(registry.getDialectNames(), os, + [&](auto name) { os << name; }); + } + + // It is not possible to place a call after command line parser + // since not all options are registered at the moment + additionalRegistration(helpHeader); + + MlirOptMainConfig::registerCLOptions(registry); + + // Parse pass names in main to ensure static initialization completed. 
+ cl::ParseCommandLineOptions(argc, argv, helpHeader); MlirOptMainConfig config = MlirOptMainConfig::createFromCLOptions(); if (config.shouldShowDialects()) @@ -673,14 +701,3 @@ LogicalResult mlir::MlirOptMain(int argc, char **argv, output->keep(); return success(); } - -LogicalResult mlir::MlirOptMain(int argc, char **argv, llvm::StringRef toolName, - DialectRegistry ®istry) { - - // Register and parse command line options. - std::string inputFilename, outputFilename; - std::tie(inputFilename, outputFilename) = - registerAndParseCLIOptions(argc, argv, toolName, registry); - - return MlirOptMain(argc, argv, inputFilename, outputFilename, registry); -} diff --git a/mlir/lib/Transforms/Utils/Inliner.cpp b/mlir/lib/Transforms/Utils/Inliner.cpp index 756f5e379e7d..10c4709e1c74 100644 --- a/mlir/lib/Transforms/Utils/Inliner.cpp +++ b/mlir/lib/Transforms/Utils/Inliner.cpp @@ -688,7 +688,8 @@ Inliner::Impl::inlineCallsInSCC(InlinerInterfaceImpl &inlinerIface, useList.mergeUsesAfterInlining(it.targetNode, it.sourceNode); // then erase the call. - call.erase(); + const auto *callInterface = inlinerIface.getInterfaceFor(call->getDialect()); + callInterface->eraseCall(call); // If we inlined in place, mark the node for deletion. 
if (inlineInPlace) { diff --git a/mlir/lib/Transforms/Utils/InliningUtils.cpp b/mlir/lib/Transforms/Utils/InliningUtils.cpp index 0cae63c58ca7..b928302716fe 100644 --- a/mlir/lib/Transforms/Utils/InliningUtils.cpp +++ b/mlir/lib/Transforms/Utils/InliningUtils.cpp @@ -69,6 +69,19 @@ static void remapInlinedOperands(iterator_range inlinedBlocks, block.walk(remapOperands); } +//===----------------------------------------------------------------------===// +// DialectInlinerInterface +//===----------------------------------------------------------------------===// + +void DialectInlinerInterface::eraseCall(Operation *call) const { + call->erase(); +} + +std::tuple + DialectInlinerInterface::getInlineBlockAndPoint(Operation *call) const { + return std::make_tuple(call->getBlock(), std::next(call->getIterator())); +} + //===----------------------------------------------------------------------===// // InlinerInterface //===----------------------------------------------------------------------===// @@ -530,9 +543,11 @@ LogicalResult mlir::inlineCall(InlinerInterface &interface, if (!interface.isLegalToInline(call, callable, shouldCloneInlinedRegion)) return cleanupState(); + auto [inlineBlock, inlinePoint] = callInterface->getInlineBlockAndPoint(call); + // Attempt to inline the call. 
- if (failed(inlineRegionImpl(interface, src, call->getBlock(), - ++call->getIterator(), mapper, callResults, + if (failed(inlineRegionImpl(interface, src, inlineBlock, + inlinePoint, mapper, callResults, callableResultTypes, call.getLoc(), shouldCloneInlinedRegion, call))) return cleanupState(); diff --git a/mlir/python/mlir/_mlir_libs/_mlir/dialects/quant.pyi b/mlir/python/mlir/_mlir_libs/_mlir/dialects/quant.pyi index 47168d49c556..3f5304584ede 100644 --- a/mlir/python/mlir/_mlir_libs/_mlir/dialects/quant.pyi +++ b/mlir/python/mlir/_mlir_libs/_mlir/dialects/quant.pyi @@ -3,7 +3,7 @@ # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -from mlir.ir import Type +from mlir.ir import DenseElementsAttr, Type __all__ = [ "QuantizedType", @@ -109,6 +109,26 @@ class UniformQuantizedPerAxisType(QuantizedType): @property def is_fixed_point(self) -> bool: ... +class UniformQuantizedSubChannelType(QuantizedType): + + @classmethod + def get(cls, flags: int, storage_type: Type, expressed_type: Type, + scales: DenseElementsAttr, zero_points: DenseElementsAttr, + quantized_dimensions: list[int], block_sizes: list[int], + storage_type_min: int, storage_type_max: int): + ... + + @property + def quantized_dimensions(self) -> list[int]: ... + + @property + def block_sizes(self) -> list[int]: ... + + @property + def scales(self) -> DenseElementsAttr: ... + + @property + def zero_points(self) -> DenseElementsAttr: ... 
def CalibratedQuantizedType(QuantizedType): diff --git a/mlir/test/CAPI/quant.c b/mlir/test/CAPI/quant.c index 0a09e084119f..30f376ebeb11 100644 --- a/mlir/test/CAPI/quant.c +++ b/mlir/test/CAPI/quant.c @@ -10,6 +10,7 @@ // RUN: mlir-capi-quant-test 2>&1 | FileCheck %s #include "mlir-c/Dialect/Quant.h" +#include "mlir-c/BuiltinAttributes.h" #include "mlir-c/BuiltinTypes.h" #include "mlir-c/IR.h" @@ -203,6 +204,130 @@ void testUniformPerAxisType(MlirContext ctx) { fprintf(stderr, "\n\n"); } +// CHECK-LABEL: testUniformSubChannelType +void testUniformSubChannelType(MlirContext ctx) { + fprintf(stderr, "testUniformSubChannelType\n"); + + MlirType subChannelParsed = + mlirTypeParseGet(ctx, mlirStringRefCreateFromCString( + "!quant.uniform")); + + MlirType i8 = mlirIntegerTypeGet(ctx, 8); + MlirType f32 = mlirF32TypeGet(ctx); + + // block-size information + int32_t quantizedDimensions[] = {0, 1}; + int64_t blockSizes[] = {1, 2}; + int64_t numBlockSizes = 2; + + // quantization parameters + int64_t quantParamShape[] = {2, 2}; + int64_t quantParamRank = 2; + int64_t numQuantizationParams = 4; + MlirAttribute scales[] = {mlirFloatAttrDoubleGet(ctx, f32, 2.0), + mlirFloatAttrDoubleGet(ctx, f32, 3.0), + mlirFloatAttrDoubleGet(ctx, f32, 4.0), + mlirFloatAttrDoubleGet(ctx, f32, 5.0)}; + MlirAttribute zeroPoints[] = { + mlirIntegerAttrGet(i8, 10), mlirIntegerAttrGet(i8, 20), + mlirIntegerAttrGet(i8, 30), mlirIntegerAttrGet(i8, 40)}; + + MlirType scalesType = + mlirRankedTensorTypeGet(quantParamRank, quantParamShape, f32, + /*encoding=*/mlirAttributeGetNull()); + MlirType zeroPointsType = mlirRankedTensorTypeGet( + quantParamRank, quantParamShape, i8, /*encoding=*/mlirAttributeGetNull()); + MlirAttribute denseScalesAttr = + mlirDenseElementsAttrGet(scalesType, numQuantizationParams, scales); + MlirAttribute denseZeroPointsAttr = mlirDenseElementsAttrGet( + zeroPointsType, numQuantizationParams, zeroPoints); + + MlirType subChannel = mlirUniformQuantizedSubChannelTypeGet( + 
mlirQuantizedTypeGetSignedFlag(), i8, f32, denseScalesAttr, + denseZeroPointsAttr, numBlockSizes, quantizedDimensions, blockSizes, + mlirQuantizedTypeGetDefaultMinimumForInteger(/*isSigned=*/true, + /*integralWidth=*/8), + mlirQuantizedTypeGetDefaultMaximumForInteger(/*isSigned=*/true, + /*integralWidth=*/8)); + + MlirAttribute arrayScalesAttr = + mlirArrayAttrGet(ctx, numQuantizationParams, scales); + MlirAttribute arrayZeroPointsAttr = + mlirArrayAttrGet(ctx, numQuantizationParams, zeroPoints); + MlirType illegalSubChannel = mlirUniformQuantizedSubChannelTypeGet( + mlirQuantizedTypeGetSignedFlag(), i8, f32, arrayScalesAttr, + arrayZeroPointsAttr, numBlockSizes, quantizedDimensions, blockSizes, + mlirQuantizedTypeGetDefaultMinimumForInteger(/*isSigned=*/true, + /*integralWidth=*/8), + mlirQuantizedTypeGetDefaultMaximumForInteger(/*isSigned=*/true, + /*integralWidth=*/8)); + + // CHECK: is null sub-channel type: 1 + fprintf(stderr, "is null sub-channel type: %d\n", + mlirTypeIsNull(illegalSubChannel)); + + // CHECK: num dims: 2 + fprintf(stderr, "num dims: %" PRId64 "\n", + mlirUniformQuantizedSubChannelTypeGetNumBlockSizes(subChannel)); + + // CHECK: axis-block-size-pair[0]: 0:1 + fprintf( + stderr, "axis-block-size-pair[0]: %" PRId32 ":%" PRId64 "\n", + mlirUniformQuantizedSubChannelTypeGetQuantizedDimension(subChannel, 0), + mlirUniformQuantizedSubChannelTypeGetBlockSize(subChannel, 0)); + + // CHECK: axis-block-size-pair[1]: 1:2 + fprintf( + stderr, "axis-block-size-pair[1]: %" PRId32 ":%" PRId64 "\n", + mlirUniformQuantizedSubChannelTypeGetQuantizedDimension(subChannel, 1), + mlirUniformQuantizedSubChannelTypeGetBlockSize(subChannel, 1)); + + denseScalesAttr = mlirUniformQuantizedSubChannelTypeGetScales(subChannel); + denseZeroPointsAttr = + mlirUniformQuantizedSubChannelTypeGetZeroPoints(subChannel); + scalesType = mlirAttributeGetType(denseScalesAttr); + zeroPointsType = mlirAttributeGetType(denseZeroPointsAttr); + + // CHECK: tensor<2x2xf32> + 
mlirTypeDump(scalesType); + // CHECK: tensor<2x2xi8> + mlirTypeDump(zeroPointsType); + + // CHECK: number of quantization parameters: 4 + fprintf(stderr, "number of quantization parameters: %" PRId64 "\n", + mlirElementsAttrGetNumElements(denseScalesAttr)); + + // CHECK: quantization-parameter[0]: 2.000000:10 + fprintf(stderr, "quantization-parameter[0]: %lf:%" PRId8 "\n", + mlirDenseElementsAttrGetFloatValue(denseScalesAttr, 0), + mlirDenseElementsAttrGetInt8Value(denseZeroPointsAttr, 0)); + + // CHECK: quantization-parameter[1]: 3.000000:20 + fprintf(stderr, "quantization-parameter[1]: %lf:%" PRId8 "\n", + mlirDenseElementsAttrGetFloatValue(denseScalesAttr, 1), + mlirDenseElementsAttrGetInt8Value(denseZeroPointsAttr, 1)); + + // CHECK: quantization-parameter[2]: 4.000000:30 + fprintf(stderr, "quantization-parameter[2]: %lf:%" PRId8 "\n", + mlirDenseElementsAttrGetFloatValue(denseScalesAttr, 2), + mlirDenseElementsAttrGetInt8Value(denseZeroPointsAttr, 2)); + + // CHECK: quantization-parameter[3]: 5.000000:40 + fprintf(stderr, "quantization-parameter[3]: %lf:%" PRId8 "\n", + mlirDenseElementsAttrGetFloatValue(denseScalesAttr, 3), + mlirDenseElementsAttrGetInt8Value(denseZeroPointsAttr, 3)); + + // CHECK: equal: 1 + fprintf(stderr, "equal: %d\n", mlirTypeEqual(subChannel, subChannelParsed)); + + // CHECK: !quant.uniform + mlirTypeDump(subChannel); + fprintf(stderr, "\n\n"); +} + // CHECK-LABEL: testCalibratedType void testCalibratedType(MlirContext ctx) { fprintf(stderr, "testCalibratedType\n"); @@ -233,6 +358,7 @@ int main(void) { testAnyQuantizedType(ctx); testUniformType(ctx); testUniformPerAxisType(ctx); + testUniformSubChannelType(ctx); testCalibratedType(ctx); mlirContextDestroy(ctx); return EXIT_SUCCESS; diff --git a/mlir/test/Conversion/TosaToTensor/tosa-to-tensor-invalid.mlir b/mlir/test/Conversion/TosaToTensor/tosa-to-tensor-invalid.mlir index 36eb4d4669b0..bf5e65f1b3f7 100644 --- a/mlir/test/Conversion/TosaToTensor/tosa-to-tensor-invalid.mlir +++ 
b/mlir/test/Conversion/TosaToTensor/tosa-to-tensor-invalid.mlir @@ -1,3 +1,4 @@ +// REQUIRES: tosa-to-tensor-enabled // RUN: mlir-opt --split-input-file -pass-pipeline="builtin.module(func.func(tosa-to-tensor))" %s -verify-diagnostics // CHECK-LABEL: @slice_resultType_unranked diff --git a/mlir/test/Conversion/TosaToTensor/tosa-to-tensor.mlir b/mlir/test/Conversion/TosaToTensor/tosa-to-tensor.mlir index 27018fb79f60..7be8f417ee2f 100644 --- a/mlir/test/Conversion/TosaToTensor/tosa-to-tensor.mlir +++ b/mlir/test/Conversion/TosaToTensor/tosa-to-tensor.mlir @@ -1,3 +1,4 @@ +// REQUIRES: tosa-to-tensor-enabled // RUN: mlir-opt --split-input-file --tosa-to-tensor %s -o -| FileCheck %s // ----- diff --git a/mlir/test/Conversion/VectorToXeGPU/load-to-xegpu.mlir b/mlir/test/Conversion/VectorToXeGPU/load-to-xegpu.mlir index 7cef17df79dd..682ab6ec086f 100644 --- a/mlir/test/Conversion/VectorToXeGPU/load-to-xegpu.mlir +++ b/mlir/test/Conversion/VectorToXeGPU/load-to-xegpu.mlir @@ -1,3 +1,4 @@ +// REQUIRES: xegpu-dialect-enabled // RUN: mlir-opt %s -convert-vector-to-xegpu -split-input-file | FileCheck %s func.func @load_1D_vector(%source: memref<8x16x32xf32>, %offset: index) -> vector<8xf32> { diff --git a/mlir/test/Conversion/VectorToXeGPU/store-to-xegpu.mlir b/mlir/test/Conversion/VectorToXeGPU/store-to-xegpu.mlir index 4f069ebc39db..404c612e48df 100644 --- a/mlir/test/Conversion/VectorToXeGPU/store-to-xegpu.mlir +++ b/mlir/test/Conversion/VectorToXeGPU/store-to-xegpu.mlir @@ -1,3 +1,4 @@ +// REQUIRES: xegpu-dialect-enabled // RUN: mlir-opt %s -convert-vector-to-xegpu -split-input-file | FileCheck %s func.func @store_1D_vector(%vec: vector<8xf32>, diff --git a/mlir/test/Conversion/VectorToXeGPU/transfer-read-to-xegpu.mlir b/mlir/test/Conversion/VectorToXeGPU/transfer-read-to-xegpu.mlir index 497eb86cea83..284c06c3b401 100644 --- a/mlir/test/Conversion/VectorToXeGPU/transfer-read-to-xegpu.mlir +++ b/mlir/test/Conversion/VectorToXeGPU/transfer-read-to-xegpu.mlir @@ -1,3 +1,4 
@@ +// REQUIRES: xegpu-dialect-enabled // RUN: mlir-opt %s -convert-vector-to-xegpu -split-input-file | FileCheck %s func.func @load_1D_vector(%source: memref<8x16x32xf32>, %offset: index) -> vector<8xf32> { diff --git a/mlir/test/Conversion/VectorToXeGPU/transfer-write-to-xegpu.mlir b/mlir/test/Conversion/VectorToXeGPU/transfer-write-to-xegpu.mlir index 91e3fb3841f6..6cfb19b4142d 100644 --- a/mlir/test/Conversion/VectorToXeGPU/transfer-write-to-xegpu.mlir +++ b/mlir/test/Conversion/VectorToXeGPU/transfer-write-to-xegpu.mlir @@ -1,3 +1,4 @@ +// REQUIRES: xegpu-dialect-enabled // RUN: mlir-opt %s -convert-vector-to-xegpu -split-input-file | FileCheck %s func.func @store_1D_vector(%vec: vector<8xf32>, diff --git a/mlir/test/Dialect/LLVMIR/global.mlir b/mlir/test/Dialect/LLVMIR/global.mlir index 79d1cafabfbe..7ebe0a7114e2 100644 --- a/mlir/test/Dialect/LLVMIR/global.mlir +++ b/mlir/test/Dialect/LLVMIR/global.mlir @@ -132,7 +132,7 @@ llvm.mlir.global internal constant @constant(37.0) : !llvm.label // ----- func.func @foo() { - // expected-error @+1 {{op symbol's parent must have the SymbolTable trait}} + // expected-error @+1 {{must appear at the module level}} llvm.mlir.global internal @bar(42) : i32 return diff --git a/mlir/test/Dialect/LLVMIR/invalid.mlir b/mlir/test/Dialect/LLVMIR/invalid.mlir index 25806d9d0edd..3ef398e005e0 100644 --- a/mlir/test/Dialect/LLVMIR/invalid.mlir +++ b/mlir/test/Dialect/LLVMIR/invalid.mlir @@ -90,20 +90,6 @@ func.func @alloca_non_integer_alignment() { // ----- -func.func @gep_missing_input_result_type(%pos : i64, %base : !llvm.ptr) { - // expected-error@+1 {{number of operands and types do not match: got 2 operands and 0 types}} - llvm.getelementptr %base[%pos] : () -> (), i64 -} - -// ----- - -func.func @gep_missing_input_type(%pos : i64, %base : !llvm.ptr) { - // expected-error@+1 {{number of operands and types do not match: got 2 operands and 0 types}} - llvm.getelementptr %base[%pos] : () -> (!llvm.ptr), i64 -} - -// ----- - 
func.func @gep_missing_result_type(%pos : i64, %base : !llvm.ptr) { // expected-error@+1 {{op requires one result}} llvm.getelementptr %base[%pos] : (!llvm.ptr, i64) -> (), i64 diff --git a/mlir/test/Dialect/Linalg/transform-op-replace.mlir b/mlir/test/Dialect/Linalg/transform-op-replace.mlir index 1a40912977de..2801522e81ac 100644 --- a/mlir/test/Dialect/Linalg/transform-op-replace.mlir +++ b/mlir/test/Dialect/Linalg/transform-op-replace.mlir @@ -12,10 +12,8 @@ module attributes {transform.with_named_sequence} { transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) { %0 = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.any_op transform.structured.replace %0 { - builtin.module { - func.func @foo() { - "dummy_op"() : () -> () - } + func.func @foo() { + "dummy_op"() : () -> () } } : (!transform.any_op) -> !transform.any_op transform.yield diff --git a/mlir/test/Dialect/Quant/invalid.mlir b/mlir/test/Dialect/Quant/invalid.mlir index ba3a8e312d96..7bb50f352f93 100644 --- a/mlir/test/Dialect/Quant/invalid.mlir +++ b/mlir/test/Dialect/Quant/invalid.mlir @@ -256,3 +256,71 @@ func.func @scast_per_axis_invalid_rank(%arg0: tensor<2x3x4xi8>) { return } +// ----- + +!qalias = !quant.uniform +func.func @qcast_sub_channel_scalar(%arg0: f32) { + // expected-error@+1 {{scalar types may not use sub-channel quantization}} + %0 = quant.qcast %arg0 : f32 to !qalias + return +} + +// ----- + +!qalias = !quant.uniform +func.func @qcast_sub_channel_unranked(%arg0: tensor<*xf32>) { + // expected-error@+1 {{tensor containing the sub-channel quantized type must be ranked}} + %0 = quant.qcast %arg0 : tensor<*xf32> to tensor<*x!qalias> + return +} + +// ----- + +!qalias = !quant.uniform +func.func @qcast_sub_channel_invalid_quantized_dimension(%arg0: tensor<2x4xf32>) { + // expected-error@+1 {{quantized dimension 3 must be less than tensor rank 2}} + %0 = quant.qcast %arg0 : tensor<2x4xf32> to tensor<2x4x!qalias> + 
return +} + +// ----- + +!qalias = !quant.uniform +func.func @qcast_sub_channel_invalid_tensor_dim_size(%arg0: tensor<2x4xf32>) { + // expected-error@+1 {{tensor dimension size 4 at axis 1 must be divisible by the corresponding block size 3}} + %0 = quant.qcast %arg0 : tensor<2x4xf32> to tensor<2x4x!qalias> + return +} + +// ----- + +!qalias = !quant.uniform +func.func @qcast_sub_channel_invalid_zero_tensor_dim_size(%arg0: tensor<0x4xf32>) { + // expected-error@+1 {{tensor dimension size of zero is not allowed with sub-channel quantization}} + %0 = quant.qcast %arg0 : tensor<0x4xf32> to tensor<0x4x!qalias> + return +} + +// ----- + +!qalias = !quant.uniform +func.func @qcast_sub_channel_invalid_scale_dim_size(%arg0: tensor<2x4xf32>) { + // expected-error@+1 {{dimension size 2 of scales tensor at axis 1 should match (tensor dimension at axis / block sizes at axis) = 2}} + %0 = quant.qcast %arg0 : tensor<2x4xf32> to tensor<2x4x!qalias> + return +} + +// ----- + +!qalias = !quant.uniform +func.func @qcast_sub_channel_invalid_scale_dim_size(%arg0: tensor) { + // expected-error@+1 {{Rank of scales 3 must match the rank of the tensor 2}} + %0 = quant.qcast %arg0 : tensor to tensor + return +} diff --git a/mlir/test/Dialect/Quant/lower-quant-ops.mlir b/mlir/test/Dialect/Quant/lower-quant-ops.mlir index 6bba9f5c0377..23c34b906dd4 100644 --- a/mlir/test/Dialect/Quant/lower-quant-ops.mlir +++ b/mlir/test/Dialect/Quant/lower-quant-ops.mlir @@ -509,3 +509,67 @@ func.func @qcast_per_channel_unranked(%arg0: tensor<*xf32>) -> tensor<*x!qalias> return %0 : tensor<*x!qalias> } +// ----- + +// CHECK: #[[$ATTR_0:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)> +// CHECK: #[[$ATTR_1:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, 0, 0, d3 floordiv 2)> + +// CHECK-LABEL: @qcast_sub_channel_ranked +// CHECK-SAME: %[[ARG_0:.*]]: tensor<2x?x?x4xf32> + +// CHECK: %[[SCALES:.*]] = arith.constant dense<{{.*}}2.000000e+00, 3.000000e+00{{.*}}, {{.*}}4.000000e+00, 5.000000e+00{{.*}}> : 
tensor<2x1x1x2xf32> +// CHECK: %[[ZERO_POINTS:.*]] = arith.constant dense<{{.*}}10, 20{{.*}}, {{.*}}30, 40{{.*}}> : tensor<2x1x1x2xi8> + +// CHECK-DAG: %[[C_1:.*]] = arith.constant 1 : index +// CHECK-DAG: %[[DIM_1:.*]] = tensor.dim %[[ARG_0]], %[[C_1]] : tensor<2x?x?x4xf32> +// CHECK-DAG: %[[C_2:.*]] = arith.constant 2 : index +// CHECK-DAG: %[[DIM_2:.*]] = tensor.dim %[[ARG_0]], %[[C_2]] : tensor<2x?x?x4xf32> +// CHECK: %[[INIT:.*]] = tensor.empty(%[[DIM_1]], %[[DIM_2]]) : tensor<2x?x?x4xi8> + +// CHECK: %[[GENERIC:.*]] = linalg.generic {indexing_maps = [#[[$ATTR_0]], #[[$ATTR_1]], #[[$ATTR_1]], #[[$ATTR_0]]], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%[[ARG_0]], %[[SCALES]], %[[ZERO_POINTS]] : tensor<2x?x?x4xf32>, tensor<2x1x1x2xf32>, tensor<2x1x1x2xi8>) outs(%[[INIT]] : tensor<2x?x?x4xi8>) { +// CHECK: ^bb0(%[[IN:.*]]: f32, %[[SCALE:.*]]: f32, %[[ZERO_POINT:.*]]: i8, %[[OUT:.*]]: i8): +// CHECK: %[[SCALED:.*]] = arith.divf %[[IN]], %[[SCALE]] : f32 +// CHECK: %[[ZERO_POINT_FLOAT:.*]] = arith.sitofp %[[ZERO_POINT]] : i8 to f32 +// CHECK: %[[STORED_FLOAT:.*]] = arith.addf %[[SCALED]], %[[ZERO_POINT_FLOAT]] : f32 +// CHECK: %[[STORED_INT:.*]] = arith.fptosi %[[STORED_FLOAT]] : f32 to i8 +// CHECK: linalg.yield %[[STORED_INT]] : i8 +// CHECK: } -> tensor<2x?x?x4xi8> + +// CHECK: %[[STORED_QUANT:.*]] = quant.scast %[[GENERIC]] : tensor<2x?x?x4xi8> to tensor<2x?x?x4x!quant.uniform> +// CHECK: return %[[STORED_QUANT]] + +!qalias = !quant.uniform +func.func @qcast_sub_channel_ranked(%arg0: tensor<2x?x?x4xf32>) -> tensor<2x?x?x4x!qalias> { + %0 = quant.qcast %arg0 : tensor<2x?x?x4xf32> to tensor<2x?x?x4x!qalias> + return %0 : tensor<2x?x?x4x!qalias> +} + +// ----- + +// CHECK: #[[$ATTR_0:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)> +// CHECK: #[[$ATTR_1:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, 0, 0, d3 floordiv 2)> + +// CHECK-LABEL: @qcast_sub_channel_ranked_bounds +// CHECK-SAME: %[[ARG_0:.*]]: tensor<2x3x5x4xf32> + +// CHECK: 
%[[SCALES:.*]] = arith.constant dense<{{.*}}2.000000e+00, 3.000000e+00{{.*}}, {{.*}}4.000000e+00, 5.000000e+00{{.*}}> : tensor<2x1x1x2xf32> +// CHECK: %[[ZERO_POINTS:.*]] = arith.constant dense<{{.*}}10, 20{{.*}}, {{.*}}30, 40{{.*}}> : tensor<2x1x1x2xi8> + +// CHECK: %[[INIT:.*]] = tensor.empty() : tensor<2x3x5x4xi8> +// CHECK: %[[GENERIC:.*]] = linalg.generic {indexing_maps = [#[[$ATTR_0]], #[[$ATTR_1]], #[[$ATTR_1]], #[[$ATTR_0]]], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%[[ARG_0]], %[[SCALES]], %[[ZERO_POINTS]] : tensor<2x3x5x4xf32>, tensor<2x1x1x2xf32>, tensor<2x1x1x2xi8>) outs(%[[INIT]] : tensor<2x3x5x4xi8>) { +// CHECK: ^bb0(%[[IN:.*]]: f32, %[[SCALE:.*]]: f32, %[[ZERO_POINT:.*]]: i8, %[[OUT:.*]]: i8): +// CHECK: %[[SCALED:.*]] = arith.divf %[[IN]], %[[SCALE]] : f32 +// CHECK: %[[ZERO_POINT_FLOAT:.*]] = arith.sitofp %[[ZERO_POINT]] : i8 to f32 +// CHECK: %[[STORED_FLOAT:.*]] = arith.addf %[[SCALED]], %[[ZERO_POINT_FLOAT]] : f32 +// CHECK: %[[STORED_INT:.*]] = arith.fptosi %[[STORED_FLOAT]] : f32 to i8 +// CHECK: linalg.yield %[[STORED_INT]] : i8 +// CHECK: } -> tensor<2x3x5x4xi8> + +// CHECK: %[[STORED_QUANT:.*]] = quant.scast %[[GENERIC]] : tensor<2x3x5x4xi8> to tensor<2x3x5x4x!quant.uniform> +// CHECK: return %[[STORED_QUANT]] + +!qalias = !quant.uniform +func.func @qcast_sub_channel_ranked_bounds(%arg0: tensor<2x3x5x4xf32>) -> tensor<2x3x5x4x!qalias> { + %0 = quant.qcast %arg0 : tensor<2x3x5x4xf32> to tensor<2x3x5x4x!qalias> + return %0 : tensor<2x3x5x4x!qalias> +} diff --git a/mlir/test/Dialect/Quant/normalize-quant-types.mlir b/mlir/test/Dialect/Quant/normalize-quant-types.mlir new file mode 100644 index 000000000000..573781c9ecc0 --- /dev/null +++ b/mlir/test/Dialect/Quant/normalize-quant-types.mlir @@ -0,0 +1,51 @@ +// RUN: mlir-opt %s --normalize-quant-types --split-input-file | FileCheck %s + +// CHECK-LABEL: @callee( +// CHECK-SAME: [[PER_TENSOR:tensor<\?x\?x!quant.uniform>]], +// CHECK-SAME: [[PER_TENSOR]] +// 
CHECK-SAME: ([[PER_TENSOR]], [[PER_TENSOR]]) +// CHECK-LABEL: @normalize_quant_types_to_per_tensor +// CHECK-SAME: %[[ARG_0:.*]]: [[PER_TENSOR:tensor<\?x\?x!quant.uniform>]], +// CHECK-SAME: %[[ARG_1:.*]]: [[PER_TENSOR]] +// CHECK-SAME: ([[PER_TENSOR]], [[PER_TENSOR]]) +// CHECK: %[[TEMP_0:.*]] = "test.custom_op"(%[[ARG_0]]) : ([[PER_TENSOR]]) -> [[PER_TENSOR]] +// CHECK: %[[TEMP_1:.*]] = "test.custom_op"(%[[ARG_1]]) : ([[PER_TENSOR]]) -> [[PER_TENSOR]] +// CHECK: %[[TEMP_3:.*]]:2 = call @callee(%[[TEMP_0]], %[[TEMP_1]]) +// CHECK: return %[[TEMP_3]]#0, %[[TEMP_3]]#1 : [[PER_TENSOR]], [[PER_TENSOR]] + +!qalias1 = !quant.uniform +!qalias2 = !quant.uniform + +func.func private @callee(tensor, tensor) -> (tensor, tensor) + +func.func @normalize_quant_types_to_per_tensor(%arg0: tensor, + %arg1: tensor) -> (tensor, tensor) { + %0 = "test.custom_op"(%arg0) : (tensor) -> tensor + %1 = "test.custom_op"(%arg1) : (tensor) -> tensor + %3:2 = func.call @callee(%0, %1) : (tensor, tensor) -> (tensor, tensor) + return %3#0, %3#1 : tensor, tensor +} + +// ----- + +// CHECK-LABEL: @normalize_quant_types_to_per_axis +// CHECK-SAME: %[[ARG_0:.*]]: [[PER_AXIS:tensor<\?x\?x!quant.uniform>]], +// CHECK-SAME: %[[ARG_1:.*]]: [[PER_AXIS]] +// CHECK-SAME: ([[PER_AXIS]], [[PER_AXIS]]) +// CHECK: %[[TEMP_0:.*]] = "test.custom_op"(%[[ARG_0]]) : ([[PER_AXIS]]) -> [[PER_AXIS]] +// CHECK: %[[TEMP_1:.*]] = "test.custom_op"(%[[ARG_1]]) : ([[PER_AXIS]]) -> [[PER_AXIS]] +// CHECK: %[[TEMP_3:.*]]:2 = call @callee(%[[TEMP_0]], %[[TEMP_1]]) +// CHECK: return %[[TEMP_3]]#0, %[[TEMP_3]]#1 : [[PER_AXIS]], [[PER_AXIS]] + +!qalias1 = !quant.uniform +!qalias2 = !quant.uniform + +func.func private @callee(tensor, tensor) -> (tensor, tensor) + +func.func @normalize_quant_types_to_per_axis(%arg0: tensor, + %arg1: tensor) -> (tensor, tensor) { + %0 = "test.custom_op"(%arg0) : (tensor) -> tensor + %1 = "test.custom_op"(%arg1) : (tensor) -> tensor + %3:2 = func.call @callee(%0, %1) : (tensor, tensor) -> (tensor, 
tensor) + return %3#0, %3#1 : tensor, tensor +} diff --git a/mlir/test/Dialect/Quant/ops.mlir b/mlir/test/Dialect/Quant/ops.mlir index 4abc5830d081..33ff93ecbc1d 100644 --- a/mlir/test/Dialect/Quant/ops.mlir +++ b/mlir/test/Dialect/Quant/ops.mlir @@ -148,4 +148,23 @@ func.func @scast_per_axis_unranked(%arg0: tensor<*xi8>) { return } +// ----- + +!qalias = !quant.uniform +func.func @sub_channel_quantization(%arg0: tensor<2x4xi8>) -> tensor<2x4xi8> { + %0 = quant.scast %arg0 : tensor<2x4xi8> to tensor<2x4x!qalias> + %1 = quant.dcast %0 : tensor<2x4x!qalias> to tensor<2x4xf32> + %2 = quant.qcast %1 : tensor<2x4xf32> to tensor<2x4x!qalias> + %3 = quant.scast %2 : tensor<2x4x!qalias> to tensor<2x4xi8> + return %3 : tensor<2x4xi8> +} +// ----- + +!qalias = !quant.uniform +func.func @sub_channel_quantization_with_unknown_dims(%arg0: tensor<2x?xf32>) { + %0 = quant.qcast %arg0 : tensor<2x?xf32> to tensor<2x?x!qalias> + return +} diff --git a/mlir/test/Dialect/Quant/parse-any-invalid.mlir b/mlir/test/Dialect/Quant/parse-any-invalid.mlir index 41c5f9307071..7ea4ddc61db8 100644 --- a/mlir/test/Dialect/Quant/parse-any-invalid.mlir +++ b/mlir/test/Dialect/Quant/parse-any-invalid.mlir @@ -17,12 +17,12 @@ // ----- // Unrecognized storage type: illegal prefix -// expected-error@+1 {{illegal storage type prefix}} +// expected-error@+1 {{illegal quantized storage type alias}} !qalias = !quant.any:f32> // ----- // Unrecognized storage type: no width -// expected-error@+1 {{illegal storage type prefix}} +// expected-error@+1 {{illegal quantized storage type alias}} !qalias = !quant.any:f32> // ----- diff --git a/mlir/test/Dialect/Quant/parse-quantile-invalid.mlir b/mlir/test/Dialect/Quant/parse-quantile-invalid.mlir new file mode 100644 index 000000000000..005faa60e3cb --- /dev/null +++ b/mlir/test/Dialect/Quant/parse-quantile-invalid.mlir @@ -0,0 +1,188 @@ +// RUN: mlir-opt %s -split-input-file -verify-diagnostics + +// ----- +// Illegal missing quantileType +// expected-error@+1 
{{expected ':'}} +!qalias = !quant.quantile +func.func @parse() -> !qalias { + %0 = "foo"() : () -> !qalias + return %0 : !qalias +} + +// ----- +// Illegal quantileType value +// expected-error@+1 {{illegal quantile type alias}} +!qalias = !quant.quantile +func.func @parse() -> !qalias { + %0 = "foo"() : () -> !qalias + return %0 : !qalias +} + +// ----- +// Illegal quantile array size +// expected-error@+1 {{quantiles array size needs to be equal to 2^(bit_size(storageType)), or (storageTypeMax - storageTypeMin + 1) when max and min differ from the type limits; expected: 256, found: 2}} +!qalias = !quant.quantile +func.func @parse() -> !qalias { + %0 = "foo"() : () -> !qalias + return %0 : !qalias +} + +// ----- +// Illegal quantile array size (per axis type) +// expected-error@+1 {{quantiles array size needs to be equal to 2^(bit_size(storageType)), or (storageTypeMax - storageTypeMin + 1) when max and min differ from the type limits; expected: 256, found: 2}} +!qalias = !quant.quantile +func.func @parse() -> !qalias { + %0 = "foo"() : () -> !qalias + return %0 : !qalias +} + +// ----- +// Unrecognized token: trailing +// expected-error@+1 {{expected '>'}} +!qalias = !quant.quantile:f16:f32, {-1.0,1.0}:0.99872:127 23> + +// ----- +// Unrecognized token: missing storage type maximum +// expected-error@+1 {{expected ':'}} +!qalias = !quant.quantile:f16:f32, {-1.0,1.0}:0.99872:127> + +// ----- +// Unrecognized token: missing closing angle bracket +// expected-error@+1 {{unbalanced '<' character in pretty dialect name}} +!qalias = !quant> + +// ----- +// Unrecognized token: missing type colon +// expected-error@+1 {{expected ':'}} +!qalias = !quant.quantilef16:f32, {-1.0,1.0}:0.99872:127> + +// ----- +// Unrecognized token: missing comma +// expected-error@+1 {{expected ','}} +!qalias = !quant.quantile + +// ----- +// Unrecognized storage type: illegal prefix +// expected-error@+1 {{illegal quantized storage type alias}} +!qalias = !quant.quantile:f16:f32, 
{-1.0,1.0}:0.99872:127> + +// ----- +// Unrecognized storage type: no width +// expected-error@+1 {{illegal quantized storage type alias}} +!qalias = !quant.quantile:f16:f32, {-1.0,1.0}:0.99872:127> + +// ----- +// Unrecognized storage type: storage size > 32 +// expected-error@+1 {{illegal storage type size: 33}} +!qalias = !quant.quantile + +// ----- +// Unrecognized storage type: storage size < 0 +// expected-error@+1 {{illegal quantized storage type alias}} +!qalias = !quant.quantile:f16:f32, {-1.0,1.0}:0.99872:127> + +// ----- +// Unrecognized storage type: storage size +// expected-error@+1 {{invalid integer width}} +!qalias = !quant.quantile:f16:f32, {-1.0,1.0}:0.99872:127> + +// ----- +// Illegal storage min/max: max - min < 0 +// expected-error@+1 {{illegal storage min and storage max: (2:1)}} +!qalias = !quant.quantile:f16:f32, {-1.0,1.0}:0.99872:127> + +// ----- +// Illegal storage min/max: max - min == 0 +// expected-error@+1 {{illegal storage min and storage max: (1:1)}} +!qalias = !quant.quantile:f16:f32, {-1.0,1.0}:0.99872:127> + +// ----- +// Illegal storage min/max: max > defaultMax +// expected-error@+1 {{illegal storage type maximum: 9}} +!qalias = !quant.quantile:f16:f32, {-1.0,1.0}:0.99872:127> + +// ----- +// Illegal storage min/max: min < defaultMin +// expected-error@+1 {{illegal storage type minimum: -9}} +!qalias = !quant.quantile:f16:f32, {-1.0,1.0}:0.99872:127> + +// ----- +// Illegal storage min/max: max > defaultMax +// expected-error@+1 {{illegal storage type maximum: 60000}} +!qalias = !quant.quantile:f16:f32, {-1.0,1.0}:0.99872:127> + +// ----- +// Illegal storage min/max: min < defaultMin +// expected-error@+1 {{illegal storage type minimum: -60000}} +!qalias = !quant.quantile:f16:f32, {-1.0,1.0}:0.99872:127> + +// ----- +// Illegal storage min/max: max > defaultMax +// expected-error@+1 {{illegal storage type maximum: 500}} +!qalias = !quant.quantile:f16:f32, {-1.0,1.0}:0.99872:127> + +// ----- +// Illegal storage min/max: min < 
defaultMin +// expected-error@+1 {{illegal storage type minimum: -500}} +!qalias = !quant.quantile:f16:f32, {-1.0,1.0}:0.99872:127> + +// ----- +// Illegal uniform params: invalid scale +// expected-error@+1 {{expected floating point literal}} +!qalias = !quant.quantile:f16:f32, {-1.0,1.0}:abc:127> + +// ----- +// Illegal uniform params: invalid zero point separator +// expected-error@+1 {{expected '>'}} +!qalias = !quant.quantile:f16:f32, {-1.0,1.0}:0.1abc> + +// ----- +// Illegal uniform params: missing zero point +// expected-error@+1 {{expected integer value}} +!qalias = !quant.quantile:f16:f32, {-1.0,1.0}:0.1:> + +// ----- +// Illegal uniform params: invalid zero point +// expected-error@+1 {{expected integer value}} +!qalias = !quant.quantile:f16:f32, {-1.0,1.0}:0.1:abc> + +// ----- +// Illegal expressed type: f33 +// expected-error@+1 {{expected non-function type}} +!qalias = !quant.quantile:f16:f33, {-1.0,1.0}:0.99872:127> + +// ----- +// Illegal uniform params: missing quantized dimension +// expected-error@+1 {{expected integer value}} +!qalias = !quant.quantile:f16:f32:, {-1.0,1.0}:{2.000000e+02:-19.987200e-01:1}> + +// ----- +// Illegal uniform params: unspecified quantized dimension, when multiple scales +// provided. 
+// expected-error@+1 {{expected floating point literal}} +!qalias = !quant.quantile:f16:f32, {-1.0,1.0}:{2.000000e+02,-19.987200e-01:1}> + +// ----- +// Illegal quantile params: unspecified quantile values +// expected-error@+1 {{expected floating point literal}} +!qalias = !quant.quantile:f16:f32, {}:0.99872:127> + +// ----- +// Illegal quantile params: missing quantile values +// expected-error@+1 {{expected floating point literal}} +!qalias = !quant.quantile:f16:f32, {-1.0,}:0.99872:127> + +// ----- +// Illegal quantile params: missing colon separator +// expected-error@+1 {{expected ':'}} +!qalias = !quant.quantile:f16:f32, {-1.0,1.0}0.99872:127> + +// ----- +// Illegal quantile params: unbalanced } +// expected-error@+1 {{unbalanced '{' character in pretty dialect name}} +!qalias = !quant.quantile:f16:f32, {-1.0,1.0:0.99872:127> + +// ----- +// Illegal quantile params: missing { +// expected-error@+1 {{unbalanced '<' character in pretty dialect name}} +!qalias = !quant.quantile:f16:f32, -1.0,1.0}:0.99872:127> diff --git a/mlir/test/Dialect/Quant/parse-quantile.mlir b/mlir/test/Dialect/Quant/parse-quantile.mlir new file mode 100644 index 000000000000..1af567478e71 --- /dev/null +++ b/mlir/test/Dialect/Quant/parse-quantile.mlir @@ -0,0 +1,183 @@ +// RUN: mlir-opt -allow-unregistered-dialect %s -split-input-file | FileCheck %s + +// ----- +// All per-layer params specified: +// [signed] storageType, storageTypeMin, storageTypeMax, expressedType, scale, zeroPoint +// CHECK: !quant.quantile:f16:f32, {-1.000000e+00,-8.667000e-01,-7.333000e-01,-6.000000e-01,-4.667000e-01,-3.333000e-01,-2.000000e-01,-0.066699999999999995,0.066699999999999995,2.000000e-01,3.333000e-01,4.667000e-01,6.000000e-01,7.333000e-01,8.667000e-01,1.000000e+00}:9.987200e-01:127> +!qalias = !quant.quantile:f16:f32, {-1.0000,-0.8667,-0.7333,-0.6000,-0.4667,-0.3333,-0.2000,-0.0667,0.0667,0.2000,0.3333,0.4667,0.6000,0.7333,0.8667,1.0000}:0.99872:127> +func.func @parse() -> !qalias { + %0 = "foo"() : 
() -> !qalias + return %0 : !qalias +} + +// ----- +// Trailing whitespace. +// CHECK: !quant.quantile +!qalias = !quant.quantile +func.func @parse() -> !qalias { + %0 = "foo"() : () -> !qalias + return %0 : !qalias +} + +// ----- +// Default min/max value optimization for integers. +// CHECK: !quant.quantile +!qalias = !quant.quantile:f16:f32, {-1.0000,-0.8667,-0.7333,-0.6000,-0.4667,-0.3333,-0.2000,-0.0667,0.0667,0.2000,0.3333,0.4667,0.6000,0.7333,0.8667,1.0000}:0.99872:127 > +func.func @parse() -> !qalias { + %0 = "foo"() : () -> !qalias + return %0 : !qalias +} + +// ----- +// Default min/max value optimization for f8E5M2. +// CHECK: !quant.quantile +!qalias = !quant.quantile:f16:f32, {-1.0000,-0.9922,-0.9843,-0.9765,-0.9686,-0.9608,-0.9529,-0.9451,-0.9373,-0.9294,-0.9216,-0.9137,-0.9059,-0.8980,-0.8902,-0.8824,-0.8745,-0.8667,-0.8588,-0.8510,-0.8431,-0.8353,-0.8275,-0.8196,-0.8118,-0.8039,-0.7961,-0.7882,-0.7804,-0.7725,-0.7647,-0.7569,-0.7490,-0.7412,-0.7333,-0.7255,-0.7176,-0.7098,-0.7020,-0.6941,-0.6863,-0.6784,-0.6706,-0.6627,-0.6549,-0.6471,-0.6392,-0.6314,-0.6235,-0.6157,-0.6078,-0.6000,-0.5922,-0.5843,-0.5765,-0.5686,-0.5608,-0.5529,-0.5451,-0.5373,-0.5294,-0.5216,-0.5137,-0.5059,-0.4980,-0.4902,-0.4824,-0.4745,-0.4667,-0.4588,-0.4510,-0.4431,-0.4353,-0.4275,-0.4196,-0.4118,-0.4039,-0.3961,-0.3882,-0.3804,-0.3725,-0.3647,-0.3569,-0.3490,-0.3412,-0.3333,-0.3255,-0.3176,-0.3098,-0.3020,-0.2941,-0.2863,-0.2784,-0.2706,-0.2627,-0.2549,-0.2471,-0.2392,-0.2314,-0.2235,-0.2157,-0.2078,-0.2000,-0.1922,-0.1843,-0.1765,-0.1686,-0.1608,-0.1529,-0.1451,-0.1373,-0.1294,-0.1216,-0.1137,-0.1059,-0.0980,-0.0902,-0.0824,-0.0745,-0.0667,-0.0588,-0.0510,-0.0431,-0.0353,-0.0275,-0.0196,-0.0118,-0.0039,0.0039,0.0118,0.0196,0.0275,0.0353,0.0431,0.0510,0.0588,0.0667,0.0745,0.0824,0.0902,0.0980,0.1059,0.1137,0.1216,0.1294,0.1373,0.1451,0.1529,0.1608,0.1686,0.1765,0.1843,0.1922,0.2000,0.2078,0.2157,0.2235,0.2314,0.2392,0.2471,0.2549,0.2627,0.2706,0.2784,0.2863,0.2941,0.3020,0.30
98,0.3176,0.3255,0.3333,0.3412,0.3490,0.3569,0.3647,0.3725,0.3804,0.3882,0.3961,0.4039,0.4118,0.4196,0.4275,0.4353,0.4431,0.4510,0.4588,0.4667,0.4745,0.4824,0.4902,0.4980,0.5059,0.5137,0.5216,0.5294,0.5373,0.5451,0.5529,0.5608,0.5686,0.5765,0.5843,0.5922,0.6000,0.6078,0.6157,0.6235,0.6314,0.6392,0.6471,0.6549,0.6627,0.6706,0.6784,0.6863,0.6941,0.7020,0.7098,0.7176,0.7255,0.7333,0.7412,0.7490,0.7569,0.7647,0.7725,0.7804,0.7882,0.7961,0.8039,0.8118,0.8196,0.8275,0.8353,0.8431,0.8510,0.8588,0.8667,0.8745,0.8824,0.8902,0.8980,0.9059,0.9137,0.9216,0.9294,0.9373,0.9451,0.9529,0.9608,0.9686,0.9765,0.9843,0.9922,1.0000}:0.99872:127 > +func.func @parse() -> !qalias { + %0 = "foo"() : () -> !qalias + return %0 : !qalias +} + +// ----- +// Default min/max value optimization for f8E4M3FN. +// CHECK: !quant.quantile +!qalias = !quant.quantile:f16:f32, {-1.0000,-0.9922,-0.9843,-0.9765,-0.9686,-0.9608,-0.9529,-0.9451,-0.9373,-0.9294,-0.9216,-0.9137,-0.9059,-0.8980,-0.8902,-0.8824,-0.8745,-0.8667,-0.8588,-0.8510,-0.8431,-0.8353,-0.8275,-0.8196,-0.8118,-0.8039,-0.7961,-0.7882,-0.7804,-0.7725,-0.7647,-0.7569,-0.7490,-0.7412,-0.7333,-0.7255,-0.7176,-0.7098,-0.7020,-0.6941,-0.6863,-0.6784,-0.6706,-0.6627,-0.6549,-0.6471,-0.6392,-0.6314,-0.6235,-0.6157,-0.6078,-0.6000,-0.5922,-0.5843,-0.5765,-0.5686,-0.5608,-0.5529,-0.5451,-0.5373,-0.5294,-0.5216,-0.5137,-0.5059,-0.4980,-0.4902,-0.4824,-0.4745,-0.4667,-0.4588,-0.4510,-0.4431,-0.4353,-0.4275,-0.4196,-0.4118,-0.4039,-0.3961,-0.3882,-0.3804,-0.3725,-0.3647,-0.3569,-0.3490,-0.3412,-0.3333,-0.3255,-0.3176,-0.3098,-0.3020,-0.2941,-0.2863,-0.2784,-0.2706,-0.2627,-0.2549,-0.2471,-0.2392,-0.2314,-0.2235,-0.2157,-0.2078,-0.2000,-0.1922,-0.1843,-0.1765,-0.1686,-0.1608,-0.1529,-0.1451,-0.1373,-0.1294,-0.1216,-0.1137,-0.1059,-0.0980,-0.0902,-0.0824,-0.0745,-0.0667,-0.0588,-0.0510,-0.0431,-0.0353,-0.0275,-0.0196,-0.0118,-0.0039,0.0039,0.0118,0.0196,0.0275,0.0353,0.0431,0.0510,0.0588,0.0667,0.0745,0.0824,0.0902,0.0980,0.1059,0.1137,0.1216,0.1294,0.137
3,0.1451,0.1529,0.1608,0.1686,0.1765,0.1843,0.1922,0.2000,0.2078,0.2157,0.2235,0.2314,0.2392,0.2471,0.2549,0.2627,0.2706,0.2784,0.2863,0.2941,0.3020,0.3098,0.3176,0.3255,0.3333,0.3412,0.3490,0.3569,0.3647,0.3725,0.3804,0.3882,0.3961,0.4039,0.4118,0.4196,0.4275,0.4353,0.4431,0.4510,0.4588,0.4667,0.4745,0.4824,0.4902,0.4980,0.5059,0.5137,0.5216,0.5294,0.5373,0.5451,0.5529,0.5608,0.5686,0.5765,0.5843,0.5922,0.6000,0.6078,0.6157,0.6235,0.6314,0.6392,0.6471,0.6549,0.6627,0.6706,0.6784,0.6863,0.6941,0.7020,0.7098,0.7176,0.7255,0.7333,0.7412,0.7490,0.7569,0.7647,0.7725,0.7804,0.7882,0.7961,0.8039,0.8118,0.8196,0.8275,0.8353,0.8431,0.8510,0.8588,0.8667,0.8745,0.8824,0.8902,0.8980,0.9059,0.9137,0.9216,0.9294,0.9373,0.9451,0.9529,0.9608,0.9686,0.9765,0.9843,0.9922,1.0000}:0.99872:127 > +func.func @parse() -> !qalias { + %0 = "foo"() : () -> !qalias + return %0 : !qalias +} + +// ----- +// Required per-layer params specified: +// [unsigned] storageType, expressedType, scale +// CHECK: !quant.quantile +!qalias = !quant.quantile +func.func @parse() -> !qalias { + %0 = "foo"() : () -> !qalias + return %0 : !qalias +} + +// ----- +// Exponential scale (-) +// CHECK: !quant.quantile +!qalias = !quant.quantile +func.func @parse() -> !qalias { + %0 = "foo"() : () -> !qalias + return %0 : !qalias +} + +// ----- +// Exponential scale (+) +// CHECK: !quant.quantile +!qalias = !quant.quantile +func.func @parse() -> !qalias { + %0 = "foo"() : () -> !qalias + return %0 : !qalias +} + +// ----- +// Storage type: f8E5M2 +// CHECK: !quant.quantile +!qalias = !quant.quantile +func.func @parse() -> !qalias { + %0 = "foo"() : () -> !qalias + return %0 : !qalias +} + +// ----- +// Storage type: f8E4M3FN +// CHECK: !quant.quantile +!qalias = !quant.quantile +func.func @parse() -> !qalias { + %0 = "foo"() : () -> !qalias + return %0 : !qalias +} + +// ----- +// Expressed type: f32 +// CHECK: !quant.quantile +!qalias = !quant.quantile +func.func @parse() -> !qalias { + %0 = "foo"() : () -> !qalias 
+ return %0 : !qalias +} + +// ----- +// Expressed type: f32 +// CHECK: !quant.quantile +!qalias = !quant.quantile +func.func @parse() -> !qalias { + %0 = "foo"() : () -> !qalias + return %0 : !qalias +} + +// ----- +// Expressed type: f16 +// CHECK: !quant.quantile +!qalias = !quant.quantile +func.func @parse() -> !qalias { + %0 = "foo"() : () -> !qalias + return %0 : !qalias +} + +// ----- +// Expressed type: f64 +// CHECK: !quant.quantile +!qalias = !quant.quantile +func.func @parse() -> !qalias { + %0 = "foo"() : () -> !qalias + return %0 : !qalias +} + +// ----- +// Expressed type: bf16 +// CHECK: !quant.quantile +!qalias = !quant.quantile +func.func @parse() -> !qalias { + %0 = "foo"() : () -> !qalias + return %0 : !qalias +} + +// ----- +// Per-axis scales and zero points (affine) +// CHECK: !quant.quantile +!qalias = !quant.quantile +func.func @parse() -> !qalias { + %0 = "foo"() : () -> !qalias + return %0 : !qalias +} + +// ----- +// Per-axis scales and no zero points (fixedpoint) +// CHECK: !quant.quantile +!qalias = !quant.quantile +func.func @parse() -> !qalias { + %0 = "foo"() : () -> !qalias + return %0 : !qalias +} + +// ----- +// Per-axis scales and zero points (mixed affine and fixedpoint) +// CHECK: !quant.quantile +!qalias = !quant.quantile +func.func @parse() -> !qalias { + %0 = "foo"() : () -> !qalias + return %0 : !qalias +} + +// ----- +// Negative scale checking +// CHECK: !quant.quantile +!qalias = !quant.quantile +func.func @parse() -> !qalias { + %0 = "foo"() : () -> !qalias + return %0 : !qalias +} + +// ----- +// Per-axis negative scale checking +// CHECK: !quant.quantile +!qalias = !quant.quantile +func.func @parse() -> !qalias { + %0 = "foo"() : () -> !qalias + return %0 : !qalias +} diff --git a/mlir/test/Dialect/Quant/parse-uniform-invalid.mlir b/mlir/test/Dialect/Quant/parse-uniform-invalid.mlir index 4528d2826a85..5553aabe4599 100644 --- a/mlir/test/Dialect/Quant/parse-uniform-invalid.mlir +++ 
b/mlir/test/Dialect/Quant/parse-uniform-invalid.mlir @@ -37,12 +37,12 @@ // ----- // Unrecognized storage type: illegal prefix -// expected-error@+1 {{illegal storage type prefix}} +// expected-error@+1 {{illegal quantized storage type alias}} !qalias = !quant.uniform:f32, 0.99872:127> // ----- // Unrecognized storage type: no width -// expected-error@+1 {{illegal storage type prefix}} +// expected-error@+1 {{illegal quantized storage type alias}} !qalias = !quant.uniform:f32, 0.99872:127> // ----- @@ -52,7 +52,7 @@ // ----- // Unrecognized storage type: storage size < 0 -// expected-error@+1 {{illegal storage type prefix}} +// expected-error@+1 {{illegal quantized storage type alias}} !qalias = !quant.uniform:f32, 0.99872:127> // ----- @@ -80,6 +80,26 @@ // expected-error@+1 {{illegal storage type minimum: -9}} !qalias = !quant.uniform:f32, 0.99872:127> +// ----- +// Illegal storage min/max: max > defaultMax +// expected-error@+1 {{illegal storage type maximum: 60000}} +!qalias = !quant.uniform:f32, 0.99872:127> + +// ----- +// Illegal storage min/max: min < defaultMin +// expected-error@+1 {{illegal storage type minimum: -60000}} +!qalias = !quant.uniform:f32, 0.99872:127> + +// ----- +// Illegal storage min/max: max > defaultMax +// expected-error@+1 {{illegal storage type maximum: 500}} +!qalias = !quant.uniform:f32, 0.99872:127> + +// ----- +// Illegal storage min/max: min < defaultMin +// expected-error@+1 {{illegal storage type minimum: -500}} +!qalias = !quant.uniform:f32, 0.99872:127> + // ----- // Illegal uniform params: invalid scale // expected-error@+1 {{expected floating point literal}} @@ -105,11 +125,6 @@ // expected-error@+1 {{expected non-function type}} !qalias = !quant.uniform:f33, 0.99872:127> -// ----- -// Illegal scale: negative -// expected-error@+1 {{scale out of expressed type range}} -!qalias = !quant.uniform:f32, -1.0:127> - // ----- // Illegal uniform params: missing quantized dimension // expected-error@+1 {{expected integer value}} @@ 
-121,27 +136,103 @@ // expected-error@+1 {{expected floating point literal}} !qalias = !quant.uniform:f32, {2.000000e+02,-19.987200e-01:1}> + // ----- -// Illegal negative axis in per-axis quantization +// Scale f16 overflow +// expected-error@+1 {{scale 6.600000e+04 out of expressed type range}} +!qalias = !quant.uniform + +// ----- +// Scale f16 overflow in per-axis quantization +// expected-error@+1 {{scale 6.600000e+04 out of expressed type range}} +!qalias = !quant.uniform + +// ----- +// Illegal negative axis in sub-channel quantization // expected-error@+1 {{illegal quantized dimension: -1}} -!qalias = !quant.uniform +!qalias = !quant.uniform // ----- -// Scale f16 underflow -// expected-error@+1 {{scale out of expressed type range}} -!qalias = !quant.uniform +// Illegal zero block-size in sub-channel quantization +// expected-error@+1 {{illegal block size: 0}} +!qalias = !quant.uniform // ----- -// Scale f16 overflow -// expected-error@+1 {{scale out of expressed type range}} -!qalias = !quant.uniform +// Illegal negative block-size in sub-channel quantization +// expected-error@+1 {{illegal block size: -1}} +!qalias = !quant.uniform // ----- -// Scale f16 underflow in per-axis quantization -// expected-error@+1 {{scale out of expressed type range}} -!qalias = !quant.uniform +// Missing block size in sub-channel quantization +// expected-error@+1 {{expected ':'}} +!qalias = !quant.uniform // ----- -// Scale f16 overflow in per-axis quantization -// expected-error@+1 {{scale out of expressed type range}} -!qalias = !quant.uniform +// Missing quantization dimension in sub-channel quantization +// expected-error@+1 {{expected integer value}} +!qalias = !quant.uniform + +// ----- +// Invalid tensor literal structure in sub-channel quantization +// expected-error@+2 {{expected '>'}} +!qalias = !quant.uniform + +// ----- +// Ragged tensor literal in sub-channel quantization +// expected-error@+2 {{ranks are not consistent between elements}} +!qalias = 
!quant.uniform + +// ----- +// Missing braces around block-size information in sub-channel quantization +// expected-error@+1 {{expected ','}} +!qalias = !quant.uniform + +// ----- +// Missing right-brace around block-size information in sub-channel quantization +// expected-error@+1 {{unbalanced '{' character}} +!qalias = !quant.uniform + +// ----- +// Missing left-brace around block-size information in sub-channel quantization +// expected-error@+1 {{unbalanced '<' character}} +!qalias = !quant.uniform + +// ----- +// Missing Axis:BlockSize pair +// expected-error@+1 {{expected integer value}} +!qalias = !quant.uniform + +// ----- +// Missing Scale:ZeroPoint pair +// expected-error@+2 {{expected floating point literal}} +!qalias = !quant.uniform + +// ----- +// Missing ZeroPoint in Scale:ZeroPoint pair +// expected-error@+2 {{expected integer value}} +!qalias = !quant.uniform + +// ----- +// Empty quantization paramaters in sub-channel quantization +// expected-error@+1 {{expected floating point literal}} +!qalias = !quant.uniform + +// ----- +// Scale out of expressed type range in sub-channel quantization +// expected-error@+2 {{scale 6.600000e+04 out of expressed type range}} +!qalias = !quant.uniform + diff --git a/mlir/test/Dialect/Quant/parse-uniform.mlir b/mlir/test/Dialect/Quant/parse-uniform.mlir index 4fbe86d935ea..383830d7f1b1 100644 --- a/mlir/test/Dialect/Quant/parse-uniform.mlir +++ b/mlir/test/Dialect/Quant/parse-uniform.mlir @@ -19,6 +19,33 @@ func.func @parse() -> !qalias { return %0 : !qalias } +// ----- +// Default min/max value optimization for integers. +// CHECK: !quant.uniform +!qalias = !quant.uniform:f32, 0.99872:127 > +func.func @parse() -> !qalias { + %0 = "foo"() : () -> !qalias + return %0 : !qalias +} + +// ----- +// Default min/max value optimization for f8E5M2. 
+// CHECK: !quant.uniform +!qalias = !quant.uniform:f32, 0.99872:127 > +func.func @parse() -> !qalias { + %0 = "foo"() : () -> !qalias + return %0 : !qalias +} + +// ----- +// Default min/max value optimization for f8E4M3FN. +// CHECK: !quant.uniform +!qalias = !quant.uniform:f32, 0.99872:127 > +func.func @parse() -> !qalias { + %0 = "foo"() : () -> !qalias + return %0 : !qalias +} + // ----- // Required per-layer params specified: // [unsigned] storageType, expressedType, scale @@ -47,6 +74,24 @@ func.func @parse() -> !qalias { return %0 : !qalias } +// ----- +// Storage type: f8E5M2 +// CHECK: !quant.uniform +!qalias = !quant.uniform +func.func @parse() -> !qalias { + %0 = "foo"() : () -> !qalias + return %0 : !qalias +} + +// ----- +// Storage type: f8E4M3FN +// CHECK: !quant.uniform +!qalias = !quant.uniform +func.func @parse() -> !qalias { + %0 = "foo"() : () -> !qalias + return %0 : !qalias +} + // ----- // Storage type: i16 // CHECK: !quant.uniform @@ -154,3 +199,39 @@ func.func @parse() -> !qalias { %0 = "foo"() : () -> !qalias return %0 : !qalias } + +// ----- +// Sub-channel scales and zero points (mixed affine and fixedpoint) +// CHECK: !quant.uniform +!qalias = !quant.uniform +func.func @parse() -> !qalias { + %0 = "foo"() : () -> !qalias + return %0 : !qalias +} + +// ----- +// Empty block-size information in sub-channel quantization +// CHECK: !quant.uniform +!qalias = !quant.uniform +func.func @parse() -> !qalias { + %0 = "foo"() : () -> !qalias + return %0 : !qalias +} + +// ----- +// Negative scale checking +// CHECK: !quant.uniform +!qalias = !quant.uniform +func.func @parse() -> !qalias { + %0 = "foo"() : () -> !qalias + return %0 : !qalias +} + +// ----- +// Per axis negative scale checking +// CHECK: !quant.uniform +!qalias = !quant.uniform +func.func @parse() -> !qalias { + %0 = "foo"() : () -> !qalias + return %0 : !qalias +} diff --git a/mlir/test/Dialect/Transform/ops-invalid.mlir b/mlir/test/Dialect/Transform/ops-invalid.mlir index 
71a260f1196e..1e2f7ad69ab8 100644 --- a/mlir/test/Dialect/Transform/ops-invalid.mlir +++ b/mlir/test/Dialect/Transform/ops-invalid.mlir @@ -482,9 +482,10 @@ module { // ----- module attributes { transform.with_named_sequence} { + // expected-note @below {{ancestor transform op}} transform.sequence failures(suppress) { ^bb0(%arg0: !transform.any_op): - // expected-error @below {{op symbol's parent must have the SymbolTable trai}} + // expected-error @below {{cannot be defined inside another transform op}} transform.named_sequence @nested() { transform.yield } diff --git a/mlir/test/Dialect/XeGPU/XeGPUOps.mlir b/mlir/test/Dialect/XeGPU/XeGPUOps.mlir index d7174a489888..1d0f871f06b4 100644 --- a/mlir/test/Dialect/XeGPU/XeGPUOps.mlir +++ b/mlir/test/Dialect/XeGPU/XeGPUOps.mlir @@ -1,3 +1,4 @@ +// REQUIRES: xegpu-dialect-enabled // RUN: mlir-opt %s | FileCheck %s // Verify the printed output can be parsed. // RUN: mlir-opt %s | mlir-opt | FileCheck %s diff --git a/mlir/test/Dialect/XeGPU/invalid.mlir b/mlir/test/Dialect/XeGPU/invalid.mlir index 7816bff0582f..4255538b9ba1 100644 --- a/mlir/test/Dialect/XeGPU/invalid.mlir +++ b/mlir/test/Dialect/XeGPU/invalid.mlir @@ -1,3 +1,4 @@ +// REQUIRES: xegpu-dialect-enabled // RUN: mlir-opt %s -split-input-file -verify-diagnostics // ----- diff --git a/mlir/test/Dialect/XeGPU/xegpu-fold-alias-ops.mlir b/mlir/test/Dialect/XeGPU/xegpu-fold-alias-ops.mlir index d32954127fce..58dbefa0fdaa 100644 --- a/mlir/test/Dialect/XeGPU/xegpu-fold-alias-ops.mlir +++ b/mlir/test/Dialect/XeGPU/xegpu-fold-alias-ops.mlir @@ -1,3 +1,4 @@ +// REQUIRES: xegpu-dialect-enabled // RUN: mlir-opt -xegpu-fold-alias-ops -split-input-file %s | FileCheck %s func.func @fold_subview_with_xegpu_create_nd_tdesc(%arg0 : memref<256x256xf32>, %arg1 : index, %arg2 : index, %arg3 : index, %arg4 : index) ->(!xegpu.tensor_desc<8x16xf32>) { diff --git a/mlir/test/IR/invalid-builtin-types.mlir b/mlir/test/IR/invalid-builtin-types.mlir index 51612446d2e6..441f036a81ce 100644 
--- a/mlir/test/IR/invalid-builtin-types.mlir +++ b/mlir/test/IR/invalid-builtin-types.mlir @@ -115,17 +115,17 @@ func.func @illegaltype(i21312312323120) // expected-error {{invalid integer widt // ----- // Test no nested vector. -// expected-error@+1 {{failed to verify 'elementType': integer or index or floating-point}} +// expected-error@+1 {{vector elements must be int/index/float type}} func.func @vectors(vector<1 x vector<1xi32>>, vector<2x4xf32>) // ----- -// expected-error @+1 {{vector types must have positive constant sizes but got 0}} +// expected-error @+1 {{vector types must have positive constant sizes}} func.func @zero_vector_type() -> vector<0xi32> // ----- -// expected-error @+1 {{vector types must have positive constant sizes but got 1, 0}} +// expected-error @+1 {{vector types must have positive constant sizes}} func.func @zero_in_vector_type() -> vector<1x0xi32> // ----- diff --git a/mlir/test/IR/invalid-func-op.mlir b/mlir/test/IR/invalid-func-op.mlir index 8fd7af22e959..d995689ebb8d 100644 --- a/mlir/test/IR/invalid-func-op.mlir +++ b/mlir/test/IR/invalid-func-op.mlir @@ -31,7 +31,7 @@ func.func @func_op() { // ----- func.func @func_op() { - // expected-error@+1 {{op symbol's parent must have the SymbolTable trait}} + // expected-error@+1 {{entry block must have 1 arguments to match function signature}} func.func @mixed_named_arguments(f32) { ^entry: return @@ -42,7 +42,7 @@ func.func @func_op() { // ----- func.func @func_op() { - // expected-error@+1 {{op symbol's parent must have the SymbolTable trait}} + // expected-error@+1 {{type of entry block argument #0('i32') must match the type of the corresponding argument in function signature('f32')}} func.func @mixed_named_arguments(f32) { ^entry(%arg : i32): return diff --git a/mlir/test/IR/invalid.mlir b/mlir/test/IR/invalid.mlir index 861f4ef6c020..cd79dac6115d 100644 --- a/mlir/test/IR/invalid.mlir +++ b/mlir/test/IR/invalid.mlir @@ -675,3 +675,19 @@ func.func @error_at_end_of_line() { // ----- 
@foo // expected-error {{expected operation name in quotes}} + +// ----- + +func.func @foo() { + cf.br ^bb2 + + ^bb1: + // expected-error@+1 {{forward reference of value '%1' requires explicit type specification}} + test.format_operand_optional_type_op %0, %1 + return + + ^bb2: + %0 = arith.constant 0 : i64 + %1 = memref.alloc() : memref<1xf64> + cf.br ^bb1 +} diff --git a/mlir/test/IR/parser.mlir b/mlir/test/IR/parser.mlir index cace1fefa43d..dd3922081ecc 100644 --- a/mlir/test/IR/parser.mlir +++ b/mlir/test/IR/parser.mlir @@ -1464,3 +1464,13 @@ test.dialect_custom_format_fallback custom_format_fallback // Check that an op with an optional result parses f80 as type. // CHECK: test.format_optional_result_d_op : f80 test.format_optional_result_d_op : f80 + +// Can skip type definition for operands, if they are already defined in the same block +// CHECK-LABEL: func @optional_operand_types +func.func @optional_operand_types(%arg0: i64, %arg1: memref<1xf64>) { + // CHECK: test.format_operand_optional_type_op %arg0, %arg1 + test.format_operand_optional_type_op %arg0, %arg1 + // CHECK: test.format_operand_optional_type_op %arg0, %arg1 + test.format_operand_optional_type_op %arg0, %arg1 : memref<1xf64> + return +} diff --git a/mlir/test/IR/region.mlir b/mlir/test/IR/region.mlir index 0b959915d6bb..bf4b1bb4e5ab 100644 --- a/mlir/test/IR/region.mlir +++ b/mlir/test/IR/region.mlir @@ -87,17 +87,18 @@ func.func @named_region_has_wrong_number_of_blocks() { // CHECK: test.single_no_terminator_op "test.single_no_terminator_op"() ( { - %foo = arith.constant 1 : i32 + func.func @foo1() { return } + func.func @foo2() { return } } ) : () -> () // CHECK: test.variadic_no_terminator_op "test.variadic_no_terminator_op"() ( { - %foo = arith.constant 1 : i32 + func.func @foo1() { return } }, { - %bar = arith.constant 1 : i32 + func.func @foo2() { return } } ) : () -> () diff --git a/mlir/test/IR/test-symbol-rauw.mlir b/mlir/test/IR/test-symbol-rauw.mlir index ba17cf9d1042..1acd834fcde8 
100644 --- a/mlir/test/IR/test-symbol-rauw.mlir +++ b/mlir/test/IR/test-symbol-rauw.mlir @@ -51,11 +51,12 @@ module { } } - // CHECK: func @symbol_bar + // FIXME:#73140 + // DISABLED-CHECK: func @symbol_bar func.func @symbol_bar() { - // CHECK: foo.op - // CHECK-SAME: use_1 = @module_a::@replaced_foo - // CHECK-SAME: use_2 = @replaced_module_b::@replaced_module_c::@replaced_foo + // DISABLED-CHECK: foo.op + // DISABLED-CHECK-SAME: use_1 = @module_a::@replaced_foo + // DISABLED-CHECK-SAME: use_2 = @replaced_module_b::@replaced_module_c::@replaced_foo "foo.op"() { use_1 = @module_a::@foo, use_2 = @module_b::@module_c::@foo @@ -97,15 +98,16 @@ module { // ----- +// FIXME:#73140 module { - // CHECK: module @replaced_foo + // DISABLED-CHECK: module @replaced_foo module @foo attributes {sym.new_name = "replaced_foo" } { - // CHECK: func.func private @foo + // DISABLED-CHECK: func.func private @foo func.func private @foo() } - // CHECK: foo.op - // CHECK-SAME: use = @replaced_foo::@foo + // DISABLED-CHECK: foo.op + // DISABLED-CHECK-SAME: use = @replaced_foo::@foo "foo.op"() { use = @foo::@foo } : () -> () diff --git a/mlir/test/IR/traits.mlir b/mlir/test/IR/traits.mlir index 49cfd7e49674..85deff038690 100644 --- a/mlir/test/IR/traits.mlir +++ b/mlir/test/IR/traits.mlir @@ -591,13 +591,15 @@ func.func @failedHasDominanceScopeOutsideDominanceFreeScope() -> () { // Ensure that SSACFG regions of operations in GRAPH regions are // checked for dominance -func.func @illegalInsideDominanceFreeScope(%cond: i1) -> () { +func.func @illegalInsideDominanceFreeScope() -> () { test.graph_region { - scf.if %cond { + func.func @test() -> i1 { + ^bb1: // expected-error @+1 {{operand #0 does not dominate this use}} %2:3 = "bar"(%1) : (i64) -> (i1,i1,i1) // expected-note @+1 {{operand defined here}} - %1 = "baz"(%2#0) : (i1) -> (i64) + %1 = "baz"(%2#0) : (i1) -> (i64) + return %2#1 : i1 } "terminator"() : () -> () } @@ -608,21 +610,20 @@ func.func @illegalInsideDominanceFreeScope(%cond: i1) 
-> () { // Ensure that SSACFG regions of operations in GRAPH regions are // checked for dominance -func.func @illegalCFGInsideDominanceFreeScope(%cond: i1) -> () { +func.func @illegalCDFGInsideDominanceFreeScope() -> () { test.graph_region { - scf.if %cond { - "test.ssacfg_region"() ({ - ^bb1: - // expected-error @+1 {{operand #0 does not dominate this use}} - %2:3 = "bar"(%1) : (i64) -> (i1,i1,i1) - cf.br ^bb4 - ^bb2: - cf.br ^bb2 - ^bb4: - %1 = "foo"() : ()->i64 // expected-note {{operand defined here}} - }) : () -> () + func.func @test() -> i1 { + ^bb1: + // expected-error @+1 {{operand #0 does not dominate this use}} + %2:3 = "bar"(%1) : (i64) -> (i1,i1,i1) + cf.br ^bb4 + ^bb2: + cf.br ^bb2 + ^bb4: + %1 = "foo"() : ()->i64 // expected-note {{operand defined here}} + return %2#1 : i1 } - "terminator"() : () -> () + "terminator"() : () -> () } return } diff --git a/mlir/test/Transforms/canonicalize-dce.mlir b/mlir/test/Transforms/canonicalize-dce.mlir index 84631947970d..e8608a3aa3e6 100644 --- a/mlir/test/Transforms/canonicalize-dce.mlir +++ b/mlir/test/Transforms/canonicalize-dce.mlir @@ -77,15 +77,15 @@ func.func @f(%arg0: f32, %pred: i1) { // Test case: Recursively DCE into enclosed regions. 
-// CHECK: func.func @f(%arg0: f32) -// CHECK-NOT: arith.addf +// CHECK: func @f(%arg0: f32) +// CHECK-NEXT: func @g(%arg1: f32) +// CHECK-NEXT: return func.func @f(%arg0: f32) { - "test.region"() ( - { - %0 = "arith.addf"(%arg0, %arg0) : (f32, f32) -> f32 - } - ) : () -> () + func.func @g(%arg1: f32) { + %0 = "arith.addf"(%arg1, %arg1) : (f32, f32) -> f32 + return + } return } diff --git a/mlir/test/Transforms/canonicalize.mlir b/mlir/test/Transforms/canonicalize.mlir index 9b74362b6ee7..316ee0dfe593 100644 --- a/mlir/test/Transforms/canonicalize.mlir +++ b/mlir/test/Transforms/canonicalize.mlir @@ -429,15 +429,16 @@ func.func @write_only_alloca_fold(%v: f32) { // CHECK-LABEL: func @dead_block_elim func.func @dead_block_elim() { // CHECK-NOT: ^bb - builtin.module { - func.func @nested() { - return + func.func @nested() { + return - ^bb1: - return - } + ^bb1: + return } return + +^bb1: + return } // CHECK-LABEL: func @dyn_shape_fold(%arg0: index, %arg1: index) diff --git a/mlir/test/Transforms/constant-fold.mlir b/mlir/test/Transforms/constant-fold.mlir index 981757aed9b1..a7cdd04daae2 100644 --- a/mlir/test/Transforms/constant-fold.mlir +++ b/mlir/test/Transforms/constant-fold.mlir @@ -794,15 +794,12 @@ func.func @cmpf_inf() -> (i1, i1, i1, i1, i1, i1, i1, i1, i1, i1, i1, i1, i1, i1 // CHECK-LABEL: func @nested_isolated_region func.func @nested_isolated_region() { - // CHECK-NEXT: builtin.module { // CHECK-NEXT: func @isolated_op // CHECK-NEXT: arith.constant 2 - builtin.module { - func.func @isolated_op() { - %0 = arith.constant 1 : i32 - %2 = arith.addi %0, %0 : i32 - "foo.yield"(%2) : (i32) -> () - } + func.func @isolated_op() { + %0 = arith.constant 1 : i32 + %2 = arith.addi %0, %0 : i32 + "foo.yield"(%2) : (i32) -> () } // CHECK: "foo.unknown_region" diff --git a/mlir/test/Transforms/cse.mlir b/mlir/test/Transforms/cse.mlir index b447094874d0..84fb9c4591de 100644 --- a/mlir/test/Transforms/cse.mlir +++ b/mlir/test/Transforms/cse.mlir @@ -252,14 +252,11 @@ 
func.func @nested_isolated() -> i32 { // CHECK-NEXT: arith.constant 1 %0 = arith.constant 1 : i32 - // CHECK-NEXT: builtin.module // CHECK-NEXT: @nested_func - builtin.module { - func.func @nested_func() { - // CHECK-NEXT: arith.constant 1 - %foo = arith.constant 1 : i32 - "foo.yield"(%foo) : (i32) -> () - } + func.func @nested_func() { + // CHECK-NEXT: arith.constant 1 + %foo = arith.constant 1 : i32 + "foo.yield"(%foo) : (i32) -> () } // CHECK: "foo.region" diff --git a/mlir/test/Transforms/test-legalizer-full.mlir b/mlir/test/Transforms/test-legalizer-full.mlir index 5f1148cac650..74f312e8144a 100644 --- a/mlir/test/Transforms/test-legalizer-full.mlir +++ b/mlir/test/Transforms/test-legalizer-full.mlir @@ -37,11 +37,9 @@ func.func @recursively_legal_invalid_op() { } /// Operation that is dynamically legal, i.e. the function has a pattern /// applied to legalize the argument type before it becomes recursively legal. - builtin.module { - func.func @dynamic_func(%arg: i64) attributes {test.recursively_legal} { - %ignored = "test.illegal_op_f"() : () -> (i32) - "test.return"() : () -> () - } + func.func @dynamic_func(%arg: i64) attributes {test.recursively_legal} { + %ignored = "test.illegal_op_f"() : () -> (i32) + "test.return"() : () -> () } "test.return"() : () -> () diff --git a/mlir/test/lib/Dialect/Test/TestAttrDefs.td b/mlir/test/lib/Dialect/Test/TestAttrDefs.td index 0fd272f85d39..9a752482dcd2 100644 --- a/mlir/test/lib/Dialect/Test/TestAttrDefs.td +++ b/mlir/test/lib/Dialect/Test/TestAttrDefs.td @@ -384,6 +384,12 @@ def NestedPolynomialAttr2 : Test_Attr<"NestedPolynomialAttr2"> { }]; } +def TestAttrNewlineAndIndent : Test_Attr<"TestAttrNewlineAndIndent"> { + let mnemonic = "newline_and_indent"; + let parameters = (ins "::mlir::Type":$indentType); + let hasCustomAssemblyFormat = 1; +} + // Test custom location handling. 
def TestCustomLocationAttr : Test_LocAttr<"TestCustomLocation"> { diff --git a/mlir/test/lib/Dialect/Test/TestAttributes.cpp b/mlir/test/lib/Dialect/Test/TestAttributes.cpp index e09ea1090616..a822fce6974c 100644 --- a/mlir/test/lib/Dialect/Test/TestAttributes.cpp +++ b/mlir/test/lib/Dialect/Test/TestAttributes.cpp @@ -316,6 +316,34 @@ static ParseResult parseCustomFloatAttr(AsmParser &p, StringAttr &typeStrAttr, return success(); } +// TestAttrNewlineAndIndent +//===----------------------------------------------------------------------===// + +Attribute TestAttrNewlineAndIndentAttr::parse(::mlir::AsmParser &parser, + ::mlir::Type type) { + Type indentType; + if (parser.parseLess()) { + return {}; + } + if (parser.parseType(indentType)) { + return {}; + } + if (parser.parseGreater()) { + return {}; + } + return get(parser.getContext(), indentType); +} + +void TestAttrNewlineAndIndentAttr::print(::mlir::AsmPrinter &printer) const { + printer << "<"; + printer.increaseIndent(); + printer.printNewline(); + printer << getIndentType(); + printer.decreaseIndent(); + printer.printNewline(); + printer << ">"; +} + //===----------------------------------------------------------------------===// // Tablegen Generated Definitions //===----------------------------------------------------------------------===// diff --git a/mlir/test/lib/Dialect/Test/TestOpsSyntax.cpp b/mlir/test/lib/Dialect/Test/TestOpsSyntax.cpp index 664951f2a11b..917bc0fde0c8 100644 --- a/mlir/test/lib/Dialect/Test/TestOpsSyntax.cpp +++ b/mlir/test/lib/Dialect/Test/TestOpsSyntax.cpp @@ -152,6 +152,12 @@ static ParseResult parseCustomDirectiveOptionalOperandRef( bool expectedOptionalOperand = operandCount == 0; return success(expectedOptionalOperand != optOperand.has_value()); } +static ParseResult parseOptionalType(OpAsmParser &parser, Type &type) { + if (parser.parseOptionalColon()) + return success(); + + return parser.parseType(type); +} 
//===----------------------------------------------------------------------===// // Printing @@ -232,6 +238,21 @@ static void printCustomDirectiveOptionalOperandRef(OpAsmPrinter &printer, Value optOperand) { printer << (optOperand ? "1" : "0"); } +static bool isDefinedAbove(Value val, Operation *op) { + if (val.isa()) + return true; + + return val.getDefiningOp()->getBlock() == op->getBlock() && + val.getDefiningOp()->isBeforeInBlock(op); +} +static void printOptionalType(OpAsmPrinter &printer, + FormatOperandOptionalTypeOp op, Type type) { + if (isDefinedAbove(op.getOperand(), op)) + return; + + printer << ":"; + printer.printType(type); +} //===----------------------------------------------------------------------===// // Test parser. //===----------------------------------------------------------------------===// diff --git a/mlir/test/lib/Dialect/Test/TestOpsSyntax.td b/mlir/test/lib/Dialect/Test/TestOpsSyntax.td index 2848cb994231..43776ff6c7f0 100644 --- a/mlir/test/lib/Dialect/Test/TestOpsSyntax.td +++ b/mlir/test/lib/Dialect/Test/TestOpsSyntax.td @@ -303,6 +303,9 @@ def FormatOperandDOp : FormatOperandBase<"d", [{ def FormatOperandEOp : FormatOperandBase<"e", [{ $buildable `,` $operand `:` type($buildable) `,` type($operand) attr-dict }]>; +def FormatOperandOptionalTypeOp : FormatOperandBase<"optional_type", [{ + $buildable `,` $operand custom(type($operand)) attr-dict +}]>; def FormatSuccessorAOp : TEST_Op<"format_successor_a_op", [Terminator]> { let successors = (successor VariadicSuccessor:$targets); diff --git a/mlir/test/lib/Dialect/Test/TestTypeDefs.td b/mlir/test/lib/Dialect/Test/TestTypeDefs.td index 6335701786ec..3b344bca58bf 100644 --- a/mlir/test/lib/Dialect/Test/TestTypeDefs.td +++ b/mlir/test/lib/Dialect/Test/TestTypeDefs.td @@ -403,4 +403,9 @@ def TestTypeOpAsmTypeInterface : Test_Type<"TestTypeOpAsmTypeInterface", let mnemonic = "op_asm_type_interface"; } +def TestTypeNewlineAndIndent : Test_Type<"TestTypeNewlineAndIndent"> { + let mnemonic = 
"newline_and_indent"; + let hasCustomAssemblyFormat = 1; +} + #endif // TEST_TYPEDEFS diff --git a/mlir/test/lib/Dialect/Test/TestTypes.cpp b/mlir/test/lib/Dialect/Test/TestTypes.cpp index 1ae7ac472d98..77e5b35b68ba 100644 --- a/mlir/test/lib/Dialect/Test/TestTypes.cpp +++ b/mlir/test/lib/Dialect/Test/TestTypes.cpp @@ -537,3 +537,30 @@ void TestTypeOpAsmTypeInterfaceType::getAsmName( OpAsmSetNameFn setNameFn) const { setNameFn("op_asm_type_interface"); } + +//===----------------------------------------------------------------------===// +// TestTypeNewlineAndIndent +//===----------------------------------------------------------------------===// + +Type TestTypeNewlineAndIndentType::parse(::mlir::AsmParser &parser) { + if (parser.parseLess()) { + return {}; + } + if (parser.parseKeyword("indented_content")) { + return {}; + } + if (parser.parseGreater()) { + return {}; + } + return get(parser.getContext()); +} + +void TestTypeNewlineAndIndentType::print(::mlir::AsmPrinter &printer) const { + printer << "<"; + printer.increaseIndent(); + printer.printNewline(); + printer << "indented_content"; + printer.decreaseIndent(); + printer.printNewline(); + printer << ">"; +} diff --git a/mlir/test/lit.site.cfg.py.in b/mlir/test/lit.site.cfg.py.in index 132aabe13594..3cd3bde9f216 100644 --- a/mlir/test/lit.site.cfg.py.in +++ b/mlir/test/lit.site.cfg.py.in @@ -69,6 +69,14 @@ config.riscv_vector_emulator_options = "@RISCV_VECTOR_EMULATOR_OPTIONS@" config.riscv_emulator_lli_executable = "@RISCV_EMULATOR_LLI_EXECUTABLE@" config.riscv_emulator_utils_lib_dir = "@RISCV_EMULATOR_UTILS_LIB_DIR@" +# Add feature to enable/disable XeGPU Dialect tests +if "@MLIR_DIALECT_XEGPU_ENABLE@" == "ON": + config.available_features.add("xegpu-dialect-enabled") + +# Add feature to enable/disable TosaTotensor tests +if "@MLIR_CONVERSION_TOSATOTENSOR_ENABLE@" == "ON": + config.available_features.add("tosa-to-tensor-enabled") + import lit.llvm lit.llvm.initialize(lit_config, config) diff --git 
a/mlir/test/mlir-tblgen/testdialect-attrdefs.mlir b/mlir/test/mlir-tblgen/testdialect-attrdefs.mlir index 89ad3594eebd..12362f2b35ff 100644 --- a/mlir/test/mlir-tblgen/testdialect-attrdefs.mlir +++ b/mlir/test/mlir-tblgen/testdialect-attrdefs.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -split-input-file -verify-diagnostics | FileCheck %s +// RUN: mlir-opt %s -split-input-file -verify-diagnostics | FileCheck %s --strict-whitespace // CHECK-LABEL: func private @compoundA() // CHECK-SAME: #test.cmpnd_a<1, !test.smpla, [5, 6]> @@ -44,3 +44,20 @@ func.func private @hexdecimalInteger() attributes { // expected-error @below {{expected an integer}} sdg = #test.decimal_shape<1x0xb> } + +// ----- + +// CHECK-LABEL: @newlineAndIndent +// CHECK-SAME: indent = #test.newline_and_indent< +// CHECK-NEXT: !test.newline_and_indent< +// CHECK-NEXT: indented_content +// CHECK-NEXT: > +// CHECK-NEXT: > +func.func private @newlineAndIndent() attributes { + indent = #test.newline_and_indent< + !test.newline_and_indent< + indented_content + > + > +} + diff --git a/mlir/test/mlir-tblgen/testdialect-typedefs.mlir b/mlir/test/mlir-tblgen/testdialect-typedefs.mlir index 18175edc81cf..25bb0bb7ca07 100644 --- a/mlir/test/mlir-tblgen/testdialect-typedefs.mlir +++ b/mlir/test/mlir-tblgen/testdialect-typedefs.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s | mlir-opt -verify-diagnostics | FileCheck %s +// RUN: mlir-opt %s | mlir-opt -verify-diagnostics | FileCheck %s --strict-whitespace ////////////// // Tests the types in the 'Test' dialect, not the ones in 'typedefs.mlir' @@ -42,3 +42,12 @@ func.func @testInt(%A : !test.int, %B : !test.int, %C : !test func.func @structTest (%A : !test.struct< {field1, !test.smpla}, {field2, !test.int} > ) { return } + +// CHECK: @newlineAndIndent(%arg0: !test.newline_and_indent< +// CHECK-NEXT: indented_content +// CHECK-NEXT: >) +func.func @newlineAndIndent(%A : !test.newline_and_indent< + indented_content +>)-> () { + return +} diff --git 
a/mlir/test/python/dialects/quant.py b/mlir/test/python/dialects/quant.py index b1d6e85f519b..57c528da7b9e 100644 --- a/mlir/test/python/dialects/quant.py +++ b/mlir/test/python/dialects/quant.py @@ -1,5 +1,6 @@ # RUN: %PYTHON %s | FileCheck %s +import numpy as np from mlir.ir import * from mlir.dialects import quant @@ -18,21 +19,28 @@ def test_type_hierarchy(): any = Type.parse("!quant.any:f32>") uniform = Type.parse("!quant.uniform:f32, 0.99872:127>") per_axis = Type.parse("!quant.uniform") + sub_channel = Type.parse( + "!quant.uniform" + ) calibrated = Type.parse("!quant.calibrated>") assert not quant.QuantizedType.isinstance(i8) assert quant.QuantizedType.isinstance(any) assert quant.QuantizedType.isinstance(uniform) assert quant.QuantizedType.isinstance(per_axis) + assert quant.QuantizedType.isinstance(sub_channel) assert quant.QuantizedType.isinstance(calibrated) assert quant.AnyQuantizedType.isinstance(any) assert quant.UniformQuantizedType.isinstance(uniform) assert quant.UniformQuantizedPerAxisType.isinstance(per_axis) + assert quant.UniformQuantizedSubChannelType.isinstance(sub_channel) assert quant.CalibratedQuantizedType.isinstance(calibrated) assert not quant.AnyQuantizedType.isinstance(uniform) assert not quant.UniformQuantizedType.isinstance(per_axis) + assert not quant.UniformQuantizedType.isinstance(sub_channel) + assert not quant.UniformQuantizedPerAxisType.isinstance(sub_channel) # CHECK-LABEL: TEST: test_any_quantized_type @@ -121,6 +129,47 @@ def test_uniform_per_axis_type(): assert per_axis == Type.parse("!quant.uniform") +# CHECK-LABEL: TEST: test_uniform_sub_channel_type +@run +def test_uniform_sub_channel_type(): + with Context(): + i8 = IntegerType.get_signless(8) + f32 = F32Type.get() + sub_channel = quant.UniformQuantizedSubChannelType.get( + quant.QuantizedType.FLAG_SIGNED, + i8, + f32, + DenseElementsAttr.get( + np.asarray([2.0, 3.0, 4.0, 5.0], np.float32).reshape(2, 2) + ), + DenseElementsAttr.get(np.asarray([10, 20, 30, 40], 
np.int8).reshape(2, 2)), + [0, 1], + [1, 2], + storage_type_min=quant.QuantizedType.default_minimum_for_integer( + is_signed=True, integral_width=8 + ), + storage_type_max=quant.QuantizedType.default_maximum_for_integer( + is_signed=True, integral_width=8 + ), + ) + + # CHECK: quantized dimensions: [0, 1] + print(f"quantized dimensions: {sub_channel.quantized_dimensions}") + # CHECK: block sizes: [1, 2] + print(f"block sizes: {sub_channel.block_sizes}") + # CHECK: scales: {{\[}}[2. 3.] + # CHECK: [4. 5.]] + print(f"scales: {np.asarray(sub_channel.scales)}") + # CHECK: zero-points: {{\[}}[10 20] + # CHECK: [30 40]] + print(f"zero-points: {np.asarray(sub_channel.zero_points)}") + # CHECK: !quant.uniform + print(sub_channel) + assert sub_channel == Type.parse( + "!quant.uniform" + ) + + # CHECK-LABEL: TEST: test_calibrated_type @run def test_calibrated_type(): diff --git a/mlir/test/python/ir/value.py b/mlir/test/python/ir/value.py index 9a8146bd9350..75fcd5943c89 100644 --- a/mlir/test/python/ir/value.py +++ b/mlir/test/python/ir/value.py @@ -237,15 +237,28 @@ def testValuePrintAsOperand(): print(value2) topFn = func.FuncOp("test", ([i32, i32], [])) - entry_block = Block.create_at_start(topFn.operation.regions[0], [i32, i32]) + entry_block1 = Block.create_at_start(topFn.operation.regions[0], [i32, i32]) - with InsertionPoint(entry_block): + with InsertionPoint(entry_block1): value3 = Operation.create("custom.op3", results=[i32]).results[0] # CHECK: Value(%[[VAL3:.*]] = "custom.op3"() : () -> i32) print(value3) value4 = Operation.create("custom.op4", results=[i32]).results[0] # CHECK: Value(%[[VAL4:.*]] = "custom.op4"() : () -> i32) print(value4) + + f = func.FuncOp("test", ([i32, i32], [])) + entry_block2 = Block.create_at_start(f.operation.regions[0], [i32, i32]) + with InsertionPoint(entry_block2): + value5 = Operation.create("custom.op5", results=[i32]).results[0] + # CHECK: Value(%[[VAL5:.*]] = "custom.op5"() : () -> i32) + print(value5) + value6 = 
Operation.create("custom.op6", results=[i32]).results[0] + # CHECK: Value(%[[VAL6:.*]] = "custom.op6"() : () -> i32) + print(value6) + + func.ReturnOp([]) + func.ReturnOp([]) # CHECK: %[[VAL1]] @@ -272,10 +285,20 @@ def testValuePrintAsOperand(): # CHECK: %1 print(value4.get_name(use_local_scope=True)) + # CHECK: %[[VAL5]] + print(value5.get_name()) + # CHECK: %[[VAL6]] + print(value6.get_name()) + # CHECK: %[[ARG0:.*]] - print(entry_block.arguments[0].get_name()) + print(entry_block1.arguments[0].get_name()) # CHECK: %[[ARG1:.*]] - print(entry_block.arguments[1].get_name()) + print(entry_block1.arguments[1].get_name()) + + # CHECK: %[[ARG2:.*]] + print(entry_block2.arguments[0].get_name()) + # CHECK: %[[ARG3:.*]] + print(entry_block2.arguments[1].get_name()) # CHECK: module { # CHECK: %[[VAL1]] = "custom.op1"() : () -> i32 @@ -283,6 +306,11 @@ def testValuePrintAsOperand(): # CHECK: func.func @test(%[[ARG0]]: i32, %[[ARG1]]: i32) { # CHECK: %[[VAL3]] = "custom.op3"() : () -> i32 # CHECK: %[[VAL4]] = "custom.op4"() : () -> i32 + # CHECK: func @test(%[[ARG2]]: i32, %[[ARG3]]: i32) { + # CHECK: %[[VAL5]] = "custom.op5"() : () -> i32 + # CHECK: %[[VAL6]] = "custom.op6"() : () -> i32 + # CHECK: return + # CHECK: } # CHECK: return # CHECK: } # CHECK: } diff --git a/mlir/tools/mlir-src-sharder/mlir-src-sharder.cpp b/mlir/tools/mlir-src-sharder/mlir-src-sharder.cpp index 9ec9b7215020..9271bc4bd7dc 100644 --- a/mlir/tools/mlir-src-sharder/mlir-src-sharder.cpp +++ b/mlir/tools/mlir-src-sharder/mlir-src-sharder.cpp @@ -72,6 +72,12 @@ int main(int argc, char **argv) { "Name of the macro to be defined -- ignored by mlir-src-sharder"), llvm::cl::value_desc("macro name"), llvm::cl::Prefix); + // CMake/TableGen pass this flag, re-registering after ResetCommandLineParser + // avoids "unknown argument" errors. 
+ llvm::cl::opt noWarnOnUnusedTemplateArg( + "no-warn-on-unused-template-args", + llvm::cl::desc("Disable unused template argument warnings.")); + llvm::InitLLVM y(argc, argv); llvm::cl::ParseCommandLineOptions(argc, argv); diff --git a/mlir/tools/mlir-tblgen/AttrOrTypeFormatGen.cpp b/mlir/tools/mlir-tblgen/AttrOrTypeFormatGen.cpp index a4ae271edb6b..1445c45cde2d 100644 --- a/mlir/tools/mlir-tblgen/AttrOrTypeFormatGen.cpp +++ b/mlir/tools/mlir-tblgen/AttrOrTypeFormatGen.cpp @@ -924,9 +924,7 @@ void DefFormat::genOptionalGroupPrinter(OptionalElement *el, FmtContext &ctx, void DefFormat::genWhitespacePrinter(WhitespaceElement *el, FmtContext &ctx, MethodBody &os) { if (el->getValue() == "\\n") { - // FIXME: The newline should be `printer.printNewLine()`, i.e., handled by - // the printer. - os << tgfmt("$_printer << '\\n';\n", &ctx); + os << tgfmt("$_printer.printNewline();\n", &ctx); } else if (!el->getValue().empty()) { os << tgfmt("$_printer << \"$0\";\n", &ctx, el->getValue()); } else { diff --git a/mlir/unittests/IR/BlobManagerTest.cpp b/mlir/unittests/IR/BlobManagerTest.cpp new file mode 100644 index 000000000000..d82482ddb793 --- /dev/null +++ b/mlir/unittests/IR/BlobManagerTest.cpp @@ -0,0 +1,74 @@ +//===- mlir/unittest/IR/BlobManagerTest.cpp - Blob management unit tests --===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "../../test/lib/Dialect/Test/TestDialect.h" +#include "mlir/IR/DialectResourceBlobManager.h" +#include "mlir/Parser/Parser.h" + +#include "gtest/gtest.h" + +using namespace mlir; + +namespace { + +StringLiteral moduleStr = R"mlir( +"test.use1"() {attr = dense_resource : tensor<1xi64> } : () -> () + +{-# + dialect_resources: { + builtin: { + blob1: "0x08000000ABCDABCDABCDABCE" + } + } +#-} +)mlir"; + +TEST(DialectResourceBlobManagerTest, Lookup) { + MLIRContext context; + context.loadDialect(); + + OwningOpRef m = parseSourceString(moduleStr, &context); + ASSERT_TRUE(m); + + const auto &dialectManager = + mlir::DenseResourceElementsHandle::getManagerInterface(&context); + ASSERT_NE(dialectManager.getBlobManager().lookup("blob1"), nullptr); +} + +TEST(DialectResourceBlobManagerTest, GetBlobMap) { + MLIRContext context; + context.loadDialect(); + + OwningOpRef m = parseSourceString(moduleStr, &context); + ASSERT_TRUE(m); + + Block *block = m->getBody(); + auto &op = block->getOperations().front(); + auto resourceAttr = op.getAttrOfType("attr"); + ASSERT_NE(resourceAttr, nullptr); + + const auto &dialectManager = + resourceAttr.getRawHandle().getManagerInterface(&context); + + bool blobsArePresent = false; + dialectManager.getBlobManager().getBlobMap( + [&](const llvm::StringMap + &blobMap) { blobsArePresent = blobMap.contains("blob1"); }); + ASSERT_TRUE(blobsArePresent); + + // remove operations that use resources - resources must still be accessible + block->clear(); + + blobsArePresent = false; + dialectManager.getBlobManager().getBlobMap( + [&](const llvm::StringMap + &blobMap) { blobsArePresent = blobMap.contains("blob1"); }); + ASSERT_TRUE(blobsArePresent); +} + +} // end anonymous namespace diff --git a/mlir/unittests/IR/CMakeLists.txt b/mlir/unittests/IR/CMakeLists.txt index 821ff7d14dab..9dbd6cbe2a86 
100644 --- a/mlir/unittests/IR/CMakeLists.txt +++ b/mlir/unittests/IR/CMakeLists.txt @@ -17,6 +17,7 @@ add_mlir_unittest(MLIRIRTests TypeTest.cpp TypeAttrNamesTest.cpp OpPropertiesTest.cpp + BlobManagerTest.cpp DEPENDS MLIRTestInterfaceIncGen diff --git a/mlir/unittests/Pass/CMakeLists.txt b/mlir/unittests/Pass/CMakeLists.txt index a47d2eead618..e5dfd7560645 100644 --- a/mlir/unittests/Pass/CMakeLists.txt +++ b/mlir/unittests/Pass/CMakeLists.txt @@ -2,6 +2,7 @@ add_mlir_unittest(MLIRPassTests AnalysisManagerTest.cpp PassManagerTest.cpp PassPipelineParserTest.cpp + PassPipelineOptionsTest.cpp ) mlir_target_link_libraries(MLIRPassTests PRIVATE diff --git a/mlir/unittests/Pass/PassPipelineOptionsTest.cpp b/mlir/unittests/Pass/PassPipelineOptionsTest.cpp new file mode 100644 index 000000000000..bec590b8bf01 --- /dev/null +++ b/mlir/unittests/Pass/PassPipelineOptionsTest.cpp @@ -0,0 +1,121 @@ +//===- PassPipelineParserTest.cpp - Pass Parser unit tests ----------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "mlir/IR/Builders.h" +#include "mlir/IR/BuiltinOps.h" +#include "mlir/Pass/Pass.h" +#include "mlir/Pass/PassManager.h" +#include "gtest/gtest.h" + +#include + +using namespace mlir; +using namespace mlir::detail; + +namespace { + +// these types are used for automatically generated code of pass +using StrPassOpt = ::mlir::Pass::Option; +using IntPassOpt = ::mlir::Pass::Option; +using BoolPassOpt = ::mlir::Pass::Option; + +// these types are used for pipeline options that we manually pass to the constructor +using StrOption = mlir::detail::PassOptions::Option; +using IntOption = mlir::detail::PassOptions::Option; +using BoolOption = mlir::detail::PassOptions::Option; + +const int intOptDefaultVal = 5; +const bool boolOptDefaultVal = true; + +struct SimplePassWithOptions + : public PassWrapper> { + MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(SimplePassWithOptions) + + SimplePassWithOptions() = default; + SimplePassWithOptions(const SimplePassWithOptions &other) : PassWrapper(other) {} + + SimplePassWithOptions(const detail::PassOptions& options) { + copyOptionValuesFrom(options); + } + + LogicalResult initialize(MLIRContext *ctx) final { + return success(); + } + + void runOnOperation() override { } + +public: + StrPassOpt strOpt{*this, "str-opt", ::llvm::cl::desc("string test option"), llvm::cl::init("")}; + IntPassOpt intOpt{*this, "int-opt", ::llvm::cl::desc("int test option"), llvm::cl::init(intOptDefaultVal)}; + BoolPassOpt boolOpt{*this, "bool-opt", ::llvm::cl::desc("bool test option"), llvm::cl::init(boolOptDefaultVal)}; +}; + +TEST(PassPipelineOptionsTest, CopyAllOptions) { + struct DuplicatedOtions : ::mlir::PassPipelineOptions { + StrOption strOpt{*this, "str-opt", ::llvm::cl::desc("string test option")}; + IntOption intOpt{*this, "int-opt", ::llvm::cl::desc("int test option"), 
llvm::cl::init(intOptDefaultVal)}; + BoolOption boolOpt{*this, "bool-opt", ::llvm::cl::desc("bool test option"), llvm::cl::init(boolOptDefaultVal)}; + }; + + const auto expectedStrVal = "test1"; + const auto expectedIntVal = -intOptDefaultVal; + const auto expectedBoolVal = !boolOptDefaultVal; + + DuplicatedOtions options; + options.strOpt.setValue(expectedStrVal); + options.intOpt.setValue(expectedIntVal); + options.boolOpt.setValue(expectedBoolVal); + + const auto& pass = std::make_unique(options); + + EXPECT_EQ(pass->strOpt.getValue(), expectedStrVal); + EXPECT_EQ(pass->intOpt.getValue(), expectedIntVal); + EXPECT_EQ(pass->boolOpt.getValue(), expectedBoolVal); +} + +TEST(PassPipelineOptionsTest, CopyMatchedOptions) { + struct SomePipelineOptions : ::mlir::PassPipelineOptions { + StrOption strOpt{*this, "str-opt", ::llvm::cl::desc("string test option")}; + IntOption intOpt{*this, "int-opt", ::llvm::cl::desc("int test option")}; + StrOption anotherStrOpt{*this, "another-str-pipeline-opt", + ::llvm::cl::desc("there is no such option in SimplePassWithOptions"), llvm::cl::init("anotherOptVal")}; + IntOption anotherIntOpt{*this, "another-int-pipeline-opt", + ::llvm::cl::desc("there is no such option in SimplePassWithOptions"), llvm::cl::init(10)}; + }; + + const auto expectedStrVal = "test2"; + const auto expectedIntVal = -intOptDefaultVal; + + SomePipelineOptions options; + options.strOpt.setValue(expectedStrVal); + options.intOpt.setValue(expectedIntVal); + + const auto pass = std::make_unique(options); + + EXPECT_EQ(pass->strOpt.getValue(), expectedStrVal); + EXPECT_EQ(pass->intOpt.getValue(), expectedIntVal); + EXPECT_EQ(pass->boolOpt.getValue(), boolOptDefaultVal); +} + +TEST(PassPipelineOptionsTest, NoMatchedOptions) { + struct SomePipelineOptions : ::mlir::PassPipelineOptions { + StrOption anotherStrOpt{*this, "another-str-pipeline-opt", + ::llvm::cl::desc("there is no such option in SimplePassWithOptions"), llvm::cl::init("anotherOptVal")}; + IntOption 
anotherIntOpt{*this, "another-int-pipeline-opt", + ::llvm::cl::desc("there is no such option in SimplePassWithOptions"), llvm::cl::init(10)}; + }; + + SomePipelineOptions options; + const auto pass = std::make_unique(options); + + EXPECT_EQ(pass->strOpt.getValue(), ""); + EXPECT_EQ(pass->intOpt.getValue(), intOptDefaultVal); + EXPECT_EQ(pass->boolOpt.getValue(), boolOptDefaultVal); +} + +} // namespace diff --git a/mlir/utils/tree-sitter-mlir/package-lock.json b/mlir/utils/tree-sitter-mlir/package-lock.json index 5676fb892f12..7cd1cdea408f 100644 --- a/mlir/utils/tree-sitter-mlir/package-lock.json +++ b/mlir/utils/tree-sitter-mlir/package-lock.json @@ -23,10 +23,11 @@ "dev": true }, "node_modules/brace-expansion": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", - "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz", + "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==", "dev": true, + "license": "MIT", "dependencies": { "balanced-match": "^1.0.0" } diff --git a/mlir/utils/vscode/package-lock.json b/mlir/utils/vscode/package-lock.json index 1efd5779f5cb..10f2d637a0cc 100644 --- a/mlir/utils/vscode/package-lock.json +++ b/mlir/utils/vscode/package-lock.json @@ -276,9 +276,9 @@ "dev": true }, "node_modules/brace-expansion": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", 
"dependencies": { "balanced-match": "^1.0.0", "concat-map": "0.0.1" @@ -348,13 +348,19 @@ } }, "node_modules/call-bind": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.2.tgz", - "integrity": "sha512-7O+FbCihrB5WGbFYesctwmTKae6rOiIzmz1icreWJ+0aA7LJfuqhEso2T9ncpcFtzMQtzXf2QGGueWJGTYsqrA==", + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.7.tgz", + "integrity": "sha512-GHTSNSYICQ7scH7sZ+M2rFopRoLh8t2bLSW6BbgrtLsahOIB5iyAVJf9GjWK3cYTDaMj4XdBpM1cA6pIS0Kv2w==", "dev": true, "dependencies": { - "function-bind": "^1.1.1", - "get-intrinsic": "^1.0.2" + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "set-function-length": "^1.2.1" + }, + "engines": { + "node": ">= 0.4" }, "funding": { "url": "https://github.com/sponsors/ljharb" @@ -595,6 +601,23 @@ "node": ">=4.0.0" } }, + "node_modules/define-data-property": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.4.tgz", + "integrity": "sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==", + "dev": true, + "dependencies": { + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "gopd": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/delegates": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/delegates/-/delegates-1.0.0.tgz", @@ -695,6 +718,27 @@ "url": "https://github.com/fb55/entities?sponsor=1" } }, + "node_modules/es-define-property": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.0.tgz", + "integrity": "sha512-jxayLKShrEqqzJ0eumQbVhTYQM27CfT1T35+gCgDFoL82JLsXqTJ76zv6A0YLOgEnLUMvLzsDsGIrl8NFpT2gQ==", + "dev": true, + "dependencies": { + "get-intrinsic": "^1.2.4" + }, + "engines": { + "node": ">= 
0.4" + } + }, + "node_modules/es-errors": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", + "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", + "dev": true, + "engines": { + "node": ">= 0.4" + } + }, "node_modules/escape-string-regexp": { "version": "1.0.5", "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", @@ -788,10 +832,13 @@ } }, "node_modules/function-bind": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", - "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==", - "dev": true + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "dev": true, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } }, "node_modules/gauge": { "version": "2.7.4", @@ -811,14 +858,19 @@ } }, "node_modules/get-intrinsic": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.1.1.tgz", - "integrity": "sha512-kWZrnVM42QCiEA2Ig1bG8zjoIMOgxWwYCEeNdwY6Tv/cOSeGpcoX4pXHfKUxNKVoArnrEr2e9srnAxxGIraS9Q==", + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.4.tgz", + "integrity": "sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==", "dev": true, "dependencies": { - "function-bind": "^1.1.1", - "has": "^1.0.3", - "has-symbols": "^1.0.1" + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "has-proto": "^1.0.1", + "has-symbols": "^1.0.3", + "hasown": "^2.0.0" + }, + "engines": { + "node": ">= 0.4" }, "funding": { "url": "https://github.com/sponsors/ljharb" @@ -862,6 +914,18 @@ "node": ">= 6" } }, + 
"node_modules/gopd": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.0.1.tgz", + "integrity": "sha512-d65bNlIadxvpb/A2abVdlqKqV563juRnZ1Wtk6s1sIR8uNsXR70xqIzVqxVf1eTqDunwT2MkczEeaezCKTZhwA==", + "dev": true, + "dependencies": { + "get-intrinsic": "^1.1.3" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/graceful-fs": { "version": "4.2.6", "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.6.tgz", @@ -889,10 +953,34 @@ "node": ">=4" } }, - "node_modules/has-symbols": { + "node_modules/has-property-descriptors": { "version": "1.0.2", - "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.2.tgz", - "integrity": "sha512-chXa79rL/UC2KlX17jo3vRGz0azaWEx5tGqZg5pO3NUyEJVB17dMruQlzCCOfUvElghKcm5194+BCRvi2Rv/Gw==", + "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz", + "integrity": "sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==", + "dev": true, + "dependencies": { + "es-define-property": "^1.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-proto": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.0.3.tgz", + "integrity": "sha512-SJ1amZAJUiZS+PhsVLf5tGydlaVB8EdFpaSO4gmiUKUOxk8qzn5AIy4ZeJUmh22znIdk/uMAUT2pl3FxzVUH+Q==", + "dev": true, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-symbols": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz", + "integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==", "dev": true, "engines": { "node": ">= 0.4" @@ -908,6 +996,18 @@ "dev": true, "optional": true }, + "node_modules/hasown": { + "version": "2.0.2", + "resolved": 
"https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + "dev": true, + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, "node_modules/hosted-git-info": { "version": "4.1.0", "resolved": "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-4.1.0.tgz", @@ -1195,9 +1295,9 @@ } }, "node_modules/minimatch": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", - "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==", + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", "dependencies": { "brace-expansion": "^1.1.7" }, @@ -1339,9 +1439,9 @@ } }, "node_modules/object-inspect": { - "version": "1.11.0", - "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.11.0.tgz", - "integrity": "sha512-jp7ikS6Sd3GxQfZJPyH3cjcbJF6GZPClgdV+EFygjFLQ5FmW/dRUnTd9PQ9k0JhoNDabWFbpF1yCdSWCC6gexg==", + "version": "1.13.1", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.1.tgz", + "integrity": "sha512-5qoj1RUiKOMsCCNLV1CBiPYE10sziTsnmNxkAI/rZhiD63CF7IqdFGC/XzjWjpSgLf0LxXX3bDFIh0E18f6UhQ==", "dev": true, "funding": { "url": "https://github.com/sponsors/ljharb" @@ -1458,12 +1558,12 @@ } }, "node_modules/qs": { - "version": "6.10.1", - "resolved": "https://registry.npmjs.org/qs/-/qs-6.10.1.tgz", - "integrity": "sha512-M528Hph6wsSVOBiYUnGf+K/7w0hNshs/duGsNXPUCLH5XAqjEtiPGwNONLV0tBH8NoGb0mvD5JubnUTrujKDTg==", + "version": "6.12.1", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.12.1.tgz", + "integrity": "sha512-zWmv4RSuB9r2mYQw3zxQuHWeU+42aKi1wWig/j4ele4ygELZ7PEO6MM7rim9oAQH2A5MWfsAVf/jPvTPgCbvUQ==", "dev": true, 
"dependencies": { - "side-channel": "^1.0.4" + "side-channel": "^1.0.6" }, "engines": { "node": ">=0.6" @@ -1582,6 +1682,23 @@ "dev": true, "optional": true }, + "node_modules/set-function-length": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.2.2.tgz", + "integrity": "sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg==", + "dev": true, + "dependencies": { + "define-data-property": "^1.1.4", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "gopd": "^1.0.1", + "has-property-descriptors": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, "node_modules/setimmediate": { "version": "1.0.5", "resolved": "https://registry.npmjs.org/setimmediate/-/setimmediate-1.0.5.tgz", @@ -1589,14 +1706,18 @@ "dev": true }, "node_modules/side-channel": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.4.tgz", - "integrity": "sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw==", + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.6.tgz", + "integrity": "sha512-fDW/EZ6Q9RiO8eFG8Hj+7u/oW+XrPTIChwCOM2+th2A6OblDtYYIpve9m+KvI9Z4C9qSEXlaGR6bTEYHReuglA==", "dev": true, "dependencies": { - "call-bind": "^1.0.0", - "get-intrinsic": "^1.0.2", - "object-inspect": "^1.9.0" + "call-bind": "^1.0.7", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.4", + "object-inspect": "^1.13.1" + }, + "engines": { + "node": ">= 0.4" }, "funding": { "url": "https://github.com/sponsors/ljharb" @@ -1716,10 +1837,11 @@ } }, "node_modules/tar-fs": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/tar-fs/-/tar-fs-2.1.1.tgz", - "integrity": "sha512-V0r2Y9scmbDRLCNex/+hYzvp/zyYjvFbHPNgVTKfQvVrb6guiE/fxP+XblDNR011utopbkex2nM4dHNV6GDsng==", + "version": "2.1.3", + "resolved": 
"https://registry.npmjs.org/tar-fs/-/tar-fs-2.1.3.tgz", + "integrity": "sha512-090nwYJDmlhwFwEW3QQl+vaNnxsO2yVsd45eTKRBzSzu+hlb1w2K9inVq5b0ngXuLVqQ4ApvsUHHnu/zQNkWAg==", "dev": true, + "license": "MIT", "optional": true, "dependencies": { "chownr": "^1.1.1", @@ -2199,9 +2321,9 @@ "dev": true }, "brace-expansion": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", "requires": { "balanced-match": "^1.0.0", "concat-map": "0.0.1" @@ -2245,13 +2367,16 @@ "dev": true }, "call-bind": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.2.tgz", - "integrity": "sha512-7O+FbCihrB5WGbFYesctwmTKae6rOiIzmz1icreWJ+0aA7LJfuqhEso2T9ncpcFtzMQtzXf2QGGueWJGTYsqrA==", + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.7.tgz", + "integrity": "sha512-GHTSNSYICQ7scH7sZ+M2rFopRoLh8t2bLSW6BbgrtLsahOIB5iyAVJf9GjWK3cYTDaMj4XdBpM1cA6pIS0Kv2w==", "dev": true, "requires": { - "function-bind": "^1.1.1", - "get-intrinsic": "^1.0.2" + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "set-function-length": "^1.2.1" } }, "chainsaw": { @@ -2434,6 +2559,17 @@ "dev": true, "optional": true }, + "define-data-property": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.4.tgz", + "integrity": "sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==", + "dev": true, + "requires": { + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "gopd": "^1.0.1" + } + 
}, "delegates": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/delegates/-/delegates-1.0.0.tgz", @@ -2510,6 +2646,21 @@ "integrity": "sha512-p92if5Nz619I0w+akJrLZH0MX0Pb5DX39XOwQTtXSdQQOaYH03S1uIQp4mhOZtAXrxq4ViO67YTiLBo2638o9A==", "dev": true }, + "es-define-property": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.0.tgz", + "integrity": "sha512-jxayLKShrEqqzJ0eumQbVhTYQM27CfT1T35+gCgDFoL82JLsXqTJ76zv6A0YLOgEnLUMvLzsDsGIrl8NFpT2gQ==", + "dev": true, + "requires": { + "get-intrinsic": "^1.2.4" + } + }, + "es-errors": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", + "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", + "dev": true + }, "escape-string-regexp": { "version": "1.0.5", "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", @@ -2583,9 +2734,9 @@ } }, "function-bind": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", - "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==", + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", "dev": true }, "gauge": { @@ -2606,14 +2757,16 @@ } }, "get-intrinsic": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.1.1.tgz", - "integrity": "sha512-kWZrnVM42QCiEA2Ig1bG8zjoIMOgxWwYCEeNdwY6Tv/cOSeGpcoX4pXHfKUxNKVoArnrEr2e9srnAxxGIraS9Q==", + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.4.tgz", + "integrity": "sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==", "dev": true, 
"requires": { - "function-bind": "^1.1.1", - "has": "^1.0.3", - "has-symbols": "^1.0.1" + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "has-proto": "^1.0.1", + "has-symbols": "^1.0.3", + "hasown": "^2.0.0" } }, "github-from-package": { @@ -2645,6 +2798,15 @@ "is-glob": "^4.0.1" } }, + "gopd": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.0.1.tgz", + "integrity": "sha512-d65bNlIadxvpb/A2abVdlqKqV563juRnZ1Wtk6s1sIR8uNsXR70xqIzVqxVf1eTqDunwT2MkczEeaezCKTZhwA==", + "dev": true, + "requires": { + "get-intrinsic": "^1.1.3" + } + }, "graceful-fs": { "version": "4.2.6", "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.6.tgz", @@ -2666,10 +2828,25 @@ "integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0=", "dev": true }, - "has-symbols": { + "has-property-descriptors": { "version": "1.0.2", - "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.2.tgz", - "integrity": "sha512-chXa79rL/UC2KlX17jo3vRGz0azaWEx5tGqZg5pO3NUyEJVB17dMruQlzCCOfUvElghKcm5194+BCRvi2Rv/Gw==", + "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz", + "integrity": "sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==", + "dev": true, + "requires": { + "es-define-property": "^1.0.0" + } + }, + "has-proto": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.0.3.tgz", + "integrity": "sha512-SJ1amZAJUiZS+PhsVLf5tGydlaVB8EdFpaSO4gmiUKUOxk8qzn5AIy4ZeJUmh22znIdk/uMAUT2pl3FxzVUH+Q==", + "dev": true + }, + "has-symbols": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz", + "integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==", "dev": true }, "has-unicode": { @@ -2679,6 +2856,15 @@ "dev": true, "optional": true }, + "hasown": { + "version": "2.0.2", + "resolved": 
"https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + "dev": true, + "requires": { + "function-bind": "^1.1.2" + } + }, "hosted-git-info": { "version": "4.1.0", "resolved": "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-4.1.0.tgz", @@ -2895,9 +3081,9 @@ "optional": true }, "minimatch": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", - "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==", + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", "requires": { "brace-expansion": "^1.1.7" } @@ -3014,9 +3200,9 @@ "optional": true }, "object-inspect": { - "version": "1.11.0", - "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.11.0.tgz", - "integrity": "sha512-jp7ikS6Sd3GxQfZJPyH3cjcbJF6GZPClgdV+EFygjFLQ5FmW/dRUnTd9PQ9k0JhoNDabWFbpF1yCdSWCC6gexg==", + "version": "1.13.1", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.1.tgz", + "integrity": "sha512-5qoj1RUiKOMsCCNLV1CBiPYE10sziTsnmNxkAI/rZhiD63CF7IqdFGC/XzjWjpSgLf0LxXX3bDFIh0E18f6UhQ==", "dev": true }, "once": { @@ -3115,12 +3301,12 @@ } }, "qs": { - "version": "6.10.1", - "resolved": "https://registry.npmjs.org/qs/-/qs-6.10.1.tgz", - "integrity": "sha512-M528Hph6wsSVOBiYUnGf+K/7w0hNshs/duGsNXPUCLH5XAqjEtiPGwNONLV0tBH8NoGb0mvD5JubnUTrujKDTg==", + "version": "6.12.1", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.12.1.tgz", + "integrity": "sha512-zWmv4RSuB9r2mYQw3zxQuHWeU+42aKi1wWig/j4ele4ygELZ7PEO6MM7rim9oAQH2A5MWfsAVf/jPvTPgCbvUQ==", "dev": true, "requires": { - "side-channel": "^1.0.4" + "side-channel": "^1.0.6" } }, "rc": { @@ -3212,6 +3398,20 @@ "dev": true, 
"optional": true }, + "set-function-length": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.2.2.tgz", + "integrity": "sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg==", + "dev": true, + "requires": { + "define-data-property": "^1.1.4", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "gopd": "^1.0.1", + "has-property-descriptors": "^1.0.2" + } + }, "setimmediate": { "version": "1.0.5", "resolved": "https://registry.npmjs.org/setimmediate/-/setimmediate-1.0.5.tgz", @@ -3219,14 +3419,15 @@ "dev": true }, "side-channel": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.4.tgz", - "integrity": "sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw==", + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.6.tgz", + "integrity": "sha512-fDW/EZ6Q9RiO8eFG8Hj+7u/oW+XrPTIChwCOM2+th2A6OblDtYYIpve9m+KvI9Z4C9qSEXlaGR6bTEYHReuglA==", "dev": true, "requires": { - "call-bind": "^1.0.0", - "get-intrinsic": "^1.0.2", - "object-inspect": "^1.9.0" + "call-bind": "^1.0.7", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.4", + "object-inspect": "^1.13.1" } }, "signal-exit": { @@ -3303,9 +3504,9 @@ } }, "tar-fs": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/tar-fs/-/tar-fs-2.1.1.tgz", - "integrity": "sha512-V0r2Y9scmbDRLCNex/+hYzvp/zyYjvFbHPNgVTKfQvVrb6guiE/fxP+XblDNR011utopbkex2nM4dHNV6GDsng==", + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/tar-fs/-/tar-fs-2.1.2.tgz", + "integrity": "sha512-EsaAXwxmx8UB7FRKqeozqEPop69DXcmYwTQwXvyAPF352HJsPdkVhvTaDPYqfNgruveJIJy3TA2l+2zj8LJIJA==", "dev": true, "optional": true, "requires": {