diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml index 6da25933cb..91095d3a23 100644 --- a/.github/workflows/continuous-integration.yml +++ b/.github/workflows/continuous-integration.yml @@ -94,24 +94,27 @@ jobs: - name: Functional Tests - PanoramaFisheyeHdr Pipeline working-directory: ./functional_tests run: | - # clone required repos + # Clone required repos git clone --branch develop https://github.com/alicevision/Meshroom.git git clone --branch main https://github.com/alicevision/dataset_panoramaFisheyeHdr.git cd Meshroom/ - # select meshroom branch meshroom_avBranch=$(git ls-remote --heads https://github.com/alicevision/Meshroom.git $GITHUB_HEAD_REF | cut -f 1) if [ $meshroom_avBranch != "" ]; then git checkout $meshroom_avBranch; echo "Use Meshroom/$GITHUB_HEAD_REF"; fi + + # Set environment variables specific to Meshroom export MESHROOM_INSTALL_DIR=$PWD + export MESHROOM_NODES_PATH=${MESHROOM_NODES_PATH}:${ALICEVISION_ROOT}/share/meshroom + export MESHROOM_PIPELINE_TEMPLATES_PATH=${MESHROOM_PIPELINE_TEMPLATES_PATH}:${ALICEVISION_ROOT}/share/meshroom + + # Set environment variables to find executables and libraries export PYTHONPATH=$PWD:${ALICEVISION_ROOT}/bin:${PYTHONPATH} export PATH=$PATH:${ALICEVISION_ROOT}/bin export LD_LIBRARY_PATH=${ALICEVISION_ROOT}/lib:${ALICEVISION_ROOT}/lib64:${DEPS_INSTALL_DIR}/lib64:${DEPS_INSTALL_DIR}/lib:${LD_LIBRARY_PATH} - mkdir ./outputData - cd bin/ - python3 --version - pip3 --version + + # Run the test pipeline pip3 install psutil - echo "ldd aliceVision_cameraInit" - ldd ${ALICEVISION_ROOT}/bin/aliceVision_cameraInit + cd bin/ + mkdir ./outputData python3 meshroom_batch -i $PWD/../../dataset_panoramaFisheyeHdr/RAW -p panoramaFisheyeHdr -o $PWD/../outputData - name: Functional Tests - SfM Quality Evaluation @@ -119,13 +122,11 @@ jobs: run: | git clone --branch master https://github.com/alicevision/SfM_quality_evaluation.git cd SfM_quality_evaluation/ - # checkout a specific commit to ensure repeatability + # Checkout a specific commit to ensure repeatability git checkout 36e3bf2d05c64d1726cb4a0e770923794f203f98 export PYTHONPATH=${ALICEVISION_ROOT}/bin:${PYTHONPATH} export LD_LIBRARY_PATH=${ALICEVISION_ROOT}/lib:${ALICEVISION_ROOT}/lib64:${DEPS_INSTALL_DIR}/lib64:${DEPS_INSTALL_DIR}/lib:${LD_LIBRARY_PATH} - echo "ldd aliceVision_cameraInit" - ldd ${ALICEVISION_ROOT}/bin/aliceVision_cameraInit - python3 --version + python3 EvaluationLauncher.py -s ${ALICEVISION_ROOT}/bin -i $PWD/Benchmarking_Camera_Calibration_2008/ -o $PWD/reconstructions/ -r $PWD/results.json -v - name: Python Binding - Unit Tests @@ -135,6 +136,27 @@ jobs: pip3 install pytest pytest ./pyTests + - name: Meshroom Plugin - Templates validity + working-directory: ./functional_tests + run: | + git clone https://github.com/meshroomHub/mrSegmentation.git + cd Meshroom/ + export MESHROOM_INSTALL_DIR=$PWD + export MESHROOM_NODES_PATH=${MESHROOM_NODES_PATH}:${ALICEVISION_ROOT}/share/meshroom:$PWD/../mrSegmentation/meshroom/nodes + export MESHROOM_PIPELINE_TEMPLATES_PATH=${MESHROOM_PIPELINE_TEMPLATES_PATH}:${ALICEVISION_ROOT}/share/meshroom + export PYTHONPATH=$PWD:${ALICEVISION_ROOT}/bin:${PYTHONPATH} + export PATH=$PATH:${ALICEVISION_ROOT}/bin + export LD_LIBRARY_PATH=${ALICEVISION_ROOT}/lib:${ALICEVISION_ROOT}/lib64:${DEPS_INSTALL_DIR}/lib64:${DEPS_INSTALL_DIR}/lib:${LD_LIBRARY_PATH} + + echo " + import sys + from meshroom.core import test + if test.checkAllTemplatesVersions(): + sys.exit(0) + sys.exit(1) + " | tee 
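The heredoc above relies on `meshroom.core.test.checkAllTemplatesVersions()` to verify that every pipeline template references current node versions. As a reading aid, the same check written as a standalone script; it assumes only what the workflow step already assumes (that `meshroom.core.test` is importable once `PYTHONPATH` points at the Meshroom checkout):

```python
# test_templatesVersions.py -- standalone form of the inline check above.
# Exits 0 when all templates reference up-to-date node versions, 1 otherwise,
# so the CI step fails as soon as a template goes stale.
import sys

from meshroom.core import test

if __name__ == "__main__":
    sys.exit(0 if test.checkAllTemplatesVersions() else 1)
```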
test_templatesVersions.py + python3 test_templatesVersions.py + build-windows: runs-on: windows-latest env: @@ -229,6 +251,7 @@ jobs: -DALICEVISION_BUILD_PHOTOMETRICSTEREO=OFF -DALICEVISION_BUILD_SEGMENTATION=OFF -DALICEVISION_BUILD_SWIG_BINDING=ON + -DALICEVISION_INSTALL_MESHROOM_PLUGIN=OFF -DBOOST_NO_CXX11=ON cmakeBuildType: Release diff --git a/CMakeLists.txt b/CMakeLists.txt index f1b8d9f775..af94db7699 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -8,6 +8,7 @@ option(ALICEVISION_BUILD_TESTS "Build AliceVision tests" OFF) option(AV_USE_CUDA "Enable CUDA" ON) option(AV_USE_OPENMP "Enable OpenMP" $<$,OFF,ON>) # disable by default for AppleClang option(BUILD_SHARED_LIBS "Build shared libraries" ON) +option(ALICEVISION_INSTALL_MESHROOM_PLUGIN "Install AliceVision's plugin for Meshroom" ON) if(NOT CMAKE_BUILD_TYPE) set(CMAKE_BUILD_TYPE Release CACHE STRING "Build type for AliceVision" FORCE) @@ -85,6 +86,13 @@ install( endif() +if (ALICEVISION_INSTALL_MESHROOM_PLUGIN) + install( + DIRECTORY meshroom + DESTINATION ${CMAKE_INSTALL_DATADIR} + ) +endif() + # Bundle target (see src/cmake/MakeBundle.cmake) # Note: require that the install rule has been executed # Include VCPKG installed dir for runtime dependencies lookup diff --git a/meshroom/aliceVision/ApplyCalibration.py b/meshroom/aliceVision/ApplyCalibration.py new file mode 100644 index 0000000000..a90c98100c --- /dev/null +++ b/meshroom/aliceVision/ApplyCalibration.py @@ -0,0 +1,49 @@ +__version__ = "1.0" + +from meshroom.core import desc +from meshroom.core.utils import VERBOSE_LEVEL + + +class ApplyCalibration(desc.AVCommandLineNode): + commandLine = "aliceVision_applyCalibration {allParams}" + size = desc.DynamicNodeSize("input") + + category = "Utils" + documentation = """ Overwrite intrinsics with a calibrated intrinsic. 
""" + + inputs = [ + desc.File( + name="input", + label="SfMData", + description="Input SfMData file.", + value="", + ), + desc.File( + name="calibration", + label="Calibration", + description="Calibration file (SfmData or Lens calibration file).", + value="", + ), + desc.BoolParam( + name="useJson", + label="Use Lens Calibration File", + description="Calibration is a Lens calibration file generated using 3Dequalizer instead of an sfmData.", + value=False, + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="output", + label="SMData", + description="Path to the output SfMData file.", + value="{nodeCacheFolder}/sfmData.sfm", + ), + ] diff --git a/meshroom/aliceVision/CameraCalibration.py b/meshroom/aliceVision/CameraCalibration.py new file mode 100644 index 0000000000..3cbb637b43 --- /dev/null +++ b/meshroom/aliceVision/CameraCalibration.py @@ -0,0 +1,129 @@ +__version__ = "1.0" + +from meshroom.core import desc +from meshroom.core.utils import VERBOSE_LEVEL + + +class CameraCalibration(desc.AVCommandLineNode): + commandLine = 'aliceVision_cameraCalibration {allParams}' + + category = 'Utils' + documentation = ''' + ''' + + inputs = [ + desc.File( + name="input", + label="Input", + description="Input images in one of the following form:\n" + " - folder containing images.\n" + " - image sequence like \"/path/to/seq.@.jpg\".\n" + " - video file.", + value="", + ), + desc.ChoiceParam( + name="pattern", + label="Pattern", + description="Type of pattern (CHESSBOARD, CIRCLES, ASYMMETRIC_CIRCLES, ASYMMETRIC_CCTAG).", + value="CHESSBOARD", + values=["CHESSBOARD", "CIRCLES", "ASYMMETRIC_CIRCLES", "ASYMMETRIC_CCTAG"], + ), + desc.GroupAttribute( + name="size", + label="Size", + description="Number of inner corners per one of board dimension like W H.", + groupDesc=[ + desc.IntParam( + name="width", + label="Width", + description="", + value=7, + range=(0, 10000, 1), + ), + desc.IntParam( + name="height", + label="Height", + description="", + value=5, + range=(0, 10000, 1), + ), + ], + ), + desc.FloatParam( + name="squareSize", + label="Square Size", + description="Size of the grid's square cells (mm).", + value=1.0, + range=(0.0, 100.0, 1.0), + ), + desc.IntParam( + name="nbDistortionCoef", + label="Nb Distortion Coef", + description="Number of distortion coefficients.", + value=3, + range=(0, 5, 1), + ), + desc.IntParam( + name="maxFrames", + label="Max Frames", + description="Maximum number of frames to extract from the video file.", + value=0, + range=(0, 5, 1), + ), + desc.IntParam( + name="maxCalibFrames", + label="Max Calib Frames", + description="Maximum number of frames to use to calibrate from the selected frames.", + value=100, + range=(0, 1000, 1), + ), + desc.IntParam( + name="calibGridSize", + label="Calib Grid Size", + description="Define the number of cells per edge.", + value=10, + range=(0, 50, 1), + ), + desc.IntParam( + name="minInputFrames", + label="Min Input Frames", + description="Minimum number of frames to limit the refinement loop.", + value=10, + range=(0, 100, 1), + ), + desc.FloatParam( + name="maxTotalAvgErr", + label="Max Total Avg Err", + description="Maximum total average error.", + value=0.10000000000000001, + range=(0.0, 1.0, 0.01), + ), + desc.File( + name="debugRejectedImgFolder", + label="Debug Rejected Img Folder", + description="Folder to export images that were deleted during the 
refinement loop.", + value="", + ), + desc.File( + name="debugSelectedImgFolder", + label="Debug Selected Img Folder", + description="Folder to export debug images.", + value="", + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="output", + label="Output", + description="Output filename for intrinsic [and extrinsic] parameters.", + value="{nodeCacheFolder}/cameraCalibration.cal", + ), + ] diff --git a/meshroom/aliceVision/CameraInit.py b/meshroom/aliceVision/CameraInit.py new file mode 100644 index 0000000000..0ea928d9ec --- /dev/null +++ b/meshroom/aliceVision/CameraInit.py @@ -0,0 +1,686 @@ +__version__ = "12.0" + +import os +import json +import psutil +import shutil +import tempfile +import logging + +from meshroom.core import desc, Version +from meshroom.core.utils import RAW_COLOR_INTERPRETATION, VERBOSE_LEVEL +from meshroom.multiview import FilesByType, findFilesByTypeInFolder + +Viewpoint = [ + desc.IntParam( + name="viewId", + label="ID", + description="Image UID.", + value=-1, + range=None, + ), + desc.IntParam( + name="poseId", + label="Pose ID", + description="Pose ID.", + value=-1, + range=None, + ), + desc.File( + name="path", + label="Image Path", + description="Image filepath.", + value="", + ), + desc.IntParam( + name="intrinsicId", + label="Intrinsic", + description="Internal camera parameters.", + value=-1, + range=None, + ), + desc.IntParam( + name="rigId", + label="Rig", + description="Rig parameters.", + value=-1, + range=None, + ), + desc.IntParam( + name="subPoseId", + label="Rig Sub-Pose", + description="Rig sub-pose parameters.", + value=-1, + range=None, + ), + desc.StringParam( + name="metadata", + label="Image Metadata", + description="The configuration of the Viewpoints is based on the images' metadata.\n" + "The important ones are:\n" + " - Focal Length: the focal length in mm.\n" + " - Make and Model: this information allows to convert the focal in mm into a focal length in " + "pixels using an embedded sensor database.\n" + " - Serial Number: allows to uniquely identify a device so multiple devices with the same Make, " + "Model can be differentiated and their internal parameters are optimized separately.", + value="", + invalidate=False, + advanced=True, + ), +] + +Intrinsic = [ + desc.IntParam( + name="intrinsicId", + label="ID", + description="Intrinsic UID.", + value=-1, + range=None, + ), + desc.FloatParam( + name="initialFocalLength", + label="Initial Focal Length", + description="Initial guess on the focal length (in mm).\n" + "When we have an initial value from EXIF, this value is not accurate but it cannot be wrong.\n" + "So this value is used to limit the range of possible values in the optimization.\n" + "If this value is set to -1, it will not be used and the focal length will not be bounded.", + value=-1.0, + range=None, + ), + desc.FloatParam( + name="focalLength", + label="Focal Length", + description="Known/calibrated focal length (in mm).", + value=1000.0, + range=(0.0, 10000.0, 1.0), + ), + desc.FloatParam( + name="pixelRatio", + label="Pixel Ratio", + description="Ratio between the pixel width and the pixel height.", + value=1.0, + range=(0.0, 10.0, 0.1), + ), + desc.BoolParam( + name="pixelRatioLocked", + label="Pixel Ratio Locked", + description="The pixel ratio value is locked for estimation.", + value=True, + ), + desc.BoolParam( + name="scaleLocked", + 
label="Focal length Locked", + description="The focal length is locked for estimation.", + value=False, + ), + desc.BoolParam( + name="offsetLocked", + label="Optical Center Locked", + description="The optical center coordinates are locked for estimation.", + value=False, + ), + desc.BoolParam( + name="distortionLocked", + label="Distortion Locked", + description="The distortion parameters are locked for estimation.", + value=False, + ), + desc.ChoiceParam( + name="type", + label="Camera Type", + description="Mathematical model used to represent a camera:\n" + " - pinhole: Simplest projective camera model without optical distortion " + "(focal and optical center).\n" + " - equidistant: Non-projective camera model suited for full-fisheye optics.\n" + " - equirectangular: Projection model used in panoramas.\n", + value="pinhole", + values=["pinhole", "equidistant", "equirectangular"], + ), + desc.ChoiceParam( + name="distortionType", + label="Distortion Type", + description="Mathematical model used to represent the distortion:\n" + " - radialk1: radial distortion with one parameter.\n" + " - radialk3: radial distortion with three parameters (Best for pinhole cameras).\n" + " - radialk3pt: radial distortion with three parameters and normalized with the sum of parameters " + "(Best for equidistant cameras).\n" + " - brown: distortion with 3 radial and 2 tangential parameters.\n" + " - fisheye1: distortion with 1 parameter suited for fisheye optics (like 120deg FoV).\n" + " - fisheye4: distortion with 4 parameters suited for fisheye optics (like 120deg FoV).\n", + value="radialk3", + values=["none", "radialk1", "radialk3", "radialk3pt", "brown", "fisheye4", "fisheye1"], + ), + desc.IntParam( + name="width", + label="Width", + description="Image width.", + value=0, + range=(0, 10000, 1), + ), + desc.IntParam( + name="height", + label="Height", + description="Image height.", + value=0, + range=(0, 10000, 1), + ), + desc.FloatParam( + name="sensorWidth", + label="Sensor Width", + description="Sensor width (in mm).", + value=36.0, + range=(0.0, 1000.0, 1.0), + ), + desc.FloatParam( + name="sensorHeight", + label="Sensor Height", + description="Sensor height (in mm).", + value=24.0, + range=(0.0, 1000.0, 1.0), + ), + desc.StringParam( + name="serialNumber", + label="Serial Number", + description="Device serial number (Camera UID and Lens UID combined).", + value="", + ), + desc.GroupAttribute( + name="principalPoint", + label="Principal Point", + description="Position of the optical center in the image (i.e. 
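The per-model parameter counts are stated in the `distortionType` description above; collecting them in one place can help when pre-filling `distortionParams` from an external calibration. This is only a summary of that documentation, not an API the node exposes:

```python
# Number of entries expected in distortionParams for each model, per the
# distortionType description above ("brown" is 3 radial + 2 tangential).
DISTORTION_PARAM_COUNTS = {
    "none": 0,
    "radialk1": 1,
    "radialk3": 3,
    "radialk3pt": 3,
    "brown": 5,
    "fisheye1": 1,
    "fisheye4": 4,
}

assert DISTORTION_PARAM_COUNTS["radialk3"] == 3
```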
the sensor surface).", + groupDesc=[ + desc.FloatParam( + name="x", + label="x", + description="", + value=0.0, + range=(0.0, 10000.0, 1.0), + ), + desc.FloatParam( + name="y", + label="y", + description="", + value=0.0, + range=(0.0, 10000.0, 1.0), + ), + ], + ), + desc.ChoiceParam( + name="initializationMode", + label="Initialization Mode", + description="Defines how this intrinsic was initialized:\n" + " - calibrated: calibrated externally.\n" + " - estimated: estimated from metadata and/or sensor width.\n" + " - unknown: unknown camera parameters (can still have default value guess).\n" + " - none: not set.", + values=["calibrated", "estimated", "unknown", "none"], + value="none", + ), + desc.ChoiceParam( + name="distortionInitializationMode", + label="Distortion Initialization Mode", + description="Defines how the distortion model and parameters were initialized:\n" + " - calibrated: calibrated externally.\n" + " - estimated: estimated from a database of generic calibration.\n" + " - unknown: unknown camera parameters (can still have default value guess).\n" + " - none: not set.", + values=["calibrated", "estimated", "unknown", "none"], + value="none", + ), + desc.ListAttribute( + name="distortionParams", + elementDesc=desc.FloatParam( + name="p", + label="", + description="", + value=0.0, + range=(-0.1, 0.1, 0.01), + ), + label="Distortion Params", + description="Distortion parameters.", + ), + desc.GroupAttribute( + name="undistortionOffset", + label="Undistortion Offset", + description="Undistortion offset.", + groupDesc=[ + desc.FloatParam( + name="x", + label="x", + description="", + value=0.0, + range=(0.0, 10000.0, 1.0), + ), + desc.FloatParam( + name="y", + label="y", + description="", + value=0.0, + range=(0.0, 10000.0, 1.0), + ), + ], + ), + desc.ListAttribute( + name="undistortionParams", + elementDesc=desc.FloatParam( + name="p", + label="", + description="", + value=0.0, + range=(-0.1, 0.1, 0.01), + ), + label="Undistortion Params", + description="Undistortion parameters." + ), + desc.BoolParam( + name="locked", + label="Locked", + description="If the camera has been calibrated, the internal camera parameters (intrinsics) can be locked. 
" + "It should improve robustness and speed-up the reconstruction.", + value=False, + ), +] + + +def readSfMData(sfmFile): + """ Read views and intrinsics from a .sfm file + + Args: + sfmFile: the .sfm file containing views and intrinsics + + Returns: + The views and intrinsics of the .sfm as two separate lists + """ + # skip decoding errors to avoid potential exceptions due to non utf-8 characters in images metadata + with open(sfmFile, 'r', encoding='utf-8', errors='ignore') as f: + data = json.load(f) + + intrinsicsKeys = [i.name for i in Intrinsic] + + intrinsics = [{k: v for k, v in item.items() if k in intrinsicsKeys} for item in data.get("intrinsics", [])] + for intrinsic in intrinsics: + pp = intrinsic.get('principalPoint', (0, 0)) + intrinsic['principalPoint'] = {} + intrinsic['principalPoint']['x'] = pp[0] + intrinsic['principalPoint']['y'] = pp[1] + + # convert empty string distortionParams (i.e: Pinhole model) to empty list + distortionParams = intrinsic.get('distortionParams', '') + if distortionParams == '': + intrinsic['distortionParams'] = list() + + offset = intrinsic.get('undistortionOffset', (0, 0)) + intrinsic['undistortionOffset'] = {} + intrinsic['undistortionOffset']['x'] = offset[0] + intrinsic['undistortionOffset']['y'] = offset[1] + + undistortionParams = intrinsic.get('undistortionParams', '') + if undistortionParams == '': + intrinsic['undistortionParams'] = list() + + viewsKeys = [v.name for v in Viewpoint] + views = [{k: v for k, v in item.items() if k in viewsKeys} for item in data.get("views", [])] + for view in views: + view['metadata'] = json.dumps(view['metadata']) # convert metadata to string + + return views, intrinsics + + +class CameraInit(desc.AVCommandLineNode, desc.InitNode): + commandLine = "aliceVision_cameraInit {allParams} --allowSingleView 1" # don't throw an error if there is only one image + + size = desc.DynamicNodeSize("viewpoints") + + category = "Sparse Reconstruction" + documentation = """ +This node describes your dataset. It lists the Viewpoints candidates, the guess about the type of optic, the initial +focal length and which images are sharing the same internal camera parameters, as well as potential camera rigs. + +When you import new images into Meshroom, this node is automatically configured from the analysis of the images' metadata. +The software can support images without any metadata but it is recommended to have them for robustness. + +### Metadata +Metadata allow images to be grouped together and provide an initialization of the focal length (in pixel unit). +The needed metadata are: + * **Focal Length**: the focal length in mm. + * **Make** & **Model**: this information allows to convert the focal in mm into a focal length in pixels using an + embedded sensor database. + * **Serial Number**: allows to uniquely identify a device so multiple devices with the same Make, Model can be + differentiated and their internal parameters are optimized separately (in the photogrammetry case). 
+""" + + inputs = [ + desc.ListAttribute( + name="viewpoints", + elementDesc=desc.GroupAttribute( + name="viewpoint", + label="Viewpoint", + description="Viewpoint.", + groupDesc=Viewpoint, + ), + label="Viewpoints", + description="Input viewpoints.", + group="", + ), + desc.ListAttribute( + name="intrinsics", + elementDesc=desc.GroupAttribute( + name="intrinsic", + label="Intrinsic", + description="Intrinsic.", + groupDesc=Intrinsic, + ), + label="Intrinsics", + description="Camera intrinsics.", + group="", + ), + desc.File( + name="sensorDatabase", + label="Sensor Database", + description="Camera sensor with database path.", + value="${ALICEVISION_SENSOR_DB}", + invalidate=False, + ), + desc.File( + name="lensCorrectionProfileInfo", + label="LCP Info", + description="Lens Correction Profile filepath or database directory.", + value="${ALICEVISION_LENS_PROFILE_INFO}", + invalidate=False, + ), + desc.BoolParam( + name="lensCorrectionProfileSearchIgnoreCameraModel", + label="LCP Generic Search", + description="The lens name and camera maker are used to match the LCP database, but the camera model is ignored.", + value=True, + advanced=True, + ), + desc.FloatParam( + name="defaultFieldOfView", + label="Default Field Of View", + description="Default value for the field of view (in degrees) used as an initialization value when there is " + "no focal or field of view in the image metadata.", + value=45.0, + range=(0.0, 180.0, 1.0), + invalidate=False, + ), + desc.ChoiceParam( + name="groupCameraFallback", + label="Group Camera Fallback", + description="If there is no serial number in the images' metadata, devices cannot be accurately identified.\n" + "Therefore, internal camera parameters cannot be shared among images reliably.\n" + "A fallback grouping strategy must be chosen:\n" + " - global: group images from comparable devices (same make/model/focal) globally.\n" + " - folder: group images from comparable devices only within the same folder.\n" + " - image: never group images from comparable devices.", + values=["global", "folder", "image"], + value="folder", + invalidate=False, + ), + desc.ChoiceParam( + name="rawColorInterpretation", + label="RAW Color Interpretation", + description="Allows to choose how RAW data are color processed:\n" + " - None: Debayering without any color processing.\n" + " - LibRawNoWhiteBalancing: Simple neutralization.\n" + " - LibRawWhiteBalancing: Use internal white balancing from libraw.\n" + " - DCPLinearProcessing: Use DCP color profile.\n" + " - DCPMetadata: Same as None with DCP info added in metadata.", + values=RAW_COLOR_INTERPRETATION, + value="DCPLinearProcessing" if os.environ.get("ALICEVISION_COLOR_PROFILE_DB", "") else "LibRawWhiteBalancing", + ), + desc.File( + name="colorProfileDatabase", + label="Color Profile Database", + description="Color Profile database directory path.", + value="${ALICEVISION_COLOR_PROFILE_DB}", + enabled=lambda node: node.rawColorInterpretation.value.startswith("DCP"), + invalidate=False, + ), + desc.BoolParam( + name="errorOnMissingColorProfile", + label="Error On Missing DCP Color Profile", + description="When enabled, if no color profile is found for at least one image, then an error is thrown.\n" + "When disabled, if no color profile is found for some images, it will fallback to " + "libRawWhiteBalancing for those images.", + value=True, + enabled=lambda node: node.rawColorInterpretation.value.startswith("DCP"), + ), + desc.ChoiceParam( + name="viewIdMethod", + label="ViewId Method", + description="Allows to choose the 
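For context, the module-level `readSfMData()` defined above is what lets a `CameraInit` node be seeded from an existing `.sfm`/`.json` file (see `initialize()` below). A hypothetical usage sketch within this module; the cache path is an assumption for the example:

```python
# Load views and intrinsics from a previously computed cameraInit.sfm.
# readSfMData() is the helper defined earlier in this module.
views, intrinsics = readSfMData("/cache/CameraInit/0123abcd/cameraInit.sfm")
print(f"{len(views)} views, {len(intrinsics)} intrinsics")
if views:
    print("first image:", views[0]["path"])
```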
way the viewID is generated:\n" + " - metadata : Generate viewId from image metadata.\n" + " - filename : Generate viewId from filename using regex.", + value="metadata", + values=["metadata", "filename"], + invalidate=False, + advanced=True, + ), + desc.StringParam( + name="viewIdRegex", + label="ViewId Regex", + description="Regex used to catch number used as viewId in filename." + "You should capture specific parts of the filename with parentheses to define matching elements." + " (only numbers will work)\n" + "Some examples of patterns:\n" + " - Match the longest number at the end of the filename (default value): " + r'".*?(\d+)"' + "\n" + + " - Match the first number found in filename: " + r'"(\d+).*"', + value=r".*?(\d+)", + invalidate=False, + advanced=True, + enabled=lambda node: node.viewIdMethod.value == "filename", + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="output", + label="SfMData", + description="Output SfMData.", + value="{nodeCacheFolder}/cameraInit.sfm", + ), + ] + + def __init__(self): + super(CameraInit, self).__init__() + + def initialize(self, node, inputs, recursiveInputs): + # Reset graph inputs + self.resetAttributes(node, ["viewpoints", "intrinsics"]) + + filesByType = FilesByType() + searchedForImages = False + + if recursiveInputs: + filesByType.extend(findFilesByTypeInFolder(recursiveInputs, recursive=True)) + searchedForImages = True + + # Add views and intrinsics from a file if it was provided, or look for the images + if len(inputs) == 1 and os.path.isfile(inputs[0]) and os.path.splitext(inputs[0])[-1] in ('.json', '.sfm'): + views, intrinsics = readSfMData(inputs[0]) + self.extendAttributes(node, {"viewpoints": views, "intrinsics": intrinsics}) + else: + filesByType.extend(findFilesByTypeInFolder(inputs, recursive=False)) + searchedForImages = True + + # If there was no input file, check that the directories do contain images + if searchedForImages and not filesByType.images: + raise ValueError("No valid input file or no image in the provided directories") + + views, intrinsics = self.buildIntrinsics(node, filesByType.images) + self.setAttributes(node, {"viewpoints": views, "intrinsics": intrinsics}) + + def upgradeTypes(self, intrinsic, itype): + if itype == "pinhole": + intrinsic['type'] = "pinhole" + intrinsic['distortionType'] = "none" + intrinsic['undistortionType'] = "none" + + elif itype == "radial1": + intrinsic['type'] = "pinhole" + intrinsic['distortionType'] = "radialk1" + intrinsic['undistortionType'] = "none" + + elif itype == "radial3": + intrinsic['type'] = "pinhole" + intrinsic['distortionType'] = "radialk3" + intrinsic['undistortionType'] = "none" + + elif itype == "brown": + intrinsic['type'] = "pinhole" + intrinsic['distortionType'] = "brown" + intrinsic['undistortionType'] = "none" + + elif itype == "fisheye4": + intrinsic['type'] = "pinhole" + intrinsic['distortionType'] = "fisheye4" + intrinsic['undistortionType'] = "none" + + elif itype == "fisheye1": + intrinsic['type'] = "pinhole" + intrinsic['distortionType'] = "fisheye1" + intrinsic['undistortionType'] = "none" + + elif itype == "equidistant": + intrinsic['type'] = "equidistant" + intrinsic['distortionType'] = "none" + intrinsic['undistortionType'] = "none" + + elif itype == "equidistant_r3": + intrinsic['type'] = "equidistant" + intrinsic['distortionType'] = "radialk3pt" + 
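As a reading aid for the default `viewIdRegex` above (`r".*?(\d+)"`): the authoritative matching happens inside `aliceVision_cameraInit`, so the snippet below only illustrates the pattern with Python's `re` module, on a hypothetical filename:

```python
import re

# Default pattern: lazily skip leading characters, then capture a digit run.
m = re.match(r".*?(\d+)", "IMG_0042.jpg")
print(m.group(1) if m else None)  # -> "0042"
```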
intrinsic['undistortionType'] = "none" + + else: + intrinsic['type'] = "pinhole" + intrinsic['distortionType'] = "none" + intrinsic['undistortionType'] = "none" + + def upgradeAttributeValues(self, attrValues, fromVersion): + + # Starting with version 6, the principal point is now relative to the image center + if fromVersion < Version(6, 0): + for intrinsic in attrValues['intrinsics']: + principalPoint = intrinsic['principalPoint'] + intrinsic['principalPoint'] = { + "x": int(principalPoint["x"] - 0.5 * intrinsic['width']), + "y": int(principalPoint["y"] - 0.5 * intrinsic['height']) + } + + # Starting with version 7, the focal length is now in mm + if fromVersion < Version(7, 0): + for intrinsic in attrValues['intrinsics']: + pxInitialFocalLength = intrinsic['pxInitialFocalLength'] + pxFocalLength = intrinsic['pxFocalLength'] + sensorWidth = intrinsic['sensorWidth'] + width = intrinsic['width'] + focalLength = (pxFocalLength / width) * sensorWidth + initialFocalLength = (pxInitialFocalLength / width) * sensorWidth + intrinsic['initialFocalLength'] = initialFocalLength + intrinsic['focalLength'] = focalLength + intrinsic['pixelRatio'] = 1.0 + intrinsic['pixelRatioLocked'] = False + + # Upgrade types + if fromVersion < Version(10, 0): + for intrinsic in attrValues['intrinsics']: + itype = intrinsic['type'] + self.upgradeTypes(intrinsic, itype) + + return attrValues + + def readSfMData(self, sfmFile): + return readSfMData(sfmFile) + + def buildIntrinsics(self, node, additionalViews=()): + """ Build intrinsics from node current views and optional additional views + + Args: + node: the CameraInit node instance to build intrinsics for + additionalViews: (optional) the new views (list of path to images) to add to the node's viewpoints + + Returns: + The updated views and intrinsics as two separate lists + """ + assert isinstance(node.nodeDesc, CameraInit) + if node.graph: + # make a copy of the node outside the graph + # to change its cache folder without modifying the original node + node = node.graph.copyNode(node)[0] + + tmpCache = tempfile.mkdtemp() + node.updateInternals(tmpCache) + + try: + os.makedirs(os.path.join(tmpCache, node.internalFolder)) + self.createViewpointsFile(node, additionalViews) + cmd = self.buildCommandLine(node.chunks[0]) + logging.debug(' - commandLine: {}'.format(cmd)) + proc = psutil.Popen(cmd, stdout=None, stderr=None, shell=True) + stdout, stderr = proc.communicate() + # proc.wait() + if proc.returncode != 0: + raise RuntimeError('CameraInit failed with error code {}.\nCommand was: "{}".\n'.format( + proc.returncode, cmd) + ) + + # Reload result of aliceVision_cameraInit + cameraInitSfM = node.output.value + return readSfMData(cameraInitSfM) + + except Exception as e: + logging.debug("[CameraInit] Error while building intrinsics: {}".format(str(e))) + raise + finally: + if os.path.exists(tmpCache): + logging.debug("[CameraInit] Remove temp files in: {}".format(tmpCache)) + shutil.rmtree(tmpCache) + + def createViewpointsFile(self, node, additionalViews=()): + node.viewpointsFile = "" + if node.viewpoints or additionalViews: + newViews = [] + for path in additionalViews: # format additional views to match json format + newViews.append({"path": path}) + intrinsics = node.intrinsics.getPrimitiveValue(exportDefault=True) + for intrinsic in intrinsics: + intrinsic['principalPoint'] = [intrinsic['principalPoint']['x'], intrinsic['principalPoint']['y']] + intrinsic['undistortionOffset'] = [intrinsic['undistortionOffset']['x'], intrinsic['undistortionOffset']['y']] + 
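The version-7 migration above converts focal lengths from pixels to millimetres via the sensor width. A worked example of that formula, with hypothetical numbers:

```python
# focalLength(mm) = pxFocalLength / width * sensorWidth, as in
# upgradeAttributeValues() above.
pxFocalLength = 4000.0  # focal length in pixels (hypothetical)
width = 6000            # image width in pixels
sensorWidth = 36.0      # full-frame sensor width in mm
focalLength = (pxFocalLength / width) * sensorWidth
print(focalLength)      # -> 24.0 mm
```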
intrinsic['undistortionType'] = 'none' + views = node.viewpoints.getPrimitiveValue(exportDefault=False) + + # convert the metadata string into a map + for view in views: + if 'metadata' in view: + view['metadata'] = json.loads(view['metadata']) + + sfmData = { + "version": [1, 2, 12], + "views": views + newViews, + "intrinsics": intrinsics, + "featureFolder": "", + "matchingFolder": "", + } + node.viewpointsFile = os.path.join(node.internalFolder, 'viewpoints.sfm').format(**node._cmdVars) + with open(node.viewpointsFile, 'w') as f: + json.dump(sfmData, f, indent=4) + + def buildCommandLine(self, chunk): + cmd = desc.CommandLineNode.buildCommandLine(self, chunk) + if chunk.node.viewpointsFile: + cmd += ' --input "{}"'.format(chunk.node.viewpointsFile) + return cmd + + def processChunk(self, chunk): + self.createViewpointsFile(chunk.node) + desc.CommandLineNode.processChunk(self, chunk) diff --git a/meshroom/aliceVision/CameraLocalization.py b/meshroom/aliceVision/CameraLocalization.py new file mode 100644 index 0000000000..a778fda9c5 --- /dev/null +++ b/meshroom/aliceVision/CameraLocalization.py @@ -0,0 +1,202 @@ +__version__ = "1.0" + +from meshroom.core import desc +from meshroom.core.utils import DESCRIBER_TYPES, VERBOSE_LEVEL + + +class CameraLocalization(desc.AVCommandLineNode): + commandLine = 'aliceVision_cameraLocalization {allParams}' + + category = 'Utils' + documentation = ''' + ''' + + inputs = [ + desc.File( + name="sfmdata", + label="SfMData", + description="The SfMData file generated by AliceVision.", + value="", + ), + desc.File( + name="mediafile", + label="Media File", + description="The folder path or the filename for the media to track.", + value="", + ), + desc.File( + name="visualDebug", + label="Visual Debug Folder", + description="If a folder is provided, this enables visual debug and all the debugging information will be saved in that folder.", + value="", + ), + desc.File( + name="descriptorPath", + label="Descriptor Path", + description="Folder containing the descriptors for all the images (ie. the *.desc.).", + value="", + ), + desc.ChoiceParam( + name="matchDescTypes", + label="Match Desc Types", + description="Describer types to use for the matching.", + values=DESCRIBER_TYPES, + value=["dspsift"], + exclusive=False, + joinChar=",", + ), + desc.ChoiceParam( + name="preset", + label="Preset", + description="Preset for the feature extractor when localizing a new image (low, medium, normal, high, ultra).", + value="normal", + values=["low", "medium", "normal", "high", "ultra"], + ), + desc.ChoiceParam( + name="resectionEstimator", + label="Resection Estimator", + description="The type of *sac framework to use for resection (acransac, loransac).", + value="acransac", + values=["acransac", "loransac"], + ), + desc.ChoiceParam( + name="matchingEstimator", + label="Matching Estimator", + description="The type of *sac framework to use for matching (acransac, loransac).", + value="acransac", + values=["acransac", "loransac"], + ), + desc.File( + name="calibration", + label="Calibration", + description="Calibration file.", + value="", + ), + desc.BoolParam( + name="refineIntrinsics", + label="Refine Intrinsics", + description="Enable/Disable camera intrinsics refinement for each localized image.", + value=False, + ), + desc.FloatParam( + name="reprojectionError", + label="Reprojection Error", + description="Maximum reprojection error (in pixels) allowed for resectioning. 
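Back in `CameraInit.createViewpointsFile()` above, the file handed to `aliceVision_cameraInit` is plain JSON. A minimal sketch of that payload, with a hypothetical image path; only `path` is filled for a freshly added view:

```python
import json

# Shape of viewpoints.sfm as written by createViewpointsFile() above.
sfmData = {
    "version": [1, 2, 12],
    "views": [{"path": "/data/IMG_0001.jpg"}],
    "intrinsics": [],
    "featureFolder": "",
    "matchingFolder": "",
}
with open("viewpoints.sfm", "w") as f:
    json.dump(sfmData, f, indent=4)
```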
If set to 0, it lets the ACRansac select an optimal value.", + value=4.0, + range=(0.1, 50.0, 0.1), + ), + desc.IntParam( + name="nbImageMatch", + label="Nb Image Match", + description="[voctree] Number of images to retrieve in database.", + value=4, + range=(1, 1000, 1), + ), + desc.IntParam( + name="maxResults", + label="Max Results", + description="[voctree] For algorithm AllResults, it stops the image matching when this number of matched images is reached. If 0 it is ignored.", + value=10, + range=(1, 100, 1), + ), + desc.IntParam( + name="commonviews", + label="Common Views", + description="[voctree] Number of minimum images in which a point must be seen to be used in cluster tracking.", + value=3, + range=(2, 50, 1), + ), + desc.File( + name="voctree", + label="Voctree", + description="[voctree] Filename for the vocabulary tree.", + value="${ALICEVISION_VOCTREE}", + ), + desc.File( + name="voctreeWeights", + label="Voctree Weights", + description="[voctree] Filename for the vocabulary tree weights.", + value="", + ), + desc.ChoiceParam( + name="algorithm", + label="Algorithm", + description="[voctree] Algorithm type: FirstBest, AllResults.", + value="AllResults", + values=["FirstBest", "AllResults"], + ), + desc.FloatParam( + name="matchingError", + label="Matching Error", + description="[voctree] Maximum matching error (in pixels) allowed for image matching with geometric verification. If set to 0, it lets the ACRansac select an optimal value.", + value=4.0, + range=(0.0, 50.0, 1.0), + ), + desc.IntParam( + name="nbFrameBufferMatching", + label="Nb Frame Buffer Matching", + description="[voctree] Number of previous frames of the sequence to use for matching (0 = Disable).", + value=10, + range=(0, 100, 1), + ), + desc.BoolParam( + name="robustMatching", + label="Robust Matching", + description="[voctree] Enable/Disable the robust matching between query and database images, all putative matches will be considered.", + value=True, + ), + desc.IntParam( + name="nNearestKeyFrames", + label="N Nearest Key Frames", + description="[cctag] Number of images to retrieve in the database. 
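A note on `matchDescTypes` above: a non-exclusive `ChoiceParam` with `joinChar=","` is serialized as a single comma-joined argument. The exact serialization lives in Meshroom's `desc` layer; this sketch only shows the resulting flag, with hypothetical values:

```python
# How a multi-value ChoiceParam with joinChar="," ends up on the command line.
values = ["dspsift", "akaze"]
print("--matchDescTypes " + ",".join(values))  # --matchDescTypes dspsift,akaze
```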
Parameters specific for final (optional) bundle adjustment optimization of the sequence.", + value=5, + range=(1, 100, 1), + ), + desc.StringParam( + name="globalBundle", + label="Global Bundle", + description="[bundle adjustment] If --refineIntrinsics is not set, this option allows to run a final global bundle adjustment to refine the scene.", + value="", + ), + desc.BoolParam( + name="noDistortion", + label="No Distortion", + description="[bundle adjustment] It does not take into account distortion during the BA, it considers the distortion coefficients to all be equal to 0.", + value=False, + ), + desc.BoolParam( + name="noBArefineIntrinsics", + label="No BA Refine Intrinsics", + description="[bundle adjustment] If set to true, does not refine intrinsics during BA.", + value=False, + ), + desc.IntParam( + name="minPointVisibility", + label="Min Point Visibility", + description="[bundle adjustment] Minimum number of observations that a point must have in order to be considered for bundle adjustment.", + value=2, + range=(2, 50, 1), + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="outputAlembic", + label="Alembic", + description="Filename for the SfMData export file (where camera poses will be stored).", + value="{nodeCacheFolder}/trackedCameras.abc", + ), + desc.File( + name="outputJSON", + label="JSON File", + description="Filename for the localization results as .json.", + value="{nodeCacheFolder}/trackedCameras.json", + ), + ] diff --git a/meshroom/aliceVision/CameraRigCalibration.py b/meshroom/aliceVision/CameraRigCalibration.py new file mode 100644 index 0000000000..f788134280 --- /dev/null +++ b/meshroom/aliceVision/CameraRigCalibration.py @@ -0,0 +1,161 @@ +__version__ = "1.0" + +from meshroom.core import desc +from meshroom.core.utils import DESCRIBER_TYPES, VERBOSE_LEVEL + + +class CameraRigCalibration(desc.AVCommandLineNode): + commandLine = 'aliceVision_rigCalibration {allParams}' + + category = 'Utils' + documentation = ''' + ''' + + inputs = [ + desc.File( + name="sfmdata", + label="SfMData", + description="Input SfMData file.", + value="", + ), + desc.File( + name="mediapath", + label="Media Path", + description="The path to the video file, the folder of the image sequence or a text file\n" + "(one image path per line) for each camera of the rig (eg. --mediapath /path/to/cam1.mov /path/to/cam2.mov).", + value="", + ), + desc.File( + name="cameraIntrinsics", + label="Camera Intrinsics", + description="The intrinsics calibration file for each camera of the rig (eg. --cameraIntrinsics /path/to/calib1.txt /path/to/calib2.txt).", + value="", + ), + desc.File( + name="export", + label="Export File", + description="Filename for the alembic file containing the rig poses with the 3D points. 
It also saves a file for each camera named 'filename.cam##.abc'.", + value="trackedcameras.abc", + ), + desc.File( + name="descriptorPath", + label="Descriptor Path", + description="Folder containing the .desc.", + value="", + ), + desc.ChoiceParam( + name="matchDescTypes", + label="Match Describer Types", + description="The describer types to use for the matching.", + values=DESCRIBER_TYPES, + value=["dspsift"], + exclusive=False, + joinChar=",", + ), + desc.ChoiceParam( + name="preset", + label="Preset", + description="Preset for the feature extractor when localizing a new image (low, medium, normal, high, ultra).", + value="normal", + values=["low", "medium", "normal", "high", "ultra"], + ), + desc.ChoiceParam( + name="resectionEstimator", + label="Resection Estimator", + description="The type of *sac framework to use for resection (acransac, loransac).", + value="acransac", + values=["acransac", "loransac"], + ), + desc.ChoiceParam( + name="matchingEstimator", + label="Matching Estimator", + description="The type of *sac framework to use for matching (acransac, loransac).", + value="acransac", + values=["acransac", "loransac"], + ), + desc.StringParam( + name="refineIntrinsics", + label="Refine Intrinsics", + description="Enable/Disable camera intrinsics refinement for each localized image.", + value="", + ), + desc.FloatParam( + name="reprojectionError", + label="Reprojection Error", + description="Maximum reprojection error (in pixels) allowed for resectioning.\n" + "If set to 0, it lets the ACRansac select an optimal value.", + value=4.0, + range=(0.0, 10.0, 0.1), + ), + desc.IntParam( + name="maxInputFrames", + label="Max Input Frames", + description="Maximum number of frames to read in input. 0 means no limit.", + value=0, + range=(0, 1000, 1), + ), + desc.File( + name="voctree", + label="Voctree", + description="[voctree] Filename for the vocabulary tree.", + value="${ALICEVISION_VOCTREE}", + ), + desc.File( + name="voctreeWeights", + label="Voctree Weights", + description="[voctree] Filename for the vocabulary tree weights.", + value="", + ), + desc.ChoiceParam( + name="algorithm", + label="Algorithm", + description="[voctree] Algorithm type: {FirstBest, AllResults}.", + value="AllResults", + values=["FirstBest", "AllResults"], + ), + desc.IntParam( + name="nbImageMatch", + label="Nb Image Match", + description="[voctree] Number of images to retrieve in the database.", + value=4, + range=(0, 50, 1), + ), + desc.IntParam( + name="maxResults", + label="Max Results", + description="[voctree] For algorithm AllResults, it stops the image matching when this number of matched images is reached. 
If set to 0, it is ignored.", + value=10, + range=(0, 100, 1), + ), + desc.FloatParam( + name="matchingError", + label="Matching Error", + description="[voctree] Maximum matching error (in pixels) allowed for image matching with geometric verification.\n" + "If set to 0, it lets the ACRansac select an optimal value.", + value=4.0, + range=(0.0, 10.0, 0.1), + ), + desc.IntParam( + name="nNearestKeyFrames", + label="N Nearest Key Frames", + description="[cctag] Number of images to retrieve in database.", + value=5, + range=(0, 50, 1), + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="outfile", + label="Output File", + description="The name of the file to store the calibration data in.", + value="{nodeCacheFolder}/cameraRigCalibration.rigCal", + ), + ] diff --git a/meshroom/aliceVision/CameraRigLocalization.py b/meshroom/aliceVision/CameraRigLocalization.py new file mode 100644 index 0000000000..331c171052 --- /dev/null +++ b/meshroom/aliceVision/CameraRigLocalization.py @@ -0,0 +1,167 @@ +__version__ = "1.0" + +from meshroom.core import desc +from meshroom.core.utils import DESCRIBER_TYPES, VERBOSE_LEVEL + + +class CameraRigLocalization(desc.AVCommandLineNode): + commandLine = 'aliceVision_rigLocalization {allParams}' + + category = 'Utils' + documentation = ''' + ''' + + inputs = [ + desc.File( + name="sfmdata", + label="SfMData", + description="The input SfMData file.", + value="", + ), + desc.File( + name="mediapath", + label="Media Path", + description="The path to the video file, the folder of the image sequence or a text file (one image path per line) for each camera of the rig (eg. --mediapath /path/to/cam1.mov /path/to/cam2.mov).", + value="", + ), + desc.File( + name="calibration", + label="Rig Calibration File", + description="The file containing the calibration data for the rig (subposes).", + value="", + ), + desc.File( + name="cameraIntrinsics", + label="Camera Intrinsics", + description="The intrinsics calibration file for each camera of the rig (eg. 
--cameraIntrinsics /path/to/calib1.txt /path/to/calib2.txt).", + value="", + ), + desc.File( + name="descriptorPath", + label="Descriptor Path", + description="Folder containing the .desc.", + value="", + ), + desc.ChoiceParam( + name="matchDescTypes", + label="Match Describer Types", + description="The describer types to use for the matching.", + values=DESCRIBER_TYPES, + value=["dspsift"], + exclusive=False, + joinChar=",", + ), + desc.ChoiceParam( + name="preset", + label="Preset", + description="Preset for the feature extractor when localizing a new image (low, medium, normal, high, ultra).", + value="normal", + values=["low", "medium", "normal", "high", "ultra"], + ), + desc.ChoiceParam( + name="resectionEstimator", + label="Resection Estimator", + description="The type of *sac framework to use for resection (acransac, loransac).", + value="acransac", + values=["acransac", "loransac"], + ), + desc.ChoiceParam( + name="matchingEstimator", + label="Matching Estimator", + description="The type of *sac framework to use for matching (acransac, loransac).", + value="acransac", + values=["acransac", "loransac"], + ), + desc.StringParam( + name="refineIntrinsics", + label="Refine Intrinsics", + description="Enable/Disable camera intrinsics refinement for each localized image.", + value="", + ), + desc.FloatParam( + name="reprojectionError", + label="Reprojection Error", + description="Maximum reprojection error (in pixels) allowed for resectioning.\n" + "If set to 0, it lets the ACRansac select an optimal value.", + value=4.0, + range=(0.0, 10.0, 0.1), + ), + desc.BoolParam( + name="useLocalizeRigNaive", + label="Use Localize Rig Naive", + description="Enable/Disable the naive method for rig localization: naive method tries to localize each camera separately.", + value=False, + ), + desc.FloatParam( + name="angularThreshold", + label="Angular Threshold", + description="The maximum angular threshold in degrees between feature bearing vector and 3D point direction. 
Used only with the opengv method.", + value=0.1, + range=(0.0, 10.0, 0.01), + ), + desc.File( + name="voctree", + label="Voctree", + description="[voctree] Filename for the vocabulary tree.""", + value="${ALICEVISION_VOCTREE}", + ), + desc.File( + name="voctreeWeights", + label="Voctree Weights", + description="[voctree] Filename for the vocabulary tree weights.", + value="", + ), + desc.ChoiceParam( + name="algorithm", + label="Algorithm", + description="[voctree] Algorithm type: {FirstBest, AllResults}.", + value="AllResults", + values=["FirstBest", "AllResults"], + ), + desc.IntParam( + name="nbImageMatch", + label="Nb Image Match", + description="[voctree] Number of images to retrieve in the database.", + value=4, + range=(0, 100, 1), + ), + desc.IntParam( + name="maxResults", + label="Max Results", + description="[voctree] For algorithm AllResults, it stops the image matching when this number of matched images is reached.\n" + "If set to 0, it is ignored.", + value=10, + range=(0, 100, 1), + ), + desc.FloatParam( + name="matchingError", + label="Matching Error", + description="[voctree] Maximum matching error (in pixels) allowed for image matching with geometric verification.\n" + "If set to 0, it lets the ACRansac select an optimal value.", + value=4.0, + range=(0.0, 10.0, 0.1), + ), + desc.IntParam( + name="nNearestKeyFrames", + label="N Nearest Key Frames", + description="[cctag] Number of images to retrieve in database.", + value=5, + range=(0, 50, 1), + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="outputAlembic", + label="Alembic", + description="Filename for the SfMData export file (where camera poses will be stored).", + value="{nodeCacheFolder}/trackedcameras.abc", + ), + ] diff --git a/meshroom/aliceVision/CheckerboardCalibration.py b/meshroom/aliceVision/CheckerboardCalibration.py new file mode 100644 index 0000000000..1cdc8229bd --- /dev/null +++ b/meshroom/aliceVision/CheckerboardCalibration.py @@ -0,0 +1,52 @@ +__version__ = '1.0' + +from meshroom.core import desc +from meshroom.core.utils import VERBOSE_LEVEL + + +class CheckerboardCalibration(desc.AVCommandLineNode): + commandLine = 'aliceVision_checkerboardCalibration {allParams}' + size = desc.DynamicNodeSize('input') + + category = 'Other' + documentation = ''' +Estimate the camera intrinsics and extrinsincs on a set of checkerboard images. 
+''' + + inputs = [ + desc.File( + name="input", + label="Input SfMData", + description="SfMData file.", + value="", + ), + desc.File( + name="checkerboards", + label="Checkerboards Folder", + description="Folder containing checkerboard JSON files.", + value="", + ), + desc.FloatParam( + name="squareSize", + label="Square Size", + description="Checkerboard square width in mm", + value=10., + range=(0.1, 100., 0.1), + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="output", + label="SfMData File", + description="Path to the output SfMData file.", + value="{nodeCacheFolder}/sfmData.sfm", + ) + ] diff --git a/meshroom/aliceVision/CheckerboardDetection.py b/meshroom/aliceVision/CheckerboardDetection.py new file mode 100644 index 0000000000..6d48ba31a5 --- /dev/null +++ b/meshroom/aliceVision/CheckerboardDetection.py @@ -0,0 +1,75 @@ +__version__ = "1.0" + +from meshroom.core import desc +from meshroom.core.utils import VERBOSE_LEVEL + + +class CheckerboardDetection(desc.AVCommandLineNode): + commandLine = 'aliceVision_checkerboardDetection {allParams}' + size = desc.DynamicNodeSize('input') + parallelization = desc.Parallelization(blockSize=5) + commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}' + + category = 'Other' + documentation = ''' +Detect checkerboard structures in a set of images. +The detection method also supports nested calibration grids. +''' + + inputs = [ + desc.File( + name="input", + label="Input", + description="Input SfMData file. Viewpoints must correspond to lens calibration grids.", + value="", + ), + desc.BoolParam( + name="useNestedGrids", + label="Nested Calibration Grid", + description="Enable if images contain nested calibration grids. 
These grids must be centered on the image center.", + value=False, + ), + desc.BoolParam( + name="doubleSize", + label="Double Size", + description="Double the image size prior to processing.", + value=False, + ), + desc.BoolParam( + name="ignorePixelAspectRatio", + label="Ignore Pixel Aspect Ratio", + description="Ignore pixel aspect ratio for detection.", + value=False, + ), + desc.BoolParam( + name="exportDebugImages", + label="Export Debug Images", + description="Export debug images.", + value=False, + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="output", + label="Folder", + description="Output folder.", + value="{nodeCacheFolder}", + ), + desc.File( + name="checkerLines", + enabled=lambda node: node.exportDebugImages.value, + label="Checker Lines", + description="Debug images.", + semantic="image", + value="{nodeCacheFolder}/.png", + group="", # do not export on the command line + ), + ] diff --git a/meshroom/aliceVision/ColorCheckerCorrection.py b/meshroom/aliceVision/ColorCheckerCorrection.py new file mode 100644 index 0000000000..007ec4bb5c --- /dev/null +++ b/meshroom/aliceVision/ColorCheckerCorrection.py @@ -0,0 +1,81 @@ +__version__ = "1.0" + +from meshroom.core import desc +from meshroom.core.utils import EXR_STORAGE_DATA_TYPE, VERBOSE_LEVEL + +import os.path + + +class ColorCheckerCorrection(desc.AVCommandLineNode): + commandLine = 'aliceVision_colorCheckerCorrection {allParams}' + size = desc.DynamicNodeSize('input') + # parallelization = desc.Parallelization(blockSize=40) + # commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}' + + documentation = ''' +(BETA) \\ +Performs color calibration from Macbeth color checker chart. + +The node assumes all the images to process are sharing the same colorimetric properties. +All the input images will get the same correction. + +If multiple color charts are submitted, only the first one will be taken in account. +''' + + inputs = [ + desc.File( + name="inputData", + label="Color Checker Data", + description="Position and colorimetric data of the color checker.", + value="", + ), + desc.File( + name="input", + label="Input", + description="Input SfMData file, image filenames or regex(es) on the image file path.\n" + "Supported regex: '#' matches a single digit, '@' one or more digits, '?' 
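`CheckerboardDetection` above declares `desc.Parallelization(blockSize=5)` together with a `--rangeStart {rangeStart} --rangeSize {rangeBlockSize}` command-line range. The actual chunking is done by Meshroom's core; the sketch below only illustrates how a node of size 12 would be split under that assumption:

```python
# Hypothetical chunking for blockSize=5 over a node of size 12 (e.g. 12 views).
def chunk_ranges(size, block_size=5):
    for start in range(0, size, block_size):
        yield start, min(block_size, size - start)

for range_start, range_block_size in chunk_ranges(12):
    print(f"--rangeStart {range_start} --rangeSize {range_block_size}")
# --rangeStart 0 --rangeSize 5
# --rangeStart 5 --rangeSize 5
# --rangeStart 10 --rangeSize 2
```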
one character and " + "'*' zero or more.", + value="", + ), + desc.ChoiceParam( + name="extension", + label="Output File Extension", + description="Output image file extension.", + value="exr", + values=["exr", ""], + ), + desc.ChoiceParam( + name="storageDataType", + label="EXR Storage Data Type", + description="Storage data type for EXR output:\n" + " - float: Use full floating point (32 bits per channel).\n" + " - half: Use half float (16 bits per channel).\n" + " - halfFinite: Use half float, but clamp values to avoid non-finite values.\n" + " - auto: Use half float if all values can fit, else use full float.", + values=EXR_STORAGE_DATA_TYPE, + value="float", + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="outSfMData", + label="SfMData", + description="Output SfMData.", + value=lambda attr: ("{nodeCacheFolder}/" + os.path.basename(attr.node.input.value)) if (os.path.splitext(attr.node.input.value)[1] in [".abc", ".sfm"]) else "", + group="", # do not export on the command line + ), + desc.File( + name="output", + label="Folder", + description="Output images folder.", + value="{nodeCacheFolder}", + ), + ] diff --git a/meshroom/aliceVision/ColorCheckerDetection.py b/meshroom/aliceVision/ColorCheckerDetection.py new file mode 100644 index 0000000000..ae8286615c --- /dev/null +++ b/meshroom/aliceVision/ColorCheckerDetection.py @@ -0,0 +1,67 @@ +__version__ = "1.0" + +from meshroom.core import desc +from meshroom.core.utils import VERBOSE_LEVEL + + +class ColorCheckerDetection(desc.AVCommandLineNode): + commandLine = 'aliceVision_colorCheckerDetection {allParams}' + size = desc.DynamicNodeSize('input') + # parallelization = desc.Parallelization(blockSize=40) + # commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}' + + documentation = ''' +(BETA) \\ +Performs Macbeth color checker chart detection. + +Outputs: +- the detected color charts position and colors +- the associated transform matrix from "theoric" to "measured" +assuming that the "theoric" Macbeth chart corners coordinates are: +(0, 0), (1675, 0), (1675, 1125), (0, 1125) + +Dev notes: +- Fisheye/pinhole is not handled +- ColorCheckerViewer is unstable with multiple color chart within a same image +''' + + inputs = [ + desc.File( + name="input", + label="Input", + description="SfMData file input, image filenames or regex(es) on the image file path.\n" + "Supported regex: '#' matches a single digit, '@' one or more digits, '?' 
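The `outSfMData` value in `ColorCheckerCorrection` above is computed by a lambda: the output keeps the input's basename, but only when the input is an actual SfMData file (`.abc`/`.sfm`). A standalone restatement of that logic, with hypothetical paths:

```python
import os.path

# Mirror of the outSfMData value lambda in ColorCheckerCorrection above.
def out_sfmdata_value(input_value, cache_folder="{nodeCacheFolder}"):
    ext = os.path.splitext(input_value)[1]
    return cache_folder + "/" + os.path.basename(input_value) if ext in (".abc", ".sfm") else ""

print(out_sfmdata_value("/data/scene.sfm"))  # -> {nodeCacheFolder}/scene.sfm
print(out_sfmdata_value("/data/*.exr"))      # -> "" (not an SfMData input)
```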
one character " + "and '*' zero or more.", + value="", + ), + desc.IntParam( + name="maxCount", + label="Max Count By Image", + description="Maximum color charts count to detect in a single image.", + value=1, + range=(1, 3, 1), + advanced=True, + ), + desc.BoolParam( + name="debug", + label="Debug", + description="If checked, debug data will be generated.", + value=False, + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="outputData", + label="Color Checker Data", + description="Output position and colorimetric data extracted from detected color checkers in the images.", + value="{nodeCacheFolder}/ccheckers.json", + ), + ] diff --git a/meshroom/aliceVision/ConvertDistortion.py b/meshroom/aliceVision/ConvertDistortion.py new file mode 100644 index 0000000000..19094c05b8 --- /dev/null +++ b/meshroom/aliceVision/ConvertDistortion.py @@ -0,0 +1,53 @@ +__version__ = "1.0" + +from meshroom.core import desc +from meshroom.core.utils import VERBOSE_LEVEL + + +class ConvertDistortion(desc.AVCommandLineNode): + commandLine = 'aliceVision_convertDistortion {allParams}' + size = desc.DynamicNodeSize('input') + + category = 'Utils' + documentation = ''' + Convert distortions between different models. + ''' + + inputs = [ + desc.File( + name="input", + label="Input", + description="Input SfMData file.", + value="", + ), + desc.ChoiceParam( + name="from", + label="From", + description="Distortion model to convert from.", + value="distortion", + values=["distortion", "undistortion"], + ), + desc.ChoiceParam( + name="to", + label="To", + description="Distortion model to convert to.", + value="undistortion", + values=["distortion", "undistortion"], + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="output", + label="Output", + description="Path to the output SfMData file.", + value="{nodeCacheFolder}/sfm.abc", + ), + ] diff --git a/meshroom/aliceVision/ConvertMesh.py b/meshroom/aliceVision/ConvertMesh.py new file mode 100644 index 0000000000..27b3d64c1c --- /dev/null +++ b/meshroom/aliceVision/ConvertMesh.py @@ -0,0 +1,44 @@ +__version__ = "1.0" + +from meshroom.core import desc +from meshroom.core.utils import VERBOSE_LEVEL + + +class ConvertMesh(desc.AVCommandLineNode): + commandLine = 'aliceVision_convertMesh {allParams}' + + category = 'Utils' + documentation = '''This node allows to convert a mesh to another format.''' + + inputs = [ + desc.File( + name="inputMesh", + label="Input Mesh", + description="Input mesh (*.obj, *.mesh, *.meshb, *.ply, *.off, *.stl).", + value="", + ), + desc.ChoiceParam( + name="outputMeshFileType", + label="Output File Type", + description="Output mesh format (*.obj, *.gltf, *.fbx, *.stl).", + value="obj", + values=["gltf", "obj", "fbx", "stl"], + group="", + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="output", + label="Mesh", + description="Output mesh (*.obj, *.mesh, *.meshb, *.ply, *.off, *.stl).", + value="{nodeCacheFolder}/mesh.{outputMeshFileTypeValue}", + ), + ] diff --git a/meshroom/aliceVision/ConvertSfMFormat.py 
b/meshroom/aliceVision/ConvertSfMFormat.py new file mode 100644 index 0000000000..1b8d5a2aad --- /dev/null +++ b/meshroom/aliceVision/ConvertSfMFormat.py @@ -0,0 +1,105 @@ +__version__ = "2.0" + +from meshroom.core import desc +from meshroom.core.utils import DESCRIBER_TYPES, VERBOSE_LEVEL + + +class ConvertSfMFormat(desc.AVCommandLineNode): + commandLine = 'aliceVision_convertSfMFormat {allParams}' + size = desc.DynamicNodeSize('input') + + category = 'Utils' + documentation = ''' +Convert an SfM scene from one file format to another. +It can also be used to remove specific parts of an SfM scene (such as filtering out all 3D landmarks or all 2D observations). +''' + + inputs = [ + desc.File( + name="input", + label="Input", + description="Input SfMData file.", + value="", + ), + desc.ChoiceParam( + name="fileExt", + label="SfM File Format", + description="Output SfM file format.", + value="abc", + values=["abc", "sfm", "json", "ply", "baf"], + group="", # exclude from command line + ), + desc.ChoiceParam( + name="describerTypes", + label="Describer Types", + description="Describer types to keep.", + values=DESCRIBER_TYPES, + value=["dspsift"], + exclusive=False, + joinChar=",", + exposed=True, + ), + desc.ListAttribute( + elementDesc=desc.File( + name="imageId", + label="Image ID", + description="UID or path of an image to add to the white list.", + value="", + ), + name="imageWhiteList", + label="Image White List", + description="Image white list (UIDs or image paths).", + ), + desc.BoolParam( + name="views", + label="Views", + description="Export views.", + value=True, + ), + desc.BoolParam( + name="intrinsics", + label="Intrinsics", + description="Export intrinsics.", + value=True, + ), + desc.BoolParam( + name="extrinsics", + label="Extrinsics", + description="Export extrinsics.", + value=True, + ), + desc.BoolParam( + name="structure", + label="Structure", + description="Export structure.", + value=True, + ), + desc.BoolParam( + name="observations", + label="Observations", + description="Export observations.", + value=True, + ), + desc.BoolParam( + name="surveys", + label="Surveys", + description="Export surveys.", + value=True, + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="output", + label="Output", + description="Path to the output SfMData file.", + value="{nodeCacheFolder}/sfm.{fileExtValue}", + ), + ] diff --git a/meshroom/aliceVision/DepthMap.py b/meshroom/aliceVision/DepthMap.py new file mode 100644 index 0000000000..bd391af037 --- /dev/null +++ b/meshroom/aliceVision/DepthMap.py @@ -0,0 +1,624 @@ +__version__ = "5.0" + +from meshroom.core import desc +from meshroom.core.utils import VERBOSE_LEVEL + + +class DepthMap(desc.AVCommandLineNode): + commandLine = "aliceVision_depthMapEstimation {allParams}" + gpu = desc.Level.INTENSIVE + size = desc.DynamicNodeSize("input") + parallelization = desc.Parallelization(blockSize=12) + commandLineRange = "--rangeStart {rangeStart} --rangeSize {rangeBlockSize}" + + category = "Dense Reconstruction" + documentation = """ +Estimate a depth map for each calibrated camera using Plane Sweeping, a multi-view stereo algorithm notable for its +efficiency on modern graphics hardware (GPU). + +Adjust the downscale factor to compute depth maps at a higher/lower resolution.
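+As a rough illustration of how the scale factors compound (the sgmScale and sgmStepXY parameters described below apply on top of this global downscale; this is only a sketch, the exact behaviour is defined by the aliceVision binary):
+```python
+def effective_resolution(width, height, downscale=2, sgm_scale=2, sgm_step_xy=2):
+    # Each factor further divides the resolution at which the SGM
+    # similarity volume is sampled.
+    factor = downscale * sgm_scale * sgm_step_xy
+    return width // factor, height // factor
+
+print(effective_resolution(4000, 3000))  # -> (500, 375)
+```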
+Use a downscale factor of one (full-resolution) only if the quality of the input images is really high +(camera on a tripod with high-quality optics). + +## Online +[https://alicevision.org/#photogrammetry/depth_maps_estimation](https://alicevision.org/#photogrammetry/depth_maps_estimation) +""" + + inputs = [ + desc.File( + name="input", + label="SfMData", + description="Input SfMData file.", + value="", + ), + desc.File( + name="imagesFolder", + label="Images Folder", + description="Use images from a specific folder instead of those specified in the SfMData file.\n" + "Filename should be the image UID.", + value="", + ), + desc.ChoiceParam( + name="downscale", + label="Downscale", + description="Downscale the input images to compute the depth map.\n" + "Full resolution (downscale = 1) gives the best result,\n" + "but using a larger downscale will reduce computation time at the expense of quality.\n" + "If the images are noisy, blurry or if the surfaces are challenging (weakly-textured or with " + "specularities), a larger downscale may improve robustness.", + value=2, + values=[1, 2, 4, 8, 16], + ), + desc.FloatParam( + name="minViewAngle", + label="Min View Angle", + description="Minimum angle between two views (select the neighbouring cameras, select depth planes from " + "epipolar segment point).", + value=2.0, + range=(0.0, 10.0, 0.1), + advanced=True, + ), + desc.FloatParam( + name="maxViewAngle", + label="Max View Angle", + description="Maximum angle between two views (select the neighbouring cameras, select depth planes from " + "epipolar segment point).", + value=70.0, + range=(10.0, 120.0, 1.0), + advanced=True, + ), + desc.GroupAttribute( + name="tiling", + label="Tiling", + description="Tiles are used to split the computation into fixed buffers to fit the GPU best.", + group=None, + groupDesc=[ + desc.IntParam( + name="tileBufferWidth", + label="Buffer Width", + description="Maximum tile buffer width.", + value=1024, + range=(-1, 2000, 10), + ), + desc.IntParam( + name="tileBufferHeight", + label="Buffer Height", + description="Maximum tile buffer height.", + value=1024, + range=(-1, 2000, 10), + ), + desc.IntParam( + name="tilePadding", + label="Padding", + description="Buffer padding for overlapping tiles.", + value=64, + range=(0, 500, 1), + ), + desc.BoolParam( + name="autoAdjustSmallImage", + label="Auto Adjust Small Image", + description="Automatically adjust depth map parameters if images are smaller than one tile\n" + "(maxTCamsPerTile = maxTCams, adjust step if needed).", + value=True, + advanced=True, + ), + ], + ), + desc.BoolParam( + name="chooseTCamsPerTile", + label="Choose Neighbour Cameras Per Tile", + description="Choose neighbour cameras per tile or globally to the image.", + value=True, + advanced=True, + ), + desc.IntParam( + name="maxTCams", + label="Max Nb Neighbour Cameras", + description="Maximum number of neighbour cameras per image.", + value=10, + range=(1, 20, 1), + ), + desc.GroupAttribute( + name="sgm", + label="SGM", + description="The Semi-Global Matching (SGM) step computes a similarity volume and extracts the initial " + "low-resolution depth map.\n" + "This method is highly robust but has limited depth precision (banding artifacts due to a " + "limited list of depth planes).", + group=None, + groupDesc=[ + desc.IntParam( + name="sgmScale", + label="Downscale Factor", + description="Downscale factor applied on source images for the SGM step (in addition to the global " + "downscale).", + value=2, + range=(-1, 10, 1), + ), + desc.IntParam( +
name="sgmStepXY", + label="Step XY", + description="The step is used to compute the similarity volume for one pixel over N " + "(in the XY image plane).", + value=2, + range=(-1, 10, 1), + ), + desc.IntParam( + name="sgmStepZ", + label="Step Z", + description="Initial step used to compute the similarity volume on Z axis (every N pixels on the " + "epilolar line).\n" + "-1 means automatic estimation.\n" + "This value will be adjusted in all case to fit in the max memory (sgmMaxDepths).", + value=-1, + range=(-1, 10, 1), + ), + desc.IntParam( + name="sgmMaxTCamsPerTile", + label="Max Nb Neighbour Cameras Per Tile", + description="Maximum number of neighbour cameras used per tile.", + value=4, + range=(1, 20, 1), + ), + desc.IntParam( + name="sgmWSH", + label="WSH", + description="Half-size of the patch used to compute the similarity. Patch width is wsh*2+1.", + value=4, + range=(1, 20, 1), + advanced=True, + ), + desc.BoolParam( + name="sgmUseSfmSeeds", + label="Use SfM Landmarks", + description="Use landmarks from Structure-from-Motion as input seeds to define min/max depth ranges.", + value=True, + advanced=True, + ), + desc.FloatParam( + name="sgmSeedsRangeInflate", + label="Seeds Range Inflate", + description="Inflate factor to add margins around SfM seeds.", + value=0.2, + range=(0.0, 2.0, 0.1), + advanced=True, + ), + desc.FloatParam( + name="sgmDepthThicknessInflate", + label="Thickness Inflate", + description="Inflate factor to add margins to the depth thickness.", + value=0.0, + range=(0.0, 2.0, 0.1), + advanced=True, + ), + desc.FloatParam( + name="sgmMaxSimilarity", + label="Max Similarity", + description="Maximum similarity threshold (between 0 and 1) used to filter out poorly supported " + "depth values.", + value=1.0, + range=(0.0, 1.0, 0.01), + advanced=True, + ), + desc.FloatParam( + name="sgmGammaC", + label="GammaC", + description="GammaC threshold used for similarity computation.", + value=5.5, + range=(0.0, 30.0, 0.5), + advanced=True, + ), + desc.FloatParam( + name="sgmGammaP", + label="GammaP", + description="GammaP threshold used for similarity computation.", + value=8.0, + range=(0.0, 30.0, 0.5), + advanced=True, + ), + desc.FloatParam( + name="sgmP1", + label="P1", + description="P1 parameter for SGM filtering.", + value=10.0, + range=(0.0, 255.0, 0.5), + advanced=True, + ), + desc.FloatParam( + name="sgmP2Weighting", + label="P2 Weighting", + description="P2 weighting parameter for SGM filtering.", + value=100.0, + range=(-255.0, 255.0, 0.5), + advanced=True, + ), + desc.IntParam( + name="sgmMaxDepths", + label="Max Depths", + description="Maximum number of depths in the similarity volume.", + value=1500, + range=(1, 5000, 1), + advanced=True, + ), + desc.StringParam( + name="sgmFilteringAxes", + label="Filtering Axes", + description="Define axes for the filtering of the similarity volume.", + value="YX", + advanced=True, + ), + desc.BoolParam( + name="sgmDepthListPerTile", + label="Depth List Per Tile", + description="Select the list of depth planes per tile or globally to the image.", + value=True, + advanced=True, + ), + desc.BoolParam( + name="sgmUseConsistentScale", + label="Consistent Scale", + description="Compare patch with consistent scale for similarity volume computation.", + value=False, + ), + ], + ), + desc.GroupAttribute( + name="refine", + label="Refine", + description="The refine step computes a similarity volume in higher resolution but with a small depth " + "range around the SGM depth map.\n" + "This allows to compute a depth map with sub-pixel 
accuracy.", + group=None, + groupDesc=[ + desc.BoolParam( + name="refineEnabled", + label="Enable", + description="Enable depth/similarity map refinement process.", + value=True, + ), + desc.IntParam( + name="refineScale", + label="Downscale Factor", + description="Downscale factor applied on source images for the Refine step (in addition to the " + "global downscale).", + value=1, + range=(-1, 10, 1), + enabled=lambda node: node.refine.refineEnabled.value, + ), + desc.IntParam( + name="refineStepXY", + label="Step XY", + description="The step is used to compute the refine volume for one pixel over N (in the XY image plane).", + value=1, + range=(-1, 10, 1), + enabled=lambda node: node.refine.refineEnabled.value, + ), + desc.IntParam( + name="refineMaxTCamsPerTile", + label="Max Nb Neighbour Cameras Per Tile", + description="Maximum number of neighbour cameras used per tile.", + value=4, + range=(1, 20, 1), + enabled=lambda node: node.refine.refineEnabled.value, + ), + desc.IntParam( + name="refineSubsampling", + label="Number Of Subsamples", + description="The number of subsamples used to extract the best depth from the refine volume " + "(sliding gaussian window precision).", + value=10, + range=(1, 30, 1), + advanced=True, + enabled=lambda node: node.refine.refineEnabled.value, + ), + desc.IntParam( + name="refineHalfNbDepths", + label="Half Number Of Depths", + description="The thickness of the refine area around the initial depth map.\n" + "This parameter defines the number of depths in front of and behind the initial value\n" + "for which we evaluate the similarity with a finer z sampling.", + value=15, + range=(1, 50, 1), + advanced=True, + enabled=lambda node: node.refine.refineEnabled.value, + ), + desc.IntParam( + name="refineWSH", + label="WSH", + description="Half-size of the patch used to compute the similarity. 
Patch width is wsh*2+1.", + value=3, + range=(1, 20, 1), + advanced=True, + enabled=lambda node: node.refine.refineEnabled.value, + ), + desc.FloatParam( + name="refineSigma", + label="Sigma", + description="Sigma (2*sigma^2) of the Gaussian filter used to extract the best depth from " + "the refine volume.", + value=15.0, + range=(0.0, 30.0, 0.5), + advanced=True, + enabled=lambda node: node.refine.refineEnabled.value, + ), + desc.FloatParam( + name="refineGammaC", + label="GammaC", + description="GammaC threshold used for similarity computation.", + value=15.5, + range=(0.0, 30.0, 0.5), + advanced=True, + enabled=lambda node: node.refine.refineEnabled.value, + ), + desc.FloatParam( + name="refineGammaP", + label="GammaP", + description="GammaP threshold used for similarity computation.", + value=8.0, + range=(0.0, 30.0, 0.5), + advanced=True, + enabled=lambda node: node.refine.refineEnabled.value, + ), + desc.BoolParam( + name="refineInterpolateMiddleDepth", + label="Interpolate Middle Depth", + description="Enable middle depth bilinear interpolation.", + value=False, + enabled=lambda node: node.refine.refineEnabled.value, + ), + desc.BoolParam( + name="refineUseConsistentScale", + label="Consistent Scale", + description="Compare patch with consistent scale for similarity volume computation.", + value=False, + enabled=lambda node: node.refine.refineEnabled.value, + ), + ], + ), + desc.GroupAttribute( + name="colorOptimization", + label="Color Optimization", + description="Color optimization post-process parameters.", + group=None, + groupDesc=[ + desc.BoolParam( + name="colorOptimizationEnabled", + label="Enable", + description="Enable depth/similarity map post-process color optimization.", + value=True, + ), + desc.IntParam( + name="colorOptimizationNbIterations", + label="Number Of Iterations", + description="Number of iterations for the optimization.", + value=100, + range=(1, 500, 10), + advanced=True, + enabled=lambda node: node.colorOptimization.colorOptimizationEnabled.value, + ), + ], + ), + desc.GroupAttribute( + name="customPatchPattern", + label="Custom Patch Pattern", + description="User custom patch pattern for similarity comparison.", + advanced=True, + group=None, + groupDesc=[ + desc.BoolParam( + name="sgmUseCustomPatchPattern", + label="Enable For SGM", + description="Enable custom patch pattern for similarity volume computation at the SGM step.", + value=False, + advanced=True, + ), + desc.BoolParam( + name="refineUseCustomPatchPattern", + label="Enable For Refine", + description="Enable custom patch pattern for similarity volume computation at the Refine step.", + value=False, + advanced=True, + ), + desc.ListAttribute( + name="customPatchPatternSubparts", + label="Subparts", + description="User custom patch pattern subparts for similarity volume computation.", + advanced=True, + enabled=lambda node: (node.customPatchPattern.sgmUseCustomPatchPattern.value or + node.customPatchPattern.refineUseCustomPatchPattern.value), + elementDesc=desc.GroupAttribute( + name="customPatchPatternSubpart", + label="Patch Pattern Subpart", + description="Custom patch pattern subpart configuration for similarity volume computation.", + joinChar=":", + group=None, + groupDesc=[ + desc.ChoiceParam( + name="customPatchPatternSubpartType", + label="Type", + description="Patch pattern subpart type.", + value="full", + values=["full", "circle"], + ), + desc.FloatParam( + name="customPatchPatternSubpartRadius", + label="Radius / WSH", + description="Patch pattern subpart half-width or circle 
radius.", + value=2.5, + range=(0.5, 30.0, 0.1), + ), + desc.IntParam( + name="customPatchPatternSubpartNbCoords", + label="Coordinates", + description="Patch pattern subpart number of coordinates (for circle or ignore).", + value=12, + range=(3, 24, 1), + ), + desc.IntParam( + name="customPatchPatternSubpartLevel", + label="Level", + description="Patch pattern subpart image level.", + value=0, + range=(0, 2, 1), + ), + desc.FloatParam( + name="customPatchPatternSubpartWeight", + label="Weight", + description="Patch pattern subpart weight.", + value=1.0, + range=(0.0, 1.0, 0.1), + ), + ], + ), + ), + desc.BoolParam( + name="customPatchPatternGroupSubpartsPerLevel", + label="Group Subparts Per Level", + description="Group all subparts with the same image level.", + value=False, + advanced=True, + enabled=lambda node: (node.customPatchPattern.sgmUseCustomPatchPattern.value or + node.customPatchPattern.refineUseCustomPatchPattern.value), + ), + ], + ), + desc.GroupAttribute( + name="intermediateResults", + label="Intermediate Results", + description="Intermediate results parameters for debug purposes.\n" + "Warning: Dramatically affect performances and use large amount of storage.", + advanced=True, + group=None, + groupDesc=[ + desc.BoolParam( + name="exportIntermediateDepthSimMaps", + label="Export Depth Maps", + description="Export intermediate depth/similarity maps from the SGM and Refine steps.", + value=False, + advanced=True, + ), + desc.BoolParam( + name="exportIntermediateNormalMaps", + label="Export Normal Maps", + description="Export intermediate normal maps from the SGM and Refine steps.", + value=False, + advanced=True, + ), + desc.BoolParam( + name="exportIntermediateVolumes", + label="Export Volumes", + description="Export intermediate full similarity volumes from the SGM and Refine steps.", + value=False, + advanced=True, + ), + desc.BoolParam( + name="exportIntermediateCrossVolumes", + label="Export Cross Volumes", + description="Export intermediate similarity cross volumes from the SGM and Refine steps.", + value=False, + advanced=True, + ), + desc.BoolParam( + name="exportIntermediateTopographicCutVolumes", + label="Export Cut Volumes", + description="Export intermediate similarity topographic cut volumes from the SGM and Refine steps.", + value=False, + advanced=True, + ), + desc.BoolParam( + name="exportIntermediateVolume9pCsv", + label="Export 9 Points", + description="Export intermediate volumes 9 points from the SGM and Refine steps in CSV files.", + value=False, + advanced=True, + ), + desc.BoolParam( + name="exportTilePattern", + label="Export Tile Pattern", + description="Export the bounding boxes of tiles volumes as meshes. 
" + "This allows to visualize the depth map search areas.", + value=False, + advanced=True, + ), + ], + ), + desc.IntParam( + name="nbGPUs", + label="Number Of GPUs", + description="Number of GPUs to use (0 means that all the available GPUs will be used).", + value=0, + range=(0, 5, 1), + invalidate=False, + advanced=True, + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="output", + label="Folder", + description="Output folder for generated depth maps.", + value="{nodeCacheFolder}", + ), + # these attributes are only here to describe more accurately the output of the node + # by specifying that it generates 2 sequences of images + # (see in Viewer2D.qml how these attributes can be used) + desc.File( + name="depth", + label="Depth Maps", + description="Generated depth maps.", + semantic="image", + value="{nodeCacheFolder}/_depthMap.exr", + group="", # do not export on the command line + ), + desc.File( + name="sim", + label="Sim Maps", + description="Generated sim maps.", + semantic="image", + value="{nodeCacheFolder}/_simMap.exr", + group="", # do not export on the command line + ), + desc.File( + name="tilePattern", + label="Tile Pattern", + description="Debug: Tile pattern.", + value="{nodeCacheFolder}/_tilePattern.obj", + group="", # do not export on the command line + enabled=lambda node: node.intermediateResults.exportTilePattern.value, + ), + desc.File( + name="depthSgm", + label="Depth Maps SGM", + description="Debug: Depth maps SGM", + semantic="image", + value="{nodeCacheFolder}/_depthMap_sgm.exr", + group="", # do not export on the command line + enabled=lambda node: node.intermediateResults.exportIntermediateDepthSimMaps.value, + ), + desc.File( + name="depthSgmUpscaled", + label="Depth Maps SGM Upscaled", + description="Debug: Depth maps SGM upscaled.", + semantic="image", + value="{nodeCacheFolder}/_depthMap_sgmUpscaled.exr", + group="", # do not export on the command line + enabled=lambda node: node.intermediateResults.exportIntermediateDepthSimMaps.value, + ), + desc.File( + name="depthRefined", + label="Depth Maps Refined", + description="Debug: Depth maps after refinement", + semantic="image", + value="{nodeCacheFolder}/_depthMap_refinedFused.exr", + group="", # do not export on the command line + enabled=lambda node: node.intermediateResults.exportIntermediateDepthSimMaps.value, + ), + ] diff --git a/meshroom/aliceVision/DepthMapFilter.py b/meshroom/aliceVision/DepthMapFilter.py new file mode 100644 index 0000000000..701708cf98 --- /dev/null +++ b/meshroom/aliceVision/DepthMapFilter.py @@ -0,0 +1,146 @@ +__version__ = "4.0" + +from meshroom.core import desc +from meshroom.core.utils import VERBOSE_LEVEL + + +class DepthMapFilter(desc.AVCommandLineNode): + commandLine = 'aliceVision_depthMapFiltering {allParams}' + gpu = desc.Level.NORMAL + size = desc.DynamicNodeSize('input') + parallelization = desc.Parallelization(blockSize=24) + commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}' + + category = 'Dense Reconstruction' + documentation = ''' +Filter depth map values that are not coherent in multiple depth maps. +This allows to filter unstable points before starting the fusion of all depth maps in the Meshing node. 
+''' + + inputs = [ + desc.File( + name="input", + label="SfMData", + description="SfMData file.", + value="", + ), + desc.File( + name="depthMapsFolder", + label="Depth Maps Folder", + description="Input depth maps folder.", + value="", + ), + desc.FloatParam( + name="minViewAngle", + label="Min View Angle", + description="Minimum angle between two views.", + value=2.0, + range=(0.0, 10.0, 0.1), + advanced=True, + ), + desc.FloatParam( + name="maxViewAngle", + label="Max View Angle", + description="Maximum angle between two views.", + value=70.0, + range=(10.0, 120.0, 1.0), + advanced=True, + ), + desc.IntParam( + name="nNearestCams", + label="Number Of Nearest Cameras", + description="Number of nearest cameras used for filtering.", + value=10, + range=(0, 20, 1), + advanced=True, + ), + desc.IntParam( + name="minNumOfConsistentCams", + label="Min Consistent Cameras", + description="Minimum number of consistent cameras.", + value=3, + range=(0, 10, 1), + ), + desc.IntParam( + name="minNumOfConsistentCamsWithLowSimilarity", + label="Min Consistent Cameras Bad Similarity", + description="Minimum number of consistent cameras for pixels with weak similarity value.", + value=4, + range=(0, 10, 1), + ), + desc.FloatParam( + name="pixToleranceFactor", + label="Tolerance Size", + description="Filtering tolerance size factor, in pixels.", + value=2.0, + range=(0.001, 10.0, 0.1), + advanced=True, + ), + desc.IntParam( + name="pixSizeBall", + label="Filtering Size", + description="Filtering size in pixels.", + value=0, + range=(0, 10, 1), + advanced=True, + ), + desc.IntParam( + name="pixSizeBallWithLowSimilarity", + label="Filtering Size Bad Similarity", + description="Filtering size in pixels for low similarity.", + value=0, + range=(0, 10, 1), + advanced=True, + ), + desc.BoolParam( + name="computeNormalMaps", + label="Compute Normal Maps", + description="Compute normal maps for each depth map.", + value=False, + advanced=True, + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="output", + label="Filtered Depth Maps Folder", + description="Output folder for generated depth maps.", + value="{nodeCacheFolder}" + ), + # these attributes are only here to describe more accurately the output of the node + # by specifying that it generates 2 sequences of images + # (see in Viewer2D.qml how these attributes can be used) + desc.File( + name="depth", + label="Depth Maps", + description="Filtered depth maps.", + semantic="image", + value="{nodeCacheFolder}/_depthMap.exr", + group="", # do not export on the command line + ), + desc.File( + name="sim", + label="Sim Maps", + description="Filtered sim maps.", + semantic="image", + value="{nodeCacheFolder}/_simMap.exr", + group="", # do not export on the command line + ), + desc.File( + name="normal", + label="Normal Maps", + description="Normal maps.", + semantic="image", + value="{nodeCacheFolder}/_normalMap.exr", + enabled=lambda node: node.computeNormalMaps.value, + group="", # do not export on the command line + ), + ] diff --git a/meshroom/aliceVision/DepthMapRendering.py b/meshroom/aliceVision/DepthMapRendering.py new file mode 100644 index 0000000000..fa071c0489 --- /dev/null +++ b/meshroom/aliceVision/DepthMapRendering.py @@ -0,0 +1,60 @@ +__version__ = "1.0" + +from meshroom.core import desc +from meshroom.core.utils import VERBOSE_LEVEL + + +class 
DepthMapRendering(desc.AVCommandLineNode): + commandLine = "aliceVision_depthMapRendering {allParams}" + + category = "Utils" + documentation = """ + Using camera parameters and a mesh, render a depth map for each view. + """ + + inputs = [ + desc.File( + name="input", + label="Input SfMData", + description="Input SfMData file.", + value="", + ), + desc.File( + name="mesh", + label="Input Mesh", + description="Input mesh file.", + value="", + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="output", + label="Folder", + description="Output folder.", + value="{nodeCacheFolder}", + ), + desc.File( + name="depth", + label="Depth Maps", + description="Rendered depth maps.", + semantic="image", + value="{nodeCacheFolder}/_depthMap.exr", + group="", # do not export on the command line + ), + desc.File( + name="mask", + label="Masks", + description="Masks.", + semantic="image", + value="{nodeCacheFolder}/_mask.exr", + group="", # do not export on the command line + ), + ] diff --git a/meshroom/aliceVision/DistortionCalibration.py b/meshroom/aliceVision/DistortionCalibration.py new file mode 100644 index 0000000000..8276ace659 --- /dev/null +++ b/meshroom/aliceVision/DistortionCalibration.py @@ -0,0 +1,64 @@ +__version__ = '5.0' + +from meshroom.core import desc +from meshroom.core.utils import VERBOSE_LEVEL + + +class DistortionCalibration(desc.AVCommandLineNode): + commandLine = 'aliceVision_distortionCalibration {allParams}' + size = desc.DynamicNodeSize('input') + + category = 'Other' + documentation = ''' +Calibration of the distortion of a camera/lens combination using a full-screen checkerboard. +''' + + inputs = [ + desc.File( + name="input", + label="Input SfMData", + description="SfMData file.", + value="", + ), + desc.File( + name="checkerboards", + label="Checkerboards Folder", + description="Folder containing checkerboard JSON files.", + value="", + ), + desc.ChoiceParam( + name="undistortionModelName", + label="Undistortion Model", + description="Model used to estimate undistortion.", + value="3deanamorphic4", + values=["3deanamorphic4", "3declassicld", "3deradial4"], + ), + desc.BoolParam( + name="handleSqueeze", + label="Handle Squeeze", + description="Estimate squeeze.", + value=True, + ), + desc.BoolParam( + name="isDesqueezed", + label="Is Desqueezed", + description="True if the input image is already desqueezed.", + value=False, + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="output", + label="SfMData File", + description="Path to the output SfMData file.", + value="{nodeCacheFolder}/sfmData.sfm", + ), + ] diff --git a/meshroom/aliceVision/ExportAnimatedCamera.py b/meshroom/aliceVision/ExportAnimatedCamera.py new file mode 100644 index 0000000000..50ca8d0996 --- /dev/null +++ b/meshroom/aliceVision/ExportAnimatedCamera.py @@ -0,0 +1,111 @@ +__version__ = "2.0" + +from meshroom.core import desc +from meshroom.core.utils import VERBOSE_LEVEL + + +class ExportAnimatedCamera(desc.AVCommandLineNode): + commandLine = 'aliceVision_exportAnimatedCamera {allParams}' + size = desc.DynamicNodeSize('input') + + category = 'Export' + documentation = ''' +Convert cameras from an SfM scene into an animated camera in the Alembic file format.
+Based on the input image filenames, it will recognize the input video sequence to create an animated camera. +''' + + inputs = [ + desc.File( + name="input", + label="Input SfMData", + description="SfMData file containing a complete SfM.", + value="", + ), + desc.File( + name="sfmDataFilter", + label="SfMData Filter", + description="Filter out cameras from the export if they are part of this SfMData.\n" + "If empty, export all cameras.", + value="", + ), + desc.File( + name="viewFilter", + label="View Filter", + description="Select the cameras to export using an expression based on the image filepath.\n" + "If empty, export all cameras.", + value="", + ), + desc.BoolParam( + name="exportSTMaps", + label="Export ST Maps", + description="Export ST maps. Motion (x, y) is encoded in the image channels to correct the lens distortion.\n" + "It represents the absolute pixel positions of an image normalized between 0 and 1.", + value=True, + ), + desc.BoolParam( + name="exportUndistortedImages", + label="Export Undistorted Images", + description="Export undistorted images.", + value=False, + ), + desc.ChoiceParam( + name="undistortedImageType", + label="Undistort Image Format", + description="Image file format to use for undistorted images ('jpg', 'png', 'tif', 'exr (half)').", + value="exr", + values=["jpg", "png", "tif", "exr"], + enabled=lambda node: node.exportUndistortedImages.value, + ), + desc.BoolParam( + name="exportFullROD", + label="Export Full ROD", + description="Export full ROD.", + value=False, + enabled=lambda node: node.exportUndistortedImages.value and node.undistortedImageType.value == "exr", + ), + desc.BoolParam( + name="correctPrincipalPoint", + label="Correct Principal Point", + description="Correct principal point.", + value=False, + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="output", + label="Folder", + description="Output folder with animated camera and undistorted images.", + value="{nodeCacheFolder}", + ), + desc.File( + name="outputCamera", + label="Camera", + description="Output filename for the animated camera in Alembic format.", + value="{nodeCacheFolder}/camera.abc", + group="", # exclude from command line + ), + desc.File( + name="outputUndistorted", + label="Undistorted Folder", + description="Output undistorted folder.", + value="{nodeCacheFolder}/undistort/", + group="", # exclude from command line + ), + desc.File( + name="outputImages", + label="Undistorted Images", + description="Output undistorted images.", + value="{nodeCacheFolder}/undistort/_.{undistortedImageTypeValue}", + semantic="image", + group="", # exclude from command line + enabled=lambda node: node.exportUndistortedImages.value, + ), + ] diff --git a/meshroom/aliceVision/ExportColoredPointCloud.py b/meshroom/aliceVision/ExportColoredPointCloud.py new file mode 100644 index 0000000000..9ffa194fbe --- /dev/null +++ b/meshroom/aliceVision/ExportColoredPointCloud.py @@ -0,0 +1,37 @@ +__version__ = "1.0" + +from meshroom.core import desc +from meshroom.core.utils import VERBOSE_LEVEL + + +class ExportColoredPointCloud(desc.AVCommandLineNode): + commandLine = 'aliceVision_exportColoredPointCloud {allParams}' + + category = 'Export' + documentation = ''' + ''' + + inputs = [ + desc.File( + name="input", + label="Input SfMData", + description="SfMData file containing a complete SfM.", + value="", + ), + 
desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="output", + label="Point Cloud Filepath", + description="Output point cloud with visibilities as SfMData file.", + value="{nodeCacheFolder}/pointCloud.abc", + ), + ] diff --git a/meshroom/aliceVision/ExportDistortion.py b/meshroom/aliceVision/ExportDistortion.py new file mode 100644 index 0000000000..902b865d01 --- /dev/null +++ b/meshroom/aliceVision/ExportDistortion.py @@ -0,0 +1,99 @@ +__version__ = "2.0" + +from meshroom.core import desc +from meshroom.core.utils import VERBOSE_LEVEL + + +class ExportDistortion(desc.AVCommandLineNode): + commandLine = "aliceVision_exportDistortion {allParams}" + + category = "Export" + documentation = """ +Export the lens distortion model as Nuke node and STMaps. +It also allows to export an undistorted image of the lens grids for validation. +""" + + inputs = [ + desc.File( + name="input", + label="Input SfMData", + description="Input SfMData file.", + value="", + ), + desc.BoolParam( + name="exportNukeNode", + label="Export Nuke Node", + description="Export Nuke LensDistortion node as nuke file.\n" + "Only supports 3DEqualizer lens models.", + value=True, + ), + desc.BoolParam( + name="exportAnimatedNukeNode", + label="Export Animated Nuke Node", + description="Export animated distortion for this sequence as nuke file.", + value=False, + ), + desc.BoolParam( + name="exportLensGridsUndistorted", + label="Export Lens Grids Undistorted", + description="Export the lens grids undistorted for validation.", + value=True, + ), + desc.BoolParam( + name="exportSTMaps", + label="Export STMaps", + description="Export STMaps for distortion and undistortion.", + value=True, + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="output", + label="Folder", + description="Output folder.", + value="{nodeCacheFolder}", + ), + desc.File( + name="distortionNukeNode", + label="Distortion Nuke Node", + description="Calibrated distortion ST map.", + value="{nodeCacheFolder}/nukeLensDistortion_.nk", + group="", # do not export on the command line + enabled=lambda node: node.exportNukeNode.value, + ), + desc.File( + name="lensGridsUndistorted", + label="Undistorted Lens Grids", + description="Undistorted lens grids for validation", + semantic="image", + value="{nodeCacheFolder}/lensgrid__undistort.exr", + group="", # do not export on the command line + enabled=lambda node: node.exportLensGridsUndistorted.value, + ), + desc.File( + name="distortionStMap", + label="Distortion ST Map", + description="Calibrated distortion ST map.", + semantic="image", + value="{nodeCacheFolder}/stmap__distort.exr", + group="", # do not export on the command line + enabled=lambda node: node.exportSTMaps.value, + ), + desc.File( + name="undistortionStMap", + label="Undistortion ST Map", + description="Calibrated undistortion ST map.", + semantic="image", + value="{nodeCacheFolder}/stmap__undistort.exr", + group="", # do not export on the command line + enabled=lambda node: node.exportSTMaps.value, + ), + ] diff --git a/meshroom/aliceVision/ExportImages.py b/meshroom/aliceVision/ExportImages.py new file mode 100644 index 0000000000..029b1092c9 --- /dev/null +++ 
b/meshroom/aliceVision/ExportImages.py @@ -0,0 +1,83 @@ +__version__ = "1.0" + +from meshroom.core import desc +from meshroom.core.utils import VERBOSE_LEVEL + + +class ExportImages(desc.AVCommandLineNode): + commandLine = 'aliceVision_exportImages {allParams}' + size = desc.DynamicNodeSize('input') + parallelization = desc.Parallelization(blockSize=40) + commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}' + + category = 'Export' + documentation = ''' + Export images referenced in the input sfmData by transforming + them to adapt to the required target intrinsics. For example, the target + intrinsics may be the same without the distortion. + ''' + + inputs = [ + desc.File( + name="input", + label="SfMData", + description="Input SfMData file. Contains the original intrinsics of the images.", + value="", + ), + desc.File( + name="target", + label="Target", + description="This sfmData file contains the required intrinsics for the output images.", + value="", + ), + desc.ChoiceParam( + name="outputFileType", + label="Output File Type", + description="Output file type for the exported images.", + value="exr", + values=["jpg", "png", "tif", "exr"], + advanced=True, + ), + desc.BoolParam( + name="evCorrection", + label="Correct Images Exposure", + description="Apply a correction on images' exposure value.", + value=False, + advanced=True, + ), + desc.ChoiceParam( + name="namingMode", + label="Naming mode", + description="image naming mode :\n" + " - viewid: viewid.ext.\n" + " - frameid: Frameid.ext.\n" + " - keep: Keep original name.\n", + value="frameid", + values=["viewid", "frameid", "keep"], + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="output", + label="Images Folder", + description="Output folder.", + value="{nodeCacheFolder}", + ), + desc.File( + name="undistorted", + label="Undistorted Images", + description="List of undistorted images.", + semantic="image", + value="{nodeCacheFolder}/.{outputFileTypeValue}", + group="", + advanced=True, + ), + ] diff --git a/meshroom/aliceVision/ExportMatches.py b/meshroom/aliceVision/ExportMatches.py new file mode 100644 index 0000000000..6e10487eea --- /dev/null +++ b/meshroom/aliceVision/ExportMatches.py @@ -0,0 +1,81 @@ +__version__ = "2.0" + +from meshroom.core import desc +from meshroom.core.utils import DESCRIBER_TYPES, VERBOSE_LEVEL + + +class ExportMatches(desc.AVCommandLineNode): + commandLine = 'aliceVision_exportMatches {allParams}' + size = desc.DynamicNodeSize('input') + + category = 'Export' + documentation = ''' + ''' + + inputs = [ + desc.File( + name="input", + label="Input", + description="SfMData file.", + value="", + ), + desc.ChoiceParam( + name="describerTypes", + label="Describer Types", + description="Describer types used to describe an image.", + values=DESCRIBER_TYPES, + value=["dspsift"], + exclusive=False, + joinChar=",", + ), + desc.ListAttribute( + elementDesc=desc.File( + name="featuresFolder", + label="Features Folder", + description="Folder containing some extracted features and descriptors.", + value="", + ), + name="featuresFolders", + label="Features Folders", + description="Folder(s) containing the extracted features and descriptors.", + ), + desc.ListAttribute( + elementDesc=desc.File( + name="matchesFolder", + label="Matches Folder", + description="Folder containing some computed matches.", + value="", + 
), + name="matchesFolders", + label="Matches Folders", + description="Folder(s) in which computed matches are stored.", + ), + desc.File( + name="filterA", + label="Filter A", + description="One item of the pair must match this.", + value="", + ), + desc.File( + name="filterB", + label="Filter B", + description="One item of the pair must match this.", + value="", + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="output", + label="Folder", + description="Output path for the features and descriptors files (*.feat, *.desc).", + value="{nodeCacheFolder}", + ), + ] diff --git a/meshroom/aliceVision/ExportMaya.py b/meshroom/aliceVision/ExportMaya.py new file mode 100644 index 0000000000..1b46005d42 --- /dev/null +++ b/meshroom/aliceVision/ExportMaya.py @@ -0,0 +1,222 @@ +__version__ = "1.0" + +from meshroom.core import desc +from meshroom.core.utils import VERBOSE_LEVEL + +class ExportMaya(desc.Node): + + category = 'Export' + documentation = ''' + Export a Maya script. + This script, executed inside Maya, will gather the elements computed by Meshroom. + ''' + + inputs = [ + desc.File( + name="input", + label="Input SfMData", + description="Input SfMData file.", + value="", + ), + desc.File( + name="alembic", + label="Alembic File", + description="Input alembic file.", + value="", + ), + desc.File( + name="mesh", + label="Input Mesh", + description="Input mesh file.", + value="", + ), + desc.File( + name="images", + label="Undistorted Images", + description="Undistorted images template.", + value="", + ), + desc.BoolParam( + name="generateMaya", + label="Generate Maya Scene", + description="Select to generate the Maya scene in addition to the export of the mel script.", + value=True, + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="meloutput", + label="Mel Script", + description="Generated mel script.", + value="{nodeCacheFolder}/import.mel", + ), + desc.File( + name="mayaoutput", + label="Maya Scene", + description="Generated Maya scene.", + value="{nodeCacheFolder}/scene.mb", + enabled=lambda node: node.generateMaya.value, + ), + ] + + def processChunk(self, chunk): + + import pyalicevision + import pathlib + import inspect + import subprocess + + chunk.logManager.start(chunk.node.verboseLevel.value) + + chunk.logger.info("Open input file") + data = pyalicevision.sfmData.SfMData() + ret = pyalicevision.sfmDataIO.load(data, chunk.node.input.value, pyalicevision.sfmDataIO.ALL) + if not ret: + chunk.logger.error("Cannot open input") + chunk.logManager.end() + raise RuntimeError() + + # Check that we have only one intrinsic + intrinsics = data.getIntrinsics() + if len(intrinsics) > 1: + chunk.logger.error("Only projects with a single intrinsic are supported") + chunk.logManager.end() + raise RuntimeError() + + intrinsicId = next(iter(intrinsics)) + intrinsic = intrinsics[intrinsicId] + w = intrinsic.w() + h = intrinsic.h() + + cam = pyalicevision.camera.Pinhole.cast(intrinsic) + if cam is None: + chunk.logger.error("Intrinsic is not the required pinhole model") + chunk.logManager.end() + raise RuntimeError() + + offset = cam.getOffset() + pix2inches = cam.sensorWidth() / (25.4 * max(w, h)) + ox = -pyalicevision.numeric.getX(offset) *
pix2inches + oy = pyalicevision.numeric.getY(offset) * pix2inches + + scale = cam.getScale() + fx = pyalicevision.numeric.getX(scale) + fy = pyalicevision.numeric.getY(scale) + + + #Retrieve the first frame + + minIntrinsicId = 0 + minFrameId = 0 + minFrameName = '' + first = True + views = data.getViews() + + for viewId in views: + + view = views[viewId] + frameId = view.getFrameId() + intrinsicId = view.getIntrinsicId() + frameName = pathlib.Path(view.getImageInfo().getImagePath()).stem + + if first or frameId < minFrameId: + minFrameId = frameId + minIntrinsicId = intrinsicId + minFrameName = frameName + first = False + + + #Generate the script itself + mayaFileName = chunk.node.mayaoutput.value + header = f''' + file -f -new; + ''' + + footer = f''' + file -rename "{mayaFileName}"; + file -type "mayaBinary"; + file -save; + ''' + + alembic = chunk.node.alembic.value + abcString = f'AbcImport -mode open -fitTimeRange "{alembic}";' + + mesh = chunk.node.mesh.value + objString = f'file -import -type "OBJ" -ignoreVersion -ra true -mbl true -mergeNamespacesOnClash false -namespace "mesh" -options "mo=1" -pr -importTimeRange "combine" "{mesh}";' + + framePath = chunk.node.images.value.replace('', str(minIntrinsicId)).replace('', minFrameName) + + camString = f''' + select -r mvgCameras ; + string $camName[] = `listRelatives`; + + currentTime {minFrameId}; + + imagePlane -c $camName[0] -fileName "{framePath}"; + + setAttr "imagePlaneShape1.useFrameExtension" 1; + setAttr "imagePlaneShape1.offsetX" {ox}; + setAttr "imagePlaneShape1.offsetY" {oy}; + ''' + + ipa = fx / fy + advCamString = '' + + if abs(ipa - 1.0) < 1e-6: + advCamString = f''' + setAttr "imagePlaneShape1.fit" 1; + ''' + else: + advCamString = f''' + setAttr "imagePlaneShape1.fit" 4; + setAttr "imagePlaneShape1.squeezeCorrection" {ipa}; + + select -r $camName[0]; + float $vaperture = `getAttr ".verticalFilmAperture"`; + float $scaledvaperture = $vaperture * {ipa}; + setAttr "imagePlaneShape1.sizeY" $scaledvaperture; + ''' + + with open(chunk.node.meloutput.value, "w") as f: + if chunk.node.generateMaya.value: + f.write(inspect.cleandoc(header) + '\n') + f.write(inspect.cleandoc(abcString) + '\n') + f.write(inspect.cleandoc(objString) + '\n') + f.write(inspect.cleandoc(camString) + '\n') + f.write(inspect.cleandoc(advCamString) + '\n') + if chunk.node.generateMaya.value: + f.write(inspect.cleandoc(footer) + '\n') + + chunk.logger.info("Mel Script generated") + + #Export to maya + if chunk.node.generateMaya.value: + try: + melPath = chunk.node.meloutput.value + cmd = f'maya_batch -batch -script "{melPath}"' + p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + stdout, stderr = p.communicate() + + if len(stdout) > 0: + chunk.logger.info(stdout.decode()) + + rc = p.returncode + if rc != 0: + chunk.logger.error(stderr.decode()) + raise Exception(rc) + + except Exception as e: + chunk.logger.error('Failed to run maya batch : "{}".'.format(str(e))) + raise RuntimeError() + + chunk.logger.info("Maya Scene generated") + + chunk.logManager.end() diff --git a/meshroom/aliceVision/ExportUSD.py b/meshroom/aliceVision/ExportUSD.py new file mode 100644 index 0000000000..c452fde28d --- /dev/null +++ b/meshroom/aliceVision/ExportUSD.py @@ -0,0 +1,46 @@ +__version__ = "1.0" + +from meshroom.core import desc +from meshroom.core.utils import VERBOSE_LEVEL + + +class ExportUSD(desc.AVCommandLineNode): + commandLine = 'aliceVision_exportUSD {allParams}' + size = desc.DynamicNodeSize('input') + + category = 'Utils' + 
documentation = ''' + Export a mesh (OBJ file) to USD format. + ''' + + inputs = [ + desc.File( + name="input", + label="Input", + description="Input mesh file.", + value="", + ), + desc.ChoiceParam( + name="fileType", + label="USD File Format", + description="Output USD file format.", + value="usda", + values=["usda", "usdc", "usdz"] + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="output", + label="Output", + description="Path to the output file.", + value="{nodeCacheFolder}/output.{fileTypeValue}", + ), + ] diff --git a/meshroom/aliceVision/ExtractMetadata.py b/meshroom/aliceVision/ExtractMetadata.py new file mode 100644 index 0000000000..0631a65b09 --- /dev/null +++ b/meshroom/aliceVision/ExtractMetadata.py @@ -0,0 +1,133 @@ +__version__ = "0.1" + +from meshroom.core import desc +from meshroom.core.utils import VERBOSE_LEVEL +from pathlib import Path + +import pyalicevision as av + +import distutils.dir_util as du +import shutil +import glob +import os +import subprocess + + +class ExtractMetadata(desc.Node): + size = desc.DynamicNodeSize("input") + + category = 'Utils' + documentation = ''' +Using exifTool, this node extracts the metadata of all images referenced in an sfmData file and stores them in appropriate files. +''' + + inputs = [ + desc.File( + name="input", + label="Input", + description="SfMData input file.", + value="", + ), + desc.BoolParam( + name="keepFilename", + label="Keep Filename", + description="Keep the filename of the inputs for the outputs.", + value=False, + ), + desc.ChoiceParam( + name="extension", + label="Output File Extension", + description="Metadata file extension.", + value="txt", + values=["txt", "xml", "xmp"], + exclusive=True, + ), + desc.StringParam( + name="arguments", + label="Arguments", + description="ExifTool command arguments.", + value="", + ), + desc.BoolParam( + name="insertInSfm", + label="Update sfmData", + description="Insert the extracted metadata in the sfmData file.", + value=False, + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="output", + label="Result Folder", + description="Output path for the resulting metadata files.", + value="{nodeCacheFolder}", + ), + ] + + def processChunk(self, chunk): + try: + chunk.logManager.start(chunk.node.verboseLevel.value) + + if chunk.node.input.value == "" or chunk.node.input.value[-4:].lower() != '.sfm': + error = 'This node needs to have an sfmData file connected as input.' + chunk.logger.error(error) + raise RuntimeError(error) + + if not os.path.exists(chunk.node.output.value): + os.mkdir(chunk.node.output.value) + + dataAV = av.sfmData.SfMData() + if av.sfmDataIO.load(dataAV, chunk.node.input.value, av.sfmDataIO.ALL): + views = dataAV.getViews() + for id, v in views.items(): + inputFile = v.getImage().getImagePath() + chunk.logger.info(f"Processing {inputFile}") + + if chunk.node.keepFilename.value: + outputMetadataFilename = os.path.join(chunk.node.output.value, Path(inputFile).stem + "." + chunk.node.extension.value) + else: + outputMetadataFilename = os.path.join(chunk.node.output.value, str(id) + "."
+ chunk.node.extension.value) + + if chunk.node.extension.value == 'txt': + cmd = 'exiftool ' + chunk.node.arguments.value.strip() + ' ' + inputFile + ' > ' + outputMetadataFilename + elif chunk.node.extension.value == 'xml': + cmd = 'exiftool -X ' + chunk.node.arguments.value.strip() + ' ' + inputFile + ' > ' + outputMetadataFilename + else: #xmp + cmd = 'exiftool -tagsfromfile ' + inputFile + ' ' + chunk.node.arguments.value.strip() + ' ' + outputMetadataFilename + + chunk.logger.debug(cmd) + error = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE).stderr.read().decode() + + chunk.logger.debug(error) + + if error != "": + chunk.logger.error(error) + raise RuntimeError(error) + if not os.path.exists(outputMetadataFilename): + info = 'No metadata extracted for file ' + inputFile + chunk.logger.info(info) + elif chunk.node.insertInSfm.value: + cmd = 'exiftool ' + chunk.node.arguments.value.strip() + ' ' + inputFile + metadata = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE).stdout.read().decode() + chunk.logger.debug(metadata) + lmeta = metadata.split('\n') + for i in range(1, len(lmeta)-1): + l = lmeta[i].split(':', 1) + v.getImageInfo().addMetadata('ExifTool:'+l[0].strip(), l[1].strip()) + + if chunk.node.insertInSfm.value: + outputSfm = os.path.join(chunk.node.output.value, Path(chunk.node.input.value).stem + ".sfm") + av.sfmDataIO.save(dataAV, outputSfm, av.sfmDataIO.ALL) + + chunk.logger.info('Metadata extraction done') + + finally: + chunk.logManager.end() diff --git a/meshroom/aliceVision/FeatureExtraction.py b/meshroom/aliceVision/FeatureExtraction.py new file mode 100644 index 0000000000..91cba12c86 --- /dev/null +++ b/meshroom/aliceVision/FeatureExtraction.py @@ -0,0 +1,165 @@ +__version__ = "1.3" + +from meshroom.core import desc +from meshroom.core.utils import COLORSPACES, DESCRIBER_TYPES, VERBOSE_LEVEL + + +class FeatureExtraction(desc.AVCommandLineNode): + commandLine = 'aliceVision_featureExtraction {allParams}' + size = desc.DynamicNodeSize('input') + parallelization = desc.Parallelization(blockSize=40) + commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}' + + category = 'Sparse Reconstruction' + documentation = ''' +This node extracts distinctive groups of pixels that are, to some extent, invariant to changing camera viewpoints during image acquisition. +Hence, a feature in the scene should have similar feature descriptions in all images. + +This node implements multiple methods: + * **SIFT** +The most standard method. This is the default and recommended value for all use cases. + * **AKAZE** +AKAZE can be an interesting solution to extract features in challenging conditions. It may be able to match a wider angle than SIFT, but it has drawbacks. +It may extract too many features, and their distribution is not always good. +It is known to be good on challenging surfaces such as skin. + * **CCTAG** +CCTag is a marker type with 3 or 4 crowns. You can put markers in the scene during the shooting session to automatically re-orient and re-scale the scene to a known size. +It is robust to motion-blur, depth-of-field, occlusion. Be careful to have enough white margin around your CCTags.
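+Whatever the method, a grid filter can then spread the kept features over the whole frame (see the gridFiltering option below). A minimal sketch of the idea, keeping only the strongest features per cell (illustrative only, not the actual implementation):
+```python
+from collections import defaultdict
+
+def grid_filter(features, width, height, cells=4, per_cell=2):
+    # features: iterable of (x, y, strength) tuples
+    buckets = defaultdict(list)
+    for x, y, s in features:
+        cell = (int(x * cells / width), int(y * cells / height))
+        buckets[cell].append((s, x, y))
+    kept = []
+    for items in buckets.values():
+        items.sort(reverse=True)  # strongest first within each cell
+        kept.extend((x, y, s) for s, x, y in items[:per_cell])
+    return kept
+```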
+ + +## Online +[https://alicevision.org/#photogrammetry/natural_feature_extraction](https://alicevision.org/#photogrammetry/natural_feature_extraction) +''' + + inputs = [ + desc.File( + name="input", + label="SfMData", + description="Input SfMData file.", + value="", + ), + desc.File( + name="masksFolder", + label="Masks Folder", + description="Use masks to filter features. Filename should be the same or the image UID.", + value="", + ), + desc.ChoiceParam( + name="maskExtension", + label="Mask Extension", + description="File extension for masks.", + value="png", + values=["png", "exr", "jpg"], + ), + desc.BoolParam( + name="maskInvert", + label="Invert Masks", + description="Invert mask values.", + value=False, + ), + desc.ChoiceParam( + name="describerTypes", + label="Describer Types", + description="Describer types used to describe an image.", + values=DESCRIBER_TYPES, + value=["dspsift"], + exclusive=False, + joinChar=",", + exposed=True, + ), + desc.ChoiceParam( + name="describerPreset", + label="Describer Density", + description="Control the ImageDescriber density (low, medium, normal, high, ultra).\n" + "Warning: Use ULTRA only on small datasets.", + value="normal", + values=["low", "medium", "normal", "high", "ultra", "custom"], + group=lambda node: 'allParams' if node.describerPreset.value != 'custom' else None, + ), + desc.IntParam( + name="maxNbFeatures", + label="Max Nb Features", + description="Maximum number of features extracted (0 means default value based on Describer Density).", + value=0, + range=(0, 100000, 1000), + advanced=True, + enabled=lambda node: (node.describerPreset.value == "custom"), + ), + desc.ChoiceParam( + name="describerQuality", + label="Describer Quality", + description="Control the ImageDescriber quality (low, medium, normal, high, ultra).", + value="normal", + values=["low", "medium", "normal", "high", "ultra"], + ), + desc.ChoiceParam( + name="contrastFiltering", + label="Contrast Filtering", + description="Contrast filtering method to ignore features with too low contrast that can be considered as noise:\n" + " - Static: Fixed threshold.\n" + " - AdaptiveToMedianVariance: Based on image content analysis.\n" + " - NoFiltering: Disable contrast filtering.\n" + " - GridSortOctaves: Grid Sort but per octaves (and only per scale at the end).\n" + " - GridSort: Grid sort per octaves and at the end (scale * peakValue).\n" + " - GridSortScaleSteps: Grid sort per octaves and at the end (scale and then peakValue).\n" + " - NonExtremaFiltering: Filter non-extrema peakValues.\n", + value="GridSort", + values=["Static", "AdaptiveToMedianVariance", "NoFiltering", "GridSortOctaves", "GridSort", "GridSortScaleSteps", "GridSortOctaveSteps", "NonExtremaFiltering"], + advanced=True, + ), + desc.FloatParam( + name="relativePeakThreshold", + label="Relative Peak Threshold", + description="Peak threshold relative to median of gradients.", + value=0.01, + range=(0.01, 1.0, 0.001), + advanced=True, + enabled=lambda node: (node.contrastFiltering.value == "AdaptiveToMedianVariance"), + ), + desc.BoolParam( + name="gridFiltering", + label="Grid Filtering", + description="Enable grid filtering. 
Highly recommended to ensure a usable number of features.", + value=True, + advanced=True, + ), + desc.ChoiceParam( + name="workingColorSpace", + label="Working Color Space", + description="Allows you to choose the color space in which the data are processed.", + values=COLORSPACES, + value="sRGB", + ), + desc.BoolParam( + name="forceCpuExtraction", + label="Force CPU Extraction", + description="Use only CPU feature extraction.", + value=True, + advanced=True, + ), + desc.IntParam( + name="maxThreads", + label="Max Nb Threads", + description="Maximum number of threads to run simultaneously (0 for automatic mode).", + value=0, + range=(0, 24, 1), + invalidate=False, + advanced=True, + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="output", + label="Features Folder", + description="Output path for the features and descriptors files (*.feat, *.desc).", + value="{nodeCacheFolder}", + ), + ] diff --git a/meshroom/aliceVision/FeatureMatching.py b/meshroom/aliceVision/FeatureMatching.py new file mode 100644 index 0000000000..a8bc0e19a4 --- /dev/null +++ b/meshroom/aliceVision/FeatureMatching.py @@ -0,0 +1,210 @@ +__version__ = "2.0" + +from meshroom.core import desc +from meshroom.core.utils import DESCRIBER_TYPES, VERBOSE_LEVEL + + +class FeatureMatching(desc.AVCommandLineNode): + commandLine = 'aliceVision_featureMatching {allParams}' + size = desc.DynamicNodeSize('input') + parallelization = desc.Parallelization(blockSize=20) + commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}' + + category = 'Sparse Reconstruction' + documentation = ''' +This node performs the matching of all features between the candidate image pairs. + +It is performed in 2 steps: + + 1/ **Photometric Matches** + +It performs the photometric matches between the sets of feature descriptors from the 2 input images. +For each feature descriptor on the first image, it looks for the 2 closest descriptors in the second image and uses a relative threshold between them. +This assumption kills features on repetitive structures but has proved to be a robust criterion. + + 2/ **Geometric Filtering** + +It performs a geometric filtering of the photometric match candidates. +It uses the feature positions in the images to perform a geometric filtering by using epipolar geometry in an outlier detection framework +called RANSAC (RANdom SAmple Consensus). It randomly selects a small set of feature correspondences and computes the fundamental (or essential) matrix, +then it checks the number of features that validate this model and iterates through the RANSAC framework.
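+A minimal sketch of the relative threshold used in the photometric step (Lowe's ratio test; the ratio corresponds to the distanceRatio parameter below), illustrative only:
+```python
+def passes_ratio_test(best_dist, second_best_dist, ratio=0.8):
+    # Keep a match only if the closest descriptor is clearly better than
+    # the second closest; ambiguous matches on repetitive structures fail.
+    return best_dist < ratio * second_best_dist
+```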
+ +## Online +[https://alicevision.org/#photogrammetry/feature_matching](https://alicevision.org/#photogrammetry/feature_matching) +''' + + inputs = [ + desc.File( + name="input", + label="SfMData", + description="Input SfMData file.", + value="", + ), + desc.ListAttribute( + elementDesc=desc.File( + name="featuresFolder", + label="Features Folder", + description="Folder containing some extracted features and descriptors.", + value="", + ), + name="featuresFolders", + label="Features Folders", + description="Folder(s) containing the extracted features and descriptors.", + exposed=True, + ), + desc.File( + name="imagePairsList", + label="Image Pairs", + description="Path to a file which contains the list of image pairs to match.", + value="", + ), + desc.ChoiceParam( + name="describerTypes", + label="Describer Types", + description="Describer types used to describe an image.", + values=DESCRIBER_TYPES, + value=["dspsift"], + exclusive=False, + joinChar=",", + exposed=True, + ), + desc.ChoiceParam( + name="photometricMatchingMethod", + label="Photometric Matching Method", + description="For scalar based regions descriptors:\n" + " - BRUTE_FORCE_L2: L2 BruteForce matching\n" + " - ANN_L2: L2 Approximate Nearest Neighbor matching\n" + " - CASCADE_HASHING_L2: L2 Cascade Hashing matching\n" + " - FAST_CASCADE_HASHING_L2: L2 Cascade Hashing with precomputed hashed regions (faster than CASCADE_HASHING_L2 but use more memory)\n" + "For Binary based descriptors:\n" + " - BRUTE_FORCE_HAMMING: BruteForce Hamming matching", + value="ANN_L2", + values=["BRUTE_FORCE_L2", "ANN_L2", "CASCADE_HASHING_L2", "FAST_CASCADE_HASHING_L2", "BRUTE_FORCE_HAMMING"], + advanced=True, + ), + desc.ChoiceParam( + name="geometricEstimator", + label="Geometric Estimator", + description="Geometric estimator:\n" + " - acransac: A-Contrario Ransac.\n" + " - loransac: LO-Ransac (only available for 'fundamental_matrix' model).", + value="acransac", + values=["acransac", "loransac"], + advanced=True, + ), + desc.ChoiceParam( + name="geometricFilterType", + label="Geometric Filter Type", + description="Geometric validation method to filter features matches:\n" + " - fundamental_matrix\n" + " - fundamental_with_distortion\n" + " - essential_matrix\n" + " - homography_matrix\n" + " - homography_growing\n" + " - no_filtering", + value="fundamental_matrix", + values=["fundamental_matrix", "fundamental_with_distortion", "essential_matrix", "homography_matrix", "homography_growing", "no_filtering"], + advanced=True, + ), + desc.FloatParam( + name="distanceRatio", + label="Distance Ratio", + description="Distance ratio to discard non meaningful matches.", + value=0.8, + range=(0.0, 1.0, 0.01), + advanced=True, + ), + desc.IntParam( + name="maxIteration", + label="Max Iterations", + description="Maximum number of iterations allowed in the Ransac step.", + value=50000, + range=(1, 100000, 1), + advanced=True, + ), + desc.FloatParam( + name="geometricError", + label="Geometric Validation Error", + description="Maximum error (in pixels) allowed for features matching during geometric verification.\n" + "If set to 0, it will select a threshold according to the localizer estimator used\n" + "(if ACRansac, it will analyze the input data to select the optimal value).", + value=0.0, + range=(0.0, 10.0, 0.1), + advanced=True, + ), + desc.FloatParam( + name="knownPosesGeometricErrorMax", + label="Known Poses Geometric Error Max", + description="Maximum error (in pixels) allowed for features matching guided by geometric information from known camera 
poses.\n" + "If set to 0 it lets the ACRansac select an optimal value.", + value=5.0, + range=(0.0, 100.0, 1.0), + advanced=True, + ), + desc.FloatParam( + name="minRequired2DMotion", + label="Minimal 2D Motion", + description="Filter out matches without enough 2D motion (threshold in pixels).\n" + "Use -1 to disable this filter.\n" + "Useful for filtering the background during acquisition with a turntable and a static camera.", + value=-1.0, + range=(0.0, 10.0, 1.0), + ), + desc.IntParam( + name="maxMatches", + label="Max Matches", + description="Maximum number of matches to keep.", + value=0, + range=(0, 10000, 1), + advanced=True, + ), + desc.BoolParam( + name="savePutativeMatches", + label="Save Putative Matches", + description="Save putative matches.", + value=False, + advanced=True, + ), + desc.BoolParam( + name="crossMatching", + label="Cross Matching", + description="Ensure that the matching process is symmetric (same matches for I->J than for J->I).", + value=False, + ), + desc.BoolParam( + name="guidedMatching", + label="Guided Matching", + description="Use the found model to improve the pairwise correspondences.", + value=False, + ), + desc.BoolParam( + name="matchFromKnownCameraPoses", + label="Match From Known Camera Poses", + description="Enable the usage of geometric information from known camera poses to guide the feature matching.\n" + "If some cameras have unknown poses (so there is no geometric prior), the standard feature matching will be performed.", + value=False, + ), + desc.BoolParam( + name="exportDebugFiles", + label="Export Debug Files", + description="Export debug files (svg, dot).", + value=False, + invalidate=False, + advanced=True, + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + outputs = [ + desc.File( + name="output", + label="Matches Folder", + description="Path to a folder in which the computed matches are stored.", + value="{nodeCacheFolder}", + ), + ] diff --git a/meshroom/aliceVision/FeatureRepeatability.py b/meshroom/aliceVision/FeatureRepeatability.py new file mode 100644 index 0000000000..a57ac67735 --- /dev/null +++ b/meshroom/aliceVision/FeatureRepeatability.py @@ -0,0 +1,120 @@ +__version__ = "1.1" + +from meshroom.core import desc +from meshroom.core.utils import DESCRIBER_TYPES, VERBOSE_LEVEL + + +class FeatureRepeatability(desc.AVCommandLineNode): + commandLine = 'aliceVision_samples_repeatabilityDataset {allParams}' + size = desc.DynamicNodeSize('input') + # parallelization = desc.Parallelization(blockSize=40) + # commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}' + + category = 'Utils' + documentation = ''' +Compare feature/descriptor matching repeatability on some dataset with known homography motions. 
+'''
+
+    inputs = [
+        desc.File(
+            name="input",
+            label="Input Folder",
+            description="Input folder with evaluation datasets.",
+            value="",
+        ),
+        desc.ChoiceParam(
+            name="describerTypes",
+            label="Describer Types",
+            description="Describer types used to describe an image.",
+            values=DESCRIBER_TYPES,
+            value=["sift"],
+            exclusive=False,
+            joinChar=",",
+        ),
+        desc.ChoiceParam(
+            name="describerPreset",
+            label="Describer Density",
+            description="Control the ImageDescriber density (low, medium, normal, high, ultra).\n"
+                        "Warning: Use ULTRA only on small datasets.",
+            value="normal",
+            values=["low", "medium", "normal", "high", "ultra"],
+        ),
+        desc.ChoiceParam(
+            name="describerQuality",
+            label="Describer Quality",
+            description="Control the ImageDescriber quality (low, medium, normal, high, ultra).",
+            value="normal",
+            values=["low", "medium", "normal", "high", "ultra"],
+        ),
+        desc.ChoiceParam(
+            name="contrastFiltering",
+            label="Contrast Filtering",
+            description="Contrast filtering method to ignore features with too low contrast that can be considered as noise:\n"
+                        " - Static: Fixed threshold.\n"
+                        " - AdaptiveToMedianVariance: Based on image content analysis.\n"
+                        " - NoFiltering: Disable contrast filtering.\n"
+                        " - GridSortOctaves: Grid Sort but per octaves (and only per scale at the end).\n"
+                        " - GridSort: Grid sort per octaves and at the end (scale * peakValue).\n"
+                        " - GridSortScaleSteps: Grid sort per octaves and at the end (scale and then peakValue).\n"
+                        " - NonExtremaFiltering: Filter non-extrema peakValues.",
+            value="Static",
+            values=["Static", "AdaptiveToMedianVariance", "NoFiltering", "GridSortOctaves", "GridSort", "GridSortScaleSteps", "GridSortOctaveSteps", "NonExtremaFiltering"],
+            advanced=True,
+        ),
+        desc.FloatParam(
+            name="relativePeakThreshold",
+            label="Relative Peak Threshold",
+            description="Peak threshold relative to the median of gradients.",
+            value=0.01,
+            range=(0.01, 1.0, 0.001),
+            advanced=True,
+            enabled=lambda node: (node.contrastFiltering.value == "AdaptiveToMedianVariance"),
+        ),
+        desc.BoolParam(
+            name="gridFiltering",
+            label="Grid Filtering",
+            description="Enable grid filtering. 
Highly recommended to ensure a usable number of features.", + value=True, + advanced=True, + ), + desc.BoolParam( + name="forceCpuExtraction", + label="Force CPU Extraction", + description="Use only CPU feature extraction.", + value=True, + invalidate=False, + advanced=True, + ), + desc.IntParam( + name="invalidate", + label="Invalidate", + description="Invalidate.", + value=0, + range=(0, 10000, 1), + group="", + ), + desc.StringParam( + name="comments", + label="Comments", + description="Comments.", + value="", + group="", + invalidate=False, + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="output", + label="Folder", + description="Output path for the features and descriptors files (*.feat, *.desc).", + value="{nodeCacheFolder}", + ), + ] diff --git a/meshroom/aliceVision/GlobalRotationEstimating.py b/meshroom/aliceVision/GlobalRotationEstimating.py new file mode 100644 index 0000000000..2b2235c1b7 --- /dev/null +++ b/meshroom/aliceVision/GlobalRotationEstimating.py @@ -0,0 +1,66 @@ +__version__ = "1.0" + +from meshroom.core import desc +from meshroom.core.utils import VERBOSE_LEVEL + + +class GlobalRotationEstimating(desc.AVCommandLineNode): + commandLine = "aliceVision_globalRotationEstimating {allParams}" + + category = "Sparse Reconstruction" + documentation = ''' +Estimate the global rotation given tracks. +''' + + inputs = [ + desc.File( + name="input", + label="SfMData", + description="SfMData file.", + value="", + ), + desc.File( + name="tracksFilename", + label="Tracks File", + description="Tracks file.", + value="", + ), + desc.File( + name="pairs", + label="Pairs File", + description="Information on pairs.", + value="", + ), + desc.ChoiceParam( + name="rotationAveragingMethod", + label="Rotation Averaging Method", + description="Method for rotation averaging:\n" + " - L1 minimization\n" + " - L2 minimization", + values=["L1_minimization", "L2_minimization"], + value="L2_minimization", + ), + desc.FloatParam( + name="angularTolerance", + label="Angular Tolerance", + description="Angular (in degrees) tolerance for a given triplet.", + value=5.0, + range=(0.0, 180.0, 1.0), + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="output", + label="SfMData", + description="Path to the output SfMData file.", + value="{nodeCacheFolder}/sfm.abc", + ), + ] diff --git a/meshroom/aliceVision/GlobalSfM.py b/meshroom/aliceVision/GlobalSfM.py new file mode 100644 index 0000000000..94521f2bb8 --- /dev/null +++ b/meshroom/aliceVision/GlobalSfM.py @@ -0,0 +1,111 @@ +__version__ = "1.0" + + +from meshroom.core import desc +from meshroom.core.utils import DESCRIBER_TYPES, VERBOSE_LEVEL + + +class GlobalSfM(desc.AVCommandLineNode): + commandLine = "aliceVision_globalSfM {allParams}" + size = desc.DynamicNodeSize("input") + + category = "Sparse Reconstruction" + documentation = """ +Performs the Structure-From-Motion with a global approach. +It is known to be faster but less robust to challenging datasets than the Incremental approach. 
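+
+The rotation averaging step solves for globally consistent absolute rotations from pairwise relative ones. As a
+building block for intuition only, the L2 (chordal) mean of several rotations can be sketched with quaternions
+(a minimal sketch under simplified assumptions, not the solver used by aliceVision_globalSfM):
+
+    import numpy as np
+
+    def average_rotation_l2(quats):
+        # Markley's method: the chordal L2 mean of unit quaternions is the
+        # principal eigenvector of the sum of the outer products q q^T.
+        M = sum(np.outer(q, q) for q in quats)
+        eigenvalues, eigenvectors = np.linalg.eigh(M)
+        return eigenvectors[:, -1]  # np.linalg.eigh sorts eigenvalues in ascending order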
+""" + + inputs = [ + desc.File( + name="input", + label="SfMData", + description="Input SfMData file.", + value="", + ), + desc.ListAttribute( + elementDesc=desc.File( + name="featuresFolder", + label="Features Folder", + description="Folder containing some extracted features.", + value="", + ), + name="featuresFolders", + label="Features Folders", + description="Folder(s) containing the extracted features.", + ), + desc.ListAttribute( + elementDesc=desc.File( + name="matchesFolder", + label="Matches Folder", + description="Folder containing some computed matches.", + value="", + ), + name="matchesFolders", + label="Matches Folders", + description="Folder(s) in which computed matches are stored.", + ), + desc.ChoiceParam( + name="describerTypes", + label="Describer Types", + description="Describer types used to describe an image.", + values=DESCRIBER_TYPES, + value=["dspsift"], + exclusive=False, + joinChar=",", + ), + desc.ChoiceParam( + name="rotationAveraging", + label="Rotation Averaging Method", + description="Method for rotation averaging:\n" + " - L1 minimization\n" + " - L2 minimization", + values=["L1_minimization", "L2_minimization"], + value="L2_minimization", + ), + desc.ChoiceParam( + name="translationAveraging", + label="Translation Averaging Method", + description="Method for translation averaging:\n" + " - L1 minimization\n" + " - L2 minimization of sum of squared Chordal distances\n" + " - L1 soft minimization", + values=["L1_minimization", "L2_minimization", "L1_soft_minimization"], + value="L1_soft_minimization", + ), + desc.BoolParam( + name="lockAllIntrinsics", + label="Lock All Intrinsic Camera Parameters", + description="Force to keep all the intrinsics parameters of the cameras (focal length, \n" + "principal point, distortion if any) constant during the reconstruction.\n" + "This may be helpful if the input cameras are already fully calibrated.", + value=False, + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="output", + label="SfMData", + description="Path to the output SfMData file.", + value="{nodeCacheFolder}/sfm.abc", + ), + desc.File( + name="outputViewsAndPoses", + label="Output Poses", + description="Path to the output SfMData file with cameras (views and poses).", + value="{nodeCacheFolder}/cameras.sfm", + ), + desc.File( + name="extraInfoFolder", + label="Folder", + description="Folder for intermediate reconstruction files and additional reconstruction information files.", + value="{nodeCacheFolder}", + ), + ] diff --git a/meshroom/aliceVision/ImageMasking.py b/meshroom/aliceVision/ImageMasking.py new file mode 100644 index 0000000000..e2e1c10b8e --- /dev/null +++ b/meshroom/aliceVision/ImageMasking.py @@ -0,0 +1,136 @@ +__version__ = "3.0" + +from meshroom.core import desc +from meshroom.core.utils import VERBOSE_LEVEL + + +class ImageMasking(desc.AVCommandLineNode): + commandLine = 'aliceVision_imageMasking {allParams}' + size = desc.DynamicNodeSize('input') + parallelization = desc.Parallelization(blockSize=40) + commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}' + + documentaiton = ''' + ''' + + inputs = [ + desc.File( + name="input", + label="SfMData", + description="Input SfMData file.", + value="", + ), + desc.ChoiceParam( + name="algorithm", + label="Algorithm", + description="", + value="HSV", + values=["HSV", "AutoGrayscaleThreshold"], 
+        ),
+        desc.GroupAttribute(
+            name="hsv",
+            label="HSV Parameters",
+            description="Values to select:\n"
+                        " - Green: default values\n"
+                        " - White: Tolerance = 1, minSaturation = 0, maxSaturation = 0.1, minValue = 0.8, maxValue = 1\n"
+                        " - Black: Tolerance = 1, minSaturation = 0, maxSaturation = 0.1, minValue = 0, maxValue = 0.2",
+            group=None,
+            enabled=lambda node: node.algorithm.value == "HSV",
+            groupDesc=[
+                desc.FloatParam(
+                    name="hsvHue",
+                    label="Hue",
+                    description="Hue value to isolate in [0,1] range.\n"
+                                "0 = red, 0.33 = green, 0.66 = blue, 1 = red.",
+                    semantic="color/hue",
+                    value=0.33,
+                    range=(0.0, 1.0, 0.01),
+                ),
+                desc.FloatParam(
+                    name="hsvHueRange",
+                    label="Tolerance",
+                    description="Tolerance around the hue value to isolate.",
+                    value=0.1,
+                    range=(0.0, 1.0, 0.01),
+                ),
+                desc.FloatParam(
+                    name="hsvMinSaturation",
+                    label="Min Saturation",
+                    description="Hue is meaningless if saturation is low. Do not mask pixels below this threshold.",
+                    value=0.3,
+                    range=(0.0, 1.0, 0.01),
+                ),
+                desc.FloatParam(
+                    name="hsvMaxSaturation",
+                    label="Max Saturation",
+                    description="Do not mask pixels above this threshold. It might be useful to mask white/black pixels.",
+                    value=1.0,
+                    range=(0.0, 1.0, 0.01),
+                ),
+                desc.FloatParam(
+                    name="hsvMinValue",
+                    label="Min Value",
+                    description="Hue is meaningless if the value is low. Do not mask pixels below this threshold.",
+                    value=0.3,
+                    range=(0.0, 1.0, 0.01),
+                ),
+                desc.FloatParam(
+                    name="hsvMaxValue",
+                    label="Max Value",
+                    description="Do not mask pixels above this threshold. It might be useful to mask white/black pixels.",
+                    value=1.0,
+                    range=(0.0, 1.0, 0.01),
+                ),
+            ],
+        ),
+        desc.BoolParam(
+            name="invert",
+            label="Invert",
+            description="If selected, the selected area is ignored.\n"
+                        "If not, only the selected area is considered.",
+            value=True,
+        ),
+        desc.IntParam(
+            name="growRadius",
+            label="Grow Radius",
+            description="Grow the selected area.\n"
+                        "It can be used to fill holes; use shrinkRadius afterwards to restore the initial contours.",
+            value=0,
+            range=(0, 50, 1),
+        ),
+        desc.IntParam(
+            name="shrinkRadius",
+            label="Shrink Radius",
+            description="Shrink the selected area.",
+            value=0,
+            range=(0, 50, 1),
+        ),
+        desc.File(
+            name="depthMapFolder",
+            label="Depth Mask Folder",
+            description="Depth mask folder.",
+            value="",
+        ),
+        desc.StringParam(
+            name="depthMapExp",
+            label="Depth Mask Expression",
+            description="Depth mask expression, like '{inputFolder}/{stem}-depth.{ext}'.",
+            value="",
+        ),
+        desc.ChoiceParam(
+            name="verboseLevel",
+            label="Verbose Level",
+            description="Verbosity level (fatal, error, warning, info, debug, trace).",
+            values=VERBOSE_LEVEL,
+            value="info",
+        ),
+    ]
+
+    outputs = [
+        desc.File(
+            name="output",
+            label="Output",
+            description="Output folder.",
+            value="{nodeCacheFolder}",
+        ),
+    ]
diff --git a/meshroom/aliceVision/ImageMatching.py b/meshroom/aliceVision/ImageMatching.py
new file mode 100644
index 0000000000..4fea8330f3
--- /dev/null
+++ b/meshroom/aliceVision/ImageMatching.py
@@ -0,0 +1,140 @@
+__version__ = "2.0"
+
+from meshroom.core import desc
+from meshroom.core.utils import VERBOSE_LEVEL
+
+
+class ImageMatching(desc.AVCommandLineNode):
+    commandLine = 'aliceVision_imageMatching {allParams}'
+    size = desc.DynamicNodeSize('input')
+
+    category = 'Sparse Reconstruction'
+    documentation = '''
+The goal of this node is to select the image pairs to match. The ambition is to find the images that are looking at the same areas of the scene.
+
+Thanks to this node, the FeatureMatching node will only compute the matches between the selected image pairs.
+
+It provides multiple methods:
+ * **VocabularyTree**
+It uses image retrieval techniques to find images that share some content without the cost of resolving all feature matches in detail.
+Each image is represented by a compact image descriptor, which allows the distance between all image descriptors to be computed very efficiently.
+If your scene contains fewer than "Voc Tree: Minimal Number of Images", all image pairs will be selected.
+ * **Sequential**
+If your input is a video sequence, you can use this option to link images between them over time.
+ * **SequentialAndVocabularyTree**
+Combines the sequential approach with the VocabularyTree to enable connections between keyframes at different times.
+ * **Exhaustive**
+Exports all image pairs.
+ * **Frustum**
+If images have known poses, computes the intersection between camera frustums to create the list of image pairs.
+ * **FrustumOrVocabularyTree**
+If images have known poses, uses frustum intersection; otherwise, uses VocabularyTree.
+
+## Online
+[https://alicevision.org/#photogrammetry/image_matching](https://alicevision.org/#photogrammetry/image_matching)
+'''
+
+    inputs = [
+        desc.File(
+            name="input",
+            label="SfMData",
+            description="Input SfMData file.",
+            value="",
+        ),
+        desc.ListAttribute(
+            elementDesc=desc.File(
+                name="featuresFolder",
+                label="Features Folder",
+                description="Folder containing some extracted features and descriptors.",
+                value="",
+            ),
+            name="featuresFolders",
+            label="Features Folders",
+            description="Folder(s) containing the extracted features and descriptors.",
+            exposed=True,
+        ),
+        desc.ChoiceParam(
+            name="method",
+            label="Method",
+            description="Method used to select the image pairs to match:\n"
+                        " - VocabularyTree: It uses image retrieval techniques to find images that share some content without the cost of resolving all \n"
+                        "feature matches in detail. Each image is represented by a compact image descriptor, which allows the distance between all \n"
+                        "image descriptors to be computed very efficiently. 
If your scene contains less than 'Voc Tree: Minimal Number of Images', all image pairs will be selected.\n" + " - Sequential: If your input is a video sequence, you can use this option to link images between them over time.\n" + " - SequentialAndVocabularyTree: Combines sequential approach with VocTree to enable connections between keyframes at different times.\n" + " - Exhaustive: Export all image pairs.\n" + " - Frustum: If images have known poses, computes the intersection between cameras frustums to create the list of image pairs.\n" + " - FrustumOrVocabularyTree: If images have known poses, use frustum intersection else use VocabularyTree.\n", + value="SequentialAndVocabularyTree", + values=["VocabularyTree", "Sequential", "SequentialAndVocabularyTree", "Exhaustive", "Frustum", "FrustumOrVocabularyTree"], + ), + desc.File( + name="tree", + label="Voc Tree: Tree", + description="Input name for the vocabulary tree file.", + value="${ALICEVISION_VOCTREE}", + invalidate=False, + enabled=lambda node: "VocabularyTree" in node.method.value, + ), + desc.File( + name="weights", + label="Voc Tree: Weights", + description="Input name for the weight file.\n" + "If not provided, the weights will be computed on the database built with the provided set.", + value="", + advanced=True, + enabled=lambda node: "VocabularyTree" in node.method.value, + ), + desc.IntParam( + name="minNbImages", + label="Voc Tree: Minimum Number Of Images", + description="Minimum number of images to use the vocabulary tree.\n" + "If we have less features than this threshold, we will compute all matching combinations.", + value=200, + range=(0, 500, 1), + advanced=True, + enabled=lambda node: "VocabularyTree" in node.method.value, + ), + desc.IntParam( + name="maxDescriptors", + label="Voc Tree: Max Descriptors", + description="Limit the number of descriptors you load per image. 0 means no limit.", + value=500, + range=(0, 100000, 1), + advanced=True, + enabled=lambda node: "VocabularyTree" in node.method.value, + ), + desc.IntParam( + name="nbMatches", + label="Voc Tree: Nb Matches", + description="The number of matches to retrieve for each image. (If 0, it will retrieve all the matches).", + value=40, + range=(0, 1000, 1), + advanced=True, + enabled=lambda node: "VocabularyTree" in node.method.value, + ), + desc.IntParam( + name="nbNeighbors", + label="Sequential: Nb Neighbors", + description="The number of neighbors to retrieve for each image. 
(If 0, it will retrieve all the neighbors).", + value=5, + range=(0, 1000, 1), + advanced=True, + enabled=lambda node: "Sequential" in node.method.value, + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="output", + label="Image Pairs", + description="Filepath to the output file with the list of selected image pairs.", + value="{nodeCacheFolder}/imageMatches.txt", + ), + ] diff --git a/meshroom/aliceVision/ImageMatchingMultiSfM.py b/meshroom/aliceVision/ImageMatchingMultiSfM.py new file mode 100644 index 0000000000..097bf8a501 --- /dev/null +++ b/meshroom/aliceVision/ImageMatchingMultiSfM.py @@ -0,0 +1,152 @@ +__version__ = "1.0" + +from meshroom.core import desc +from meshroom.core.utils import VERBOSE_LEVEL + + +class ImageMatchingMultiSfM(desc.AVCommandLineNode): + commandLine = 'aliceVision_imageMatching {allParams}' + # use both SfM inputs to define Node's size + size = desc.MultiDynamicNodeSize(['input', 'inputB']) + + category = 'Sparse Reconstruction' + documentation = ''' +The goal of this node is to select the image pairs to match in the context of an SfM augmentation. +The ambition is to find the images that are looking to the same areas of the scene. +Thanks to this node, the FeatureMatching node will only compute the matches between the selected image pairs. + +## Online +[https://alicevision.org/#photogrammetry/image_matching](https://alicevision.org/#photogrammetry/image_matching) +''' + inputs = [ + desc.File( + name="input", + label="Input A", + description="First input SfMData file.", + value="", + ), + desc.File( + name="inputB", + label="Input B", + description="Second input SfMData file.", + value="", + ), + desc.ListAttribute( + elementDesc=desc.File( + name="featuresFolder", + label="Features Folder", + description="Folder containing some extracted features and descriptors.", + value="", + ), + name="featuresFolders", + label="Features Folders", + description="Folder(s) containing the extracted features and descriptors.", + exposed=True, + ), + desc.ChoiceParam( + name="method", + label="Method", + description="Method used to select the image pairs to match:\n" + " - VocabularyTree: It uses image retrieval techniques to find images that share some content " + "without the cost of resolving all \n" + "feature matches in details. Each image is represented in a compact image descriptor which " + "allows to compute the distance between all \n" + "images descriptors very efficiently. If your scene contains less than 'Voc Tree: Minimal " + "Number of Images', all image pairs will be selected.\n" + " - SequentialAndVocabularyTree: Combines sequential approach with VocTree to enable " + "connections between keyframes at different times.\n" + " - Exhaustive: Export all image pairs.\n" + " - Frustum: If images have known poses, computes the intersection between cameras frustums " + "to create the list of image pairs.\n" + " - FrustumOrVocabularyTree: If images have known poses, use frustum intersection. 
Otherwise, " + "use VocabularyTree.\n", + value="SequentialAndVocabularyTree", + values=["VocabularyTree", "SequentialAndVocabularyTree", "Exhaustive", "Frustum"], + ), + desc.File( + name="tree", + label="Voc Tree: Tree", + description="Input name for the vocabulary tree file.", + value="${ALICEVISION_VOCTREE}", + invalidate=False, + enabled=lambda node: "VocabularyTree" in node.method.value, + ), + desc.File( + name="weights", + label="Voc Tree: Weights", + description="Input name for the weight file.\n" + "If not provided, the weights will be computed on the database built with the provided set.", + value="", + advanced=True, + enabled=lambda node: "VocabularyTree" in node.method.value, + ), + desc.ChoiceParam( + name="matchingMode", + label="Matching Mode", + description="The mode to combine image matching between the input SfMData A and B:\n" + "- 'a/a+a/b' for A with A + A with B.\n" + "- 'a/ab' for A with A and B.\n" + "- 'a/b' for A with B.", + value="a/a+a/b", + values=["a/a+a/b","a/ab", "a/b"], + ), + desc.IntParam( + name="minNbImages", + label="Voc Tree: Minimum Number Of Images", + description="Minimum number of images to use the vocabulary tree.\n" + "If we have less features than this threshold, we will compute all the matching combinations.", + value=200, + range=(0, 500, 1), + advanced=True, + enabled=lambda node: "VocabularyTree" in node.method.value, + ), + desc.IntParam( + name="maxDescriptors", + label="Voc Tree: Max Descriptors", + description="Limit the number of descriptors you load per image. 0 means no limit.", + value=500, + range=(0, 100000, 1), + advanced=True, + enabled=lambda node: "VocabularyTree" in node.method.value, + ), + desc.IntParam( + name="nbMatches", + label="Voc Tree: Nb Matches", + description="The number of matches to retrieve for each image. (If 0, it will retrieve all the matches).", + value=40, + range=(0, 1000, 1), + advanced=True, + enabled=lambda node: "VocabularyTree" in node.method.value, + ), + desc.IntParam( + name="nbNeighbors", + label="Sequential: Nb Neighbors", + description="The number of neighbors to retrieve for each image. (If 0, it will retrieve all the neighbors).", + value=5, + range=(0, 1000, 1), + advanced=True, + enabled=lambda node: "Sequential" in node.method.value, + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="output", + label="List File", + description="Filepath to the output file with the list of selected image pairs.", + value="{nodeCacheFolder}/imageMatches.txt", + ), + desc.File( + name="outputCombinedSfM", + label="Combined SfM", + description="Path for the combined SfMData file.", + value="{nodeCacheFolder}/combineSfM.sfm", + ), + ] diff --git a/meshroom/aliceVision/ImageProcessing.py b/meshroom/aliceVision/ImageProcessing.py new file mode 100644 index 0000000000..1aa76d105b --- /dev/null +++ b/meshroom/aliceVision/ImageProcessing.py @@ -0,0 +1,628 @@ +__version__ = "3.3" + +from meshroom.core import desc +from meshroom.core.utils import COLORSPACES, EXR_STORAGE_DATA_TYPE, RAW_COLOR_INTERPRETATION, VERBOSE_LEVEL + +import os.path + + +def outputImagesValueFunct(attr): + basename = os.path.basename(attr.node.input.value) + fileStem = os.path.splitext(basename)[0] + inputExt = os.path.splitext(basename)[1] + outputExt = ('.' 
+ attr.node.extension.value) if attr.node.extension.value else None
+
+    if inputExt in ['.abc', '.sfm']:
+        # If we have an SfM in input: name the outputs after the view UID, or keep the image file name if requested
+        fileStem = '*' if attr.node.keepImageFilename.value else '<VIEW_ID>'
+        return "{nodeCacheFolder}/" + fileStem + (outputExt or '.*')
+
+    if inputExt:
+        # If we have one or multiple files in input
+        return "{nodeCacheFolder}/" + fileStem + (outputExt or inputExt)
+
+    if '*' in fileStem:
+        # The fileStem of the input param is a regular expression,
+        # so even if there is no file extension,
+        # we consider that the expression represents files.
+        return "{nodeCacheFolder}/" + fileStem + (outputExt or '.*')
+
+    # No extension and no expression means that the input param is a folder path
+    return "{nodeCacheFolder}/" + '*' + (outputExt or '.*')
+
+
+class ImageProcessing(desc.AVCommandLineNode):
+    commandLine = 'aliceVision_imageProcessing {allParams}'
+    size = desc.DynamicNodeSize('input')
+    # parallelization = desc.Parallelization(blockSize=40)
+    # commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}'
+
+    category = 'Utils'
+    documentation = '''
+Convert or apply filtering to the input images.
+'''
+
+    inputs = [
+        desc.File(
+            name="input",
+            label="Input",
+            description="SfMData file input, image filenames or regex(es) on the image file path.\n"
+                        "Supported regex:\n"
+                        " - '#' matches a single digit.\n"
+                        " - '@' matches one or more digits.\n"
+                        " - '?' matches one character.\n"
+                        " - '*' matches zero or more characters.",
+            value="",
+        ),
+        desc.ListAttribute(
+            elementDesc=desc.File(
+                name="inputFolder",
+                label="Input Folder",
+                description="Folder containing images.",
+                value="",
+            ),
+            name="inputFolders",
+            label="Input Images Folders",
+            description="Use images from specific folder(s).",
+        ),
+        desc.ListAttribute(
+            elementDesc=desc.StringParam(
+                name="metadataFolder",
+                label="Metadata Folder",
+                description="Specific folder containing images with metadata.",
+                value="",
+            ),
+            name="metadataFolders",
+            label="Input Metadata Folders",
+            description="Use images metadata from specific folder(s).",
+        ),
+        desc.ChoiceParam(
+            name="extension",
+            label="Output File Extension",
+            description="Output image file extension.\n"
+                        "If unset, the output file extension will match the input's if possible.",
+            value="",
+            values=["", "exr", "jpg", "tiff", "png"],
+        ),
+        desc.BoolParam(
+            name="reconstructedViewsOnly",
+            label="Only Reconstructed Views",
+            description="Only process reconstructed views.",
+            value=False,
+        ),
+        desc.BoolParam(
+            name="keepImageFilename",
+            label="Keep Image Name",
+            description="Keep the original image name instead of the view name.",
+            value=False,
+        ),
+        desc.BoolParam(
+            name="reorient",
+            label="Automatic Reorientation",
+            description="Automatic image reorientation.",
+            value=False,
+        ),
+        desc.BoolParam(
+            name="fixNonFinite",
+            label="Fix Non-Finite",
+            description="Fix non-finite pixels based on the average of the neighboring pixels.",
+            value=False,
+        ),
+        desc.BoolParam(
+            name="exposureCompensation",
+            label="Exposure Compensation",
+            description="Exposure compensation (only valid for SfMData).",
+            value=False,
+        ),
+        desc.BoolParam(
+            name="rawAutoBright",
+            label="RAW Auto Bright",
+            description="Enable automatic exposure adjustment for RAW images.",
+            value=False,
+        ),
+        desc.FloatParam(
+            name="rawExposureAdjust",
+            label="RAW Exposure Adjustment",
+            description="Manual exposure adjustment in f-stops for RAW images.",
+            value=0.0,
+            range=(-2.0, 3.0, 0.125),
+        ),
+        desc.GroupAttribute(
+            name="lensCorrection",
+            
label="Lens Correction", + description="Automatic lens correction settings.", + joinChar=":", + groupDesc=[ + desc.BoolParam( + name="lensCorrectionEnabled", + label="Enable", + description="Enable lens correction.", + value=False, + ), + desc.BoolParam( + name="geometry", + label="Geometry", + description="Geometry correction if a model is available in the SfMData.", + value=False, + enabled=lambda node: node.lensCorrection.lensCorrectionEnabled.value, + ), + desc.BoolParam( + name="vignetting", + label="Vignetting", + description="Vignetting correction if the model parameters are available in the metadata.", + value=False, + enabled=lambda node: node.lensCorrection.lensCorrectionEnabled.value, + ), + desc.BoolParam( + name="chromaticAberration", + label="Chromatic Aberration", + description="Chromatic aberration (fringing) correction if the model parameters are available in the metadata.", + value=False, + enabled=lambda node: node.lensCorrection.lensCorrectionEnabled.value, + ), + ], + ), + desc.FloatParam( + name="scaleFactor", + label="Scale Factor", + description="Scale factor.", + value=1.0, + range=(0.0, 1.0, 0.01), + ), + desc.IntParam( + name="maxWidth", + label="Max Width", + description="Maximum width of the output images (0: ignored).", + value=0, + range=(0, 10000, 1), + ), + desc.IntParam( + name="maxHeight", + label="Max Height", + description="Maximum height of the output images (0: ignored).", + value=0, + range=(0, 10000, 1), + ), + desc.FloatParam( + name="contrast", + label="Contrast", + description="Contrast.", + value=1.0, + range=(0.0, 100.0, 0.1), + ), + desc.IntParam( + name="medianFilter", + label="Median Filter", + description="Median filter.", + value=0, + range=(0, 10, 1), + ), + desc.BoolParam( + name="fillHoles", + label="Fill Holes", + description="Fill holes based on the alpha channel.\n" + "Note: It will enable 'fixNonFinite', as it is required for the image pyramid construction used to fill holes.", + value=False, + ), + desc.GroupAttribute( + name="sharpenFilter", + label="Sharpen Filter", + description="Sharpen filter parameters.", + joinChar=":", + groupDesc=[ + desc.BoolParam( + name="sharpenFilterEnabled", + label="Enable", + description="Use sharpen filter.", + value=False, + ), + desc.IntParam( + name="width", + label="Width", + description="Sharpening width.", + value=3, + range=(1, 9, 2), + enabled=lambda node: node.sharpenFilter.sharpenFilterEnabled.value, + ), + desc.FloatParam( + name="contrast", + label="Contrast", + description="Sharpening contrast.", + value=1.0, + range=(0.0, 100.0, 0.1), + enabled=lambda node: node.sharpenFilter.sharpenFilterEnabled.value, + ), + desc.FloatParam( + name="threshold", + label="Threshold", + description="Sharpening threshold.", + value=0.0, + range=(0.0, 1.0, 0.01), + enabled=lambda node: node.sharpenFilter.sharpenFilterEnabled.value, + ), + ], + ), + desc.GroupAttribute( + name="bilateralFilter", + label="Bilateral Filter", + description="Bilateral filter parameters.", + joinChar=":", + groupDesc=[ + desc.BoolParam( + name="bilateralFilterEnabled", + label="Enable", + description="Use bilateral filter.", + value=False, + ), + desc.IntParam( + name="bilateralFilterDistance", + label="Distance", + description="Diameter of each pixel neighborhood that is used during bilateral filtering.\n" + "Could be very slow for large filters, so it is recommended to use 5.", + value=0, + range=(0, 9, 1), + enabled=lambda node: node.bilateralFilter.bilateralFilterEnabled.value, + ), + desc.FloatParam( + 
name="bilateralFilterSigmaSpace", + label="Sigma Coordinate Space", + description="Bilateral filter sigma in the coordinate space.", + value=0.0, + range=(0.0, 150.0, 0.01), + enabled=lambda node: node.bilateralFilter.bilateralFilterEnabled.value, + ), + desc.FloatParam( + name="bilateralFilterSigmaColor", + label="Sigma Color Space", + description="Bilateral filter sigma in the color space.", + value=0.0, + range=(0.0, 150.0, 0.01), + enabled=lambda node: node.bilateralFilter.bilateralFilterEnabled.value, + ), + ], + ), + desc.GroupAttribute( + name="claheFilter", + label="Clahe Filter", + description="Clahe filter parameters.", + joinChar=":", + groupDesc=[ + desc.BoolParam( + name="claheEnabled", + label="Enable", + description="Use Contrast Limited Adaptive Histogram Equalization (CLAHE) filter.", + value=False, + ), + desc.FloatParam( + name="claheClipLimit", + label="Clip Limit", + description="Threshold for contrast limiting.", + value=4.0, + range=(0.0, 8.0, 1.0), + enabled=lambda node: node.claheFilter.claheEnabled.value, + ), + desc.IntParam( + name="claheTileGridSize", + label="Tile Grid Size", + description="Size of the grid for histogram equalization.\n" + "Input image will be divided into equally sized rectangular tiles.", + value=8, + range=(4, 64, 4), + enabled=lambda node: node.claheFilter.claheEnabled.value, + ), + ], + ), + desc.GroupAttribute( + name="noiseFilter", + label="Noise Filter", + description="Noise filter parameters.", + joinChar=":", + groupDesc=[ + desc.BoolParam( + name="noiseEnabled", + label="Enable", + description="Add noise.", + value=False, + ), + desc.ChoiceParam( + name="noiseMethod", + label="Method", + description="There are several noise types to choose from:\n" + " - uniform: adds noise values uniformly distributed on range [A,B).\n" + " - gaussian: adds Gaussian (normal distribution) noise values with mean value A and standard deviation B.\n" + " - salt: changes to value A a portion of pixels given by B.\n", + value="uniform", + values=["uniform", "gaussian", "salt"], + enabled=lambda node: node.noiseFilter.noiseEnabled.value, + ), + desc.FloatParam( + name="noiseA", + label="A", + description="Parameter that has a different interpretation depending on the chosen method:\n" + " - uniform: lower bound of the range on which the noise is uniformly distributed.\n" + " - gaussian: the mean value of the Gaussian noise.\n" + " - salt: the value of the specified portion of pixels.", + value=0.0, + range=(0.0, 1.0, 0.0001), + enabled=lambda node: node.noiseFilter.noiseEnabled.value, + ), + desc.FloatParam( + name="noiseB", + label="B", + description="Parameter that has a different interpretation depending on the chosen method:\n" + " - uniform: higher bound of the range on which the noise is uniformly distributed.\n" + " - gaussian: the standard deviation of the Gaussian noise.\n" + " - salt: the portion of pixels to set to a specified value.", + value=1.0, + range=(0.0, 1.0, 0.0001), + enabled=lambda node: node.noiseFilter.noiseEnabled.value, + ), + desc.BoolParam( + name="noiseMono", + label="Mono", + description="If selected, a single noise value will be applied to all channels.\n" + "Otherwise, a separate noise value will be computed for each channel.", + value=True, + enabled=lambda node: node.noiseFilter.noiseEnabled.value, + ), + ], + ), + desc.GroupAttribute( + name="nlmFilter", + label="NL Means Denoising (8 bits)", + description="NL Means Denoising Parameters.\n" + "This implementation only works on 8-bit images, so the colors can be reduced 
and clamped.", + joinChar=":", + groupDesc=[ + desc.BoolParam( + name="nlmFilterEnabled", + label="Enable", + description="Use Non-Local Mean Denoising from OpenCV to denoise images.", + value=False, + ), + desc.FloatParam( + name="nlmFilterH", + label="H", + description="Parameter regulating the filter strength for the luminance component.\n" + "Bigger H value perfectly removes noise but also removes image details,\n" + "smaller H value preserves details but also preserves some noise.", + value=5.0, + range=(1.0, 1000.0, 0.01), + enabled=lambda node: node.nlmFilter.nlmFilterEnabled.value, + ), + desc.FloatParam( + name="nlmFilterHColor", + label="HColor", + description="Parameter regulating filter strength for color components. Not necessary for grayscale images.\n" + "Bigger HColor value perfectly removes noise but also removes image details,\n" + "smaller HColor value preserves details but also preserves some noise.", + value=10.0, + range=(0.0, 1000.0, 0.01), + enabled=lambda node: node.nlmFilter.nlmFilterEnabled.value, + ), + desc.IntParam( + name="nlmFilterTemplateWindowSize", + label="Template Window Size", + description="Size in pixels of the template patch that is used to compute weights. Should be odd.", + value=7, + range=(1, 101, 2), + enabled=lambda node: node.nlmFilter.nlmFilterEnabled.value, + ), + desc.IntParam( + name="nlmFilterSearchWindowSize", + label="Search Window Size", + description="Size in pixels of the window that is used to compute weighted average for a given pixel.\n" + "Should be odd. Affect performance linearly: greater searchWindowsSize - greater denoising time.", + value=21, + range=(1, 1001, 2), + enabled=lambda node: node.nlmFilter.nlmFilterEnabled.value, + ), + ], + ), + desc.GroupAttribute( + name="parFilter", + label="Pixel Aspect Ratio", + description="Pixel Aspect Ratio parameters.", + joinChar=":", + groupDesc=[ + desc.BoolParam( + name="parEnabled", + label="Enable", + description="Apply pixel aspect ratio.", + value=False, + ), + desc.BoolParam( + name="parRowDecimation", + label="Row decimation", + description="If selected, reduce image height by decimating the number of rows.\n" + "Otherwise, increase width by upsampling image columns.", + value=False, + enabled=lambda node: node.parFilter.parEnabled.value, + ), + ], + ), + desc.ChoiceParam( + name="outputFormat", + label="Output Image Format", + description="Allows you to choose the format of the output image.", + value="rgba", + values=["rgba", "rgb", "grayscale"], + ), + desc.ChoiceParam( + name="inputColorSpace", + label="Input Color Space", + description="Allows you to force the color space of the input image.", + values=COLORSPACES, + value="AUTO", + ), + desc.ChoiceParam( + name="outputColorSpace", + label="Output Color Space", + description="Allows you to choose the color space of the output image.", + values=COLORSPACES, + value="AUTO", + ), + desc.ChoiceParam( + name="workingColorSpace", + label="Working Color Space", + description="Allows you to choose the color space in which the data are processed.", + values=COLORSPACES, + value="Linear", + enabled=lambda node: not node.applyDcpMetadata.value, + ), + desc.ChoiceParam( + name="rawColorInterpretation", + label="RAW Color Interpretation", + description="Allows you to choose how RAW data are color processed.", + values=RAW_COLOR_INTERPRETATION, + value="DCPLinearProcessing" if os.environ.get("ALICEVISION_COLOR_PROFILE_DB", "") else "LibRawWhiteBalancing", + ), + desc.BoolParam( + name="applyDcpMetadata", + label="Apply DCP Metadata", 
+ description="If the image contains some DCP metadata, then generate a DCP profile from them and apply it to the image content.", + value=False, + ), + desc.File( + name="colorProfileDatabase", + label="Color Profile Database", + description="Color profile database directory path.", + value="${ALICEVISION_COLOR_PROFILE_DB}", + invalidate=False, + enabled=lambda node: (node.rawColorInterpretation.value == "DCPLinearProcessing") or (node.rawColorInterpretation.value == "DCPMetadata"), + ), + desc.BoolParam( + name="errorOnMissingColorProfile", + label="Error On Missing DCP Color Profile", + description="If a color profile database is specified but no color profile is found for at least one image, then an error is thrown.", + value=True, + enabled=lambda node: (node.rawColorInterpretation.value == "DCPLinearProcessing") or (node.rawColorInterpretation.value == "DCPMetadata"), + ), + desc.BoolParam( + name="useDCPColorMatrixOnly", + label="Use DCP Color Matrix Only", + description="Use only the Color Matrix information from the DCP and ignore the Forward Matrix.", + value=True, + enabled=lambda node: (node.rawColorInterpretation.value == "DCPLinearProcessing") or (node.rawColorInterpretation.value == "DCPMetadata"), + ), + desc.BoolParam( + name="doWBAfterDemosaicing", + label="WB After Demosaicing", + description="Do White Balance after demosaicing, just before DCP profile application.", + value=False, + enabled=lambda node: (node.rawColorInterpretation.value == "DCPLinearProcessing") or (node.rawColorInterpretation.value == "DCPMetadata"), + ), + desc.ChoiceParam( + name="demosaicingAlgo", + label="Demosaicing Algorithm", + description="LibRaw demosaicing algorithm to use.", + value="AHD", + values=["linear", "VNG", "PPG", "AHD", "DCB", "AHD-Mod", "AFD", "VCD", "Mixed", "LMMSE", "AMaZE", "DHT", "AAHD", "none"], + ), + desc.ChoiceParam( + name="highlightMode", + label="Highlight Mode", + description="LibRaw highlight mode:\n" + " - 0: Clip (default)\n" + " - 1: Unclip\n" + " - 2: Blend\n" + " - 3-9: Rebuild", + value=0, + values=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], + ), + desc.FloatParam( + name="correlatedColorTemperature", + label="Illuminant Color Temperature", + description="Scene illuminant color temperature in Kelvin.\n" + "A negative or null value indicates that the metadata information will be used.", + value=-1.0, + range=(-1.0, 10000.0, 1.0), + ), + desc.File( + name="lensCorrectionProfileInfo", + label="Lens Correction Profile Info", + description="Lens Correction Profile filepath or database directory.", + value="${ALICEVISION_LENS_PROFILE_INFO}", + invalidate=False, + ), + desc.BoolParam( + name="lensCorrectionProfileSearchIgnoreCameraModel", + label="LCP Generic Search", + description="The lens name and camera maker are used to match the LCP database, but the camera model is ignored.", + value=True, + advanced=True, + ), + desc.ChoiceParam( + name="storageDataType", + label="Storage Data Type For EXR Output", + description="Storage image data type for EXR outputs:\n" + " - float: Use full floating point (32 bits per channel).\n" + " - half: Use half float (16 bits per channel).\n" + " - halfFinite: Use half float, but clamp values to avoid non-finite values.\n" + " - auto: Use half float if all values can fit, else use full float.", + values=EXR_STORAGE_DATA_TYPE, + value="float", + ), + desc.ChoiceParam( + name="exrCompressionMethod", + label="EXR Compression Method", + description="Compression method for EXR output images.", + value="auto", + values=["none", "auto", "rle", "zip", 
"zips", "piz", "pxr24", "b44", "b44a", "dwaa", "dwab"], + ), + desc.IntParam( + name="exrCompressionLevel", + label="EXR Compression Level", + description="Level of compression for EXR images. The range depends on the used method.\n" + "For the zip/zips methods, values must be between 1 and 9.\n" + "A value of 0 will be ignored, and the default value for the selected method will be used.", + value=0, + range=(0, 500, 1), + enabled=lambda node: node.exrCompressionMethod.value in ["dwaa", "dwab", "zip", "zips"], + ), + desc.BoolParam( + name="jpegCompress", + label="JPEG Compress", + description="Enable JPEG compression.", + value=True, + ), + desc.IntParam( + name="jpegQuality", + label="JPEG Quality", + description="JPEG images quality after compression.", + value=90, + range=(0, 100, 1), + enabled=lambda node: node.jpegCompress.value, + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="outSfMData", + label="SfMData", + description="Output SfMData file.", + value=lambda attr: ("{nodeCacheFolder}/" + os.path.basename(attr.node.input.value)) if (os.path.splitext(attr.node.input.value)[1] in [".abc", ".sfm"]) else "", + group="", # do not export on the command line + ), + desc.File( + name="output", + label="Folder", + description="Output images folder.", + value="{nodeCacheFolder}", + ), + desc.File( + name="outputImages", + label="Images", + description="Output images.", + semantic="image", + value=outputImagesValueFunct, + group="", # do not export on the command line + ), + ] diff --git a/meshroom/aliceVision/ImageSegmentation.py b/meshroom/aliceVision/ImageSegmentation.py new file mode 100644 index 0000000000..889d931d8d --- /dev/null +++ b/meshroom/aliceVision/ImageSegmentation.py @@ -0,0 +1,94 @@ +__version__ = "1.2" + +from meshroom.core import desc +from meshroom.core.utils import VERBOSE_LEVEL + + +class ImageSegmentation(desc.AVCommandLineNode): + commandLine = 'aliceVision_imageSegmentation {allParams}' + size = desc.DynamicNodeSize('input') + gpu = desc.Level.INTENSIVE + parallelization = desc.Parallelization(blockSize=50) + commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}' + + category = 'Utils' + documentation = ''' +Generate a mask with segmented labels for each pixel. +''' + + inputs = [ + desc.File( + name="input", + label="Input", + description="SfMData file input.", + value="", + ), + desc.File( + name="modelPath", + label="Segmentation Model", + description="Weights file for the internal model.", + value="${ALICEVISION_SEMANTIC_SEGMENTATION_MODEL}", + ), + desc.ChoiceParam( + name="validClasses", + label="Classes", + description="Classes names which are to be considered.", + value=["person"], + values=[ + "__background__", + "aeroplane", + "bicycle", "bird", "boat", "bottle", "bus", + "car", "cat", "chair", "cow", + "diningtable", "dog", + "horse", + "motorbike", + "person", "pottedplant", + "sheep", "sofa", + "train", "tvmonitor" + ], + exclusive=False, + ), + desc.BoolParam( + name="maskInvert", + label="Invert Masks", + description="Invert mask values. 
If selected, the pixels corresponding to the mask will be set to 0 instead of 255.",
+            value=False,
+        ),
+        desc.BoolParam(
+            name="useGpu",
+            label="Use GPU",
+            description="Use GPU for computation if available.",
+            value=True,
+            invalidate=False,
+        ),
+        desc.BoolParam(
+            name="keepFilename",
+            label="Keep Filename",
+            description="Keep the input filename for the output masks.",
+            value=False,
+        ),
+        desc.ChoiceParam(
+            name="verboseLevel",
+            label="Verbose Level",
+            description="Verbosity level (fatal, error, warning, info, debug, trace).",
+            values=VERBOSE_LEVEL,
+            value="info",
+        ),
+    ]
+
+    outputs = [
+        desc.File(
+            name="output",
+            label="Masks Folder",
+            description="Output path for the masks.",
+            value="{nodeCacheFolder}",
+        ),
+        desc.File(
+            name="masks",
+            label="Masks",
+            description="Generated segmentation masks.",
+            semantic="image",
+            value=lambda attr: "{nodeCacheFolder}/<VIEW_ID>.exr" if not attr.node.keepFilename.value else "{nodeCacheFolder}/<FILESTEM>.exr",
+            group="",
+        ),
+    ]
diff --git a/meshroom/aliceVision/ImportE57.py b/meshroom/aliceVision/ImportE57.py
new file mode 100644
index 0000000000..ef1ac204ff
--- /dev/null
+++ b/meshroom/aliceVision/ImportE57.py
@@ -0,0 +1,65 @@
+__version__ = "1.0"
+
+from meshroom.core import desc
+from meshroom.core.utils import VERBOSE_LEVEL
+
+
+class ImportE57(desc.AVCommandLineNode):
+    commandLine = 'aliceVision_importE57 {allParams}'
+    size = desc.DynamicNodeSize('input')
+
+    category = 'Utils'
+    documentation = '''
+Import an E57 file and generate an SfMData.
+'''
+
+    inputs = [
+        desc.ListAttribute(
+            elementDesc=desc.File(
+                name="inputFile",
+                label="E57 File",
+                description="Path to an E57 file.",
+                value="",
+            ),
+            name="input",
+            label="Input Files",
+            description="Set of E57 files in the same reference frame.",
+        ),
+        desc.FloatParam(
+            name="maxDensity",
+            label="Points Density",
+            description="Ensure no point has a neighbor closer than maxDensity meters.",
+            value=0.01,
+            range=(0.0, 0.2, 0.001),
+        ),
+        desc.FloatParam(
+            name="minIntensity",
+            label="Laser Intensity Lower Limit",
+            description="Ensure no point has an intensity lower than this value.",
+            value=0.03,
+            range=(0.0, 1.0, 0.01),
+        ),
+        desc.IntParam(
+            name="maxPointsPerBlock",
+            label="Points Limit",
+            description="Limit the number of points per computation region (for memory usage; 0 means no limit).",
+            value=5000000,
+            range=(0, 10000000, 100000),
+        ),
+        desc.ChoiceParam(
+            name="verboseLevel",
+            label="Verbose Level",
+            description="Verbosity level (fatal, error, warning, info, debug, trace).",
+            values=VERBOSE_LEVEL,
+            value="info",
+        ),
+    ]
+
+    outputs = [
+        desc.File(
+            name="output",
+            label="Output",
+            description="Path to the output JSON file.",
+            value="{nodeCacheFolder}/inputset.json",
+        ),
+    ]
diff --git a/meshroom/aliceVision/ImportKnownPoses.py b/meshroom/aliceVision/ImportKnownPoses.py
new file mode 100644
index 0000000000..0a7e5e9866
--- /dev/null
+++ b/meshroom/aliceVision/ImportKnownPoses.py
@@ -0,0 +1,44 @@
+__version__ = "1.0"
+
+from meshroom.core import desc
+from meshroom.core.utils import VERBOSE_LEVEL
+
+
+class ImportKnownPoses(desc.AVCommandLineNode):
+    commandLine = 'aliceVision_importKnownPoses {allParams}'
+    size = desc.DynamicNodeSize('sfmData')
+
+    documentation = '''
+    Import known poses from various file formats such as XMP or JSON.
+    '''
+
+    inputs = [
+        desc.File(
+            name="sfmData",
+            label="SfMData",
+            description="Input SfMData file.",
+            value="",
+        ),
+        desc.File(
+            name="knownPosesData",
+            label="Known Poses Data",
+            description="Known poses data in the JSON or XMP format.",
+            value="",
+        ),
+        desc.ChoiceParam(
+            name="verboseLevel",
+            label="Verbose Level",
+            description="Verbosity level (fatal, error, warning, info, debug, trace).",
+            values=VERBOSE_LEVEL,
+            value="info",
+        ),
+    ]
+
+    outputs = [
+        desc.File(
+            name="output",
+            label="Output",
+            description="Path to the output SfMData file.",
+            value="{nodeCacheFolder}/sfmData.abc",
+        ),
+    ]
diff --git a/meshroom/aliceVision/IntrinsicsTransforming.py b/meshroom/aliceVision/IntrinsicsTransforming.py
new file mode 100644
index 0000000000..223851afbb
--- /dev/null
+++ b/meshroom/aliceVision/IntrinsicsTransforming.py
@@ -0,0 +1,67 @@
+__version__ = "1.0"
+
+from meshroom.core import desc
+from meshroom.core.utils import VERBOSE_LEVEL
+
+
+class IntrinsicsTransforming(desc.AVCommandLineNode):
+    commandLine = 'aliceVision_intrinsicsTransforming {allParams}'
+    size = desc.DynamicNodeSize('input')
+
+    category = 'Utils'
+    documentation = '''
+    Transforms all intrinsics in the SfMData to a new camera type.
+    '''
+
+    inputs = [
+        desc.File(
+            name="input",
+            label="Input SfMData",
+            description="Input SfMData file.",
+            value="",
+        ),
+        desc.File(
+            name="inputTracks",
+            label="Input Tracks",
+            description="Input tracks file.",
+            value="",
+        ),
+        desc.ChoiceParam(
+            name="type",
+            label="Camera Type",
+            description="Mathematical model used to represent a camera:\n"
+                        " - pinhole: Simplest projective camera model without optical distortion "
+                        "(focal and optical center).\n"
+                        " - equidistant: Model suited for fisheye lenses (equidistant projection).\n"
+                        " - equirectangular: Projection model used in panoramas.\n",
+            value="pinhole",
+            values=["pinhole", "equidistant", "equirectangular"],
+        ),
+        desc.FloatParam(
+            name="fakeFov",
+            label="Virtual FOV",
+            description="Virtual field of view (in degrees) to use when the input intrinsic is not a pinhole model but the output is.",
+            value=90.0,
+            range=(1.0, 179.0, 0.1),
+        ),
+        desc.ChoiceParam(
+            name="verboseLevel",
+            label="Verbose Level",
+            description="Verbosity level (fatal, error, warning, info, debug, trace).",
+            values=VERBOSE_LEVEL,
+            value="info",
+        ),
+    ]
+
+    outputs = [
+        desc.File(
+            name="output",
+            label="Output SfMData",
+            description="Output SfMData file.",
+            value="{nodeCacheFolder}/sfmData.abc",
+        ),
+        desc.File(
+            name="outputTracks",
+            label="Output Tracks",
+            description="Output tracks file.",
+            value="{nodeCacheFolder}/tracksFile.json",
+        ),
+    ]
diff --git a/meshroom/aliceVision/KeyframeSelection.py b/meshroom/aliceVision/KeyframeSelection.py
new file mode 100644
index 0000000000..6fb405774d
--- /dev/null
+++ b/meshroom/aliceVision/KeyframeSelection.py
@@ -0,0 +1,409 @@
+__version__ = "5.0"
+
+from meshroom.core import desc
+from meshroom.core.utils import EXR_STORAGE_DATA_TYPE, VERBOSE_LEVEL
+
+# List of supported video extensions (provided by OpenImageIO)
+videoExts = [".avi", ".mov", ".mp4", ".m4a", ".m4v", ".3gp", ".3g2", ".mj2", ".mpg"]
+
+
+class KeyframeSelectionNodeSize(desc.DynamicNodeSize):
+    def computeSize(self, node):
+        inputPathsSize = super(KeyframeSelectionNodeSize, self).computeSize(node)
+        s = 0
+        finalSize = 0
+        defaultParam = self._param
+
+        # Compute the size for each entry in the list of input paths
+        for input in node.attribute("inputPaths").value:
+            self._param = input.getFullName()
+            s = s + super(KeyframeSelectionNodeSize, self).computeSize(node)
+
+        # Retrieve the maximum 
number of keyframes for the smart selection (which is high by default) + maxFramesSmart = node.attribute("selectionMethod.smartSelection.maxNbOutFrames").value + + # If the smart selection is enabled and the number of input frames is available (s is not equal to the number of input paths), + # set the size as the minimum between the number of input frames and maximum number of output keyframes. If the number of + # input frames is not available, set the size to the maximum number of output keyframes. + smartSelectionOn = node.attribute("selectionMethod.useSmartSelection").value + if smartSelectionOn: + if s != inputPathsSize: + finalSize = min(s, maxFramesSmart) + else: + finalSize = maxFramesSmart + + # If the smart selection is not enabled, the maximum number of output keyframes for the regular mode can be used + # if and only if it has been set, in the same way as for the smart selection. If the maximum number of frames has + # not been set, then the size is either the minimum between the maximum number of output keyframes for the smart + # selection and the number of input frames if it is available, or the maximum number of output keyframes for the + # smart selection if the number of input frames is not available. + else: + maxFrames = node.attribute("selectionMethod.regularSelection.maxNbOutFrames").value + if maxFrames > 0 and s != inputPathsSize: + finalSize = min(s, maxFrames) + elif maxFrames > 0 and s == inputPathsSize: + finalSize = maxFrames + elif maxFrames <= 0 and s != inputPathsSize: + finalSize = min(s, maxFramesSmart) + else: + finalSize = maxFramesSmart + + # Reset the param used to compute size to the default one: if the size is computed again, + # this will prevent having an inputPathsSize that is erroneous + self._param = defaultParam + return finalSize + + +class KeyframeSelection(desc.AVCommandLineNode): + commandLine = 'aliceVision_keyframeSelection {allParams}' + size = KeyframeSelectionNodeSize('inputPaths') + + category = 'Utils' + documentation = ''' +Allows to extract keyframes from a video and insert metadata. +It can extract frames from a synchronized multi-cameras rig. + +You can extract frames at regular interval by configuring only the min/maxFrameStep. 
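+'''

The branching rules spelled out in the computeSize comments above can be condensed into a small pure function. An editorial sketch with a hypothetical helper (not part of the patch), where `frames_known` mirrors the `s != inputPathsSize` test:

    def keyframe_node_size(nb_frames, nb_paths, use_smart, max_smart, max_regular):
        # nb_frames mirrors `s`; when it equals the number of input paths,
        # the real frame counts could not be determined.
        frames_known = nb_frames != nb_paths
        if use_smart:
            return min(nb_frames, max_smart) if frames_known else max_smart
        if max_regular > 0:
            return min(nb_frames, max_regular) if frames_known else max_regular
        # Regular mode without a set limit falls back to the smart-selection maximum.
        return min(nb_frames, max_smart) if frames_known else max_smart

    assert keyframe_node_size(500, 1, True, 2000, 0) == 500     # smart, frame count known
    assert keyframe_node_size(500, 1, False, 2000, 100) == 100  # regular, capped at 100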
+
+    inputs = [
+        desc.ListAttribute(
+            elementDesc=desc.File(
+                name="inputPath",
+                label="Input Path",
+                description="Input path.",
+                value="",
+            ),
+            name="inputPaths",
+            label="Input Paths",
+            description="Input video files, image sequence directories or SfMData file.",
+        ),
+        desc.ListAttribute(
+            elementDesc=desc.File(
+                name="brand",
+                label="Brand",
+                description="Camera brand.",
+                value="",
+            ),
+            name="brands",
+            label="Brands",
+            description="Camera brands.",
+        ),
+        desc.ListAttribute(
+            elementDesc=desc.File(
+                name="model",
+                label="Model",
+                description="Camera model.",
+                value="",
+            ),
+            name="models",
+            label="Models",
+            description="Camera models.",
+        ),
+        desc.ListAttribute(
+            elementDesc=desc.FloatParam(
+                name="mmFocal",
+                label="Focal",
+                description="Focal in mm (will be used if not 0).",
+                value=0.0,
+                range=(0.0, 500.0, 1.0),
+            ),
+            name="mmFocals",
+            label="Focals",
+            description="Focals in mm (will be used if not 0).",
+        ),
+        desc.File(
+            name="sensorDbPath",
+            label="Sensor Database",
+            description="Camera sensor width database path.",
+            value="${ALICEVISION_SENSOR_DB}",
+        ),
+        desc.ListAttribute(
+            elementDesc=desc.File(
+                name="masks",
+                label="Masks Path",
+                description="Directory containing masks to apply to the frames.",
+                value="",
+            ),
+            name="maskPaths",
+            label="Masks",
+            description="Masks (e.g. segmentation masks) used to exclude some parts of the frames from the score computations\n"
+                        "for the smart keyframe selection.",
+            enabled=lambda node: node.selectionMethod.useSmartSelection.value,
+        ),
+        desc.GroupAttribute(
+            name="selectionMethod",
+            label="Keyframe Selection Method",
+            description="Select the regular or smart method for the keyframe selection.\n"
+                        "- With the regular method, keyframes are selected regularly over the sequence with respect to the set parameters.\n"
+                        "- With the smart method, keyframes are selected based on their sharpness and optical flow scores.",
+            group=None,  # skip group from command line
+            groupDesc=[
+                desc.BoolParam(
+                    name="useSmartSelection",
+                    label="Use Smart Keyframe Selection",
+                    description="Use the smart keyframe selection.",
+                    value=True,
+                ),
+                desc.GroupAttribute(
+                    name="regularSelection",
+                    label="Regular Keyframe Selection",
+                    description="Parameters for the regular keyframe selection.\n"
+                                "Keyframes are selected regularly over the sequence with respect to the set parameters.",
+                    group=None,  # skip group from command line
+                    enabled=lambda node: node.selectionMethod.useSmartSelection.value is False,
+                    groupDesc=[
+                        desc.IntParam(
+                            name="minFrameStep",
+                            label="Min Frame Step",
+                            description="Minimum number of frames between two keyframes.",
+                            value=12,
+                            range=(1, 1000, 1),
+                            enabled=lambda node: node.regularSelection.enabled,
+                        ),
+                        desc.IntParam(
+                            name="maxFrameStep",
+                            label="Max Frame Step",
+                            description="Maximum number of frames between two keyframes.
Ignored if equal to 0.", + value=0, + range=(0, 1000, 1), + enabled=lambda node: node.regularSelection.enabled, + ), + desc.IntParam( + name="maxNbOutFrames", + label="Max Nb Output Frames", + description="Maximum number of output frames (0 = no limit).\n" + "'minFrameStep' and 'maxFrameStep' will always be respected, so combining them with this parameter\n" + "might cause the selection to stop before reaching the end of the input sequence(s).", + value=0, + range=(0, 10000, 1), + enabled=lambda node: node.regularSelection.enabled, + ), + ], + ), + desc.GroupAttribute( + name="smartSelection", + label="Smart Keyframe Selection", + description="Parameters for the smart keyframe selection.\n" + "Keyframes are selected based on their sharpness and optical flow scores.", + group=None, # skip group from command line + enabled=lambda node: node.selectionMethod.useSmartSelection.value, + groupDesc=[ + desc.FloatParam( + name="pxDisplacement", + label="Pixel Displacement", + description="The percentage of pixels in the frame that need to have moved since the last keyframe to be considered for the selection.", + value=10.0, + range=(0.0, 100.0, 1.0), + enabled=lambda node: node.smartSelection.enabled, + ), + desc.IntParam( + name="minNbOutFrames", + label="Min Nb Output Frames", + description="Minimum number of frames selected to be keyframes.", + value=40, + range=(1, 100, 1), + enabled=lambda node: node.smartSelection.enabled, + ), + desc.IntParam( + name="maxNbOutFrames", + label="Max Nb Output Frames", + description="Maximum number of frames selected to be keyframes.", + value=2000, + range=(1, 10000, 1), + enabled=lambda node: node.smartSelection.enabled, + ), + desc.IntParam( + name="rescaledWidthSharpness", + label="Rescaled Frame's Width For Sharpness", + description="Width, in pixels, of the frame used for the sharpness score computation after a rescale.\n" + "Aspect ratio will be preserved. No rescale will be performed if equal to 0.", + value=720, + range=(0, 4000, 1), + enabled=lambda node: node.smartSelection.enabled, + advanced=True, + ), + desc.IntParam( + name="rescaledWidthFlow", + label="Rescaled Frame's Width For Motion", + description="Width, in pixels, of the frame used for the motion score computation after a rescale.\n" + "Aspect ratio will be preserved. 
No rescale will be performed if equal to 0.",
+                            value=720,
+                            range=(0, 4000, 1),
+                            enabled=lambda node: node.smartSelection.enabled,
+                            advanced=True,
+                        ),
+                        desc.IntParam(
+                            name="sharpnessWindowSize",
+                            label="Sharpness Window Size",
+                            description="The size, in pixels, of the sliding window used to evaluate a frame's sharpness.",
+                            value=200,
+                            range=(1, 10000, 1),
+                            enabled=lambda node: node.smartSelection.enabled,
+                            advanced=True,
+                        ),
+                        desc.IntParam(
+                            name="flowCellSize",
+                            label="Optical Flow Cell Size",
+                            description="The size, in pixels, of the cells within a frame in which the optical flow score is evaluated.",
+                            value=90,
+                            range=(10, 2000, 1),
+                            enabled=lambda node: node.smartSelection.enabled,
+                            advanced=True,
+                        ),
+                        desc.IntParam(
+                            name="minBlockSize",
+                            label="Multi-Threading Minimum Block Size",
+                            description="The minimum number of frames to process for a thread to be spawned.\n"
+                                        "If using all the available threads implies processing less than this value in every thread, fewer threads should be spawned,\n"
+                                        "and each will process at least 'minBlockSize' frames (except maybe the very last thread, which might process fewer).",
+                            value=10,
+                            range=(1, 1000, 1),
+                            invalidate=False,
+                            enabled=lambda node: node.smartSelection.enabled,
+                            advanced=True,
+                        ),
+                    ],
+                ),
+            ],
+        ),
+        desc.BoolParam(
+            name="renameKeyframes",
+            label="Rename Output Keyframes",
+            description="Instead of using the selected keyframes' index as their name, name them as consecutive output frames.\n"
+                        "If the selected keyframes are at index [15, 294, 825], they will be written as [00000.exr, 00001.exr, 00002.exr] with this\n"
+                        "option enabled instead of [00015.exr, 00294.exr, 00825.exr].",
+            value=False,
+            enabled=lambda node: node.outputExtension.value != "none",
+        ),
+        desc.ChoiceParam(
+            name="outputExtension",
+            label="Keyframes File Extension",
+            description="File extension of the written keyframes.\n"
+                        "If 'none' is selected, no keyframe will be written on disk.\n"
+                        "For input videos, 'none' should not be used since the written keyframes are used to generate the output SfMData file.",
+            value="none",
+            values=["none", "exr", "jpg", "png"],
+            validValue=lambda node: not (any(ext in input.value.lower() for ext in videoExts for input in node.inputPaths.value) and node.outputExtension.value == "none"),
+            errorMessage="A video input has been provided.
The output extension should be different from 'none'.", + ), + desc.ChoiceParam( + name="storageDataType", + label="EXR Storage Data Type", + description="Storage image data type for keyframes written to EXR files:\n" + " - float: Use full floating point (32 bits per channel).\n" + " - half: Use half float (16 bits per channel).\n" + " - halfFinite: Use half float, but clamp values to avoid non-finite values.\n" + " - auto: Use half float if all values can fit, else use full float.", + values=EXR_STORAGE_DATA_TYPE, + value="float", + enabled=lambda node: node.outputExtension.value == "exr", + advanced=True, + ), + desc.GroupAttribute( + name="debugOptions", + label="Debug Options", + description="Debug options for the Smart keyframe selection method.", + group=None, # skip group from command line + enabled=lambda node: node.selectionMethod.useSmartSelection.value, + advanced=True, + groupDesc=[ + desc.GroupAttribute( + name="debugScores", + label="Export Scores", + description="Export the computed sharpness and optical flow scores to a file.", + group=None, # skip group from command line + enabled=lambda node: node.debugOptions.enabled, + groupDesc=[ + desc.BoolParam( + name="exportScores", + label="Export Scores To CSV", + description="Export the computed sharpness and optical flow scores to a CSV file.", + value=False, + ), + desc.StringParam( + name="csvFilename", + label="CSV Filename", + description="Name of the CSV file to export. It will be written in the node's output folder.", + value="scores.csv", + enabled=lambda node: node.debugOptions.debugScores.exportScores.value, + ), + desc.BoolParam( + name="exportSelectedFrames", + label="Export Selected Frames", + description="Add a column in the CSV file containing 1s for frames that were selected and 0s for those that were not.", + value=False, + enabled=lambda node: node.debugOptions.debugScores.exportScores.value, + ), + ], + ), + desc.GroupAttribute( + name="opticalFlowVisualisation", + label="Optical Flow Visualisation", + description="Visualise the motion vectors for each input frame in HSV.", + group=None, # skip group from command line + enabled=lambda node: node.debugOptions.enabled, + groupDesc=[ + desc.BoolParam( + name="exportFlowVisualisation", + label="Visualise Optical Flow", + description="Export each frame's optical flow HSV visualisation as PNG images.", + value=False, + enabled=lambda node: node.debugOptions.opticalFlowVisualisation.enabled, + ), + desc.BoolParam( + name="flowVisualisationOnly", + label="Only Visualise Optical Flow", + description="Export each frame's optical flow HSV visualisation as PNG images, but do not perform any score computation or frame selection.\n" + "If this option is selected, all the other options will be ignored.", + value=False, + enabled=lambda node: node.debugOptions.opticalFlowVisualisation.enabled, + ), + ], + ), + desc.BoolParam( + name="skipSharpnessComputation", + label="Skip Sharpness Computation", + description="Skip the sharpness score computation. 
A fixed score of 1.0 will be applied by default to all the frames.", + value=False, + enabled=lambda node: node.debugOptions.enabled, + ), + desc.BoolParam( + name="skipSelection", + label="Skip Frame Selection", + description="Compute the sharpness and optical flow scores, but do not proceed to the frame selection.", + value=False, + enabled=lambda node: node.debugOptions.enabled, + ), + ], + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="outputFolder", + label="Folder", + description="Output keyframes folder for extracted frames.", + value="{nodeCacheFolder}", + ), + desc.File( + name="outputSfMDataKeyframes", + label="Keyframes SfMData", + description="Output SfMData file containing all the selected keyframes.", + value="{nodeCacheFolder}/keyframes.sfm", + ), + desc.File( + name="outputSfMDataFrames", + label="Frames SfMData", + description="Output SfMData file containing all the frames that were not selected as keyframes.\n" + "If the input contains videos, this file will not be written since all the frames that were not selected do not actually exist on disk.", + value="{nodeCacheFolder}/frames.sfm", + ), + ] + diff --git a/meshroom/aliceVision/LdrToHdrCalibration.py b/meshroom/aliceVision/LdrToHdrCalibration.py new file mode 100644 index 0000000000..30002ebe97 --- /dev/null +++ b/meshroom/aliceVision/LdrToHdrCalibration.py @@ -0,0 +1,254 @@ +__version__ = "3.1" + +import json + +from meshroom.core import desc +from meshroom.core.utils import COLORSPACES, VERBOSE_LEVEL + +def findMetadata(d, keys, defaultValue): + v = None + for key in keys: + v = d.get(key, None) + k = key.lower() + if v is not None: + return v + for dk, dv in d.items(): + dkm = dk.lower().replace(" ", "") + if dkm == key.lower(): + return dv + dkm = dkm.split(":")[-1] + dkm = dkm.split("/")[-1] + if dkm == k: + return dv + return defaultValue + + + +class LdrToHdrCalibration(desc.AVCommandLineNode): + commandLine = 'aliceVision_LdrToHdrCalibration {allParams}' + size = desc.DynamicNodeSize('input') + cpu = desc.Level.INTENSIVE + ram = desc.Level.NORMAL + + category = 'Panorama HDR' + documentation = ''' +Calibrate LDR to HDR response curve from samples. 
+''' + + inputs = [ + desc.File( + name="input", + label="SfMData", + description="Input SfMData file.", + value="", + ), + desc.File( + name="samples", + label="Samples Folder", + description="Samples folder.", + value="{nodeCacheFolder}", + ), + desc.IntParam( + name="userNbBrackets", + label="Number Of Brackets", + description="Number of exposure brackets per HDR image (0 for automatic detection).", + value=0, + range=(0, 15, 1), + invalidate=False, + group="user", # not used directly on the command line + errorMessage="The set number of brackets is not a multiple of the number of input images.\n" + "Errors will occur during the computation.", + exposed=True, + ), + desc.IntParam( + name="nbBrackets", + label="Automatic Nb Brackets", + description="Number of exposure brackets used per HDR image.\n" + "It is detected automatically from input Viewpoints metadata if 'userNbBrackets' is 0,\n" + "else it is equal to 'userNbBrackets'.", + value=0, + range=(0, 15, 1), + group="bracketsParams", + ), + desc.BoolParam( + name="byPass", + label="Bypass", + description="Bypass HDR creation and use the medium bracket as the source for the next steps.", + value=False, + enabled=lambda node: node.nbBrackets.value != 1, + exposed=True, + ), + desc.ChoiceParam( + name="calibrationMethod", + label="Calibration Method", + description="Method used for camera calibration:\n" + " - Auto: If RAW images are detected, the 'Linear' calibration method will be used. Otherwise, the 'Debevec' calibration method will be used.\n" + " - Linear: Disables the calibration and assumes a linear Camera Response Function. If images are encoded in a known colorspace (like sRGB for JPEG), they will be automatically converted to linear.\n" + " - Debevec: Standard method for HDR calibration.\n" + " - Grossberg: Based on a learned database of cameras, allows to reduce the Camera Response Function to a few parameters while keeping all the precision.\n" + " - Laguerre: Simple but robust method estimating the minimal number of parameters.", + values=["auto", "linear", "debevec", "grossberg", "laguerre"], + value="auto", + enabled=lambda node: node.byPass.enabled and not node.byPass.value, + exposed=True, + ), + desc.ChoiceParam( + name="calibrationWeight", + label="Calibration Weight", + description="Weight function used to calibrate camera response:\n" + " - default (automatically selected according to the calibrationMethod)\n" + " - gaussian\n" + " - triangle\n" + " - plateau", + value="default", + values=["default", "gaussian", "triangle", "plateau"], + enabled=lambda node: node.byPass.enabled and not node.byPass.value, + ), + desc.IntParam( + name="channelQuantizationPower", + label="Channel Quantization Power", + description="Quantization level like 8 bits or 10 bits.", + value=10, + range=(8, 14, 1), + advanced=True, + enabled=lambda node: node.byPass.enabled and not node.byPass.value, + exposed=True, + ), + desc.ChoiceParam( + name="workingColorSpace", + label="Working Color Space", + description="Color space in which the data are processed.\n" + "If 'auto' is selected, the working color space will be 'Linear' if RAW images are detected; otherwise, it will be set to 'sRGB'.", + values=COLORSPACES, + value="AUTO", + invalidate=False, + group="user", # not used directly on the command line + enabled=lambda node: node.byPass.enabled and not node.byPass.value, + exposed=True, + ), + desc.IntParam( + name="maxTotalPoints", + label="Max Number Of Points", + description="Maximum number of points used from the sampling.\n" + "This 
ensures that the number of pixel values extracted by the sampling\n"
+                        "can be managed by the calibration step (in terms of computation time and memory usage).",
+            value=1000000,
+            range=(8, 10000000, 1000),
+            advanced=True,
+            enabled=lambda node: node.byPass.enabled and not node.byPass.value,
+        ),
+        desc.ChoiceParam(
+            name="verboseLevel",
+            label="Verbose Level",
+            description="Verbosity level (fatal, error, warning, info, debug, trace).",
+            values=VERBOSE_LEVEL,
+            value="info",
+        ),
+    ]
+
+    outputs = [
+        desc.File(
+            name="response",
+            label="Response File",
+            description="Path to the output response file.",
+            value="{nodeCacheFolder}/response_.csv",
+        ),
+    ]
+
+    def processChunk(self, chunk):
+        if chunk.node.nbBrackets.value == 1:
+            return
+        # Trick to avoid sending --nbBrackets to the command line when the bracket detection is automatic.
+        # Otherwise, the AliceVision executable has no way of determining whether the bracket detection was automatic
+        # or if it was hard-set by the user.
+        self.commandLine = "aliceVision_LdrToHdrCalibration {allParams}"
+        if chunk.node.userNbBrackets.value == chunk.node.nbBrackets.value:
+            self.commandLine += "{bracketsParams}"
+        super(LdrToHdrCalibration, self).processChunk(chunk)
+
+    @classmethod
+    def update(cls, node):
+        from pyalicevision import hdr as avhdr
+
+        if not isinstance(node.nodeDesc, cls):
+            raise ValueError("Node {} is not an instance of type {}".format(node, cls))
+        # TODO: use Node version for this test
+        if "userNbBrackets" not in node.getAttributes().keys():
+            # Old version of the node
+            return
+        node.userNbBrackets.validValue = True  # Reset the status of "userNbBrackets"
+
+        cameraInitOutput = node.input.getLinkParam(recursive=True)
+        if not cameraInitOutput:
+            node.nbBrackets.value = 0
+            return
+        if node.userNbBrackets.value != 0:
+            # The number of brackets has been manually forced: check whether it is valid or not
+            if cameraInitOutput and cameraInitOutput.node and cameraInitOutput.node.hasAttribute("viewpoints"):
+                viewpoints = cameraInitOutput.node.viewpoints.value
+                # The number of brackets should be a multiple of the number of input images
+                if (len(viewpoints) % node.userNbBrackets.value != 0):
+                    node.userNbBrackets.validValue = False
+                else:
+                    node.userNbBrackets.validValue = True
+            node.nbBrackets.value = node.userNbBrackets.value
+            return
+
+        if not cameraInitOutput.node.hasAttribute("viewpoints"):
+            if cameraInitOutput.node.hasAttribute("input"):
+                cameraInitOutput = cameraInitOutput.node.input.getLinkParam(recursive=True)
+        if cameraInitOutput and cameraInitOutput.node and cameraInitOutput.node.hasAttribute("viewpoints"):
+            viewpoints = cameraInitOutput.node.viewpoints.value
+        else:
+            # No connected CameraInit
+            node.nbBrackets.value = 0
+            return
+
+        inputs = avhdr.vectorli()
+        for viewpoint in viewpoints:
+            jsonMetadata = viewpoint.metadata.value
+            if not jsonMetadata:
+                # No metadata, we cannot find the number of brackets
+                node.nbBrackets.value = 0
+                return
+            d = json.loads(jsonMetadata)
+
+            # Find FNumber
+            fnumber = findMetadata(d, ["FNumber"], "")
+            if fnumber == "":
+                aperture = findMetadata(d, ["Exif:ApertureValue", "ApertureValue", "Aperture"], "")
+                if aperture == "":
+                    fnumber = -1.0
+                else:
+                    fnumber = pow(2.0, aperture / 2.0)
+
+            # Get shutter speed and ISO
+            shutterSpeed = findMetadata(d, ["ExposureTime", "Exif:ShutterSpeedValue", "ShutterSpeedValue", "ShutterSpeed"], -1.0)
+            iso = findMetadata(d, ["Exif:PhotographicSensitivity", "PhotographicSensitivity", "Photographic Sensitivity", "ISO"], -1.0)
+
+            if not fnumber and not shutterSpeed:
+                # If an image has no shutter speed and no f-number, we cannot find the number of brackets.
+                # We assume there is no multi-bracketing, so there is nothing to do.
+                node.nbBrackets.value = 1
+                return
+
+            exposure = LdrToHdrCalibration.getExposure((float(fnumber), float(shutterSpeed), float(iso)))
+
+            obj = avhdr.LuminanceInfo(viewpoint.viewId.value, viewpoint.path.value, exposure)
+            inputs.append(obj)
+
+        obj = avhdr.estimateGroups(inputs)
+
+        if len(obj) == 0:
+            node.nbBrackets.value = 0
+            return
+
+        node.nbBrackets.value = len(obj[0])
+
+    @staticmethod
+    def getExposure(exp, refIso=100.0, refFnumber=1.0):
+        from pyalicevision import sfmData as avsfmdata
+
+        fnumber, shutterSpeed, iso = exp
+        obj = avsfmdata.ExposureSetting(shutterSpeed, fnumber, iso)
+        return obj.getExposure()
diff --git a/meshroom/aliceVision/LdrToHdrMerge.py b/meshroom/aliceVision/LdrToHdrMerge.py
new file mode 100644
index 0000000000..ba71e0e21b
--- /dev/null
+++ b/meshroom/aliceVision/LdrToHdrMerge.py
@@ -0,0 +1,336 @@
+__version__ = "4.1"
+
+import json
+
+from meshroom.core import desc
+from meshroom.core.utils import COLORSPACES, EXR_STORAGE_DATA_TYPE, VERBOSE_LEVEL
+
+def findMetadata(d, keys, defaultValue):
+    v = None
+    for key in keys:
+        v = d.get(key, None)
+        k = key.lower()
+        if v is not None:
+            return v
+        for dk, dv in d.items():
+            dkm = dk.lower().replace(" ", "")
+            if dkm == key.lower():
+                return dv
+            dkm = dkm.split(":")[-1]
+            dkm = dkm.split("/")[-1]
+            if dkm == k:
+                return dv
+    return defaultValue
+
+
+class LdrToHdrMerge(desc.AVCommandLineNode):
+    commandLine = 'aliceVision_LdrToHdrMerge {allParams}'
+    size = desc.DynamicNodeSize('input')
+    parallelization = desc.Parallelization(blockSize=2)
+    commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}'
+
+    category = 'Panorama HDR'
+    documentation = '''
+Merge LDR images into HDR images.
+''' + + inputs = [ + desc.File( + name="input", + label="SfMData", + description="Input SfMData file.", + value="", + ), + desc.File( + name="response", + label="Response File", + description="Response file.", + value="", + ), + desc.IntParam( + name="userNbBrackets", + label="Number Of Brackets", + description="Number of exposure brackets per HDR image (0 for automatic detection).", + value=0, + range=(0, 15, 1), + invalidate=False, + group="user", # not used directly on the command line + errorMessage="The set number of brackets is not a multiple of the number of input images.\n" + "Errors will occur during the computation.", + exposed=True, + ), + desc.IntParam( + name="nbBrackets", + label="Automatic Nb Brackets", + description="Number of exposure brackets used per HDR image.\n" + "It is detected automatically from input Viewpoints metadata if 'userNbBrackets'\n" + "is 0, else it is equal to 'userNbBrackets'.", + value=0, + range=(0, 15, 1), + group="bracketsParams", + ), + desc.BoolParam( + name="offsetRefBracketIndexEnabled", + label="Manually Specify Ref Bracket", + description="Manually specify the reference bracket index to control the exposure of the HDR image.", + value=False, + group="user", # not used directly on the command line + ), + desc.IntParam( + name="offsetRefBracketIndex", + label="Offset Ref Bracket Index", + description="0 to use the center bracket.\n" + "+N to use a more exposed bracket or -N to use a less exposed bracket.", + value=1, + range=(-4, 4, 1), + enabled=lambda node: (node.nbBrackets.value != 1 and node.offsetRefBracketIndexEnabled.value), + ), + desc.FloatParam( + name="meanTargetedLumaForMerging", + label="Targeted Luminance For Merging", + description="Expected mean luminance of the HDR images used to compute the final panorama.", + value=0.4, + range=(0.0, 1.0, 0.01), + enabled=lambda node: (node.nbBrackets.value != 1 and not node.offsetRefBracketIndexEnabled.value), + ), + desc.FloatParam( + name="minSignificantValue", + label="Minimum Significant Value", + description="Minimum channel input value to be considered in advanced pixelwise merging.", + value=0.05, + range=(0.0, 1.0, 0.001), + enabled=lambda node: (node.nbBrackets.value != 1), + ), + desc.FloatParam( + name="maxSignificantValue", + label="Maximum Significant Value", + description="Maximum channel input value to be considered in advanced pixelwise merging.", + value=0.995, + range=(0.0, 1.0, 0.001), + enabled=lambda node: (node.nbBrackets.value != 1), + ), + desc.BoolParam( + name="computeLightMasks", + label="Compute Light Masks", + description="Compute masks of low and high lights and missing info.", + value=False, + enabled=lambda node: node.nbBrackets.value != 1, + ), + desc.BoolParam( + name="byPass", + label="Bypass", + description="Bypass HDR creation and use the medium bracket as the source for the next steps.", + value=False, + enabled=lambda node: node.nbBrackets.value != 1, + exposed=True, + ), + desc.BoolParam( + name="keepSourceImageName", + label="Keep Source Image Name", + description="Keep the filename of the input image selected as central image for the output image filename.", + value=False, + ), + desc.ChoiceParam( + name="fusionWeight", + label="Fusion Weight", + description="Weight function used to fuse all LDR images together:\n" + " - gaussian\n" + " - triangle\n" + " - plateau", + value="gaussian", + values=["gaussian", "triangle", "plateau"], + enabled=lambda node: node.byPass.enabled and not node.byPass.value, + ), + desc.IntParam( + 
name="channelQuantizationPower", + label="Channel Quantization Power", + description="Quantization level like 8 bits or 10 bits.", + value=10, + range=(8, 14, 1), + advanced=True, + enabled=lambda node: node.byPass.enabled and not node.byPass.value, + exposed=True, + ), + desc.ChoiceParam( + name="workingColorSpace", + label="Working Color Space", + description="Color space in which the data are processed.\n" + "If 'auto' is selected, the working color space will be 'Linear' if RAW images are detected; otherwise, it will be set to 'sRGB'.", + values=COLORSPACES, + value="AUTO", + enabled=lambda node: node.byPass.enabled and not node.byPass.value, + exposed=True, + ), + desc.BoolParam( + name="enableHighlight", + label="Enable Highlight", + description="Enable highlights correction.", + value=False, + group="user", # not used directly on the command line + enabled=lambda node: node.byPass.enabled and not node.byPass.value, + ), + desc.FloatParam( + name="highlightCorrectionFactor", + label="Highlights Correction", + description="Pixels saturated in all input images have a partial information about their real luminance.\n" + "We only know that the value should be >= to the standard HDRfusion.\n" + "This parameter allows to perform a post-processing step to put saturated pixels to a constant\n" + "value defined by the `highlightsMaxLuminance` parameter.\n" + "This parameter is float to enable to weight this correction.", + value=1.0, + range=(0.0, 1.0, 0.01), + enabled=lambda node: node.enableHighlight.enabled and node.enableHighlight.value, + ), + desc.FloatParam( + name="highlightTargetLux", + label="Highlight Target Luminance (Lux)", + description="This is an arbitrary target value (in Lux) used to replace the unknown luminance value of the saturated pixels.\n" + "\n" + "Some Outdoor Reference Light Levels:\n" + " - 120,000 lux: Brightest sunlight\n" + " - 110,000 lux: Bright sunlight\n" + " - 20,000 lux: Shade illuminated by entire clear blue sky, midday\n" + " - 1,000 lux: Typical overcast day, midday\n" + " - 400 lux: Sunrise or sunset on a clear day\n" + " - 40 lux: Fully overcast, sunset/sunrise\n" + "\n" + "Some Indoor Reference Light Levels:\n" + " - 20000 lux: Max Usually Used Indoor\n" + " - 750 lux: Supermarkets\n" + " - 500 lux: Office Work\n" + " - 150 lux: Home\n", + value=120000.0, + range=(1000.0, 150000.0, 1.0), + enabled=lambda node: node.enableHighlight.enabled and node.enableHighlight.value and node.highlightCorrectionFactor.value != 0, + ), + desc.ChoiceParam( + name="storageDataType", + label="Storage Data Type", + description="Storage image data type:\n" + " - float: Use full floating point (32 bits per channel).\n" + " - half: Use half float (16 bits per channel).\n" + " - halfFinite: Use half float, but clamp values to avoid non-finite values.\n" + " - auto: Use half float if all values can fit, else use full float.", + values=EXR_STORAGE_DATA_TYPE, + value="float", + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="outputFolder", + label="Folder", + description="Path to the folder containing the merged HDR images.", + value="{nodeCacheFolder}", + group="", # do not export on the command line + ), + desc.File( + name="outSfMData", + label="SfMData", + description="Path to the output SfMData file.", + value="{nodeCacheFolder}/sfmData.sfm", + ), + ] + + @classmethod + def update(cls, 
node): + from pyalicevision import hdr as avhdr + + if not isinstance(node.nodeDesc, cls): + raise ValueError("Node {} is not an instance of type {}".format(node, cls)) + # TODO: use Node version for this test + if "userNbBrackets" not in node.getAttributes().keys(): + # Old version of the node + return + node.userNbBrackets.validValue = True # Reset the status of "userNbBrackets" + + cameraInitOutput = node.input.getLinkParam(recursive=True) + if not cameraInitOutput: + node.nbBrackets.value = 0 + return + if node.userNbBrackets.value != 0: + # The number of brackets has been manually forced: check whether it is valid or not + if cameraInitOutput and cameraInitOutput.node and cameraInitOutput.node.hasAttribute("viewpoints"): + viewpoints = cameraInitOutput.node.viewpoints.value + # The number of brackets should be a multiple of the number of input images + if (len(viewpoints) % node.userNbBrackets.value != 0): + node.userNbBrackets.validValue = False + else: + node.userNbBrackets.validValue = True + node.nbBrackets.value = node.userNbBrackets.value + return + + if not cameraInitOutput.node.hasAttribute("viewpoints"): + if cameraInitOutput.node.hasAttribute("input"): + cameraInitOutput = cameraInitOutput.node.input.getLinkParam(recursive=True) + if cameraInitOutput and cameraInitOutput.node and cameraInitOutput.node.hasAttribute("viewpoints"): + viewpoints = cameraInitOutput.node.viewpoints.value + else: + # No connected CameraInit + node.nbBrackets.value = 0 + return + + inputs = avhdr.vectorli() + for viewpoint in viewpoints: + jsonMetadata = viewpoint.metadata.value + if not jsonMetadata: + # no metadata, we cannot find the number of brackets + node.nbBrackets.value = 0 + return + d = json.loads(jsonMetadata) + + # Find Fnumber + fnumber = findMetadata(d, ["FNumber"], "") + if fnumber == "": + aperture = findMetadata(d, ["Exif:ApertureValue", "ApertureValue", "Aperture"], "") + if aperture == "": + fnumber = -1.0 + else: + fnumber = pow(2.0, aperture / 2.0) + + # Get shutter speed and ISO + shutterSpeed = findMetadata(d, ["ExposureTime", "Exif:ShutterSpeedValue", "ShutterSpeedValue", "ShutterSpeed"], -1.0) + iso = findMetadata(d, ["Exif:PhotographicSensitivity", "PhotographicSensitivity", "Photographic Sensitivity", "ISO"], -1.0) + + if not fnumber and not shutterSpeed: + # If one image without shutter or fnumber, we cannot found the number of brackets. + # We assume that there is no multi-bracketing, so nothing to do. + node.nbBrackets.value = 1 + return + + exposure = LdrToHdrMerge.getExposure((float(fnumber), float(shutterSpeed), float(iso))) + + obj = avhdr.LuminanceInfo(viewpoint.viewId.value,viewpoint.path.value, exposure) + inputs.append(obj) + + obj = avhdr.estimateGroups(inputs) + + if len(obj) == 0: + node.nbBrackets.value = 0 + return + + node.nbBrackets.value = len(obj[0]) + + @staticmethod + def getExposure(exp, refIso = 100.0, refFnumber = 1.0): + from pyalicevision import sfmData as avsfmdata + + fnumber, shutterSpeed, iso = exp + obj = avsfmdata.ExposureSetting(shutterSpeed, fnumber, iso) + return obj.getExposure() + + def processChunk(self, chunk): + # Trick to avoid sending --nbBrackets to the command line when the bracket detection is automatic. + # Otherwise, the AliceVision executable has no way of determining whether the bracket detection was automatic + # or if it was hard-set by the user. 
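+        # Editorial note (not part of the original patch): when the user forces a bracket
+        # count, update() copies it into nbBrackets, the condition below holds and
+        # "{bracketsParams}" appends an explicit "--nbBrackets <N>" to the command line.
+        # With automatic detection (userNbBrackets == 0), the two values differ, the flag
+        # is omitted and the executable performs its own bracket detection.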
+ self.commandLine = "aliceVision_LdrToHdrMerge {allParams}" + if chunk.node.userNbBrackets.value == chunk.node.nbBrackets.value: + self.commandLine += "{bracketsParams}" + super(LdrToHdrMerge, self).processChunk(chunk) diff --git a/meshroom/aliceVision/LdrToHdrSampling.py b/meshroom/aliceVision/LdrToHdrSampling.py new file mode 100644 index 0000000000..fa88ccd3c8 --- /dev/null +++ b/meshroom/aliceVision/LdrToHdrSampling.py @@ -0,0 +1,284 @@ +__version__ = "4.0" + +import json + +from meshroom.core import desc +from meshroom.core.utils import COLORSPACES, VERBOSE_LEVEL + + +def findMetadata(d, keys, defaultValue): + v = None + for key in keys: + v = d.get(key, None) + k = key.lower() + if v is not None: + return v + for dk, dv in d.items(): + dkm = dk.lower().replace(" ", "") + if dkm == key.lower(): + return dv + dkm = dkm.split(":")[-1] + dkm = dkm.split("/")[-1] + if dkm == k: + return dv + return defaultValue + + +class DividedInputNodeSize(desc.DynamicNodeSize): + ''' + The LDR2HDR will reduce the amount of views in the SfMData. + This class converts the number of LDR input views into the number of HDR output views. + ''' + def __init__(self, param, divParam): + super(DividedInputNodeSize, self).__init__(param) + self._divParam = divParam + + def computeSize(self, node): + s = super(DividedInputNodeSize, self).computeSize(node) + divParam = node.attribute(self._divParam) + if divParam.value == 0: + return s + # s is the total number of inputs and may include outliers, that will not be used + # during computations and should thus be excluded from the size computation + return (s - node.outliersNb) / divParam.value + + +class LdrToHdrSampling(desc.AVCommandLineNode): + commandLine = 'aliceVision_LdrToHdrSampling {allParams}' + size = DividedInputNodeSize('input', 'nbBrackets') + parallelization = desc.Parallelization(blockSize=2) + commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}' + + category = 'Panorama HDR' + documentation = ''' +Sample pixels from Low range images for HDR creation. +''' + + outliersNb = 0 # Number of detected outliers among the input images + + inputs = [ + desc.File( + name="input", + label="SfMData", + description="Input SfMData file.", + value="", + ), + desc.IntParam( + name="userNbBrackets", + label="Number Of Brackets", + description="Number of exposure brackets per HDR image (0 for automatic detection).", + value=0, + range=(0, 15, 1), + invalidate=False, + group="user", # not used directly on the command line + errorMessage="The set number of brackets is not a multiple of the number of input images.\n" + "Errors will occur during the computation.", + exposed=True, + ), + desc.IntParam( + name="nbBrackets", + label="Automatic Nb Brackets", + description="Number of exposure brackets used per HDR image.\n" + "It is detected automatically from input Viewpoints metadata if 'userNbBrackets'\n" + "is 0, else it is equal to 'userNbBrackets'.", + value=0, + range=(0, 15, 1), + group="bracketsParams", + ), + desc.BoolParam( + name="byPass", + label="Bypass", + description="Bypass HDR creation and use the medium bracket as the source for the next steps.", + value=False, + enabled=lambda node: node.nbBrackets.value != 1, + exposed=True, + ), + desc.ChoiceParam( + name="calibrationMethod", + label="Calibration Method", + description="Method used for camera calibration:\n" + " - Auto: If RAW images are detected, the 'Linear' calibration method will be used. 
Otherwise, the 'Debevec' calibration method will be used.\n" + " - Linear: Disables the calibration and assumes a linear Camera Response Function. If images are encoded in a known colorspace (like sRGB for JPEG), they will be automatically converted to linear.\n" + " - Debevec: Standard method for HDR calibration.\n" + " - Grossberg: Based on a learned database of cameras, allows to reduce the Camera Response Function to a few parameters while keeping all the precision.\n" + " - Laguerre: Simple but robust method estimating the minimal number of parameters.", + values=["auto", "linear", "debevec", "grossberg", "laguerre"], + value="auto", + enabled=lambda node: node.byPass.enabled and not node.byPass.value, + exposed=True, + ), + desc.IntParam( + name="channelQuantizationPower", + label="Channel Quantization Power", + description="Quantization level like 8 bits or 10 bits.", + value=10, + range=(8, 14, 1), + advanced=True, + enabled=lambda node: node.byPass.enabled and not node.byPass.value, + exposed=True, + ), + desc.ChoiceParam( + name="workingColorSpace", + label="Working Color Space", + description="Color space in which the data are processed.\n" + "If 'auto' is selected, the working color space will be 'Linear' if RAW images are detected; otherwise, it will be set to 'sRGB'.", + values=COLORSPACES, + value="AUTO", + enabled=lambda node: node.byPass.enabled and not node.byPass.value, + exposed=True, + ), + desc.IntParam( + name="blockSize", + label="Block Size", + description="Size of the image tile to extract a sample.", + value=256, + range=(8, 1024, 1), + advanced=True, + enabled=lambda node: node.byPass.enabled and not node.byPass.value, + ), + desc.IntParam( + name="radius", + label="Patch Radius", + description="Radius of the patch used to analyze the sample statistics.", + value=5, + range=(0, 10, 1), + advanced=True, + enabled=lambda node: node.byPass.enabled and not node.byPass.value, + ), + desc.IntParam( + name="maxCountSample", + label="Max Number Of Samples", + description="Maximum number of samples per image group.", + value=200, + range=(10, 1000, 10), + advanced=True, + enabled=lambda node: node.byPass.enabled and not node.byPass.value, + ), + desc.BoolParam( + name="debug", + label="Export Debug Files", + description="Export debug files to analyze the sampling strategy.", + value=False, + invalidate=False, + enabled=lambda node: node.byPass.enabled and not node.byPass.value, + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="output", + label="Folder", + description="Output path for the samples.", + value="{nodeCacheFolder}", + ), + ] + + def processChunk(self, chunk): + if chunk.node.nbBrackets.value == 1: + return + # Trick to avoid sending --nbBrackets to the command line when the bracket detection is automatic. + # Otherwise, the AliceVision executable has no way of determining whether the bracket detection was automatic + # or if it was hard-set by the user. 
+ self.commandLine = "aliceVision_LdrToHdrSampling {allParams}" + if chunk.node.userNbBrackets.value == chunk.node.nbBrackets.value: + self.commandLine += "{bracketsParams}" + super(LdrToHdrSampling, self).processChunk(chunk) + + @classmethod + def update(cls, node): + from pyalicevision import hdr as avhdr + + if not isinstance(node.nodeDesc, cls): + raise ValueError("Node {} is not an instance of type {}".format(node, cls)) + # TODO: use Node version for this test + if "userNbBrackets" not in node.getAttributes().keys(): + # Old version of the node + return + node.outliersNb = 0 # Reset the number of detected outliers + node.userNbBrackets.validValue = True # Reset the status of "userNbBrackets" + + cameraInitOutput = node.input.getLinkParam(recursive=True) + if not cameraInitOutput: + node.nbBrackets.value = 0 + return + if node.userNbBrackets.value != 0: + # The number of brackets has been manually forced: check whether it is valid or not + if cameraInitOutput and cameraInitOutput.node and cameraInitOutput.node.hasAttribute("viewpoints"): + viewpoints = cameraInitOutput.node.viewpoints.value + # The number of brackets should be a multiple of the number of input images + if (len(viewpoints) % node.userNbBrackets.value != 0): + node.userNbBrackets.validValue = False + else: + node.userNbBrackets.validValue = True + node.nbBrackets.value = node.userNbBrackets.value + return + + if not cameraInitOutput.node.hasAttribute("viewpoints"): + if cameraInitOutput.node.hasAttribute("input"): + cameraInitOutput = cameraInitOutput.node.input.getLinkParam(recursive=True) + if cameraInitOutput and cameraInitOutput.node and cameraInitOutput.node.hasAttribute("viewpoints"): + viewpoints = cameraInitOutput.node.viewpoints.value + else: + # No connected CameraInit + node.nbBrackets.value = 0 + return + + inputs = avhdr.vectorli() + for viewpoint in viewpoints: + jsonMetadata = viewpoint.metadata.value + if not jsonMetadata: + # no metadata, we cannot find the number of brackets + node.nbBrackets.value = 0 + return + d = json.loads(jsonMetadata) + + # Find Fnumber + fnumber = findMetadata(d, ["FNumber"], "") + if fnumber == "": + aperture = findMetadata(d, ["Exif:ApertureValue", "ApertureValue", "Aperture"], "") + if aperture == "": + fnumber = -1.0 + else: + fnumber = pow(2.0, aperture / 2.0) + + # Get shutter speed and ISO + shutterSpeed = findMetadata(d, ["ExposureTime", "Exif:ShutterSpeedValue", "ShutterSpeedValue", "ShutterSpeed"], -1.0) + iso = findMetadata(d, ["Exif:PhotographicSensitivity", "PhotographicSensitivity", "Photographic Sensitivity", "ISO"], -1.0) + + if not fnumber and not shutterSpeed: + # If one image without shutter or fnumber, we cannot found the number of brackets. + # We assume that there is no multi-bracketing, so nothing to do. 
+                node.nbBrackets.value = 1
+                return
+
+            exposure = LdrToHdrSampling.getExposure((float(fnumber), float(shutterSpeed), float(iso)))
+
+            obj = avhdr.LuminanceInfo(viewpoint.viewId.value, viewpoint.path.value, exposure)
+            inputs.append(obj)
+
+        obj = avhdr.estimateGroups(inputs)
+
+        if len(obj) == 0:
+            node.nbBrackets.value = 0
+            return
+
+        bracketSize = len(obj[0])
+        bracketCount = len(obj)
+
+        node.nbBrackets.value = bracketSize
+        node.outliersNb = len(inputs) - (bracketSize * bracketCount)
+
+    @staticmethod
+    def getExposure(exp, refIso=100.0, refFnumber=1.0):
+        from pyalicevision import sfmData as avsfmdata
+
+        fnumber, shutterSpeed, iso = exp
+        obj = avsfmdata.ExposureSetting(shutterSpeed, fnumber, iso)
+        return obj.getExposure()
diff --git a/meshroom/aliceVision/LidarDecimating.py b/meshroom/aliceVision/LidarDecimating.py
new file mode 100644
index 0000000000..8eaf484fc5
--- /dev/null
+++ b/meshroom/aliceVision/LidarDecimating.py
@@ -0,0 +1,57 @@
+__version__ = "1.0"
+
+from meshroom.core import desc
+from meshroom.core.utils import VERBOSE_LEVEL
+
+class LidarDecimating(desc.AVCommandLineNode):
+    commandLine = 'aliceVision_lidarDecimating {allParams}'
+
+    size = desc.StaticNodeSize(10)
+    parallelization = desc.Parallelization(blockSize=1)
+    commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeFullSize}'
+
+    cpu = desc.Level.INTENSIVE
+    ram = desc.Level.INTENSIVE
+
+    category = 'Dense Reconstruction'
+    documentation = '''
+    This node simplifies previously reconstructed meshes from Lidar.
+    '''
+
+    inputs = [
+        desc.File(
+            name="input",
+            label="Input JSON",
+            description="Input JSON file describing the inputs.",
+            value="",
+        ),
+        desc.FloatParam(
+            name="errorLimit",
+            label="Error Limit",
+            description="Maximum distance (in meters) allowed.",
+            value=0.001,
+            range=(0.0, 1.0, 0.001),
+        ),
+        desc.ChoiceParam(
+            name="verboseLevel",
+            label="Verbose Level",
+            description="Verbosity level (fatal, error, warning, info, debug, trace).",
+            values=VERBOSE_LEVEL,
+            value="info",
+        ),
+    ]
+
+    outputs = [
+        desc.File(
+            name="output",
+            label="Sub-Meshes Directory",
+            description="Output directory for sub-meshes.",
+            value="{nodeCacheFolder}",
+        ),
+        desc.File(
+            name="outputJson",
+            label="Scene Description",
+            description="Output scene description.",
+            value="{nodeCacheFolder}/scene.json",
+        ),
+    ]
diff --git a/meshroom/aliceVision/LidarMerging.py b/meshroom/aliceVision/LidarMerging.py
new file mode 100644
index 0000000000..e107d7c4a0
--- /dev/null
+++ b/meshroom/aliceVision/LidarMerging.py
@@ -0,0 +1,40 @@
+__version__ = "1.0"
+
+from meshroom.core import desc
+from meshroom.core.utils import VERBOSE_LEVEL
+
+class LidarMerging(desc.AVCommandLineNode):
+    commandLine = 'aliceVision_lidarMerging {allParams}'
+
+    cpu = desc.Level.INTENSIVE
+    ram = desc.Level.INTENSIVE
+
+    category = 'Dense Reconstruction'
+    documentation = '''
+    This node merges several meshes into one.
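+    '''

The Lidar nodes in this change (LidarDecimating above, LidarMeshing below) declare `size = desc.StaticNodeSize(10)` together with `desc.Parallelization(blockSize=1)`, so Meshroom can split their work into chunks, each invoked with the `commandLineRange` arguments. A minimal editorial sketch of that chunking idea (hypothetical helper; the real scheduling is implemented by Meshroom's `desc.Parallelization`):

    def make_ranges(node_size, block_size):
        # Split `node_size` tasks into (rangeStart, rangeSize) chunks of at most `block_size`.
        return [(start, min(block_size, node_size - start))
                for start in range(0, node_size, block_size)]

    assert make_ranges(10, 1) == [(i, 1) for i in range(10)]
    assert make_ranges(10, 4) == [(0, 4), (4, 4), (8, 2)]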
+
+    inputs = [
+        desc.File(
+            name="input",
+            label="Input JSON",
+            description="Input JSON file describing the inputs.",
+            value="",
+        ),
+        desc.ChoiceParam(
+            name="verboseLevel",
+            label="Verbose Level",
+            description="Verbosity level (fatal, error, warning, info, debug, trace).",
+            values=VERBOSE_LEVEL,
+            value="info",
+        ),
+    ]
+
+    outputs = [
+        desc.File(
+            name="output",
+            label="Mesh Path Output",
+            description="Path to the output mesh file.",
+            value="{nodeCacheFolder}/output.obj",
+        ),
+    ]
diff --git a/meshroom/aliceVision/LidarMeshing.py b/meshroom/aliceVision/LidarMeshing.py
new file mode 100644
index 0000000000..3ce1a7071f
--- /dev/null
+++ b/meshroom/aliceVision/LidarMeshing.py
@@ -0,0 +1,137 @@
+__version__ = "1.0"
+
+from meshroom.core import desc
+from meshroom.core.utils import VERBOSE_LEVEL
+
+class LidarMeshing(desc.AVCommandLineNode):
+    commandLine = 'aliceVision_lidarMeshing {allParams}'
+
+    size = desc.StaticNodeSize(10)
+    parallelization = desc.Parallelization(blockSize=1)
+    commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeFullSize}'
+
+    cpu = desc.Level.INTENSIVE
+    ram = desc.Level.INTENSIVE
+
+    category = 'Dense Reconstruction'
+    documentation = '''
+    This node creates a dense geometric surface representation of the Lidar measurements.
+    '''
+
+    inputs = [
+        desc.File(
+            name="input",
+            label="Input JSON",
+            description="Input JSON file describing the inputs.",
+            value="",
+        ),
+        desc.BoolParam(
+            name="useBoundingBox",
+            label="Custom Bounding Box",
+            description="Edit the meshing bounding box.\n"
+                        "If enabled, it takes priority over the 'Estimate Space From SfM' option.\n"
+                        "Parameters can be adjusted in advanced settings.",
+            value=False,
+            group="",
+        ),
+        desc.GroupAttribute(
+            name="boundingBox",
+            label="Bounding Box Settings",
+            description="Translation, rotation and scale of the bounding box.",
+            groupDesc=[
+                desc.GroupAttribute(
+                    name="bboxTranslation",
+                    label="Translation",
+                    description="Position in space.",
+                    groupDesc=[
+                        desc.FloatParam(
+                            name="x", label="x", description="X offset.",
+                            value=0.0,
+                            range=(-20.0, 20.0, 0.01),
+                        ),
+                        desc.FloatParam(
+                            name="y", label="y", description="Y offset.",
+                            value=0.0,
+                            range=(-20.0, 20.0, 0.01),
+                        ),
+                        desc.FloatParam(
+                            name="z", label="z", description="Z offset.",
+                            value=0.0,
+                            range=(-20.0, 20.0, 0.01),
+                        ),
+                    ],
+                    joinChar=",",
+                ),
+                desc.GroupAttribute(
+                    name="bboxRotation",
+                    label="Euler Rotation",
+                    description="Rotation in Euler degrees.",
+                    groupDesc=[
+                        desc.FloatParam(
+                            name="x", label="x", description="Euler X rotation.",
+                            value=0.0,
+                            range=(-90.0, 90.0, 1.0),
+                        ),
+                        desc.FloatParam(
+                            name="y", label="y", description="Euler Y rotation.",
+                            value=0.0,
+                            range=(-180.0, 180.0, 1.0),
+                        ),
+                        desc.FloatParam(
+                            name="z", label="z", description="Euler Z rotation.",
+                            value=0.0,
+                            range=(-180.0, 180.0, 1.0),
+                        ),
+                    ],
+                    joinChar=",",
+                ),
+                desc.GroupAttribute(
+                    name="bboxScale",
+                    label="Scale",
+                    description="Scale of the bounding box.",
+                    groupDesc=[
+                        desc.FloatParam(
+                            name="x", label="x", description="X scale.",
+                            value=1.0,
+                            range=(0.0, 20.0, 0.01),
+                        ),
+                        desc.FloatParam(
+                            name="y", label="y", description="Y scale.",
+                            value=1.0,
+                            range=(0.0, 20.0, 0.01),
+                        ),
+                        desc.FloatParam(
+                            name="z", label="z", description="Z scale.",
+                            value=1.0,
+                            range=(0.0, 20.0, 0.01),
+                        ),
+                    ],
+                    joinChar=",",
+                ),
+            ],
+            joinChar=",",
+            enabled=lambda node: node.useBoundingBox.value,
+        ),
+        desc.ChoiceParam(
+            name="verboseLevel",
+            label="Verbose Level",
description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="output", + label="Sub-Meshes Directory", + description="Output directory for sub-meshes", + value="{nodeCacheFolder}", + ), + desc.File( + name="outputJson", + label="Scene Description", + description="Output scene description.", + value="{nodeCacheFolder}/scene.json", + ), + ] diff --git a/meshroom/aliceVision/LightingCalibration.py b/meshroom/aliceVision/LightingCalibration.py new file mode 100644 index 0000000000..01b1f793b6 --- /dev/null +++ b/meshroom/aliceVision/LightingCalibration.py @@ -0,0 +1,73 @@ +__version__ = "1.0" + +from meshroom.core import desc +from meshroom.core.utils import VERBOSE_LEVEL + + +class LightingCalibration(desc.CommandLineNode): + commandLine = 'aliceVision_lightingCalibration {allParams}' + category = 'Photometric Stereo' + documentation = ''' +Evaluate the lighting in a scene using spheres placed in the scene. +Can also be used to calibrate a lighting dome (RTI type). +''' + + inputs = [ + desc.File( + name="inputPath", + label="Input SfMData", + description="Input SfMData file.", + value="", + ), + desc.File( + name="inputDetection", + label="Sphere Detection File", + description="Input JSON file containing sphere centers and radiuses.", + value="", + ), + desc.BoolParam( + name="saveAsModel", + label="Save As Model", + description="Check if this calibration file will be used with other datasets.", + value=False, + ), + desc.BoolParam( + name="ellipticEstimation", + label="Use elliptic estimation", + description="Consider the right projection of the sphere. Fit the circle tool on the small axe of the ellipse.", + value=False, + ), + desc.ChoiceParam( + name="method", + label="Calibration Method", + description="Method used for light calibration.\n" + "Use 'brightestPoint' for shiny spheres and 'whiteSphere' for white matte spheres.\n" + "Spherical Harmonic lighting can be estimated using 'SH' method.", + values=["brightestPoint", "whiteSphere", "SH"], + value="brightestPoint", + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="outputFile", + label="Light File", + description="Light information will be written here.", + value="{nodeCacheFolder}/lights.json", + ), + desc.File( + name="lightingEstimationVisualization", + label="Estimated Lighting Visualization", + description="Estimated Lighting Visualization.", + semantic="image", + value="{nodeCacheFolder}/_{methodValue}.png", + group=None, + ), + ] diff --git a/meshroom/aliceVision/LightingEstimation.py b/meshroom/aliceVision/LightingEstimation.py new file mode 100644 index 0000000000..3e3193c1fd --- /dev/null +++ b/meshroom/aliceVision/LightingEstimation.py @@ -0,0 +1,82 @@ +__version__ = "1.0" + +from meshroom.core import desc +from meshroom.core.utils import VERBOSE_LEVEL + + +class LightingEstimation(desc.AVCommandLineNode): + commandLine = 'aliceVision_lightingEstimation {allParams}' + + category = 'Utils' + documentation = ''' + ''' + + inputs = [ + desc.File( + name="input", + label="Input SfMData", + description="Input SfMData file.", + value="", + ), + desc.File( + name="depthMapsFilterFolder", + label="Filtered Depth Maps Folder", + description="Input filtered depth maps folder.", + value="", + ), + desc.File( + name="imagesFolder", + label="Images 
Folder", + description="Use images from a specific folder instead of those specify in the SfMData file.\n" + "Filename should be the image UID.", + value="", + ), + desc.ChoiceParam( + name="lightingEstimationMode", + label="Lighting Estimation Mode", + description="Lighting estimation mode.", + value="global", + values=["global", "per_image"], + advanced=True, + ), + desc.ChoiceParam( + name="lightingColor", + label="Lighting Color Mode", + description="Lighting color mode.", + value="RGB", + values=["RGB", "Luminance"], + advanced=True, + ), + desc.ChoiceParam( + name="albedoEstimationName", + label="Albedo Estimation Name", + description="Albedo estimation method used for light estimation.", + value="constant", + values=["constant", "picture", "median_filter", "blur_filter"], + advanced=True, + ), + desc.IntParam( + name="albedoEstimationFilterSize", + label="Albedo Estimation Filter Size", + description="Albedo filter size for estimation method using filter.", + value=3, + range=(0, 100, 1), + advanced=True, + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="output", + label="Folder", + description="Folder for output lighting vector files.", + value="{nodeCacheFolder}", + ), + ] diff --git a/meshroom/aliceVision/MaskProcessing.py b/meshroom/aliceVision/MaskProcessing.py new file mode 100644 index 0000000000..6d6e56457a --- /dev/null +++ b/meshroom/aliceVision/MaskProcessing.py @@ -0,0 +1,85 @@ +__version__ = "1.0" + +from meshroom.core import desc +from meshroom.core.utils import DESCRIBER_TYPES, VERBOSE_LEVEL + +import os.path + +class MaskProcessingNodeSize(desc.DynamicNodeSize): + """ + MaskProcessingNodeSize expresses a dependency to multiple input attributess to define + the size of a Node in terms of individual tasks for parallelization. 
+ """ + def __init__(self, param): + self._params = param + + def computeSize(self, node): + + size = 0 + + for input in node.attribute(self._params).value: + paramName = input.getFullName() + param = node.attribute(paramName) + if param.isLink: + size = max(size, param.getLinkParam().node.size) + + return size + + +class MaskProcessing(desc.AVCommandLineNode): + commandLine = 'aliceVision_maskProcessing {allParams}' + size = MaskProcessingNodeSize("inputs") + + category = 'Utils' + documentation = ''' + Perform operations on a list of masks with the same names + ''' + + inputs = [ + desc.ListAttribute( + elementDesc=desc.File( + name="input", + label="Input Directory", + description="A directory with a set of mask.", + value="", + ), + name="inputs", + label="Input Directories", + description="A set of directories containing masks with the same names.", + exposed=True, + ), + desc.ChoiceParam( + name="operator", + label="Operator", + description="Operator: Binary operator\n" + "OR: applies binary OR between all the masks\n" + "AND: applies binary AND between all the masks\n" + "NOT: applies binary NOT to the first mask in the list\n", + value="and", + values=["or", "and", "not"], + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ) + ] + + outputs = [ + desc.File( + name="output", + label="Output", + description="Path to the output directory.", + value="{nodeCacheFolder}", + ), + desc.File( + name="masks", + label="Masks", + description="Processed segmentation masks.", + semantic="imageList", + value= "{nodeCacheFolder}/*.exr", + group="", + ), + ] diff --git a/meshroom/aliceVision/MergeMeshes.py b/meshroom/aliceVision/MergeMeshes.py new file mode 100644 index 0000000000..311e7f1b48 --- /dev/null +++ b/meshroom/aliceVision/MergeMeshes.py @@ -0,0 +1,69 @@ +__version__ = "1.0" + +from meshroom.core import desc +from meshroom.core.utils import VERBOSE_LEVEL + + +class MergeMeshes(desc.AVCommandLineNode): + commandLine = 'aliceVision_mergeMeshes {allParams}' + + category = 'Utils' + documentation = ''' +This node allows to merge two meshes in one. + +Operation types used to merge two meshes: + +- boolean_union: Create a new mesh with the combined volume of the two input meshes. +- boolean_intersection: Create a new mesh from the intersected volumes of the two input meshes. +- boolean_difference: Create a new mesh from the volume of the first input mesh subtracted by the second input mesh. 
+''' + + inputs = [ + desc.File( + name="inputFirstMesh", + label="First Mesh", + description="Input first mesh (*.obj, *.mesh, *.meshb, *.ply, *.off, *.stl).", + value="", + ), + desc.File( + name="inputSecondMesh", + label="Second Mesh", + description="Input second mesh (*.obj, *.mesh, *.meshb, *.ply, *.off, *.stl).", + value="", + ), + desc.ChoiceParam( + name="mergeOperation", + label="Merge Operation", + description="Operation types used to merge two meshes.", + value="boolean_union", + values=["boolean_union", "boolean_intersection", "boolean_difference"], + ), + desc.BoolParam( + name="preProcess", + label="Pre-Process", + description="Pre-process the input meshes in order to avoid geometric errors in the merging process.", + value=True, + ), + desc.BoolParam( + name="postProcess", + label="Post-Process", + description="Post-process the output mesh in order to avoid future geometric errors.", + value=True, + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="output", + label="Mesh", + description="Output mesh (*.obj, *.mesh, *.meshb, *.ply, *.off, *.stl).", + value="{nodeCacheFolder}/mesh.stl", + ), + ] diff --git a/meshroom/aliceVision/MeshDecimate.py b/meshroom/aliceVision/MeshDecimate.py new file mode 100644 index 0000000000..a6b092609b --- /dev/null +++ b/meshroom/aliceVision/MeshDecimate.py @@ -0,0 +1,77 @@ +__version__ = "1.0" + +from meshroom.core import desc +from meshroom.core.utils import VERBOSE_LEVEL + + +class MeshDecimate(desc.AVCommandLineNode): + commandLine = 'aliceVision_meshDecimate {allParams}' + cpu = desc.Level.NORMAL + ram = desc.Level.NORMAL + + category = 'Mesh Post-Processing' + documentation = ''' +This node reduces the density of the mesh.
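The node exposes four sizing parameters (listed below): a relative `simplificationFactor` plus `nbVertices`, `minVertices` and `maxVertices`. One plausible reading of how they interact is sketched here with a hypothetical helper; the actual binary's logic may differ:

```python
def target_vertex_count(input_vertices, simplification_factor=0.5,
                        nb_vertices=0, min_vertices=0, max_vertices=0):
    """Hypothetical reading of MeshDecimate's sizing parameters:
    a fixed vertex count takes priority, otherwise the factor scales
    the input count, clamped by the optional min/max bounds (0 = unset)."""
    if nb_vertices > 0:
        return nb_vertices
    target = round(input_vertices * simplification_factor)
    if min_vertices > 0:
        target = max(target, min_vertices)
    if max_vertices > 0:
        target = min(target, max_vertices)
    return target

print(target_vertex_count(1_000_000))                        # 500000
print(target_vertex_count(1_000_000, max_vertices=200_000))  # 200000
```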
+''' + + inputs = [ + desc.File( + name="input", + label="Mesh", + description="Input mesh in the OBJ format.", + value="", + ), + desc.FloatParam( + name="simplificationFactor", + label="Simplification Factor", + description="Simplification factor for the decimation.", + value=0.5, + range=(0.0, 1.0, 0.01), + ), + desc.IntParam( + name="nbVertices", + label="Fixed Number of Vertices", + description="Fixed number of output vertices.", + value=0, + range=(0, 1000000, 1), + ), + desc.IntParam( + name="minVertices", + label="Min Vertices", + description="Minimum number of output vertices.", + value=0, + range=(0, 1000000, 1), + ), + desc.IntParam( + name="maxVertices", + label="Max Vertices", + description="Maximum number of output vertices.", + value=0, + range=(0, 1000000, 1), + ), + desc.BoolParam( + name="flipNormals", + label="Flip Normals", + description="Option to flip face normals.\n" + "It can be needed as it depends on the vertices order in triangles\n" + "and the convention changes from one software to another.", + value=False, + advanced=True, + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="output", + label="Mesh", + description="Output mesh in the OBJ file format.", + value="{nodeCacheFolder}/mesh.obj", + ), + ] diff --git a/meshroom/aliceVision/MeshDenoising.py b/meshroom/aliceVision/MeshDenoising.py new file mode 100644 index 0000000000..a7329e45b6 --- /dev/null +++ b/meshroom/aliceVision/MeshDenoising.py @@ -0,0 +1,92 @@ +__version__ = "1.0" + +from meshroom.core import desc +from meshroom.core.utils import VERBOSE_LEVEL + + +class MeshDenoising(desc.AVCommandLineNode): + commandLine = 'aliceVision_meshDenoising {allParams}' + + category = 'Mesh Post-Processing' + documentation = ''' +This experimental node reduces noise on a mesh. +For now, the parameters are difficult to control and vary a lot from one dataset to another. +''' + + inputs = [ + desc.File( + name="input", + label="Mesh", + description="Input mesh in the OBJ file format.", + value="", + ), + desc.IntParam( + name="denoisingIterations", + label="Denoising Iterations", + description="Number of denoising iterations.", + value=5, + range=(0, 30, 1), + ), + desc.FloatParam( + name="meshUpdateClosenessWeight", + label="Mesh Update Closeness Weight", + description="Closeness weight for mesh update. Must be positive.", + value=0.001, + range=(0.0, 0.1, 0.001), + ), + desc.FloatParam( + name="lambda", + label="Lambda", + description="Regularization weight.", + value=2.0, + range=(0.0, 10.0, 0.01), + ), + desc.FloatParam( + name="eta", + label="Eta", + description="Gaussian standard deviation for spatial weight, \n" + "scaled by the average distance between adjacent face centroids.\n" + "Must be positive.", + value=1.5, + range=(0.0, 20.0, 0.01), + ), + desc.FloatParam( + name="mu", + label="Mu", + description="Gaussian standard deviation for guidance weight.", + value=1.5, + range=(0.0, 10.0, 0.01), + ), + desc.FloatParam( + name="nu", + label="Nu", + description="Gaussian standard deviation for signal weight.", + value=0.3, + range=(0.0, 5.0, 0.01), + ), + desc.ChoiceParam( + name="meshUpdateMethod", + label="Mesh Update Method", + description="Mesh update method:\n" + " - ITERATIVE_UPDATE (default): ShapeUp styled iterative solver.\n" + " - POISSON_UPDATE: Poisson-based update from [Wang et al.
2015] 'Rolling guidance normal filter for geometric processing'.", + value=0, + values=[0, 1], + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="output", + label="Output", + description="Output mesh in the OBJ file format.", + value="{nodeCacheFolder}/mesh.obj", + ), + ] diff --git a/meshroom/aliceVision/MeshFiltering.py b/meshroom/aliceVision/MeshFiltering.py new file mode 100644 index 0000000000..6eb91d1052 --- /dev/null +++ b/meshroom/aliceVision/MeshFiltering.py @@ -0,0 +1,117 @@ +__version__ = "3.0" + +from meshroom.core import desc +from meshroom.core.utils import VERBOSE_LEVEL + + +class MeshFiltering(desc.AVCommandLineNode): + commandLine = 'aliceVision_meshFiltering {allParams}' + + category = 'Dense Reconstruction' + documentation = ''' +This node applies a Laplacian filtering to remove local defects from the raw Meshing cut. +''' + + inputs = [ + desc.File( + name="inputMesh", + label="Mesh", + description="Input mesh file.", + value="", + ), + desc.ChoiceParam( + name="outputMeshFileType", + label="Mesh Type", + description="File type for the output mesh.", + value="obj", + values=["gltf", "obj", "fbx", "stl"], + group="", + ), + desc.BoolParam( + name="keepLargestMeshOnly", + label="Keep Only The Largest Mesh", + description="Keep only the largest connected triangles group.", + value=False, + ), + desc.ChoiceParam( + name="smoothingSubset", + label="Smoothing Subset", + description="Subset for smoothing (all, surface_boundaries, surface_inner_part).", + value="all", + values=["all", "surface_boundaries", "surface_inner_part"], + advanced=True, + ), + desc.IntParam( + name="smoothingBoundariesNeighbours", + label="Smoothing Boundaries Neighbours", + description="Neighbours of the boundaries to consider.", + value=0, + range=(0, 20, 1), + advanced=True, + ), + desc.IntParam( + name="smoothingIterations", + label="Smoothing Iterations", + description="Number of smoothing iterations.", + value=5, + range=(0, 50, 1), + ), + desc.FloatParam( + name="smoothingLambda", + label="Smoothing Lambda", + description="Smoothing size.", + value=1.0, + range=(0.0, 10.0, 0.1), + advanced=True, + ), + desc.ChoiceParam( + name="filteringSubset", + label="Filtering Subset", + description="Subset for filtering (all, surface_boundaries, surface_inner_part).", + value="all", + values=["all", "surface_boundaries", "surface_inner_part"], + advanced=True, + ), + desc.IntParam( + name="filteringIterations", + label="Filtering Iterations", + description="Number of filtering iterations.", + value=1, + range=(0, 20, 1), + advanced=True, + ), + desc.FloatParam( + name="filterLargeTrianglesFactor", + label="Filter Large Triangles Factor", + description="Remove all large triangles.\n" + "We consider a triangle as large if one edge is bigger than N times the average edge length.\n" + "0 disables the filtering.", + value=60.0, + range=(0.0, 100.0, 0.1), + ), + desc.FloatParam( + name="filterTrianglesRatio", + label="Filter Triangles Ratio", + description="Remove all triangles by ratio (largest edge /smallest edge).\n" + "0 disables the filtering.", + value=0.0, + range=(1.0, 50.0, 0.1), + advanced=True, + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( 
+ name="outputMesh", + label="Mesh", + description="Output mesh file.", + value="{nodeCacheFolder}/mesh.{outputMeshFileTypeValue}", + ), + ] diff --git a/meshroom/aliceVision/MeshMasking.py b/meshroom/aliceVision/MeshMasking.py new file mode 100644 index 0000000000..45fa5f329b --- /dev/null +++ b/meshroom/aliceVision/MeshMasking.py @@ -0,0 +1,103 @@ +__version__ = "1.1" + +from meshroom.core import desc +from meshroom.core.utils import VERBOSE_LEVEL + + +class MeshMasking(desc.AVCommandLineNode): + commandLine = 'aliceVision_meshMasking {allParams}' + category = 'Mesh Post-Processing' + documentation = ''' +Decimate triangles based on image masks. +''' + + inputs = [ + desc.File( + name="input", + label="Dense SfMData", + description="Dense SfMData file.", + value="", + ), + desc.File( + name="inputMesh", + label="Input Mesh", + description="Input mesh.", + value="", + ), + desc.ChoiceParam( + name="outputMeshFileType", + label="Output Mesh Type", + description="File type of the output mesh.", + value="obj", + values=["obj", "gltf", "fbx", "stl"], + group="", + ), + desc.ListAttribute( + elementDesc=desc.File( + name="masksFolder", + label="Masks Folder", + description="Folder containing some masks.", + value="", + ), + name="masksFolders", + label="Masks Folders", + description="Use masks from specific folder(s). Filename should be the same or the image UID.", + ), + desc.ChoiceParam( + name="maskExtension", + label="Mask Extension", + description="File extension for the masks to use.", + value="png", + values=["exr", "jpg", "png"], + ), + desc.IntParam( + name="threshold", + label="Threshold", + description="The minimum number of visibilities to keep a vertex.", + value=1, + range=(1, 100, 1), + ), + desc.BoolParam( + name="smoothBoundary", + label="Smooth Boundary", + description="Modify the triangles at the boundary to fit the masks.", + value=False, + ), + desc.BoolParam( + name="invert", + label="Invert", + description="If ticked, the selected area is ignored.\n" + "If not, only the selected area is considered.", + value=False, + ), + desc.BoolParam( + name="undistortMasks", + label="Undistort Masks", + description="Undistort the masks with the same parameters as the matching image.\n" + "Select it if the masks are drawn on the original images.", + value=False, + ), + desc.BoolParam( + name="usePointsVisibilities", + label="Use Points visibilities", + description="Use the points visibilities from the meshing to filter triangles.\n" + "Example: when they are occluded, back-face, etc.", + value=False, + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="outputMesh", + label="Mesh", + description="Output mesh file.", + value="{nodeCacheFolder}/mesh.{outputMeshFileTypeValue}", + ), + ] diff --git a/meshroom/aliceVision/MeshRemoveUnseenFaces.py b/meshroom/aliceVision/MeshRemoveUnseenFaces.py new file mode 100644 index 0000000000..c82e3094bf --- /dev/null +++ b/meshroom/aliceVision/MeshRemoveUnseenFaces.py @@ -0,0 +1,69 @@ +__version__ = "3.0" + +from meshroom.core import desc +from meshroom.core.utils import VERBOSE_LEVEL + + +class MeshRemoveUnseenFaces(desc.AVCommandLineNode): + commandLine = 'aliceVision_meshRemoveUnseenFaces {allParams}' + + cpu = desc.Level.INTENSIVE + ram = desc.Level.NORMAL + + category = 'Dense Reconstruction' + documentation = ''' +Remove triangles from the mesh when the vertices 
are not visible from any camera. +''' + + inputs = [ + desc.File( + name="input", + label="SfMData", + description="Input SfMData file.", + value="", + ), + desc.File( + name="inputMesh", + label="Mesh", + description="Input Mesh file.", + value="", + ), + desc.ChoiceParam( + name="outputMeshFileType", + label="Mesh Type", + description="File type for the output mesh.", + value="obj", + values=["gltf", "obj", "fbx", "stl"], + group="", + ), + desc.IntParam( + name="minObservations", + label="Min Observations", + description="Minimal number of observations to keep a vertex.", + value=1, + range=(0, 5, 1), + ), + desc.IntParam( + name="minVertices", + label="Min Vertices to Remove a Triangle", + description="Minimal number of killed vertices in a triangle to remove the triangle.", + value=3, + range=(1, 3, 1), + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="outputMesh", + label="Mesh", + description="Output mesh file.", + value="{nodeCacheFolder}/mesh.{outputMeshFileTypeValue}", + ), + ] diff --git a/meshroom/aliceVision/MeshResampling.py b/meshroom/aliceVision/MeshResampling.py new file mode 100644 index 0000000000..d96373adfe --- /dev/null +++ b/meshroom/aliceVision/MeshResampling.py @@ -0,0 +1,82 @@ +__version__ = "1.0" + +from meshroom.core import desc +from meshroom.core.utils import VERBOSE_LEVEL + + +class MeshResampling(desc.AVCommandLineNode): + commandLine = 'aliceVision_meshResampling {allParams}' + cpu = desc.Level.NORMAL + ram = desc.Level.NORMAL + + category = 'Mesh Post-Processing' + documentation = ''' +This node recomputes the mesh surface with a new topology and uniform density.
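The two thresholds in MeshRemoveUnseenFaces above combine into a simple per-triangle rule. A hedged sketch of that rule, as one can read it from the parameter descriptions (hypothetical helper, not the binary's code):

```python
def keep_triangle(vertex_observations, min_observations=1, min_vertices=3):
    """A vertex is 'killed' when it has fewer than min_observations
    observations; the triangle is removed once at least min_vertices of
    its 3 vertices are killed."""
    killed = sum(1 for n in vertex_observations if n < min_observations)
    return killed < min_vertices

print(keep_triangle([0, 0, 5]))                   # True: only 2 of 3 killed
print(keep_triangle([0, 0, 0]))                   # False: all 3 killed
print(keep_triangle([0, 0, 5], min_vertices=2))   # False with stricter setting
```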
+''' + + inputs = [ + desc.File( + name="input", + label="Input Mesh", + description="Input mesh in the OBJ file format.", + value="", + ), + desc.FloatParam( + name="simplificationFactor", + label="Simplification Factor", + description="Simplification factor for the resampling.", + value=0.5, + range=(0.0, 1.0, 0.01), + ), + desc.IntParam( + name="nbVertices", + label="Fixed Number Of Vertices", + description="Fixed number of output vertices.", + value=0, + range=(0, 1000000, 1), + ), + desc.IntParam( + name="minVertices", + label="Min Vertices", + description="Minimum number of output vertices.", + value=0, + range=(0, 1000000, 1), + ), + desc.IntParam( + name="maxVertices", + label="Max Vertices", + description="Maximum number of output vertices.", + value=0, + range=(0, 1000000, 1), + ), + desc.IntParam( + name="nbLloydIter", + label="Number Of Pre-Smoothing Iterations", + description="Number of iterations for Lloyd pre-smoothing.", + value=40, + range=(0, 100, 1), + ), + desc.BoolParam( + name="flipNormals", + label="Flip Normals", + description="Option to flip face normals.\n" + "It can be needed as it depends on the vertices order in triangles and the convention changes from one software to another.", + value=False, + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="output", + label="Mesh", + description="Output mesh in the OBJ file format.", + value="{nodeCacheFolder}/mesh.obj", + ), + ] diff --git a/meshroom/aliceVision/Meshing.py b/meshroom/aliceVision/Meshing.py new file mode 100644 index 0000000000..b01fe33e85 --- /dev/null +++ b/meshroom/aliceVision/Meshing.py @@ -0,0 +1,483 @@ +__version__ = "7.0" + +from meshroom.core import desc +from meshroom.core.utils import VERBOSE_LEVEL + + +class Meshing(desc.AVCommandLineNode): + commandLine = 'aliceVision_meshing {allParams}' + + cpu = desc.Level.INTENSIVE + ram = desc.Level.INTENSIVE + + category = 'Dense Reconstruction' + documentation = ''' +This node creates a dense geometric surface representation of the scene. + +First, it fuses all the depth maps into a global dense point cloud with an adaptive resolution. +It then performs a 3D Delaunay tetrahedralization, and a voting procedure computes weights on cells and on the facets connecting them. +A Graph Cut Max-Flow is applied to optimally cut the volume. This cut represents the extracted mesh surface.
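The cut step can be pictured as a tiny s-t min-cut over tetrahedral cells: unary "full"/"empty" votes become source/sink capacities, pairwise facet weights penalize label changes, and the facets crossing the cut approximate the surface. A toy illustration of that idea (using `networkx`; scores are made up, not AliceVision's actual voting):

```python
import networkx as nx

G = nx.DiGraph()
cells = ["c0", "c1", "c2", "c3"]
# Hypothetical per-cell votes from visibility rays.
full_votes = {"c0": 9.0, "c1": 7.0, "c2": 1.0, "c3": 0.5}
empty_votes = {"c0": 0.5, "c1": 1.0, "c2": 8.0, "c3": 9.0}
for c in cells:
    G.add_edge("s", c, capacity=full_votes[c])   # cost of labelling c "empty"
    G.add_edge(c, "t", capacity=empty_votes[c])  # cost of labelling c "full"
# Smoothness terms on facets shared by adjacent cells.
facets = [("c0", "c1"), ("c1", "c2"), ("c2", "c3")]
for a, b in facets:
    G.add_edge(a, b, capacity=2.0)
    G.add_edge(b, a, capacity=2.0)

cut_value, (full_side, empty_side) = nx.minimum_cut(G, "s", "t")
# Facets between the two sides approximate the extracted surface.
surface = [(a, b) for a, b in facets if (a in full_side) != (b in full_side)]
print(cut_value, surface)
```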
+ +## Online +[https://alicevision.org/#photogrammetry/meshing](https://alicevision.org/#photogrammetry/meshing) +''' + + inputs = [ + desc.File( + name="input", + label="SfmData", + description="Input SfMData file.", + value="", + ), + desc.File( + name="depthMapsFolder", + label="Depth Maps Folder", + description="Input depth maps folder.", + value="", + ), + desc.ChoiceParam( + name="outputMeshFileType", + label="Mesh Type", + description="File type for the output mesh.", + value="obj", + values=["gltf", "obj", "fbx", "stl"], + group="", + ), + desc.BoolParam( + name="useBoundingBox", + label="Custom Bounding Box", + description="Edit the meshing bounding box.\n" + "If enabled, it takes priority over the 'Estimate Space From SfM' option.\n" + "Parameters can be adjusted in advanced settings.", + value=False, + group="", + ), + desc.GroupAttribute( + name="boundingBox", + label="Bounding Box Settings", + description="Translation, rotation and scale of the bounding box.", + groupDesc=[ + desc.GroupAttribute( + name="bboxTranslation", + label="Translation", + description="Position in space.", + groupDesc=[ + desc.FloatParam( + name="x", label="x", description="X offset.", + value=0.0, + range=(-20.0, 20.0, 0.01), + ), + desc.FloatParam( + name="y", label="y", description="Y offset.", + value=0.0, + range=(-20.0, 20.0, 0.01), + ), + desc.FloatParam( + name="z", label="z", description="Z offset.", + value=0.0, + range=(-20.0, 20.0, 0.01), + ), + ], + joinChar=",", + ), + desc.GroupAttribute( + name="bboxRotation", + label="Euler Rotation", + description="Rotation in Euler degrees.", + groupDesc=[ + desc.FloatParam( + name="x", label="x", description="Euler X rotation.", + value=0.0, + range=(-90.0, 90.0, 1.0) + ), + desc.FloatParam( + name="y", label="y", description="Euler Y rotation.", + value=0.0, + range=(-180.0, 180.0, 1.0) + ), + desc.FloatParam( + name="z", label="z", description="Euler Z rotation.", + value=0.0, + range=(-180.0, 180.0, 1.0) + ), + ], + joinChar=",", + ), + desc.GroupAttribute( + name="bboxScale", + label="Scale", + description="Scale of the bounding box.", + groupDesc=[ + desc.FloatParam( + name="x", label="x", description="X scale.", + value=1.0, + range=(0.0, 20.0, 0.01), + ), + desc.FloatParam( + name="y", label="y", description="Y scale.", + value=1.0, + range=(0.0, 20.0, 0.01), + ), + desc.FloatParam( + name="z", label="z", description="Z scale.", + value=1.0, + range=(0.0, 20.0, 0.01), + ), + ], + joinChar=",", + ), + ], + joinChar=",", + enabled=lambda node: node.useBoundingBox.value, + ), + desc.BoolParam( + name="estimateSpaceFromSfM", + label="Estimate Space From SfM", + description="Estimate the 3D space from the SfM.", + value=True, + advanced=True, + ), + desc.IntParam( + name="estimateSpaceMinObservations", + label="Min Observations For SfM Space Estimation", + description="Minimum number of observations for the space estimation from the SfM.", + value=3, + range=(0, 100, 1), + advanced=True, + enabled=lambda node: node.estimateSpaceFromSfM.value, + ), + desc.FloatParam( + name="estimateSpaceMinObservationAngle", + label="Min Observations Angle For SfM Space Estimation", + description="Minimum angle between two observations for the space estimation from the SfM.", + value=10.0, + range=(0.0, 120.0, 1.0), + enabled=lambda node: node.estimateSpaceFromSfM.value, + ), + desc.IntParam( + name="maxInputPoints", + label="Max Input Points", + description="Maximum input points loaded from depth map images.", + value=50000000, + range=(500000, 500000000, 1000), + 
), + desc.IntParam( + name="maxPoints", + label="Max Points", + description="Maximum points at the end of the depth maps fusion.", + value=5000000, + range=(100000, 10000000, 1000), + ), + desc.IntParam( + name="maxPointsPerVoxel", + label="Max Points Per Voxel", + description="Maximum points per voxel.", + value=1000000, + range=(500000, 30000000, 1000), + advanced=True, + ), + desc.IntParam( + name="minStep", + label="Min Step", + description="The step used to load depth values from depth maps is computed from 'maxInputPoints'.\n" + "Here we define the minimum value for this step, so on small datasets we will not spend " + "too much time at the beginning loading all the depth values.", + value=2, + range=(1, 20, 1), + advanced=True, + ), + desc.ChoiceParam( + name="partitioning", + label="Partitioning", + description="Single block or auto partitioning.", + value="singleBlock", + values=["singleBlock", "auto"], + advanced=True, + ), + desc.ChoiceParam( + name="repartition", + label="Repartition", + description="Multi-resolution or regular grid-based repartition.", + value="multiResolution", + values=["multiResolution", "regularGrid"], + advanced=True, + ), + desc.FloatParam( + name="angleFactor", + label="Angle Factor", + description="Angle factor.", + value=15.0, + range=(0.0, 200.0, 1.0), + advanced=True, + ), + desc.FloatParam( + name="simFactor", + label="Sim Factor", + description="Sim factor.", + value=15.0, + range=(0.0, 200.0, 1.0), + advanced=True, + ), + desc.IntParam( + name="minVis", + label="Min Observations", + description="Filter points based on their number of observations.", + value=2, + range=(1, 20, 1), + advanced=True, + ), + desc.FloatParam( + name="pixSizeMarginInitCoef", + label="Pix Size Margin Init Coef", + description="Size of the margin init coefficient, in pixels.", + value=2.0, + range=(0.0, 10.0, 0.1), + advanced=True, + ), + desc.FloatParam( + name="pixSizeMarginFinalCoef", + label="Pix Size Margin Final Coef", + description="Size of the margin final coefficient, in pixels.", + value=4.0, + range=(0.0, 10.0, 0.1), + advanced=True, + ), + desc.FloatParam( + name="voteMarginFactor", + label="Vote Margin Factor", + description="Vote margin factor.", + value=4.0, + range=(0.1, 10.0, 0.1), + advanced=True, + ), + desc.FloatParam( + name="contributeMarginFactor", + label="Contribute Margin Factor", + description="Contribute margin factor.", + value=2.0, + range=(0.0, 10.0, 0.1), + advanced=True, + ), + desc.FloatParam( + name="simGaussianSizeInit", + label="Sim Gaussian Size Init", + description="Sim Gaussian size init.", + value=10.0, + range=(0.0, 50.0, 0.1), + advanced=True, + ), + desc.FloatParam( + name="simGaussianSize", + label="Sim Gaussian Size", + description="Sim Gaussian size.", + value=10.0, + range=(0.0, 50.0, 0.1), + advanced=True, + ), + desc.FloatParam( + name="minAngleThreshold", + label="Min Angle Threshold", + description="Minimum angle threshold.", + value=1.0, + range=(0.0, 10.0, 0.01), + advanced=True, + ), + desc.BoolParam( + name="refineFuse", + label="Refine Fuse", + description="Refine depth map fusion with the new pixels size defined by angle and similarity scores.", + value=True, + advanced=True, + ), + desc.IntParam( + name="helperPointsGridSize", + label="Helper Points Grid Size", + description="Grid size for the helper points.", + value=10, + range=(0, 50, 1), + advanced=True, + ), + desc.BoolParam( + name="densify", + label="Densify", + description="Densify scene with helper points around vertices.", + value=False, + 
invalidate=False, + advanced=True, + group="", + ), + desc.IntParam( + name="densifyNbFront", + label="Densify: Front", + description="Densify vertices: front.", + value=1, + range=(0, 5, 1), + advanced=True, + enabled=lambda node: node.densify.value, + ), + desc.IntParam( + name="densifyNbBack", + label="Densify: Back", + description="Densify vertices: back.", + value=1, + range=(0, 5, 1), + advanced=True, + enabled=lambda node: node.densify.value, + ), + desc.FloatParam( + name="densifyScale", + label="Densify Scale", + description="Scale between points used to densify the scene.", + value=20.0, + range=(0.0, 10.0, 0.1), + advanced=True, + enabled=lambda node: node.densify.value, + ), + desc.FloatParam( + name="nPixelSizeBehind", + label="Nb Pixel Size Behind", + description="Number of pixel size units to vote behind the vertex as FULL status.", + value=4.0, + range=(0.0, 10.0, 0.1), + advanced=True, + ), + desc.FloatParam( + name="fullWeight", + label="Full Weight", + description="Weighting for full status.", + value=1.0, + range=(0.0, 10.0, 0.1), + advanced=True, + ), + desc.BoolParam( + name="voteFilteringForWeaklySupportedSurfaces", + label="Weakly Supported Surface Support", + description="Improve support of weakly supported surfaces with a tetrahedra fullness score filtering.", + value=True, + ), + desc.BoolParam( + name="addLandmarksToTheDensePointCloud", + label="Add Landmarks To The Dense Point Cloud", + description="Add SfM landmarks to the dense point cloud.", + value=False, + advanced=True, + ), + desc.IntParam( + name="invertTetrahedronBasedOnNeighborsNbIterations", + label="Tetrahedron Neighbors Coherency Nb Iterations", + description="Invert cells status around surface to improve smoothness.\n" + "Set to 0 to disable.", + value=10, + range=(0, 30, 1), + advanced=True, + ), + desc.FloatParam( + name="minSolidAngleRatio", + label="Min Solid Angle Ratio", + description="Change cells status on surface around vertices to improve smoothness using solid angle \n" + "ratio between full/empty parts. Set to 0 to disable.", + value=0.2, + range=(0.0, 0.5, 0.01), + advanced=True, + ), + desc.IntParam( + name="nbSolidAngleFilteringIterations", + label="Nb Solid Angle Filtering Iterations", + description="Filter cells status on surface around vertices to improve smoothness using solid angle ratio \n" + "between full/empty parts. Set to 0 to disable.", + value=2, + range=(0, 30, 1), + advanced=True, + ), + desc.BoolParam( + name="colorizeOutput", + label="Colorize Output", + description="Whether to colorize output dense point cloud and mesh.", + value=False, + ), + desc.BoolParam( + name="addMaskHelperPoints", + label="Add Mask Helper Points", + description="Add helper points on the outline of the depth map masks.", + value=False, + invalidate=False, + advanced=True, + group="", + ), + desc.FloatParam( + name="maskHelperPointsWeight", + label="Mask Helper Points Weight", + description="Weight value for mask helper points.
0 means no helper point.", + value=1.0, + range=(0.0, 20.0, 1.0), + advanced=True, + enabled=lambda node: node.addMaskHelperPoints.value, + ), + desc.IntParam( + name="maskBorderSize", + label="Mask Border Size", + description="Number of pixels on mask borders.", + value=4, + range=(0, 20, 1), + advanced=True, + enabled=lambda node: node.addMaskHelperPoints.value, + ), + desc.IntParam( + name="maxNbConnectedHelperPoints", + label="Helper Points: Max Segment Size", + description="Maximum size of a segment of connected helper points before we remove it.\n" + "Small segments of helper points can be on the real surface and should not be removed to avoid the creation of holes.\n" + "0 means that all helper points are removed. -1 means that helper points are not filtered at all.", + value=50, + range=(-1, 100, 1), + advanced=True, + ), + desc.BoolParam( + name="saveRawDensePointCloud", + label="Save Raw Dense Point Cloud", + description="Save dense point cloud before cut and filtering.", + value=False, + invalidate=False, + advanced=True, + ), + desc.BoolParam( + name="exportDebugTetrahedralization", + label="Export Debug Tetrahedralization", + description="Export debug cells score as tetrahedral mesh.\n" + "WARNING: Could create HUGE meshes, only use on very small datasets.", + value=False, + invalidate=False, + advanced=True, + ), + desc.IntParam( + name="seed", + label="Seed", + description="Seed used for random operations.\n" + "0 means use of random device instead of a fixed seed.", + value=0, + range=(0, 10000, 1), + advanced=True, + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="outputMesh", + label="Mesh", + description="Output mesh.", + value="{nodeCacheFolder}/mesh.{outputMeshFileTypeValue}", + ), + desc.File( + name="output", + label="Dense SfMData", + description="Output dense point cloud with visibilities (SfMData file format).", + value="{nodeCacheFolder}/densePointCloud.abc", + ), + ] diff --git a/meshroom/aliceVision/NodalSfM.py b/meshroom/aliceVision/NodalSfM.py new file mode 100644 index 0000000000..1dbd902ced --- /dev/null +++ b/meshroom/aliceVision/NodalSfM.py @@ -0,0 +1,51 @@ +__version__ = "2.0" + +from meshroom.core import desc +from meshroom.core.utils import DESCRIBER_TYPES, VERBOSE_LEVEL + + +class NodalSfM(desc.AVCommandLineNode): + commandLine = 'aliceVision_nodalSfM {allParams}' + size = desc.DynamicNodeSize('input') + + category = 'Sparse Reconstruction' + documentation = ''' +A Structure-From-Motion node specifically designed to handle pure rotation camera movements. 
+''' + + inputs = [ + desc.File( + name="input", + label="SfMData", + description="Input SfMData file.", + value="", + ), + desc.File( + name="tracksFilename", + label="Tracks File", + description="Input tracks file.", + value="", + ), + desc.File( + name="pairs", + label="Pairs File", + description="Information on pairs.", + value="", + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="output", + label="SfMData", + description="Path to the output SfMData file.", + value="{nodeCacheFolder}/sfm.abc", + ), + ] diff --git a/meshroom/aliceVision/NormalIntegration.py b/meshroom/aliceVision/NormalIntegration.py new file mode 100644 index 0000000000..207edd8305 --- /dev/null +++ b/meshroom/aliceVision/NormalIntegration.py @@ -0,0 +1,52 @@ +__version__ = "1.0" + +from meshroom.core import desc +from meshroom.core.utils import VERBOSE_LEVEL + +class NormalIntegration(desc.CommandLineNode): + commandLine = 'aliceVision_normalIntegration {allParams}' + category = 'Photometric Stereo' + documentation = ''' +Evaluate a depth map from a normal map (currently in development). +''' + + inputs = [ + desc.File( + name="inputPath", + label="Normal Maps Folder", + description="Path to the folder containing the normal maps and the masks.", + value="", + ), + desc.File( + name="sfmDataFile", + label="SfMData", + description="Input SfMData file.", + value="", + ), + desc.IntParam( + name="downscale", + label="Downscale Factor", + description="Downscale factor for faster results.", + value=1, + range=(1, 10, 1), + advanced=True, + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="depthMap", + label="Depth Map Camera", + description="Generated depth map in the camera coordinate system.", + semantic="image", + value="{nodeCacheFolder}/_depthMap.exr", + group="", # do not export on the command line + ) + ] diff --git a/meshroom/aliceVision/NormalMapRendering.py b/meshroom/aliceVision/NormalMapRendering.py new file mode 100644 index 0000000000..7355842f76 --- /dev/null +++ b/meshroom/aliceVision/NormalMapRendering.py @@ -0,0 +1,52 @@ +__version__ = "1.0" + +from meshroom.core import desc +from meshroom.core.utils import VERBOSE_LEVEL + + +class NormalMapRendering(desc.AVCommandLineNode): + commandLine = "aliceVision_normalMapRendering {allParams}" + + category = "Utils" + documentation = """ + Using camera parameters and a mesh, render normal maps for each view. + """ + + inputs = [ + desc.File( + name="input", + label="Input SfMData", + description="Input SfMData file.", + value="", + ), + desc.File( + name="mesh", + label="Input Mesh", + description="Input mesh file.", + value="", + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="output", + label="Folder", + description="Output folder.", + value="{nodeCacheFolder}", + ), + desc.File( + name="normal", + label="Normal Maps", + description="Rendered normal maps.", + semantic="image", + value="{nodeCacheFolder}/_normalMap.exr", + group="", # do not export on the command line + ), + ] diff --git
a/meshroom/aliceVision/PanoramaCompositing.py b/meshroom/aliceVision/PanoramaCompositing.py new file mode 100644 index 0000000000..a5ac007aae --- /dev/null +++ b/meshroom/aliceVision/PanoramaCompositing.py @@ -0,0 +1,118 @@ +__version__ = "2.0" + +import json +import os + +from meshroom.core import desc +from meshroom.core.utils import EXR_STORAGE_DATA_TYPE, VERBOSE_LEVEL + + +class PanoramaCompositing(desc.AVCommandLineNode): + commandLine = 'aliceVision_panoramaCompositing {allParams}' + size = desc.DynamicNodeSize('input') + parallelization = desc.Parallelization(blockSize=5) + commandLineRange = '--rangeIteration {rangeIteration} --rangeSize {rangeBlockSize}' + cpu = desc.Level.INTENSIVE + ram = desc.Level.INTENSIVE + + category = 'Panorama HDR' + documentation = ''' +Once the images have been transformed geometrically (in PanoramaWarping), +they have to be fused together into a single panorama image that looks like a single photograph. +The Multi-band Blending method provides the best quality. It averages the pixel values using multiple bands in the frequency domain. +Multiple cameras contribute to the low frequencies, and only the best one contributes to the high frequencies. +''' + + inputs = [ + desc.File( + name="input", + label="Input SfMData", + description="Input SfMData file.", + value="", + ), + desc.File( + name="warpingFolder", + label="Warping Folder", + description="Panorama warping results folder.", + value="", + ), + desc.File( + name="labels", + label="Labels Images", + description="Panorama seams results images.", + value="", + ), + desc.ChoiceParam( + name="compositerType", + label="Compositer Type", + description="Which compositer should be used to blend images:\n" + " - multiband: high quality transition by fusing images by frequency bands.\n" + " - replace: debug option with straight transitions.\n" + " - alpha: debug option with linear transitions.", + value="multiband", + values=["replace", "alpha", "multiband"], + ), + desc.IntParam( + name="forceMinPyramidLevels", + label="Min Pyramid Levels", + description="Force the minimal number of levels in the pyramid for multiband compositer.", + value=0, + range=(0, 16, 1), + enabled=lambda node: node.compositerType.value and node.compositerType.value == "multiband", + ), + desc.IntParam( + name="maxThreads", + label="Max Nb Threads", + description="Specifies the maximum number of threads to run simultaneously.", + value=4, + range=(0, 48, 1), + invalidate=False, + advanced=True, + ), + desc.BoolParam( + name="useTiling", + label="Use Tiling", + description="Enable tiling mode for parallelization.", + value=True, + exposed=True, + ), + desc.ChoiceParam( + name="storageDataType", + label="Storage Data Type", + description="Storage image data type:\n" + " - float: Use full floating point (32 bits per channel).\n" + " - half: Use half float (16 bits per channel).\n" + " - halfFinite: Use half float, but clamp values to avoid non-finite values.\n" + " - auto: Use half float if all values can fit, else use full float.", + values=EXR_STORAGE_DATA_TYPE, + value="float", + ), + desc.ChoiceParam( + name="overlayType", + label="Overlay Type", + description="Overlay on top of panorama to analyze transitions:\n" + " - none: no overlay.\n" + " - borders: display image borders.\n" + " - seams: display transitions between images.\n" + " - all: display borders and seams.", + value="none", + values=["none", "borders", "seams", "all"], + advanced=True, + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", +
description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="output", + label="Folder", + description="Output folder containing the composited panorama.", + value="{nodeCacheFolder}", + ), + ] diff --git a/meshroom/aliceVision/PanoramaEstimation.py b/meshroom/aliceVision/PanoramaEstimation.py new file mode 100644 index 0000000000..d041be7e11 --- /dev/null +++ b/meshroom/aliceVision/PanoramaEstimation.py @@ -0,0 +1,181 @@ +__version__ = "1.0" + +import json +import os + +from meshroom.core import desc +from meshroom.core.utils import DESCRIBER_TYPES, VERBOSE_LEVEL + + +class PanoramaEstimation(desc.AVCommandLineNode): + commandLine = 'aliceVision_panoramaEstimation {allParams}' + size = desc.DynamicNodeSize('input') + + category = 'Panorama HDR' + documentation = ''' +Estimate relative camera rotations between input images. +''' + + inputs = [ + desc.File( + name="input", + label="SfMData", + description="Input SfMData file.", + value="", + ), + desc.ListAttribute( + elementDesc=desc.File( + name="featuresFolder", + label="Features Folder", + description="Folder containing some extracted features.", + value="", + ), + name="featuresFolders", + label="Features Folders", + description="Folder(s) containing the extracted features.", + exposed=True, + ), + desc.ListAttribute( + elementDesc=desc.File( + name="matchesFolder", + label="Matches Folder", + description="Folder containing some matches.", + value="", + ), + name="matchesFolders", + label="Matches Folders", + description="Folder(s) in which computed matches are stored.", + ), + desc.ChoiceParam( + name="describerTypes", + label="Describer Types", + description="Describer types used to describe an image.", + values=DESCRIBER_TYPES, + value=["sift"], + exclusive=False, + joinChar=",", + exposed=True, + ), + desc.FloatParam( + name="offsetLongitude", + label="Longitude Offset", + description="Offset to the panorama longitude (in degrees).", + value=0.0, + range=(-180.0, 180.0, 1.0), + ), + desc.FloatParam( + name="offsetLatitude", + label="Latitude Offset", + description="Offset to the panorama latitude (in degrees).", + value=0.0, + range=(-90.0, 90.0, 1.0), + ), + desc.ChoiceParam( + name="rotationAveraging", + label="Rotation Averaging Method", + description="Method for rotation averaging :\n" + " - L1 minimization\n" + " - L2 minimization", + values=["L1_minimization", "L2_minimization"], + value="L2_minimization", + advanced=True, + ), + desc.ChoiceParam( + name="relativeRotation", + label="Relative Rotation Method", + description="Method for relative rotation :\n" + " - from essential matrix\n" + " - from homography matrix\n" + " - from rotation matrix", + values=["essential_matrix", "homography_matrix", "rotation_matrix"], + value="rotation_matrix", + advanced=True, + ), + desc.BoolParam( + name="rotationAveragingWeighting", + label="Rotation Averaging Weighting", + description="Rotation averaging weighting based on the number of feature matches.", + value=True, + advanced=True, + ), + desc.BoolParam( + name="filterMatches", + label="Filter Matches", + description="Filter the matches.", + value=False, + ), + desc.BoolParam( + name="refine", + label="Refine", + description="Refine camera relative poses, points and optionally internal camera parameters.", + value=True, + ), + desc.BoolParam( + name="lockAllIntrinsics", + label="Lock All Intrinsics", + description="Force to keep all the intrinsics parameters of the cameras (focal 
length, \n" + "principal point, distortion if any) constant during the reconstruction.\n" + "This may be helpful if the input cameras are already fully calibrated.", + value=False, + ), + desc.FloatParam( + name="maxAngleToPrior", + label="Max Angle To Priors (deg.)", + description="Maximum angle allowed regarding the input prior (in degrees) before refinement.", + value=20.0, + range=(0.0, 360.0, 1.0), + advanced=True, + ), + desc.FloatParam( + name="maxAngleToPriorRefined", + label="Max Refined Angle To Priors (deg.)", + description="Maximum angle allowed regarding the input prior (in degrees) after refinement.", + value=2.0, + range=(0.0, 360.0, 1.0), + advanced=True, + ), + desc.FloatParam( + name="maxAngularError", + label="Max Angular Error (deg.)", + description="Maximum angular error in global rotation averaging (in degrees).", + value=100.0, + range=(0.0, 360.0, 1.0), + advanced=True, + ), + desc.BoolParam( + name="intermediateRefineWithFocal", + label="Intermediate Refine: Focal", + description="Intermediate refine with rotation and focal length only.", + value=False, + advanced=True, + ), + desc.BoolParam( + name="intermediateRefineWithFocalDist", + label="Intermediate Refine: Focal And Distortion", + description="Intermediate refine with rotation, focal length and distortion.", + value=False, + advanced=True, + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="output", + label="SfM File", + description="Path to the output SfM file.", + value="{nodeCacheFolder}/panorama.abc", + ), + desc.File( + name="outputViewsAndPoses", + label="Views And Poses", + description="Path to the output SfMData file with cameras (views and poses).", + value="{nodeCacheFolder}/cameras.sfm", + ), + ] diff --git a/meshroom/aliceVision/PanoramaInit.py b/meshroom/aliceVision/PanoramaInit.py new file mode 100644 index 0000000000..a1bf735882 --- /dev/null +++ b/meshroom/aliceVision/PanoramaInit.py @@ -0,0 +1,160 @@ +__version__ = "2.0" + +from meshroom.core import desc +from meshroom.core.utils import VERBOSE_LEVEL + + +class PanoramaInit(desc.AVCommandLineNode): + commandLine = 'aliceVision_panoramaInit {allParams}' + size = desc.DynamicNodeSize('input') + + category = 'Panorama HDR' + documentation = ''' +This node allows the setup of the Panorama: + +1/ Initializes the cameras from known positions in an XML file (provided by +["Roundshot VR Drive"](https://www.roundshot.com/xml_1/internet/fr/application/d394/d395/f396.cfm)). + +2/ Sets up full fisheye optics (to use an equirectangular camera model). + +3/ Automatically detects the fisheye circle (radius + center) in the input images, or lets it be adjusted manually. + +''' + + inputs = [ + desc.File( + name="input", + label="SfMData", + description="Input SfMData file.", + value="", + ), + desc.ChoiceParam( + name="initializeCameras", + label="Initialize Cameras", + description="Initialize cameras.", + value="No", + values=["No", "File", "Horizontal", "Horizontal+Zenith", "Zenith+Horizontal", "Spherical"], + ), + desc.File( + name="config", + label="XML Config", + description="XML data file.", + value="", + enabled=lambda node: node.initializeCameras.value == "File", + ), + desc.BoolParam( + name="yawCW", + label="Yaw CW", + description="If selected, the yaw rotation will be clockwise.
Otherwise, it will be counter-clockwise.", + value=True, + enabled=lambda node: ("Horizontal" in node.initializeCameras.value) or (node.initializeCameras.value == "Spherical"), + ), + desc.BoolParam( + name="buildContactSheet", + label="Build Contact Sheet", + description="Build the contact sheet for the panorama if an XML data file is provided.\n" + "The contact sheet consists of a preview of the panorama using the input images.", + value=True, + enabled=lambda node: node.config.enabled and node.config.value != "", + ), + desc.ListAttribute( + elementDesc=desc.IntParam( + name="nbViews", + label="Number of Views", + description="Number of views for a line.", + value=-1, + range=(-1, 20, 1), + ), + name="nbViewsPerLine", + label="Spherical: Nb Views Per Line", + description="Number of views per line in Spherical acquisition.\n" + "Assumes angles from [-90,+90deg] for pitch and [-180,+180deg] for yaw.\n" + "Use -1 to estimate the number of images automatically.", + joinChar=",", + enabled=lambda node: node.initializeCameras.value == "Spherical", + ), + desc.BoolParam( + name="useFisheye", + label="Full Fisheye", + description="Set this option to declare a full fisheye panorama setup.", + value=False, + ), + desc.BoolParam( + name="estimateFisheyeCircle", + label="Estimate Fisheye Circle", + description="Automatically estimate the fisheye circle center and radius instead of using user values.", + value=True, + enabled=lambda node: node.useFisheye.value, + ), + desc.GroupAttribute( + name="fisheyeCenterOffset", + label="Fisheye Center", + description="Center of the fisheye circle (XY offset to the center in pixels).", + groupDesc=[ + desc.FloatParam( + name="fisheyeCenterOffset_x", + label="x", + description="X offset in pixels.", + value=0.0, + range=(-1000.0, 10000.0, 1.0), + ), + desc.FloatParam( + name="fisheyeCenterOffset_y", + label="y", + description="Y offset in pixels.", + value=0.0, + range=(-1000.0, 10000.0, 1.0), + ), + ], + group=None, # skip group from command line + enabled=lambda node: node.useFisheye.value and not node.estimateFisheyeCircle.value, + ), + desc.FloatParam( + name="fisheyeRadius", + label="Fisheye Radius", + description="Fisheye visibility circle radius (in % of image's shortest side).", + value=96.0, + range=(0.0, 150.0, 0.01), + enabled=lambda node: node.useFisheye.value and not node.estimateFisheyeCircle.value, + ), + desc.ChoiceParam( + name="inputAngle", + label="Input Angle Offset", + description="Add a rotation to the input XML given poses (CCW).", + value="None", + values=["None", "rotate90", "rotate180", "rotate270"], + ), + desc.BoolParam( + name="debugFisheyeCircleEstimation", + label="Debug Fisheye Circle Detection", + description="Debug fisheye circle detection.", + value=False, + enabled=lambda node: node.useFisheye.value, + advanced=True, + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="contactSheet", + label="Contact Sheet", + semantic="image", + description="Contact sheet path.", + value="{nodeCacheFolder}/contactSheetImage.jpg", + group="", # do not export on the command line + enabled=lambda node: node.buildContactSheet.enabled + ), + desc.File( + name="outSfMData", + label="SfMData File", + description="Path to the output SfMData file.", + value="{nodeCacheFolder}/sfmData.sfm", + ), + ] diff --git a/meshroom/aliceVision/PanoramaMerging.py
b/meshroom/aliceVision/PanoramaMerging.py new file mode 100644 index 0000000000..413f1f27d0 --- /dev/null +++ b/meshroom/aliceVision/PanoramaMerging.py @@ -0,0 +1,77 @@ +__version__ = "1.0" + +import json +import os + +from meshroom.core import desc +from meshroom.core.utils import EXR_STORAGE_DATA_TYPE, VERBOSE_LEVEL + + +class PanoramaMerging(desc.AVCommandLineNode): + commandLine = 'aliceVision_panoramaMerging {allParams}' + size = desc.DynamicNodeSize('input') + cpu = desc.Level.NORMAL + ram = desc.Level.INTENSIVE + + category = 'Panorama HDR' + documentation = ''' +Merge all inputs coming from the PanoramaCompositing node. +''' + + inputs = [ + desc.File( + name="input", + label="Input SfMData", + description="Input SfMData file.", + value="", + ), + desc.File( + name="compositingFolder", + label="Compositing Folder", + description="Panorama compositing results.", + value="", + ), + desc.ChoiceParam( + name="outputFileType", + label="Output File Type", + description="Output file type for the merged panorama.", + value="exr", + values=["jpg", "png", "tif", "exr"], + group="", # not part of allParams, as this is not a parameter for the command line + ), + desc.BoolParam( + name="useTiling", + label="Use Tiling", + description="Enable tiling mode for parallelization.", + value=True, + exposed=True, + ), + desc.ChoiceParam( + name="storageDataType", + label="Storage Data Type", + description="Storage image data type:\n" + " - float: Use full floating point (32 bits per channel).\n" + " - half: Use half float (16 bits per channel).\n" + " - halfFinite: Use half float, but clamp values to avoid non-finite values.\n" + " - auto: Use half float if all values can fit, else use full float.\n", + values=EXR_STORAGE_DATA_TYPE, + value="float", + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="outputPanorama", + label="Panorama", + description="Output merged panorama image.", + semantic="image", + value="{nodeCacheFolder}/panorama.{outputFileTypeValue}", + ), + ] diff --git a/meshroom/aliceVision/PanoramaPostProcessing.py b/meshroom/aliceVision/PanoramaPostProcessing.py new file mode 100644 index 0000000000..1a0a8a4aba --- /dev/null +++ b/meshroom/aliceVision/PanoramaPostProcessing.py @@ -0,0 +1,126 @@ +__version__ = "2.0" + +import json +import os + +from meshroom.core import desc +from meshroom.core.utils import COLORSPACES, VERBOSE_LEVEL + + +class PanoramaPostProcessing(desc.CommandLineNode): + commandLine = 'aliceVision_panoramaPostProcessing {allParams}' + cpu = desc.Level.NORMAL + ram = desc.Level.INTENSIVE + + category = 'Panorama HDR' + documentation = ''' +Post process the panorama. 
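A small pattern worth noting in PanoramaMerging above: `outputFileType` is declared with `group=""` so, judging by the inline comment, it stays out of the `{allParams}` command line, yet its value is still substituted into the output template through the `{outputFileTypeValue}` key. A hedged sketch of that substitution (folder and value are hypothetical):

```python
# Hedged illustration of the value templating used by the node outputs:
# "<name>Value" keys resolve to the current values of sibling attributes.
template = "{nodeCacheFolder}/panorama.{outputFileTypeValue}"
resolved = template.format(
    nodeCacheFolder="/cache/PanoramaMerging/abc123",  # hypothetical cache dir
    outputFileTypeValue="exr",                        # current outputFileType value
)
print(resolved)  # /cache/PanoramaMerging/abc123/panorama.exr
```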
+''' + + inputs = [ + desc.File( + name="inputPanorama", + label="Input Panorama", + description="Input panorama image.", + value="", + ), + desc.BoolParam( + name="fillHoles", + label="Fill Holes Algorithm", + description="Fill the non attributed pixels with push pull algorithm if set.", + value=False, + ), + desc.BoolParam( + name="exportLevels", + label="Export Downscaled Levels", + description="Export downscaled panorama levels.", + value=False, + ), + desc.IntParam( + name="lastLevelMaxSize", + label="Last Level Max Size", + description="Maximum width of smallest downscaled panorama level.", + value=3840, + range=(1, 100000), + ), + desc.IntParam( + name="previewSize", + label="Panorama Preview Width", + description="The width (in pixels) of the output panorama preview.", + value=1000, + range=(0, 5000, 100), + ), + desc.ChoiceParam( + name="outputColorSpace", + label="Output Color Space", + description="The color space of the output image.", + values=COLORSPACES, + value="Linear", + ), + desc.ChoiceParam( + name="compressionMethod", + label="Compression Method", + description="Compression method for output EXR image.", + value="auto", + values=["none", "auto", "rle", "zip", "zips", "piz", "pxr24", "b44", "b44a", "dwaa", "dwab"], + ), + desc.IntParam( + name="compressionLevel", + label="Compression Level", + description="Level of compression for the output EXR image. The range depends on method used.\n" + "For zip/zips methods, values must be between 1 and 9.\n" + "A value of 0 will be ignored, default value for the selected method will be used.", + value=0, + range=(0, 500, 1), + enabled=lambda node: node.compressionMethod.value in ["dwaa", "dwab", "zip", "zips"], + ), + desc.StringParam( + name="panoramaName", + label="Output Panorama Name", + description="Name of the output panorama.", + value="panorama.exr", + invalidate=False, + group=None, + advanced=True, + ), + desc.StringParam( + name="previewName", + label="Panorama Preview Name", + description="Name of the preview of the output panorama.", + value="panoramaPreview.jpg", + invalidate=False, + group=None, + advanced=True, + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="outputPanoramaPreview", + label="Output Panorama Preview", + description="Preview of the generated panorama in JPG format.", + semantic="image", + value=lambda attr: "{nodeCacheFolder}/" + attr.node.previewName.value, + ), + desc.File( + name="outputPanorama", + label="Output Panorama", + description="Generated panorama in EXR format.", + semantic="image", + value=lambda attr: "{nodeCacheFolder}/" + attr.node.panoramaName.value, + ), + desc.File( + name="downscaledPanoramaLevels", + label="Downscaled Panorama Levels", + description="Downscaled versions of the generated panorama.", + value=lambda attr: "{nodeCacheFolder}/" + os.path.splitext(attr.node.panoramaName.value)[0] + "_level_*.exr", + group="", + ), + ] diff --git a/meshroom/aliceVision/PanoramaPrepareImages.py b/meshroom/aliceVision/PanoramaPrepareImages.py new file mode 100644 index 0000000000..6a4f6137dc --- /dev/null +++ b/meshroom/aliceVision/PanoramaPrepareImages.py @@ -0,0 +1,41 @@ +__version__ = "1.1" + +from meshroom.core import desc +from meshroom.core.utils import VERBOSE_LEVEL + +import os.path + + +class PanoramaPrepareImages(desc.AVCommandLineNode): + commandLine = 'aliceVision_panoramaPrepareImages 
{allParams}' + size = desc.DynamicNodeSize('input') + + category = 'Panorama HDR' + documentation = ''' +Prepare images for the Panorama pipeline: ensures that image orientations are coherent. +''' + + inputs = [ + desc.File( + name="input", + label="Input", + description="SfMData file.", + value="", + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="output", + label="SfMData", + description="Output SfMData file.", + value=lambda attr: "{nodeCacheFolder}/" + os.path.basename(attr.node.input.value), + ), + ] diff --git a/meshroom/aliceVision/PanoramaSeams.py b/meshroom/aliceVision/PanoramaSeams.py new file mode 100644 index 0000000000..ffadc05843 --- /dev/null +++ b/meshroom/aliceVision/PanoramaSeams.py @@ -0,0 +1,70 @@ +__version__ = "2.0" + +import json +import os + +from meshroom.core import desc +from meshroom.core.utils import VERBOSE_LEVEL + + +class PanoramaSeams(desc.AVCommandLineNode): + commandLine = 'aliceVision_panoramaSeams {allParams}' + size = desc.DynamicNodeSize('input') + cpu = desc.Level.INTENSIVE + ram = desc.Level.INTENSIVE + + category = 'Panorama HDR' + documentation = ''' +Estimate the seam lines between the inputs to provide an optimal compositing in a later node. +''' + + inputs = [ + desc.File( + name="input", + label="Input SfMData", + description="Input SfMData file.", + value="", + ), + desc.File( + name="warpingFolder", + label="Warping Folder", + description="Panorama warping results.", + value="", + ), + desc.IntParam( + name="maxWidth", + label="Max Resolution", + description="Maximum resolution for the panorama seam estimation.", + value=5000, + range=(0, 100000, 1), + ), + desc.BoolParam( + name="useGraphCut", + label="Use Smart Seams", + description="Use a graphcut algorithm to optimize seams for better transitions between images.", + value=True, + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="output", + label="Labels", + description="", + semantic="image", + value="{nodeCacheFolder}/labels.exr", + ), + desc.File( + name="outputSfm", + label="Output SfMData File", + description="Path to the output SfMData file.", + value="{nodeCacheFolder}/panorama.sfm", + ), + ] diff --git a/meshroom/aliceVision/PanoramaWarping.py b/meshroom/aliceVision/PanoramaWarping.py new file mode 100644 index 0000000000..2a4a062689 --- /dev/null +++ b/meshroom/aliceVision/PanoramaWarping.py @@ -0,0 +1,98 @@ +__version__ = "1.1" + +import json +import os + +from meshroom.core import desc +from meshroom.core.utils import COLORSPACES, EXR_STORAGE_DATA_TYPE, VERBOSE_LEVEL + + +class PanoramaWarping(desc.AVCommandLineNode): + commandLine = 'aliceVision_panoramaWarping {allParams}' + size = desc.DynamicNodeSize('input') + parallelization = desc.Parallelization(blockSize=5) + commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}' + + category = 'Panorama HDR' + documentation = ''' +Compute the image warping for each input image in the panorama coordinate system.
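The warp itself maps each panorama pixel to a viewing ray and resamples the input images along those rays. A minimal sketch under one common equirectangular convention (axis conventions in AliceVision may differ; this is illustrative, not the node's code):

```python
import numpy as np

def panorama_pixel_to_ray(u, v, width, height):
    """Map an equirectangular panorama pixel to a unit viewing ray."""
    longitude = (u / width) * 2.0 * np.pi - np.pi   # [-pi, pi]
    latitude = (v / height) * np.pi - np.pi / 2.0   # [-pi/2, pi/2]
    return np.array([
        np.cos(latitude) * np.sin(longitude),
        np.sin(latitude),
        np.cos(latitude) * np.cos(longitude),
    ])

# The warping step then samples each input image along the rays that
# fall inside its field of view.
print(panorama_pixel_to_ray(5000, 2500, 10000, 5000))  # roughly the +z axis
```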
+''' + + inputs = [ + desc.File( + name="input", + label="SfMData", + description="Input SfMData file.", + value="", + ), + desc.BoolParam( + name="estimateResolution", + label="Estimate Resolution", + description="Estimate output panorama resolution automatically based on the resolution of input images.", + value=True, + group=None, # skip group from command line + ), + desc.IntParam( + name="panoramaWidth", + label="Panorama Width", + description="Choose the output panorama width (in pixels).", + value=10000, + range=(0, 50000, 1000), + enabled=lambda node: (not node.estimateResolution.value), + ), + desc.IntParam( + name="percentUpscale", + label="Upscale Ratio", + description="Percentage of upscaled pixels.\n" + "\n" + "How many percent of the pixels will be upscaled (compared to its original resolution):\n" + " - 0: all pixels will be downscaled.\n" + " - 50: on average, the input resolution is kept (optimal to reduce over/under-sampling).\n" + " - 100: all pixels will be upscaled.\n", + value=50, + range=(0, 100, 1), + enabled=lambda node: (node.estimateResolution.value), + ), + desc.IntParam( + name="maxPanoramaWidth", + label="Max Panorama Width", + description="Choose the maximum width for the output panorama (in pixels). 0 means no limit.", + value=70000, + range=(0, 100000, 1000), + enabled=lambda node: (node.estimateResolution.value), + ), + desc.ChoiceParam( + name="workingColorSpace", + label="Working Color Space", + description="Colorspace in which the panorama warping will be performed.", + values=COLORSPACES, + value="Linear", + ), + desc.ChoiceParam( + name="storageDataType", + label="Storage Data Type", + description="Storage image data type:\n" + " - float: Use full floating point (32 bits per channel).\n" + " - half: Use half float (16 bits per channel).\n" + " - halfFinite: Use half float, but clamp values to avoid non-finite values.\n" + " - auto: Use half float if all values can fit, else use full float.", + values=EXR_STORAGE_DATA_TYPE, + value="float", + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="output", + label="Folder", + description="Output folder.", + value="{nodeCacheFolder}", + ), + ] diff --git a/meshroom/aliceVision/PhotometricStereo.py b/meshroom/aliceVision/PhotometricStereo.py new file mode 100644 index 0000000000..6ca877e8e9 --- /dev/null +++ b/meshroom/aliceVision/PhotometricStereo.py @@ -0,0 +1,140 @@ +__version__ = "1.0" + +from meshroom.core import desc +from meshroom.core.utils import VERBOSE_LEVEL + +class PhotometricStereo(desc.CommandLineNode): + commandLine = 'aliceVision_photometricStereo {allParams}' + category = 'Photometric Stereo' + documentation = ''' +Reconstruction using Photometric Stereo. A normal map is evaluated from several photographs taken from the same point of view, but under different lighting conditions. +The lighting conditions are assumed to be known. 
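+The lighting information is read from a JSON file ('pathToJSONLightFile') or, if none is given, from .txt files located in the image folder.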
+''' + + inputs = [ + desc.File( + name="inputPath", + label="SfMData", + description="Input SfMData file.", + value="", + ), + desc.File( + name="pathToJSONLightFile", + label="Light File", + description="Path to a JSON file containing the lighting information.\n" + "If empty, .txt files are expected in the image folder.", + value="defaultJSON.txt", + ), + desc.File( + name="maskPath", + label="Mask Folder Path", + description="Path to a folder containing masks or to a mask directly.", + value="", + ), + desc.ChoiceParam( + name="SHOrder", + label="Spherical Harmonics Order", + description="Order of the spherical harmonics:\n" + " - 0: directional.\n" + " - 1: directional + ambient.\n" + " - 2: second order spherical harmonics.", + values=["0", "1", "2"], + value="0", + advanced=True, + ), + desc.BoolParam( + name="removeAmbient", + label="Remove Ambient Light", + description="True if the ambient light is to be removed on the PS images, false otherwise.", + value=False, + advanced=True, + ), + desc.BoolParam( + name="isRobust", + label="Use Robust Algorithm", + description="True to use the robust algorithm, false otherwise.", + value=False, + advanced=True, + ), + desc.IntParam( + name="downscale", + label="Downscale Factor", + description="Downscale factor for faster results.", + value=1, + range=(1, 10, 1), + advanced=True, + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="outputPath", + label="Output Folder", + description="Path to the output folder.", + value="{nodeCacheFolder}", + ), + desc.File( + name="outputSfmDataAlbedo", + label="SfMData Albedo", + description="Output SfMData file containing the albedo information.", + value="{nodeCacheFolder}/albedoMaps.sfm", + group="", # remove from command line + ), + desc.File( + name="outputSfmDataNormal", + label="SfMData Normal", + description="Output SfMData file containing the normal maps information.", + value="{nodeCacheFolder}/normalMaps.sfm", + group="", # remove from command line + ), + desc.File( + name="outputSfmDataNormalPNG", + label="SfMData Normal PNG", + description="Output SfMData file containing the normal maps information.", + value="{nodeCacheFolder}/normalMapsPNG.sfm", + group="", # remove from command line + ), + # these attributes are only here to describe more accurately the output of the node + # by specifying that it generates 2 sequences of images + # (see in Viewer2D.qml how these attributes can be used) + desc.File( + name="normals", + label="Normal Maps Camera", + description="Generated normal maps in the camera coordinate system.", + semantic="image", + value="{nodeCacheFolder}/_normals.exr", + group="", # do not export on the command line + ), + desc.File( + name="normalsPNG", + label="Normal Maps Camera (in false colors)", + description="Generated normal maps in the camera coordinate system (in false colors).", + semantic="image", + value="{nodeCacheFolder}/_normals.png", + group="", # do not export on the command line + ), + desc.File( + name="normalsWorld", + label="Normal Maps World", + description="Generated normal maps in the world coordinate system.", + semantic="image", + value="{nodeCacheFolder}/_normals_w.exr", + group="", # do not export on the command line + ), + + desc.File( + name="albedo", + label="Albedo Maps", + description="Generated albedo maps.", + semantic="image", + 
value="{nodeCacheFolder}/_albedo.png", + group="", # do not export on the command line + ), + ] diff --git a/meshroom/aliceVision/PrepareDenseScene.py b/meshroom/aliceVision/PrepareDenseScene.py new file mode 100644 index 0000000000..87eb78bc20 --- /dev/null +++ b/meshroom/aliceVision/PrepareDenseScene.py @@ -0,0 +1,108 @@ +__version__ = "3.1" + +from meshroom.core import desc +from meshroom.core.utils import VERBOSE_LEVEL + + +class PrepareDenseScene(desc.AVCommandLineNode): + commandLine = 'aliceVision_prepareDenseScene {allParams}' + size = desc.DynamicNodeSize('input') + parallelization = desc.Parallelization(blockSize=40) + commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}' + + category = 'Dense Reconstruction' + documentation = ''' +This node export undistorted images so the depth map and texturing can be computed on Pinhole images without distortion. +''' + + inputs = [ + desc.File( + name="input", + label="SfMData", + description="Input SfMData file.", + value="", + ), + desc.ListAttribute( + elementDesc=desc.File( + name="imagesFolder", + label="Images Folder", + description="", + value="", + ), + name="imagesFolders", + label="Images Folders", + description="Use images from specific folder(s). Filename should be the same or the image UID.", + ), + desc.ListAttribute( + elementDesc=desc.File( + name="masksFolder", + label="Masks Folder", + description="", + value="", + ), + name="masksFolders", + label="Masks Folders", + description="Use masks from specific folder(s). Filename should be the same or the image UID.", + ), + desc.ChoiceParam( + name="maskExtension", + label="Mask Extension", + description="File extension for the masks to use.", + value="png", + values=["exr", "jpg", "png"], + ), + desc.ChoiceParam( + name="outputFileType", + label="Output File Type", + description="Output file type for the undistorted images.", + value="exr", + values=["jpg", "png", "tif", "exr"], + advanced=True, + ), + desc.BoolParam( + name="saveMetadata", + label="Save Metadata", + description="Save projections and intrinsics information in images metadata (only for .exr images).", + value=True, + advanced=True, + ), + desc.BoolParam( + name="saveMatricesTxtFiles", + label="Save Matrices Text Files", + description="Save projections and intrinsics information in text files.", + value=False, + advanced=True, + ), + desc.BoolParam( + name="evCorrection", + label="Correct Images Exposure", + description="Apply a correction on images' exposure value.", + value=False, + advanced=True, + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="output", + label="Images Folder", + description="Output folder.", + value="{nodeCacheFolder}", + ), + desc.File( + name="undistorted", + label="Undistorted Images", + description="List of undistorted images.", + semantic="image", + value="{nodeCacheFolder}/.{outputFileTypeValue}", + group="", + advanced=True, + ), + ] diff --git a/meshroom/aliceVision/RelativePoseEstimating.py b/meshroom/aliceVision/RelativePoseEstimating.py new file mode 100644 index 0000000000..b49eb3ced8 --- /dev/null +++ b/meshroom/aliceVision/RelativePoseEstimating.py @@ -0,0 +1,69 @@ +__version__ = "3.0" + +from meshroom.core import desc +from meshroom.core.utils import DESCRIBER_TYPES, VERBOSE_LEVEL + +class RelativePoseEstimating(desc.AVCommandLineNode): + commandLine = 
'aliceVision_relativePoseEstimating {allParams}' + size = desc.DynamicNodeSize('input') + + parallelization = desc.Parallelization(blockSize=25) + commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}' + + category = 'Sparse Reconstruction' + documentation = ''' +Estimate relative pose between each pair of views that share tracks. +''' + + inputs = [ + desc.File( + name="input", + label="SfMData", + description="SfMData file.", + value="", + ), + desc.File( + name="tracksFilename", + label="Tracks File", + description="Tracks file.", + value="", + ), + desc.BoolParam( + name="enforcePureRotation", + label="Enforce pure rotation", + description="Enforce pure rotation as a model", + value=False, + ), + desc.IntParam( + name="countIterations", + label="Ransac Max Iterations", + description="Maximal number of iterations.", + value=1024, + range=(1024, 500000, 1), + advanced=True, + ), + desc.IntParam( + name="minInliers", + label="Ransac Min Inliers", + description="Minimal allowed inliers in two view relationship.", + value=35, + range=(1, 1000, 1), + advanced=True, + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="output", + label="Pairs Info", + description="Path to the output Pairs info files directory.", + value="{nodeCacheFolder}", + ), + ] diff --git a/meshroom/aliceVision/SelectConnectedViews.py b/meshroom/aliceVision/SelectConnectedViews.py new file mode 100644 index 0000000000..3db36f8062 --- /dev/null +++ b/meshroom/aliceVision/SelectConnectedViews.py @@ -0,0 +1,64 @@ +__version__ = "1.0" + +from meshroom.core import desc +from meshroom.core.utils import VERBOSE_LEVEL + + +class SelectConnectedViews(desc.AVCommandLineNode): + commandLine = 'aliceVision_selectConnectedViews {allParams}' + + cpu = desc.Level.NORMAL + ram = desc.Level.NORMAL + + category = 'Dense Reconstruction' + documentation = ''' +Select Connected Views based on SfM landmarks. 
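+For each view, up to 'maxTCams' neighbouring cameras are selected, constrained by the view-angle parameters below.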
+'''
+
+    inputs = [
+        desc.File(
+            name="input",
+            label="SfMData",
+            description="Input SfMData file.",
+            value="",
+        ),
+        desc.IntParam(
+            name="maxTCams",
+            label="Max Nb Neighbour Cameras",
+            description="Maximum number of neighbour cameras per image.",
+            value=10,
+            range=(1, 20, 1),
+        ),
+        desc.FloatParam(
+            name="minViewAngle",
+            label="Min View Angle",
+            description="Minimum angle between two views (select the neighbouring cameras, select depth planes from epipolar segment point).",
+            value=2.0,
+            range=(0.0, 10.0, 0.1),
+            advanced=True,
+        ),
+        desc.FloatParam(
+            name="maxViewAngle",
+            label="Max View Angle",
+            description="Maximum angle between two views (select the neighbouring cameras, select depth planes from epipolar segment point).",
+            value=70.0,
+            range=(10.0, 120.0, 1.0),
+            advanced=True,
+        ),
+        desc.ChoiceParam(
+            name="verboseLevel",
+            label="Verbose Level",
+            description="Verbosity level (fatal, error, warning, info, debug, trace).",
+            values=VERBOSE_LEVEL,
+            value="info",
+        ),
+    ]
+
+    outputs = [
+        desc.File(
+            name="output",
+            label="Connected Views",
+            description="List of connected views in a text file.",
+            value="{nodeCacheFolder}/connectedViews.txt",
+        ),
+    ]
diff --git a/meshroom/aliceVision/SfMAlignment.py b/meshroom/aliceVision/SfMAlignment.py
new file mode 100644
index 0000000000..436ab2a911
--- /dev/null
+++ b/meshroom/aliceVision/SfMAlignment.py
@@ -0,0 +1,118 @@
+__version__ = "2.0"
+
+from meshroom.core import desc
+from meshroom.core.utils import VERBOSE_LEVEL
+
+import os.path
+
+
+class SfMAlignment(desc.AVCommandLineNode):
+    commandLine = 'aliceVision_sfmAlignment {allParams}'
+    size = desc.DynamicNodeSize('input')
+
+    category = 'Utils'
+    documentation = '''
+This node allows changing the coordinate system of one SfM scene to align it with another one.
+
+The alignment can be based on:
+ * from_cameras_viewid: Align cameras in both SfM on the specified viewId
+ * from_cameras_poseid: Align cameras in both SfM on the specified poseId
+ * from_cameras_filepath: Align cameras with a filepath matching, using 'fileMatchingPattern'
+ * from_cameras_metadata: Align cameras with matching metadata, using 'metadataMatchingList'
+ * from_markers: Align from markers with the same Id
+
+'''
+
+    inputs = [
+        desc.File(
+            name="input",
+            label="Input",
+            description="Input SfMData file.",
+            value="",
+        ),
+        desc.File(
+            name="reference",
+            label="Reference",
+            description="Path to the scene used as the reference coordinate system.",
+            value="",
+        ),
+        desc.ChoiceParam(
+            name="method",
+            label="Alignment Method",
+            description="Alignment method:\n"
+                        " - from_cameras_viewid: Align cameras with same view ID.\n"
+                        " - from_cameras_poseid: Align cameras with same pose ID.\n"
+                        " - from_cameras_filepath: Align cameras with a filepath matching, using 'fileMatchingPattern'.\n"
+                        " - from_cameras_metadata: Align cameras with matching metadata, using 'metadataMatchingList'.\n"
+                        " - from_markers: Align from markers with the same ID.\n"
+                        " - from_landmarks: Align from matched features.\n",
+            value="from_cameras_viewid",
+            values=["from_cameras_viewid", "from_cameras_poseid", "from_cameras_filepath", "from_cameras_metadata", "from_markers", "from_landmarks"],
+        ),
+        desc.StringParam(
+            name="fileMatchingPattern",
+            label="File Matching Pattern",
+            description="Matching regular expression for the 'from_cameras_filepath' method.\n"
+                        "You should capture specific parts of the filepath with parentheses to define matching elements.\n"
+                        "Some examples of patterns:\n"
+                        " - Match the filename without extension (default value): " r'".*\/(.*?)\.\w{3}"' "\n"
+                        " - Match the filename suffix after '_': " r'".*\/.*(_.*?\.\w{3})"' "\n"
+                        " - Match the filename prefix before '_': " r'".*\/(.*?)_.*\.\w{3}"',
+            value=r".*\/(.*?)\.\w{3}",
+        ),
+        desc.ListAttribute(
+            elementDesc=desc.File(
+                name="metadataMatching",
+                label="Metadata",
+                description="Metadata that should match to create the correspondences.",
+                value="",
+            ),
+            name="metadataMatchingList",
+            label="Metadata Matching List",
+            description="List of metadata that should match to create the correspondences.\n"
+                        "If the list is empty, the default value will be used: ['Make', 'Model', 'Exif:BodySerialNumber', 'Exif:LensSerialNumber'].",
+        ),
+        desc.BoolParam(
+            name="applyScale",
+            label="Scale",
+            description="Apply scale transformation.",
+            value=True,
+        ),
+        desc.BoolParam(
+            name="applyRotation",
+            label="Rotation",
+            description="Apply rotation transformation.",
+            value=True,
+        ),
+        desc.BoolParam(
+            name="applyTranslation",
+            label="Translation",
+            description="Apply translation transformation.",
+            value=True,
+        ),
+        desc.ChoiceParam(
+            name="verboseLevel",
+            label="Verbose Level",
+            description="Verbosity level (fatal, error, warning, info, debug, trace).",
+            values=VERBOSE_LEVEL,
+            value="info",
+        ),
+    ]
+
+    outputs = [
+        desc.File(
+            name="output",
+            label="SfMData File",
+            description="Output SfMData file.",
+            value=lambda attr: "{nodeCacheFolder}/" + (os.path.splitext(os.path.basename(attr.node.input.value))[0] or "sfmData") + ".abc",
+        ),
+        desc.File(
+            name="outputViewsAndPoses",
+            label="Poses",
+            description="Path to the output SfMData file with cameras (views and poses).",
+            value="{nodeCacheFolder}/cameras.sfm",
+        ),
+    ]
diff --git a/meshroom/aliceVision/SfMChecking.py b/meshroom/aliceVision/SfMChecking.py
new file mode 100644
index 0000000000..2e160bba42
--- /dev/null
+++ b/meshroom/aliceVision/SfMChecking.py
@@ -0,0 +1,77 @@
+__version__ = "1.0"
+
+from meshroom.core import desc
+from meshroom.core.utils import VERBOSE_LEVEL
+
+
+class SfMChecking(desc.Node):
+
+    category = "Utils"
+    documentation = """
+    Check an input SfM for validity.
+    Throw an error if the SfM does not satisfy constraints.
+    """
+
+    inputs = [
+        desc.File(
+            name="input",
+            label="SfMData",
+            description="Input SfMData file.",
+            value="",
+        ),
+        desc.FloatParam(
+            name="poseCompletion",
+            label="Completion Percentage",
+            description="Minimal percent of the views reconstructed.",
+            value=80.0,
+            range=(0.0, 100.0, 1.0),
+        ),
+        desc.ChoiceParam(
+            name="verboseLevel",
+            label="Verbose Level",
+            description="Verbosity level (fatal, error, warning, info, debug, trace).",
+            values=VERBOSE_LEVEL,
+            value="info",
+        )
+    ]
+
+    outputs = [
+        desc.File(
+            name="output",
+            label="SfM File",
+            description="Path to the output SfM file.",
+            value="{nodeCacheFolder}/sfmData.abc",
+        )
+    ]
+
+    def processChunk(self, chunk):
+        from pyalicevision import sfmData as avsfmdata
+        from pyalicevision import sfmDataIO as avsfmdataio
+
+        chunk.logManager.start(chunk.node.verboseLevel.value)
+        chunk.logger.info("Open input file")
+
+        data = avsfmdata.SfMData()
+        ret = avsfmdataio.load(data, chunk.node.input.value, avsfmdataio.ALL)
+        if not ret:
+            chunk.logger.error("Cannot open input")
+            chunk.logManager.end()
+            raise RuntimeError()
+
+        total = len(data.getViews())
+        valid = len(data.getValidViews())
+        ratio = (100.0 * float(valid)) / float(total)
+
+        chunk.logger.info(f"Total views: {total}")
+        chunk.logger.info(f"Reconstructed views: {valid}")
+        chunk.logger.info(f"Percentage of reconstructed views: {ratio}")
+
+        if ratio < chunk.node.poseCompletion.value:
+            chunk.logger.error("Percentage of reconstructed views is insufficient.")
+            chunk.logger.error(f"Expected {chunk.node.poseCompletion.value}, got {ratio}.")
+            chunk.logManager.end()
+            raise RuntimeError()
+
+        avsfmdataio.save(data, chunk.node.output.value, avsfmdataio.ALL)
+
+        chunk.logManager.end()
diff --git a/meshroom/aliceVision/SfMColorizing.py b/meshroom/aliceVision/SfMColorizing.py
new file mode 100644
index 0000000000..bf74217145
--- /dev/null
+++ b/meshroom/aliceVision/SfMColorizing.py
@@ -0,0 +1,41 @@
+__version__ = "1.0"
+
+from meshroom.core import desc
+from meshroom.core.utils import VERBOSE_LEVEL
+
+import json
+
+class SfMColorizing(desc.AVCommandLineNode):
+
+    commandLine = "aliceVision_sfmColorizing {allParams}"
+    size = desc.DynamicNodeSize("input")
+
+    category = "Utils"
+    documentation = """
+    Colorize the point cloud of an SfMData file.
+    """
+
+    inputs = [
+        desc.File(
+            name="input",
+            label="SfMData",
+            description="Input SfMData file.",
+            value="",
+        ),
+        desc.ChoiceParam(
+            name="verboseLevel",
+            label="Verbose Level",
+            description="Verbosity level (fatal, error, warning, info, debug, trace).",
+            values=VERBOSE_LEVEL,
+            value="info",
+        ),
+    ]
+
+    outputs = [
+        desc.File(
+            name="output",
+            label="SfMData",
+            description="Path to the output SfM file.",
+            value="{nodeCacheFolder}/sfmData.abc",
+        ),
+    ]
diff --git a/meshroom/aliceVision/SfMDistances.py b/meshroom/aliceVision/SfMDistances.py
new file mode 100644
index 0000000000..794a3a817b
--- /dev/null
+++ b/meshroom/aliceVision/SfMDistances.py
@@ -0,0 +1,63 @@
+__version__ = "3.0"
+
+from meshroom.core import desc
+from meshroom.core.utils import DESCRIBER_TYPES, VERBOSE_LEVEL
+
+
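+# Usage sketch for the pure-Python nodes in this plugin (e.g. SfMChecking above),
+# which rely on the pyalicevision bindings. This assumes pyalicevision is
+# importable in the current environment; the path is a placeholder:
+#
+#     from pyalicevision import sfmData as avsfmdata
+#     from pyalicevision import sfmDataIO as avsfmdataio
+#
+#     data = avsfmdata.SfMData()
+#     if avsfmdataio.load(data, "/path/to/sfmData.abc", avsfmdataio.ALL):
+#         total, valid = len(data.getViews()), len(data.getValidViews())
+#         print(f"{valid}/{total} views reconstructed")
+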
+class SfMDistances(desc.AVCommandLineNode):
+    commandLine = 'aliceVision_sfmDistances {allParams}'
+    size = desc.DynamicNodeSize('input')
+
+    documentation = '''
+    '''
+
+    inputs = [
+        desc.File(
+            name="input",
+            label="Input",
+            description="SfMData file.",
+            value="",
+        ),
+        desc.ChoiceParam(
+            name="objectType",
+            label="Type",
+            description="",
+            value="landmarks",
+            values=["landmarks", "cameras"],
+        ),
+        desc.ChoiceParam(
+            name="landmarksDescriberTypes",
+            label="Describer Types",
+            description="Describer types used to describe an image (only used when using 'landmarks').",
+            values=DESCRIBER_TYPES,
+            value=["cctag3"],
+            exclusive=False,
+            joinChar=",",
+        ),
+        desc.StringParam(
+            name="A",
+            label="A IDs",
+            description="It will display the distances between A and B elements.\n"
+                        "This value should be an ID or a list of IDs of landmarks or cameras (UID or filename without extension).\n"
+                        "It will list all elements if empty.",
+            value="",
+        ),
+        desc.StringParam(
+            name="B",
+            label="B IDs",
+            description="It will display the distances between A and B elements.\n"
+                        "This value should be an ID or a list of IDs of landmarks or cameras (UID or filename without extension).\n"
+                        "It will list all elements if empty.",
+            value="",
+        ),
+        desc.ChoiceParam(
+            name="verboseLevel",
+            label="Verbose Level",
+            description="Verbosity level (fatal, error, warning, info, debug, trace).",
+            values=VERBOSE_LEVEL,
+            value="info",
+        ),
+    ]
+
+    outputs = [
+    ]
diff --git a/meshroom/aliceVision/SfMFilter.py b/meshroom/aliceVision/SfMFilter.py
new file mode 100644
index 0000000000..7193a65ec9
--- /dev/null
+++ b/meshroom/aliceVision/SfMFilter.py
@@ -0,0 +1,57 @@
+__version__ = "1.0"
+
+from meshroom.core import desc
+from meshroom.core.utils import VERBOSE_LEVEL
+
+
+class SfMFilter(desc.CommandLineNode):
+    commandLine = 'aliceVision_sfmFilter {allParams}'
+    category = 'Utils'
+    documentation = '''
+This node allows selecting views from an SfMData file using a regular expression.
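+With the default pattern '.*\/(.*?)\.\w{3}', a filepath such as '/data/IMG_0001.jpg' is matched on 'IMG_0001'.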
+'''
+
+    inputs = [
+        desc.File(
+            name="inputFile",
+            label="Input File",
+            description="SfMData file.",
+            value="",
+        ),
+        desc.StringParam(
+            name="fileMatchingPattern",
+            label="File Matching Pattern",
+            description="Matching regular expression.\n"
+                        "You should capture specific parts of the filepath with parentheses to define matching elements.\n"
+                        "Some examples of patterns:\n"
+                        " - Match the filename without extension (default value): " r'".*\/(.*?)\.\w{3}"' "\n"
+                        " - Match the filename suffix after \"_\": " r'".*\/.*(_.*?\.\w{3})"' "\n"
+                        " - Match the filename prefix before \"_\": " r'".*\/(.*?)_.*\.\w{3}"',
+            value=r'.*\/(.*?)\.\w{3}',
+        ),
+        desc.ChoiceParam(
+            name="verboseLevel",
+            label="Verbose Level",
+            description="Verbosity level (fatal, error, warning, info, debug, trace).",
+            values=VERBOSE_LEVEL,
+            value="info",
+        ),
+    ]
+
+    outputs = [
+        desc.File(
+            name="outputSfMData_selected",
+            label="SfMData_selected",
+            description="Output SfMData file containing selected views.",
+            value="{nodeCacheFolder}/selectedSfmData.sfm",
+        ),
+        desc.File(
+            name="outputSfMData_unselected",
+            label="SfMData_unselected",
+            description="Output SfMData file containing remaining views.",
+            value="{nodeCacheFolder}/unselectedSfmData.sfm",
+        ),
+    ]
diff --git a/meshroom/aliceVision/SfMMerge.py b/meshroom/aliceVision/SfMMerge.py
new file mode 100644
index 0000000000..eca1654e08
--- /dev/null
+++ b/meshroom/aliceVision/SfMMerge.py
@@ -0,0 +1,103 @@
+__version__ = "3.0"
+
+from meshroom.core import desc
+from meshroom.core.utils import DESCRIBER_TYPES, VERBOSE_LEVEL
+
+import os.path
+
+class MergeNodeSize(desc.DynamicNodeSize):
+    """
+    MergeNodeSize expresses a dependency on multiple input attributes to define
+    the size of a Node in terms of individual tasks for parallelization.
+    """
+    def __init__(self, param):
+        self._params = param
+
+    def computeSize(self, node):
+
+        size = 0
+
+        for input in node.attribute(self._params).value:
+            paramName = input.getFullName()
+            param = node.attribute(paramName)
+            size = size + param.getLinkParam().node.size
+
+        return size
+
+
+class SfMMerge(desc.AVCommandLineNode):
+    commandLine = 'aliceVision_sfmMerge {allParams}'
+    size = MergeNodeSize("inputs")
+
+    category = 'Utils'
+    documentation = '''
+Merges multiple SfMData files into a single one. Fails if some UID is shared among them.
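+The size of this node is the sum of the sizes of its input nodes (see MergeNodeSize above).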
+'''
+
+    inputs = [
+        desc.ListAttribute(
+            elementDesc=desc.File(
+                name="input",
+                label="Input SfMData",
+                description="An SfMData file.",
+                value="",
+            ),
+            name="inputs",
+            label="Inputs",
+            description="Set of SfMData files (at least 1 is required).",
+            exposed=True,
+        ),
+        desc.ChoiceParam(
+            name="method",
+            label="Merge Method",
+            description="Merge method:\n"
+                        " - simple_copy: Straight copy without duplicate management.\n"
+                        " - from_landmarks: Align from matched features, try to fuse.\n",
+            value="simple_copy",
+            values=["simple_copy", "from_landmarks"],
+        ),
+        desc.ListAttribute(
+            elementDesc=desc.File(
+                name="matchesFolder",
+                label="Matches Folder",
+                description="",
+                value="",
+            ),
+            name="matchesFolders",
+            label="Matches Folders",
+            description="Folder(s) in which the computed matches are stored.",
+        ),
+        desc.ChoiceParam(
+            name="describerTypes",
+            label="Describer Types",
+            description="Describer types used to describe an image.",
+            values=DESCRIBER_TYPES,
+            value=["dspsift"],
+            exclusive=False,
+            joinChar=",",
+        ),
+        desc.ChoiceParam(
+            name="fileExt",
+            label="SfM File Format",
+            description="Output SfM file format.",
+            value="abc",
+            values=["abc", "sfm", "json"],
+            group="",  # exclude from command line
+        ),
+        desc.ChoiceParam(
+            name="verboseLevel",
+            label="Verbose Level",
+            description="Verbosity level (fatal, error, warning, info, debug, trace).",
+            values=VERBOSE_LEVEL,
+            value="info",
+        )
+    ]
+
+    outputs = [
+        desc.File(
+            name="output",
+            label="SfMData",
+            description="Path to the output SfM file (in SfMData format).",
+            value="{nodeCacheFolder}/sfmData.{fileExtValue}",
+        )
+    ]
diff --git a/meshroom/aliceVision/SfMPoseInjecting.py b/meshroom/aliceVision/SfMPoseInjecting.py
new file mode 100644
index 0000000000..f855235c04
--- /dev/null
+++ b/meshroom/aliceVision/SfMPoseInjecting.py
@@ -0,0 +1,55 @@
+__version__ = "1.0"
+
+from meshroom.core import desc
+from meshroom.core.utils import VERBOSE_LEVEL
+
+import json
+
+class SfMPoseInjecting(desc.AVCommandLineNode):
+
+    commandLine = "aliceVision_sfmPoseInjecting {allParams}"
+    size = desc.DynamicNodeSize("input")
+
+    category = "Utils"
+    documentation = """
+Use a JSON file to inject poses inside the SfMData.
+""" + + inputs = [ + desc.File( + name="input", + label="SfMData", + description="Input SfMData file.", + value="", + ), + desc.File( + name="posesFilename", + label="Poses", + description="Input JSON file containing the poses.", + value="", + ), + desc.ChoiceParam( + name="rotationFormat", + label="Rotation Format", + description="Defines the rotation format for the input poses:\n" + " - EulerZXY: Euler rotation in degrees (Y*X*Z)", + values=["EulerZXY"], + value="EulerZXY", + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="output", + label="SfMData", + description="Path to the output SfM file.", + value="{nodeCacheFolder}/sfmData.abc", + ), + ] diff --git a/meshroom/aliceVision/SfMSplitReconstructed.py b/meshroom/aliceVision/SfMSplitReconstructed.py new file mode 100644 index 0000000000..eb237a416d --- /dev/null +++ b/meshroom/aliceVision/SfMSplitReconstructed.py @@ -0,0 +1,47 @@ +__version__ = "1.0" + +from meshroom.core import desc +from meshroom.core.utils import VERBOSE_LEVEL + + +class SfMSplitReconstructed(desc.AVCommandLineNode): + commandLine = 'aliceVision_sfmSplitReconstructed {allParams}' + size = desc.DynamicNodeSize('input') + + category = 'Utils' + documentation = ''' + This nodes takes a SfMData file as an input and splits it in two output SfMData files: + - One SfMData containing the reconstructed views + - One SfMData containing the non-reconstructed views +''' + + inputs = [ + desc.File( + name="input", + label="Input SfMData", + description="Input SfMData file.", + value="", + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="reconstructedOutput", + label="Reconstructed SfMData File", + description="SfMData file containing the reconstructed cameras.", + value="{nodeCacheFolder}/sfmReconstructed.abc", + ), + desc.File( + name="notReconstructedOutput", + label="Not Reconstructed SfMData File", + description="SfMData file containing the non-reconstructed cameras.", + value="{nodeCacheFolder}/sfmNonReconstructed.abc", + ), + ] diff --git a/meshroom/aliceVision/SfMSurveyInjecting.py b/meshroom/aliceVision/SfMSurveyInjecting.py new file mode 100644 index 0000000000..fc52ea574b --- /dev/null +++ b/meshroom/aliceVision/SfMSurveyInjecting.py @@ -0,0 +1,47 @@ +__version__ = "1.0" + +from meshroom.core import desc +from meshroom.core.utils import VERBOSE_LEVEL + +import json + +class SfMSurveyInjecting(desc.AVCommandLineNode): + + commandLine = "aliceVision_sfmSurveyInjecting {allParams}" + size = desc.DynamicNodeSize("input") + + category = "Utils" + documentation = """ +Use a JSON file to inject survey measurements inside the SfMData. 
+""" + + inputs = [ + desc.File( + name="input", + label="SfMData", + description="Input SfMData file.", + value="", + ), + desc.File( + name="surveyFilename", + label="Survey", + description="Input JSON file containing the survey.", + value="", + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="output", + label="SfMData", + description="Path to the output SfM file.", + value="{nodeCacheFolder}/sfmData.abc", + ), + ] diff --git a/meshroom/aliceVision/SfMToRig.py b/meshroom/aliceVision/SfMToRig.py new file mode 100644 index 0000000000..468cf6c2ca --- /dev/null +++ b/meshroom/aliceVision/SfMToRig.py @@ -0,0 +1,40 @@ +__version__ = "1.0" + +from meshroom.core import desc +from meshroom.core.utils import VERBOSE_LEVEL + +import os.path + +class SfMToRig(desc.AVCommandLineNode): + commandLine = 'aliceVision_sfmToRig {allParams}' + size = desc.DynamicNodeSize('input') + + category = 'Utils' + documentation = ''' +Assumes the input SfMData describes a set of cameras capturing a scene at a common time. Transformd the set of cameras into a rig of cameras. +''' + + inputs = [ + desc.File( + name="input", + label="SfMData", + description="Input SfMData file.", + value="", + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="output", + label="SfMData", + description="Path to the output SfM file (in SfMData format).", + value="{nodeCacheFolder}/sfmData.sfm", + ), + ] diff --git a/meshroom/aliceVision/SfMTransfer.py b/meshroom/aliceVision/SfMTransfer.py new file mode 100644 index 0000000000..2b144d4abc --- /dev/null +++ b/meshroom/aliceVision/SfMTransfer.py @@ -0,0 +1,110 @@ +__version__ = "2.1" + +from meshroom.core import desc +from meshroom.core.utils import VERBOSE_LEVEL + +import os.path + + +class SfMTransfer(desc.AVCommandLineNode): + commandLine = 'aliceVision_sfmTransfer {allParams}' + size = desc.DynamicNodeSize('input') + + category = 'Utils' + documentation = ''' +This node allows to transfer poses and/or intrinsics form one SfM scene onto another one. 
+'''
+
+    inputs = [
+        desc.File(
+            name="input",
+            label="Input",
+            description="SfMData file.",
+            value="",
+        ),
+        desc.File(
+            name="reference",
+            label="Reference",
+            description="Path to the scene used as the reference to retrieve resolved poses and intrinsics.",
+            value="",
+        ),
+        desc.ChoiceParam(
+            name="method",
+            label="Matching Method",
+            description="Matching Method:\n"
+                        " - from_viewid: Match cameras based on viewId.\n"
+                        " - from_filepath: Match cameras with a filepath matching, using 'fileMatchingPattern'.\n"
+                        " - from_metadata: Match cameras with matching metadata, using 'metadataMatchingList'.\n"
+                        " - from_poseid: Match cameras based on poseId.\n"
+                        " - from_intrinsicid: Match cameras based on intrinsicId.\n",
+            value="from_viewid",
+            values=["from_viewid", "from_filepath", "from_metadata", "from_poseid", "from_intrinsicid"],
+        ),
+        desc.StringParam(
+            name="fileMatchingPattern",
+            label="File Matching Pattern",
+            description="Matching regular expression for the 'from_filepath' method.\n"
+                        "You should capture specific parts of the filepath with parentheses to define matching elements.\n"
+                        "Some examples of patterns:\n"
+                        " - Match the filename without extension (default value): " r'".*\/(.*?)\.\w{3}"' "\n"
+                        " - Match the filename suffix after \"_\": " r'".*\/.*(_.*?\.\w{3})"' "\n"
+                        " - Match the filename prefix before \"_\": " r'".*\/(.*?)_.*\.\w{3}"',
+            value=r'.*\/(.*?)\.\w{3}',
+        ),
+        desc.ListAttribute(
+            elementDesc=desc.File(
+                name="metadataMatching",
+                label="Metadata",
+                description="Metadata that should match to create correspondences.",
+                value="",
+            ),
+            name="metadataMatchingList",
+            label="Metadata Matching List",
+            description="List of metadata that should match to create the correspondences.\n"
+                        "If the list is empty, the default value will be used:\n"
+                        "['Make', 'Model', 'Exif:BodySerialNumber', 'Exif:LensSerialNumber'].",
+        ),
+        desc.BoolParam(
+            name="transferPoses",
+            label="Poses",
+            description="Transfer poses.",
+            value=True,
+        ),
+        desc.BoolParam(
+            name="transferIntrinsics",
+            label="Intrinsics",
+            description="Transfer cameras intrinsics.",
+            value=True,
+        ),
+        desc.BoolParam(
+            name="transferLandmarks",
+            label="Landmarks",
+            description="Transfer landmarks.",
+            value=True,
+        ),
+        desc.ChoiceParam(
+            name="verboseLevel",
+            label="Verbose Level",
+            description="Verbosity level (fatal, error, warning, info, debug, trace).",
+            values=VERBOSE_LEVEL,
+            value="info",
+        ),
+    ]
+
+    outputs = [
+        desc.File(
+            name="output",
+            label="SfMData",
+            description="Path to the output SfM point cloud file (in SfMData format).",
+            value=lambda attr: "{nodeCacheFolder}/" + (os.path.splitext(os.path.basename(attr.node.input.value))[0] or "sfmData") + ".abc",
+        ),
+        desc.File(
+            name="outputViewsAndPoses",
+            label="Poses",
+            description="Path to the output SfMData file with cameras (views and poses).",
+            value="{nodeCacheFolder}/cameras.sfm",
+        ),
+    ]
diff --git a/meshroom/aliceVision/SfMTransform.py b/meshroom/aliceVision/SfMTransform.py
new file mode 100644
index 0000000000..eab001937b
--- /dev/null
+++ b/meshroom/aliceVision/SfMTransform.py
@@ -0,0 +1,266 @@
+__version__ = "3.1"
+
+from meshroom.core import desc
+from meshroom.core.utils import DESCRIBER_TYPES, VERBOSE_LEVEL
+
+import os.path
+
+
+class SfMTransform(desc.AVCommandLineNode):
+    commandLine = 'aliceVision_sfmTransform {allParams}'
+    size = desc.DynamicNodeSize('input')
+
+    category = 'Utils'
+    documentation = '''
+This node allows changing the coordinate system of one SfM scene.
+
+The transformation can be based on:
+ * transformation: Apply a given transformation
+ * auto_from_cameras: Fit all cameras into a box [-1,1]
+ * auto_from_landmarks: Fit all landmarks into a box [-1,1]
+ * from_single_camera: Use a specific camera as the origin of the coordinate system
+ * from_markers: Align specific markers to custom coordinates
+ * from_gps: Align with the gps positions from the image metadata
+ * align_ground: Detect ground level and align to it
+ * from_lineup: Align using a camera pose (json line up file), tracks and a mesh
+'''
+
+    inputs = [
+        desc.File(
+            name="input",
+            label="Input",
+            description="SfMData file.",
+            value="",
+        ),
+        desc.ChoiceParam(
+            name="method",
+            label="Transformation Method",
+            description="Transformation method:\n"
+                        " - transformation: Apply a given transformation.\n"
+                        " - manual: Apply the gizmo transformation (show the transformed input).\n"
+                        " - auto: Determines scene orientation from the cameras' X axis, determines north and scale from GPS information if available, and defines ground level from the point cloud.\n"
+                        " - auto_from_cameras: Defines coordinate system from cameras.\n"
+                        " - auto_from_cameras_x_axis: Determines scene orientation from the cameras' X axis.\n"
+                        " - auto_from_landmarks: Defines coordinate system from landmarks.\n"
+                        " - from_single_camera: Defines the coordinate system from the camera specified by --transformation.\n"
+                        " - from_center_camera: Defines the coordinate system from the camera closest to the center of the reconstruction.\n"
+                        " - from_markers: Defines the coordinate system from markers specified by --markers.\n"
+                        " - from_gps: Defines coordinate system from GPS metadata.\n"
+                        " - from_lineup: Defines coordinate system using lineup json file.\n"
+                        " - align_ground: Defines ground level from the point cloud density. 
It assumes that the scene is oriented.", + value="auto", + values=["transformation", "manual", "auto", "auto_from_cameras", "auto_from_cameras_x_axis", "auto_from_landmarks", "from_single_camera", "from_center_camera", "from_markers", "from_gps", "from_lineup", "align_ground"], + ), + desc.File( + name="lineUp", + label="Line Up File", + description="LineUp Json file.", + value="", + enabled=lambda node: node.method.value == "from_lineup" + ), + desc.File( + name="tracksFile", + label="Tracks File", + description="Tracks file for lineup.", + value="", + enabled=lambda node: node.method.value == "from_lineup" + ), + desc.File( + name="objectFile", + label="Mesh File", + description="Mesh file for lineup.", + value="", + enabled=lambda node: node.method.value == "from_lineup" + ), + desc.StringParam( + name="transformation", + label="Transformation", + description="Required only for 'transformation' and 'from_single_camera' methods:\n" + " - transformation: Align [X,Y,Z] to +Y-axis, rotate around Y by R deg, scale by S; syntax: X,Y,Z;R;S\n" + " - from_single_camera: Camera UID or simplified regular expression to match image filepath (like '*camera2*.jpg').", + value="", + enabled=lambda node: node.method.value == "transformation" or node.method.value == "from_single_camera" or node.method.value == "auto_from_cameras_x_axis", + ), + desc.GroupAttribute( + name="manualTransform", + label="Manual Transform (Gizmo)", + description="Translation, rotation (Euler ZXY) and uniform scale.", + groupDesc=[ + desc.GroupAttribute( + name="manualTranslation", + label="Translation", + description="Translation in space.", + groupDesc=[ + desc.FloatParam( + name="x", + label="x", + description="X offset.", + value=0.0, + range=(-20.0, 20.0, 0.01), + ), + desc.FloatParam( + name="y", + label="y", + description="Y offset.", + value=0.0, + range=(-20.0, 20.0, 0.01), + ), + desc.FloatParam( + name="z", + label="z", + description="Z offset.", + value=0.0, + range=(-20.0, 20.0, 0.01), + ), + ], + joinChar=",", + ), + desc.GroupAttribute( + name="manualRotation", + label="Euler Rotation", + description="Rotation in Euler angles.", + groupDesc=[ + desc.FloatParam( + name="x", + label="x", + description="Euler X rotation.", + value=0.0, + range=(-90.0, 90.0, 1.0), + ), + desc.FloatParam( + name="y", + label="y", + description="Euler Y rotation.", + value=0.0, + range=(-180.0, 180.0, 1.0), + ), + desc.FloatParam( + name="z", + label="z", + description="Euler Z rotation.", + value=0.0, + range=(-180.0, 180.0, 1.0), + ), + ], + joinChar=",", + ), + desc.FloatParam( + name="manualScale", + label="Scale", + description="Uniform scale.", + value=1.0, + range=(0.0, 20.0, 0.01), + ), + ], + joinChar=",", + enabled=lambda node: node.method.value == "manual", + ), + desc.ChoiceParam( + name="landmarksDescriberTypes", + label="Landmarks Describer Types", + description="Image describer types used to compute the mean of the point cloud (only for 'landmarks' method).", + values=DESCRIBER_TYPES, + value=["sift", "dspsift", "akaze"], + exclusive=False, + joinChar=",", + ), + desc.FloatParam( + name="scale", + label="Additional Scale", + description="Additional scale to apply.", + value=1.0, + range=(0.0, 100.0, 0.1), + ), + desc.ListAttribute( + name="markers", + elementDesc=desc.GroupAttribute( + name="markerAlign", + label="Marker Align", + description="", + joinChar=":", + groupDesc=[ + desc.IntParam( + name="markerId", + label="Marker", + description="Marker ID.", + value=0, + range=(0, 32, 1), + ), + desc.GroupAttribute( + 
name="markerCoord", + label="Coord", + description="Marker coordinates.", + joinChar=",", + groupDesc=[ + desc.FloatParam( + name="x", + label="x", + description="X coordinates for the marker.", + value=0.0, + range=(-2.0, 2.0, 1.0), + ), + desc.FloatParam( + name="y", + label="y", + description="Y coordinates for the marker.", + value=0.0, + range=(-2.0, 2.0, 1.0), + ), + desc.FloatParam( + name="z", + label="z", + description="Z coordinates for the marker.", + value=0.0, + range=(-2.0, 2.0, 1.0), + ), + ], + ), + ], + ), + label="Markers", + description="Markers alignment points.", + ), + desc.BoolParam( + name="applyScale", + label="Scale", + description="Apply scale transformation.", + value=True, + enabled=lambda node: node.method.value != "manual", + ), + desc.BoolParam( + name="applyRotation", + label="Rotation", + description="Apply rotation transformation.", + value=True, + enabled=lambda node: node.method.value != "manual", + ), + desc.BoolParam( + name="applyTranslation", + label="Translation", + description="Apply translation transformation.", + value=True, + enabled=lambda node: node.method.value != "manual", + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="output", + label="SfMData File", + description="Aligned SfMData file.", + value=lambda attr: "{nodeCacheFolder}/" + (os.path.splitext(os.path.basename(attr.node.input.value))[0] or "sfmData") + ".abc", + ), + desc.File( + name="outputViewsAndPoses", + label="Poses", + description="Path to the output SfMData file with cameras (views and poses).", + value="{nodeCacheFolder}/cameras.sfm", + ), + ] diff --git a/meshroom/aliceVision/SfMTriangulation.py b/meshroom/aliceVision/SfMTriangulation.py new file mode 100644 index 0000000000..06ce61ed00 --- /dev/null +++ b/meshroom/aliceVision/SfMTriangulation.py @@ -0,0 +1,154 @@ +__version__ = "1.0" + +from meshroom.core import desc +from meshroom.core.utils import DESCRIBER_TYPES, VERBOSE_LEVEL + + +class SfMTriangulation(desc.AVCommandLineNode): + commandLine = 'aliceVision_sfmTriangulation {allParams}' + size = desc.DynamicNodeSize('input') + + category = 'Sparse Reconstruction' + documentation = ''' +This node perfoms keypoint triangulation on its input data. +Contrary to the StructureFromMotion node, this node does not infer the camera poses, therefore they must be given in the SfMData input. +''' + + inputs = [ + desc.File( + name="input", + label="SfMData", + description="SfMData file. 
Must contain the camera calibration.", + value="", + ), + desc.ListAttribute( + elementDesc=desc.File( + name="featuresFolder", + label="Features Folder", + description="Folder containing some extracted features and descriptors.", + value="", + ), + name="featuresFolders", + label="Features Folders", + description="Folder(s) containing the extracted features and descriptors.", + exposed=True, + ), + desc.ListAttribute( + elementDesc=desc.File( + name="matchesFolder", + label="Matches Folder", + description="Folder in which some computed matches are stored.", + value="", + ), + name="matchesFolders", + label="Matches Folders", + description="Folder(s) in which computed matches are stored.", + exposed=True, + ), + desc.ChoiceParam( + name="describerTypes", + label="Describer Types", + description="Describer types used to describe an image.", + values=DESCRIBER_TYPES, + value=["dspsift"], + exclusive=False, + joinChar=",", + ), + desc.IntParam( + name="maxNumberOfMatches", + label="Maximum Number Of Matches", + description="Maximum number of matches per image pair (and per feature type).\n" + "This can be useful to have a quick reconstruction overview.\n" + "0 means no limit.", + value=0, + range=(0, 50000, 1), + ), + desc.IntParam( + name="minNumberOfMatches", + label="Minimum Number Of Matches", + description="Minimum number of matches per image pair (and per feature type).\n" + "This can be useful to have a meaningful reconstruction with accurate keypoints.\n" + "0 means no limit.", + value=0, + range=(0, 50000, 1), + ), + desc.IntParam( + name="minNumberOfObservationsForTriangulation", + label="Min Observations For Triangulation", + description="Minimum number of observations to triangulate a point.\n" + "Setting it to 3 (or more) reduces drastically the noise in the point cloud,\n" + "but the number of final poses is a little bit reduced\n" + "(from 1.5% to 11% on the tested datasets).", + value=2, + range=(2, 10, 1), + advanced=True, + ), + desc.FloatParam( + name="minAngleForTriangulation", + label="Min Angle For Triangulation", + description="Minimum angle for triangulation.", + value=3.0, + range=(0.1, 10.0, 0.1), + advanced=True, + ), + desc.FloatParam( + name="minAngleForLandmark", + label="Min Angle For Landmark", + description="Minimum angle for landmark.", + value=2.0, + range=(0.1, 10.0, 0.1), + advanced=True, + ), + desc.BoolParam( + name="useRigConstraint", + label="Use Rig Constraint", + description="Enable/Disable rig constraint.", + value=True, + advanced=True, + ), + desc.IntParam( + name="rigMinNbCamerasForCalibration", + label="Min Nb Cameras For Rig Calibration", + description="Minimum number of cameras to start the calibration of the rig.", + value=20, + range=(1, 50, 1), + advanced=True, + ), + desc.BoolParam( + name="computeStructureColor", + label="Compute Structure Color", + description="Enable/Disable color computation of each 3D point.", + value=True, + ), + desc.ChoiceParam( + name="interFileExtension", + label="Inter File Extension", + description="Extension of the intermediate file export.", + value=".abc", + values=[".abc", ".ply"], + invalidate=False, + advanced=True, + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="output", + label="SfMData", + description="Path to the output SfMData file.", + value="{nodeCacheFolder}/sfm.abc", + ), + desc.File( + name="extraInfoFolder", 
+ label="Folder", + description="Folder for intermediate reconstruction files and additional reconstruction information files.", + value="{nodeCacheFolder}", + ), + ] diff --git a/meshroom/aliceVision/SfmBootstraping.py b/meshroom/aliceVision/SfmBootstraping.py new file mode 100644 index 0000000000..d4b293914b --- /dev/null +++ b/meshroom/aliceVision/SfmBootstraping.py @@ -0,0 +1,84 @@ +__version__ = "3.0" + +from meshroom.core import desc +from meshroom.core.utils import VERBOSE_LEVEL + + +class SfMBootStraping(desc.AVCommandLineNode): + commandLine = 'aliceVision_sfmBootstraping {allParams}' + size = desc.DynamicNodeSize('input') + + category = 'Sparse Reconstruction' + documentation = ''' +''' + + inputs = [ + desc.File( + name="input", + label="SfMData", + description="SfMData file.", + value="", + ), + desc.File( + name="tracksFilename", + label="Tracks File", + description="Tracks file.", + value="", + ), + desc.File( + name="meshFilename", + label="Mesh File", + description="Mesh file (*.obj).", + value="", + ), + desc.File( + name="pairs", + label="Pairs File", + description="Information on pairs.", + value="", + ), + desc.FloatParam( + name="minAngleInitialPair", + label="Min Angle Initial Pair", + description="Minimum angle for the initial pair.", + value=5.0, + range=(0.1, 10.0, 0.1), + advanced=True, + ), + desc.FloatParam( + name="maxAngleInitialPair", + label="Max Angle Initial Pair", + description="Maximum angle for the initial pair.", + value=40.0, + range=(0.1, 60.0, 0.1), + advanced=True, + ), + desc.File( + name="initialPairA", + label="Initial Pair A", + description="View ID of the first image.", + value="", + ), + desc.File( + name="initialPairB", + label="Initial Pair B", + description="View ID of the second image.", + value="", + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="output", + label="SfMData", + description="Path to the output SfMData file.", + value="{nodeCacheFolder}/bootstrap.abc", + ), + ] diff --git a/meshroom/aliceVision/SfmExpanding.py b/meshroom/aliceVision/SfmExpanding.py new file mode 100644 index 0000000000..4328dbafa8 --- /dev/null +++ b/meshroom/aliceVision/SfmExpanding.py @@ -0,0 +1,181 @@ +__version__ = "2.0" + +from meshroom.core import desc +from meshroom.core.utils import VERBOSE_LEVEL + + +class SfMExpanding(desc.AVCommandLineNode): + commandLine = 'aliceVision_sfmExpanding {allParams}' + size = desc.DynamicNodeSize('input') + + cpu = desc.Level.INTENSIVE + ram = desc.Level.INTENSIVE + + category = 'Sparse Reconstruction' + documentation = ''' +''' + + inputs = [ + desc.File( + name="input", + label="SfMData", + description="SfMData file.", + value="", + ), + desc.File( + name="tracksFilename", + label="Tracks File", + description="Tracks file.", + value="", + ), + desc.File( + name="meshFilename", + label="Mesh File", + description="Mesh file (*.obj).", + value="", + ), + desc.IntParam( + name="localizerEstimatorMaxIterations", + label="Localizer Max Ransac Iterations", + description="Maximum number of iterations allowed in the Ransac step.", + value=50000, + range=(1, 100000, 1), + advanced=True, + ), + desc.FloatParam( + name="localizerEstimatorError", + label="Localizer Max Ransac Error", + description="Maximum error (in pixels) allowed for camera localization (resectioning).\n" + "If set to 0, it will select a threshold according to the localizer 
estimator used\n" + "(if ACRansac, it will analyze the input data to select the optimal value).", + value=0.0, + range=(0.0, 100.0, 0.1), + advanced=True, + ), + desc.BoolParam( + name="lockScenePreviouslyReconstructed", + label="Lock Previously Reconstructed Scene", + description="Lock previously reconstructed poses and intrinsics.\n" + "This option is useful for SfM augmentation.", + value=False, + ), + desc.BoolParam( + name="useLocalBA", + label="Local Bundle Adjustment", + description="It reduces the reconstruction time, especially for large datasets (500+ images),\n" + "by avoiding computation of the Bundle Adjustment on areas that are not changing.", + value=True, + ), + desc.IntParam( + name="localBAGraphDistance", + label="LocalBA Graph Distance", + description="Graph-distance limit to define the active region in the Local Bundle Adjustment strategy.", + value=1, + range=(2, 10, 1), + advanced=True, + ), + desc.IntParam( + name="nbFirstUnstableCameras", + label="First Unstable Cameras Nb", + description="Number of cameras for which the bundle adjustment is performed every single time a camera is added.\n" + "This leads to more stable results while computations are not too expensive, as there is little data.\n" + "Past this number, the bundle adjustment will only be performed once for N added cameras.", + value=30, + range=(0, 100, 1), + advanced=True, + ), + desc.IntParam( + name="maxImagesPerGroup", + label="Max Images Per Group", + description="Maximum number of cameras that can be added before the bundle adjustment has to be performed again.\n" + "This prevents adding too much data at once without performing the bundle adjustment.", + value=30, + range=(0, 100, 1), + advanced=True, + ), + desc.IntParam( + name="bundleAdjustmentMaxOutliers", + label="Max Nb Of Outliers After BA", + description="Threshold for the maximum number of outliers allowed at the end of a bundle adjustment iteration.\n" + "Using a negative value for this threshold will disable BA iterations.", + value=50, + range=(-1, 1000, 1), + advanced=True, + ), + desc.IntParam( + name="minNumberOfObservationsForTriangulation", + label="Min Observations For Triangulation", + description="Minimum number of observations to triangulate a point.\n" + "Setting it to 3 (or more) reduces drastically the noise in the point cloud,\n" + "but the number of final poses is a little bit reduced\n" + "(from 1.5% to 11% on the tested datasets).", + value=2, + range=(2, 10, 1), + advanced=True, + ), + desc.FloatParam( + name="minAngleForTriangulation", + label="Min Angle For Triangulation", + description="Minimum angle for triangulation.", + value=3.0, + range=(0.1, 10.0, 0.1), + advanced=True, + ), + desc.FloatParam( + name="minAngleForLandmark", + label="Min Angle For Landmark", + description="Minimum angle for landmark.", + value=2.0, + range=(0.1, 10.0, 0.1), + advanced=True, + ), + desc.FloatParam( + name="maxReprojectionError", + label="Max Reprojection Error", + description="Maximum reprojection error.", + value=4.0, + range=(0.1, 10.0, 0.1), + advanced=True, + ), + desc.BoolParam( + name="lockAllIntrinsics", + label="Lock All Intrinsic Camera Parameters", + description="Force to keep all the intrinsic parameters of the cameras (focal length, \n" + "principal point, distortion if any) constant during the reconstruction.\n" + "This may be helpful if the input cameras are already fully calibrated.", + value=False, + ), + desc.IntParam( + name="minNbCamerasToRefinePrincipalPoint", + label="Min Nb Cameras To Refine Principal 
Point", + description="Minimum number of cameras to refine the principal point of the cameras (one of the intrinsic parameters of the camera).\n" + "If we do not have enough cameras, the principal point is considered to be in the center of the image.\n" + "If minNbCamerasToRefinePrincipalPoint <= 0, the principal point is never refined." + "If minNbCamerasToRefinePrincipalPoint is set to 1, the principal point is always refined.", + value=3, + range=(0, 20, 1), + advanced=True, + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="output", + label="SfMData", + description="Path to the output SfMData file.", + value="{nodeCacheFolder}/sfmExpanded.abc", + ), + desc.File( + name="outputViewsAndPoses", + label="Views And Poses", + description="Path to the output SfMData file with cameras (views and poses).", + value="{nodeCacheFolder}/cameras.sfm", + ) + ] diff --git a/meshroom/aliceVision/SketchfabUpload.py b/meshroom/aliceVision/SketchfabUpload.py new file mode 100644 index 0000000000..cb940b85c6 --- /dev/null +++ b/meshroom/aliceVision/SketchfabUpload.py @@ -0,0 +1,275 @@ +__version__ = "1.0" + +from meshroom.core import desc +from meshroom.core.utils import VERBOSE_LEVEL + +import glob +import os +import json +import zipfile +import requests +import io + + +class BufferReader(io.BytesIO): # object to call the callback while the file is being uploaded + def __init__(self, buf=b'', + callback=None, + cb_args=(), + cb_kwargs={}, + stopped=None): + self._callback = callback + self._cb_args = cb_args + self._cb_kwargs = cb_kwargs + self._stopped = stopped + self._progress = 0 + self._len = len(buf) + io.BytesIO.__init__(self, buf) + + def __len__(self): + return self._len + + def read(self, n=-1): + chunk = io.BytesIO.read(self, n) + self._progress += int(len(chunk)) + self._cb_kwargs.update({ + 'size' : self._len, + 'progress': self._progress + }) + if self._callback: + try: + self._callback(*self._cb_args, **self._cb_kwargs) + except Exception as e: # catches exception from the callback + self._cb_kwargs['logManager'].logger.warning('Error at callback: {}'.format(e)) + + if self._stopped(): + raise RuntimeError('Node stopped by user') + return chunk + +def progressUpdate(size=None, progress=None, logManager=None): + if not logManager.progressBar: + logManager.makeProgressBar(size, 'Upload progress:') + + logManager.updateProgressBar(progress) + +class SketchfabUpload(desc.Node): + size = desc.DynamicNodeSize('inputFiles') + + category = 'Export' + documentation = ''' +Upload a textured mesh on Sketchfab. 
+''' + + inputs = [ + desc.ListAttribute( + elementDesc=desc.File( + name="input", + label="Input", + description="", + value="", + ), + name="inputFiles", + label="Input Files", + description="Input Files to export.", + group="", + ), + desc.StringParam( + name="apiToken", + label="API Token", + description="Get your token from https://sketchfab.com/settings/password.", + value="", + ), + desc.StringParam( + name="title", + label="Title", + description="Title cannot be longer than 48 characters.", + value="", + ), + desc.StringParam( + name="description", + label="Description", + description="Description cannot be longer than 1024 characters.", + value="", + ), + desc.ChoiceParam( + name="license", + label="License", + description="License label.", + value="CC Attribution", + values=["CC Attribution", + "CC Attribution-ShareAlike", + "CC Attribution-NoDerivs", + "CC Attribution-NonCommercial", + "CC Attribution-NonCommercial-ShareAlike", + "CC Attribution-NonCommercial-NoDerivs"], + ), + desc.ListAttribute( + elementDesc=desc.StringParam( + name="tag", + label="Tag", + description="Tag cannot be longer than 48 characters.", + value="", + ), + name="tags", + label="Tags", + description="Maximum of 42 separate tags.", + group="", + ), + desc.ChoiceParam( + name="category", + label="Category", + description="Adding categories helps improve the discoverability of your model.", + value="none", + values=["none", + "animals-pets", + "architecture", + "art-abstract", + "cars-vehicles", + "characters-creatures", + "cultural-heritage-history", + "electronics-gadgets", + "fashion-style", + "food-drink", + "furniture-home", + "music", + "nature-plants", + "news-politics", + "people", + "places-travel", + "science-technology", + "sports-fitness", + "weapons-military"], + ), + desc.BoolParam( + name="isPublished", + label="Publish", + description="If the model is not published, it will be saved as a draft.", + value=False, + ), + desc.BoolParam( + name="isInspectable", + label="Inspectable", + description="Allow 2D view in model inspector.", + value=True, + ), + desc.BoolParam( + name="isPrivate", + label="Private", + description="Requires a pro account.", + value=False, + ), + desc.StringParam( + name="password", + label="Password", + description="Requires a pro account.", + value="", + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + def upload(self, apiToken, modelFile, data, chunk): + modelEndpoint = 'https://api.sketchfab.com/v3/models' + f = open(modelFile, 'rb') + file = {'modelFile': (os.path.basename(modelFile), f.read())} + file.update(data) + f.close() + (files, contentType) = requests.packages.urllib3.filepost.encode_multipart_formdata(file) + headers = {'Authorization': 'Token {}'.format(apiToken), 'Content-Type': contentType} + body = BufferReader(files, progressUpdate, cb_kwargs={'logManager': chunk.logManager}, stopped=self.stopped) + chunk.logger.info('Uploading...') + try: + r = requests.post( + modelEndpoint, **{'data': body, 'headers': headers}) + chunk.logManager.completeProgressBar() + except requests.exceptions.RequestException as e: + chunk.logger.error(u'An error occurred: {}'.format(e)) + raise RuntimeError() + if r.status_code != requests.codes.created: + chunk.logger.error(u'Upload failed with error: {}'.format(r.json())) + raise RuntimeError() + + def resolvedPaths(self, inputFiles): + paths = [] + for inputFile in 
inputFiles: + if os.path.isdir(inputFile.value): + for path, subdirs, files in os.walk(inputFile.value): + for name in files: + paths.append(os.path.join(path, name)) + else: + for f in glob.glob(inputFile.value): + paths.append(f) + return paths + + def stopped(self): + return self._stopped + + def processChunk(self, chunk): + try: + self._stopped = False + chunk.logManager.start(chunk.node.verboseLevel.value) + uploadFile = '' + + if not chunk.node.inputFiles: + chunk.logger.warning('Nothing to upload') + return + if chunk.node.apiToken.value == '': + chunk.logger.error('Need API token.') + raise RuntimeError() + if len(chunk.node.title.value) > 48: + chunk.logger.error('Title cannot be longer than 48 characters.') + raise RuntimeError() + if len(chunk.node.description.value) > 1024: + chunk.logger.error('Description cannot be longer than 1024 characters.') + raise RuntimeError() + tags = [ i.value.replace(' ', '-') for i in chunk.node.tags.value.values() ] + if any(len(i) > 48 for i in tags): # reject as soon as a single tag exceeds the limit + chunk.logger.error('Tags cannot be longer than 48 characters.') + raise RuntimeError() + if len(tags) > 42: + chunk.logger.error('Maximum of 42 separate tags.') + raise RuntimeError() + + data = { + 'name': chunk.node.title.value, + 'description': chunk.node.description.value, + 'license': chunk.node.license.value, + 'tags': str(tags), + 'isPublished': chunk.node.isPublished.value, + 'isInspectable': chunk.node.isInspectable.value, + 'private': chunk.node.isPrivate.value, + 'password': chunk.node.password.value + } + if chunk.node.category.value != 'none': + data.update({'categories': chunk.node.category.value}) + chunk.logger.debug('Data to be sent: {}'.format(str(data))) + + # pack files into .zip to reduce file size and simplify process + uploadFile = os.path.join(chunk.node.internalFolder, 'temp.zip') + files = self.resolvedPaths(chunk.node.inputFiles.value) + zf = zipfile.ZipFile(uploadFile, 'w') + for file in files: + zf.write(file, os.path.basename(file)) + zf.close() + chunk.logger.debug('Files added to zip: {}'.format(str(files))) + chunk.logger.debug('Created {}'.format(uploadFile)) + chunk.logger.info('File size: {}MB'.format(round(os.path.getsize(uploadFile)/(1024*1024), 3))) + + self.upload(chunk.node.apiToken.value, uploadFile, data, chunk) + chunk.logger.info('Upload successful. Your model is being processed on Sketchfab. It may take some time to show up on your "models" page.') + except Exception as e: + chunk.logger.error(e) + raise RuntimeError() + finally: + if os.path.isfile(uploadFile): + os.remove(uploadFile) + chunk.logger.debug('Deleted {}'.format(uploadFile)) + + chunk.logManager.end() + + def stopProcess(self, chunk): + self._stopped = True diff --git a/meshroom/aliceVision/SphereDetection.py b/meshroom/aliceVision/SphereDetection.py new file mode 100644 index 0000000000..a58fcba98c --- /dev/null +++ b/meshroom/aliceVision/SphereDetection.py @@ -0,0 +1,89 @@ +__version__ = "1.0" + +from meshroom.core import desc +from meshroom.core.utils import VERBOSE_LEVEL + + +class SphereDetection(desc.CommandLineNode): + commandLine = 'aliceVision_sphereDetection {allParams}' + category = 'Photometric Stereo' + documentation = ''' +Detect spheres in pictures. These spheres will be used for lighting calibration. +Spheres can be automatically detected or manually defined in the interface. 
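+Automatic detection relies on a deep learning network (see the "Detection Network" parameter), while manual mode uses the given center offset and radius in pixels. The detection results are written to a JSON file.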
+''' + + inputs = [ + desc.File( + name="input", + label="SfMData", + description="Input SfMData file.", + value="", + ), + desc.File( + name="modelPath", + label="Detection Network", + description="Deep learning network for automatic calibration sphere detection.", + value="${ALICEVISION_SPHERE_DETECTION_MODEL}", + ), + desc.BoolParam( + name="autoDetect", + label="Automatic Sphere Detection", + description="Automatic detection of calibration spheres.", + value=False, + ), + desc.FloatParam( + name="minScore", + label="Minimum Score", + description="Minimum score for the detection.", + value=0.0, + range=(0.0, 50.0, 0.01), + advanced=True, + ), + desc.GroupAttribute( + name="sphereCenter", + label="Sphere Center", + description="Center of the circle (XY offset to the center of the image in pixels).", + groupDesc=[ + desc.FloatParam( + name="x", + label="x", + description="X offset in pixels.", + value=0.0, + range=(-1000.0, 10000.0, 1.0), + ), + desc.FloatParam( + name="y", + label="y", + description="Y offset in pixels.", + value=0.0, + range=(-1000.0, 10000.0, 1.0), + ), + ], + enabled=lambda node: not node.autoDetect.value, + group=None, # skip group from command line + ), + desc.FloatParam( + name="sphereRadius", + label="Radius", + description="Sphere radius in pixels.", + value=500.0, + range=(0.0, 10000.0, 0.1), + enabled=lambda node: not node.autoDetect.value, + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="output", + label="Output Path", + description="Sphere detection information will be written here.", + value="{nodeCacheFolder}/detection.json", + ) + ] diff --git a/meshroom/aliceVision/Split360Images.py b/meshroom/aliceVision/Split360Images.py new file mode 100644 index 0000000000..b14db371c9 --- /dev/null +++ b/meshroom/aliceVision/Split360Images.py @@ -0,0 +1,139 @@ +__version__ = "3.0" + +from meshroom.core import desc +from meshroom.core.utils import VERBOSE_LEVEL + + +class Split360InputNodeSize(desc.DynamicNodeSize): + ''' + The Split360Images node will increase the number of views in the SfMData. + This class converts the number of input views into the number of split output views. + ''' + def computeSize(self, node): + s = super(Split360InputNodeSize, self).computeSize(node) + factor = 0 + mode = node.attribute('splitMode') + if mode.value == 'equirectangular': + factor = node.attribute('equirectangularGroup.equirectangularNbSplits').value + elif mode.value == 'dualfisheye': + factor = 2 + return s * factor + + +class Split360Images(desc.AVCommandLineNode): + commandLine = 'aliceVision_split360Images {allParams}' + size = Split360InputNodeSize('input') + + category = 'Utils' + documentation = "This node is used to extract multiple images from equirectangular or dualfisheye images." 
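Editor's note: the `computeSize` override above is the only non-declarative logic in this file, so a small standalone illustration may help. This is a sketch, not part of the patch; `FakeAttr` and `FakeNode` are hypothetical stand-ins for Meshroom's node API, assuming only that `node.attribute(name)` returns an object with a `.value`.

```python
# Sketch of Split360InputNodeSize.computeSize with hypothetical stand-in objects.
# FakeAttr/FakeNode are NOT Meshroom classes; they only mimic attribute lookup.

class FakeAttr:
    def __init__(self, value):
        self.value = value

class FakeNode:
    def __init__(self, splitMode, nbSplits):
        self._attrs = {
            "splitMode": FakeAttr(splitMode),
            "equirectangularGroup.equirectangularNbSplits": FakeAttr(nbSplits),
        }

    def attribute(self, name):
        return self._attrs[name]

def computeSize(node, nbInputViews):
    # Same logic as the override above: the base size (number of input views,
    # normally computed by DynamicNodeSize) is multiplied by a mode-dependent factor.
    factor = 0
    mode = node.attribute("splitMode")
    if mode.value == "equirectangular":
        factor = node.attribute("equirectangularGroup.equirectangularNbSplits").value
    elif mode.value == "dualfisheye":
        factor = 2
    return nbInputViews * factor

print(computeSize(FakeNode("equirectangular", 2), 10))  # 20: each view yields 2 splits
print(computeSize(FakeNode("dualfisheye", 0), 10))      # 20: each view yields 2 fisheye halves
```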
+ + inputs = [ + desc.File( + name="input", + label="Input", + description="Single image, image folder or SfMData file.", + value="", + ), + desc.ChoiceParam( + name="splitMode", + label="Split Mode", + description="Split mode (equirectangular, dualfisheye).", + value="equirectangular", + values=["equirectangular", "dualfisheye"], + ), + desc.GroupAttribute( + name="dualFisheyeGroup", + label="Dual Fisheye", + description="Dual Fisheye.", + group=None, + enabled=lambda node: node.splitMode.value == "dualfisheye", + groupDesc=[ + desc.ChoiceParam( + name="dualFisheyeOffsetPresetX", + label="X Offset Preset", + description="Dual-Fisheye X offset preset.", + value="center", + values=["center", "left", "right"], + ), + desc.ChoiceParam( + name="dualFisheyeOffsetPresetY", + label="Y Offset Preset", + description="Dual-Fisheye Y offset preset.", + value="center", + values=["center", "top", "bottom"], + ), + desc.ChoiceParam( + name="dualFisheyeCameraModel", + label="Camera Model", + description="Dual-Fisheye camera model.", + value="fisheye4", + values=["fisheye4", "equidistant_r3"], + ), + ], + ), + desc.GroupAttribute( + name="equirectangularGroup", + label="Equirectangular", + description="Equirectangular", + group=None, + enabled=lambda node: node.splitMode.value == "equirectangular", + groupDesc=[ + desc.IntParam( + name="equirectangularNbSplits", + label="Nb Splits", + description="Equirectangular number of splits.", + value=2, + range=(1, 100, 1), + ), + desc.IntParam( + name="equirectangularSplitResolution", + label="Split Resolution", + description="Equirectangular split resolution.", + value=1200, + range=(100, 10000, 1), + ), + desc.BoolParam( + name="equirectangularPreviewMode", + label="Preview Mode", + description="Export a SVG file that simulates the split.", + value=False, + ), + desc.FloatParam( + name="fov", + label="Field Of View", + description="Field of View to extract (in degrees).", + value=110.0, + range=(0.0, 180.0, 1.0), + ), + ], + ), + desc.ChoiceParam( + name="extension", + label="Output File Extension", + description="Output image file extension.", + value="", + values=["", "exr", "jpg", "tiff", "png"], + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="output", + label="Folder", + description="Output folder for extracted frames.", + value="{nodeCacheFolder}", + ), + desc.File( + name="outSfMData", + label="SfMData File", + description="Output SfMData file.", + value="{nodeCacheFolder}/rig.sfm", + ), + ] diff --git a/meshroom/aliceVision/StructureFromMotion.py b/meshroom/aliceVision/StructureFromMotion.py new file mode 100644 index 0000000000..393d3143db --- /dev/null +++ b/meshroom/aliceVision/StructureFromMotion.py @@ -0,0 +1,393 @@ +__version__ = "3.3" + +from meshroom.core import desc +from meshroom.core.utils import DESCRIBER_TYPES, VERBOSE_LEVEL + + +class StructureFromMotion(desc.AVCommandLineNode): + commandLine = 'aliceVision_incrementalSfM {allParams}' + size = desc.DynamicNodeSize('input') + + cpu = desc.Level.INTENSIVE + ram = desc.Level.INTENSIVE + + category = 'Sparse Reconstruction' + documentation = ''' +This node will analyze feature matches to understand the geometric relationship behind all the 2D observations, +and infer the rigid scene structure (3D points) with the pose (position and orientation) and internal calibration of all cameras. 
+The pipeline is a growing reconstruction process (called incremental SfM): it first computes an initial two-view reconstruction that is iteratively extended by adding new views. + +1/ Fuse 2-View Matches into Tracks + +It fuses all feature matches between image pairs into tracks. Each track represents a candidate point in space, visible from multiple cameras. +However, at this step of the pipeline, the tracks still contain many outliers. + +2/ Initial Image Pair + +It chooses the best initial image pair. This choice is critical for the quality of the final reconstruction. +It should indeed provide robust matches and contain reliable geometric information. +So, this image pair should maximize the number of matches and the distribution of the corresponding features in each image. +But at the same time, the angle between the cameras should also be large enough to provide reliable geometric information. + +3/ Initial 2-View Geometry + +It computes the fundamental matrix between the 2 selected images and considers the first one as the origin of the coordinate system. + +4/ Triangulate + +With the poses of the first 2 cameras, it triangulates the corresponding 2D features into 3D points. + +5/ Next Best View Selection + +After that, it selects all the images that have enough associations with the features that are already reconstructed in 3D. + +6/ Estimate New Cameras + +Based on these 2D-3D associations, it performs the resectioning of each of these new cameras. +The resectioning is a Perspective-n-Point algorithm (PnP) in a RANSAC framework to find the pose of the camera that validates most of the feature associations. +On each camera, a non-linear minimization is performed to refine the pose. + +7/ Triangulate + +From these new camera poses, some tracks become visible in 2 or more resected cameras, and it triangulates them. + +8/ Optimize + +It performs a Bundle Adjustment to refine everything: the extrinsic and intrinsic parameters of all cameras as well as the position of all 3D points. +It filters the results of the Bundle Adjustment by removing all observations that have a high reprojection error or insufficient angles between observations. + +9/ Loop from 5 to 9 + +As we have triangulated new points, we get more image candidates for the next best view selection, and we can iterate from 5 to 9. +It iterates in this way, adding cameras and triangulating new 2D features into 3D points, and removing 3D points that have become invalidated, until no new views can be localized. 
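+ +The result is exported as an SfMData file containing the reconstructed 3D points (structure), the camera poses and the refined intrinsics, together with a lighter SfMData file that only contains the views and poses.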
+ +## Online +[https://alicevision.org/#photogrammetry/sfm](https://alicevision.org/#photogrammetry/sfm) +''' + + inputs = [ + desc.File( + name="input", + label="SfMData", + description="Input SfMData file.", + value="", + ), + desc.ListAttribute( + elementDesc=desc.File( + name="featuresFolder", + label="Features Folder", + description="", + value="", + ), + name="featuresFolders", + label="Features Folders", + description="Folder(s) containing the extracted features and descriptors.", + exposed=True, + ), + desc.ListAttribute( + elementDesc=desc.File( + name="matchesFolder", + label="Matches Folder", + description="", + value="", + ), + name="matchesFolders", + label="Matches Folders", + description="Folder(s) in which the computed matches are stored.", + exposed=True, + ), + desc.ChoiceParam( + name="describerTypes", + label="Describer Types", + description="Describer types used to describe an image.", + values=DESCRIBER_TYPES, + value=["dspsift"], + exclusive=False, + joinChar=",", + exposed=True, + ), + desc.ChoiceParam( + name="localizerEstimator", + label="Localizer Estimator", + description="Estimator type used to localize cameras (acransac, ransac, lsmeds, loransac, maxconsensus).", + value="acransac", + values=["acransac", "ransac", "lsmeds", "loransac", "maxconsensus"], + advanced=True, + ), + desc.ChoiceParam( + name="observationConstraint", + label="Observation Constraint", + description="Observation constraint mode used in the optimization:\n" + " - Basic: Use standard reprojection error in pixel coordinates.\n" + " - Scale: Use reprojection error in pixel coordinates but relative to the feature scale.", + value="Scale", + values=["Basic", "Scale"], + advanced=True, + ), + desc.IntParam( + name="localizerEstimatorMaxIterations", + label="Localizer Max Ransac Iterations", + description="Maximum number of iterations allowed in the Ransac step.", + value=50000, + range=(1, 100000, 1), + advanced=True, + ), + desc.FloatParam( + name="localizerEstimatorError", + label="Localizer Max Ransac Error", + description="Maximum error (in pixels) allowed for camera localization (resectioning).\n" + "If set to 0, it will select a threshold according to the localizer estimator used\n" + "(if ACRansac, it will analyze the input data to select the optimal value).", + value=0.0, + range=(0.0, 100.0, 0.1), + advanced=True, + ), + desc.BoolParam( + name="lockScenePreviouslyReconstructed", + label="Lock Previously Reconstructed Scene", + description="Lock previously reconstructed poses and intrinsics.\n" + "This option is useful for SfM augmentation.", + value=False, + ), + desc.BoolParam( + name="useLocalBA", + label="Local Bundle Adjustment", + description="It reduces the reconstruction time, especially for large datasets (500+ images),\n" + "by avoiding computation of the Bundle Adjustment on areas that are not changing.", + value=True, + ), + desc.IntParam( + name="localBAGraphDistance", + label="LocalBA Graph Distance", + description="Graph-distance limit to define the active region in the Local Bundle Adjustment strategy.", + value=1, + range=(1, 10, 1), + advanced=True, + ), + desc.IntParam( + name="nbFirstUnstableCameras", + label="First Unstable Cameras Nb", + description="Number of cameras for which the bundle adjustment is performed every single time a camera is added.\n" + "This leads to more stable results while computations are not too expensive, as there is little data.\n" + "Past this number, the bundle adjustment will only be performed once for N added cameras.", + value=30, + 
range=(0, 100, 1), + advanced=True, + ), + desc.IntParam( + name="maxImagesPerGroup", + label="Max Images Per Group", + description="Maximum number of cameras that can be added before the bundle adjustment has to be performed again.\n" + "This prevents adding too much data at once without performing the bundle adjustment.", + value=30, + range=(0, 100, 1), + advanced=True, + ), + desc.IntParam( + name="bundleAdjustmentMaxOutliers", + label="Max Nb Of Outliers After BA", + description="Threshold for the maximum number of outliers allowed at the end of a bundle adjustment iteration.\n" + "Using a negative value for this threshold will disable BA iterations.", + value=50, + range=(-1, 1000, 1), + advanced=True, + ), + desc.IntParam( + name="maxNumberOfMatches", + label="Maximum Number Of Matches", + description="Maximum number of matches per image pair (and per feature type).\n" + "This can be useful to have a quick reconstruction overview.\n" + "0 means no limit.", + value=0, + range=(0, 50000, 1), + ), + desc.IntParam( + name="minNumberOfMatches", + label="Minimum Number Of Matches", + description="Minimum number of matches per image pair (and per feature type).\n" + "This can be useful to have a meaningful reconstruction with accurate keypoints.\n" + "0 means no limit.", + value=0, + range=(0, 50000, 1), + ), + desc.IntParam( + name="minInputTrackLength", + label="Min Input Track Length", + description="Minimum track length in input of SfM.", + value=2, + range=(2, 10, 1), + ), + desc.IntParam( + name="minNumberOfObservationsForTriangulation", + label="Min Observations For Triangulation", + description="Minimum number of observations to triangulate a point.\n" + "Setting it to 3 (or more) reduces drastically the noise in the point cloud,\n" + "but the number of final poses is a little bit reduced\n" + "(from 1.5% to 11% on the tested datasets).", + value=2, + range=(2, 10, 1), + advanced=True, + ), + desc.FloatParam( + name="minAngleForTriangulation", + label="Min Angle For Triangulation", + description="Minimum angle for triangulation.", + value=3.0, + range=(0.1, 10.0, 0.1), + advanced=True, + ), + desc.FloatParam( + name="minAngleForLandmark", + label="Min Angle For Landmark", + description="Minimum angle for landmark.", + value=2.0, + range=(0.1, 10.0, 0.1), + advanced=True, + ), + desc.FloatParam( + name="maxReprojectionError", + label="Max Reprojection Error", + description="Maximum reprojection error.", + value=4.0, + range=(0.1, 10.0, 0.1), + advanced=True, + ), + desc.FloatParam( + name="minAngleInitialPair", + label="Min Angle Initial Pair", + description="Minimum angle for the initial pair.", + value=5.0, + range=(0.1, 10.0, 0.1), + advanced=True, + ), + desc.FloatParam( + name="maxAngleInitialPair", + label="Max Angle Initial Pair", + description="Maximum angle for the initial pair.", + value=40.0, + range=(0.1, 60.0, 0.1), + advanced=True, + ), + desc.BoolParam( + name="useOnlyMatchesFromInputFolder", + label="Use Only Matches From Input Folder", + description="Use only matches from the input matchesFolder parameter.\n" + "Matches folders previously added to the SfMData file will be ignored.", + value=False, + invalidate=False, + advanced=True, + ), + desc.BoolParam( + name="useRigConstraint", + label="Use Rig Constraint", + description="Enable/Disable rig constraint.", + value=True, + advanced=True, + ), + desc.IntParam( + name="rigMinNbCamerasForCalibration", + label="Min Nb Cameras For Rig Calibration", + description="Minimum number of cameras to start the calibration of 
the rig.", + value=20, + range=(1, 50, 1), + advanced=True, + ), + desc.BoolParam( + name="lockAllIntrinsics", + label="Lock All Intrinsic Camera Parameters", + description="Force to keep all the intrinsic parameters of the cameras (focal length,\n" + "principal point, distortion if any) constant during the reconstruction.\n" + "This may be helpful if the input cameras are already fully calibrated.", + value=False, + ), + desc.IntParam( + name="minNbCamerasToRefinePrincipalPoint", + label="Min Nb Cameras To Refine Principal Point", + description="Minimum number of cameras to refine the principal point of the cameras (one of the intrinsic parameters of the camera).\n" + "If we do not have enough cameras, the principal point is considered to be in the center of the image.\n" + "If minNbCamerasToRefinePrincipalPoint <= 0, the principal point is never refined.\n" + "If minNbCamerasToRefinePrincipalPoint is set to 1, the principal point is always refined.", + value=3, + range=(0, 20, 1), + advanced=True, + ), + desc.BoolParam( + name="filterTrackForks", + label="Filter Track Forks", + description="Enable/Disable the track forks removal. A track contains a fork when incoherent matches\n" + "lead to multiple features in the same image for a single track.", + value=False, + ), + desc.BoolParam( + name="computeStructureColor", + label="Compute Structure Color", + description="Enable/Disable color computation of every 3D point.", + value=True, + ), + desc.BoolParam( + name="useAutoTransform", + label="Automatic Alignment", + description="Enable/Disable automatic alignment of the 3D reconstruction.\n" + "Determines scene orientation from the cameras' X axis,\n" + "determines north and scale from GPS information if available,\n" + "and defines ground level from the point cloud.", + value=True, + ), + desc.File( + name="initialPairA", + label="Initial Pair A", + description="View ID or filename of the first image (either with or without the full path).", + value="", + ), + desc.File( + name="initialPairB", + label="Initial Pair B", + description="View ID or filename of the second image (either with or without the full path).", + value="", + ), + desc.ChoiceParam( + name="interFileExtension", + label="Inter File Extension", + description="Extension of the intermediate file export.", + value=".abc", + values=[".abc", ".ply"], + invalidate=False, + advanced=True, + ), + desc.BoolParam( + name="logIntermediateSteps", + label="Log Intermediate Steps", + description="Dump the current state of the scene as an SfMData file every 3 resections.", + value=False, + invalidate=False, + advanced=True, + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="output", + label="SfMData", + description="Path to the output SfM point cloud file (in SfMData format).", + value="{nodeCacheFolder}/sfm.abc", + ), + desc.File( + name="outputViewsAndPoses", + label="Views And Poses", + description="Path to the output SfMData file with cameras (views and poses).", + value="{nodeCacheFolder}/cameras.sfm", + ), + desc.File( + name="extraInfoFolder", + label="Folder", + description="Folder for intermediate reconstruction files and additional reconstruction information files.", + value="{nodeCacheFolder}", + ), + ] diff --git a/meshroom/aliceVision/Texturing.py b/meshroom/aliceVision/Texturing.py new file mode 100644 index 0000000000..87295bf877 --- 
/dev/null +++ b/meshroom/aliceVision/Texturing.py @@ -0,0 +1,371 @@ +__version__ = "6.0" + +from meshroom.core import desc, Version +from meshroom.core.utils import COLORSPACES, VERBOSE_LEVEL + +import logging + + +class Texturing(desc.AVCommandLineNode): + commandLine = 'aliceVision_texturing {allParams}' + cpu = desc.Level.INTENSIVE + ram = desc.Level.INTENSIVE + + category = 'Dense Reconstruction' + documentation = ''' +This node computes the texturing on the mesh. + +If the mesh has no associated UV, it automatically computes UV maps. + +For each triangle, it uses the visibility information associated with each vertex to retrieve the texture candidates. +It selects the best cameras based on the resolution covering the triangle. Finally, it averages the pixel values using multiple bands in the frequency domain. +Many cameras contribute to the low frequencies, while only the best ones contribute to the high frequencies. + +## Online +[https://alicevision.org/#photogrammetry/texturing](https://alicevision.org/#photogrammetry/texturing) +''' + + inputs = [ + desc.File( + name="input", + label="Dense SfMData", + description="SfMData file.", + value="", + ), + desc.File( + name="imagesFolder", + label="Images Folder", + description="Use images from a specific folder instead of those specified in the SfMData file.\n" + "Filename should be the image UID.", + value="", + ), + desc.File( + name="normalsFolder", + label="Normals Folder", + description="Use normal maps from a specific folder to texture the mesh.\nFilename should be: uid_normalMap.", + value="", + ), + desc.File( + name="inputMesh", + label="Mesh", + description="Optional input mesh to texture. By default, it will texture the result of the reconstruction.", + value="", + ), + desc.File( + name="inputRefMesh", + label="Ref Mesh", + description="Optional input mesh to compute height maps and normal maps.\n" + "If not provided, no additional map with geometric information will be generated.", + value="", + ), + desc.ChoiceParam( + name="textureSide", + label="Texture Side", + description="Output texture size.", + value=8192, + values=[1024, 2048, 4096, 8192, 16384], + ), + desc.ChoiceParam( + name="downscale", + label="Texture Downscale", + description="Texture downscale factor.", + value=2, + values=[1, 2, 4, 8], + ), + desc.ChoiceParam( + name="outputMeshFileType", + label="Mesh File Type", + description="File type for the mesh output.", + value="obj", + values=["obj", "gltf", "fbx", "stl"], + ), + desc.GroupAttribute( + name="colorMapping", + label="Color Mapping", + description="Color map parameters.", + enabled=lambda node: (node.imagesFolder.value != ''), + group=None, + groupDesc=[ + desc.BoolParam( + name="enable", + label="Enable", + description="Generate textures if set to true.", + value=True, + invalidate=False, + group=None, + ), + desc.ChoiceParam( + name="colorMappingFileType", + label="File Type", + description="Texture file type.", + value="exr", + values=["exr", "png", "tiff", "jpg"], + enabled=lambda node: node.colorMapping.enable.value, + ), + ], + ), + desc.GroupAttribute( + name="bumpMapping", + label="Bump Mapping", + description="Bump mapping parameters.", + enabled=lambda node: (node.inputRefMesh.value != ''), + group=None, + groupDesc=[ + desc.BoolParam( + name="enable", + label="Enable", + description="Generate normal / bump maps if set to true.", + value=True, + invalidate=False, + group=None, + ), + desc.ChoiceParam( + name="bumpType", + label="Bump Type", + description="Export normal map or height map.", 
+ value="Normal", + values=["Height", "Normal"], + enabled=lambda node: node.bumpMapping.enable.value, + ), + desc.ChoiceParam( + name="normalFileType", + label="File Type", + description="File type for the normal map texture.", + value="exr", + values=["exr", "png", "tiff", "jpg"], + enabled=lambda node: node.bumpMapping.enable.value and node.bumpMapping.bumpType.value == "Normal", + ), + desc.ChoiceParam( + name="heightFileType", + label="File Type", + description="File type for the height map texture.", + value="exr", + values=["exr"], + enabled=lambda node: node.bumpMapping.enable.value and node.bumpMapping.bumpType.value == "Height", + ), + ], + ), + desc.GroupAttribute( + name="displacementMapping", + label="Displacement Mapping", + description="Displacement mapping parameters.", + enabled=lambda node: (node.inputRefMesh.value != ""), + group=None, + groupDesc=[ + desc.BoolParam( + name="enable", + label="Enable", + description="Generate height maps for displacement.", + value=True, + invalidate=False, + group=None, + ), + desc.ChoiceParam( + name="displacementMappingFileType", + label="File Type", + description="File type for the height map texture.", + value="exr", + values=["exr"], + enabled=lambda node: node.displacementMapping.enable.value, + ), + ], + ), + desc.ChoiceParam( + name="unwrapMethod", + label="Unwrap Method", + description="Method to unwrap input mesh if it does not have UV coordinates.\n" + " - Basic (> 600k faces): fast and simple. Can generate multiple atlases.\n" + " - LSCM (<= 600k faces): optimize space. Generates one atlas.\n" + " - ABF (<= 300k faces): optimize space and stretch. Generates one atlas.", + value="Basic", + values=["Basic", "LSCM", "ABF"], + ), + desc.BoolParam( + name="useUDIM", + label="Use UDIM", + description="Use UDIM UV mapping.", + value=True, + ), + desc.BoolParam( + name="fillHoles", + label="Fill Holes", + description="Fill texture holes with plausible values.", + value=False, + ), + desc.IntParam( + name="padding", + label="Padding", + description="Texture edge padding size in pixels.", + value=5, + range=(0, 20, 1), + advanced=True, + ), + desc.IntParam( + name="multiBandDownscale", + label="Multi Band Downscale", + description="Width of frequency bands for multi-band blending.", + value=4, + range=(0, 8, 2), + advanced=True, + ), + desc.GroupAttribute( + name="multiBandNbContrib", + label="Multi-Band Contributions", + groupDesc=[ + desc.IntParam( + name="high", + label="High Freq", + description="High frequency band.", + value=1, + range=None, + ), + desc.IntParam( + name="midHigh", + label="Mid-High Freq", + description="Mid-high frequency band.", + value=5, + range=None, + ), + desc.IntParam( + name="midLow", + label="Mid-Low Freq", + description="Mid-low frequency band.", + value=10, + range=None, + ), + desc.IntParam( + name="low", + label="Low Freq", + description="Low frequency band.", + value=0, + range=None, + ), + ], + description="Number of contributions per frequency band for multi-band blending (each frequency band also contributes to lower bands).", + advanced=True, + ), + desc.BoolParam( + name="useScore", + label="Use Score", + description="Use triangle scores (i.e. 
reprojection area) for multi-band blending.", + value=True, + advanced=True, + ), + desc.FloatParam( + name="bestScoreThreshold", + label="Best Score Threshold", + description="Setting this parameter to 0.0 disables filtering based on the threshold relative to the best score.", + value=0.1, + range=(0.0, 1.0, 0.01), + advanced=True, + ), + desc.FloatParam( + name="angleHardThreshold", + label="Angle Hard Threshold", + description="Setting this parameter to 0.0 disables angle hard threshold filtering.", + value=90.0, + range=(0.0, 180.0, 0.01), + advanced=True, + ), + desc.ChoiceParam( + name="workingColorSpace", + label="Working Color Space", + description="Color space for the texturing internal computation (does not impact the output file color space).", + values=COLORSPACES, + value="sRGB", + advanced=True, + ), + desc.ChoiceParam( + name="outputColorSpace", + label="Output Color Space", + description="Color space for the output texture files.", + values=COLORSPACES, + value="AUTO", + ), + desc.BoolParam( + name="correctEV", + label="Correct Exposure", + description="Uniformize image exposure values.", + value=True, + ), + desc.BoolParam( + name="forceVisibleByAllVertices", + label="Force Visible By All Vertices", + description="Triangle visibility is based on the union of vertices visibility.", + value=False, + advanced=True, + ), + desc.BoolParam( + name="flipNormals", + label="Flip Normals", + description="Option to flip face normals.\n" + "It can be needed as it depends on the vertex order in triangles and the convention changes from one software to another.", + value=False, + advanced=True, + ), + desc.ChoiceParam( + name="visibilityRemappingMethod", + label="Visibility Remapping Method", + description="Method to remap visibilities from the reconstruction to the input mesh (Pull, Push, PullPush, MeshItself).", + value="PullPush", + values=["Pull", "Push", "PullPush", "MeshItself"], + advanced=True, + ), + desc.FloatParam( + name="subdivisionTargetRatio", + label="Subdivision Target Ratio", + description="Percentage of the density of the reconstruction as the target for the subdivision:\n" + " - 0: disable subdivision.\n" + " - 0.5: half density of the reconstruction.\n" + " - 1: full density of the reconstruction.", + value=0.8, + range=(0.0, 1.0, 0.001), + advanced=True, + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="output", + label="Folder", + description="Folder for output mesh: OBJ, material and texture files.", + value="{nodeCacheFolder}", + ), + desc.File( + name="outputMesh", + label="Mesh", + description="Output mesh file.", + value="{nodeCacheFolder}/texturedMesh.{outputMeshFileTypeValue}", + group="", + ), + desc.File( + name="outputMaterial", + enabled=lambda node: node.outputMeshFileType.value == "obj", + label="Material", + description="Output material file.", + value="{nodeCacheFolder}/texturedMesh.mtl", + group="", + ), + desc.File( + name="outputTextures", + label="Textures", + description="Output texture files.", + value=lambda attr: "{nodeCacheFolder}/texture_*." 
+ attr.node.colorMapping.colorMappingFileType.value if attr.node.colorMapping.enable.value else "", + group="", + ), + ] + + def upgradeAttributeValues(self, attrValues, fromVersion): + if fromVersion < Version(6, 0): + outputTextureFileType = attrValues["outputTextureFileType"] + if isinstance(outputTextureFileType, str): + attrValues["colorMapping"] = {} + attrValues["colorMapping"]["colorMappingFileType"] = outputTextureFileType + return attrValues diff --git a/meshroom/aliceVision/TracksBuilding.py b/meshroom/aliceVision/TracksBuilding.py new file mode 100644 index 0000000000..1a52185545 --- /dev/null +++ b/meshroom/aliceVision/TracksBuilding.py @@ -0,0 +1,97 @@ +__version__ = "1.0" + +from meshroom.core import desc +from meshroom.core.utils import DESCRIBER_TYPES, VERBOSE_LEVEL + + +class TracksBuilding(desc.AVCommandLineNode): + commandLine = 'aliceVision_tracksBuilding {allParams}' + size = desc.DynamicNodeSize('input') + + category = 'Sparse Reconstruction' + documentation = ''' +It fuses all feature matches between image pairs into tracks. Each track represents a candidate point in space, visible from multiple cameras. +''' + + inputs = [ + desc.File( + name="input", + label="SfMData", + description="Input SfMData file.", + value="", + exposed=True, + ), + desc.ListAttribute( + elementDesc=desc.File( + name="featuresFolder", + label="Features Folder", + description="Folder containing some extracted features and descriptors.", + value="", + ), + name="featuresFolders", + label="Features Folders", + description="Folder(s) containing the extracted features and descriptors.", + exposed=True, + ), + desc.ListAttribute( + elementDesc=desc.File( + name="matchesFolder", + label="Matches Folder", + description="Folder containing some matches.", + value="", + ), + name="matchesFolders", + label="Matches Folders", + description="Folder(s) in which computed matches are stored.", + exposed=True, + ), + desc.ChoiceParam( + name="describerTypes", + label="Describer Types", + description="Describer types used to describe an image.", + values=DESCRIBER_TYPES, + value=["dspsift"], + exclusive=False, + joinChar=",", + exposed=True, + ), + desc.IntParam( + name="minInputTrackLength", + label="Min Input Track Length", + description="Minimum track length.", + value=2, + range=(2, 10, 1), + ), + desc.BoolParam( + name="useOnlyMatchesFromInputFolder", + label="Use Only Matches From Input Folder", + description="Use only matches from the input 'matchesFolder' parameter.\n" + "Matches folders previously added to the SfMData file will be ignored.", + value=False, + invalidate=False, + advanced=True, + ), + desc.BoolParam( + name="filterTrackForks", + label="Filter Track Forks", + description="Enable/Disable the track forks removal. 
A track contains a fork when incoherent matches\n" + "lead to multiple features in the same image for a single track.", + value=False, + ), + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + values=VERBOSE_LEVEL, + value="info", + ), + ] + + outputs = [ + desc.File( + name="output", + label="Tracks", + description="Path to the output tracks file.", + value="{nodeCacheFolder}/tracksFile.json", + ), + ] diff --git a/meshroom/aliceVision/__init__.py b/meshroom/aliceVision/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/meshroom/blender/ScenePreview.py b/meshroom/blender/ScenePreview.py new file mode 100644 index 0000000000..154a841e3c --- /dev/null +++ b/meshroom/blender/ScenePreview.py @@ -0,0 +1,141 @@ +__version__ = "2.0" + +from meshroom.core import desc +import os.path + +currentDir = os.path.dirname(os.path.abspath(__file__)) + +class ScenePreview(desc.CommandLineNode): + commandLine = '{blenderCmdValue} -b --python {scriptValue} -- {allParams}' + size = desc.DynamicNodeSize('cameras') + parallelization = desc.Parallelization(blockSize=40) + commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}' + + category = 'Utils' + documentation = ''' +This node uses Blender to visualize a 3D model from a given set of cameras. +The cameras must be a SfMData file in JSON format. +For the 3D model it supports both point clouds in Alembic format and meshes in OBJ format. +One frame per viewpoint will be rendered, and the undistorted views can optionally be used as background. +''' + + inputs = [ + desc.File( + name="blenderCmd", + label="Blender Command", + description="Command to launch Blender.", + value="blender", + invalidate=False, + group="", + ), + desc.File( + name="script", + label="Script", + description="Path to the internal script for rendering in Blender.", + value=os.path.join("{nodeSourceCodeFolder}", "scripts", "preview.py"), + invalidate=False, + group="", + advanced=True, + ), + desc.File( + name="cameras", + label="Cameras", + description="SfMData with the views, poses and intrinsics to use (in JSON format).", + value="", + ), + desc.File( + name="model", + label="Model", + description="Point cloud (.abc) or mesh (.obj) to render.", + value="", + ), + desc.BoolParam( + name="useBackground", + label="Display Background", + description="Use the undistorted images as background.", + value=True, + ), + desc.File( + name="undistortedImages", + label="Undistorted Images", + description="Folder containing the undistorted images.", + value="", + enabled=lambda node: node.useBackground.value, + ), + desc.BoolParam( + name="useMasks", + label="Apply Masks", + description="Apply mask to the rendered geometry.", + value=True, + ), + desc.File( + name="masks", + label="Masks", + description="Folder containing the masks.", + value="", + enabled=lambda node: node.useMasks.value, + ), + desc.GroupAttribute( + name="pointCloudParams", + label="Point Cloud Settings", + group=None, + enabled=lambda node: node.model.value.lower().endswith(".abc"), + description="Settings for point cloud rendering.", + groupDesc=[ + desc.FloatParam( + name="particleSize", + label="Particle Size", + description="Scale of particles used for the point cloud.", + value=0.01, + range=(0.01, 1.0, 0.01), + ), + desc.ChoiceParam( + name="particleColor", + label="Particle Color", + description="Color of particles used for the point cloud.", + value="Red", + values=["Grey", "White", 
"Red", "Green", "Magenta"], + ), + ], + ), + desc.GroupAttribute( + name="meshParams", + label="Mesh Settings", + group=None, + enabled=lambda node: node.model.value.lower().endswith(".obj"), + description="Setting for mesh rendering.", + groupDesc=[ + desc.ChoiceParam( + name="shading", + label="Shading", + description="Shading method for visualizing the mesh.", + value="wireframe", + values=["wireframe", "line_art"], + ), + desc.ChoiceParam( + name="edgeColor", + label="Edge Color", + description="Color of the mesh edges.", + value="Red", + values=["Grey", "White", "Red", "Green", "Magenta"], + ), + ], + ), + ] + + outputs = [ + desc.File( + name="output", + label="Output", + description="Output folder.", + value="{nodeCacheFolder}", + ), + desc.File( + name="frames", + label="Frames", + description="Frames rendered in Blender.", + semantic="image", + value="{nodeCacheFolder}/_preview.jpg", + group="", + ), + ] diff --git a/meshroom/blender/__init__.py b/meshroom/blender/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/meshroom/blender/scripts/__init__.py b/meshroom/blender/scripts/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/meshroom/blender/scripts/preview.py b/meshroom/blender/scripts/preview.py new file mode 100644 index 0000000000..ba4d431860 --- /dev/null +++ b/meshroom/blender/scripts/preview.py @@ -0,0 +1,434 @@ +import bpy +import os +import mathutils +import math +import sys +import argparse +import json +import glob + + +def createParser(): + '''Create command line interface.''' + # When --help or no args are given, print this help + usage_text = ( + "Run blender in background mode with this script:" + " blender --background --python " + __file__ + " -- [options]" + ) + + parser = argparse.ArgumentParser(description=usage_text) + + parser.add_argument( + "--cameras", metavar='FILE', required=True, + help="sfmData with the animated camera.", + ) + parser.add_argument( + "--rangeStart", type=int, required=False, default=-1, + help="Range start for processing views. 
Set to -1 to process all views.", + ) + parser.add_argument( + "--rangeSize", type=int, required=False, default=0, + help="Range size for processing views.", + ) + parser.add_argument( + "--useBackground", type=lambda x: (str(x).lower() == 'true'), required=True, + help="Display the background image or not.", + ) + parser.add_argument( + "--undistortedImages", metavar='FILE', required=False, + help="Path to folder containing undistorted images to use as background.", + ) + parser.add_argument( + "--model", metavar='FILE', required=True, + help="Point Cloud or Mesh used in the rendering.", + ) + parser.add_argument( + "--useMasks", type=lambda x: (str(x).lower() == 'true'), required=True, + help="Apply mask to the rendered geometry or not.", + ) + parser.add_argument( + "--masks", metavar='FILE', required=False, + help="Path to folder containing masks to apply on rendered geometry.", + ) + parser.add_argument( + "--particleSize", type=float, required=False, + help="Scale of particles used to show the point cloud", + ) + parser.add_argument( + "--particleColor", type=str, required=False, + help="Color of particles used to show the point cloud (SFM Data is .abc)", + ) + parser.add_argument( + "--edgeColor", type=str, required=False, + help="Color of the edges of the rendered object (SFM Data is .obj)", + ) + parser.add_argument( + "--shading", type=str, required=False, + help="Shading method for rendering the mesh", + ) + parser.add_argument( + "--output", metavar='FILE', required=True, + help="Render an image to the specified path", + ) + + return parser + + +def parseSfMCameraFile(filepath): + '''Retrieve cameras from SfM file in json format.''' + with open(os.path.abspath(filepath), 'r') as file: + sfm = json.load(file) + views = sfm['views'] + intrinsics = sfm['intrinsics'] + poses = sfm['poses'] + return views, intrinsics, poses + + +def getFromId(data, key, identifier): + '''Utility function to retrieve view, intrinsic or pose using their IDs.''' + for item in data: + if item[key] == identifier: + return item + return None + + +def setupCamera(intrinsic, pose): + '''Setup Blender camera to match the given SfM camera.''' + camObj = bpy.data.objects['Camera'] + camData = bpy.data.cameras['Camera'] + + bpy.context.scene.render.resolution_x = int(intrinsic['width']) + bpy.context.scene.render.resolution_y = int(intrinsic['height']) + bpy.context.scene.render.pixel_aspect_x = float(intrinsic['pixelRatio']) + + camData.sensor_width = float(intrinsic['sensorWidth']) + camData.lens = float(intrinsic['focalLength']) / float(intrinsic['pixelRatio']) + + #shift is normalized with the largest resolution + fwidth = float(intrinsic['width']) + fheight = float(intrinsic['height']) + maxSize = max(fwidth, fheight) + camData.shift_x = - float(intrinsic['principalPoint'][0]) / maxSize + camData.shift_y = float(intrinsic['principalPoint'][1]) / maxSize + + tr = pose['pose']['transform'] + matPose = mathutils.Matrix.Identity(4) + matPose[0][0] = float(tr['rotation'][0]) + matPose[0][1] = float(tr['rotation'][1]) + matPose[0][2] = float(tr['rotation'][2]) + matPose[1][0] = float(tr['rotation'][3]) + matPose[1][1] = float(tr['rotation'][4]) + matPose[1][2] = float(tr['rotation'][5]) + matPose[2][0] = float(tr['rotation'][6]) + matPose[2][1] = float(tr['rotation'][7]) + matPose[2][2] = float(tr['rotation'][8]) + matPose[0][3] = float(tr['center'][0]) + matPose[1][3] = float(tr['center'][1]) + matPose[2][3] = float(tr['center'][2]) + + matConvert = mathutils.Matrix.Identity(4) + matConvert[1][1] = -1 + 
matConvert[2][2] = -1 + + camObj.matrix_world = matConvert @ matPose @ matConvert + + +def initScene(): + '''Initialize Blender scene.''' + # Clear current scene (keep default camera) + bpy.data.objects.remove(bpy.data.objects['Cube']) + bpy.data.objects.remove(bpy.data.objects['Light']) + # Set output format + bpy.context.scene.render.image_settings.file_format = 'JPEG' + # Setup rendering engine + bpy.context.scene.render.engine = 'CYCLES' + bpy.context.scene.cycles.samples = 1 + bpy.context.scene.cycles.use_adaptive_sampling = False + bpy.context.scene.cycles.use_denoising = False + + +def initCompositing(useBackground, useMasks): + '''Initialize Blender compositing graph for adding background image to render.''' + bpy.context.scene.render.film_transparent = True + bpy.context.scene.use_nodes = True + nodeAlphaOver = bpy.context.scene.node_tree.nodes.new(type="CompositorNodeAlphaOver") + nodeSetAlpha = bpy.context.scene.node_tree.nodes.new(type="CompositorNodeSetAlpha") + nodeBackground = bpy.context.scene.node_tree.nodes.new(type="CompositorNodeImage") + nodeMask = bpy.context.scene.node_tree.nodes.new(type="CompositorNodeImage") + nodeRender = bpy.context.scene.node_tree.nodes['Render Layers'] + nodeComposite = bpy.context.scene.node_tree.nodes['Composite'] + if useBackground and useMasks: + bpy.context.scene.node_tree.links.new(nodeBackground.outputs['Image'], nodeAlphaOver.inputs[1]) + bpy.context.scene.node_tree.links.new(nodeRender.outputs['Image'], nodeSetAlpha.inputs['Image']) + bpy.context.scene.node_tree.links.new(nodeMask.outputs['Image'], nodeSetAlpha.inputs['Alpha']) + bpy.context.scene.node_tree.links.new(nodeSetAlpha.outputs['Image'], nodeAlphaOver.inputs[2]) + bpy.context.scene.node_tree.links.new(nodeAlphaOver.outputs['Image'], nodeComposite.inputs['Image']) + elif useBackground: + bpy.context.scene.node_tree.links.new(nodeBackground.outputs['Image'], nodeAlphaOver.inputs[1]) + bpy.context.scene.node_tree.links.new(nodeRender.outputs['Image'], nodeAlphaOver.inputs[2]) + bpy.context.scene.node_tree.links.new(nodeAlphaOver.outputs['Image'], nodeComposite.inputs['Image']) + elif useMasks: + bpy.context.scene.node_tree.links.new(nodeRender.outputs['Image'], nodeSetAlpha.inputs['Image']) + bpy.context.scene.node_tree.links.new(nodeMask.outputs['Image'], nodeSetAlpha.inputs['Alpha']) + bpy.context.scene.node_tree.links.new(nodeSetAlpha.outputs['Image'], nodeComposite.inputs['Image']) + return nodeBackground, nodeMask + + +def setupRender(view, intrinsic, pose, outputDir): + '''Setup rendering in Blender for a given view.''' + setupCamera(intrinsic, pose) + + baseImgName = os.path.splitext(os.path.basename(view['path']))[0] + bpy.context.scene.render.filepath = os.path.abspath(outputDir + '/' + baseImgName + '_preview.jpg') + + +def setupBackground(view, folderUndistorted, nodeBackground): + '''Retrieve undistorted image corresponding to view and use it as background.''' + matches = glob.glob(folderUndistorted + '/*' + view['viewId'] + "*") # try with viewId + if len(matches) == 0: + baseImgName = os.path.splitext(os.path.basename(view['path']))[0] + matches = glob.glob(folderUndistorted + '/*' + baseImgName + "*") # try with image name + if len(matches) == 0: + # no background image found + return None + undistortedImgPath = matches[0] + img = bpy.data.images.load(filepath=undistortedImgPath) + nodeBackground.image = img + return img + + +def setupMask(view, folderMasks, nodeMask): + '''Retrieve mask corresponding to view and use it in compositing graph.''' + matches = 
glob.glob(folderMasks + '/*' + view['viewId'] + "*") # try with viewId + if len(matches) == 0: + baseImgName = os.path.splitext(os.path.basename(view['path']))[0] + matches = glob.glob(folderMasks + '/*' + baseImgName + "*") # try with image name + if len(matches) == 0: + # no mask found + return None + maskPath = matches[0] + mask = bpy.data.images.load(filepath=maskPath) + nodeMask.image = mask + return mask + + +def loadModel(filename): + '''Load model in Alembic or OBJ format. Make sure orientation matches camera orientation.''' + if filename.lower().endswith('.obj'): + bpy.ops.import_scene.obj(filepath=filename, axis_forward='Y', axis_up='Z') + meshName = os.path.splitext(os.path.basename(filename))[0] + return bpy.data.objects[meshName], bpy.data.meshes[meshName] + elif filename.lower().endswith('.abc'): + bpy.ops.wm.alembic_import(filepath=filename) + root = bpy.data.objects['mvgRoot'] + root.rotation_euler.rotate_axis('X', math.radians(-90.0)) + return bpy.data.objects['mvgPointCloud'], bpy.data.meshes['particleShape1'] + + +def setupWireframeShading(mesh, color): + '''Setup material for wireframe shading.''' + # Initialize wireframe material + material = bpy.data.materials.new('Wireframe') + material.use_backface_culling = True + material.use_nodes = True + material.blend_method = 'BLEND' + material.node_tree.links.clear() + # Wireframe node + nodeWireframe = material.node_tree.nodes.new(type='ShaderNodeWireframe') + nodeWireframe.use_pixel_size = True + nodeWireframe.inputs['Size'].default_value = 2.0 + # Emission node + nodeEmission = material.node_tree.nodes.new(type='ShaderNodeEmission') + nodeEmission.inputs['Color'].default_value = color + # Holdout node + nodeHoldout = material.node_tree.nodes.new(type='ShaderNodeHoldout') + # Mix Shader node + nodeMix = material.node_tree.nodes.new(type='ShaderNodeMixShader') + # Retrieve output node + nodeOutput = material.node_tree.nodes['Material Output'] + # Connect nodes + material.node_tree.links.new(nodeWireframe.outputs['Fac'], nodeMix.inputs['Fac']) + material.node_tree.links.new(nodeHoldout.outputs['Holdout'], nodeMix.inputs[1]) + material.node_tree.links.new(nodeEmission.outputs['Emission'], nodeMix.inputs[2]) + material.node_tree.links.new(nodeMix.outputs['Shader'], nodeOutput.inputs['Surface']) + # Apply material to mesh + mesh.materials.clear() + mesh.materials.append(material) + + +def setupLineArtShading(obj, mesh, color): + '''Setup line art shading using Freestyle.''' + # Freestyle + bpy.context.scene.render.use_freestyle = True + bpy.data.linestyles["LineStyle"].color = (color[0], color[1], color[2]) + # Holdout material + material = bpy.data.materials.new('Holdout') + material.use_nodes = True + material.node_tree.links.clear() + nodeHoldout = material.node_tree.nodes.new(type='ShaderNodeHoldout') + nodeOutput = material.node_tree.nodes['Material Output'] + material.node_tree.links.new(nodeHoldout.outputs['Holdout'], nodeOutput.inputs['Surface']) + # Apply material to mesh + mesh.materials.clear() + mesh.materials.append(material) + + +def setupPointCloudShading(obj, color, size): + '''Setup material and geometry nodes for point cloud shading.''' + # Colored filling material + material = bpy.data.materials.new('PointCloud_Mat') + material.use_nodes = True + material.node_tree.links.clear() + nodeEmission = material.node_tree.nodes.new(type='ShaderNodeEmission') + nodeEmission.inputs['Color'].default_value = color + nodeOutputFill = material.node_tree.nodes['Material Output'] + 
material.node_tree.links.new(nodeEmission.outputs['Emission'], nodeOutputFill.inputs['Surface']) + # Geometry nodes modifier for particles + geo = bpy.data.node_groups.new('Particles_Graph', type='GeometryNodeTree') + mod = obj.modifiers.new('Particles_Modifier', type='NODES') + mod.node_group = geo + # Setup nodes + nodeInput = geo.nodes.new(type='NodeGroupInput') + nodeOutput = geo.nodes.new(type='NodeGroupOutput') + nodeM2P = geo.nodes.new(type='GeometryNodeMeshToPoints') + nodeIoP = geo.nodes.new(type='GeometryNodeInstanceOnPoints') + nodeCube = geo.nodes.new(type='GeometryNodeMeshCube') + nodeSize = geo.nodes.new(type='ShaderNodeValue') + nodeSize.outputs['Value'].default_value = size + nodeMat = geo.nodes.new(type='GeometryNodeSetMaterial') + nodeMat.inputs[2].default_value = material + # Connect nodes + geo.links.new(nodeInput.outputs[0], nodeM2P.inputs['Mesh']) + geo.links.new(nodeM2P.outputs['Points'], nodeIoP.inputs['Points']) + geo.links.new(nodeCube.outputs['Mesh'], nodeIoP.inputs['Instance']) + geo.links.new(nodeSize.outputs['Value'], nodeIoP.inputs['Scale']) + geo.links.new(nodeIoP.outputs['Instances'], nodeMat.inputs['Geometry']) + geo.links.new(nodeMat.outputs[0], nodeOutput.inputs[0]) + + + +def main(): + + argv = sys.argv + + if "--" not in argv: + argv = [] # as if no args are passed + else: + argv = argv[argv.index("--") + 1:] # get all args after "--" + + parser = createParser() + args = parser.parse_args(argv) + + if not argv: + parser.print_help() + return -1 + + if args.useBackground and not args.undistortedImages: + print("Error: --undistortedImages argument not given, aborting.") + parser.print_help() + return -1 + + # Color palette (common for point cloud and mesh visualization) + palette={ + 'Grey':(0.2, 0.2, 0.2, 1), + 'White':(1, 1, 1, 1), + 'Red':(0.5, 0, 0, 1), + 'Green':(0, 0.5, 0, 1), + 'Magenta':(1.0, 0, 0.75, 1) + } + + print("Init scene") + initScene() + + print("Init compositing") + nodeBackground, nodeMask = initCompositing(args.useBackground, args.useMasks) + + print("Parse cameras SfM file") + views, intrinsics, poses = parseSfMCameraFile(args.cameras) + + print("Load scene objects") + sceneObj, sceneMesh = loadModel(args.model) + + print("Setup shading") + if args.model.lower().endswith('.obj'): + color = palette[args.edgeColor] + if args.shading == 'wireframe': + setupWireframeShading(sceneMesh, color) + elif args.shading == 'line_art': + setupLineArtShading(sceneObj, sceneMesh, color) + elif args.model.lower().endswith('.abc'): + color = palette[args.particleColor] + setupPointCloudShading(sceneObj, color, args.particleSize) + + print("Retrieve range") + rangeStart = args.rangeStart + rangeSize = args.rangeSize + if rangeStart != -1: + if rangeStart < 0 or rangeSize < 0 or rangeStart > len(views): + print("Invalid range") + return 0 + if rangeStart + rangeSize > len(views): + rangeSize = len(views) - rangeStart + else: + rangeStart = 0 + rangeSize = len(views) + + print("Render viewpoints") + for view in views[rangeStart:rangeStart+rangeSize]: + intrinsic = getFromId(intrinsics, 'intrinsicId', view['intrinsicId']) + if not intrinsic: + continue + + pose = getFromId(poses, 'poseId', view['poseId']) + if not pose: + continue + + print("Rendering view " + view['viewId']) + + img = None + if args.useBackground: + img = setupBackground(view, args.undistortedImages, nodeBackground) + if not img: + # background setup failed + # do not render this frame + continue + + mask = None + if args.useMasks: + mask = setupMask(view, args.masks, nodeMask) + if not 
mask: + # mask setup failed + # do not render this frame + continue + + setupRender(view, intrinsic, pose, args.output) + bpy.ops.render.render(write_still=True) + + # if the pixel aspect ratio is not 1, reload and rescale the rendered image + if bpy.context.scene.render.pixel_aspect_x != 1.0: + finalImg = bpy.data.images.load(bpy.context.scene.render.filepath) + finalImg.scale(int(bpy.context.scene.render.resolution_x * bpy.context.scene.render.pixel_aspect_x), bpy.context.scene.render.resolution_y) + finalImg.save() + # clear image from memory + bpy.data.images.remove(finalImg) + + # clear memory + if img: + bpy.data.images.remove(img) + if mask: + bpy.data.images.remove(mask) + + print("Done") + return 0 + + +if __name__ == "__main__": + + err = 1 + try: + err = main() + except Exception as e: + print("\n" + str(e)) + sys.exit(err) + sys.exit(err) + diff --git a/meshroom/cameraTracking.mg b/meshroom/cameraTracking.mg new file mode 100644 index 0000000000..6fcf26ae85 --- /dev/null +++ b/meshroom/cameraTracking.mg @@ -0,0 +1,531 @@ +{ + "header": { + "releaseVersion": "2025.1.0-develop", + "fileVersion": "2.0", + "template": true, + "nodesVersions": { + "ApplyCalibration": "1.0", + "CameraInit": "12.0", + "CheckerboardDetection": "1.0", + "ConvertSfMFormat": "2.0", + "DepthMap": "5.0", + "DepthMapFilter": "4.0", + "DistortionCalibration": "5.0", + "ExportAnimatedCamera": "2.0", + "ExportDistortion": "2.0", + "FeatureExtraction": "1.3", + "FeatureMatching": "2.0", + "ImageDetectionPrompt": "0.1", + "ImageMatching": "2.0", + "ImageMatchingMultiSfM": "1.0", + "ImageSegmentationBox": "0.1", + "KeyframeSelection": "5.0", + "MeshDecimate": "1.0", + "MeshFiltering": "3.0", + "Meshing": "7.0", + "PrepareDenseScene": "3.1", + "Publish": "1.3", + "ScenePreview": "2.0", + "SfMTransfer": "2.1", + "SfMTriangulation": "1.0", + "StructureFromMotion": "3.3", + "Texturing": "6.0" + } + }, + "graph": { + "ApplyCalibration_1": { + "nodeType": "ApplyCalibration", + "position": [ + 0, + 0 + ], + "inputs": { + "input": "{CameraInit_1.output}", + "calibration": "{DistortionCalibration_1.output}" + }, + "internalInputs": { + "color": "#575963" + } + }, + "CameraInit_1": { + "nodeType": "CameraInit", + "position": [ + -200, + 0 + ], + "inputs": {}, + "internalInputs": { + "color": "#575963" + } + }, + "CameraInit_2": { + "nodeType": "CameraInit", + "position": [ + -600, + -160 + ], + "inputs": {}, + "internalInputs": { + "label": "CameraInitLensGrid", + "color": "#302e2e" + } + }, + "CheckerboardDetection_1": { + "nodeType": "CheckerboardDetection", + "position": [ + -400, + -160 + ], + "inputs": { + "input": "{CameraInit_2.output}", + "useNestedGrids": true, + "exportDebugImages": true + }, + "internalInputs": { + "color": "#302e2e" + } + }, + "ConvertSfMFormat_1": { + "nodeType": "ConvertSfMFormat", + "position": [ + 3000, + 200 + ], + "inputs": { + "input": "{ExportAnimatedCamera_1.input}", + "fileExt": "json", + "describerTypes": "{StructureFromMotion_1.describerTypes}", + "structure": false, + "observations": false + }, + "internalInputs": { + "color": "#4c594c" + } + }, + "DepthMapFilter_1": { + "nodeType": "DepthMapFilter", + "position": [ + 2400, + 0 + ], + "inputs": { + "input": "{DepthMap_1.input}", + "depthMapsFolder": "{DepthMap_1.output}" + }, + "internalInputs": { + "color": "#3f3138" + } + }, + "DepthMap_1": { + "nodeType": "DepthMap", + "position": [ + 2200, + 0 + ], + "inputs": { + "input": "{PrepareDenseScene_1.input}", + "imagesFolder": "{PrepareDenseScene_1.output}", + "downscale": 1 + }, + 
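Note on the scenePreview Blender script above: it only parses arguments that appear after a literal "--", which is Blender's convention for forwarding command-line arguments to --python scripts. A hypothetical invocation sketch follows; the file paths are examples and the flag spellings are assumptions inferred from the attribute names used in main(), since createParser() is defined earlier in the script:

    import subprocess

    # Hypothetical invocation: Blender forwards everything after "--" to the
    # script's own argparse parser. Paths and flag names are assumed, not
    # taken from createParser().
    subprocess.run([
        "blender", "--background", "--python", "preview.py", "--",
        "--cameras", "cameras.json",
        "--model", "mesh.obj",
        "--useBackground", "--undistortedImages", "undistorted/",
        "--output", "renders/",
    ], check=True)
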
"internalInputs": { + "color": "#3f3138" + } + }, + "DistortionCalibration_1": { + "nodeType": "DistortionCalibration", + "position": [ + -200, + -160 + ], + "inputs": { + "input": "{CheckerboardDetection_1.input}", + "checkerboards": "{CheckerboardDetection_1.output}" + }, + "internalInputs": { + "color": "#302e2e" + } + }, + "ExportAnimatedCamera_1": { + "nodeType": "ExportAnimatedCamera", + "position": [ + 1600, + 200 + ], + "inputs": { + "input": "{StructureFromMotion_1.output}", + "exportUndistortedImages": true + }, + "internalInputs": { + "color": "#80766f" + } + }, + "ExportDistortion_1": { + "nodeType": "ExportDistortion", + "position": [ + 0, + -160 + ], + "inputs": { + "input": "{DistortionCalibration_1.output}" + }, + "internalInputs": { + "color": "#302e2e" + } + }, + "FeatureExtraction_1": { + "nodeType": "FeatureExtraction", + "position": [ + 400, + 200 + ], + "inputs": { + "input": "{ApplyCalibration_1.output}", + "masksFolder": "{ImageSegmentationBox_1.output}", + "maskExtension": "exr" + }, + "internalInputs": { + "color": "#575963" + } + }, + "FeatureMatching_1": { + "nodeType": "FeatureMatching", + "position": [ + 600, + 0 + ], + "inputs": { + "input": "{ImageMatching_1.input}", + "featuresFolders": "{ImageMatching_1.featuresFolders}", + "imagePairsList": "{ImageMatching_1.output}", + "describerTypes": "{FeatureExtraction_1.describerTypes}" + }, + "internalInputs": { + "label": "FeatureMatchingKeyframes", + "color": "#575963" + } + }, + "FeatureMatching_2": { + "nodeType": "FeatureMatching", + "position": [ + 1200, + 360 + ], + "inputs": { + "input": "{ImageMatching_2.input}", + "featuresFolders": "{ImageMatching_2.featuresFolders}", + "imagePairsList": "{ImageMatching_2.output}" + }, + "internalInputs": { + "label": "FeatureMatchingAllFrames", + "color": "#80766f" + } + }, + "FeatureMatching_3": { + "nodeType": "FeatureMatching", + "position": [ + 1200, + 200 + ], + "inputs": { + "input": "{ImageMatchingMultiSfM_1.outputCombinedSfM}", + "featuresFolders": "{ImageMatchingMultiSfM_1.featuresFolders}", + "imagePairsList": "{ImageMatchingMultiSfM_1.output}", + "describerTypes": "{FeatureExtraction_1.describerTypes}" + }, + "internalInputs": { + "label": "FeatureMatchingFramesToKeyframes", + "color": "#80766f" + } + }, + "ImageDetectionPrompt_1": { + "nodeType": "ImageDetectionPrompt", + "position": [ + 0, + 200 + ], + "inputs": { + "input": "{CameraInit_1.output}" + }, + "internalInputs": { + "color": "#575963" + } + }, + "ImageMatchingMultiSfM_1": { + "nodeType": "ImageMatchingMultiSfM", + "position": [ + 1000, + 200 + ], + "inputs": { + "input": "{KeyframeSelection_1.outputSfMDataFrames}", + "inputB": "{StructureFromMotion_2.output}", + "featuresFolders": [ + "{FeatureExtraction_1.output}" + ], + "method": "VocabularyTree", + "matchingMode": "a/b", + "nbMatches": 20 + }, + "internalInputs": { + "color": "#80766f" + } + }, + "ImageMatching_1": { + "nodeType": "ImageMatching", + "position": [ + 400, + 0 + ], + "inputs": { + "input": "{KeyframeSelection_1.outputSfMDataKeyframes}", + "featuresFolders": [ + "{FeatureExtraction_1.output}" + ], + "method": "Exhaustive" + }, + "internalInputs": { + "label": "ImageMatchingKeyframes", + "color": "#575963" + } + }, + "ImageMatching_2": { + "nodeType": "ImageMatching", + "position": [ + 1000, + 360 + ], + "inputs": { + "input": "{ApplyCalibration_1.output}", + "featuresFolders": [ + "{FeatureExtraction_1.output}" + ], + "method": "Sequential", + "nbNeighbors": 20 + }, + "internalInputs": { + "color": "#80766f" + } + }, + 
"ImageSegmentationBox_1": { + "nodeType": "ImageSegmentationBox", + "position": [ + 200, + 200 + ], + "inputs": { + "input": "{ImageDetectionPrompt_1.input}", + "bboxFolder": "{ImageDetectionPrompt_1.output}", + "maskInvert": true, + "keepFilename": true + }, + "internalInputs": { + "color": "#575963" + } + }, + "KeyframeSelection_1": { + "nodeType": "KeyframeSelection", + "position": [ + 200, + 0 + ], + "inputs": { + "inputPaths": [ + "{ApplyCalibration_1.output}" + ] + }, + "internalInputs": { + "color": "#575963" + } + }, + "MeshDecimate_1": { + "nodeType": "MeshDecimate", + "position": [ + 3000, + 0 + ], + "inputs": { + "input": "{MeshFiltering_1.outputMesh}", + "simplificationFactor": 0.05 + }, + "internalInputs": { + "color": "#3f3138" + } + }, + "MeshFiltering_1": { + "nodeType": "MeshFiltering", + "position": [ + 2800, + 0 + ], + "inputs": { + "inputMesh": "{Meshing_1.outputMesh}", + "filterLargeTrianglesFactor": 10.0 + }, + "internalInputs": { + "color": "#3f3138" + } + }, + "Meshing_1": { + "nodeType": "Meshing", + "position": [ + 2600, + 0 + ], + "inputs": { + "input": "{DepthMapFilter_1.input}", + "depthMapsFolder": "{DepthMapFilter_1.output}", + "estimateSpaceFromSfM": false, + "minStep": 1, + "fullWeight": 10.0, + "saveRawDensePointCloud": true + }, + "internalInputs": { + "color": "#3f3138" + } + }, + "PrepareDenseScene_1": { + "nodeType": "PrepareDenseScene", + "position": [ + 2000, + 0 + ], + "inputs": { + "input": "{SfMTriangulation_1.output}", + "maskExtension": "exr" + }, + "internalInputs": { + "color": "#3f3138" + } + }, + "Publish_1": { + "nodeType": "Publish", + "position": [ + 3600, + 100 + ], + "inputs": { + "inputFiles": [ + "{ExportAnimatedCamera_1.output}", + "{Texturing_1.output}", + "{ScenePreview_1.output}", + "{ExportDistortion_1.output}" + ] + } + }, + "ScenePreview_1": { + "nodeType": "ScenePreview", + "position": [ + 3200, + 200 + ], + "inputs": { + "cameras": "{ConvertSfMFormat_1.output}", + "model": "{MeshDecimate_1.output}", + "undistortedImages": "{ExportAnimatedCamera_1.outputUndistorted}", + "masks": "{ImageSegmentationBox_1.output}" + }, + "internalInputs": { + "color": "#4c594c" + } + }, + "SfMTransfer_1": { + "nodeType": "SfMTransfer", + "position": [ + 1600, + 0 + ], + "inputs": { + "input": "{KeyframeSelection_1.outputSfMDataKeyframes}", + "reference": "{StructureFromMotion_1.output}", + "transferLandmarks": false + }, + "internalInputs": { + "comment": "Transfer pose from final camera tracking into the keyframes-only scene.", + "color": "#3f3138" + } + }, + "SfMTriangulation_1": { + "nodeType": "SfMTriangulation", + "position": [ + 1800, + 0 + ], + "inputs": { + "input": "{SfMTransfer_1.output}", + "featuresFolders": "{StructureFromMotion_2.featuresFolders}", + "matchesFolders": "{StructureFromMotion_2.matchesFolders}", + "minAngleForTriangulation": 1.0, + "minAngleForLandmark": 0.5 + }, + "internalInputs": { + "color": "#3f3138" + } + }, + "StructureFromMotion_1": { + "nodeType": "StructureFromMotion", + "position": [ + 1400, + 200 + ], + "inputs": { + "input": "{FeatureMatching_3.input}", + "featuresFolders": "{FeatureMatching_3.featuresFolders}", + "matchesFolders": [ + "{FeatureMatching_3.output}", + "{FeatureMatching_2.output}" + ], + "describerTypes": "{FeatureMatching_3.describerTypes}", + "nbFirstUnstableCameras": 0, + "maxImagesPerGroup": 0, + "bundleAdjustmentMaxOutliers": -1, + "minInputTrackLength": 5, + "minNumberOfObservationsForTriangulation": 3, + "minAngleForTriangulation": 1.0, + "minAngleForLandmark": 0.5, + 
"filterTrackForks": true + }, + "internalInputs": { + "comment": "Estimate cameras parameters for the complete camera tracking sequence.", + "color": "#80766f" + } + }, + "StructureFromMotion_2": { + "nodeType": "StructureFromMotion", + "position": [ + 800, + 0 + ], + "inputs": { + "input": "{FeatureMatching_1.input}", + "featuresFolders": "{FeatureMatching_1.featuresFolders}", + "matchesFolders": [ + "{FeatureMatching_1.output}" + ], + "describerTypes": "{FeatureMatching_1.describerTypes}", + "minAngleForTriangulation": 1.0, + "minAngleForLandmark": 0.5, + "filterTrackForks": true + }, + "internalInputs": { + "comment": "Solve all keyframes first.", + "label": "StructureFromMotionKeyframes", + "color": "#575963" + } + }, + "Texturing_1": { + "nodeType": "Texturing", + "position": [ + 3200, + 0 + ], + "inputs": { + "input": "{Meshing_1.output}", + "imagesFolder": "{PrepareDenseScene_1.output}", + "inputMesh": "{MeshDecimate_1.output}" + }, + "internalInputs": { + "color": "#3f3138" + } + } + } +} \ No newline at end of file diff --git a/meshroom/cameraTrackingExperimental.mg b/meshroom/cameraTrackingExperimental.mg new file mode 100644 index 0000000000..25b3e9e630 --- /dev/null +++ b/meshroom/cameraTrackingExperimental.mg @@ -0,0 +1,596 @@ +{ + "header": { + "releaseVersion": "2025.1.0-develop", + "fileVersion": "2.0", + "nodesVersions": { + "ApplyCalibration": "1.0", + "CameraInit": "12.0", + "CheckerboardDetection": "1.0", + "ConvertSfMFormat": "2.0", + "DepthMap": "5.0", + "DepthMapFilter": "4.0", + "DistortionCalibration": "5.0", + "ExportAnimatedCamera": "2.0", + "ExportDistortion": "2.0", + "FeatureExtraction": "1.3", + "FeatureMatching": "2.0", + "ImageDetectionPrompt": "0.1", + "ImageMatching": "2.0", + "ImageMatchingMultiSfM": "1.0", + "ImageSegmentationBox": "0.1", + "KeyframeSelection": "5.0", + "MeshDecimate": "1.0", + "MeshFiltering": "3.0", + "Meshing": "7.0", + "PrepareDenseScene": "3.1", + "Publish": "1.3", + "RelativePoseEstimating": "3.0", + "ScenePreview": "2.0", + "SfMBootStraping": "3.0", + "SfMExpanding": "2.0", + "SfMTransfer": "2.1", + "SfMTriangulation": "1.0", + "Texturing": "6.0", + "TracksBuilding": "1.0" + }, + "template": true + }, + "graph": { + "ApplyCalibration_1": { + "nodeType": "ApplyCalibration", + "position": [ + 0, + 0 + ], + "inputs": { + "input": "{CameraInit_1.output}", + "calibration": "{DistortionCalibration_1.output}" + }, + "internalInputs": { + "color": "#575963" + } + }, + "CameraInit_1": { + "nodeType": "CameraInit", + "position": [ + -200, + 0 + ], + "inputs": {}, + "internalInputs": { + "color": "#575963" + } + }, + "CameraInit_2": { + "nodeType": "CameraInit", + "position": [ + -600, + -160 + ], + "inputs": {}, + "internalInputs": { + "label": "CameraInitLensGrid", + "color": "#302e2e" + } + }, + "CheckerboardDetection_1": { + "nodeType": "CheckerboardDetection", + "position": [ + -400, + -160 + ], + "inputs": { + "input": "{CameraInit_2.output}", + "useNestedGrids": true, + "exportDebugImages": true + }, + "internalInputs": { + "color": "#302e2e" + } + }, + "ConvertSfMFormat_1": { + "nodeType": "ConvertSfMFormat", + "position": [ + 3800, + 200 + ], + "inputs": { + "input": "{ExportAnimatedCamera_1.input}", + "fileExt": "json", + "describerTypes": "{TracksBuilding_2.describerTypes}", + "structure": false, + "observations": false + }, + "internalInputs": { + "color": "#4c594c" + } + }, + "DepthMapFilter_1": { + "nodeType": "DepthMapFilter", + "position": [ + 3200, + 0 + ], + "inputs": { + "input": "{DepthMap_1.input}", + "depthMapsFolder": 
"{DepthMap_1.output}" + }, + "internalInputs": { + "color": "#3f3138" + } + }, + "DepthMap_1": { + "nodeType": "DepthMap", + "position": [ + 3000, + 0 + ], + "inputs": { + "input": "{PrepareDenseScene_1.input}", + "imagesFolder": "{PrepareDenseScene_1.output}", + "downscale": 1 + }, + "internalInputs": { + "color": "#3f3138" + } + }, + "DistortionCalibration_1": { + "nodeType": "DistortionCalibration", + "position": [ + -200, + -160 + ], + "inputs": { + "input": "{CheckerboardDetection_1.input}", + "checkerboards": "{CheckerboardDetection_1.output}" + }, + "internalInputs": { + "color": "#302e2e" + } + }, + "ExportAnimatedCamera_1": { + "nodeType": "ExportAnimatedCamera", + "position": [ + 2400, + 200 + ], + "inputs": { + "input": "{SfMExpanding_2.output}", + "exportUndistortedImages": true + }, + "internalInputs": { + "color": "#80766f" + } + }, + "ExportDistortion_1": { + "nodeType": "ExportDistortion", + "position": [ + 0, + -160 + ], + "inputs": { + "input": "{DistortionCalibration_1.output}" + }, + "internalInputs": { + "color": "#302e2e" + } + }, + "FeatureExtraction_1": { + "nodeType": "FeatureExtraction", + "position": [ + 400, + 200 + ], + "inputs": { + "input": "{ApplyCalibration_1.output}", + "masksFolder": "{ImageSegmentationBox_1.output}", + "maskExtension": "exr" + }, + "internalInputs": { + "color": "#575963" + } + }, + "FeatureMatching_1": { + "nodeType": "FeatureMatching", + "position": [ + 600, + 0 + ], + "inputs": { + "input": "{ImageMatching_1.input}", + "featuresFolders": "{ImageMatching_1.featuresFolders}", + "imagePairsList": "{ImageMatching_1.output}", + "describerTypes": "{FeatureExtraction_1.describerTypes}" + }, + "internalInputs": { + "label": "FeatureMatchingKeyframes", + "color": "#575963" + } + }, + "FeatureMatching_2": { + "nodeType": "FeatureMatching", + "position": [ + 1800, + 400 + ], + "inputs": { + "input": "{ImageMatching_2.input}", + "featuresFolders": "{ImageMatching_2.featuresFolders}", + "imagePairsList": "{ImageMatching_2.output}" + }, + "internalInputs": { + "label": "FeatureMatchingAllFrames", + "color": "#80766f" + } + }, + "FeatureMatching_3": { + "nodeType": "FeatureMatching", + "position": [ + 1800, + 200 + ], + "inputs": { + "input": "{ImageMatchingMultiSfM_1.outputCombinedSfM}", + "featuresFolders": "{ImageMatchingMultiSfM_1.featuresFolders}", + "imagePairsList": "{ImageMatchingMultiSfM_1.output}", + "describerTypes": "{FeatureExtraction_1.describerTypes}" + }, + "internalInputs": { + "label": "FeatureMatchingFramesToKeyframes", + "color": "#80766f" + } + }, + "ImageDetectionPrompt_1": { + "nodeType": "ImageDetectionPrompt", + "position": [ + 0, + 200 + ], + "inputs": { + "input": "{CameraInit_1.output}" + }, + "internalInputs": { + "color": "#575963" + } + }, + "ImageMatchingMultiSfM_1": { + "nodeType": "ImageMatchingMultiSfM", + "position": [ + 1600, + 200 + ], + "inputs": { + "input": "{KeyframeSelection_1.outputSfMDataFrames}", + "inputB": "{SfMExpanding_1.output}", + "featuresFolders": [ + "{FeatureExtraction_1.output}" + ], + "method": "VocabularyTree", + "matchingMode": "a/b", + "nbMatches": 20 + }, + "internalInputs": { + "color": "#80766f" + } + }, + "ImageMatching_1": { + "nodeType": "ImageMatching", + "position": [ + 400, + 0 + ], + "inputs": { + "input": "{KeyframeSelection_1.outputSfMDataKeyframes}", + "featuresFolders": [ + "{FeatureExtraction_1.output}" + ], + "method": "Exhaustive" + }, + "internalInputs": { + "label": "ImageMatchingKeyframes", + "color": "#575963" + } + }, + "ImageMatching_2": { + "nodeType": 
"ImageMatching", + "position": [ + 1600, + 400 + ], + "inputs": { + "input": "{ApplyCalibration_1.output}", + "featuresFolders": [ + "{FeatureExtraction_1.output}" + ], + "method": "Sequential", + "nbNeighbors": 20 + }, + "internalInputs": { + "color": "#80766f" + } + }, + "ImageSegmentationBox_1": { + "nodeType": "ImageSegmentationBox", + "position": [ + 200, + 200 + ], + "inputs": { + "input": "{ImageDetectionPrompt_1.input}", + "bboxFolder": "{ImageDetectionPrompt_1.output}", + "maskInvert": true, + "keepFilename": true + }, + "internalInputs": { + "color": "#575963" + } + }, + "KeyframeSelection_1": { + "nodeType": "KeyframeSelection", + "position": [ + 200, + 0 + ], + "inputs": { + "inputPaths": [ + "{ApplyCalibration_1.output}" + ] + }, + "internalInputs": { + "color": "#575963" + } + }, + "MeshDecimate_1": { + "nodeType": "MeshDecimate", + "position": [ + 3800, + 0 + ], + "inputs": { + "input": "{MeshFiltering_1.outputMesh}", + "simplificationFactor": 0.05 + }, + "internalInputs": { + "color": "#3f3138" + } + }, + "MeshFiltering_1": { + "nodeType": "MeshFiltering", + "position": [ + 3600, + 0 + ], + "inputs": { + "inputMesh": "{Meshing_1.outputMesh}", + "filterLargeTrianglesFactor": 10.0 + }, + "internalInputs": { + "color": "#3f3138" + } + }, + "Meshing_1": { + "nodeType": "Meshing", + "position": [ + 3400, + 0 + ], + "inputs": { + "input": "{DepthMapFilter_1.input}", + "depthMapsFolder": "{DepthMapFilter_1.output}", + "estimateSpaceFromSfM": false, + "minStep": 1, + "fullWeight": 10.0, + "saveRawDensePointCloud": true + }, + "internalInputs": { + "color": "#3f3138" + } + }, + "PrepareDenseScene_1": { + "nodeType": "PrepareDenseScene", + "position": [ + 2800, + 0 + ], + "inputs": { + "input": "{SfMTriangulation_1.output}", + "maskExtension": "exr" + }, + "internalInputs": { + "color": "#3f3138" + } + }, + "Publish_1": { + "nodeType": "Publish", + "position": [ + 4200, + 100 + ], + "inputs": { + "inputFiles": [ + "{ExportAnimatedCamera_1.output}", + "{Texturing_1.output}", + "{ScenePreview_1.output}", + "{ExportDistortion_1.output}" + ] + } + }, + "RelativePoseEstimating_1": { + "nodeType": "RelativePoseEstimating", + "position": [ + 1000, + 0 + ], + "inputs": { + "input": "{TracksBuilding_1.input}", + "tracksFilename": "{TracksBuilding_1.output}", + "countIterations": 50000, + "minInliers": 100 + }, + "internalInputs": { + "color": "#575963" + } + }, + "ScenePreview_1": { + "nodeType": "ScenePreview", + "position": [ + 4000, + 200 + ], + "inputs": { + "cameras": "{ConvertSfMFormat_1.output}", + "model": "{MeshDecimate_1.output}", + "undistortedImages": "{ExportAnimatedCamera_1.outputUndistorted}", + "masks": "{ImageSegmentationBox_1.output}" + }, + "internalInputs": { + "color": "#4c594c" + } + }, + "SfMBootStraping_1": { + "nodeType": "SfMBootStraping", + "position": [ + 1200, + 0 + ], + "inputs": { + "input": "{RelativePoseEstimating_1.input}", + "tracksFilename": "{RelativePoseEstimating_1.tracksFilename}", + "pairs": "{RelativePoseEstimating_1.output}" + }, + "internalInputs": { + "color": "#575963" + } + }, + "SfMExpanding_1": { + "nodeType": "SfMExpanding", + "position": [ + 1400, + 0 + ], + "inputs": { + "input": "{SfMBootStraping_1.output}", + "tracksFilename": "{SfMBootStraping_1.tracksFilename}", + "meshFilename": "{SfMBootStraping_1.meshFilename}", + "minAngleForTriangulation": 1.0, + "minAngleForLandmark": 0.5 + }, + "internalInputs": { + "comment": "Estimate cameras parameters for the keyframes.", + "label": "SfMExpandingKeys", + "color": "#575963" + } + }, + 
"SfMExpanding_2": { + "nodeType": "SfMExpanding", + "position": [ + 2200, + 200 + ], + "inputs": { + "input": "{TracksBuilding_2.input}", + "tracksFilename": "{TracksBuilding_2.output}", + "meshFilename": "{SfMExpanding_1.meshFilename}", + "nbFirstUnstableCameras": 0, + "maxImagesPerGroup": 0, + "bundleAdjustmentMaxOutliers": 5000000, + "minNumberOfObservationsForTriangulation": 3, + "minAngleForTriangulation": 1.0, + "minAngleForLandmark": 0.5 + }, + "internalInputs": { + "comment": "Estimate cameras parameters for the complete camera tracking sequence.", + "label": "SfMExpandingAll", + "color": "#80766f" + } + }, + "SfMTransfer_1": { + "nodeType": "SfMTransfer", + "position": [ + 2400, + 0 + ], + "inputs": { + "input": "{KeyframeSelection_1.outputSfMDataKeyframes}", + "reference": "{SfMExpanding_2.output}", + "transferLandmarks": false + }, + "internalInputs": { + "comment": "Transfer pose from final camera tracking into the keyframes-only scene.", + "color": "#3f3138" + } + }, + "SfMTriangulation_1": { + "nodeType": "SfMTriangulation", + "position": [ + 2600, + 0 + ], + "inputs": { + "input": "{SfMTransfer_1.output}", + "featuresFolders": "{TracksBuilding_1.featuresFolders}", + "matchesFolders": "{TracksBuilding_1.matchesFolders}", + "minAngleForTriangulation": 1.0, + "minAngleForLandmark": 0.5 + }, + "internalInputs": { + "color": "#3f3138" + } + }, + "Texturing_1": { + "nodeType": "Texturing", + "position": [ + 4000, + 0 + ], + "inputs": { + "input": "{Meshing_1.output}", + "imagesFolder": "{PrepareDenseScene_1.output}", + "inputMesh": "{MeshDecimate_1.output}" + }, + "internalInputs": { + "color": "#3f3138" + } + }, + "TracksBuilding_1": { + "nodeType": "TracksBuilding", + "position": [ + 800, + 0 + ], + "inputs": { + "input": "{FeatureMatching_1.input}", + "featuresFolders": "{FeatureMatching_1.featuresFolders}", + "matchesFolders": [ + "{FeatureMatching_1.output}" + ], + "describerTypes": "{FeatureMatching_1.describerTypes}", + "filterTrackForks": true + }, + "internalInputs": { + "color": "#575963" + } + }, + "TracksBuilding_2": { + "nodeType": "TracksBuilding", + "position": [ + 2000, + 200 + ], + "inputs": { + "input": "{FeatureMatching_3.input}", + "featuresFolders": "{FeatureMatching_3.featuresFolders}", + "matchesFolders": [ + "{FeatureMatching_2.output}", + "{FeatureMatching_3.output}" + ], + "describerTypes": "{FeatureMatching_3.describerTypes}", + "minInputTrackLength": 5, + "filterTrackForks": true + }, + "internalInputs": { + "color": "#80766f" + } + } + } +} \ No newline at end of file diff --git a/meshroom/cameraTrackingWithoutCalibration.mg b/meshroom/cameraTrackingWithoutCalibration.mg new file mode 100644 index 0000000000..b34a7b9f2b --- /dev/null +++ b/meshroom/cameraTrackingWithoutCalibration.mg @@ -0,0 +1,502 @@ +{ + "header": { + "releaseVersion": "2025.1.0-develop", + "fileVersion": "2.0", + "template": true, + "nodesVersions": { + "ApplyCalibration": "1.0", + "CameraInit": "12.0", + "ConvertDistortion": "1.0", + "ConvertSfMFormat": "2.0", + "DepthMap": "5.0", + "DepthMapFilter": "4.0", + "ExportAnimatedCamera": "2.0", + "ExportDistortion": "2.0", + "FeatureExtraction": "1.3", + "FeatureMatching": "2.0", + "ImageDetectionPrompt": "0.1", + "ImageMatching": "2.0", + "ImageMatchingMultiSfM": "1.0", + "ImageSegmentationBox": "0.1", + "KeyframeSelection": "5.0", + "MeshDecimate": "1.0", + "MeshFiltering": "3.0", + "Meshing": "7.0", + "PrepareDenseScene": "3.1", + "Publish": "1.3", + "ScenePreview": "2.0", + "SfMTransfer": "2.1", + "SfMTriangulation": "1.0", + 
"StructureFromMotion": "3.3", + "Texturing": "6.0" + } + }, + "graph": { + "ApplyCalibration_1": { + "nodeType": "ApplyCalibration", + "position": [ + 0, + 0 + ], + "inputs": { + "input": "{CameraInit_1.output}" + }, + "internalInputs": { + "color": "#575963" + } + }, + "CameraInit_1": { + "nodeType": "CameraInit", + "position": [ + -200, + 0 + ], + "inputs": {}, + "internalInputs": { + "color": "#575963" + } + }, + "ConvertDistortion_1": { + "nodeType": "ConvertDistortion", + "position": [ + 1600, + 360 + ], + "inputs": { + "input": "{StructureFromMotion_1.output}" + }, + "internalInputs": { + "color": "#80766f" + } + }, + "ConvertSfMFormat_1": { + "nodeType": "ConvertSfMFormat", + "position": [ + 3000, + 200 + ], + "inputs": { + "input": "{ExportAnimatedCamera_1.input}", + "fileExt": "json", + "describerTypes": "{StructureFromMotion_1.describerTypes}", + "structure": false, + "observations": false + }, + "internalInputs": { + "color": "#4c594c" + } + }, + "DepthMapFilter_1": { + "nodeType": "DepthMapFilter", + "position": [ + 2400, + 0 + ], + "inputs": { + "input": "{DepthMap_1.input}", + "depthMapsFolder": "{DepthMap_1.output}" + }, + "internalInputs": { + "color": "#3f3138" + } + }, + "DepthMap_1": { + "nodeType": "DepthMap", + "position": [ + 2200, + 0 + ], + "inputs": { + "input": "{PrepareDenseScene_1.input}", + "imagesFolder": "{PrepareDenseScene_1.output}", + "downscale": 1 + }, + "internalInputs": { + "color": "#3f3138" + } + }, + "ExportAnimatedCamera_1": { + "nodeType": "ExportAnimatedCamera", + "position": [ + 1600, + 200 + ], + "inputs": { + "input": "{StructureFromMotion_1.output}", + "exportUndistortedImages": true + }, + "internalInputs": { + "color": "#80766f" + } + }, + "ExportDistortion_1": { + "nodeType": "ExportDistortion", + "position": [ + 1800, + 360 + ], + "inputs": { + "input": "{ConvertDistortion_1.output}", + "exportLensGridsUndistorted": false + }, + "internalInputs": { + "color": "#80766f" + } + }, + "FeatureExtraction_1": { + "nodeType": "FeatureExtraction", + "position": [ + 400, + 200 + ], + "inputs": { + "input": "{ApplyCalibration_1.output}", + "masksFolder": "{ImageSegmentationBox_1.output}", + "maskExtension": "exr" + }, + "internalInputs": { + "color": "#575963" + } + }, + "FeatureMatching_1": { + "nodeType": "FeatureMatching", + "position": [ + 600, + 0 + ], + "inputs": { + "input": "{ImageMatching_1.input}", + "featuresFolders": "{ImageMatching_1.featuresFolders}", + "imagePairsList": "{ImageMatching_1.output}", + "describerTypes": "{FeatureExtraction_1.describerTypes}" + }, + "internalInputs": { + "label": "FeatureMatchingKeyframes", + "color": "#575963" + } + }, + "FeatureMatching_2": { + "nodeType": "FeatureMatching", + "position": [ + 1200, + 360 + ], + "inputs": { + "input": "{ImageMatching_2.input}", + "featuresFolders": "{ImageMatching_2.featuresFolders}", + "imagePairsList": "{ImageMatching_2.output}" + }, + "internalInputs": { + "label": "FeatureMatchingAllFrames", + "color": "#80766f" + } + }, + "FeatureMatching_3": { + "nodeType": "FeatureMatching", + "position": [ + 1200, + 200 + ], + "inputs": { + "input": "{ImageMatchingMultiSfM_1.outputCombinedSfM}", + "featuresFolders": "{ImageMatchingMultiSfM_1.featuresFolders}", + "imagePairsList": "{ImageMatchingMultiSfM_1.output}", + "describerTypes": "{FeatureExtraction_1.describerTypes}" + }, + "internalInputs": { + "label": "FeatureMatchingFramesToKeyframes", + "color": "#80766f" + } + }, + "ImageDetectionPrompt_1": { + "nodeType": "ImageDetectionPrompt", + "position": [ + 0, + 200 + ], + 
"inputs": { + "input": "{CameraInit_1.output}" + }, + "internalInputs": { + "color": "#575963" + } + }, + "ImageMatchingMultiSfM_1": { + "nodeType": "ImageMatchingMultiSfM", + "position": [ + 1000, + 200 + ], + "inputs": { + "input": "{KeyframeSelection_1.outputSfMDataFrames}", + "inputB": "{StructureFromMotion_2.output}", + "featuresFolders": [ + "{FeatureExtraction_1.output}" + ], + "method": "VocabularyTree", + "matchingMode": "a/b", + "nbMatches": 20 + }, + "internalInputs": { + "color": "#80766f" + } + }, + "ImageMatching_1": { + "nodeType": "ImageMatching", + "position": [ + 400, + 0 + ], + "inputs": { + "input": "{KeyframeSelection_1.outputSfMDataKeyframes}", + "featuresFolders": [ + "{FeatureExtraction_1.output}" + ], + "method": "Exhaustive" + }, + "internalInputs": { + "label": "ImageMatchingKeyframes", + "color": "#575963" + } + }, + "ImageMatching_2": { + "nodeType": "ImageMatching", + "position": [ + 1000, + 360 + ], + "inputs": { + "input": "{ApplyCalibration_1.output}", + "featuresFolders": [ + "{FeatureExtraction_1.output}" + ], + "method": "Sequential", + "nbNeighbors": 20 + }, + "internalInputs": { + "color": "#80766f" + } + }, + "ImageSegmentationBox_1": { + "nodeType": "ImageSegmentationBox", + "position": [ + 200, + 200 + ], + "inputs": { + "input": "{ImageDetectionPrompt_1.input}", + "bboxFolder": "{ImageDetectionPrompt_1.output}", + "maskInvert": true, + "keepFilename": true + }, + "internalInputs": { + "color": "#575963" + } + }, + "KeyframeSelection_1": { + "nodeType": "KeyframeSelection", + "position": [ + 200, + 0 + ], + "inputs": { + "inputPaths": [ + "{ApplyCalibration_1.output}" + ] + }, + "internalInputs": { + "color": "#575963" + } + }, + "MeshDecimate_1": { + "nodeType": "MeshDecimate", + "position": [ + 3000, + 0 + ], + "inputs": { + "input": "{MeshFiltering_1.outputMesh}", + "simplificationFactor": 0.05 + }, + "internalInputs": { + "color": "#3f3138" + } + }, + "MeshFiltering_1": { + "nodeType": "MeshFiltering", + "position": [ + 2800, + 0 + ], + "inputs": { + "inputMesh": "{Meshing_1.outputMesh}", + "filterLargeTrianglesFactor": 10.0 + }, + "internalInputs": { + "color": "#3f3138" + } + }, + "Meshing_1": { + "nodeType": "Meshing", + "position": [ + 2600, + 0 + ], + "inputs": { + "input": "{DepthMapFilter_1.input}", + "depthMapsFolder": "{DepthMapFilter_1.output}", + "estimateSpaceFromSfM": false, + "minStep": 1, + "fullWeight": 10.0, + "saveRawDensePointCloud": true + }, + "internalInputs": { + "color": "#3f3138" + } + }, + "PrepareDenseScene_1": { + "nodeType": "PrepareDenseScene", + "position": [ + 2000, + 0 + ], + "inputs": { + "input": "{SfMTriangulation_1.output}", + "maskExtension": "exr" + }, + "internalInputs": { + "color": "#3f3138" + } + }, + "Publish_1": { + "nodeType": "Publish", + "position": [ + 3600, + 100 + ], + "inputs": { + "inputFiles": [ + "{ExportAnimatedCamera_1.output}", + "{Texturing_1.output}", + "{ScenePreview_1.output}", + "{ExportDistortion_1.output}" + ] + } + }, + "ScenePreview_1": { + "nodeType": "ScenePreview", + "position": [ + 3200, + 200 + ], + "inputs": { + "cameras": "{ConvertSfMFormat_1.output}", + "model": "{MeshDecimate_1.output}", + "undistortedImages": "{ExportAnimatedCamera_1.outputUndistorted}", + "masks": "{ImageSegmentationBox_1.output}" + }, + "internalInputs": { + "color": "#4c594c" + } + }, + "SfMTransfer_1": { + "nodeType": "SfMTransfer", + "position": [ + 1600, + 0 + ], + "inputs": { + "input": "{KeyframeSelection_1.outputSfMDataKeyframes}", + "reference": "{StructureFromMotion_1.output}", + 
"transferLandmarks": false + }, + "internalInputs": { + "comment": "Transfer pose from final camera tracking into the keyframes-only scene.", + "color": "#3f3138" + } + }, + "SfMTriangulation_1": { + "nodeType": "SfMTriangulation", + "position": [ + 1800, + 0 + ], + "inputs": { + "input": "{SfMTransfer_1.output}", + "featuresFolders": "{StructureFromMotion_2.featuresFolders}", + "matchesFolders": "{StructureFromMotion_2.matchesFolders}", + "minAngleForTriangulation": 1.0, + "minAngleForLandmark": 0.5 + }, + "internalInputs": { + "color": "#3f3138" + } + }, + "StructureFromMotion_1": { + "nodeType": "StructureFromMotion", + "position": [ + 1400, + 200 + ], + "inputs": { + "input": "{FeatureMatching_3.input}", + "featuresFolders": "{FeatureMatching_3.featuresFolders}", + "matchesFolders": [ + "{FeatureMatching_3.output}", + "{FeatureMatching_2.output}" + ], + "describerTypes": "{FeatureMatching_3.describerTypes}", + "nbFirstUnstableCameras": 0, + "maxImagesPerGroup": 0, + "bundleAdjustmentMaxOutliers": -1, + "minInputTrackLength": 5, + "minNumberOfObservationsForTriangulation": 3, + "minAngleForTriangulation": 1.0, + "minAngleForLandmark": 0.5, + "filterTrackForks": true + }, + "internalInputs": { + "comment": "Estimate cameras parameters for the complete camera tracking sequence.", + "color": "#80766f" + } + }, + "StructureFromMotion_2": { + "nodeType": "StructureFromMotion", + "position": [ + 800, + 0 + ], + "inputs": { + "input": "{FeatureMatching_1.input}", + "featuresFolders": "{FeatureMatching_1.featuresFolders}", + "matchesFolders": [ + "{FeatureMatching_1.output}" + ], + "describerTypes": "{FeatureMatching_1.describerTypes}", + "minAngleForTriangulation": 1.0, + "minAngleForLandmark": 0.5, + "filterTrackForks": true + }, + "internalInputs": { + "comment": "Solve all keyframes first.", + "label": "StructureFromMotionKeyframes", + "color": "#575963" + } + }, + "Texturing_1": { + "nodeType": "Texturing", + "position": [ + 3200, + 0 + ], + "inputs": { + "input": "{Meshing_1.output}", + "imagesFolder": "{PrepareDenseScene_1.output}", + "inputMesh": "{MeshDecimate_1.output}" + }, + "internalInputs": { + "color": "#3f3138" + } + } + } +} \ No newline at end of file diff --git a/meshroom/cameraTrackingWithoutCalibrationExperimental.mg b/meshroom/cameraTrackingWithoutCalibrationExperimental.mg new file mode 100644 index 0000000000..5b35d1f90c --- /dev/null +++ b/meshroom/cameraTrackingWithoutCalibrationExperimental.mg @@ -0,0 +1,567 @@ +{ + "header": { + "releaseVersion": "2025.1.0-develop", + "fileVersion": "2.0", + "nodesVersions": { + "ApplyCalibration": "1.0", + "CameraInit": "12.0", + "ConvertDistortion": "1.0", + "ConvertSfMFormat": "2.0", + "DepthMap": "5.0", + "DepthMapFilter": "4.0", + "ExportAnimatedCamera": "2.0", + "ExportDistortion": "2.0", + "FeatureExtraction": "1.3", + "FeatureMatching": "2.0", + "ImageDetectionPrompt": "0.1", + "ImageMatching": "2.0", + "ImageMatchingMultiSfM": "1.0", + "ImageSegmentationBox": "0.1", + "KeyframeSelection": "5.0", + "MeshDecimate": "1.0", + "MeshFiltering": "3.0", + "Meshing": "7.0", + "PrepareDenseScene": "3.1", + "Publish": "1.3", + "RelativePoseEstimating": "3.0", + "ScenePreview": "2.0", + "SfMBootStraping": "3.0", + "SfMExpanding": "2.0", + "SfMTransfer": "2.1", + "SfMTriangulation": "1.0", + "Texturing": "6.0", + "TracksBuilding": "1.0" + }, + "template": true + }, + "graph": { + "ApplyCalibration_1": { + "nodeType": "ApplyCalibration", + "position": [ + 0, + 0 + ], + "inputs": { + "input": "{CameraInit_1.output}" + }, + 
"internalInputs": { + "color": "#575963" + } + }, + "CameraInit_1": { + "nodeType": "CameraInit", + "position": [ + -200, + 0 + ], + "inputs": {}, + "internalInputs": { + "color": "#575963" + } + }, + "ConvertDistortion_1": { + "nodeType": "ConvertDistortion", + "position": [ + 2400, + 360 + ], + "inputs": { + "input": "{SfMExpanding_2.output}" + }, + "internalInputs": { + "color": "#80766f" + } + }, + "ConvertSfMFormat_1": { + "nodeType": "ConvertSfMFormat", + "position": [ + 3800, + 200 + ], + "inputs": { + "input": "{ExportAnimatedCamera_1.input}", + "fileExt": "json", + "describerTypes": "{TracksBuilding_2.describerTypes}", + "structure": false, + "observations": false + }, + "internalInputs": { + "color": "#4c594c" + } + }, + "DepthMapFilter_1": { + "nodeType": "DepthMapFilter", + "position": [ + 3200, + 0 + ], + "inputs": { + "input": "{DepthMap_1.input}", + "depthMapsFolder": "{DepthMap_1.output}" + }, + "internalInputs": { + "color": "#3f3138" + } + }, + "DepthMap_1": { + "nodeType": "DepthMap", + "position": [ + 3000, + 0 + ], + "inputs": { + "input": "{PrepareDenseScene_1.input}", + "imagesFolder": "{PrepareDenseScene_1.output}", + "downscale": 1 + }, + "internalInputs": { + "color": "#3f3138" + } + }, + "ExportAnimatedCamera_1": { + "nodeType": "ExportAnimatedCamera", + "position": [ + 2600, + 200 + ], + "inputs": { + "input": "{SfMExpanding_2.output}", + "exportUndistortedImages": true + }, + "internalInputs": { + "color": "#80766f" + } + }, + "ExportDistortion_1": { + "nodeType": "ExportDistortion", + "position": [ + 2600, + 360 + ], + "inputs": { + "input": "{ConvertDistortion_1.output}", + "exportLensGridsUndistorted": false + }, + "internalInputs": { + "color": "#80766f" + } + }, + "FeatureExtraction_1": { + "nodeType": "FeatureExtraction", + "position": [ + 400, + 200 + ], + "inputs": { + "input": "{ApplyCalibration_1.output}", + "masksFolder": "{ImageSegmentationBox_1.output}", + "maskExtension": "exr" + }, + "internalInputs": { + "color": "#575963" + } + }, + "FeatureMatching_1": { + "nodeType": "FeatureMatching", + "position": [ + 600, + 0 + ], + "inputs": { + "input": "{ImageMatching_1.input}", + "featuresFolders": "{ImageMatching_1.featuresFolders}", + "imagePairsList": "{ImageMatching_1.output}", + "describerTypes": "{FeatureExtraction_1.describerTypes}" + }, + "internalInputs": { + "label": "FeatureMatchingKeyframes", + "color": "#575963" + } + }, + "FeatureMatching_2": { + "nodeType": "FeatureMatching", + "position": [ + 1800, + 360 + ], + "inputs": { + "input": "{ImageMatching_2.input}", + "featuresFolders": "{ImageMatching_2.featuresFolders}", + "imagePairsList": "{ImageMatching_2.output}" + }, + "internalInputs": { + "label": "FeatureMatchingAllFrames", + "color": "#80766f" + } + }, + "FeatureMatching_3": { + "nodeType": "FeatureMatching", + "position": [ + 1800, + 200 + ], + "inputs": { + "input": "{ImageMatchingMultiSfM_1.outputCombinedSfM}", + "featuresFolders": "{ImageMatchingMultiSfM_1.featuresFolders}", + "imagePairsList": "{ImageMatchingMultiSfM_1.output}", + "describerTypes": "{FeatureExtraction_1.describerTypes}" + }, + "internalInputs": { + "label": "FeatureMatchingFramesToKeyframes", + "color": "#80766f" + } + }, + "ImageDetectionPrompt_1": { + "nodeType": "ImageDetectionPrompt", + "position": [ + 0, + 200 + ], + "inputs": { + "input": "{CameraInit_1.output}" + }, + "internalInputs": { + "color": "#575963" + } + }, + "ImageMatchingMultiSfM_1": { + "nodeType": "ImageMatchingMultiSfM", + "position": [ + 1600, + 200 + ], + "inputs": { + "input": 
"{KeyframeSelection_1.outputSfMDataFrames}", + "inputB": "{SfMExpanding_1.output}", + "featuresFolders": [ + "{FeatureExtraction_1.output}" + ], + "method": "VocabularyTree", + "matchingMode": "a/b", + "nbMatches": 20 + }, + "internalInputs": { + "color": "#80766f" + } + }, + "ImageMatching_1": { + "nodeType": "ImageMatching", + "position": [ + 400, + 0 + ], + "inputs": { + "input": "{KeyframeSelection_1.outputSfMDataKeyframes}", + "featuresFolders": [ + "{FeatureExtraction_1.output}" + ], + "method": "Exhaustive" + }, + "internalInputs": { + "label": "ImageMatchingKeyframes", + "color": "#575963" + } + }, + "ImageMatching_2": { + "nodeType": "ImageMatching", + "position": [ + 1600, + 360 + ], + "inputs": { + "input": "{ApplyCalibration_1.output}", + "featuresFolders": [ + "{FeatureExtraction_1.output}" + ], + "method": "Sequential", + "nbNeighbors": 20 + }, + "internalInputs": { + "color": "#80766f" + } + }, + "ImageSegmentationBox_1": { + "nodeType": "ImageSegmentationBox", + "position": [ + 200, + 200 + ], + "inputs": { + "input": "{ImageDetectionPrompt_1.input}", + "bboxFolder": "{ImageDetectionPrompt_1.output}", + "maskInvert": true, + "keepFilename": true + }, + "internalInputs": { + "color": "#575963" + } + }, + "KeyframeSelection_1": { + "nodeType": "KeyframeSelection", + "position": [ + 200, + 0 + ], + "inputs": { + "inputPaths": [ + "{ApplyCalibration_1.output}" + ] + }, + "internalInputs": { + "color": "#575963" + } + }, + "MeshDecimate_1": { + "nodeType": "MeshDecimate", + "position": [ + 3800, + 0 + ], + "inputs": { + "input": "{MeshFiltering_1.outputMesh}", + "simplificationFactor": 0.05 + }, + "internalInputs": { + "color": "#3f3138" + } + }, + "MeshFiltering_1": { + "nodeType": "MeshFiltering", + "position": [ + 3600, + 0 + ], + "inputs": { + "inputMesh": "{Meshing_1.outputMesh}", + "filterLargeTrianglesFactor": 10.0 + }, + "internalInputs": { + "color": "#3f3138" + } + }, + "Meshing_1": { + "nodeType": "Meshing", + "position": [ + 3400, + 0 + ], + "inputs": { + "input": "{DepthMapFilter_1.input}", + "depthMapsFolder": "{DepthMapFilter_1.output}", + "estimateSpaceFromSfM": false, + "minStep": 1, + "fullWeight": 10.0, + "saveRawDensePointCloud": true + }, + "internalInputs": { + "color": "#3f3138" + } + }, + "PrepareDenseScene_1": { + "nodeType": "PrepareDenseScene", + "position": [ + 2800, + 0 + ], + "inputs": { + "input": "{SfMTriangulation_1.output}", + "maskExtension": "exr" + }, + "internalInputs": { + "color": "#3f3138" + } + }, + "Publish_1": { + "nodeType": "Publish", + "position": [ + 4400, + 100 + ], + "inputs": { + "inputFiles": [ + "{ExportAnimatedCamera_1.output}", + "{Texturing_1.output}", + "{ScenePreview_1.output}", + "{ExportDistortion_1.output}" + ] + } + }, + "RelativePoseEstimating_1": { + "nodeType": "RelativePoseEstimating", + "position": [ + 1000, + 0 + ], + "inputs": { + "input": "{TracksBuilding_1.input}", + "tracksFilename": "{TracksBuilding_1.output}", + "countIterations": 50000, + "minInliers": 100 + }, + "internalInputs": { + "color": "#575963" + } + }, + "ScenePreview_1": { + "nodeType": "ScenePreview", + "position": [ + 4000, + 200 + ], + "inputs": { + "cameras": "{ConvertSfMFormat_1.output}", + "model": "{MeshDecimate_1.output}", + "undistortedImages": "{ExportAnimatedCamera_1.outputUndistorted}", + "masks": "{ImageSegmentationBox_1.output}" + }, + "internalInputs": { + "color": "#4c594c" + } + }, + "SfMBootStraping_1": { + "nodeType": "SfMBootStraping", + "position": [ + 1200, + 0 + ], + "inputs": { + "input": 
"{RelativePoseEstimating_1.input}", + "tracksFilename": "{RelativePoseEstimating_1.tracksFilename}", + "pairs": "{RelativePoseEstimating_1.output}" + }, + "internalInputs": { + "color": "#575963" + } + }, + "SfMExpanding_1": { + "nodeType": "SfMExpanding", + "position": [ + 1400, + 0 + ], + "inputs": { + "input": "{SfMBootStraping_1.output}", + "tracksFilename": "{SfMBootStraping_1.tracksFilename}", + "meshFilename": "{SfMBootStraping_1.meshFilename}", + "minAngleForTriangulation": 1.0, + "minAngleForLandmark": 0.5 + }, + "internalInputs": { + "comment": "Estimate cameras parameters for the keyframes.", + "label": "SfMExpandingKeys", + "color": "#575963" + } + }, + "SfMExpanding_2": { + "nodeType": "SfMExpanding", + "position": [ + 2200, + 200 + ], + "inputs": { + "input": "{TracksBuilding_2.input}", + "tracksFilename": "{TracksBuilding_2.output}", + "meshFilename": "{SfMExpanding_1.meshFilename}", + "nbFirstUnstableCameras": 0, + "maxImagesPerGroup": 0, + "bundleAdjustmentMaxOutliers": 5000000, + "minNumberOfObservationsForTriangulation": 3, + "minAngleForTriangulation": 1.0, + "minAngleForLandmark": 0.5 + }, + "internalInputs": { + "comment": "Estimate cameras parameters for the complete camera tracking sequence.", + "label": "SfMExpandingAll", + "color": "#80766f" + } + }, + "SfMTransfer_1": { + "nodeType": "SfMTransfer", + "position": [ + 2400, + 0 + ], + "inputs": { + "input": "{KeyframeSelection_1.outputSfMDataKeyframes}", + "reference": "{SfMExpanding_2.output}", + "transferLandmarks": false + }, + "internalInputs": { + "comment": "Transfer pose from final camera tracking into the keyframes-only scene.", + "color": "#3f3138" + } + }, + "SfMTriangulation_1": { + "nodeType": "SfMTriangulation", + "position": [ + 2600, + 0 + ], + "inputs": { + "input": "{SfMTransfer_1.output}", + "featuresFolders": "{TracksBuilding_1.featuresFolders}", + "matchesFolders": "{TracksBuilding_1.matchesFolders}", + "minAngleForTriangulation": 1.0, + "minAngleForLandmark": 0.5 + }, + "internalInputs": { + "color": "#3f3138" + } + }, + "Texturing_1": { + "nodeType": "Texturing", + "position": [ + 4000, + 0 + ], + "inputs": { + "input": "{Meshing_1.output}", + "imagesFolder": "{PrepareDenseScene_1.output}", + "inputMesh": "{MeshDecimate_1.output}" + }, + "internalInputs": { + "color": "#3f3138" + } + }, + "TracksBuilding_1": { + "nodeType": "TracksBuilding", + "position": [ + 800, + 0 + ], + "inputs": { + "input": "{FeatureMatching_1.input}", + "featuresFolders": "{FeatureMatching_1.featuresFolders}", + "matchesFolders": [ + "{FeatureMatching_1.output}" + ], + "describerTypes": "{FeatureMatching_1.describerTypes}", + "filterTrackForks": true + }, + "internalInputs": { + "color": "#575963" + } + }, + "TracksBuilding_2": { + "nodeType": "TracksBuilding", + "position": [ + 2000, + 200 + ], + "inputs": { + "input": "{FeatureMatching_3.input}", + "featuresFolders": "{FeatureMatching_3.featuresFolders}", + "matchesFolders": [ + "{FeatureMatching_2.output}", + "{FeatureMatching_3.output}" + ], + "describerTypes": "{FeatureMatching_3.describerTypes}", + "minInputTrackLength": 5, + "filterTrackForks": true + }, + "internalInputs": { + "color": "#80766f" + } + } + } +} \ No newline at end of file diff --git a/meshroom/distortionCalibration.mg b/meshroom/distortionCalibration.mg new file mode 100644 index 0000000000..226e911e5b --- /dev/null +++ b/meshroom/distortionCalibration.mg @@ -0,0 +1,69 @@ +{ + "header": { + "releaseVersion": "2025.1.0-develop", + "fileVersion": "2.0", + "template": true, + "nodesVersions": { + 
"CameraInit": "12.0", + "CheckerboardDetection": "1.0", + "DistortionCalibration": "5.0", + "ExportDistortion": "2.0", + "Publish": "1.3" + } + }, + "graph": { + "CameraInit_1": { + "nodeType": "CameraInit", + "position": [ + 0, + 0 + ], + "inputs": {} + }, + "CheckerboardDetection_1": { + "nodeType": "CheckerboardDetection", + "position": [ + 200, + 0 + ], + "inputs": { + "input": "{CameraInit_1.output}", + "useNestedGrids": true, + "exportDebugImages": true + } + }, + "DistortionCalibration_1": { + "nodeType": "DistortionCalibration", + "position": [ + 400, + 0 + ], + "inputs": { + "input": "{CheckerboardDetection_1.input}", + "checkerboards": "{CheckerboardDetection_1.output}" + } + }, + "ExportDistortion_1": { + "nodeType": "ExportDistortion", + "position": [ + 600, + 0 + ], + "inputs": { + "input": "{DistortionCalibration_1.output}" + } + }, + "Publish_1": { + "nodeType": "Publish", + "position": [ + 800, + 0 + ], + "inputs": { + "inputFiles": [ + "{ExportDistortion_1.output}" + ] + } + } + } +} \ No newline at end of file diff --git a/meshroom/hdrFusion.mg b/meshroom/hdrFusion.mg new file mode 100644 index 0000000000..b3d70333d8 --- /dev/null +++ b/meshroom/hdrFusion.mg @@ -0,0 +1,77 @@ +{ + "header": { + "nodesVersions": { + "CameraInit": "12.0", + "LdrToHdrCalibration": "3.1", + "LdrToHdrMerge": "4.1", + "LdrToHdrSampling": "4.0", + "Publish": "1.3" + }, + "releaseVersion": "2025.1.0-develop", + "fileVersion": "2.0", + "template": true + }, + "graph": { + "CameraInit_1": { + "nodeType": "CameraInit", + "position": [ + 0, + 0 + ], + "inputs": {} + }, + "LdrToHdrCalibration_1": { + "nodeType": "LdrToHdrCalibration", + "position": [ + 400, + 0 + ], + "inputs": { + "input": "{LdrToHdrSampling_1.input}", + "samples": "{LdrToHdrSampling_1.output}", + "userNbBrackets": "{LdrToHdrSampling_1.userNbBrackets}", + "byPass": "{LdrToHdrSampling_1.byPass}", + "calibrationMethod": "{LdrToHdrSampling_1.calibrationMethod}", + "channelQuantizationPower": "{LdrToHdrSampling_1.channelQuantizationPower}", + "workingColorSpace": "{LdrToHdrSampling_1.workingColorSpace}" + } + }, + "LdrToHdrMerge_1": { + "nodeType": "LdrToHdrMerge", + "position": [ + 600, + 0 + ], + "inputs": { + "input": "{LdrToHdrCalibration_1.input}", + "response": "{LdrToHdrCalibration_1.response}", + "userNbBrackets": "{LdrToHdrCalibration_1.userNbBrackets}", + "byPass": "{LdrToHdrCalibration_1.byPass}", + "channelQuantizationPower": "{LdrToHdrCalibration_1.channelQuantizationPower}", + "workingColorSpace": "{LdrToHdrCalibration_1.workingColorSpace}" + } + }, + "LdrToHdrSampling_1": { + "nodeType": "LdrToHdrSampling", + "position": [ + 200, + 0 + ], + "inputs": { + "input": "{CameraInit_1.output}" + } + }, + "Publish_1": { + "nodeType": "Publish", + "position": [ + 800, + 0 + ], + "inputs": { + "inputFiles": [ + "{LdrToHdrMerge_1.outputFolder}" + ] + } + } + } +} \ No newline at end of file diff --git a/meshroom/multi-viewPhotometricStereo.mg b/meshroom/multi-viewPhotometricStereo.mg new file mode 100644 index 0000000000..c19d53f354 --- /dev/null +++ b/meshroom/multi-viewPhotometricStereo.mg @@ -0,0 +1,277 @@ +{ + "header": { + "pipelineVersion": "2.2", + "releaseVersion": "2025.1.0-develop", + "fileVersion": "2.0", + "template": true, + "nodesVersions": { + "CameraInit": "12.0", + "DepthMap": "5.0", + "DepthMapFilter": "4.0", + "FeatureExtraction": "1.3", + "FeatureMatching": "2.0", + "ImageMatching": "2.0", + "LightingCalibration": "1.0", + "MeshFiltering": "3.0", + "Meshing": "7.0", + "PhotometricStereo": "1.0", + 
"PrepareDenseScene": "3.1", + "SfMFilter": "1.0", + "SfMTransfer": "2.1", + "SphereDetection": "1.0", + "StructureFromMotion": "3.3", + "Texturing": "6.0" + } + }, + "graph": { + "CameraInit_1": { + "nodeType": "CameraInit", + "position": [ + -400, + 200 + ], + "inputs": { + "rawColorInterpretation": "LibRawWhiteBalancing" + } + }, + "DepthMapFilter_1": { + "nodeType": "DepthMapFilter", + "position": [ + 1200, + 0 + ], + "inputs": { + "input": "{DepthMap_1.input}", + "depthMapsFolder": "{DepthMap_1.output}" + } + }, + "DepthMap_1": { + "nodeType": "DepthMap", + "position": [ + 1000, + 0 + ], + "inputs": { + "input": "{PrepareDenseScene_1.input}", + "imagesFolder": "{PrepareDenseScene_1.output}" + } + }, + "FeatureExtraction_1": { + "nodeType": "FeatureExtraction", + "position": [ + 0, + 0 + ], + "inputs": { + "input": "{SfMFilter_1.outputSfMData_selected}" + } + }, + "FeatureMatching_1": { + "nodeType": "FeatureMatching", + "position": [ + 400, + 0 + ], + "inputs": { + "input": "{ImageMatching_1.input}", + "featuresFolders": "{ImageMatching_1.featuresFolders}", + "imagePairsList": "{ImageMatching_1.output}", + "describerTypes": "{FeatureExtraction_1.describerTypes}", + "maxIteration": 2048 + } + }, + "ImageMatching_1": { + "nodeType": "ImageMatching", + "position": [ + 200, + 0 + ], + "inputs": { + "input": "{FeatureExtraction_1.input}", + "featuresFolders": [ + "{FeatureExtraction_1.output}" + ] + } + }, + "LightingCalibration_1": { + "nodeType": "LightingCalibration", + "position": [ + 1200, + 200 + ], + "inputs": { + "inputPath": "{SphereDetection_1.input}", + "inputDetection": "{SphereDetection_1.output}" + } + }, + "MeshFiltering_1": { + "nodeType": "MeshFiltering", + "position": [ + 1600, + 0 + ], + "inputs": { + "inputMesh": "{Meshing_1.outputMesh}" + } + }, + "Meshing_1": { + "nodeType": "Meshing", + "position": [ + 1400, + 0 + ], + "inputs": { + "input": "{DepthMapFilter_1.input}", + "depthMapsFolder": "{DepthMapFilter_1.output}" + } + }, + "PhotometricStereo_1": { + "nodeType": "PhotometricStereo", + "position": [ + 1400, + 200 + ], + "inputs": { + "inputPath": "{LightingCalibration_1.inputPath}", + "pathToJSONLightFile": "{LightingCalibration_1.outputFile}" + } + }, + "PrepareDenseScene_1": { + "nodeType": "PrepareDenseScene", + "position": [ + 800, + 0 + ], + "inputs": { + "input": "{StructureFromMotion_1.output}" + } + }, + "PrepareDenseScene_2": { + "nodeType": "PrepareDenseScene", + "position": [ + 2200, + 200 + ], + "inputs": { + "input": "{PhotometricStereo_1.outputSfmDataAlbedo}" + } + }, + "PrepareDenseScene_3": { + "nodeType": "PrepareDenseScene", + "position": [ + 2200, + 400 + ], + "inputs": { + "input": "{PhotometricStereo_1.outputSfmDataNormal}" + } + }, + "PrepareDenseScene_4": { + "nodeType": "PrepareDenseScene", + "position": [ + 2200, + 600 + ], + "inputs": { + "input": "{PhotometricStereo_1.outputSfmDataNormalPNG}" + } + }, + "SfMFilter_1": { + "nodeType": "SfMFilter", + "position": [ + -200, + 200 + ], + "inputs": { + "inputFile": "{CameraInit_1.output}", + "fileMatchingPattern": ".*/.*ambiant.*" + } + }, + "SfMTransfer_1": { + "nodeType": "SfMTransfer", + "position": [ + 800, + 200 + ], + "inputs": { + "input": "{SfMFilter_1.outputSfMData_unselected}", + "reference": "{StructureFromMotion_1.output}", + "method": "from_poseid" + } + }, + "SphereDetection_1": { + "nodeType": "SphereDetection", + "position": [ + 1000, + 200 + ], + "inputs": { + "input": "{SfMTransfer_1.output}" + } + }, + "StructureFromMotion_1": { + "nodeType": "StructureFromMotion", + 
"position": [ + 600, + 0 + ], + "inputs": { + "input": "{FeatureMatching_1.input}", + "featuresFolders": "{FeatureMatching_1.featuresFolders}", + "matchesFolders": [ + "{FeatureMatching_1.output}" + ], + "describerTypes": "{FeatureMatching_1.describerTypes}", + "localizerEstimatorMaxIterations": 4096 + } + }, + "Texturing_1": { + "nodeType": "Texturing", + "position": [ + 1800, + 0 + ], + "inputs": { + "input": "{Meshing_1.output}", + "imagesFolder": "{DepthMap_1.imagesFolder}", + "inputMesh": "{MeshFiltering_1.outputMesh}" + } + }, + "Texturing_2": { + "nodeType": "Texturing", + "position": [ + 2400, + 200 + ], + "inputs": { + "input": "{Meshing_1.output}", + "imagesFolder": "{PrepareDenseScene_2.output}", + "inputMesh": "{MeshFiltering_1.outputMesh}" + } + }, + "Texturing_3": { + "nodeType": "Texturing", + "position": [ + 2400, + 400 + ], + "inputs": { + "input": "{Meshing_1.output}", + "imagesFolder": "{PrepareDenseScene_3.output}", + "inputMesh": "{MeshFiltering_1.outputMesh}" + } + }, + "Texturing_4": { + "nodeType": "Texturing", + "position": [ + 2400, + 600 + ], + "inputs": { + "input": "{Meshing_1.output}", + "imagesFolder": "{PrepareDenseScene_4.output}", + "inputMesh": "{MeshFiltering_1.outputMesh}" + } + } + } +} \ No newline at end of file diff --git a/meshroom/nodalCameraTracking.mg b/meshroom/nodalCameraTracking.mg new file mode 100644 index 0000000000..55bd70122d --- /dev/null +++ b/meshroom/nodalCameraTracking.mg @@ -0,0 +1,293 @@ +{ + "header": { + "releaseVersion": "2025.1.0-develop", + "fileVersion": "2.0", + "template": true, + "nodesVersions": { + "ApplyCalibration": "1.0", + "CameraInit": "12.0", + "CheckerboardDetection": "1.0", + "ConvertSfMFormat": "2.0", + "DistortionCalibration": "5.0", + "ExportAnimatedCamera": "2.0", + "ExportDistortion": "2.0", + "FeatureExtraction": "1.3", + "FeatureMatching": "2.0", + "ImageDetectionPrompt": "0.1", + "ImageMatching": "2.0", + "ImageSegmentationBox": "0.1", + "NodalSfM": "2.0", + "Publish": "1.3", + "RelativePoseEstimating": "3.0", + "ScenePreview": "2.0", + "TracksBuilding": "1.0" + } + }, + "graph": { + "ApplyCalibration_1": { + "nodeType": "ApplyCalibration", + "position": [ + 0, + 0 + ], + "inputs": { + "input": "{CameraInit_1.output}", + "calibration": "{DistortionCalibration_1.output}" + }, + "internalInputs": { + "color": "#80766f" + } + }, + "CameraInit_1": { + "nodeType": "CameraInit", + "position": [ + -200, + 0 + ], + "inputs": {}, + "internalInputs": { + "color": "#80766f" + } + }, + "CameraInit_2": { + "nodeType": "CameraInit", + "position": [ + -600, + -160 + ], + "inputs": {}, + "internalInputs": { + "label": "CameraInitLensGrid", + "color": "#302e2e" + } + }, + "CheckerboardDetection_1": { + "nodeType": "CheckerboardDetection", + "position": [ + -400, + -160 + ], + "inputs": { + "input": "{CameraInit_2.output}", + "useNestedGrids": true, + "exportDebugImages": true + }, + "internalInputs": { + "color": "#302e2e" + } + }, + "ConvertSfMFormat_1": { + "nodeType": "ConvertSfMFormat", + "position": [ + 1600, + 200 + ], + "inputs": { + "input": "{NodalSfM_1.output}", + "fileExt": "sfm", + "structure": false, + "observations": false + }, + "internalInputs": { + "color": "#4c594c" + } + }, + "DistortionCalibration_1": { + "nodeType": "DistortionCalibration", + "position": [ + -200, + -160 + ], + "inputs": { + "input": "{CheckerboardDetection_1.input}", + "checkerboards": "{CheckerboardDetection_1.output}" + }, + "internalInputs": { + "color": "#302e2e" + } + }, + "ExportAnimatedCamera_1": { + "nodeType": 
"ExportAnimatedCamera", + "position": [ + 1600, + 0 + ], + "inputs": { + "input": "{NodalSfM_1.output}", + "exportUndistortedImages": true + }, + "internalInputs": { + "color": "#80766f" + } + }, + "ExportDistortion_1": { + "nodeType": "ExportDistortion", + "position": [ + 0, + -160 + ], + "inputs": { + "input": "{DistortionCalibration_1.output}" + }, + "internalInputs": { + "color": "#302e2e" + } + }, + "FeatureExtraction_1": { + "nodeType": "FeatureExtraction", + "position": [ + 400, + 0 + ], + "inputs": { + "input": "{ApplyCalibration_1.output}", + "masksFolder": "{ImageSegmentationBox_1.output}" + }, + "internalInputs": { + "color": "#80766f" + } + }, + "FeatureMatching_1": { + "nodeType": "FeatureMatching", + "position": [ + 800, + 0 + ], + "inputs": { + "input": "{ImageMatching_1.input}", + "featuresFolders": "{ImageMatching_1.featuresFolders}", + "imagePairsList": "{ImageMatching_1.output}", + "describerTypes": "{FeatureExtraction_1.describerTypes}" + }, + "internalInputs": { + "color": "#80766f" + } + }, + "ImageDetectionPrompt_1": { + "nodeType": "ImageDetectionPrompt", + "position": [ + 0, + 200 + ], + "inputs": { + "input": "{CameraInit_1.output}" + }, + "internalInputs": { + "color": "#80766f" + } + }, + "ImageMatching_1": { + "nodeType": "ImageMatching", + "position": [ + 600, + 0 + ], + "inputs": { + "input": "{FeatureExtraction_1.input}", + "featuresFolders": [ + "{FeatureExtraction_1.output}" + ] + }, + "internalInputs": { + "color": "#80766f" + } + }, + "ImageSegmentationBox_1": { + "nodeType": "ImageSegmentationBox", + "position": [ + 200, + 200 + ], + "inputs": { + "input": "{ImageDetectionPrompt_1.input}", + "bboxFolder": "{ImageDetectionPrompt_1.output}", + "maskInvert": true, + "keepFilename": true + }, + "internalInputs": { + "color": "#80766f" + } + }, + "NodalSfM_1": { + "nodeType": "NodalSfM", + "position": [ + 1400, + 0 + ], + "inputs": { + "input": "{RelativePoseEstimating_1.input}", + "tracksFilename": "{RelativePoseEstimating_1.tracksFilename}", + "pairs": "{RelativePoseEstimating_1.output}" + }, + "internalInputs": { + "color": "#80766f" + } + }, + "Publish_1": { + "nodeType": "Publish", + "position": [ + 2000, + 0 + ], + "inputs": { + "inputFiles": [ + "{ExportAnimatedCamera_1.output}", + "{ScenePreview_1.output}", + "{ExportDistortion_1.output}" + ] + } + }, + "RelativePoseEstimating_1": { + "nodeType": "RelativePoseEstimating", + "position": [ + 1200, + 0 + ], + "inputs": { + "input": "{TracksBuilding_1.input}", + "tracksFilename": "{TracksBuilding_1.output}", + "enforcePureRotation": true + }, + "internalInputs": { + "color": "#80766f" + } + }, + "ScenePreview_1": { + "nodeType": "ScenePreview", + "position": [ + 1800, + 200 + ], + "inputs": { + "cameras": "{ConvertSfMFormat_1.output}", + "model": "{NodalSfM_1.output}", + "undistortedImages": "{ExportAnimatedCamera_1.outputUndistorted}", + "masks": "{ImageSegmentationBox_1.output}", + "pointCloudParams": { + "particleSize": 0.001, + "particleColor": "Red" + } + }, + "internalInputs": { + "color": "#4c594c" + } + }, + "TracksBuilding_1": { + "nodeType": "TracksBuilding", + "position": [ + 1000, + 0 + ], + "inputs": { + "input": "{FeatureMatching_1.input}", + "featuresFolders": "{FeatureMatching_1.featuresFolders}", + "matchesFolders": [ + "{FeatureMatching_1.output}" + ] + }, + "internalInputs": { + "color": "#80766f" + } + } + } +} \ No newline at end of file diff --git a/meshroom/nodalCameraTrackingWithoutCalibration.mg b/meshroom/nodalCameraTrackingWithoutCalibration.mg new file mode 100644 index 
0000000000..6de92754cc --- /dev/null +++ b/meshroom/nodalCameraTrackingWithoutCalibration.mg @@ -0,0 +1,250 @@ +{ + "header": { + "releaseVersion": "2025.1.0-develop", + "fileVersion": "2.0", + "template": true, + "nodesVersions": { + "CameraInit": "12.0", + "ConvertDistortion": "1.0", + "ConvertSfMFormat": "2.0", + "ExportAnimatedCamera": "2.0", + "ExportDistortion": "2.0", + "FeatureExtraction": "1.3", + "FeatureMatching": "2.0", + "ImageDetectionPrompt": "0.1", + "ImageMatching": "2.0", + "ImageSegmentationBox": "0.1", + "NodalSfM": "2.0", + "Publish": "1.3", + "RelativePoseEstimating": "3.0", + "ScenePreview": "2.0", + "TracksBuilding": "1.0" + } + }, + "graph": { + "CameraInit_1": { + "nodeType": "CameraInit", + "position": [ + -400, + 0 + ], + "inputs": {}, + "internalInputs": { + "color": "#80766f" + } + }, + "ConvertDistortion_1": { + "nodeType": "ConvertDistortion", + "position": [ + 1600, + 0 + ], + "inputs": { + "input": "{ExportAnimatedCamera_1.input}" + }, + "internalInputs": { + "color": "#80766f" + } + }, + "ConvertSfMFormat_1": { + "nodeType": "ConvertSfMFormat", + "position": [ + 1400, + 200 + ], + "inputs": { + "input": "{NodalSfM_1.output}", + "fileExt": "sfm", + "structure": false, + "observations": false + }, + "internalInputs": { + "color": "#4c594c" + } + }, + "ExportAnimatedCamera_1": { + "nodeType": "ExportAnimatedCamera", + "position": [ + 1400, + 0 + ], + "inputs": { + "input": "{NodalSfM_1.output}", + "exportUndistortedImages": true + }, + "internalInputs": { + "color": "#80766f" + } + }, + "ExportDistortion_1": { + "nodeType": "ExportDistortion", + "position": [ + 1800, + 0 + ], + "inputs": { + "input": "{ConvertDistortion_1.output}", + "exportLensGridsUndistorted": false + }, + "internalInputs": { + "color": "#80766f" + } + }, + "FeatureExtraction_1": { + "nodeType": "FeatureExtraction", + "position": [ + 200, + 0 + ], + "inputs": { + "input": "{ImageSegmentationBox_1.input}", + "masksFolder": "{ImageSegmentationBox_1.output}" + }, + "internalInputs": { + "color": "#80766f" + } + }, + "FeatureMatching_1": { + "nodeType": "FeatureMatching", + "position": [ + 600, + 0 + ], + "inputs": { + "input": "{ImageMatching_1.input}", + "featuresFolders": "{ImageMatching_1.featuresFolders}", + "imagePairsList": "{ImageMatching_1.output}", + "describerTypes": "{FeatureExtraction_1.describerTypes}" + }, + "internalInputs": { + "color": "#80766f" + } + }, + "ImageDetectionPrompt_1": { + "nodeType": "ImageDetectionPrompt", + "position": [ + -200, + 0 + ], + "inputs": { + "input": "{CameraInit_1.output}" + }, + "internalInputs": { + "color": "#80766f" + } + }, + "ImageMatching_1": { + "nodeType": "ImageMatching", + "position": [ + 400, + 0 + ], + "inputs": { + "input": "{FeatureExtraction_1.input}", + "featuresFolders": [ + "{FeatureExtraction_1.output}" + ] + }, + "internalInputs": { + "color": "#80766f" + } + }, + "ImageSegmentationBox_1": { + "nodeType": "ImageSegmentationBox", + "position": [ + 0, + 0 + ], + "inputs": { + "input": "{ImageDetectionPrompt_1.input}", + "bboxFolder": "{ImageDetectionPrompt_1.output}", + "maskInvert": true, + "keepFilename": true + }, + "internalInputs": { + "color": "#80766f" + } + }, + "NodalSfM_1": { + "nodeType": "NodalSfM", + "position": [ + 1200, + 0 + ], + "inputs": { + "input": "{RelativePoseEstimating_1.input}", + "tracksFilename": "{RelativePoseEstimating_1.tracksFilename}", + "pairs": "{RelativePoseEstimating_1.output}" + }, + "internalInputs": { + "color": "#80766f" + } + }, + "Publish_1": { + "nodeType": "Publish", + "position": [ + 
2100, + 100 + ], + "inputs": { + "inputFiles": [ + "{ExportAnimatedCamera_1.output}", + "{ScenePreview_1.output}", + "{ExportDistortion_1.output}" + ] + } + }, + "RelativePoseEstimating_1": { + "nodeType": "RelativePoseEstimating", + "position": [ + 1000, + 0 + ], + "inputs": { + "input": "{TracksBuilding_1.input}", + "tracksFilename": "{TracksBuilding_1.output}", + "enforcePureRotation": true + }, + "internalInputs": { + "color": "#80766f" + } + }, + "ScenePreview_1": { + "nodeType": "ScenePreview", + "position": [ + 1600, + 200 + ], + "inputs": { + "cameras": "{ConvertSfMFormat_1.output}", + "model": "{NodalSfM_1.output}", + "undistortedImages": "{ExportAnimatedCamera_1.outputUndistorted}", + "masks": "{ImageSegmentationBox_1.output}", + "pointCloudParams": { + "particleSize": 0.001, + "particleColor": "Red" + } + }, + "internalInputs": { + "color": "#4c594c" + } + }, + "TracksBuilding_1": { + "nodeType": "TracksBuilding", + "position": [ + 800, + 0 + ], + "inputs": { + "input": "{FeatureMatching_1.input}", + "featuresFolders": "{FeatureMatching_1.featuresFolders}", + "matchesFolders": [ + "{FeatureMatching_1.output}" + ] + }, + "internalInputs": { + "color": "#80766f" + } + } + } +} \ No newline at end of file diff --git a/meshroom/panoramaFisheyeHdr.mg b/meshroom/panoramaFisheyeHdr.mg new file mode 100644 index 0000000000..03daf308d2 --- /dev/null +++ b/meshroom/panoramaFisheyeHdr.mg @@ -0,0 +1,238 @@ +{ + "header": { + "nodesVersions": { + "CameraInit": "12.0", + "FeatureExtraction": "1.3", + "FeatureMatching": "2.0", + "ImageMatching": "2.0", + "LdrToHdrCalibration": "3.1", + "LdrToHdrMerge": "4.1", + "LdrToHdrSampling": "4.0", + "PanoramaCompositing": "2.0", + "PanoramaEstimation": "1.0", + "PanoramaInit": "2.0", + "PanoramaMerging": "1.0", + "PanoramaPostProcessing": "2.0", + "PanoramaPrepareImages": "1.1", + "PanoramaSeams": "2.0", + "PanoramaWarping": "1.1", + "Publish": "1.3", + "SfMTransform": "3.1" + }, + "releaseVersion": "2025.1.0-develop", + "fileVersion": "2.0", + "template": true + }, + "graph": { + "CameraInit_1": { + "nodeType": "CameraInit", + "position": [ + 0, + 0 + ], + "inputs": {} + }, + "FeatureExtraction_1": { + "nodeType": "FeatureExtraction", + "position": [ + 1200, + 0 + ], + "inputs": { + "input": "{PanoramaInit_1.outSfMData}", + "describerTypes": [ + "sift" + ], + "describerPreset": "high", + "describerQuality": "high" + } + }, + "FeatureMatching_1": { + "nodeType": "FeatureMatching", + "position": [ + 1600, + 0 + ], + "inputs": { + "input": "{ImageMatching_1.input}", + "featuresFolders": "{ImageMatching_1.featuresFolders}", + "imagePairsList": "{ImageMatching_1.output}", + "describerTypes": "{FeatureExtraction_1.describerTypes}", + "minRequired2DMotion": 5.0 + } + }, + "ImageMatching_1": { + "nodeType": "ImageMatching", + "position": [ + 1400, + 0 + ], + "inputs": { + "input": "{FeatureExtraction_1.input}", + "featuresFolders": [ + "{FeatureExtraction_1.output}" + ], + "method": "FrustumOrVocabularyTree" + } + }, + "LdrToHdrCalibration_1": { + "nodeType": "LdrToHdrCalibration", + "position": [ + 600, + 0 + ], + "inputs": { + "input": "{LdrToHdrSampling_1.input}", + "samples": "{LdrToHdrSampling_1.output}", + "userNbBrackets": "{LdrToHdrSampling_1.userNbBrackets}", + "byPass": "{LdrToHdrSampling_1.byPass}", + "calibrationMethod": "{LdrToHdrSampling_1.calibrationMethod}", + "channelQuantizationPower": "{LdrToHdrSampling_1.channelQuantizationPower}", + "workingColorSpace": "{LdrToHdrSampling_1.workingColorSpace}" + } + }, + "LdrToHdrMerge_1": { + "nodeType": 
"LdrToHdrMerge", + "position": [ + 800, + 0 + ], + "inputs": { + "input": "{LdrToHdrCalibration_1.input}", + "response": "{LdrToHdrCalibration_1.response}", + "userNbBrackets": "{LdrToHdrCalibration_1.userNbBrackets}", + "byPass": "{LdrToHdrCalibration_1.byPass}", + "channelQuantizationPower": "{LdrToHdrCalibration_1.channelQuantizationPower}", + "workingColorSpace": "{LdrToHdrCalibration_1.workingColorSpace}" + } + }, + "LdrToHdrSampling_1": { + "nodeType": "LdrToHdrSampling", + "position": [ + 400, + 0 + ], + "inputs": { + "input": "{PanoramaPrepareImages_1.output}" + } + }, + "PanoramaCompositing_1": { + "nodeType": "PanoramaCompositing", + "position": [ + 2600, + 0 + ], + "inputs": { + "input": "{PanoramaSeams_1.outputSfm}", + "warpingFolder": "{PanoramaSeams_1.warpingFolder}", + "labels": "{PanoramaSeams_1.output}", + "useTiling": false + } + }, + "PanoramaEstimation_1": { + "nodeType": "PanoramaEstimation", + "position": [ + 1800, + 0 + ], + "inputs": { + "input": "{FeatureMatching_1.input}", + "featuresFolders": "{FeatureMatching_1.featuresFolders}", + "matchesFolders": [ + "{FeatureMatching_1.output}" + ], + "describerTypes": "{FeatureMatching_1.describerTypes}" + } + }, + "PanoramaInit_1": { + "nodeType": "PanoramaInit", + "position": [ + 1000, + 0 + ], + "inputs": { + "input": "{LdrToHdrMerge_1.outSfMData}", + "useFisheye": true + } + }, + "PanoramaMerging_1": { + "nodeType": "PanoramaMerging", + "position": [ + 2800, + 0 + ], + "inputs": { + "input": "{PanoramaCompositing_1.input}", + "compositingFolder": "{PanoramaCompositing_1.output}", + "useTiling": "{PanoramaCompositing_1.useTiling}" + } + }, + "PanoramaPostProcessing_1": { + "nodeType": "PanoramaPostProcessing", + "position": [ + 3000, + 0 + ], + "inputs": { + "inputPanorama": "{PanoramaMerging_1.outputPanorama}", + "fillHoles": true + } + }, + "PanoramaPrepareImages_1": { + "nodeType": "PanoramaPrepareImages", + "position": [ + 200, + 0 + ], + "inputs": { + "input": "{CameraInit_1.output}" + } + }, + "PanoramaSeams_1": { + "nodeType": "PanoramaSeams", + "position": [ + 2400, + 0 + ], + "inputs": { + "input": "{PanoramaWarping_1.input}", + "warpingFolder": "{PanoramaWarping_1.output}" + } + }, + "PanoramaWarping_1": { + "nodeType": "PanoramaWarping", + "position": [ + 2200, + 0 + ], + "inputs": { + "input": "{SfMTransform_1.output}" + } + }, + "Publish_1": { + "nodeType": "Publish", + "position": [ + 3200, + 0 + ], + "inputs": { + "inputFiles": [ + "{PanoramaPostProcessing_1.outputPanorama}", + "{PanoramaPostProcessing_1.outputPanoramaPreview}", + "{PanoramaPostProcessing_1.downscaledPanoramaLevels}" + ] + } + }, + "SfMTransform_1": { + "nodeType": "SfMTransform", + "position": [ + 2000, + 0 + ], + "inputs": { + "input": "{PanoramaEstimation_1.output}", + "method": "manual" + } + } + } +} \ No newline at end of file diff --git a/meshroom/panoramaHdr.mg b/meshroom/panoramaHdr.mg new file mode 100644 index 0000000000..566c126d15 --- /dev/null +++ b/meshroom/panoramaHdr.mg @@ -0,0 +1,233 @@ +{ + "header": { + "nodesVersions": { + "CameraInit": "12.0", + "FeatureExtraction": "1.3", + "FeatureMatching": "2.0", + "ImageMatching": "2.0", + "LdrToHdrCalibration": "3.1", + "LdrToHdrMerge": "4.1", + "LdrToHdrSampling": "4.0", + "PanoramaCompositing": "2.0", + "PanoramaEstimation": "1.0", + "PanoramaInit": "2.0", + "PanoramaMerging": "1.0", + "PanoramaPostProcessing": "2.0", + "PanoramaPrepareImages": "1.1", + "PanoramaSeams": "2.0", + "PanoramaWarping": "1.1", + "Publish": "1.3", + "SfMTransform": "3.1" + }, + "releaseVersion": 
"2025.1.0-develop", + "fileVersion": "2.0", + "template": true + }, + "graph": { + "CameraInit_1": { + "nodeType": "CameraInit", + "position": [ + 0, + 0 + ], + "inputs": {} + }, + "FeatureExtraction_1": { + "nodeType": "FeatureExtraction", + "position": [ + 1000, + 70 + ], + "inputs": { + "input": "{LdrToHdrMerge_1.outSfMData}", + "describerQuality": "high" + } + }, + "FeatureMatching_1": { + "nodeType": "FeatureMatching", + "position": [ + 1400, + 0 + ], + "inputs": { + "input": "{ImageMatching_1.input}", + "featuresFolders": "{ImageMatching_1.featuresFolders}", + "imagePairsList": "{ImageMatching_1.output}", + "describerTypes": "{FeatureExtraction_1.describerTypes}", + "minRequired2DMotion": 5.0 + } + }, + "ImageMatching_1": { + "nodeType": "ImageMatching", + "position": [ + 1200, + 0 + ], + "inputs": { + "input": "{PanoramaInit_1.outSfMData}", + "featuresFolders": [ + "{FeatureExtraction_1.output}" + ], + "method": "FrustumOrVocabularyTree" + } + }, + "LdrToHdrCalibration_1": { + "nodeType": "LdrToHdrCalibration", + "position": [ + 600, + 0 + ], + "inputs": { + "input": "{LdrToHdrSampling_1.input}", + "samples": "{LdrToHdrSampling_1.output}", + "userNbBrackets": "{LdrToHdrSampling_1.userNbBrackets}", + "byPass": "{LdrToHdrSampling_1.byPass}", + "calibrationMethod": "{LdrToHdrSampling_1.calibrationMethod}", + "channelQuantizationPower": "{LdrToHdrSampling_1.channelQuantizationPower}", + "workingColorSpace": "{LdrToHdrSampling_1.workingColorSpace}" + } + }, + "LdrToHdrMerge_1": { + "nodeType": "LdrToHdrMerge", + "position": [ + 800, + 0 + ], + "inputs": { + "input": "{LdrToHdrCalibration_1.input}", + "response": "{LdrToHdrCalibration_1.response}", + "userNbBrackets": "{LdrToHdrCalibration_1.userNbBrackets}", + "byPass": "{LdrToHdrCalibration_1.byPass}", + "channelQuantizationPower": "{LdrToHdrCalibration_1.channelQuantizationPower}", + "workingColorSpace": "{LdrToHdrCalibration_1.workingColorSpace}" + } + }, + "LdrToHdrSampling_1": { + "nodeType": "LdrToHdrSampling", + "position": [ + 400, + 0 + ], + "inputs": { + "input": "{PanoramaPrepareImages_1.output}" + } + }, + "PanoramaCompositing_1": { + "nodeType": "PanoramaCompositing", + "position": [ + 2400, + 0 + ], + "inputs": { + "input": "{PanoramaSeams_1.outputSfm}", + "warpingFolder": "{PanoramaSeams_1.warpingFolder}", + "labels": "{PanoramaSeams_1.output}" + } + }, + "PanoramaEstimation_1": { + "nodeType": "PanoramaEstimation", + "position": [ + 1600, + 0 + ], + "inputs": { + "input": "{FeatureMatching_1.input}", + "featuresFolders": "{FeatureMatching_1.featuresFolders}", + "matchesFolders": [ + "{FeatureMatching_1.output}" + ], + "describerTypes": "{FeatureMatching_1.describerTypes}" + } + }, + "PanoramaInit_1": { + "nodeType": "PanoramaInit", + "position": [ + 1000, + -50 + ], + "inputs": { + "input": "{LdrToHdrMerge_1.outSfMData}" + } + }, + "PanoramaMerging_1": { + "nodeType": "PanoramaMerging", + "position": [ + 2600, + 0 + ], + "inputs": { + "input": "{PanoramaCompositing_1.input}", + "compositingFolder": "{PanoramaCompositing_1.output}", + "useTiling": "{PanoramaCompositing_1.useTiling}" + } + }, + "PanoramaPostProcessing_1": { + "nodeType": "PanoramaPostProcessing", + "position": [ + 2800, + 0 + ], + "inputs": { + "inputPanorama": "{PanoramaMerging_1.outputPanorama}", + "fillHoles": true, + "exportLevels": true + } + }, + "PanoramaPrepareImages_1": { + "nodeType": "PanoramaPrepareImages", + "position": [ + 200, + 0 + ], + "inputs": { + "input": "{CameraInit_1.output}" + } + }, + "PanoramaSeams_1": { + "nodeType": 
"PanoramaSeams", + "position": [ + 2200, + 0 + ], + "inputs": { + "input": "{PanoramaWarping_1.input}", + "warpingFolder": "{PanoramaWarping_1.output}" + } + }, + "PanoramaWarping_1": { + "nodeType": "PanoramaWarping", + "position": [ + 2000, + 0 + ], + "inputs": { + "input": "{SfMTransform_1.output}" + } + }, + "Publish_1": { + "nodeType": "Publish", + "position": [ + 3000, + 0 + ], + "inputs": { + "inputFiles": [ + "{PanoramaPostProcessing_1.outputPanorama}", + "{PanoramaPostProcessing_1.outputPanoramaPreview}", + "{PanoramaPostProcessing_1.downscaledPanoramaLevels}" + ] + } + }, + "SfMTransform_1": { + "nodeType": "SfMTransform", + "position": [ + 1800, + 0 + ], + "inputs": { + "input": "{PanoramaEstimation_1.output}", + "method": "manual" + } + } + } +} \ No newline at end of file diff --git a/meshroom/photogrammetry.mg b/meshroom/photogrammetry.mg new file mode 100644 index 0000000000..8290fbb384 --- /dev/null +++ b/meshroom/photogrammetry.mg @@ -0,0 +1,161 @@ +{ + "header": { + "releaseVersion": "2025.1.0-develop", + "fileVersion": "2.0", + "template": true, + "nodesVersions": { + "CameraInit": "12.0", + "DepthMap": "5.0", + "DepthMapFilter": "4.0", + "FeatureExtraction": "1.3", + "FeatureMatching": "2.0", + "ImageMatching": "2.0", + "MeshFiltering": "3.0", + "Meshing": "7.0", + "PrepareDenseScene": "3.1", + "Publish": "1.3", + "StructureFromMotion": "3.3", + "Texturing": "6.0" + } + }, + "graph": { + "CameraInit_1": { + "nodeType": "CameraInit", + "position": [ + 0, + 0 + ], + "inputs": {} + }, + "DepthMapFilter_1": { + "nodeType": "DepthMapFilter", + "position": [ + 1400, + 0 + ], + "inputs": { + "input": "{DepthMap_1.input}", + "depthMapsFolder": "{DepthMap_1.output}" + } + }, + "DepthMap_1": { + "nodeType": "DepthMap", + "position": [ + 1200, + 0 + ], + "inputs": { + "input": "{PrepareDenseScene_1.input}", + "imagesFolder": "{PrepareDenseScene_1.output}" + } + }, + "FeatureExtraction_1": { + "nodeType": "FeatureExtraction", + "position": [ + 200, + 0 + ], + "inputs": { + "input": "{CameraInit_1.output}" + } + }, + "FeatureMatching_1": { + "nodeType": "FeatureMatching", + "position": [ + 600, + 0 + ], + "inputs": { + "input": "{ImageMatching_1.input}", + "featuresFolders": "{ImageMatching_1.featuresFolders}", + "imagePairsList": "{ImageMatching_1.output}", + "describerTypes": "{FeatureExtraction_1.describerTypes}" + } + }, + "ImageMatching_1": { + "nodeType": "ImageMatching", + "position": [ + 400, + 0 + ], + "inputs": { + "input": "{FeatureExtraction_1.input}", + "featuresFolders": [ + "{FeatureExtraction_1.output}" + ] + } + }, + "MeshFiltering_1": { + "nodeType": "MeshFiltering", + "position": [ + 1800, + 0 + ], + "inputs": { + "inputMesh": "{Meshing_1.outputMesh}" + } + }, + "Meshing_1": { + "nodeType": "Meshing", + "position": [ + 1600, + 0 + ], + "inputs": { + "input": "{DepthMapFilter_1.input}", + "depthMapsFolder": "{DepthMapFilter_1.output}" + } + }, + "PrepareDenseScene_1": { + "nodeType": "PrepareDenseScene", + "position": [ + 1000, + 0 + ], + "inputs": { + "input": "{StructureFromMotion_1.output}" + } + }, + "Publish_1": { + "nodeType": "Publish", + "position": [ + 2200, + 0 + ], + "inputs": { + "inputFiles": [ + "{Texturing_1.outputMesh}", + "{Texturing_1.outputMaterial}", + "{Texturing_1.outputTextures}" + ] + } + }, + "StructureFromMotion_1": { + "nodeType": "StructureFromMotion", + "position": [ + 800, + 0 + ], + "inputs": { + "input": "{FeatureMatching_1.input}", + "featuresFolders": "{FeatureMatching_1.featuresFolders}", + "matchesFolders": [ + 
"{FeatureMatching_1.output}" + ], + "describerTypes": "{FeatureMatching_1.describerTypes}" + } + }, + "Texturing_1": { + "nodeType": "Texturing", + "position": [ + 2000, + 0 + ], + "inputs": { + "input": "{Meshing_1.output}", + "imagesFolder": "{DepthMap_1.imagesFolder}", + "inputMesh": "{MeshFiltering_1.outputMesh}" + } + } + } +} \ No newline at end of file diff --git a/meshroom/photogrammetryAndCameraTracking.mg b/meshroom/photogrammetryAndCameraTracking.mg new file mode 100644 index 0000000000..aa32aa9770 --- /dev/null +++ b/meshroom/photogrammetryAndCameraTracking.mg @@ -0,0 +1,605 @@ +{ + "header": { + "releaseVersion": "2025.1.0-develop", + "fileVersion": "2.0", + "template": true, + "nodesVersions": { + "ApplyCalibration": "1.0", + "CameraInit": "12.0", + "CheckerboardDetection": "1.0", + "ConvertSfMFormat": "2.0", + "DepthMap": "5.0", + "DepthMapFilter": "4.0", + "DistortionCalibration": "5.0", + "ExportAnimatedCamera": "2.0", + "ExportDistortion": "2.0", + "FeatureExtraction": "1.3", + "FeatureMatching": "2.0", + "ImageDetectionPrompt": "0.1", + "ImageMatching": "2.0", + "ImageMatchingMultiSfM": "1.0", + "ImageSegmentationBox": "0.1", + "KeyframeSelection": "5.0", + "MeshDecimate": "1.0", + "MeshFiltering": "3.0", + "Meshing": "7.0", + "PrepareDenseScene": "3.1", + "Publish": "1.3", + "ScenePreview": "2.0", + "StructureFromMotion": "3.3", + "Texturing": "6.0" + } + }, + "graph": { + "ApplyCalibration_1": { + "nodeType": "ApplyCalibration", + "position": [ + 0, + 0 + ], + "inputs": { + "input": "{CameraInit_1.output}", + "calibration": "{DistortionCalibration_1.output}" + }, + "internalInputs": { + "color": "#575963" + } + }, + "CameraInit_1": { + "nodeType": "CameraInit", + "position": [ + -200, + 0 + ], + "inputs": {}, + "internalInputs": { + "label": "InitShot", + "color": "#575963" + } + }, + "CameraInit_2": { + "nodeType": "CameraInit", + "position": [ + -600, + -160 + ], + "inputs": {}, + "internalInputs": { + "label": "InitLensGrid", + "color": "#302e2e" + } + }, + "CameraInit_3": { + "nodeType": "CameraInit", + "position": [ + -600, + -500 + ], + "inputs": {}, + "internalInputs": { + "label": "InitPhotogrammetry", + "color": "#384a55" + } + }, + "CheckerboardDetection_1": { + "nodeType": "CheckerboardDetection", + "position": [ + -400, + -160 + ], + "inputs": { + "input": "{CameraInit_2.output}", + "useNestedGrids": true, + "exportDebugImages": true + }, + "internalInputs": { + "color": "#302e2e" + } + }, + "ConvertSfMFormat_1": { + "nodeType": "ConvertSfMFormat", + "position": [ + 2000, + 200 + ], + "inputs": { + "input": "{ExportAnimatedCamera_1.input}", + "fileExt": "sfm", + "describerTypes": "{StructureFromMotion_1.describerTypes}", + "structure": false, + "observations": false + }, + "internalInputs": { + "color": "#4c594c" + } + }, + "DepthMapFilter_2": { + "nodeType": "DepthMapFilter", + "position": [ + 800, + -500 + ], + "inputs": { + "input": "{DepthMap_2.input}", + "depthMapsFolder": "{DepthMap_2.output}" + }, + "internalInputs": { + "color": "#384a55" + } + }, + "DepthMap_2": { + "nodeType": "DepthMap", + "position": [ + 600, + -500 + ], + "inputs": { + "input": "{PrepareDenseScene_2.input}", + "imagesFolder": "{PrepareDenseScene_2.output}" + }, + "internalInputs": { + "color": "#384a55" + } + }, + "DistortionCalibration_1": { + "nodeType": "DistortionCalibration", + "position": [ + -200, + -160 + ], + "inputs": { + "input": "{CheckerboardDetection_1.input}", + "checkerboards": "{CheckerboardDetection_1.output}" + }, + "internalInputs": { + "color": "#302e2e" + 
} + }, + "ExportAnimatedCamera_1": { + "nodeType": "ExportAnimatedCamera", + "position": [ + 1600, + 200 + ], + "inputs": { + "input": "{StructureFromMotion_1.output}", + "sfmDataFilter": "{ImageMatchingMultiSfM_2.inputB}", + "exportUndistortedImages": true + }, + "internalInputs": { + "color": "#80766f" + } + }, + "ExportDistortion_1": { + "nodeType": "ExportDistortion", + "position": [ + 0, + -160 + ], + "inputs": { + "input": "{DistortionCalibration_1.output}" + }, + "internalInputs": { + "color": "#302e2e" + } + }, + "FeatureExtraction_1": { + "nodeType": "FeatureExtraction", + "position": [ + 400, + 200 + ], + "inputs": { + "input": "{ApplyCalibration_1.output}", + "masksFolder": "{ImageSegmentationBox_2.output}", + "maskExtension": "exr" + }, + "internalInputs": { + "color": "#575963" + } + }, + "FeatureExtraction_2": { + "nodeType": "FeatureExtraction", + "position": [ + -400, + -500 + ], + "inputs": { + "input": "{CameraInit_3.output}" + }, + "internalInputs": { + "color": "#384a55" + } + }, + "FeatureMatching_1": { + "nodeType": "FeatureMatching", + "position": [ + 600, + 0 + ], + "inputs": { + "input": "{ImageMatching_1.input}", + "featuresFolders": "{ImageMatching_1.featuresFolders}", + "imagePairsList": "{ImageMatching_1.output}", + "describerTypes": "{FeatureExtraction_1.describerTypes}" + }, + "internalInputs": { + "label": "FeatureMatchingKeyframes", + "color": "#575963" + } + }, + "FeatureMatching_2": { + "nodeType": "FeatureMatching", + "position": [ + 1200, + 360 + ], + "inputs": { + "input": "{ImageMatching_2.input}", + "featuresFolders": "{ImageMatching_2.featuresFolders}", + "imagePairsList": "{ImageMatching_2.output}" + }, + "internalInputs": { + "label": "FeatureMatchingAllFrames", + "color": "#80766f" + } + }, + "FeatureMatching_3": { + "nodeType": "FeatureMatching", + "position": [ + 1200, + 200 + ], + "inputs": { + "input": "{ImageMatchingMultiSfM_1.outputCombinedSfM}", + "featuresFolders": "{ImageMatchingMultiSfM_1.featuresFolders}", + "imagePairsList": "{ImageMatchingMultiSfM_1.output}", + "describerTypes": "{FeatureExtraction_1.describerTypes}" + }, + "internalInputs": { + "label": "FeatureMatchingFramesToKeyframes", + "color": "#80766f" + } + }, + "FeatureMatching_4": { + "nodeType": "FeatureMatching", + "position": [ + 0, + -500 + ], + "inputs": { + "input": "{ImageMatching_3.input}", + "featuresFolders": "{ImageMatching_3.featuresFolders}", + "imagePairsList": "{ImageMatching_3.output}", + "describerTypes": "{FeatureExtraction_2.describerTypes}" + }, + "internalInputs": { + "color": "#384a55" + } + }, + "FeatureMatching_5": { + "nodeType": "FeatureMatching", + "position": [ + 600, + -300 + ], + "inputs": { + "input": "{ImageMatchingMultiSfM_2.outputCombinedSfM}", + "featuresFolders": "{ImageMatchingMultiSfM_2.featuresFolders}", + "imagePairsList": "{ImageMatchingMultiSfM_2.output}", + "describerTypes": "{FeatureExtraction_1.describerTypes}" + }, + "internalInputs": { + "color": "#575963" + } + }, + "ImageDetectionPrompt_1": { + "nodeType": "ImageDetectionPrompt", + "position": [ + 0, + 200 + ], + "inputs": { + "input": "{CameraInit_1.output}" + }, + "internalInputs": { + "color": "#575963" + } + }, + "ImageMatchingMultiSfM_1": { + "nodeType": "ImageMatchingMultiSfM", + "position": [ + 1000, + 200 + ], + "inputs": { + "input": "{KeyframeSelection_1.outputSfMDataFrames}", + "inputB": "{StructureFromMotion_2.output}", + "featuresFolders": [ + "{FeatureExtraction_1.output}" + ], + "method": "VocabularyTree", + "matchingMode": "a/b", + "nbMatches": 20 + }, + 
"internalInputs": { + "color": "#80766f" + } + }, + "ImageMatchingMultiSfM_2": { + "nodeType": "ImageMatchingMultiSfM", + "position": [ + 400, + -300 + ], + "inputs": { + "input": "{KeyframeSelection_1.outputSfMDataKeyframes}", + "inputB": "{StructureFromMotion_3.output}", + "featuresFolders": [ + "{FeatureExtraction_1.output}" + ], + "method": "Exhaustive", + "matchingMode": "a/b" + }, + "internalInputs": { + "color": "#575963" + } + }, + "ImageMatching_1": { + "nodeType": "ImageMatching", + "position": [ + 400, + 0 + ], + "inputs": { + "input": "{KeyframeSelection_1.outputSfMDataKeyframes}", + "featuresFolders": [ + "{FeatureExtraction_1.output}" + ], + "method": "Exhaustive" + }, + "internalInputs": { + "label": "ImageMatchingKeyframes", + "color": "#575963" + } + }, + "ImageMatching_2": { + "nodeType": "ImageMatching", + "position": [ + 1000, + 360 + ], + "inputs": { + "input": "{ApplyCalibration_1.output}", + "featuresFolders": [ + "{FeatureExtraction_1.output}" + ], + "method": "Sequential", + "nbNeighbors": 20 + }, + "internalInputs": { + "color": "#80766f" + } + }, + "ImageMatching_3": { + "nodeType": "ImageMatching", + "position": [ + -200, + -500 + ], + "inputs": { + "input": "{FeatureExtraction_2.input}", + "featuresFolders": [ + "{FeatureExtraction_2.output}" + ] + }, + "internalInputs": { + "color": "#384a55" + } + }, + "ImageSegmentationBox_2": { + "nodeType": "ImageSegmentationBox", + "position": [ + 200, + 200 + ], + "inputs": { + "input": "{ImageDetectionPrompt_1.input}", + "bboxFolder": "{ImageDetectionPrompt_1.output}", + "maskInvert": true, + "keepFilename": true + }, + "internalInputs": { + "color": "#575963" + } + }, + "KeyframeSelection_1": { + "nodeType": "KeyframeSelection", + "position": [ + 200, + 0 + ], + "inputs": { + "inputPaths": [ + "{ApplyCalibration_1.output}" + ] + }, + "internalInputs": { + "color": "#575963" + } + }, + "MeshDecimate_1": { + "nodeType": "MeshDecimate", + "position": [ + 2000, + 100 + ], + "inputs": { + "input": "{MeshFiltering_2.outputMesh}", + "simplificationFactor": 0.05 + }, + "internalInputs": { + "color": "#4c594c" + } + }, + "MeshFiltering_2": { + "nodeType": "MeshFiltering", + "position": [ + 1200, + -500 + ], + "inputs": { + "inputMesh": "{Meshing_2.outputMesh}" + }, + "internalInputs": { + "color": "#384a55" + } + }, + "Meshing_2": { + "nodeType": "Meshing", + "position": [ + 1000, + -500 + ], + "inputs": { + "input": "{DepthMapFilter_2.input}", + "depthMapsFolder": "{DepthMapFilter_2.output}" + }, + "internalInputs": { + "color": "#384a55" + } + }, + "PrepareDenseScene_2": { + "nodeType": "PrepareDenseScene", + "position": [ + 400, + -500 + ], + "inputs": { + "input": "{StructureFromMotion_3.output}" + }, + "internalInputs": { + "color": "#384a55" + } + }, + "Publish_1": { + "nodeType": "Publish", + "position": [ + 2400, + -100 + ], + "inputs": { + "inputFiles": [ + "{ExportAnimatedCamera_1.output}", + "{ScenePreview_1.output}", + "{ExportDistortion_1.output}", + "{Texturing_2.output}" + ] + } + }, + "ScenePreview_1": { + "nodeType": "ScenePreview", + "position": [ + 2200, + 200 + ], + "inputs": { + "cameras": "{ConvertSfMFormat_1.output}", + "model": "{MeshDecimate_1.output}", + "undistortedImages": "{ExportAnimatedCamera_1.outputUndistorted}", + "masks": "{ImageSegmentationBox_2.output}" + }, + "internalInputs": { + "color": "#4c594c" + } + }, + "StructureFromMotion_1": { + "nodeType": "StructureFromMotion", + "position": [ + 1400, + 200 + ], + "inputs": { + "input": "{FeatureMatching_3.input}", + "featuresFolders": 
"{FeatureMatching_3.featuresFolders}", + "matchesFolders": [ + "{FeatureMatching_3.output}", + "{FeatureMatching_2.output}" + ], + "describerTypes": "{FeatureMatching_3.describerTypes}", + "nbFirstUnstableCameras": 0, + "maxImagesPerGroup": 0, + "bundleAdjustmentMaxOutliers": -1, + "minInputTrackLength": 5, + "minNumberOfObservationsForTriangulation": 3, + "minAngleForTriangulation": 1.0, + "minAngleForLandmark": 0.5, + "filterTrackForks": true, + "useAutoTransform": false + }, + "internalInputs": { + "comment": "Estimate cameras parameters for the complete camera tracking sequence.", + "color": "#80766f" + } + }, + "StructureFromMotion_2": { + "nodeType": "StructureFromMotion", + "position": [ + 800, + 0 + ], + "inputs": { + "input": "{FeatureMatching_5.input}", + "featuresFolders": "{FeatureMatching_1.featuresFolders}", + "matchesFolders": [ + "{FeatureMatching_1.output}", + "{FeatureMatching_5.output}" + ], + "describerTypes": "{FeatureMatching_1.describerTypes}", + "lockScenePreviouslyReconstructed": true, + "minAngleForTriangulation": 1.0, + "minAngleForLandmark": 0.5, + "filterTrackForks": true, + "useAutoTransform": false + }, + "internalInputs": { + "comment": "Solve all keyframes first.", + "label": "StructureFromMotionKeyframes", + "color": "#575963" + } + }, + "StructureFromMotion_3": { + "nodeType": "StructureFromMotion", + "position": [ + 200, + -500 + ], + "inputs": { + "input": "{FeatureMatching_4.input}", + "featuresFolders": "{FeatureMatching_4.featuresFolders}", + "matchesFolders": [ + "{FeatureMatching_4.output}" + ], + "describerTypes": "{FeatureMatching_4.describerTypes}" + }, + "internalInputs": { + "color": "#384a55" + } + }, + "Texturing_2": { + "nodeType": "Texturing", + "position": [ + 1400, + -500 + ], + "inputs": { + "input": "{Meshing_2.output}", + "imagesFolder": "{DepthMap_2.imagesFolder}", + "inputMesh": "{MeshFiltering_2.outputMesh}" + }, + "internalInputs": { + "color": "#384a55" + } + } + } +} \ No newline at end of file diff --git a/meshroom/photogrammetryAndCameraTrackingExperimental.mg b/meshroom/photogrammetryAndCameraTrackingExperimental.mg new file mode 100644 index 0000000000..93cf7215ad --- /dev/null +++ b/meshroom/photogrammetryAndCameraTrackingExperimental.mg @@ -0,0 +1,712 @@ +{ + "header": { + "releaseVersion": "2025.1.0-develop", + "fileVersion": "2.0", + "nodesVersions": { + "ApplyCalibration": "1.0", + "CameraInit": "12.0", + "CheckerboardDetection": "1.0", + "ConvertSfMFormat": "2.0", + "DepthMap": "5.0", + "DepthMapFilter": "4.0", + "DistortionCalibration": "5.0", + "ExportAnimatedCamera": "2.0", + "ExportDistortion": "2.0", + "FeatureExtraction": "1.3", + "FeatureMatching": "2.0", + "ImageDetectionPrompt": "0.1", + "ImageMatching": "2.0", + "ImageMatchingMultiSfM": "1.0", + "ImageSegmentationBox": "0.1", + "KeyframeSelection": "5.0", + "MeshDecimate": "1.0", + "MeshFiltering": "3.0", + "Meshing": "7.0", + "PrepareDenseScene": "3.1", + "Publish": "1.3", + "RelativePoseEstimating": "3.0", + "ScenePreview": "2.0", + "SfMBootStraping": "3.0", + "SfMExpanding": "2.0", + "Texturing": "6.0", + "TracksBuilding": "1.0" + }, + "template": true + }, + "graph": { + "ApplyCalibration_1": { + "nodeType": "ApplyCalibration", + "position": [ + 0, + 0 + ], + "inputs": { + "input": "{CameraInit_1.output}", + "calibration": "{DistortionCalibration_1.output}" + }, + "internalInputs": { + "color": "#575963" + } + }, + "CameraInit_1": { + "nodeType": "CameraInit", + "position": [ + -200, + 0 + ], + "inputs": {}, + "internalInputs": { + "label": "InitShot", + 
"color": "#575963" + } + }, + "CameraInit_2": { + "nodeType": "CameraInit", + "position": [ + -600, + -160 + ], + "inputs": {}, + "internalInputs": { + "label": "InitLensGrid", + "color": "#302e2e" + } + }, + "CameraInit_3": { + "nodeType": "CameraInit", + "position": [ + -600, + -500 + ], + "inputs": {}, + "internalInputs": { + "label": "InitPhotogrammetry", + "color": "#384a55" + } + }, + "CheckerboardDetection_1": { + "nodeType": "CheckerboardDetection", + "position": [ + -400, + -160 + ], + "inputs": { + "input": "{CameraInit_2.output}", + "useNestedGrids": true, + "exportDebugImages": true + }, + "internalInputs": { + "color": "#302e2e" + } + }, + "ConvertSfMFormat_1": { + "nodeType": "ConvertSfMFormat", + "position": [ + 2600, + 200 + ], + "inputs": { + "input": "{ExportAnimatedCamera_1.input}", + "fileExt": "sfm", + "describerTypes": "{TracksBuilding_3.describerTypes}", + "structure": false, + "observations": false + }, + "internalInputs": { + "color": "#4c594c" + } + }, + "DepthMapFilter_2": { + "nodeType": "DepthMapFilter", + "position": [ + 1400, + -500 + ], + "inputs": { + "input": "{DepthMap_2.input}", + "depthMapsFolder": "{DepthMap_2.output}" + }, + "internalInputs": { + "color": "#384a55" + } + }, + "DepthMap_2": { + "nodeType": "DepthMap", + "position": [ + 1200, + -500 + ], + "inputs": { + "input": "{PrepareDenseScene_2.input}", + "imagesFolder": "{PrepareDenseScene_2.output}" + }, + "internalInputs": { + "color": "#384a55" + } + }, + "DistortionCalibration_1": { + "nodeType": "DistortionCalibration", + "position": [ + -200, + -160 + ], + "inputs": { + "input": "{CheckerboardDetection_1.input}", + "checkerboards": "{CheckerboardDetection_1.output}" + }, + "internalInputs": { + "color": "#302e2e" + } + }, + "ExportAnimatedCamera_1": { + "nodeType": "ExportAnimatedCamera", + "position": [ + 2400, + 200 + ], + "inputs": { + "input": "{SfMExpanding_3.output}", + "sfmDataFilter": "{ImageMatchingMultiSfM_2.inputB}", + "exportUndistortedImages": true + }, + "internalInputs": { + "color": "#80766f" + } + }, + "ExportDistortion_1": { + "nodeType": "ExportDistortion", + "position": [ + 0, + -160 + ], + "inputs": { + "input": "{DistortionCalibration_1.output}" + }, + "internalInputs": { + "color": "#302e2e" + } + }, + "FeatureExtraction_1": { + "nodeType": "FeatureExtraction", + "position": [ + 400, + 200 + ], + "inputs": { + "input": "{ApplyCalibration_1.output}", + "masksFolder": "{ImageSegmentationBox_2.output}", + "maskExtension": "exr" + }, + "internalInputs": { + "color": "#575963" + } + }, + "FeatureExtraction_2": { + "nodeType": "FeatureExtraction", + "position": [ + -400, + -500 + ], + "inputs": { + "input": "{CameraInit_3.output}" + }, + "internalInputs": { + "color": "#384a55" + } + }, + "FeatureMatching_1": { + "nodeType": "FeatureMatching", + "position": [ + 600, + 0 + ], + "inputs": { + "input": "{ImageMatching_1.input}", + "featuresFolders": "{ImageMatching_1.featuresFolders}", + "imagePairsList": "{ImageMatching_1.output}", + "describerTypes": "{FeatureExtraction_1.describerTypes}" + }, + "internalInputs": { + "label": "FeatureMatchingKeyframes", + "color": "#575963" + } + }, + "FeatureMatching_2": { + "nodeType": "FeatureMatching", + "position": [ + 1800, + 400 + ], + "inputs": { + "input": "{ImageMatching_2.input}", + "featuresFolders": "{ImageMatching_2.featuresFolders}", + "imagePairsList": "{ImageMatching_2.output}" + }, + "internalInputs": { + "label": "FeatureMatchingAllFrames", + "color": "#80766f" + } + }, + "FeatureMatching_3": { + "nodeType": 
"FeatureMatching", + "position": [ + 1800, + 200 + ], + "inputs": { + "input": "{ImageMatchingMultiSfM_1.outputCombinedSfM}", + "featuresFolders": "{ImageMatchingMultiSfM_1.featuresFolders}", + "imagePairsList": "{ImageMatchingMultiSfM_1.output}", + "describerTypes": "{FeatureExtraction_1.describerTypes}" + }, + "internalInputs": { + "label": "FeatureMatchingFramesToKeyframes", + "color": "#80766f" + } + }, + "FeatureMatching_4": { + "nodeType": "FeatureMatching", + "position": [ + 0, + -500 + ], + "inputs": { + "input": "{ImageMatching_3.input}", + "featuresFolders": "{ImageMatching_3.featuresFolders}", + "imagePairsList": "{ImageMatching_3.output}", + "describerTypes": "{FeatureExtraction_2.describerTypes}" + }, + "internalInputs": { + "color": "#384a55" + } + }, + "FeatureMatching_5": { + "nodeType": "FeatureMatching", + "position": [ + 1200, + -300 + ], + "inputs": { + "input": "{ImageMatchingMultiSfM_2.outputCombinedSfM}", + "featuresFolders": "{ImageMatchingMultiSfM_2.featuresFolders}", + "imagePairsList": "{ImageMatchingMultiSfM_2.output}", + "describerTypes": "{FeatureExtraction_1.describerTypes}" + }, + "internalInputs": { + "color": "#575963" + } + }, + "ImageDetectionPrompt_1": { + "nodeType": "ImageDetectionPrompt", + "position": [ + 0, + 200 + ], + "inputs": { + "input": "{CameraInit_1.output}" + }, + "internalInputs": { + "color": "#575963" + } + }, + "ImageMatchingMultiSfM_1": { + "nodeType": "ImageMatchingMultiSfM", + "position": [ + 1600, + 200 + ], + "inputs": { + "input": "{KeyframeSelection_1.outputSfMDataFrames}", + "inputB": "{SfMExpanding_2.output}", + "featuresFolders": [ + "{FeatureExtraction_1.output}" + ], + "method": "VocabularyTree", + "matchingMode": "a/b", + "nbMatches": 20 + }, + "internalInputs": { + "color": "#80766f" + } + }, + "ImageMatchingMultiSfM_2": { + "nodeType": "ImageMatchingMultiSfM", + "position": [ + 1000, + -300 + ], + "inputs": { + "input": "{KeyframeSelection_1.outputSfMDataKeyframes}", + "inputB": "{SfMExpanding_1.output}", + "featuresFolders": [ + "{FeatureExtraction_1.output}" + ], + "method": "Exhaustive", + "matchingMode": "a/b" + }, + "internalInputs": { + "color": "#575963" + } + }, + "ImageMatching_1": { + "nodeType": "ImageMatching", + "position": [ + 400, + 0 + ], + "inputs": { + "input": "{KeyframeSelection_1.outputSfMDataKeyframes}", + "featuresFolders": [ + "{FeatureExtraction_1.output}" + ], + "method": "Exhaustive" + }, + "internalInputs": { + "label": "ImageMatchingKeyframes", + "color": "#575963" + } + }, + "ImageMatching_2": { + "nodeType": "ImageMatching", + "position": [ + 1600, + 400 + ], + "inputs": { + "input": "{ApplyCalibration_1.output}", + "featuresFolders": [ + "{FeatureExtraction_1.output}" + ], + "method": "Sequential", + "nbNeighbors": 20 + }, + "internalInputs": { + "color": "#80766f" + } + }, + "ImageMatching_3": { + "nodeType": "ImageMatching", + "position": [ + -200, + -500 + ], + "inputs": { + "input": "{FeatureExtraction_2.input}", + "featuresFolders": [ + "{FeatureExtraction_2.output}" + ] + }, + "internalInputs": { + "color": "#384a55" + } + }, + "ImageSegmentationBox_2": { + "nodeType": "ImageSegmentationBox", + "position": [ + 200, + 200 + ], + "inputs": { + "input": "{ImageDetectionPrompt_1.input}", + "bboxFolder": "{ImageDetectionPrompt_1.output}", + "maskInvert": true, + "keepFilename": true + }, + "internalInputs": { + "color": "#575963" + } + }, + "KeyframeSelection_1": { + "nodeType": "KeyframeSelection", + "position": [ + 200, + 0 + ], + "inputs": { + "inputPaths": [ + 
"{ApplyCalibration_1.output}" + ] + }, + "internalInputs": { + "color": "#575963" + } + }, + "MeshDecimate_1": { + "nodeType": "MeshDecimate", + "position": [ + 2600, + 0 + ], + "inputs": { + "input": "{MeshFiltering_2.outputMesh}", + "simplificationFactor": 0.05 + }, + "internalInputs": { + "color": "#4c594c" + } + }, + "MeshFiltering_2": { + "nodeType": "MeshFiltering", + "position": [ + 1800, + -500 + ], + "inputs": { + "inputMesh": "{Meshing_2.outputMesh}" + }, + "internalInputs": { + "color": "#384a55" + } + }, + "Meshing_2": { + "nodeType": "Meshing", + "position": [ + 1600, + -500 + ], + "inputs": { + "input": "{DepthMapFilter_2.input}", + "depthMapsFolder": "{DepthMapFilter_2.output}" + }, + "internalInputs": { + "color": "#384a55" + } + }, + "PrepareDenseScene_2": { + "nodeType": "PrepareDenseScene", + "position": [ + 1000, + -500 + ], + "inputs": { + "input": "{SfMExpanding_1.output}" + }, + "internalInputs": { + "color": "#384a55" + } + }, + "Publish_1": { + "nodeType": "Publish", + "position": [ + 3000, + -100 + ], + "inputs": { + "inputFiles": [ + "{ExportAnimatedCamera_1.output}", + "{ScenePreview_1.output}", + "{ExportDistortion_1.output}", + "{Texturing_2.output}" + ] + } + }, + "RelativePoseEstimating_1": { + "nodeType": "RelativePoseEstimating", + "position": [ + 400, + -500 + ], + "inputs": { + "input": "{TracksBuilding_1.input}", + "tracksFilename": "{TracksBuilding_1.output}", + "minInliers": 100 + }, + "internalInputs": { + "color": "#384a55" + } + }, + "RelativePoseEstimating_2": { + "nodeType": "RelativePoseEstimating", + "position": [ + 1000, + 0 + ], + "inputs": { + "input": "{TracksBuilding_2.input}", + "tracksFilename": "{TracksBuilding_2.output}", + "countIterations": 50000, + "minInliers": 100 + }, + "internalInputs": { + "color": "#575963" + } + }, + "ScenePreview_1": { + "nodeType": "ScenePreview", + "position": [ + 2800, + 200 + ], + "inputs": { + "cameras": "{ConvertSfMFormat_1.output}", + "model": "{MeshDecimate_1.output}", + "undistortedImages": "{ExportAnimatedCamera_1.outputUndistorted}", + "masks": "{ImageSegmentationBox_2.output}" + }, + "internalInputs": { + "color": "#4c594c" + } + }, + "SfMBootStraping_1": { + "nodeType": "SfMBootStraping", + "position": [ + 600, + -500 + ], + "inputs": { + "input": "{RelativePoseEstimating_1.input}", + "tracksFilename": "{RelativePoseEstimating_1.tracksFilename}", + "pairs": "{RelativePoseEstimating_1.output}" + }, + "internalInputs": { + "color": "#384a55" + } + }, + "SfMBootStraping_2": { + "nodeType": "SfMBootStraping", + "position": [ + 1200, + 0 + ], + "inputs": { + "input": "{RelativePoseEstimating_2.input}", + "tracksFilename": "{RelativePoseEstimating_2.tracksFilename}", + "pairs": "{RelativePoseEstimating_2.output}" + }, + "internalInputs": { + "color": "#575963" + } + }, + "SfMExpanding_1": { + "nodeType": "SfMExpanding", + "position": [ + 800, + -500 + ], + "inputs": { + "input": "{SfMBootStraping_1.output}", + "tracksFilename": "{SfMBootStraping_1.tracksFilename}", + "meshFilename": "{SfMBootStraping_1.meshFilename}" + }, + "internalInputs": { + "label": "SfMExpandingPhotog", + "color": "#384a55" + } + }, + "SfMExpanding_2": { + "nodeType": "SfMExpanding", + "position": [ + 1400, + 0 + ], + "inputs": { + "input": "{SfMBootStraping_2.output}", + "tracksFilename": "{SfMBootStraping_2.tracksFilename}", + "lockScenePreviouslyReconstructed": true, + "minAngleForTriangulation": 1.0, + "minAngleForLandmark": 0.5 + }, + "internalInputs": { + "comment": "Estimate cameras parameters for the keyframes.", + 
"label": "SfMExpandingKeys", + "color": "#575963" + } + }, + "SfMExpanding_3": { + "nodeType": "SfMExpanding", + "position": [ + 2200, + 200 + ], + "inputs": { + "input": "{TracksBuilding_3.input}", + "tracksFilename": "{TracksBuilding_3.output}", + "nbFirstUnstableCameras": 0, + "maxImagesPerGroup": 0, + "bundleAdjustmentMaxOutliers": 5000000, + "minNumberOfObservationsForTriangulation": 3, + "minAngleForTriangulation": 1.0, + "minAngleForLandmark": 0.5 + }, + "internalInputs": { + "comment": "Estimate cameras parameters for the complete camera tracking sequence.", + "label": "SfMExpandingAll", + "color": "#80766f" + } + }, + "Texturing_2": { + "nodeType": "Texturing", + "position": [ + 2000, + -500 + ], + "inputs": { + "input": "{Meshing_2.output}", + "imagesFolder": "{DepthMap_2.imagesFolder}", + "inputMesh": "{MeshFiltering_2.outputMesh}" + }, + "internalInputs": { + "color": "#384a55" + } + }, + "TracksBuilding_1": { + "nodeType": "TracksBuilding", + "position": [ + 200, + -500 + ], + "inputs": { + "input": "{FeatureMatching_4.input}", + "featuresFolders": "{FeatureMatching_4.featuresFolders}", + "matchesFolders": [ + "{FeatureMatching_4.output}" + ], + "describerTypes": "{FeatureMatching_4.describerTypes}" + }, + "internalInputs": { + "color": "#384a55" + } + }, + "TracksBuilding_2": { + "nodeType": "TracksBuilding", + "position": [ + 800, + 0 + ], + "inputs": { + "input": "{FeatureMatching_5.input}", + "featuresFolders": "{FeatureMatching_1.featuresFolders}", + "matchesFolders": [ + "{FeatureMatching_1.output}", + "{FeatureMatching_5.output}" + ], + "describerTypes": "{FeatureMatching_1.describerTypes}", + "filterTrackForks": true + }, + "internalInputs": { + "color": "#575963" + } + }, + "TracksBuilding_3": { + "nodeType": "TracksBuilding", + "position": [ + 2000, + 200 + ], + "inputs": { + "input": "{FeatureMatching_3.input}", + "featuresFolders": "{FeatureMatching_3.featuresFolders}", + "matchesFolders": [ + "{FeatureMatching_3.output}", + "{FeatureMatching_2.output}" + ], + "describerTypes": "{FeatureMatching_3.describerTypes}", + "minInputTrackLength": 5, + "filterTrackForks": true + }, + "internalInputs": { + "color": "#80766f" + } + } + } +} \ No newline at end of file diff --git a/meshroom/photogrammetryDraft.mg b/meshroom/photogrammetryDraft.mg new file mode 100644 index 0000000000..ace6962fa1 --- /dev/null +++ b/meshroom/photogrammetryDraft.mg @@ -0,0 +1,136 @@ +{ + "header": { + "nodesVersions": { + "CameraInit": "12.0", + "FeatureExtraction": "1.3", + "FeatureMatching": "2.0", + "ImageMatching": "2.0", + "MeshFiltering": "3.0", + "Meshing": "7.0", + "PrepareDenseScene": "3.1", + "Publish": "1.3", + "StructureFromMotion": "3.3", + "Texturing": "6.0" + }, + "releaseVersion": "2025.1.0-develop", + "fileVersion": "2.0", + "template": true + }, + "graph": { + "CameraInit_1": { + "nodeType": "CameraInit", + "position": [ + 0, + 0 + ], + "inputs": {} + }, + "FeatureExtraction_1": { + "nodeType": "FeatureExtraction", + "position": [ + 200, + 0 + ], + "inputs": { + "input": "{CameraInit_1.output}" + } + }, + "FeatureMatching_1": { + "nodeType": "FeatureMatching", + "position": [ + 600, + 0 + ], + "inputs": { + "input": "{ImageMatching_1.input}", + "featuresFolders": "{ImageMatching_1.featuresFolders}", + "imagePairsList": "{ImageMatching_1.output}", + "describerTypes": "{FeatureExtraction_1.describerTypes}" + } + }, + "ImageMatching_1": { + "nodeType": "ImageMatching", + "position": [ + 400, + 0 + ], + "inputs": { + "input": "{FeatureExtraction_1.input}", + "featuresFolders": [ + 
"{FeatureExtraction_1.output}" + ] + } + }, + "MeshFiltering_1": { + "nodeType": "MeshFiltering", + "position": [ + 1400, + 0 + ], + "inputs": { + "inputMesh": "{Meshing_1.outputMesh}" + } + }, + "Meshing_1": { + "nodeType": "Meshing", + "position": [ + 1200, + 0 + ], + "inputs": { + "input": "{PrepareDenseScene_1.input}" + } + }, + "PrepareDenseScene_1": { + "nodeType": "PrepareDenseScene", + "position": [ + 1000, + 0 + ], + "inputs": { + "input": "{StructureFromMotion_1.output}" + } + }, + "Publish_1": { + "nodeType": "Publish", + "position": [ + 1800, + 0 + ], + "inputs": { + "inputFiles": [ + "{Texturing_1.outputMesh}", + "{Texturing_1.outputMaterial}", + "{Texturing_1.outputTextures}" + ] + } + }, + "StructureFromMotion_1": { + "nodeType": "StructureFromMotion", + "position": [ + 800, + 0 + ], + "inputs": { + "input": "{FeatureMatching_1.input}", + "featuresFolders": "{FeatureMatching_1.featuresFolders}", + "matchesFolders": [ + "{FeatureMatching_1.output}" + ], + "describerTypes": "{FeatureMatching_1.describerTypes}" + } + }, + "Texturing_1": { + "nodeType": "Texturing", + "position": [ + 1600, + 0 + ], + "inputs": { + "input": "{Meshing_1.output}", + "imagesFolder": "{PrepareDenseScene_1.output}", + "inputMesh": "{MeshFiltering_1.outputMesh}" + } + } + } +} \ No newline at end of file diff --git a/meshroom/photogrammetryExperimental.mg b/meshroom/photogrammetryExperimental.mg new file mode 100644 index 0000000000..01ed019621 --- /dev/null +++ b/meshroom/photogrammetryExperimental.mg @@ -0,0 +1,200 @@ +{ + "header": { + "releaseVersion": "2025.1.0-develop", + "fileVersion": "2.0", + "template": true, + "nodesVersions": { + "CameraInit": "12.0", + "DepthMap": "5.0", + "DepthMapFilter": "4.0", + "FeatureExtraction": "1.3", + "FeatureMatching": "2.0", + "ImageMatching": "2.0", + "MeshFiltering": "3.0", + "Meshing": "7.0", + "PrepareDenseScene": "3.1", + "Publish": "1.3", + "RelativePoseEstimating": "3.0", + "SfMBootStraping": "3.0", + "SfMExpanding": "2.0", + "Texturing": "6.0", + "TracksBuilding": "1.0" + } + }, + "graph": { + "CameraInit_1": { + "nodeType": "CameraInit", + "position": [ + 0, + 0 + ], + "inputs": {} + }, + "DepthMapFilter_1": { + "nodeType": "DepthMapFilter", + "position": [ + 2000, + 0 + ], + "inputs": { + "input": "{DepthMap_1.input}", + "depthMapsFolder": "{DepthMap_1.output}" + } + }, + "DepthMap_1": { + "nodeType": "DepthMap", + "position": [ + 1800, + 0 + ], + "inputs": { + "input": "{PrepareDenseScene_1.input}", + "imagesFolder": "{PrepareDenseScene_1.output}" + } + }, + "FeatureExtraction_1": { + "nodeType": "FeatureExtraction", + "position": [ + 200, + 0 + ], + "inputs": { + "input": "{CameraInit_1.output}" + } + }, + "FeatureMatching_1": { + "nodeType": "FeatureMatching", + "position": [ + 600, + 0 + ], + "inputs": { + "input": "{ImageMatching_1.input}", + "featuresFolders": "{ImageMatching_1.featuresFolders}", + "imagePairsList": "{ImageMatching_1.output}", + "describerTypes": "{FeatureExtraction_1.describerTypes}" + } + }, + "ImageMatching_1": { + "nodeType": "ImageMatching", + "position": [ + 400, + 0 + ], + "inputs": { + "input": "{FeatureExtraction_1.input}", + "featuresFolders": [ + "{FeatureExtraction_1.output}" + ] + } + }, + "MeshFiltering_1": { + "nodeType": "MeshFiltering", + "position": [ + 2400, + 0 + ], + "inputs": { + "inputMesh": "{Meshing_1.outputMesh}" + } + }, + "Meshing_1": { + "nodeType": "Meshing", + "position": [ + 2200, + 0 + ], + "inputs": { + "input": "{DepthMapFilter_1.input}", + "depthMapsFolder": "{DepthMapFilter_1.output}" + } + 
}, + "PrepareDenseScene_1": { + "nodeType": "PrepareDenseScene", + "position": [ + 1600, + 0 + ], + "inputs": { + "input": "{SfMExpanding_1.output}" + } + }, + "Publish_1": { + "nodeType": "Publish", + "position": [ + 2800, + 0 + ], + "inputs": { + "inputFiles": [ + "{Texturing_1.outputMesh}", + "{Texturing_1.outputMaterial}", + "{Texturing_1.outputTextures}" + ] + } + }, + "RelativePoseEstimating_1": { + "nodeType": "RelativePoseEstimating", + "position": [ + 1000, + 0 + ], + "inputs": { + "input": "{TracksBuilding_1.input}", + "tracksFilename": "{TracksBuilding_1.output}", + "minInliers": 100 + } + }, + "SfMBootStraping_1": { + "nodeType": "SfMBootStraping", + "position": [ + 1200, + 0 + ], + "inputs": { + "input": "{RelativePoseEstimating_1.input}", + "tracksFilename": "{RelativePoseEstimating_1.tracksFilename}", + "pairs": "{RelativePoseEstimating_1.output}" + } + }, + "SfMExpanding_1": { + "nodeType": "SfMExpanding", + "position": [ + 1400, + 0 + ], + "inputs": { + "input": "{SfMBootStraping_1.output}", + "tracksFilename": "{SfMBootStraping_1.tracksFilename}", + "meshFilename": "{SfMBootStraping_1.meshFilename}" + } + }, + "Texturing_1": { + "nodeType": "Texturing", + "position": [ + 2600, + 0 + ], + "inputs": { + "input": "{Meshing_1.output}", + "imagesFolder": "{DepthMap_1.imagesFolder}", + "inputMesh": "{MeshFiltering_1.outputMesh}" + } + }, + "TracksBuilding_1": { + "nodeType": "TracksBuilding", + "position": [ + 800, + 0 + ], + "inputs": { + "input": "{FeatureMatching_1.input}", + "featuresFolders": "{FeatureMatching_1.featuresFolders}", + "matchesFolders": [ + "{FeatureMatching_1.output}" + ], + "describerTypes": "{FeatureMatching_1.describerTypes}" + } + } + } +} \ No newline at end of file diff --git a/meshroom/photometricStereo.mg b/meshroom/photometricStereo.mg new file mode 100644 index 0000000000..07f5f8c37d --- /dev/null +++ b/meshroom/photometricStereo.mg @@ -0,0 +1,77 @@ +{ + "header": { + "releaseVersion": "2025.1.0-develop", + "fileVersion": "2.0", + "template": true, + "nodesVersions": { + "CameraInit": "12.0", + "LightingCalibration": "1.0", + "PhotometricStereo": "1.0", + "Publish": "1.3", + "SphereDetection": "1.0" + } + }, + "graph": { + "CameraInit_1": { + "nodeType": "CameraInit", + "position": [ + 0, + 0 + ], + "inputs": {} + }, + "LightingCalibration_1": { + "nodeType": "LightingCalibration", + "position": [ + 400, + 0 + ], + "inputs": { + "inputPath": "{SphereDetection_1.input}", + "inputDetection": "{SphereDetection_1.output}" + } + }, + "PhotometricStereo_1": { + "nodeType": "PhotometricStereo", + "position": [ + 600, + 0 + ], + "inputs": { + "inputPath": "{LightingCalibration_1.inputPath}", + "pathToJSONLightFile": "{LightingCalibration_1.outputFile}" + } + }, + "Publish_1": { + "nodeType": "Publish", + "position": [ + 800, + 0 + ], + "inputs": { + "inputFiles": [ + "{PhotometricStereo_1.outputSfmDataNormal}", + "{PhotometricStereo_1.normals}", + "{PhotometricStereo_1.normalsWorld}", + "{PhotometricStereo_1.albedo}", + "{PhotometricStereo_1.outputSfmDataAlbedo}", + "{PhotometricStereo_1.inputPath}", + "{PhotometricStereo_1.outputSfmDataNormalPNG}", + "{PhotometricStereo_1.normalsPNG}", + "{PhotometricStereo_1.pathToJSONLightFile}" + ] + } + }, + "SphereDetection_1": { + "nodeType": "SphereDetection", + "position": [ + 200, + 0 + ], + "inputs": { + "input": "{CameraInit_1.output}", + "autoDetect": true + } + } + } +}