diff --git a/src/aliceVision/feature/FeaturesPerView.hpp b/src/aliceVision/feature/FeaturesPerView.hpp
index 3df7f042bf..9859242ef6 100644
--- a/src/aliceVision/feature/FeaturesPerView.hpp
+++ b/src/aliceVision/feature/FeaturesPerView.hpp
@@ -89,6 +89,8 @@ class FeaturesPerView
      */
     feature::MapFeaturesPerView& getData() { return _data; }
 
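+    /**
+     * @brief Get the FeaturesPerView data (read-only access)
+     * @return const reference to the MapFeaturesPerView container
+     */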
+    const feature::MapFeaturesPerView& getData() const { return _data; }
+
   private:
     /// PointFeature array per ViewId of the considered SfMData container
     MapFeaturesPerView _data;
diff --git a/src/aliceVision/sfm/pipeline/sequential/ReconstructionEngine_sequentialSfM.cpp b/src/aliceVision/sfm/pipeline/sequential/ReconstructionEngine_sequentialSfM.cpp
index ca8c0542b8..de9c241237 100644
--- a/src/aliceVision/sfm/pipeline/sequential/ReconstructionEngine_sequentialSfM.cpp
+++ b/src/aliceVision/sfm/pipeline/sequential/ReconstructionEngine_sequentialSfM.cpp
@@ -246,7 +246,10 @@ std::size_t ReconstructionEngine_sequentialSfM::fuseMatchesIntoTracks()
         const aliceVision::matching::PairwiseMatches& matches = *_pairwiseMatches;
 
         ALICEVISION_LOG_DEBUG("Track building");
-        tracksBuilder.build(matches);
+        if (_params.mergeTracks)
+            tracksBuilder.build(matches, _featuresPerView->getData());
+        else
+            tracksBuilder.build(matches);
 
         ALICEVISION_LOG_DEBUG("Track filtering");
         tracksBuilder.filter(_params.filterTrackForks, _params.minInputTrackLength);
diff --git a/src/aliceVision/sfm/pipeline/sequential/ReconstructionEngine_sequentialSfM.hpp b/src/aliceVision/sfm/pipeline/sequential/ReconstructionEngine_sequentialSfM.hpp
index 483fd11d8b..7f003851bd 100644
--- a/src/aliceVision/sfm/pipeline/sequential/ReconstructionEngine_sequentialSfM.hpp
+++ b/src/aliceVision/sfm/pipeline/sequential/ReconstructionEngine_sequentialSfM.hpp
@@ -63,6 +63,7 @@ class ReconstructionEngine_sequentialSfM : public ReconstructionEngine
         EFeatureConstraint featureConstraint = EFeatureConstraint::BASIC;
         float minAngleInitialPair = 5.0f;
         float maxAngleInitialPair = 40.0f;
+        bool mergeTracks = false;
         bool filterTrackForks = true;
         robustEstimation::ERobustEstimator localizerEstimator = robustEstimation::ERobustEstimator::ACRANSAC;
         double localizerEstimatorError = std::numeric_limits<double>::infinity();
diff --git a/src/aliceVision/track/TracksBuilder.cpp b/src/aliceVision/track/TracksBuilder.cpp
index 0030defc1a..b39fe1d381 100644
--- a/src/aliceVision/track/TracksBuilder.cpp
+++ b/src/aliceVision/track/TracksBuilder.cpp
@@ -10,14 +10,38 @@
 #include <lemon/list_graph.h>
 #include <lemon/unionfind.h>
 
+/**
+ * @brief Contains necessary information to uniquely identify a duplicate feature
+ */
+struct DuplicateFeatureId
+{
+    DuplicateFeatureId(float x_, float y_, float scale_)
+      : x(x_),
+        y(y_),
+        scale(scale_)
+    {}
+
+    // for uniqueness test when used as a map key
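+    // exact float comparisons are intended: duplicate features originate from the same
+    // detection and are expected to share identical position and scale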
+    bool operator<(const DuplicateFeatureId& other) const
+    {
+        if (x == other.x)
+        {
+            if (y == other.y)
+                return scale < other.scale;
+            return y < other.y;
+        }
+        return x < other.x;
+    }
+
+    float x, y, scale;
+};
+
 namespace aliceVision {
 namespace track {
 
 using namespace aliceVision::matching;
 using namespace lemon;
 
-/// IndexedFeaturePair is: map<viewId, keypointId>
-using IndexedFeaturePair = std::pair<std::size_t, KeypointId>;
 using IndexMap = lemon::ListDigraph::NodeMap<std::size_t>;
 using UnionFindObject = lemon::UnionFindEnum<IndexMap>;
 
@@ -42,7 +66,7 @@ TracksBuilder::TracksBuilder() { _d.reset(new TracksBuilderData()); }
 
 TracksBuilder::~TracksBuilder() = default;
 
-void TracksBuilder::build(const PairwiseMatches& pairwiseMatches)
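+// Shared implementation of TracksBuilder::build(): creates one union-find node per referenced
+// feature and joins the nodes of matched features. 'map_indexToNode' is filled as an output so
+// callers can join additional nodes afterwards (see mergeTracks below).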
+void buildTracks(const PairwiseMatches& pairwiseMatches, std::unique_ptr<TracksBuilderData>& _d, MapIndexToNode& map_indexToNode)
 {
     typedef std::set<IndexedFeaturePair> SetIndexedPair;
 
@@ -72,7 +96,6 @@ void TracksBuilder::build(const PairwiseMatches& pairwiseMatches)
     }
 
     // build the node indirection for each referenced feature
-    MapIndexToNode map_indexToNode;
     map_indexToNode.reserve(allFeatures.size());
     _d->map_nodeToIndex.reserve(allFeatures.size());
 
@@ -114,6 +137,122 @@ void TracksBuilder::build(const PairwiseMatches& pairwiseMatches)
     }
 }
 
+// Merge tracks that share duplicate features, i.e. features with the same position, scale
+// and describer type but different orientations, by joining their nodes in the union-find
+// structure and recording each duplicate's representative feature id in _duplicateFeaturesMap.
+void mergeTracks(const feature::MapFeaturesPerView& featuresPerView,
+                 const MapIndexToNode& map_indexToNode,
+                 const PairwiseMatches& pairwiseMatches,
+                 std::unique_ptr<TracksBuilderData>& _d,
+                 stl::flat_map<IndexedFeaturePair, size_t>& _duplicateFeaturesMap)
+{
+    // map of (viewId) to
+    //    map of (descType) to
+    //        map of DuplicateFeatureId(x, y, scale) to
+    //            pair of (set<featureId>, node)
+    HashMap<size_t, HashMap<feature::EImageDescriberType, HashMap<DuplicateFeatureId, std::pair<std::set<size_t>, MapIndexToNode::mapped_type>>>>
+      duplicateFeaturesPerView;
+
+    // per viewId pair
+    for (const auto& matchesPerDescIt : pairwiseMatches)
+    {
+        const std::size_t& I = matchesPerDescIt.first.first;
+        const std::size_t& J = matchesPerDescIt.first.second;
+        const MatchesPerDescType& matchesPerDesc = matchesPerDescIt.second;
+
+        auto& featuresPerDescI = featuresPerView.at(I);
+        auto& featuresPerDescJ = featuresPerView.at(J);
+        auto& duplicateFeaturesPerDescI = duplicateFeaturesPerView[I];
+        auto& duplicateFeaturesPerDescJ = duplicateFeaturesPerView[J];
+
+        // per descType
+        for (const auto& matchesIt : matchesPerDesc)
+        {
+            const feature::EImageDescriberType descType = matchesIt.first;
+            const IndMatches& matches = matchesIt.second;
+
+            auto& featuresI = featuresPerDescI.at(descType);
+            auto& featuresJ = featuresPerDescJ.at(descType);
+            auto& duplicateFeaturesI = duplicateFeaturesPerDescI[descType];
+            auto& duplicateFeaturesJ = duplicateFeaturesPerDescJ[descType];
+
+            // per features match
+            for (const IndMatch& m : matches)
+            {
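+                // view I side of the match; the view J side below mirrors this logic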
+                {
+                    auto& featureI = featuresI[m._i];
+                    IndexedFeaturePair pairI(I, KeypointId(descType, m._i));
+                    auto& nodeI = map_indexToNode.at(pairI);
+                    DuplicateFeatureId duplicateIdI(featureI.x(), featureI.y(), featureI.scale());
+                    const auto& duplicateFeaturesI_it = duplicateFeaturesI.find(duplicateIdI);
+                    // if no duplicates yet found, add to map and update values
+                    if (duplicateFeaturesI_it == duplicateFeaturesI.end())
+                        duplicateFeaturesI[duplicateIdI] = std::make_pair(std::set<size_t>({m._i}), nodeI);
+                    else
+                    {
+                        auto& duplicateFeatureIdsI = duplicateFeaturesI_it->second.first;
+                        auto& duplicateFeatureNodeI = duplicateFeaturesI_it->second.second;
+                        // if not already in corresponding duplicates set, add to set and join nodes
+                        if (duplicateFeatureIdsI.insert(m._i).second)
+                        {
+                            _d->tracksUF->join(nodeI, duplicateFeatureNodeI);
+                        }
+                    }
+                }
+                {
+                    auto& featureJ = featuresJ[m._j];
+                    IndexedFeaturePair pairJ(J, KeypointId(descType, m._j));
+                    auto& nodeJ = map_indexToNode.at(pairJ);
+                    DuplicateFeatureId duplicateIdJ(featureJ.x(), featureJ.y(), featureJ.scale());
+                    const auto& duplicateFeaturesJ_it = duplicateFeaturesJ.find(duplicateIdJ);
+                    // if no duplicates yet found, add to map and update values
+                    if (duplicateFeaturesJ_it == duplicateFeaturesJ.end())
+                        duplicateFeaturesJ[duplicateIdJ] = std::make_pair(std::set<size_t>({m._j}), nodeJ);
+                    else
+                    {
+                        auto& duplicateFeatureIdsJ = duplicateFeaturesJ_it->second.first;
+                        auto& duplicateFeatureNodeJ = duplicateFeaturesJ_it->second.second;
+                        // if not already in corresponding duplicates set, add to set and join nodes
+                        if (duplicateFeatureIdsJ.insert(m._j).second)
+                        {
+                            _d->tracksUF->join(nodeJ, duplicateFeatureNodeJ);
+                        }
+                    }
+                }
+            }
+        }
+    }
+
+    // fill duplicate features map
+    for (const auto& [viewId, duplicateFeaturesPerDesc] : duplicateFeaturesPerView)
+        for (const auto& [descType, duplicateFeatures] : duplicateFeaturesPerDesc)
+            for (const auto& [duplicateFeatureId, duplicateFeature] : duplicateFeatures)
+            {
+                auto& duplicateFeatureIdsSet = duplicateFeature.first;
+                size_t representativeFeatureId = *duplicateFeatureIdsSet.begin();
+                for (const auto& featureId : duplicateFeatureIdsSet)
+                {
+                    const IndexedFeaturePair indexedFeaturePair_i(viewId, KeypointId(descType, featureId));
+                    _duplicateFeaturesMap[indexedFeaturePair_i] = representativeFeatureId;
+                }
+            }
+}
+
+void TracksBuilder::build(const PairwiseMatches& pairwiseMatches)
+{
+    // the node indirection for each referenced feature
+    MapIndexToNode map_indexToNode;
+    buildTracks(pairwiseMatches, _d, map_indexToNode);
+}
+
+void TracksBuilder::build(const PairwiseMatches& pairwiseMatches, const feature::MapFeaturesPerView& featuresPerView)
+{
+    // the node indirection for each referenced feature
+    MapIndexToNode map_indexToNode;
+    buildTracks(pairwiseMatches, _d, map_indexToNode);
+    mergeTracks(featuresPerView, map_indexToNode, pairwiseMatches, _d, _duplicateFeaturesMap);
+}
+
 void TracksBuilder::filter(bool clearForks, std::size_t minTrackLength, bool multithreaded)
 {
     // remove bad tracks:
@@ -129,14 +268,30 @@ void TracksBuilder::filter(bool clearForks, std::size_t minTrackLength, bool mul
     {
 #pragma omp single nowait
         {
-            std::size_t cpt = 0;
-            std::set<std::size_t> myset;
+            bool flag = false;
+            stl::flat_map<size_t, IndexedFeaturePair> myset;
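+            // 'flag' is set when two distinct features of the same view end up in this track (a fork),
+            // after duplicate features have been redirected to their representative via _duplicateFeaturesMap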
             for (lemon::UnionFindEnum<IndexMap>::ItemIt iit(*_d->tracksUF, cit); iit != INVALID; ++iit)
             {
-                myset.insert(_d->map_nodeToIndex[iit].first);
-                ++cpt;
+                IndexedFeaturePair currentPair = _d->map_nodeToIndex[iit];
+                {
+                    const auto& duplicateIt = _duplicateFeaturesMap.find(currentPair);
+                    if (duplicateIt != _duplicateFeaturesMap.end())
+                        currentPair.second.featIndex = duplicateIt->second;
+                }
+                const auto& myIt = myset.find(currentPair.first);
+                if (myIt != myset.end())
+                {
+                    if (myIt->second < currentPair || currentPair < myIt->second)
+                    {
+                        flag = true;
+                    }
+                }
+                else
+                {
+                    myset[currentPair.first] = currentPair;
+                }
             }
-            if ((clearForks && myset.size() != cpt) || myset.size() < minTrackLength)
+            if ((clearForks && flag) || myset.size() < minTrackLength)
             {
 #pragma omp critical
                 set_classToErase.insert(cit.operator int());
@@ -186,7 +341,12 @@ void TracksBuilder::exportToSTL(TracksMap& allTracks) const
             const IndexedFeaturePair& currentPair = _d->map_nodeToIndex.at(iit);
             // all descType inside the track will be the same
             outTrack.descType = currentPair.second.descType;
-            outTrack.featPerView[currentPair.first] = currentPair.second.featIndex;
+            // Warning: if clearForks is false, forks in the same view overwrite the feature index
+            const auto& duplicateIt = _duplicateFeaturesMap.find(currentPair);
+            if (duplicateIt != _duplicateFeaturesMap.end())
+                outTrack.featPerView[currentPair.first] = duplicateIt->second;
+            else
+                outTrack.featPerView[currentPair.first] = currentPair.second.featIndex;
         }
     }
 }
diff --git a/src/aliceVision/track/TracksBuilder.hpp b/src/aliceVision/track/TracksBuilder.hpp
index d9f60a8a1d..d3f8e2194b 100644
--- a/src/aliceVision/track/TracksBuilder.hpp
+++ b/src/aliceVision/track/TracksBuilder.hpp
@@ -7,12 +7,16 @@
 #pragma once
 
 #include <aliceVision/track/Track.hpp>
+#include <aliceVision/feature/FeaturesPerView.hpp>
 
 #include <memory>
 
 namespace aliceVision {
 namespace track {
 
+// IndexedFeaturePair is: pair<viewId, keypointId>
+using IndexedFeaturePair = std::pair<std::size_t, KeypointId>;
+
 struct TracksBuilderData;
 
 /**
@@ -53,6 +57,14 @@ class TracksBuilder
      */
     void build(const PairwiseMatches& pairwiseMatches);
 
+    /**
+     * @brief Build tracks for a given series of pairWise matches
+     * and merge tracks that share duplicate features
+     * @param[in] pairwiseMatches PairWise matches
+     * @param[in] featuresPerView Features per view, used to detect duplicate features
+     */
+    void build(const PairwiseMatches& pairwiseMatches, const feature::MapFeaturesPerView& featuresPerView);
+
     /**
      * @brief Remove bad tracks (too short or track with ids collision)
      * @param[in] clearForks: remove tracks with multiple observation in a single image
@@ -82,6 +94,7 @@ class TracksBuilder
 
   private:
     std::unique_ptr<TracksBuilderData> _d;
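+    /// Maps each feature involved in a duplicate (viewId, KeypointId) to the feature index
+    /// of the representative duplicate in the same view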
+    stl::flat_map<IndexedFeaturePair, size_t> _duplicateFeaturesMap;
 };
 
 }  // namespace track
diff --git a/src/software/pipeline/main_incrementalSfM.cpp b/src/software/pipeline/main_incrementalSfM.cpp
index 1273d97105..80ff8e1189 100644
--- a/src/software/pipeline/main_incrementalSfM.cpp
+++ b/src/software/pipeline/main_incrementalSfM.cpp
@@ -107,93 +107,98 @@ int aliceVision_main(int argc, char **argv)
   po::options_description optionalParams("Optional parameters");
   optionalParams.add_options()
     ("featuresFolders,f", po::value<std::vector<std::string>>(&featuresFolders)->multitoken(),
-      "Path to folder(s) containing the extracted features.")
+     "Path to folder(s) containing the extracted features.")
     ("matchesFolders,m", po::value<std::vector<std::string>>(&matchesFolders)->multitoken(),
-      "Path to folder(s) in which computed matches are stored.")
+     "Path to folder(s) in which computed matches are stored.")
     ("outputViewsAndPoses", po::value<std::string>(&outputSfMViewsAndPoses)->default_value(outputSfMViewsAndPoses),
-      "Path to the output SfMData file (with only views and poses).")
+     "Path to the output SfMData file (with only views and poses).")
     ("extraInfoFolder", po::value<std::string>(&extraInfoFolder)->default_value(extraInfoFolder),
-      "Folder for intermediate reconstruction files and additional reconstruction information files.")
+     "Folder for intermediate reconstruction files and additional reconstruction information files.")
     ("describerTypes,d", po::value<std::string>(&describerTypesName)->default_value(describerTypesName),
-      feature::EImageDescriberType_informations().c_str())
+     feature::EImageDescriberType_informations().c_str())
     ("interFileExtension", po::value<std::string>(&sfmParams.sfmStepFileExtension)->default_value(sfmParams.sfmStepFileExtension),
-      "Extension of the intermediate file export.")
+     "Extension of the intermediate file export.")
     ("maxNumberOfMatches", po::value<int>(&maxNbMatches)->default_value(maxNbMatches),
-      "Maximum number of matches per image pair (and per feature type). "
-      "This can be useful to have a quick reconstruction overview. 0 means no limit.")
+     "Maximum number of matches per image pair (and per feature type). "
+     "This can be useful to have a quick reconstruction overview. 0 means no limit.")
     ("minNumberOfMatches", po::value<int>(&minNbMatches)->default_value(minNbMatches),
-      "Minimum number of matches per image pair (and per feature type). "
-      "This can be useful to have a meaningful reconstruction with accurate keypoints. 0 means no limit.")
+     "Minimum number of matches per image pair (and per feature type). "
+     "This can be useful to have a meaningful reconstruction with accurate keypoints. 0 means no limit.")
     ("minInputTrackLength", po::value<int>(&sfmParams.minInputTrackLength)->default_value(sfmParams.minInputTrackLength),
-      "Minimum track length in input of SfM.")
+     "Minimum track length in input of SfM.")
     ("minAngleForTriangulation", po::value<double>(&sfmParams.minAngleForTriangulation)->default_value(sfmParams.minAngleForTriangulation),
-      "Minimum angle for triangulation.")
+     "Minimum angle for triangulation.")
     ("minAngleForLandmark", po::value<double>(&sfmParams.minAngleForLandmark)->default_value(sfmParams.minAngleForLandmark),
-      "Minimum angle for landmark.")
+     "Minimum angle for landmark.")
     ("maxReprojectionError", po::value<double>(&sfmParams.maxReprojectionError)->default_value(sfmParams.maxReprojectionError),
-      "Maximum reprojection error.")
+     "Maximum reprojection error.")
     ("minAngleInitialPair", po::value<float>(&sfmParams.minAngleInitialPair)->default_value(sfmParams.minAngleInitialPair),
-      "Minimum angle for the initial pair.")
+     "Minimum angle for the initial pair.")
     ("maxAngleInitialPair", po::value<float>(&sfmParams.maxAngleInitialPair)->default_value(sfmParams.maxAngleInitialPair),
-      "Maximum angle for the initial pair.")
-    ("minNumberOfObservationsForTriangulation", po::value<std::size_t>(&sfmParams.minNbObservationsForTriangulation)->default_value(sfmParams.minNbObservationsForTriangulation),
-      "Minimum number of observations to triangulate a point.\n"
-      "Set it to 3 (or more) reduces drastically the noise in the point cloud, but the number of final poses is a little bit reduced (from 1.5% to 11% on the tested datasets).\n"
-      "Note: set it to 0 or 1 to use the old triangulation algorithm (using 2 views only) during resection.")
+     "Maximum angle for the initial pair.")
+    ("minNumberOfObservationsForTriangulation",
+     po::value<std::size_t>(&sfmParams.minNbObservationsForTriangulation)->default_value(sfmParams.minNbObservationsForTriangulation),
+     "Minimum number of observations to triangulate a point.\n"
+     "Set it to 3 (or more) reduces drastically the noise in the point cloud, but the number of final poses is a little bit reduced (from 1.5% to 11% "
+     "on the tested datasets).\n"
+     "Note: set it to 0 or 1 to use the old triangulation algorithm (using 2 views only) during resection.")
     ("initialPairA", po::value<std::string>(&initialPairString.first)->default_value(initialPairString.first),
-      "UID or filepath or filename of the first image.")
+     "UID or filepath or filename of the first image.")
     ("initialPairB", po::value<std::string>(&initialPairString.second)->default_value(initialPairString.second),
-      "UID or filepath or filename of the second image.")
+     "UID or filepath or filename of the second image.")
     ("lockAllIntrinsics", po::value<bool>(&sfmParams.lockAllIntrinsics)->default_value(sfmParams.lockAllIntrinsics),
-      "Force lock of all camera intrinsic parameters, so they will not be refined during Bundle Adjustment.")
+     "Force lock of all camera intrinsic parameters, so they will not be refined during Bundle Adjustment.")
     ("minNbCamerasToRefinePrincipalPoint", po::value<int>(&sfmParams.minNbCamerasToRefinePrincipalPoint)->default_value(sfmParams.minNbCamerasToRefinePrincipalPoint),
-        "Minimal number of cameras to refine the principal point of the cameras (one of the intrinsic parameters of the camera). "
-        "If we do not have enough cameras, the principal point in consider is considered in the center of the image. "
-        "If minNbCamerasToRefinePrincipalPoint<=0, the principal point is never refined. "
-        "If minNbCamerasToRefinePrincipalPoint==1, the principal point is always refined.")
+     "Minimal number of cameras to refine the principal point of the cameras (one of the intrinsic parameters of the camera). "
+     "If we do not have enough cameras, the principal point in consider is considered in the center of the image. "
+     "If minNbCamerasToRefinePrincipalPoint<=0, the principal point is never refined. "
+     "If minNbCamerasToRefinePrincipalPoint==1, the principal point is always refined.")
     ("useLocalBA,l", po::value<bool>(&sfmParams.useLocalBundleAdjustment)->default_value(sfmParams.useLocalBundleAdjustment),
-      "Enable/Disable the Local bundle adjustment strategy.\n"
-      "It reduces the reconstruction time, especially for big datasets (500+ images).")
+     "Enable/Disable the Local bundle adjustment strategy.\n"
+     "It reduces the reconstruction time, especially for big datasets (500+ images).")
     ("localBAGraphDistance", po::value<int>(&sfmParams.localBundelAdjustementGraphDistanceLimit)->default_value(sfmParams.localBundelAdjustementGraphDistanceLimit),
-      "Graph-distance limit setting the Active region in the Local Bundle Adjustment strategy.")
+     "Graph-distance limit setting the Active region in the Local Bundle Adjustment strategy.")
     ("nbFirstUnstableCameras", po::value<std::size_t>(&sfmParams.nbFirstUnstableCameras)->default_value(sfmParams.nbFirstUnstableCameras),
-      "Number of cameras for which the bundle adjustment is performed every single time a camera is added, leading to more stable "
-      "results while the computations are not too expensive since there is not much data. Past this number, the bundle adjustment "
-      "will only be performed once for N added cameras.")
+     "Number of cameras for which the bundle adjustment is performed every single time a camera is added, leading to more stable "
+     "results while the computations are not too expensive since there is not much data. Past this number, the bundle adjustment "
+     "will only be performed once for N added cameras.")
     ("maxImagesPerGroup", po::value<std::size_t>(&sfmParams.maxImagesPerGroup)->default_value(sfmParams.maxImagesPerGroup),
-      "Maximum number of cameras that can be added before the bundle adjustment is performed. This prevents adding too much data "
-      "at once without performing the bundle adjustment.")
+     "Maximum number of cameras that can be added before the bundle adjustment is performed. This prevents adding too much data "
+     "at once without performing the bundle adjustment.")
     ("bundleAdjustmentMaxOutliers", po::value<int>(&sfmParams.bundleAdjustmentMaxOutliers)->default_value(sfmParams.bundleAdjustmentMaxOutliers),
-      "Threshold for the maximum number of outliers allowed at the end of a bundle adjustment iteration."
-      "Using a negative value for this threshold will disable BA iterations.")
+     "Threshold for the maximum number of outliers allowed at the end of a bundle adjustment iteration."
+     "Using a negative value for this threshold will disable BA iterations.")
     ("localizerEstimator", po::value<robustEstimation::ERobustEstimator>(&sfmParams.localizerEstimator)->default_value(sfmParams.localizerEstimator),
-      "Estimator type used to localize cameras (acransac (default), ransac, lsmeds, loransac, maxconsensus)")
+     "Estimator type used to localize cameras (acransac (default), ransac, lsmeds, loransac, maxconsensus)")
     ("localizerEstimatorError", po::value<double>(&sfmParams.localizerEstimatorError)->default_value(0.0),
-      "Reprojection error threshold (in pixels) for the localizer estimator (0 for default value according to the estimator).")
+     "Reprojection error threshold (in pixels) for the localizer estimator (0 for default value according to the estimator).")
     ("localizerEstimatorMaxIterations", po::value<std::size_t>(&sfmParams.localizerEstimatorMaxIterations)->default_value(sfmParams.localizerEstimatorMaxIterations),
-      "Max number of RANSAC iterations.")
+     "Max number of RANSAC iterations.")
     ("useOnlyMatchesFromInputFolder", po::value<bool>(&useOnlyMatchesFromInputFolder)->default_value(useOnlyMatchesFromInputFolder),
-      "Use only matches from the input matchesFolder parameter.\n"
-      "Matches folders previously added to the SfMData file will be ignored.")
+     "Use only matches from the input matchesFolder parameter.\n"
+     "Matches folders previously added to the SfMData file will be ignored.")
+    ("mergeTracks", po::value<bool>(&sfmParams.mergeTracks)->default_value(sfmParams.mergeTracks),
+     "Enable/Disable the track merging. The merging between two tracks is made when they have duplicate features coming from the same original "
+     "feature (same describer type, same 2D position in the same view, same scale, but different rotations and different feature id).\n")
     ("filterTrackForks", po::value<bool>(&sfmParams.filterTrackForks)->default_value(sfmParams.filterTrackForks),
-      "Enable/Disable the track forks removal. A track contains a fork when incoherent matches leads to multiple features in the same image for a single track.\n")
+     "Enable/Disable the track forks removal. A track contains a fork when incoherent matches leads to multiple features in the same image for a "
+     "single track.\n")
     ("useRigConstraint", po::value<bool>(&sfmParams.rig.useRigConstraint)->default_value(sfmParams.rig.useRigConstraint),
-      "Enable/Disable rig constraint.\n")
+     "Enable/Disable rig constraint.\n")
     ("rigMinNbCamerasForCalibration", po::value<int>(&sfmParams.rig.minNbCamerasForCalibration)->default_value(sfmParams.rig.minNbCamerasForCalibration),
-        "Minimal number of cameras to start the calibration of the rig.\n")
+     "Minimal number of cameras to start the calibration of the rig.\n")
     ("lockScenePreviouslyReconstructed", po::value<bool>(&lockScenePreviouslyReconstructed)->default_value(lockScenePreviouslyReconstructed),
-      "Lock/Unlock scene previously reconstructed.\n")
+     "Lock/Unlock scene previously reconstructed.\n")
     ("observationConstraint", po::value<EFeatureConstraint>(&sfmParams.featureConstraint)->default_value(sfmParams.featureConstraint),
-      "Use of an observation constraint : basic, scale the observation or use of the covariance.\n")
+     "Use of an observation constraint : basic, scale the observation or use of the covariance.\n")
     ("computeStructureColor", po::value<bool>(&computeStructureColor)->default_value(computeStructureColor),
-      "Compute each 3D point color.\n")
+     "Compute each 3D point color.\n")
     ("useAutoTransform", po::value<bool>(&useAutoTransform)->default_value(useAutoTransform),
-      "Transform the result with the alignment method 'AUTO'.\n")
+     "Transform the result with the alignment method 'AUTO'.\n")
     ("randomSeed", po::value<int>(&randomSeed)->default_value(randomSeed),
-      "This seed value will generate a sequence using a linear random generator. Set -1 to use a random seed.")
+     "This seed value will generate a sequence using a linear random generator. Set -1 to use a random seed.")
     ("logIntermediateSteps", po::value<bool>(&sfmParams.logIntermediateSteps)->default_value(logIntermediateSteps),
-      "If set to true, the current state of the scene will be dumped as an SfMData file every 3 resections.")
-    ;
+     "If set to true, the current state of the scene will be dumped as an SfMData file every 3 resections.");
 
   CmdLine cmdline("Sequential/Incremental reconstruction.\n"
                   "This program performs incremental SfM (Initial Pair Essential + Resection).\n"