diff --git a/build.rs b/build.rs index d52392c..4c82234 100644 --- a/build.rs +++ b/build.rs @@ -13,9 +13,9 @@ fn main() { .unwrap_or_else(|| "/opt/ortools".into()); cc::Build::new() .cpp(true) - .flag("-std=c++17") + .flags(["-std=c++17", "-DOR_PROTO_DLL="]) .file("src/cp_sat_wrapper.cpp") - .include(&[&ortools_prefix, "/include"].concat()) + .include([&ortools_prefix, "/include"].concat()) .compile("cp_sat_wrapper.a"); println!("cargo:rustc-link-lib=dylib=ortools"); diff --git a/src/builder.rs b/src/builder.rs index dc99381..ff01148 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -327,15 +327,26 @@ impl CpModelBuilder { /// assert!(x_val != z_val); /// assert!(y_val != z_val); /// ``` - pub fn add_all_different(&mut self, vars: impl IntoIterator) -> Constraint { + pub fn add_all_different( + &mut self, + exprs: impl IntoIterator>, + ) -> Constraint { self.add_cst(CstEnum::AllDiff(proto::AllDifferentConstraintProto { - vars: vars.into_iter().map(|v| v.0).collect(), + exprs: exprs + .into_iter() + .map(Into::into) + .map(|expr| proto::LinearExpressionProto { + vars: expr.vars.into_vec(), + coeffs: expr.coeffs.into_vec(), + offset: expr.constant, + }) + .collect(), })) } /// Adds a linear constraint. /// - /// # Exemple + /// # Example /// /// ``` /// # use cp_sat::builder::CpModelBuilder; @@ -380,7 +391,7 @@ impl CpModelBuilder { /// Adds an equality constraint between 2 linear expressions. /// - /// # Exemple + /// # Example /// /// ``` /// # use cp_sat::builder::{CpModelBuilder, LinearExpr}; @@ -405,7 +416,7 @@ impl CpModelBuilder { /// Adds a greater or equal constraint between 2 linear expressions. /// - /// # Exemple + /// # Example /// /// ``` /// # use cp_sat::builder::{CpModelBuilder, LinearExpr}; @@ -430,7 +441,7 @@ impl CpModelBuilder { /// Adds a lesser or equal constraint between 2 linear expressions. 
/// - /// # Exemple + /// # Example /// /// ``` /// # use cp_sat::builder::{CpModelBuilder, LinearExpr}; @@ -455,7 +466,7 @@ impl CpModelBuilder { /// Adds a stricly greater constraint between 2 linear expressions. /// - /// # Exemple + /// # Example /// /// ``` /// # use cp_sat::builder::{CpModelBuilder, LinearExpr}; @@ -480,7 +491,7 @@ impl CpModelBuilder { /// Adds a strictly lesser constraint between 2 linear expressions. /// - /// # Exemple + /// # Example /// /// ``` /// # use cp_sat::builder::{CpModelBuilder, LinearExpr}; @@ -505,7 +516,7 @@ impl CpModelBuilder { /// Adds a not equal constraint between 2 linear expressions. /// - /// # Exemple + /// # Example /// /// ``` /// # use cp_sat::builder::{CpModelBuilder, LinearExpr}; @@ -529,7 +540,7 @@ impl CpModelBuilder { /// Adds a constraint that force the `target` to be equal to the /// minimum of the given `exprs`. /// - /// # Exemple + /// # Example /// /// ``` /// # use cp_sat::builder::{CpModelBuilder, LinearExpr}; @@ -550,16 +561,19 @@ impl CpModelBuilder { target: impl Into, exprs: impl IntoIterator>, ) -> Constraint { - self.add_cst(CstEnum::LinMin(proto::LinearArgumentProto { - target: Some(target.into().into()), - exprs: exprs.into_iter().map(|e| e.into().into()).collect(), + self.add_cst(CstEnum::LinMax(proto::LinearArgumentProto { + target: Some((-target.into()).into()), + exprs: exprs + .into_iter() + .map(|expr| (-expr.into()).into()) + .collect(), })) } /// Adds a constraint that force the `target` to be equal to the /// maximum of the given `exprs`. 
/// - /// # Exemple + /// # Example /// /// ``` /// # use cp_sat::builder::{CpModelBuilder, LinearExpr}; @@ -669,6 +683,10 @@ impl CpModelBuilder { offset: expr.constant as f64, scaling_factor: 1., domain: vec![], + scaling_was_exact: false, + integer_before_offset: 0, + integer_after_offset: 0, + integer_scaling_factor: 0, }); } @@ -698,6 +716,10 @@ impl CpModelBuilder { offset: -expr.constant as f64, scaling_factor: -1., domain: vec![], + scaling_was_exact: false, + integer_before_offset: 0, + integer_after_offset: 0, + integer_scaling_factor: 0, }); } @@ -865,7 +887,7 @@ pub struct Constraint(usize); /// It describes an expression in the form `ax+by+c`. Several [From] /// and [std::ops] traits are implemented for easy modeling. /// -/// # Exemple +/// # Example /// /// Most of the builder methods that takes something linear take in /// practice a `impl Into`. In the example, we will use diff --git a/src/cp_model.proto b/src/cp_model.proto index 3852e06..d52fd8b 100644 --- a/src/cp_model.proto +++ b/src/cp_model.proto @@ -1,4 +1,4 @@ -// Copyright 2010-2021 Google LLC +// Copyright 2010-2025 Google LLC // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -18,6 +18,7 @@ syntax = "proto3"; package operations_research.sat; option csharp_namespace = "Google.OrTools.Sat"; +option go_package = "github.com/google/or-tools/ortools/sat/proto/cpmodel"; option java_package = "com.google.ortools.sat"; option java_multiple_files = true; option java_outer_classname = "CpModelProtobuf"; @@ -70,12 +71,6 @@ message BoolArgumentProto { repeated int32 literals = 1; } -// Argument of the constraints of the form target_var = OP(vars). -message IntegerArgumentProto { - int32 target = 1; - repeated int32 vars = 2; -} - // Some constraints supports linear expression instead of just using a reference // to a variable. 
This is especially useful during presolve to reduce the model // size. @@ -90,9 +85,9 @@ message LinearArgumentProto { repeated LinearExpressionProto exprs = 2; } -// All variables must take different values. +// All affine expressions must take different values. message AllDifferentConstraintProto { - repeated int32 vars = 1; + repeated LinearExpressionProto exprs = 1; } // The linear sum vars[i] * coeffs[i] must fall in the given domain. The domain @@ -110,38 +105,30 @@ message LinearConstraintProto { // The constraint target = vars[index]. // This enforces that index takes one of the value in [0, vars_size()). message ElementConstraintProto { - int32 index = 1; - int32 target = 2; - repeated int32 vars = 3; + int32 index = 1; // Legacy field. + int32 target = 2; // Legacy field. + repeated int32 vars = 3; // Legacy field. + // All expressions below must be affine function with at most one variable. + LinearExpressionProto linear_index = 4; + LinearExpressionProto linear_target = 5; + repeated LinearExpressionProto exprs = 6; } -// This "special" constraint not only enforces (start + size == end) and (size -// >= 0) but can also be referred by other constraints using this "interval" -// concept. +// This is not really a constraint. It is there so it can be referred by other +// constraints using this "interval" concept. +// +// IMPORTANT: For now, this constraint do not enforce any relations on the +// components, and it is up to the client to add in the model: +// - enforcement => start + size == end. +// - enforcement => size >= 0 // Only needed if size is not already >= 0. +// +// IMPORTANT: For now, we just support affine relation. We could easily +// create an intermediate variable to support full linear expression, but this +// isn't done currently. message IntervalConstraintProto { - int32 start = 1; - int32 end = 2; - int32 size = 3; - - // EXPERIMENTAL: This will become the new way to specify an interval. 
- // Depending on the parameters, the presolve will convert the old way to the - // new way. Do not forget to add an associated linear constraint if you use - // this directly. - // - // If any of this field is set, then all must be set and the ones above will - // be ignored. - // - // IMPORTANT: For now, this constraint do not enforce any relations on the - // view, and a linear constraint must be added together with this to enforce - // enforcement => start_view + size_view == end_view. An enforcement => - // size_view >=0 might also be needed. - // - // IMPORTANT: For now, we just support affine relation. We could easily - // create an intermediate variable to support full linear expression, but this - // isn't done currently. - LinearExpressionProto start_view = 4; - LinearExpressionProto end_view = 5; - LinearExpressionProto size_view = 6; + LinearExpressionProto start = 4; + LinearExpressionProto end = 5; + LinearExpressionProto size = 6; } // All the intervals (index of IntervalConstraintProto) must be disjoint. More @@ -154,32 +141,46 @@ message NoOverlapConstraintProto { } // The boxes defined by [start_x, end_x) * [start_y, end_y) cannot overlap. +// Furthermore, one box is optional if at least one of the x or y interval is +// optional. +// +// Note that the case of boxes of size zero is special. The following cases +// violate the constraint: +// - a point box inside a box with a non zero area +// - a line box overlapping a box with a non zero area +// - one vertical line box crossing an horizontal line box. message NoOverlap2DConstraintProto { repeated int32 x_intervals = 1; repeated int32 y_intervals = 2; // Same size as x_intervals. - bool boxes_with_null_area_can_overlap = 3; } // The sum of the demands of the intervals at each interval point cannot exceed // a capacity. Note that intervals are interpreted as [start, end) and as // such intervals like [2,3) and [3,4) do not overlap for the point of view of // this constraint. 
Moreover, intervals of size zero are ignored. +// +// All demands must not contain any negative value in their domains. This is +// checked at validation. Even if there are no intervals, this constraint +// implicit enforces capacity >= 0. In other words, a negative capacity is +// considered valid but always infeasible. message CumulativeConstraintProto { - int32 capacity = 1; + LinearExpressionProto capacity = 1; repeated int32 intervals = 2; - repeated int32 demands = 3; // Same size as intervals. + repeated LinearExpressionProto demands = 3; // Same size as intervals. } // Maintain a reservoir level within bounds. The water level starts at 0, and at // any time, it must be within [min_level, max_level]. // -// If the variable actives[i] is true, and if the variable times[i] is assigned -// a value t, then the current level changes by demands[i] (which is constant) -// at the time t. Therefore, at any time t: -// sum(demands[i] * actives[i] if times[i] <= t) in [min_level, max_level] +// If the variable active_literals[i] is true, and if the expression +// time_exprs[i] is assigned a value t, then the current level changes by +// level_changes[i] at the time t. Therefore, at any time t: +// +// sum(level_changes[i] * active_literals[i] if time_exprs[i] <= t) +// in [min_level, max_level] // // Note that min level must be <= 0, and the max level must be >= 0. Please use -// fixed demands to simulate initial state. +// fixed level_changes to simulate initial state. // // The array of boolean variables 'actives', if defined, indicates which actions // are actually performed. If this array is not defined, then it is assumed that @@ -187,9 +188,11 @@ message CumulativeConstraintProto { message ReservoirConstraintProto { int64 min_level = 1; int64 max_level = 2; - repeated int32 times = 3; // variables. - repeated int64 demands = 4; // constants, can be negative. - repeated int32 actives = 5; // literals. 
+ repeated LinearExpressionProto time_exprs = 3; // affine expressions. + // Currently, we only support constant level changes. + repeated LinearExpressionProto level_changes = 6; // affine expressions. + repeated int32 active_literals = 5; + reserved 4; } // The circuit constraint is defined on a graph where the arc presence are @@ -218,6 +221,11 @@ message CircuitConstraintProto { // - Self-arcs are allowed except for node 0. // - There is no cycle in this graph, except through node 0. // +// Note: Currently this constraint expects all the nodes in [0, num_nodes) to +// have at least one incident arc. The model will be considered invalid if it +// is not the case. You can add self-arc fixed to one to ignore some nodes if +// needed. +// // TODO(user): It is probably possible to generalize this constraint to a // no-cycle in a general graph, or a no-cycle with sum incoming <= 1 and sum // outgoing <= 1 (more efficient implementation). On the other hand, having this @@ -227,25 +235,44 @@ message RoutesConstraintProto { repeated int32 heads = 2; repeated int32 literals = 3; - // Experimental. The demands for each node, and the maximum capacity for each - // route. Note that this is currently only used for the LP relaxation and one - // need to add the corresponding constraint to enforce this outside of the LP. - // - // TODO(user): Ideally, we should be able to extract any dimension like these - // (i.e. capacity, route_length, etc..) automatically from the encoding. The - // classical way to encode that is to have "current_capacity" variables along - // the route and linear equations of the form: - // arc_literal => (current_capacity_tail + demand <= current_capacity_head) + // DEPRECATED. These fields are no longer used. The solver ignores them. repeated int32 demands = 4; int64 capacity = 5; + + // A set of linear expressions associated with the nodes. + message NodeExpressions { + // The i-th element is the linear expression associated with the i-th node. 
+ // All expressions must be affine expressions (a * var + b). + repeated LinearExpressionProto exprs = 1; + } + + // Expressions associated with the nodes of the graph, such as the load of the + // vehicle arriving at a node, or the time at which a vehicle arrives at a + // node. Expressions with the same "dimension" (such as "load" or "time") must + // be listed together. + // This field is optional. If it is set, the linear constraints of size 1 or 2 + // between the variables in these expressions will be used to derive cuts for + // this constraint. If it is not set, the solver will try to automatically + // derive it, from the linear constraints of size 1 or 2 in the model (this + // can fail in complex cases). + repeated NodeExpressions dimensions = 6; } -// The values of the n-tuple formed by the given variables can only be one of +// The values of the n-tuple formed by the given expression can only be one of // the listed n-tuples in values. The n-tuples are encoded in a flattened way: // [tuple0_v0, tuple0_v1, ..., tuple0_v{n-1}, tuple1_v0, ...]. +// Expressions must be affine (a * var + b). +// Corner cases: +// - If all `vars`, `values` and `exprs` are empty, the constraint is trivially +// true, irrespective of the value of `negated`. +// - If `values` is empty but either vars or exprs is not, the constraint is +// trivially false if `negated` is false, and trivially true if `negated` is +// true. +// - If `vars` and `exprs` are empty but `values` is not, the model is invalid. message TableConstraintProto { - repeated int32 vars = 1; + repeated int32 vars = 1; // Legacy field. repeated int64 values = 2; + repeated LinearExpressionProto exprs = 4; // If true, the meaning is "negated", that is we forbid any of the given // tuple from a feasible assignment. 
@@ -259,7 +286,7 @@ message InverseConstraintProto { repeated int32 f_inverse = 2; } -// This constraint forces a sequence of variables to be accepted by an +// This constraint forces a sequence of expressions to be accepted by an // automaton. message AutomatonConstraintProto { // A state is identified by a non-negative number. It is preferable to keep @@ -275,12 +302,20 @@ message AutomatonConstraintProto { repeated int64 transition_head = 5; repeated int64 transition_label = 6; - // The sequence of variables. The automaton is ran for vars_size() "steps" and - // the value of vars[i] corresponds to the transition label at step i. + // Legacy field. repeated int32 vars = 7; + // The sequence of affine expressions (a * var + b). The automaton is ran for + // exprs_size() "steps" and the value of exprs[i] corresponds to the + // transition label at step i. + repeated LinearExpressionProto exprs = 8; } -// Next id: 30 +// A list of variables, without any semantics. +message ListOfVariablesProto { + repeated int32 vars = 1; +} + +// Next id: 31 message ConstraintProto { // For debug/logging only. Can be empty. string name = 1; @@ -343,40 +378,34 @@ message ConstraintProto { // The bool_xor constraint forces an odd number of the literals to be true. BoolArgumentProto bool_xor = 5; - // The int_div constraint forces the target to equal vars[0] / vars[1]. - // In particular, vars[1] can never take the value 0. - IntegerArgumentProto int_div = 7; - - // The int_mod constraint forces the target to equal vars[0] % vars[1]. - // The domain of vars[1] must be strictly positive. - IntegerArgumentProto int_mod = 8; - - // The int_max constraint forces the target to equal the maximum of all - // variables. - // - // The lin_max constraint forces the target to equal the maximum of all - // linear expressions. + // The int_div constraint forces the target to equal exprs[0] / exprs[1]. 
+ // The division is "rounded" towards zero, so we can have for instance + // (2 = 12 / 5) or (-3 = -10 / 3). If you only want exact integer division, + // then you should use instead of t = a / b, the int_prod constraint + // a = b * t. // - // TODO(user): Remove int_max in favor of lin_max. - IntegerArgumentProto int_max = 9; - LinearArgumentProto lin_max = 27; + // If 0 belongs to the domain of exprs[1], then the model is deemed invalid. + LinearArgumentProto int_div = 7; - // The int_min constraint forces the target to equal the minimum of all - // variables. - // - // The lin_min constraint forces the target to equal the minimum of all - // linear expressions. - // - // TODO(user): Remove int_min in favor of lin_min. - IntegerArgumentProto int_min = 10; - LinearArgumentProto lin_min = 28; + // The int_mod constraint forces the target to equal exprs[0] % exprs[1]. + // The domain of exprs[1] must be strictly positive. The sign of the target + // is the same as the sign of exprs[0]. + LinearArgumentProto int_mod = 8; // The int_prod constraint forces the target to equal the product of all // variables. By convention, because we can just remove term equal to one, // the empty product forces the target to be one. // - // TODO(user): Support more than two terms in the product. - IntegerArgumentProto int_prod = 11; + // Note that the solver checks for potential integer overflow. So the + // product of the maximum absolute value of all the terms (using the initial + // domain) should fit on an int64. Otherwise the model will be declared + // invalid. + LinearArgumentProto int_prod = 11; + + // The lin_max constraint forces the target to equal the maximum of all + // linear expressions. + // Note that this can model a minimum simply by negating all expressions. + LinearArgumentProto lin_max = 27; // The linear constraint enforces a linear inequality among the variables, // such as 0 <= x + 2y <= 10. 
@@ -438,16 +467,18 @@ message ConstraintProto { // of the demands of the intervals containing that point does not exceed // the capacity. CumulativeConstraintProto cumulative = 22; + + // This constraint is not meant to be used and will be rejected by the + // solver. It is meant to mark variable when testing the presolve code. + ListOfVariablesProto dummy_constraint = 30; } } // Optimization objective. -// -// This is in a message because decision problems don't have any objective. message CpObjectiveProto { // The linear terms of the objective to minimize. // For a maximization problem, one can negate all coefficients in the - // objective and set a scaling_factor to -1. + // objective and set scaling_factor to -1. repeated int32 vars = 1; repeated int64 coeffs = 4; @@ -465,6 +496,40 @@ message CpObjectiveProto { // Note that this does not depend on the offset or scaling factor, it is a // domain on the sum of the objective terms only. repeated int64 domain = 5; + + // Internal field. Do not set. When we scale a FloatObjectiveProto to a + // integer version, we set this to true if the scaling was exact (i.e. all + // original coeff were integer for instance). + // + // TODO(user): Put the error bounds we computed instead? + bool scaling_was_exact = 6; + + // Internal fields to recover a bound on the original integer objective from + // the presolved one. Basically, initially the integer objective fit on an + // int64 and is in [Initial_lb, Initial_ub]. During presolve, we might change + // the linear expression to have a new domain [Presolved_lb, Presolved_ub] + // that will also always fit on an int64. + // + // The two domain will always be linked with an affine transformation between + // the two of the form: + // old = (new + before_offset) * integer_scaling_factor + after_offset. + // Note that we use both offsets to always be able to do the computation while + // staying in the int64 domain. 
In particular, the after_offset will always + // be in (-integer_scaling_factor, integer_scaling_factor). + int64 integer_before_offset = 7; + int64 integer_after_offset = 9; + int64 integer_scaling_factor = 8; +} + +// A linear floating point objective: sum coeffs[i] * vars[i] + offset. +// Note that the variable can only still take integer value. +message FloatObjectiveProto { + repeated int32 vars = 1; + repeated double coeffs = 2; + double offset = 3; + + // The optimization direction. The default is to minimize + bool maximize = 4; } // Define the strategy to follow when the solver needs to take a new decision. @@ -475,8 +540,17 @@ message DecisionStrategyProto { // criteria defined below. repeated int32 variables = 1; - // The order in which the variables above should be considered. Note that only - // variables that are not already fixed are considered. + // If this is set, then the variables field must be empty. + // We currently only support affine expression. + // + // Note that this is needed so that if a variable has an affine + // representative, we can properly transform a DecisionStrategyProto through + // presolve. + repeated LinearExpressionProto exprs = 5; + + // The order in which the variables (resp. affine expression) above should be + // considered. Note that only variables that are not already fixed are + // considered. // // TODO(user): extend as needed. enum VariableSelectionStrategy { @@ -488,8 +562,8 @@ message DecisionStrategyProto { } VariableSelectionStrategy variable_selection_strategy = 2; - // Once a variable has been chosen, this enum describe what decision is taken - // on its domain. + // Once a variable (resp. affine expression) has been chosen, this enum + // describe what decision is taken on its domain. // // TODO(user): extend as needed. 
enum DomainReductionStrategy { @@ -498,19 +572,9 @@ message DecisionStrategyProto { SELECT_LOWER_HALF = 2; SELECT_UPPER_HALF = 3; SELECT_MEDIAN_VALUE = 4; + SELECT_RANDOM_HALF = 5; } DomainReductionStrategy domain_reduction_strategy = 3; - - // Advanced usage. Some of the variable listed above may have been transformed - // by the presolve so this is needed to properly follow the given selection - // strategy. Instead of using a value X for variables[index], we will use - // positive_coeff * X + offset instead. - message AffineTransformation { - int32 index = 1; - int64 offset = 2; - int64 positive_coeff = 3; - } - repeated AffineTransformation transformations = 4; } // This message encodes a partial (or full) assignment of the variables of a @@ -538,7 +602,7 @@ message DenseMatrixProto { repeated int32 entries = 3; } -// Experimental. For now, this is meant to be used by the solver and not filled +// EXPERIMENTAL. For now, this is meant to be used by the solver and not filled // by clients. // // Hold symmetry information about the set of feasible solutions. If we permute @@ -580,6 +644,21 @@ message CpModelProto { // The objective to minimize. Can be empty for pure decision problems. CpObjectiveProto objective = 4; + // Advanced usage. + // It is invalid to have both an objective and a floating point objective. + // + // The objective of the model, in floating point format. The solver will + // automatically scale this to integer during expansion and thus convert it to + // a normal CpObjectiveProto. See the mip* parameters to control how this is + // scaled. In most situation the precision will be good enough, but you can + // see the logs to see what are the precision guaranteed when this is + // converted to a fixed point representation. + // + // Note that even if the precision is bad, the returned objective_value and + // best_objective_bound will be computed correctly. So at the end of the solve + // you can check the gap if you only want precise optimal. 
+ FloatObjectiveProto floating_point_objective = 9; + // Defines the strategy that the solver should follow when the // search_branching parameter is set to FIXED_SEARCH. Note that this strategy // is also used as a heuristic when we are not in fixed search. @@ -587,9 +666,8 @@ message CpModelProto { // Advanced Usage: if not all variables appears and the parameter // "instantiate_all_variables" is set to false, then the solver will not try // to instantiate the variables that do not appear. Thus, at the end of the - // search, not all variables may be fixed and this is why we have the - // solution_lower_bounds and solution_upper_bounds fields in the - // CpSolverResponse. + // search, not all variables may be fixed. Currently, we will set them to + // their lower bound in the solution. repeated DecisionStrategyProto search_strategy = 5; // Solution hint. @@ -612,7 +690,7 @@ message CpModelProto { // infeasibility. // // Think (IIS), except when you are only concerned by the provided - // assumptions. This is powerful as it allows to group a set of logicially + // assumptions. This is powerful as it allows to group a set of logically // related constraint under only one enforcement literal which can potentially // give you a good and interpretable explanation for infeasiblity. // @@ -649,7 +727,7 @@ enum CpSolverStatus { // An optimal feasible solution has been found. // // More generally, this status represent a success. So we also return OPTIMAL - // if we find a solution for a pure feasiblity problem or if a gap limit has + // if we find a solution for a pure feasibility problem or if a gap limit has // been specified and we return a solution within this limit. In the case // where we need to return all the feasible solution, this status will only be // returned if we enumerated all of them; If we stopped before, we will return @@ -657,11 +735,15 @@ enum CpSolverStatus { OPTIMAL = 4; } +// Just a message used to store dense solution. 
+// This is used by the additional_solutions field. +message CpSolverSolution { + repeated int64 values = 1; +} + // The response returned by a solver trying to solve a CpModelProto. // -// TODO(user): support returning multiple solutions. Look at the Stubby -// streaming API as we probably wants to get them as they are found. -// Next id: 27 +// Next id: 32 message CpSolverResponse { // The status of the solve. CpSolverStatus status = 1; @@ -683,15 +765,13 @@ message CpSolverResponse { // maximization problem. double best_objective_bound = 4; - // Advanced usage. + // If the parameter fill_additional_solutions_in_response is set, then we + // copy all the solutions from our internal solution pool here. // - // If the problem has some variables that are not fixed at the end of the - // search (because of a particular search strategy in the CpModelProto) then - // this will be used instead of filling the solution above. The two fields - // will then contains the lower and upper bounds of each variable as they were - // when the best "solution" was found. - repeated int64 solution_lower_bounds = 18; - repeated int64 solution_upper_bounds = 19; + // Note that the one returned in the solution field will likely appear here + // too. Do not rely on the solutions order as it depends on our internal + // representation (after postsolve). + repeated CpSolverSolution additional_solutions = 27; // Advanced usage. // @@ -701,10 +781,14 @@ message CpSolverResponse { // is only filled with the info derived during a normal search and we do not // have any dedicated algorithm to improve it. // - // If the problem is a feasibility problem, then these bounds will be valid - // for any feasible solution. If the problem is an optimization problem, then - // these bounds will only be valid for any OPTIMAL solutions, it can exclude - // sub-optimal feasible ones. 
+ // Warning: if you didn't set keep_all_feasible_solutions_in_presolve, then + // these domains might exclude valid feasible solution. Otherwise for a + // feasibility problem, all feasible solution should be there. + // + // Warning: For an optimization problem, these will correspond to valid bounds + // for the problem of finding an improving solution to the best one found so + // far. It might be better to solve a feasibility version if one just want to + // explore the feasible region. repeated IntegerVariableProto tightened_variables = 21; // A subset of the model "assumptions" field. This will only be filled if the @@ -721,18 +805,36 @@ message CpSolverResponse { // changing your model to minimize the number of assumptions at false, but // this is likely an harder problem to solve. // + // Important: Currently, this is minimized only in single-thread and if the + // problem is not an optimization problem, otherwise, it will always include + // all the assumptions. + // // TODO(user): Allows for returning multiple core at once. repeated int32 sufficient_assumptions_for_infeasibility = 23; - // This will be true iff the solver was asked to find all solutions to a - // satisfiability problem (or all optimal solutions to an optimization - // problem), and it was successful in doing so. + // Contains the integer objective optimized internally. This is only filled if + // the problem had a floating point objective, and on the final response, not + // the ones given to callbacks. + CpObjectiveProto integer_objective = 28; + + // Advanced usage. // - // TODO(user): Remove as we also use the OPTIMAL vs FEASIBLE status for that. - bool all_solutions_were_found = 5; + // A lower bound on the integer expression of the objective. This is either a + // bound on the expression in the returned integer_objective or on the integer + // expression of the original objective if the problem already has an integer + // objective. 
+ // + // TODO(user): This should be renamed integer_objective_lower_bound. + int64 inner_objective_lower_bound = 29; // Some statistics about the solve. + // + // Important: in multithread, this correspond the statistics of the first + // subsolver. Which is usually the one with the user defined parameters. Or + // the default-search if none are specified. + int64 num_integers = 30; int64 num_booleans = 10; + int64 num_fixed_booleans = 31; int64 num_conflicts = 11; int64 num_branches = 12; int64 num_binary_propagations = 13; @@ -740,12 +842,16 @@ message CpSolverResponse { int64 num_restarts = 24; int64 num_lp_iterations = 25; + // The time counted from the beginning of the Solve() call. double wall_time = 15; double user_time = 16; double deterministic_time = 17; - double primal_integral = 22; - // Additional information about how the solution was found. + // The integral of log(1 + absolute_objective_gap) over time. + double gap_integral = 22; + + // Additional information about how the solution was found. It also stores + // model or parameters errors that caused the model to be invalid. string solution_info = 20; // The solve log will be filled if the parameter log_to_response is set to diff --git a/src/sat_parameters.proto b/src/sat_parameters.proto index 480518d..fb7d23f 100644 --- a/src/sat_parameters.proto +++ b/src/sat_parameters.proto @@ -1,4 +1,4 @@ -// Copyright 2010-2021 Google LLC +// Copyright 2010-2025 Google LLC // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -11,17 +11,20 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+// LINT: LEGACY_NAMES syntax = "proto2"; package operations_research.sat; +option csharp_namespace = "Google.OrTools.Sat"; +option go_package = "github.com/google/or-tools/ortools/sat/proto/satparameters"; option java_package = "com.google.ortools.sat"; option java_multiple_files = true; // Contains the definitions for all the sat algorithm parameters and their // default values. // -// NEXT TAG: 188 +// NEXT TAG: 325 message SatParameters { // In some context, like in a portfolio of search, it makes sense to name a // given parameters set for logging purpose. @@ -50,15 +53,6 @@ message SatParameters { POLARITY_TRUE = 0; POLARITY_FALSE = 1; POLARITY_RANDOM = 2; - - // Choose the sign that tends to satisfy the most constraints. This is - // computed using a weighted sum: if a literal l appears in a constraint of - // the form: ... + coeff * l +... <= rhs with positive coefficients and - // rhs, then -sign(l) * coeff / rhs is added to the weight of l.variable(). - POLARITY_WEIGHTED_SIGN = 3; - - // The opposite choice of POLARITY_WEIGHTED_SIGN. - POLARITY_REVERSE_WEIGHTED_SIGN = 4; } optional Polarity initial_polarity = 2 [default = POLARITY_FALSE]; @@ -80,6 +74,10 @@ message SatParameters { // 2 * x the second time, etc... optional int32 polarity_rephase_increment = 168 [default = 1000]; + // If true and we have first solution LS workers, tries in some phase to + // follow a LS solutions that violates has litle constraints as possible. + optional bool polarity_exploit_ls_hints = 309 [default = false]; + // The proportion of polarity chosen at random. Note that this take // precedence over the phase saving heuristic. This is different from // initial_polarity:POLARITY_RANDOM because it will select a new random @@ -126,7 +124,6 @@ message SatParameters { [default = RECURSIVE]; // Whether to expoit the binary clause to minimize learned clauses further. - // This will have an effect only if treat_binary_clauses_separately is true. 
enum BinaryMinizationAlgorithm { NO_BINARY_MINIMIZATION = 0; BINARY_MINIMIZATION_FIRST = 1; @@ -152,7 +149,12 @@ message SatParameters { // During a cleanup, we will always keep that number of "deletable" clauses. // Note that this doesn't include the "protected" clauses. - optional int32 clause_cleanup_target = 13 [default = 10000]; + optional int32 clause_cleanup_target = 13 [default = 0]; + + // During a cleanup, if clause_cleanup_target is 0, we will delete the + // clause_cleanup_ratio of "deletable" clauses instead of aiming for a fixed + // target of clauses to keep. + optional double clause_cleanup_ratio = 190 [default = 0.5]; // Each time a clause activity is bumped, the clause has a chance to be // protected during the next cleanup phase. Note that clauses used as a reason @@ -184,22 +186,6 @@ message SatParameters { optional int32 pb_cleanup_increment = 46 [default = 200]; optional double pb_cleanup_ratio = 47 [default = 0.5]; - // Parameters for an heuristic similar to the one descibed in "An effective - // learnt clause minimization approach for CDCL Sat Solvers", - // https://www.ijcai.org/proceedings/2017/0098.pdf - // - // For now, we have a somewhat simpler implementation where every x restart we - // spend y decisions on clause minimization. The minimization technique is the - // same as the one used to minimize core in max-sat. We also minimize problem - // clauses and not just the learned clause that we keep forever like in the - // paper. - // - // Changing these parameters or the kind of clause we minimize seems to have - // a big impact on the overall perf on our benchmarks. So this technique seems - // definitely useful, but it is hard to tune properly. 
- optional int32 minimize_with_propagation_restart_period = 96 [default = 10]; - optional int32 minimize_with_propagation_num_decisions = 97 [default = 1000]; - // ========================================================================== // Variable and clause activities // ========================================================================== @@ -309,9 +295,13 @@ message SatParameters { // solver, the time unit being as close as possible to a second. optional double max_deterministic_time = 67 [default = inf]; + // Stops after that number of batches has been scheduled. This only make sense + // when interleave_search is true. + optional int32 max_num_deterministic_batches = 291 [default = 0]; + // Maximum number of conflicts allowed to solve a problem. // - // TODO(user,user): Maybe change the way the conflict limit is enforced? + // TODO(user): Maybe change the way the conflict limit is enforced? // currently it is enforced on each independent internal SAT solve, rather // than on the overall number of conflicts across all solves. So in the // context of an optimization problem, this is not really usable directly by a @@ -323,6 +313,8 @@ message SatParameters { // solver will abort as soon as it detects that this limit is crossed. As a // result, this limit is approximative, but usually the solver will not go too // much over. + // + // TODO(user): This is only used by the pure SAT solver, generalize to CP-SAT. optional int64 max_memory_in_mb = 40 [default = 10000]; // Stop the search when the gap between the best feasible objective (O) and @@ -337,18 +329,19 @@ message SatParameters { // // Note that if the gap is reached, the search status will be OPTIMAL. But // one can check the best objective bound to see the actual gap. - optional double absolute_gap_limit = 159 [default = 0.0]; + // + // If the objective is integer, then any absolute gap < 1 will lead to a true + // optimal. 
If the objective is floating point, a gap of zero makes little + sense so that is why we use a non-zero default value. At the end of the + search, we will display a warning if OPTIMAL is reported yet the gap is + greater than this absolute gap. + optional double absolute_gap_limit = 159 [default = 1e-4]; optional double relative_gap_limit = 160 [default = 0.0]; // ========================================================================== // Other parameters // ========================================================================== - // If true, the binary clauses are treated separately from the others. This - // should be faster and uses less memory. However it changes the propagation - // order. - optional bool treat_binary_clauses_separately = 33 [default = true]; - // At the beginning of each solve, the random number generator used in some // part of the solver is reinitialized to this seed. If you change the random // seed, the solver may make different choices during the solving process. @@ -366,10 +359,17 @@ message SatParameters { optional bool permute_presolve_constraint_order = 179 [default = false]; optional bool use_absl_random = 180 [default = false]; - // Whether the solver should log the search progress. By default, it logs to - // LOG(INFO). This can be overwritten by the log_destination parameter. + // Whether the solver should log the search progress. This is the main + // logging parameter and if this is false, none of the logging (callbacks, + // log_to_stdout, log_to_response, ...) will do anything. optional bool log_search_progress = 41 [default = false]; + // Whether the solver should display per sub-solver search statistics. + // This is only useful if log_search_progress is set to true, and if the + // number of search workers is > 1. Note that in all cases we display a bit + // of stats with one line per subsolver. + optional bool log_subsolver_statistics = 189 [default = false]; + // Add a prefix to all logs. 
optional string log_prefix = 185 [default = ""]; @@ -411,12 +411,25 @@ message SatParameters { // occurrences of not(x) is not greater than this parameter. optional int32 presolve_bve_threshold = 54 [default = 500]; + // Internal parameter. During BVE, if we eliminate a variable x, by default we + // will push all clauses containing x and all clauses containing not(x) to the + // postsolve. However, it is possible to write the postsolve code so that only + // one such set is needed. The idea is that, if we push the set containing a + // literal l, is to set l to false except if it is needed to satisfy one of + // the clause in the set. This is always beneficial, but for historical + // reason, not all our postsolve algorithm support this. + optional bool filter_sat_postsolve_clauses = 324 [default = false]; + // During presolve, we apply BVE only if this weight times the number of // clauses plus the number of clause literals is not increased. optional int32 presolve_bve_clause_weight = 55 [default = 3]; // The maximum "deterministic" time limit to spend in probing. A value of // zero will disable the probing. + // + // TODO(user): Clean up. The first one is used in CP-SAT, the other in pure + // SAT presolve. + optional double probing_deterministic_time_limit = 226 [default = 1.0]; optional double presolve_probing_deterministic_time_limit = 57 [default = 30.0]; @@ -441,46 +454,80 @@ message SatParameters { // Whether we presolve the cp_model before solving it. optional bool cp_model_presolve = 86 [default = true]; - // Advanced usage. We have two different postsolve code. The default one - // should be better and it allows for a more powerful presolve, but some - // rarely used features like not fully assigning all variables require the - // other one. - optional bool cp_model_postsolve_with_full_solver = 162 [default = false]; - - // If positive, try to stop just after that many presolve rules have been - // applied. This is mainly useful for debugging presolve. 
- optional int32 cp_model_max_num_presolve_operations = 151 [default = 0]; - // How much effort do we spend on probing. 0 disables it completely. optional int32 cp_model_probing_level = 110 [default = 2]; // Whether we also use the sat presolve when cp_model_presolve is true. optional bool cp_model_use_sat_presolve = 93 [default = true]; - optional bool use_sat_inprocessing = 163 [default = false]; - // If true, the element constraints are expanded into many - // linear constraints of the form (index == i) => (element[i] == target). - optional bool expand_element_constraints = 140 [default = true]; + // If cp_model_presolve is true and there is a large proportion of fixed + // variables after the first model copy, remap all the model to a dense set of + // variables before the full presolve even starts. This should help for LNS on + // large models. + optional bool remove_fixed_variables_early = 310 [default = true]; - // If true, the automaton constraints are expanded. - optional bool expand_automaton_constraints = 143 [default = true]; + // If true, we detect variables that are unique to a table constraint and only + // there to encode a cost on each tuple. This is usually the case when a WCSP + // (weighted constraint program) is encoded into CP-SAT format. + // + // This can lead to a dramatic speed-up for such problems but is still + // experimental at this point. + optional bool detect_table_with_cost = 216 [default = false]; - // If true, the positive table constraints are expanded. - // Note that currently, negative table constraints are always expanded. - optional bool expand_table_constraints = 158 [default = true]; + // How much we try to "compress" a table constraint. Compressing more leads to + // fewer Booleans and faster propagation but can reduce the quality of the lp + // relaxation. Values go from 0 to 3 where we always try to fully compress a + // table. At 2, we try to automatically decide if it is worth it. 
+ optional int32 table_compression_level = 217 [default = 2]; // If true, expand all_different constraints that are not permutations. // Permutations (#Variables = #Values) are always expanded. optional bool expand_alldiff_constraints = 170 [default = false]; + // Max domain size for all_different constraints to be expanded. + optional int32 max_alldiff_domain_size = 320 [default = 256]; + // If true, expand the reservoir constraints by creating booleans for all // possible precedences between event and encoding the constraint. optional bool expand_reservoir_constraints = 182 [default = true]; + // Mainly useful for testing. + // + // If this and expand_reservoir_constraints is true, we use a different + // encoding of the reservoir constraint using circuit instead of precedences. + // Note that this is usually slower, but can exercise different parts of the + // solver. Note that contrary to the precedence encoding, this easily supports + // variable demands. + // + // WARNING: with this encoding, the constraint takes a slightly different + // meaning. There must exist a permutation of the events occurring at the same + // time such that the level is within the reservoir after each of these events + // (in this permuted order). So we cannot have +100 and -100 at the same time + // if the level must be between 0 and 10 (as authorized by the reservoir + // constraint). + optional bool expand_reservoir_using_circuit = 288 [default = false]; + + // Encode cumulative with fixed demands and capacity as a reservoir + // constraint. The only reason you might want to do that is to test the + // reservoir propagation code! + optional bool encode_cumulative_as_reservoir = 287 [default = false]; + + // If the number of expressions in the lin_max is less than the max size + // parameter, model expansion replaces target = max(xi) by linear constraint + // with the introduction of new booleans bi such that bi => target == xi. 
+ // + // This is mainly for experimenting compared to a custom lin_max propagator. + optional int32 max_lin_max_size_for_expansion = 280 [default = 0]; + // If true, it disable all constraint expansion. // This should only be used to test the presolve of expanded constraints. optional bool disable_constraint_expansion = 181 [default = false]; + // Linear constraint with a complex right hand side (more than a single + // interval) need to be expanded, there are a couple of ways to do that. + optional bool encode_complex_linear_constraint_with_integer = 223 + [default = false]; + // During presolve, we use a maximum clique heuristic to merge together // no-overlap constraints or at most one constraints. This code can be slow, // so we have a limit in place on the number of explored nodes in the @@ -503,6 +550,192 @@ message SatParameters { // created this way). optional bool presolve_extract_integer_enforcement = 174 [default = false]; + // A few presolve operations involve detecting constraints included in other + // constraints. Since there can be a quadratic number of such pairs, and + // processing them usually involves scanning them, the complexity of these + // operations can be big. This enforces a local deterministic limit on the + // number of entries scanned. Default is 1e8. + // + // A value of zero will disable these presolve rules completely. + optional int64 presolve_inclusion_work_limit = 201 [default = 100000000]; + + // If true, we don't keep names in our internal copy of the user given model. + optional bool ignore_names = 202 [default = true]; + + // Run a max-clique code amongst all the x != y we can find and try to infer + // set of variables that are all different. This allows to close neos16.mps + // for instance. Note that we only run this code if there is no all_diff + // already in the model so that if a user wants to add some all_diff, we assume + // it is well done and do not try to add more. 
+ // + // This will also detect and add no_overlap constraints, if all the relations + // x != y have "offsets" between them. I.e. x > y + offset. + optional bool infer_all_diffs = 233 [default = true]; + + // Try to find large "rectangle" in the linear constraint matrix with + // identical lines. If such rectangle is big enough, we can introduce a new + // integer variable corresponding to the common expression and greatly reduce + // the number of non-zero. + optional bool find_big_linear_overlap = 234 [default = true]; + + // ========================================================================== + // Inprocessing + // ========================================================================== + + // Enable or disable "inprocessing" which is some SAT presolving done at + // each restart to the root level. + optional bool use_sat_inprocessing = 163 [default = true]; + + // Proportion of deterministic time we should spend on inprocessing. + // At each "restart", if the proportion is below this ratio, we will do some + // inprocessing, otherwise, we skip it for this restart. + optional double inprocessing_dtime_ratio = 273 [default = 0.2]; + + // The amount of dtime we should spend on probing for each inprocessing round. + optional double inprocessing_probing_dtime = 274 [default = 1.0]; + + // Parameters for an heuristic similar to the one described in "An effective + // learnt clause minimization approach for CDCL Sat Solvers", + // https://www.ijcai.org/proceedings/2017/0098.pdf + // + // This is the amount of dtime we should spend on this technique during each + // inprocessing phase. + // + // The minimization technique is the same as the one used to minimize core in + // max-sat. We also minimize problem clauses and not just the learned clause + // that we keep forever like in the paper. 
+ optional double inprocessing_minimization_dtime = 275 [default = 1.0]; + optional bool inprocessing_minimization_use_conflict_analysis = 297 + [default = true]; + optional bool inprocessing_minimization_use_all_orderings = 298 + [default = false]; + + // ========================================================================== + // Multithread + // ========================================================================== + + // Specify the number of parallel workers (i.e. threads) to use during search. + // This should usually be lower than your number of available cpus + + // hyperthread in your machine. + // + // A value of 0 means the solver will try to use all cores on the machine. + // A number of 1 means no parallelism. + // + // Note that 'num_workers' is the preferred name, but if it is set to zero, + // we will still read the deprecated 'num_search_workers'. + // + // As of 2020-04-10, if you're using SAT via MPSolver (to solve integer + // programs) this field is overridden with a value of 8, if the field is not + // set *explicitly*. Thus, always set this field explicitly or via + // MPSolver::SetNumThreads(). + optional int32 num_workers = 206 [default = 0]; + optional int32 num_search_workers = 100 [default = 0]; + + // We distinguish subsolvers that consume a full thread, and the ones that are + // always interleaved. If left at zero, we will fix this with a default + // formula that depends on num_workers. But if you start modifying what runs, + // you might want to fix that to a given value depending on the num_workers + // you use. + optional int32 num_full_subsolvers = 294 [default = 0]; + + // In multi-thread, the solver can be mainly seen as a portfolio of solvers + // with different parameters. This field indicates the names of the parameters + // that are used in multithread. This only applies to "full" subsolvers. 
+ // + // See cp_model_search.cc to see a list of the names and the default value (if + // left empty) that looks like: + // - default_lp (linearization_level:1) + // - fixed (only if fixed search specified or scheduling) + // - no_lp (linearization_level:0) + // - max_lp (linearization_level:2) + // - pseudo_costs (only if objective, change search heuristic) + // - reduced_costs (only if objective, change search heuristic) + // - quick_restart (kind of probing) + // - quick_restart_no_lp (kind of probing with linearization_level:0) + // - lb_tree_search (to improve lower bound, MIP like tree search) + // - probing (continuous probing and shaving) + // + // Also, note that some set of parameters will be ignored if they do not make + // sense. For instance if there is no objective, pseudo_cost or reduced_cost + // search will be ignored. Core based search will only work if the objective + // has many terms. If there is no fixed strategy fixed will be ignored. And so + // on. + // + // The order is important, as only the first num_full_subsolvers will be + // scheduled. You can see in the log which one are selected for a given run. + repeated string subsolvers = 207; + + // A convenient way to add more workers types. + // These will be added at the beginning of the list. + repeated string extra_subsolvers = 219; + + // Rather than fully specifying subsolvers, it is often convenient to just + // remove the ones that are not useful on a given problem or only keep + // specific ones for testing. Each string is interpreted as a "glob", so we + // support '*' and '?'. + // + // The way this work is that we will only accept a name that match a filter + // pattern (if non-empty) and do not match an ignore pattern. Note also that + // these fields work on LNS or LS names even if these are currently not + // specified via the subsolvers field. 
+ repeated string ignore_subsolvers = 209; + repeated string filter_subsolvers = 293; + + // It is possible to specify additional subsolver configuration. These can be + // referred by their params.name() in the fields above. Note that only the + // specified field will "overwrite" the ones of the base parameter. If a + // subsolver_params has the name of an existing subsolver configuration, the + // named parameters will be merged into the subsolver configuration. + repeated SatParameters subsolver_params = 210; + + // Experimental. If this is true, then we interleave all our major search + // strategy and distribute the work amongst num_workers. + // + // The search is deterministic (independently of num_workers!), and we + // schedule and wait for interleave_batch_size task to be completed before + // synchronizing and scheduling the next batch of tasks. + optional bool interleave_search = 136 [default = false]; + optional int32 interleave_batch_size = 134 [default = 0]; + + // Allows objective sharing between workers. + optional bool share_objective_bounds = 113 [default = true]; + + // Allows sharing of the bounds of modified variables at level 0. + optional bool share_level_zero_bounds = 114 [default = true]; + + // Allows sharing of new learned binary clause between workers. + optional bool share_binary_clauses = 203 [default = true]; + + // Allows sharing of short glue clauses between workers. + // Implicitly disabled if share_binary_clauses is false. + optional bool share_glue_clauses = 285 [default = false]; + + // Minimize and detect subsumption of shared clauses immediately after they + // are imported. + optional bool minimize_shared_clauses = 300 [default = true]; + + // The amount of dtime between each export of shared glue clauses. 
+ optional double share_glue_clauses_dtime = 322 [default = 1.0]; + + // ========================================================================== + // Debugging parameters + // ========================================================================== + + // We have two different postsolve code. The default one should be better and + // it allows for a more powerful presolve, but it can be useful to postsolve + // using the full solver instead. + optional bool debug_postsolve_with_full_solver = 162 [default = false]; + + // If positive, try to stop just after that many presolve rules have been + // applied. This is mainly useful for debugging presolve. + optional int32 debug_max_num_presolve_operations = 151 [default = 0]; + + // Crash if we do not manage to complete the hint into a full solution. + optional bool debug_crash_on_bad_hint = 195 [default = false]; + + // Crash if presolve breaks a feasible hint. + optional bool debug_crash_if_presolve_breaks_hint = 306 [default = false]; + // ========================================================================== // Max-sat parameters // ========================================================================== @@ -513,8 +746,11 @@ message SatParameters { // otherwise. optional bool use_optimization_hints = 35 [default = true]; - // Whether we use a simple heuristic to try to minimize an UNSAT core. - optional bool minimize_core = 50 [default = true]; + // If positive, we spend some effort on each core: + // - At level 1, we use a simple heuristic to try to minimize an UNSAT core. + // - At level 2, we use propagation to minimize the core but also identify + // literal in at most one relationship in this core. + optional int32 core_minimization_level = 50 [default = 2]; // Whether we try to find more independent cores for a given set of // assumptions in the core based max-SAT algorithms. 
@@ -522,6 +758,7 @@ message SatParameters { // If true, when the max-sat algo find a core, we compute the minimal number // of literals in the core that needs to be true to have a feasible solution. + // This is also called core exhaustion in more recent max-SAT papers. optional bool cover_optimization = 89 [default = true]; // In what order do we add the assumptions in a core-based max-sat algorithm @@ -559,6 +796,17 @@ message SatParameters { // Constraint programming parameters // ========================================================================== + // Some search decisions might cause a really large number of propagations to + // happen when integer variables with large domains are only reduced by 1 at + // each step. If we propagate more than the number of variable times this + // parameters we try to take counter-measure. Setting this to 0.0 disable this + // feature. + // + // TODO(user): Setting this to something like 10 helps in most cases, but the + // code is currently buggy and can cause the solve to enter a bad state where + // no progress is made. + optional double propagation_loop_detection_factor = 221 [default = 10.0]; + // When this is true, then a disjunctive constraint will try to use the // precedence relations between time intervals to propagate their bounds // further. For instance if task A and B are both before C and task A and B @@ -570,6 +818,26 @@ message SatParameters { // depending on the problem, turning this off may lead to a faster solution. optional bool use_precedences_in_disjunctive_constraint = 74 [default = true]; + // Create one literal for each disjunction of two pairs of tasks. This slows + // down the solve time, but improves the lower bound of the objective in the + // makespan case. This will be triggered if the number of intervals is less or + // equal than the parameter and if use_strong_propagation_in_disjunctive is + // true. 
+ optional int32 max_size_to_create_precedence_literals_in_disjunctive = 229 + [default = 60]; + + // Enable stronger and more expensive propagation on no_overlap constraint. + optional bool use_strong_propagation_in_disjunctive = 230 [default = false]; + + // Whether we try to branch on decision "interval A before interval B" rather + // than on intervals bounds. This usually works better, but slow down a bit + // the time to find the first solution. + // + // These parameters are still EXPERIMENTAL, the result should be correct, but + // it some corner cases, they can cause some failing CHECK in the solver. + optional bool use_dynamic_precedence_in_disjunctive = 263 [default = false]; + optional bool use_dynamic_precedence_in_cumulative = 268 [default = false]; + // When this is true, the cumulative constraint is reinforced with overload // checking, i.e., an additional level of reasoning based on energy. This // additional level supplements the default level of reasoning as well as @@ -577,8 +845,16 @@ message SatParameters { // // This always result in better propagation, but it is usually slow, so // depending on the problem, turning this off may lead to a faster solution. - optional bool use_overload_checker_in_cumulative_constraint = 78 - [default = false]; + optional bool use_overload_checker_in_cumulative = 78 [default = false]; + + // Enable a heuristic to solve cumulative constraints using a modified energy + // constraint. We modify the usual energy definition by applying a + // super-additive function (also called "conservative scale" or "dual-feasible + // function") to the demand and the durations of the tasks. + // + // This heuristic is fast but for most problems it does not help much to find + // a solution. 
+ optional bool use_conservative_scale_overload_checker = 286 [default = false]; // When this is true, the cumulative constraint is reinforced with timetable // edge finding, i.e., an additional level of reasoning based on the @@ -587,8 +863,21 @@ message SatParameters { // // This always result in better propagation, but it is usually slow, so // depending on the problem, turning this off may lead to a faster solution. - optional bool use_timetable_edge_finding_in_cumulative_constraint = 79 - [default = false]; + optional bool use_timetable_edge_finding_in_cumulative = 79 [default = false]; + + // Max number of intervals for the timetable_edge_finding algorithm to + // propagate. A value of 0 disables the constraint. + optional int32 max_num_intervals_for_timetable_edge_finding = 260 + [default = 100]; + + // If true, detect and create constraint for integer variable that are "after" + // a set of intervals in the same cumulative constraint. + // + // Experimental: by default we just use "direct" precedences. If + // exploit_all_precedences is true, we explore the full precedence graph. This + // assumes we have a DAG otherwise it fails. + optional bool use_hard_precedences_in_cumulative = 215 [default = false]; + optional bool exploit_all_precedences = 220 [default = false]; // When this is true, the cumulative constraint is reinforced with propagators // from the disjunctive constraint to improve the inference on a set of tasks @@ -600,110 +889,127 @@ message SatParameters { // // This always result in better propagation, but it is usually slow, so // depending on the problem, turning this off may lead to a faster solution. - optional bool use_disjunctive_constraint_in_cumulative_constraint = 80 - [default = true]; - - // A non-negative level indicating the type of constraints we consider in the - // LP relaxation. At level zero, no LP relaxation is used. At level 1, only - // the linear constraint and full encoding are added. 
At level 2, we also add - // all the Boolean constraints. - optional int32 linearization_level = 90 [default = 1]; - - // A non-negative level indicating how much we should try to fully encode - // Integer variables as Boolean. - optional int32 boolean_encoding_level = 107 [default = 1]; - - // The limit on the number of cuts in our cut pool. When this is reached we do - // not generate cuts anymore. + optional bool use_disjunctive_constraint_in_cumulative = 80 [default = true]; + + // If less than this number of boxes are present in a no-overlap 2d, we + // create 4 Booleans per pair of boxes: + // - Box 2 is after Box 1 on x. + // - Box 1 is after Box 2 on x. + // - Box 2 is after Box 1 on y. + // - Box 1 is after Box 2 on y. // - // TODO(user): We should probably remove this parameters, and just always - // generate cuts but only keep the best n or something. - optional int32 max_num_cuts = 91 [default = 10000]; - - // For the cut that can be generated at any level, this control if we only - // try to generate them at the root node. - optional bool only_add_cuts_at_level_zero = 92 [default = false]; - - // Whether we generate knapsack cuts. Note that in our setting where all - // variables are integer and bounded on both side, such a cut could be applied - // to any constraint. - optional bool add_knapsack_cuts = 111 [default = false]; - - // Whether we generate and add Chvatal-Gomory cuts to the LP at root node. - // Note that for now, this is not heavily tuned. - optional bool add_cg_cuts = 117 [default = true]; - - // Whether we generate MIR cuts at root node. - // Note that for now, this is not heavily tuned. - optional bool add_mir_cuts = 120 [default = true]; - - // Whether we generate Zero-Half cuts at root node. - // Note that for now, this is not heavily tuned. - optional bool add_zero_half_cuts = 169 [default = true]; - - // Whether we generate clique cuts from the binary implication graph. 
Note - // that as the search goes on, this graph will contains new binary clauses - // learned by the SAT engine. - optional bool add_clique_cuts = 172 [default = true]; - - // Cut generator for all diffs can add too many cuts for large all_diff - // constraints. This parameter restricts the large all_diff constraints to - // have a cut generator. - optional int32 max_all_diff_cut_size = 148 [default = 7]; - - // For the lin max constraints, generates the cuts described in "Strong - // mixed-integer programming formulations for trained neural networks" by Ross - // Anderson et. (https://arxiv.org/pdf/1811.01988.pdf) - optional bool add_lin_max_cuts = 152 [default = true]; - - // In the integer rounding procedure used for MIR and Gomory cut, the maximum - // "scaling" we use (must be positive). The lower this is, the lower the - // integer coefficients of the cut will be. Note that cut generated by lower - // values are not necessarily worse than cut generated by larger value. There - // is no strict dominance relationship. + // Note that at least one of them must be true, and at most one on x and one + // on y can be true. // - // Setting this to 2 result in the "strong fractional rouding" of Letchford - // and Lodi. - optional int32 max_integer_rounding_scaling = 119 [default = 600]; - - // If true, we start by an empty LP, and only add constraints not satisfied - // by the current LP solution batch by batch. A constraint that is only added - // like this is known as a "lazy" constraint in the literature, except that we - // currently consider all constraints as lazy here. - optional bool add_lp_constraints_lazily = 112 [default = true]; - - // While adding constraints, skip the constraints which have orthogonality - // less than 'min_orthogonality_for_lp_constraints' with already added - // constraints during current call. Orthogonality is defined as 1 - - // cosine(vector angle between constraints). A value of zero disable this - // feature. 
- optional double min_orthogonality_for_lp_constraints = 115 [default = 0.05]; - - // Max number of time we perform cut generation and resolve the LP at level 0. - optional int32 max_cut_rounds_at_level_zero = 154 [default = 1]; - - // If a constraint/cut in LP is not active for that many consecutive OPTIMAL - // solves, remove it from the LP. Note that it might be added again later if - // it become violated by the current LP solution. - optional int32 max_consecutive_inactive_count = 121 [default = 100]; + // This can significantly help in closing small problem. The SAT reasoning + // can be a lot more powerful when we take decision on such positional + // relations. + optional int32 no_overlap_2d_boolean_relations_limit = 321 [default = 10]; + + // When this is true, the no_overlap_2d constraint is reinforced with + // propagators from the cumulative constraints. It consists of ignoring the + // position of rectangles in one position and projecting the no_overlap_2d on + // the other dimension to create a cumulative constraint. This is done on both + // axis. This additional level supplements the default level of reasoning. + optional bool use_timetabling_in_no_overlap_2d = 200 [default = false]; + + // When this is true, the no_overlap_2d constraint is reinforced with + // energetic reasoning. This additional level supplements the default level of + // reasoning. + optional bool use_energetic_reasoning_in_no_overlap_2d = 213 + [default = false]; - // These parameters are similar to sat clause management activity parameters. - // They are effective only if the number of generated cuts exceed the storage - // limit. Default values are based on a few experiments on miplib instances. - optional double cut_max_active_count_value = 155 [default = 1e10]; - optional double cut_active_count_decay = 156 [default = 0.8]; + // When this is true, the no_overlap_2d constraint is reinforced with + // an energetic reasoning that uses an area-based energy. 
This can be combined + // with the two other overlap heuristics above. + optional bool use_area_energetic_reasoning_in_no_overlap_2d = 271 + [default = false]; - // Target number of constraints to remove during cleanup. - optional int32 cut_cleanup_target = 157 [default = 1000]; + optional bool use_try_edge_reasoning_in_no_overlap_2d = 299 [default = false]; + + // If the number of pairs to look is below this threshold, do an extra step of + // propagation in the no_overlap_2d constraint by looking at all pairs of + // intervals. + optional int32 max_pairs_pairwise_reasoning_in_no_overlap_2d = 276 + [default = 1250]; + + // Detects when the space where items of a no_overlap_2d constraint can be placed + // is disjoint (i.e., fixed boxes split the domain). When it is the case, we + // can introduce a boolean for each pair encoding whether + // the item is in the component or not. Then we replace the original + // no_overlap_2d constraint by one no_overlap_2d constraint for each + // component, with the new booleans as the enforcement_literal of the + // intervals. This is equivalent to expanding the original no_overlap_2d + // constraint into a bin packing problem with each connected component being a + // bin. This heuristic is only done when the number of regions to split + // is less than this parameter and <= 1 disables it. + optional int32 maximum_regions_to_split_in_disconnected_no_overlap_2d = 315 + [default = 0]; + + // When set, this activates a propagator for the no_overlap_2d constraint that + // uses any eventual linear constraints of the model in the form + // `{start interval 1} - {end interval 2} + c*w <= ub` to detect that two + // intervals must overlap in one dimension for some values of `w`. This is + // particularly useful for problems where the distance between two boxes is + // part of the model. + optional bool use_linear3_for_no_overlap_2d_precedences = 323 + [default = true]; - // Add that many lazy constraints (or cuts) at once in the LP. 
Note that at - // the beginning of the solve, we do add more than this. - optional int32 new_constraints_batch_size = 122 [default = 50]; + // When set, it activates a few scheduling parameters to improve the lower + // bound of scheduling problems. This is only effective with multiple workers + // as it modifies the reduced_cost, lb_tree_search, and probing workers. + optional bool use_dual_scheduling_heuristics = 214 [default = true]; + + // Turn on extra propagation for the circuit constraint. + // This can be quite slow. + optional bool use_all_different_for_circuit = 311 [default = false]; + + // If the size of a subset of nodes of a RoutesConstraint is less than this + // value, use linear constraints of size 1 and 2 (such as capacity and time + // window constraints) enforced by the arc literals to compute cuts for this + // subset (unless the subset size is less than + // routing_cut_subset_size_for_tight_binary_relation_bound, in which case the + // corresponding algorithm is used instead). The algorithm for these cuts has + // a O(n^3) complexity, where n is the subset size. Hence the value of this + // parameter should not be too large (e.g. 10 or 20). + optional int32 routing_cut_subset_size_for_binary_relation_bound = 312 + [default = 0]; + + // Similar to above, but with a different algorithm producing better cuts, at + // the price of a higher O(2^n) complexity, where n is the subset size. Hence + // the value of this parameter should be small (e.g. less than 10). + optional int32 routing_cut_subset_size_for_tight_binary_relation_bound = 313 + [default = 0]; + + // Similar to above, but with an even stronger algorithm in O(n!). We try to + // be defensive and abort early or not run that often. Still the value of + // that parameter shouldn't really be much more than 10. 
+ optional int32 routing_cut_subset_size_for_exact_binary_relation_bound = 316 + [default = 8]; + + // Similar to routing_cut_subset_size_for_exact_binary_relation_bound but + // use a bound based on shortest path distances (which respect triangular + // inequality). This allows to derive bounds that are valid for any superset + // of a given subset. This is slow, so it shouldn't really be larger than 10. + optional int32 routing_cut_subset_size_for_shortest_paths_bound = 318 + [default = 8]; + + // The amount of "effort" to spend in dynamic programming for computing + // routing cuts. This is in term of basic operations needed by the algorithm + // in the worst case, so a value like 1e8 should take less than a second to + // compute. + optional double routing_cut_dp_effort = 314 [default = 1e7]; + + // If the length of an infeasible path is less than this value, a cut will be + // added to exclude it. + optional int32 routing_cut_max_infeasible_path_length = 317 [default = 6]; // The search branching will be used to decide how to branch on unfixed nodes. enum SearchBranching { // Try to fix all literals using the underlying SAT solver's heuristics, - // then generate and fix literals until integer variables are fixed. + // then generate and fix literals until integer variables are fixed. New + // literals on integer variables are generated using the fixed search + // specified by the user or our default one. AUTOMATIC_SEARCH = 0; // If used then all decisions taken by the solver are made using a fixed @@ -711,18 +1017,16 @@ message SatParameters { // field. FIXED_SEARCH = 1; - // If used, the solver will use various generic heuristics in turn. + // Simple portfolio search used by LNS workers. PORTFOLIO_SEARCH = 2; // If used, the solver will use heuristics from the LP relaxation. This // exploit the reduced costs of the variables in the relaxation. - // - // TODO(user): Maybe rename REDUCED_COST_SEARCH? 
LP_SEARCH = 3; // If used, the solver uses the pseudo costs for branching. Pseudo costs // are computed using the historical change in objective bounds when some - // decision are taken. + // decision are taken. Note that this works whether we use an LP or not. PSEUDO_COST_SEARCH = 4; // Mainly exposed here for testing. This quickly tries a lot of randomized @@ -734,6 +1038,15 @@ message SatParameters { // solution_hint field of the CpModelProto rather than using the information // provided in the search_strategy. HINT_SEARCH = 6; + + // Similar to FIXED_SEARCH, but differ in how the variable not listed into + // the fixed search heuristics are branched on. This will always start the + // search tree according to the specified fixed search strategy, but will + // complete it using the default automatic search. + PARTIAL_FIXED_SEARCH = 7; + + // Randomized search. Used to increase entropy in the search. + RANDOMIZED_SEARCH = 8; } optional SearchBranching search_branching = 82 [default = AUTOMATIC_SEARCH]; @@ -746,45 +1059,50 @@ message SatParameters { // hint until the hint_conflict_limit is reached. optional bool repair_hint = 167 [default = false]; - // All the "exploit_*" parameters below work in the same way: when branching - // on an IntegerVariable, these parameters affect the value the variable is - // branched on. Currently the first heuristic that triggers win in the order - // in which they appear below. - // - // TODO(user): Maybe do like for the restart algorithm, introduce an enum - // and a repeated field that control the order on which these are applied? + // If true, variables appearing in the solution hints will be fixed to their + // hinted value. + optional bool fix_variables_to_their_hinted_value = 192 [default = false]; - // If true and the Lp relaxation of the problem has an integer optimal - // solution, try to exploit it. 
Note that since the LP relaxation may not - // contain all the constraints, such a solution is not necessarily a solution - // of the full problem. - optional bool exploit_integer_lp_solution = 94 [default = true]; + // If true, search will continuously probe Boolean variables, and integer + // variable bounds. This parameter is set to true in parallel on the probing + // worker. + optional bool use_probing_search = 176 [default = false]; - // If true and the Lp relaxation of the problem has a solution, try to exploit - // it. This is same as above except in this case the lp solution might not be - // an integer solution. - optional bool exploit_all_lp_solution = 116 [default = true]; + // Use extended probing (probe bool_or, at_most_one, exactly_one). + optional bool use_extended_probing = 269 [default = true]; - // When branching on a variable, follow the last best solution value. - optional bool exploit_best_solution = 130 [default = false]; + // How many combinations of pairs or triplets of variables we want to scan. + optional int32 probing_num_combinations_limit = 272 [default = 20000]; - // When branching on a variable, follow the last best relaxation solution - // value. We use the relaxation with the tightest bound on the objective as - // the best relaxation solution. - optional bool exploit_relaxation_solution = 161 [default = false]; + // Add a shaving phase (where the solver tries to prove that the lower or + // upper bound of a variable are infeasible) to the probing search. (<= 0 + // disables it). + optional double shaving_deterministic_time_in_probing_search = 204 + [default = 0.001]; - // When branching an a variable that directly affect the objective, - // branch on the value that lead to the best objective first. - optional bool exploit_objective = 131 [default = true]; + // Specifies the amount of deterministic time spent of each try at shaving a + // bound in the shaving search. 
+ optional double shaving_search_deterministic_time = 205 [default = 0.1]; - // If set at zero (the default), it is disabled. Otherwise the solver attempts - // probing at every 'probing_period' root node. Period of 1 enables probing at - // every root node. - optional int64 probing_period_at_root = 142 [default = 0]; + // Specifies the threshold between two modes in the shaving procedure. + // If the range of the variable/objective is less than this threshold, then + // the shaving procedure will try to remove values one by one. Otherwise, it + // will try to remove one range at a time. + optional int64 shaving_search_threshold = 290 [default = 64]; - // If true, search will continuously probe Boolean variables, and integer - // variable bounds. - optional bool use_probing_search = 176 [default = false]; + // If true, search will search in ascending max objective value (when + // minimizing) starting from the lower bound of the objective. + optional bool use_objective_lb_search = 228 [default = false]; + + // This search differs from the previous search as it will not use assumptions + // to bound the objective, and it will recreate a full model with the + // hardcoded objective value. + optional bool use_objective_shaving_search = 253 [default = false]; + + // This search takes all Boolean or integer variables, and maximize or + // minimize them in order to reduce their domain. -1 is automatic, otherwise + // value 0 disables it, and 1, 2, or 3 changes something. + optional int32 variables_shaving_level = 289 [default = -1]; // The solver ignores the pseudo costs of variables with number of recordings // less than this threshold. @@ -796,6 +1114,21 @@ message SatParameters { // lower bound instead. optional bool optimize_with_core = 83 [default = false]; + // Do a more conventional tree search (by opposition to SAT based one) where + // we keep all the explored node in a tree. 
This is meant to be used in a + // portfolio and focus on improving the objective lower bound. Keeping the + // whole tree allow us to report a better objective lower bound coming from + // the worst open node in the tree. + optional bool optimize_with_lb_tree_search = 188 [default = false]; + + // Experimental. Save the current LP basis at each node of the search tree so + // that when we jump around, we can load it and reduce the number of LP + // iterations needed. + // + // It currently works okay if we do not change the lp with cuts or + // simplification... More work is needed to make it robust in all cases. + optional bool save_lp_basis_in_lb_tree_search = 284 [default = false]; + // If non-negative, perform a binary search on the objective variable in order // to find an [min, max] interval outside of which the solver proved unsat/sat // under this amount of conflict. This can quickly reduce the objective domain @@ -810,13 +1143,135 @@ message SatParameters { // http://aaai.org/ocs/index.php/AAAI/AAAI17/paper/view/14489 optional bool optimize_with_max_hs = 85 [default = false]; + // Parameters for an heuristic similar to the one described in the paper: + // "Feasibility Jump: an LP-free Lagrangian MIP heuristic", Bjørnar + // Luteberget, Giorgio Sartor, 2023, Mathematical Programming Computation. + optional bool use_feasibility_jump = 265 [default = true]; + + // Disable every other type of subsolver, setting this turns CP-SAT into a + // pure local-search solver. + optional bool use_ls_only = 240 [default = false]; + + // On each restart, we randomly choose if we use decay (with this parameter) + // or no decay. + optional double feasibility_jump_decay = 242 [default = 0.95]; + + // How much do we linearize the problem in the local search code. + optional int32 feasibility_jump_linearization_level = 257 [default = 2]; + + // This is a factor that directly influence the work before each restart. + // Increasing it leads to longer restart. 
+ optional int32 feasibility_jump_restart_factor = 258 [default = 1]; + + // How much dtime for each LS batch. + optional double feasibility_jump_batch_dtime = 292 [default = 0.1]; + + // Probability for a variable to have a non default value upon restarts or + // perturbations. + optional double feasibility_jump_var_randomization_probability = 247 + [default = 0.05]; + + // Max distance between the default value and the pertubated value relative to + // the range of the domain of the variable. + optional double feasibility_jump_var_perburbation_range_ratio = 248 + [default = 0.2]; + + // When stagnating, feasibility jump will either restart from a default + // solution (with some possible randomization), or randomly pertubate the + // current solution. This parameter selects the first option. + optional bool feasibility_jump_enable_restarts = 250 [default = true]; + + // Maximum size of no_overlap or no_overlap_2d constraint for a quadratic + // expansion. This might look a lot, but by expanding such constraint, we get + // a linear time evaluation per single variable moves instead of a slow O(n + // log n) one. + optional int32 feasibility_jump_max_expanded_constraint_size = 264 + [default = 500]; + + // This will create incomplete subsolvers (that are not LNS subsolvers) + // that use the feasibility jump code to find improving solution, treating + // the objective improvement as a hard constraint. + optional int32 num_violation_ls = 244 [default = 0]; + + // How long violation_ls should wait before perturbating a solution. + optional int32 violation_ls_perturbation_period = 249 [default = 100]; + + // Probability of using compound move search each restart. + // TODO(user): Add reference to paper when published. + optional double violation_ls_compound_move_probability = 259 [default = 0.5]; + + // Enables shared tree search. + // If positive, start this many complete worker threads to explore a shared + // search tree. 
These workers communicate objective bounds and simple decision + // nogoods relating to the shared prefix of the tree, and will avoid exploring + // the same subtrees as one another. + // Specifying a negative number uses a heuristic to select an appropriate + // number of shared tree workers based on the total number of workers. + optional int32 shared_tree_num_workers = 235 [default = 0]; + + // Set on shared subtree workers. Users should not set this directly. + optional bool use_shared_tree_search = 236 [default = false]; + + // Minimum restarts before a worker will replace a subtree + // that looks "bad" based on the average LBD of learned clauses. + optional int32 shared_tree_worker_min_restarts_per_subtree = 282 + [default = 1]; + + // If true, workers share more of the information from their local trail. + // Specifically, literals implied by the shared tree decisions. + optional bool shared_tree_worker_enable_trail_sharing = 295 [default = true]; + + // If true, shared tree workers share their target phase when returning an + // assigned subtree for the next worker to use. + optional bool shared_tree_worker_enable_phase_sharing = 304 [default = true]; + + // How many open leaf nodes should the shared tree maintain per worker. + optional double shared_tree_open_leaves_per_worker = 281 [default = 2.0]; + + // In order to limit total shared memory and communication overhead, limit the + // total number of nodes that may be generated in the shared tree. If the + // shared tree runs out of unassigned leaves, workers act as portfolio + // workers. Note: this limit includes interior nodes, not just leaves. + optional int32 shared_tree_max_nodes_per_worker = 238 [default = 10000]; + + enum SharedTreeSplitStrategy { + // Uses the default strategy, currently equivalent to + // SPLIT_STRATEGY_DISCREPANCY. + SPLIT_STRATEGY_AUTO = 0; + // Only accept splits if the node to be split's depth+discrepancy is minimal + // for the desired number of leaves. 
+ // The preferred child for discrepancy calculation is the one with the + // lowest objective lower bound or the original branch direction if the + // bounds are equal. This rule allows twice as many workers to work in the + // preferred subtree as non-preferred. + SPLIT_STRATEGY_DISCREPANCY = 1; + // Only split nodes with an objective lb equal to the global lb. If there is + // no objective, this is equivalent to SPLIT_STRATEGY_FIRST_PROPOSAL. + SPLIT_STRATEGY_OBJECTIVE_LB = 2; + // Attempt to keep the shared tree balanced. + SPLIT_STRATEGY_BALANCED_TREE = 3; + // Workers race to split their subtree, the winner's proposal is accepted. + SPLIT_STRATEGY_FIRST_PROPOSAL = 4; + } + optional SharedTreeSplitStrategy shared_tree_split_strategy = 239 + [default = SPLIT_STRATEGY_AUTO]; + + // How much deeper compared to the ideal max depth of the tree is considered + // "balanced" enough to still accept a split. Without such a tolerance, + // sometimes the tree can only be split by a single worker, and they may not + // generate a split for some time. In contrast, with a tolerance of 1, at + // least half of all workers should be able to split the tree as soon as a + // split becomes required. This only has an effect on + // SPLIT_STRATEGY_BALANCED_TREE and SPLIT_STRATEGY_DISCREPANCY. + optional int32 shared_tree_balance_tolerance = 305 [default = 1]; + // Whether we enumerate all solutions of a problem without objective. Note - // that setting this to true automatically disable the presolve. This is - // because the presolve rules only guarantee the existence of one feasible - // solution to the presolved problem. + // that setting this to true automatically disable some presolve reduction + // that can remove feasible solution. That is it has the same effect as + // setting keep_all_feasible_solutions_in_presolve. // - // TODO(user): Do not disable the presolve and let the user choose what - // behavior is best by setting keep_all_feasible_solutions_in_presolve. 
+ // TODO(user): Do not do that and let the user choose what behavior is best by + // setting keep_all_feasible_solutions_in_presolve ? optional bool enumerate_all_solutions = 87 [default = false]; // If true, we disable the presolve reductions that remove feasible solutions @@ -836,8 +1291,22 @@ message SatParameters { // tightened_variables field in CpSolverResponse for more details. optional bool fill_tightened_domains_in_response = 132 [default = false]; + // If true, the final response addition_solutions field will be filled with + // all solutions from our solutions pool. + // + // Note that if both this field and enumerate_all_solutions is true, we will + // copy to the pool all of the solution found. So if solution_pool_size is big + // enough, you can get all solutions this way instead of using the solution + // callback. + // + // Note that this only affect the "final" solution, not the one passed to the + // solution callbacks. + optional bool fill_additional_solutions_in_response = 194 [default = false]; + // If true, the solver will add a default integer branching strategy to the - // already defined search strategy. + // already defined search strategy. If not, some variable might still not be + // fixed at the end of the search. For now we assume these variable can just + // be set to their lower bound. optional bool instantiate_all_variables = 106 [default = true]; // If true, then the precedences propagator try to detect for each variable if @@ -850,39 +1319,26 @@ message SatParameters { optional bool stop_after_first_solution = 98 [default = false]; // Mainly used when improving the presolver. When true, stops the solver after - // the presolve is complete. + // the presolve is complete (or after loading and root level propagation). optional bool stop_after_presolve = 149 [default = false]; + optional bool stop_after_root_propagation = 252 [default = false]; - // Specify the number of parallel workers to use during search. 
- // A number <= 1 means no parallelism. - // As of 2020-04-10, if you're using SAT via MPSolver (to solve integer - // programs) this field is overridden with a value of 8, if the field is not - // set *explicitly*. Thus, always set this field explicitly or via - // MPSolver::SetNumThreads(). - optional int32 num_search_workers = 100 [default = 1]; - - // Experimental. If this is true, then we interleave all our major search - // strategy and distribute the work amongst num_search_workers. - // - // The search is deterministic (independently of num_search_workers!), and we - // schedule and wait for interleave_batch_size task to be completed before - // synchronizing and scheduling the next batch of tasks. - optional bool interleave_search = 136 [default = false]; - optional int32 interleave_batch_size = 134 [default = 1]; - - // Temporary parameter until the memory usage is more optimized. - optional bool reduce_memory_usage_in_interleave_mode = 141 [default = false]; + // LNS parameters. - // Allows objective sharing between workers. - optional bool share_objective_bounds = 113 [default = true]; + // Initial parameters for neighborhood generation. + optional double lns_initial_difficulty = 307 [default = 0.5]; + optional double lns_initial_deterministic_limit = 308 [default = 0.1]; - // Allows sharing of the bounds of modified variables at level 0. - optional bool share_level_zero_bounds = 114 [default = true]; + // Testing parameters used to disable all lns workers. + optional bool use_lns = 283 [default = true]; - // LNS parameters. + // Experimental parameters to disable everything but lns. optional bool use_lns_only = 101 [default = false]; - optional bool lns_focus_on_decision_variables = 105 [default = false]; - optional bool lns_expand_intervals_in_constraint_graph = 184 [default = true]; + + // Size of the top-n different solutions kept by the solver. + // This parameter must be > 0. 
+ // Currently this only impact the "base" solution chosen for a LNS fragment. + optional int32 solution_pool_size = 193 [default = 3]; // Turns on relaxation induced neighborhood generator. optional bool use_rins_lns = 129 [default = true]; @@ -890,6 +1346,14 @@ message SatParameters { // Adds a feasibility pump subsolver along with lns subsolvers. optional bool use_feasibility_pump = 164 [default = true]; + // Turns on neighborhood generator based on local branching LP. Based on Huang + // et al., "Local Branching Relaxation Heuristics for Integer Linear + // Programs", 2023. + optional bool use_lb_relax_lns = 255 [default = true]; + + // Only use lb-relax if we have at least that many workers. + optional int32 lb_relax_num_workers_threshold = 296 [default = 16]; + // Rounding method to use for feasibility pump. enum FPRoundingMethod { // Rounds to the nearest integer value. @@ -914,29 +1378,28 @@ message SatParameters { } optional FPRoundingMethod fp_rounding = 165 [default = PROPAGATION_ASSISTED]; - // Turns on a lns worker which solves relaxed version of the original problem - // by removing constraints from the problem in order to get better bounds. - optional bool use_relaxation_lns = 150 [default = false]; - // If true, registers more lns subsolvers with different parameters. optional bool diversify_lns_params = 137 [default = false]; // Randomize fixed search. optional bool randomize_search = 103 [default = false]; - // Search randomization will collect equivalent 'max valued' variables, and - // pick one randomly. For instance, if the variable strategy is CHOOSE_FIRST, - // all unassigned variables are equivalent. 
If the variable strategy is - // CHOOSE_LOWEST_MIN, and `lm` is the current lowest min of all unassigned - // variables, then the set of max valued variables will be all unassigned - // variables where - // lm <= variable min <= lm + search_randomization_tolerance - optional int64 search_randomization_tolerance = 104 [default = 0]; + // Search randomization will collect the top + // 'search_random_variable_pool_size' valued variables, and pick one randomly. + // The value of the variable is specific to each strategy. + optional int64 search_random_variable_pool_size = 104 [default = 0]; + + // Experimental code: specify if the objective pushes all tasks toward the + // start of the schedule. + optional bool push_all_tasks_toward_start = 262 [default = false]; // If true, we automatically detect variables whose constraint are always // enforced by the same literal and we mark them as optional. This allows // to propagate them as if they were present in some situation. - optional bool use_optional_variables = 108 [default = true]; + // + // TODO(user): This is experimental and seems to lead to wrong optimal in + // some situation. It should however gives correct solutions. Fix. + optional bool use_optional_variables = 108 [default = false]; // The solver usually exploit the LP relaxation of a model. If this option is // true, then whatever is infered by the LP will be used like an heuristic to @@ -944,16 +1407,15 @@ message SatParameters { // numerical imprecision issues. optional bool use_exact_lp_reason = 109 [default = true]; - // If true, the solver attemts to generate more info inside lp propagator by - // branching on some variables if certain criteria are met during the search - // tree exploration. - optional bool use_branching_in_lp = 139 [default = false]; - // This can be beneficial if there is a lot of no-overlap constraints but a // relatively low number of different intervals in the problem. 
Like 1000 // intervals, but 1M intervals in the no-overlap constraints covering them. optional bool use_combined_no_overlap = 133 [default = false]; + // All at_most_one constraints with a size <= param will be replaced by a + // quadratic number of binary implications. + optional int32 at_most_one_max_expansion_size = 270 [default = 3]; + // Indicates if the CP-SAT layer should catch Control-C (SIGINT) signals // when calling solve. If set, catching the SIGINT signal will terminate the // search gracefully, as if a time limit was reached. @@ -970,17 +1432,215 @@ message SatParameters { // time to do such polish step. optional bool polish_lp_solution = 175 [default = false]; + // The internal LP tolerances used by CP-SAT. These applies to the internal + // and scaled problem. If the domains of your variables are large it might be + // good to use lower tolerances. If your problem is binary with low + // coefficients, it might be good to use higher ones to speed-up the lp + // solves. + optional double lp_primal_tolerance = 266 [default = 1e-7]; + optional double lp_dual_tolerance = 267 [default = 1e-7]; + // Temporary flag util the feature is more mature. This convert intervals to // the newer proto format that support affine start/var/end instead of just - // variables. It changes a bit the search and is not always better currently. - optional bool convert_intervals = 177 [default = false]; + // variables. + optional bool convert_intervals = 177 [default = true]; // Whether we try to automatically detect the symmetries in a model and // exploit them. Currently, at level 1 we detect them in presolve and try // to fix Booleans. At level 2, we also do some form of dynamic symmetry - // breaking during search. + // breaking during search. At level 3, we also detect symmetries for very + // large models, which can be slow. At level 4, we try to break as much + // symmetry as possible in presolve. 
optional int32 symmetry_level = 183 [default = 2]; + // When we have symmetry, it is possible to "fold" all variables from the same + // orbit into a single variable, while having the same power of LP relaxation. + // This can help significantly on symmetric problem. However there is + // currently a bit of overhead as the rest of the solver need to do some + // translation between the folded LP and the rest of the problem. + optional bool use_symmetry_in_lp = 301 [default = false]; + + // Experimental. This will compute the symmetry of the problem once and for + // all. All presolve operations we do should keep the symmetry group intact + // or modify it properly. For now we have really little support for this. We + // will disable a bunch of presolve operations that could be supported. + optional bool keep_symmetry_in_presolve = 303 [default = false]; + + // Deterministic time limit for symmetry detection. + optional double symmetry_detection_deterministic_time_limit = 302 + [default = 1.0]; + + // The new linear propagation code treat all constraints at once and use + // an adaptation of Bellman-Ford-Tarjan to propagate constraint in a smarter + // order and potentially detect propagation cycle earlier. + optional bool new_linear_propagation = 224 [default = true]; + + // Linear constraints that are not pseudo-Boolean and that are longer than + // this size will be split into sqrt(size) intermediate sums in order to have + // faster propagation in the CP engine. + optional int32 linear_split_size = 256 [default = 100]; + + // ========================================================================== + // Linear programming relaxation + // ========================================================================== + + // A non-negative level indicating the type of constraints we consider in the + // LP relaxation. At level zero, no LP relaxation is used. At level 1, only + // the linear constraint and full encoding are added. 
At level 2, we also add + // all the Boolean constraints. + optional int32 linearization_level = 90 [default = 1]; + + // A non-negative level indicating how much we should try to fully encode + // Integer variables as Boolean. + optional int32 boolean_encoding_level = 107 [default = 1]; + + // When loading a*x + b*y ==/!= c when x and y are both fully encoded. + // The solver may decide to replace the linear equation by a set of clauses. + // This is triggered if the sizes of the domains of x and y are below the + // threshold. + optional int32 max_domain_size_when_encoding_eq_neq_constraints = 191 + [default = 16]; + + // The limit on the number of cuts in our cut pool. When this is reached we do + // not generate cuts anymore. + // + // TODO(user): We should probably remove this parameter, and just always + // generate cuts but only keep the best n or something. + optional int32 max_num_cuts = 91 [default = 10000]; + + // Control the global cut effort. Zero will turn off all cuts. For now we just + // have one level. Note also that most cuts are only used at linearization + // level >= 2. + optional int32 cut_level = 196 [default = 1]; + + // For the cuts that can be generated at any level, this controls whether we only + // try to generate them at the root node. + optional bool only_add_cuts_at_level_zero = 92 [default = false]; + + // When the LP objective is fractional, do we add the cut that forces the + // linear objective expression to be greater or equal to this fractional value + // rounded up? We can always do that since our objective is integer, and + // combined with MIR heuristic to reduce the coefficient of such cut, it can + // help. + optional bool add_objective_cut = 197 [default = false]; + + // Whether we generate and add Chvatal-Gomory cuts to the LP at root node. + // Note that for now, this is not heavily tuned. + optional bool add_cg_cuts = 117 [default = true]; + + // Whether we generate MIR cuts at root node.
+ // Note that for now, this is not heavily tuned. + optional bool add_mir_cuts = 120 [default = true]; + + // Whether we generate Zero-Half cuts at root node. + // Note that for now, this is not heavily tuned. + optional bool add_zero_half_cuts = 169 [default = true]; + + // Whether we generate clique cuts from the binary implication graph. Note + // that as the search goes on, this graph will contain new binary clauses + // learned by the SAT engine. + optional bool add_clique_cuts = 172 [default = true]; + + // Whether we generate RLT cuts. This is still experimental but can help on + // binary problems with a lot of clauses of size 3. + optional bool add_rlt_cuts = 279 [default = true]; + + // Cut generator for all diffs can add too many cuts for large all_diff + // constraints. This parameter restricts the large all_diff constraints to + // have a cut generator. + optional int32 max_all_diff_cut_size = 148 [default = 64]; + + // For the lin max constraints, generates the cuts described in "Strong + // mixed-integer programming formulations for trained neural networks" by Ross + // Anderson et al. (https://arxiv.org/pdf/1811.01988.pdf) + optional bool add_lin_max_cuts = 152 [default = true]; + + // In the integer rounding procedure used for MIR and Gomory cuts, the maximum + // "scaling" we use (must be positive). The lower this is, the lower the + // integer coefficients of the cut will be. Note that cuts generated by lower + // values are not necessarily worse than cuts generated by larger values. There + // is no strict dominance relationship. + // + // Setting this to 2 results in the "strong fractional rounding" of Letchford + // and Lodi. + optional int32 max_integer_rounding_scaling = 119 [default = 600]; + + // If true, we start with an empty LP, and only add constraints not satisfied + // by the current LP solution batch by batch.
A constraint that is only added + // like this is known as a "lazy" constraint in the literature, except that we + // currently consider all constraints as lazy here. + optional bool add_lp_constraints_lazily = 112 [default = true]; + + // Even at the root node, we do not want to spend too much time on the LP if + // it is "difficult". So we solve it in "chunks" of that many iterations. The + // solve will be continued down in the tree or the next time we go back to the + // root node. + optional int32 root_lp_iterations = 227 [default = 2000]; + + // While adding constraints, skip the constraints which have orthogonality + // less than 'min_orthogonality_for_lp_constraints' with already added + // constraints during current call. Orthogonality is defined as 1 - + // cosine(vector angle between constraints). A value of zero disables this + // feature. + optional double min_orthogonality_for_lp_constraints = 115 [default = 0.05]; + + // Max number of times we perform cut generation and resolve the LP at level 0. + optional int32 max_cut_rounds_at_level_zero = 154 [default = 1]; + + // If a constraint/cut in LP is not active for that many consecutive OPTIMAL + // solves, remove it from the LP. Note that it might be added again later if + // it becomes violated by the current LP solution. + optional int32 max_consecutive_inactive_count = 121 [default = 100]; + + // These parameters are similar to sat clause management activity parameters. + // They are effective only if the number of generated cuts exceeds the storage + // limit. Default values are based on a few experiments on miplib instances. + optional double cut_max_active_count_value = 155 [default = 1e10]; + optional double cut_active_count_decay = 156 [default = 0.8]; + + // Target number of constraints to remove during cleanup. + optional int32 cut_cleanup_target = 157 [default = 1000]; + + // Add that many lazy constraints (or cuts) at once in the LP.
Note that at + // the beginning of the solve, we do add more than this. + optional int32 new_constraints_batch_size = 122 [default = 50]; + + // All the "exploit_*" parameters below work in the same way: when branching + // on an IntegerVariable, these parameters affect the value the variable is + // branched on. Currently the first heuristic that triggers wins in the order + // in which they appear below. + // + // TODO(user): Maybe do like for the restart algorithm, introduce an enum + // and a repeated field that controls the order in which these are applied? + + // If true and the Lp relaxation of the problem has an integer optimal + // solution, try to exploit it. Note that since the LP relaxation may not + // contain all the constraints, such a solution is not necessarily a solution + // of the full problem. + optional bool exploit_integer_lp_solution = 94 [default = true]; + + // If true and the Lp relaxation of the problem has a solution, try to exploit + // it. This is the same as above except in this case the lp solution might not be + // an integer solution. + optional bool exploit_all_lp_solution = 116 [default = true]; + + // When branching on a variable, follow the last best solution value. + optional bool exploit_best_solution = 130 [default = false]; + + // When branching on a variable, follow the last best relaxation solution + // value. We use the relaxation with the tightest bound on the objective as + // the best relaxation solution. + optional bool exploit_relaxation_solution = 161 [default = false]; + + // When branching on a variable that directly affects the objective, + // branch on the value that leads to the best objective first. + optional bool exploit_objective = 131 [default = true]; + + // Infer products of Boolean or of Boolean times IntegerVariable from the + // linear constraints in the problem. This can be used in some cuts, although + // for now we don't really exploit it.
+ optional bool detect_linearized_product = 277 [default = false]; + // ========================================================================== // MIP -> CP-SAT (i.e. IP with integer coeff) conversion parameters that are // used by our automatic "scaling" algorithm. @@ -1001,24 +1661,49 @@ message SatParameters { // specify continuous variable domain with the wanted precision. optional double mip_var_scaling = 125 [default = 1.0]; - // If true, some continuous variable might be automatially scaled. For now, + // If this is false, then mip_var_scaling is only applied to variables with + // "small" domain. If it is true, we scale all floating point variables + // independently of their domain. + optional bool mip_scale_large_domain = 225 [default = false]; + + // If true, some continuous variable might be automatically scaled. For now, // this is only the case where we detect that a variable is actually an // integer multiple of a constant. For instance, variables of the form k * 0.5 // are quite frequent, and if we detect this, we will scale such variable // domain by 2 to make it implied integer. optional bool mip_automatically_scale_variables = 166 [default = true]; + // If one tries to solve a MIP model with CP-SAT, because we assume all variables + // to be integer after scaling, we will not necessarily have the correct + // optimal. Note however that all feasible solutions are valid since we will + // just solve a more restricted version of the original problem. + // + // This parameter is here to prevent users from thinking the solution is optimal + // when it might not be. One will need to manually set this to false to solve + // a MIP model where the optimal might be different. + // + // Note that this is tested after some MIP presolve steps, so even if not + // all original variables are integer, we might end up with a pure IP after + // presolve and after implied integer detection.
+ optional bool only_solve_ip = 222 [default = false]; + // When scaling constraint with double coefficients to integer coefficients, // we will multiply by a power of 2 and round the coefficients. We will choose - // the lowest power such that we have no potential overflow and the worst case - // constraint activity error do not exceed this threshold relative to the - // constraint bounds. + // the lowest power such that we have no potential overflow (see + // mip_max_activity_exponent) and the worst case constraint activity error + // does not exceed this threshold. + // + // Note that we also detect constraints with rational coefficients and scale + // them accordingly when it seems better instead of using a power of 2. // - // We also use this to decide by how much we relax the constraint bounds so - // that we can have a feasible integer solution of constraints involving - // continuous variable. This is required for instance when you have an == rhs - // constraint as in many situation you cannot have a perfect equality with - // integer variables and coefficients. + // We also relax all constraint bounds by this absolute value. For pure + // integer constraints, if this value is lower than one, this will not change + // anything. However it is needed when scaling MIP problems. + // + // If we manage to scale a constraint correctly, the maximum error we can make + // will be twice this value (once for the scaling error and once for the + // relaxed bounds). If we are not able to scale that well, we will display + // that fact but still scale as best as we can. optional double mip_wanted_precision = 126 [default = 1e-6]; // To avoid integer overflow, we always force the maximum possible constraint @@ -1035,4 +1720,37 @@ message SatParameters { // always reach the wanted precision during scaling. We use this threshold to // enphasize in the logs when the precision seems bad.
optional double mip_check_precision = 128 [default = 1e-4]; + + // Even if we make a big error when scaling the objective, we can always derive + // a correct lower bound on the original objective by using the exact lower + // bound on the scaled integer version of the objective. This should be fast, + // but if you don't care about having a precise lower bound, you can turn it + // off. + optional bool mip_compute_true_objective_bound = 198 [default = true]; + + // Any finite values in the input MIP must be below this threshold, otherwise + // the model will be reported invalid. This is needed to avoid floating point + // overflow when evaluating bounds * coeff for instance. We are a bit more + // defensive, but in practice, users shouldn't use super large values in a + // MIP. + optional double mip_max_valid_magnitude = 199 [default = 1e20]; + + // By default, any variable/constraint bound with a finite value and a + // magnitude greater than the mip_max_valid_magnitude will result in an + // invalid model. This flag changes the behavior such that such bounds are + // silently transformed to +∞ or -∞. + // + // It is recommended to keep it at false, and create valid bounds. + optional bool mip_treat_high_magnitude_bounds_as_infinity = 278 + [default = false]; + + // Any value in the input mip with a magnitude lower than this will be set to + // zero. This is to avoid some issues in LP presolving. + optional double mip_drop_tolerance = 232 [default = 1e-16]; + + // When solving a MIP, we do some basic floating point presolving before + // scaling the problem to integer to be handled by CP-SAT. This controls how + // much of that presolve we do. It can help to better scale floating point + // models, but it is not always behaving nicely. + optional int32 mip_presolve_level = 261 [default = 2]; }